code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""Converting tools for extinction."""
import pylab as P
import numpy as N
def from_ebv_sfd_to_sdss_albd(ebv):
    """Return A(lbd) for the 5 SDSS filters: u, g, r, i, z.

    :param ebv: E(B-V) color excess (scalar or array-like)
    :return: dict mapping each SDSS band name to A(lbd) = coeff * E(B-V)
    """
    # Per-band A(lbd)/E(B-V) conversion coefficients.
    band_coeffs = {'u': 5.155, 'g': 3.793, 'r': 2.751, 'i': 2.086, 'z': 1.479}
    # Convert once so scalar and array inputs are handled uniformly.
    ebv_arr = N.array(ebv)
    return {band: factor * ebv_arr for band, factor in band_coeffs.items()}
def from_sdss_albd_to_megacam_albd(sdss):
    """Return A(lbd) for the 6 Megacam filters: u, g, r, i_old, i_new, z.

    :param sdss: dict of SDSS extinctions with keys 'u', 'g', 'r', 'i', 'z'
    :return: dict with keys 'u', 'g', 'r', 'i_old', 'i_new', 'z'
    """
    # SDSS color terms used by the linear SDSS -> Megacam transformations.
    u_minus_g = sdss['u'] - sdss['g']
    g_minus_r = sdss['g'] - sdss['r']
    r_minus_i = sdss['r'] - sdss['i']
    i_minus_z = sdss['i'] - sdss['z']
    return {
        'u': sdss['u'] - 0.241 * u_minus_g,
        'g': sdss['g'] - 0.153 * g_minus_r,
        'r': sdss['r'] - 0.024 * g_minus_r,
        'z': sdss['z'] - 0.074 * i_minus_z,
        # Two i-band filters exist for Megacam (the old one broke in 2007).
        'i_old': sdss['i'] - 0.085 * r_minus_i,
        'i_new': sdss['i'] - 0.003 * r_minus_i,
    }
def from_ebv_sfd_to_megacam_albd(ebv):
    """Return A(lbd) for the 6 Megacam filters: u, g, r, i, z.

    :param ebv: E(B-V) color excess (scalar or array-like)
    :return: dict of Megacam extinctions (see from_sdss_albd_to_megacam_albd)
    """
    # Chain the two conversions: E(B-V) -> SDSS A(lbd) -> Megacam A(lbd).
    sdss_albd = from_ebv_sfd_to_sdss_albd(ebv)
    return from_sdss_albd_to_megacam_albd(sdss_albd)
def plots(ra, dec, ebv, albd, title=None, figname=""):
    """Plot the extinction sky-map.

    Produces and saves two figures, then shows them:
    a RA/DEC scatter map colored by E(B-V) ("<figname>_ebmv_map.png")
    and a histogram of A(lbd) ("<figname>_albd.png").

    :param ra: right-ascension coordinates (deg)
    :param dec: declination coordinates (deg)
    :param ebv: E(B-V) values used for the map color scale
    :param albd: extinction values A(lbd) for the histogram
    :param title: optional title applied to both figures
    :param figname: prefix of the output PNG file names
    """
    # Sky map colored by E(B-V)
    fig = P.figure()
    ax = fig.add_subplot(111, xlabel='RA (deg)', ylabel='DEC (deg)')
    scat = ax.scatter(ra, dec, c=ebv, cmap=(P.cm.jet), edgecolor='none')
    cbar = fig.colorbar(scat)
    cbar.set_label('E(B-V)')
    if title is not None:
        ax.set_title(title)
    fig.savefig(figname + "_ebmv_map.png")
    # Histogram of A(lbd), annotated with its mean value in the legend
    fig = P.figure()
    ax = fig.add_subplot(111, xlabel='A(lbd)', ylabel='#')
    ax.hist(albd, histtype='step', lw=2, label='<>=%.2f' % N.mean(albd))
    if title is not None:
        ax.set_title(title)
    ax.legend(loc='best')
    fig.savefig(figname + "_albd.png")
    P.show()
| [
"pylab.figure",
"numpy.array",
"numpy.mean",
"pylab.show"
] | [((1135, 1145), 'pylab.figure', 'P.figure', ([], {}), '()\n', (1143, 1145), True, 'import pylab as P\n'), ((1455, 1465), 'pylab.figure', 'P.figure', ([], {}), '()\n', (1463, 1465), True, 'import pylab as P\n'), ((1722, 1730), 'pylab.show', 'P.show', ([], {}), '()\n', (1728, 1730), True, 'import pylab as P\n'), ((276, 288), 'numpy.array', 'N.array', (['ebv'], {}), '(ebv)\n', (283, 288), True, 'import numpy as N\n'), ((1584, 1596), 'numpy.mean', 'N.mean', (['albd'], {}), '(albd)\n', (1590, 1596), True, 'import numpy as N\n')] |
import os
from packaging.version import Version
import numpy as np
import pandas as pd
from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box
from fiona.errors import DriverError
import geopandas
from geopandas import GeoDataFrame, GeoSeries, overlay, read_file
from geopandas import _compat
from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal
import pytest
# Directory holding the saved expected-result files for the overlay tests.
DATA = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data", "overlay")
# overlay() requires a spatial index; skip the whole module when unavailable.
pytestmark = pytest.mark.skip_no_sindex
# pandas 1.3.3 has a regression affecting some overlay results (GH #2101).
pandas_133 = Version(pd.__version__) == Version("1.3.3")
@pytest.fixture
def dfs(request):
    """Two overlapping two-polygon GeoDataFrames used by most tests."""
    left_geoms = GeoSeries(
        [
            Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]),
            Polygon([(2, 2), (4, 2), (4, 4), (2, 4)]),
        ]
    )
    right_geoms = GeoSeries(
        [
            Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
            Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
        ]
    )
    left = GeoDataFrame({"col1": [1, 2], "geometry": left_geoms})
    right = GeoDataFrame({"col2": [1, 2], "geometry": right_geoms})
    return left, right
@pytest.fixture(params=["default-index", "int-index", "string-index"])
def dfs_index(request, dfs):
    """Variants of ``dfs`` with different (partly mismatching) index types."""
    left, right = dfs
    variant = request.param
    if variant == "int-index":
        left.index = [1, 2]
        right.index = [0, 2]
    elif variant == "string-index":
        # only the left frame gets a string index; the right keeps the default
        left.index = ["row1", "row2"]
    return left, right
@pytest.fixture(
    params=["union", "intersection", "difference", "symmetric_difference", "identity"]
)
def how(request):
    """Parametrize over every supported overlay operation."""
    # These operations hit a known pandas 1.3.3 regression (GH #2101).
    affected_by_pandas_bug = ("symmetric_difference", "identity", "union")
    if pandas_133 and request.param in affected_by_pandas_bug:
        pytest.xfail("Regression in pandas 1.3.3 (GH #2101)")
    return request.param
@pytest.fixture(params=[True, False])
def keep_geom_type(request):
    """Parametrize the ``keep_geom_type`` flag passed to overlay()."""
    return request.param
def test_overlay(dfs_index, how):
    """
    Basic overlay test with small dummy example dataframes (from docs).
    Results obtained using QGIS 2.16 (Vector -> Geoprocessing Tools ->
    Intersection / Union / ...), saved to GeoJSON
    """
    df1, df2 = dfs_index
    result = overlay(df1, df2, how=how)
    # construction of result
    def _read(name):
        # Load the saved expected result for the given operation name;
        # drop the CRS so it compares equal to the CRS-less fixtures.
        expected = read_file(
            os.path.join(DATA, "polys", "df1_df2-{0}.geojson".format(name))
        )
        expected.crs = None
        return expected
    if how == "identity":
        # 'identity' equals intersection plus the left frame's difference
        expected_intersection = _read("intersection")
        expected_difference = _read("difference")
        expected = pd.concat(
            [expected_intersection, expected_difference], ignore_index=True, sort=False
        )
        # concat of int and NaN rows yields float; match that dtype
        expected["col1"] = expected["col1"].astype(float)
    else:
        expected = _read(how)
    # TODO needed adaptations to result
    if how == "union":
        result = result.sort_values(["col1", "col2"]).reset_index(drop=True)
    elif how == "difference":
        result = result.reset_index(drop=True)
    assert_geodataframe_equal(result, expected, check_column_type=False)
    # for difference also reversed
    if how == "difference":
        result = overlay(df2, df1, how=how)
        result = result.reset_index(drop=True)
        expected = _read("difference-inverse")
        assert_geodataframe_equal(result, expected, check_column_type=False)
@pytest.mark.filterwarnings("ignore:GeoSeries crs mismatch:UserWarning")
def test_overlay_nybb(how):
    """Overlay the NYC boroughs with saved circles; compare against QGIS output.

    The QGIS reference results contain several known defects that are
    patched below before the frames are compared.
    """
    polydf = read_file(geopandas.datasets.get_path("nybb"))
    # The circles have been constructed and saved at the time the expected
    # results were created (exact output of buffer algorithm can slightly
    # change over time -> use saved ones)
    # # construct circles dataframe
    # N = 10
    # b = [int(x) for x in polydf.total_bounds]
    # polydf2 = GeoDataFrame(
    #     [
    #         {"geometry": Point(x, y).buffer(10000), "value1": x + y, "value2": x - y}
    #         for x, y in zip(
    #             range(b[0], b[2], int((b[2] - b[0]) / N)),
    #             range(b[1], b[3], int((b[3] - b[1]) / N)),
    #         )
    #     ],
    #     crs=polydf.crs,
    # )
    polydf2 = read_file(os.path.join(DATA, "nybb_qgis", "polydf2.shp"))
    result = overlay(polydf, polydf2, how=how)
    cols = ["BoroCode", "BoroName", "Shape_Leng", "Shape_Area", "value1", "value2"]
    if how == "difference":
        # 'difference' keeps only the left frame's columns
        cols = cols[:-2]
    # expected result
    if how == "identity":
        # read union one, further down below we take the appropriate subset
        expected = read_file(os.path.join(DATA, "nybb_qgis", "qgis-union.shp"))
    else:
        expected = read_file(
            os.path.join(DATA, "nybb_qgis", "qgis-{0}.shp".format(how))
        )
    # The result of QGIS for 'union' contains incorrect geometries:
    # 24 is a full original circle overlapping with unioned geometries, and
    # 27 is a completely duplicated row)
    if how == "union":
        expected = expected.drop([24, 27])
        expected.reset_index(inplace=True, drop=True)
    # Eliminate observations without geometries (issue from QGIS)
    expected = expected[expected.is_valid]
    expected.reset_index(inplace=True, drop=True)
    if how == "identity":
        # identity == the union rows where the left frame contributed
        expected = expected[expected.BoroCode.notnull()].copy()
    # Order GeoDataFrames
    expected = expected.sort_values(cols).reset_index(drop=True)
    # TODO needed adaptations to result
    result = result.sort_values(cols).reset_index(drop=True)
    if how in ("union", "identity"):
        # concat < 0.23 sorts, so changes the order of the columns
        # but at least we ensure 'geometry' is the last column
        assert result.columns[-1] == "geometry"
        assert len(result.columns) == len(expected.columns)
        result = result.reindex(columns=expected.columns)
    # the ordering of the spatial index results causes slight deviations
    # in the resultant geometries for multipolygons
    # for more details on the discussion, see:
    # https://github.com/geopandas/geopandas/pull/1338
    # https://github.com/geopandas/geopandas/issues/1337
    # Temporary workaround below:
    # simplify multipolygon geometry comparison
    # since the order of the constituent polygons depends on
    # the ordering of spatial indexing results, we cannot
    # compare symmetric_difference results directly when the
    # resultant geometry is a multipolygon
    # first, check that all bounds and areas are approx equal
    # this is a very rough check for multipolygon equality
    if not _compat.PANDAS_GE_11:
        kwargs = dict(check_less_precise=True)
    else:
        kwargs = {}
    pd.testing.assert_series_equal(
        result.geometry.area, expected.geometry.area, **kwargs
    )
    pd.testing.assert_frame_equal(
        result.geometry.bounds, expected.geometry.bounds, **kwargs
    )
    # There are two cases where the multipolygon have a different number
    # of sub-geometries -> not solved by normalize (and thus drop for now)
    if how == "symmetric_difference":
        expected.loc[9, "geometry"] = None
        result.loc[9, "geometry"] = None
    if how == "union":
        expected.loc[24, "geometry"] = None
        result.loc[24, "geometry"] = None
    assert_geodataframe_equal(
        result,
        expected,
        normalize=True,
        check_crs=False,
        check_column_type=False,
        check_less_precise=True,
    )
def test_overlay_overlap(how):
    """
    Overlay test with overlapping geometries in both dataframes.

    The input fixtures (df1_overlap / df2_overlap) are two pairs of
    overlapping buffered points, created once with
    ``GeoSeries([...]).buffer(1, resolution=2)`` and saved to GeoJSON;
    the expected results were then obtained with QGIS 2.16
    (Vector -> Geoprocessing Tools -> Intersection / Union / ...) and
    saved as ``df1_df2_overlap-{how}.geojson``.
    """
    # No saved expected result exists for 'identity'. Skip up front:
    # the original computed the overlay first and used the redundant
    # form ``raise pytest.skip()`` (pytest.skip() already raises).
    if how == "identity":
        pytest.skip("no expected result file for 'identity'")
    df1 = read_file(os.path.join(DATA, "overlap", "df1_overlap.geojson"))
    df2 = read_file(os.path.join(DATA, "overlap", "df2_overlap.geojson"))
    result = overlay(df1, df2, how=how)
    expected = read_file(
        os.path.join(DATA, "overlap", "df1_df2_overlap-{0}.geojson".format(how))
    )
    if how == "union":
        # the QGIS result has the last row duplicated, so removing this
        expected = expected.iloc[:-1]
    # TODO needed adaptations to result
    result = result.reset_index(drop=True)
    if how == "union":
        result = result.sort_values(["col1", "col2"]).reset_index(drop=True)
    assert_geodataframe_equal(
        result,
        expected,
        normalize=True,
        check_column_type=False,
        check_less_precise=True,
    )
@pytest.mark.parametrize("other_geometry", [False, True])
def test_geometry_not_named_geometry(dfs, how, other_geometry):
    """Overlay must honour the active geometry column even when it is not
    named 'geometry' (and a plain 'geometry' data column may coexist)."""
    # Issue #306
    # Add points and flip names
    df1, df2 = dfs
    df3 = df1.copy()
    df3 = df3.rename(columns={"geometry": "polygons"})
    df3 = df3.set_geometry("polygons")
    if other_geometry:
        # add a decoy non-active 'geometry' column holding points
        df3["geometry"] = df1.centroid.geometry
    assert df3.geometry.name == "polygons"
    res1 = overlay(df1, df2, how=how)
    res2 = overlay(df3, df2, how=how)
    # overlay must not have changed the input's active geometry name
    assert df3.geometry.name == "polygons"
    if how == "difference":
        # in case of 'difference', column names of left frame are preserved
        assert res2.geometry.name == "polygons"
        if other_geometry:
            assert "geometry" in res2.columns
            assert_geoseries_equal(
                res2["geometry"], df3["geometry"], check_series_type=False
            )
            res2 = res2.drop(["geometry"], axis=1)
        # normalize back to 'geometry' so res2 compares equal to res1
        res2 = res2.rename(columns={"polygons": "geometry"})
        res2 = res2.set_geometry("geometry")
    # TODO if existing column is overwritten -> geometry not last column
    if other_geometry and how == "intersection":
        res2 = res2.reindex(columns=res1.columns)
    assert_geodataframe_equal(res1, res2)
    # same exercise with the right frame's geometry renamed to 'geom'
    df4 = df2.copy()
    df4 = df4.rename(columns={"geometry": "geom"})
    df4 = df4.set_geometry("geom")
    if other_geometry:
        df4["geometry"] = df2.centroid.geometry
    assert df4.geometry.name == "geom"
    res1 = overlay(df1, df2, how=how)
    res2 = overlay(df1, df4, how=how)
    assert_geodataframe_equal(res1, res2)
def test_bad_how(dfs):
    """An unknown ``how`` keyword must raise ValueError."""
    left, right = dfs
    with pytest.raises(ValueError):
        overlay(left, right, how="spandex")
def test_duplicate_column_name(dfs, how):
    """Colliding attribute columns get disambiguated with _1/_2 suffixes."""
    if how == "difference":
        pytest.skip("Difference uses columns from one df only.")
    left, right = dfs
    # force a column-name collision between the two inputs
    right_renamed = right.rename(columns={"col2": "col1"})
    res = overlay(left, right_renamed, how=how)
    assert "col1_1" in res.columns
    assert "col1_2" in res.columns
def test_geoseries_warning(dfs):
    """Passing a bare GeoSeries as the second argument is not supported."""
    # Issue #305
    left, right = dfs
    with pytest.raises(NotImplementedError):
        overlay(left, right.geometry, how="union")
def test_preserve_crs(dfs, how):
    """The (possibly absent) CRS of the inputs carries over to the result."""
    left, right = dfs
    # no CRS on either input -> no CRS on the output
    assert overlay(left, right, how=how).crs is None
    # identical CRS on both inputs -> preserved on the output
    shared_crs = "epsg:4326"
    left.crs = shared_crs
    right.crs = shared_crs
    assert overlay(left, right, how=how).crs == shared_crs
def test_crs_mismatch(dfs, how):
    """Differing input CRS must trigger a UserWarning."""
    left, right = dfs
    left.crs = 4326
    right.crs = 3857
    with pytest.warns(UserWarning, match="CRS mismatch between the CRS"):
        overlay(left, right, how=how)
def test_empty_intersection(dfs):
    """Overlaying non-touching frames yields an empty, well-formed result."""
    df1, _ = dfs
    # polygons entirely in the negative quadrant, far from df1
    far_away = GeoSeries(
        [
            Polygon([(-1, -1), (-3, -1), (-3, -3), (-1, -3)]),
            Polygon([(-3, -3), (-5, -3), (-5, -5), (-3, -5)]),
        ]
    )
    df3 = GeoDataFrame({"geometry": far_away, "col3": [1, 2]})
    expected = GeoDataFrame([], columns=["col1", "col3", "geometry"])
    result = overlay(df1, df3)
    assert_geodataframe_equal(result, expected, check_dtype=False)
def test_correct_index(dfs):
    # GH883 - make sure the result index is properly reset
    _, df2 = dfs
    geoms = GeoSeries(
        [
            Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
            Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
            Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
        ]
    )
    df3 = GeoDataFrame({"geometry": geoms, "col3": [1, 2, 3]})
    # expected intersections of df3's first/last polygons with df2
    inter1 = Polygon([(1, 1), (1, 3), (3, 3), (3, 1), (1, 1)])
    inter2 = Polygon([(3, 3), (3, 5), (5, 5), (5, 3), (3, 3)])
    expected = GeoDataFrame(
        [[1, 1, inter1], [3, 2, inter2]], columns=["col3", "col2", "geometry"]
    )
    result = overlay(df3, df2, keep_geom_type=True)
    assert_geodataframe_equal(result, expected)
def test_warn_on_keep_geom_type(dfs):
    """The default ``keep_geom_type=None`` warns when types are filtered."""
    _, df2 = dfs
    geoms = GeoSeries(
        [
            Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
            Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
            Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
        ]
    )
    df3 = GeoDataFrame({"geometry": geoms})
    with pytest.warns(UserWarning, match="`keep_geom_type=True` in overlay"):
        overlay(df2, df3, keep_geom_type=None)
@pytest.mark.parametrize(
    "geom_types", ["polys", "poly_line", "poly_point", "line_poly", "point_poly"]
)
def test_overlay_strict(how, keep_geom_type, geom_types):
    """
    Test of mixed geometry types on input and output.

    The expected results live in ``strict/{geom_types}_{how}_{keep}.geojson``.
    They were generated once by running ``gpd.overlay`` on the same frames
    constructed below (polygon frames df1/df2, line frame df3, point frame
    df4) for every combination of ``how`` and ``keep_geom_type``, saving only
    the non-empty outputs to GeoJSON::

        params = ["union", "intersection", "difference",
                  "symmetric_difference", "identity"]
        for p in params:
            for s in [True, False]:
                exp = gpd.overlay(dfa, dfb, how=p, keep_geom_type=s)
                if not exp.empty:
                    exp.to_file('{t}_{p}_{s}.geojson'.format(t=..., p=p, s=s),
                                driver='GeoJSON')

    Combinations that produced an empty frame therefore have no saved file;
    a missing file below implies the overlay result must be empty.
    """
    polys1 = GeoSeries(
        [
            Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
            Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
        ]
    )
    df1 = GeoDataFrame({"col1": [1, 2], "geometry": polys1})
    polys2 = GeoSeries(
        [
            Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
            Polygon([(-1, 1), (1, 1), (1, 3), (-1, 3)]),
            Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
        ]
    )
    df2 = GeoDataFrame({"geometry": polys2, "col2": [1, 2, 3]})
    lines1 = GeoSeries(
        [LineString([(2, 0), (2, 4), (6, 4)]), LineString([(0, 3), (6, 3)])]
    )
    df3 = GeoDataFrame({"col3": [1, 2], "geometry": lines1})
    points1 = GeoSeries([Point((2, 2)), Point((3, 3))])
    df4 = GeoDataFrame({"col4": [1, 2], "geometry": points1})
    # pick the input pair matching the parametrized geometry combination
    if geom_types == "polys":
        result = overlay(df1, df2, how=how, keep_geom_type=keep_geom_type)
    elif geom_types == "poly_line":
        result = overlay(df1, df3, how=how, keep_geom_type=keep_geom_type)
    elif geom_types == "poly_point":
        result = overlay(df1, df4, how=how, keep_geom_type=keep_geom_type)
    elif geom_types == "line_poly":
        result = overlay(df3, df1, how=how, keep_geom_type=keep_geom_type)
    elif geom_types == "point_poly":
        result = overlay(df4, df1, how=how, keep_geom_type=keep_geom_type)
    try:
        expected = read_file(
            os.path.join(
                DATA,
                "strict",
                "{t}_{h}_{s}.geojson".format(t=geom_types, h=how, s=keep_geom_type),
            )
        )
        # the order depends on the spatial index used
        # so we sort the resultant dataframes to get a consistent order
        # independently of the spatial index implementation
        assert all(expected.columns == result.columns), "Column name mismatch"
        cols = list(set(result.columns) - set(["geometry"]))
        expected = expected.sort_values(cols, axis=0).reset_index(drop=True)
        result = result.sort_values(cols, axis=0).reset_index(drop=True)
        assert_geodataframe_equal(
            result,
            expected,
            normalize=True,
            check_column_type=False,
            check_less_precise=True,
            check_crs=False,
            check_dtype=False,
        )
    # a missing expected file means the result must be empty (see docstring)
    except DriverError:  # fiona >= 1.8
        assert result.empty
    except OSError:  # fiona < 1.8
        assert result.empty
def test_mixed_geom_error():
    """keep_geom_type=True is not implemented for mixed-type inputs."""
    polys = GeoSeries(
        [
            Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
            Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
        ]
    )
    df_polys = GeoDataFrame({"col1": [1, 2], "geometry": polys})
    # one polygon plus one linestring in the same frame
    poly_and_line = GeoSeries(
        [
            Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
            LineString([(3, 3), (5, 3), (5, 5), (3, 5)]),
        ]
    )
    df_mixed = GeoDataFrame({"col1": [1, 2], "geometry": poly_and_line})
    with pytest.raises(NotImplementedError):
        overlay(df_polys, df_mixed, keep_geom_type=True)
def test_keep_geom_type_error():
    """keep_geom_type=True rejects GeometryCollection inputs."""
    # a single GeometryCollection geometry as input
    collection = GeoSeries(
        GeometryCollection(
            [
                Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
                LineString([(3, 3), (5, 3), (5, 5), (3, 5)]),
            ]
        )
    )
    df_collection = GeoDataFrame({"col1": [2], "geometry": collection})
    polys = GeoSeries(
        [
            Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),
            Polygon([(3, 3), (5, 3), (5, 5), (3, 5)]),
        ]
    )
    df_polys = GeoDataFrame({"col1": [1, 2], "geometry": polys})
    with pytest.raises(TypeError):
        overlay(df_collection, df_polys, keep_geom_type=True)
def test_keep_geom_type_geometry_collection():
    # GH 1581: intersection produces a GeometryCollection
    df1 = read_file(os.path.join(DATA, "geom_type", "df1.geojson"))
    df2 = read_file(os.path.join(DATA, "geom_type", "df2.geojson"))
    # default (None): polygons kept, but a warning about the implicit choice
    with pytest.warns(UserWarning, match="`keep_geom_type=True` in overlay"):
        inter = overlay(df1, df2, keep_geom_type=None)
    assert len(inter) == 1
    assert (inter.geom_type == "Polygon").all()
    # explicit True: same polygon-only result
    inter = overlay(df1, df2, keep_geom_type=True)
    assert len(inter) == 1
    assert (inter.geom_type == "Polygon").all()
    # False: the full GeometryCollection is returned
    inter = overlay(df1, df2, keep_geom_type=False)
    assert len(inter) == 1
    assert (inter.geom_type == "GeometryCollection").all()
def test_keep_geom_type_geometry_collection2():
    """keep_geom_type filters points/lines out of mixed intersection output."""
    # L-shaped second geometries so intersections yield mixed types
    polys1 = [
        box(0, 0, 1, 1),
        box(1, 1, 3, 3).union(box(1, 3, 5, 5)),
    ]
    polys2 = [
        box(0, 0, 1, 1),
        box(3, 1, 4, 2).union(box(4, 1, 5, 4)),
    ]
    df1 = GeoDataFrame({"left": [0, 1], "geometry": polys1})
    df2 = GeoDataFrame({"right": [0, 1], "geometry": polys2})
    # True: only the polygonal pieces of each intersection survive
    result1 = overlay(df1, df2, keep_geom_type=True)
    expected1 = GeoDataFrame(
        {
            "left": [0, 1],
            "right": [0, 1],
            "geometry": [box(0, 0, 1, 1), box(4, 3, 5, 4)],
        }
    )
    assert_geodataframe_equal(result1, expected1)
    # False: point/line touches are kept too (one extra row)
    result1 = overlay(df1, df2, keep_geom_type=False)
    expected1 = GeoDataFrame(
        {
            "left": [0, 1, 1],
            "right": [0, 0, 1],
            "geometry": [
                box(0, 0, 1, 1),
                Point(1, 1),
                GeometryCollection([box(4, 3, 5, 4), LineString([(3, 1), (3, 2)])]),
            ],
        }
    )
    assert_geodataframe_equal(result1, expected1)
def test_keep_geom_type_geomcoll_different_types():
    """A collection holding NO polygon at all is dropped by keep_geom_type."""
    polys1 = [box(0, 1, 1, 3), box(10, 10, 12, 12)]
    # first geometry only touches polys1[0] along lines/points
    polys2 = [
        Polygon([(1, 0), (3, 0), (3, 3), (1, 3), (1, 2), (2, 2), (2, 1), (1, 1)]),
        box(11, 11, 13, 13),
    ]
    df1 = GeoDataFrame({"left": [0, 1], "geometry": polys1})
    df2 = GeoDataFrame({"right": [0, 1], "geometry": polys2})
    # True: only the genuinely polygonal overlap remains
    result1 = overlay(df1, df2, keep_geom_type=True)
    expected1 = GeoDataFrame(
        {
            "left": [1],
            "right": [1],
            "geometry": [box(11, 11, 12, 12)],
        }
    )
    assert_geodataframe_equal(result1, expected1)
    # False: the line/point-only collection is also returned
    result2 = overlay(df1, df2, keep_geom_type=False)
    expected2 = GeoDataFrame(
        {
            "left": [0, 1],
            "right": [0, 1],
            "geometry": [
                GeometryCollection([LineString([(1, 2), (1, 3)]), Point(1, 1)]),
                box(11, 11, 12, 12),
            ],
        }
    )
    assert_geodataframe_equal(result2, expected2)
def test_keep_geom_type_geometry_collection_difference():
    """keep_geom_type must also filter 'difference' results (GH 2163)."""
    # GH 2163
    polys1 = [
        box(0, 0, 1, 1),
        box(1, 1, 2, 2),
    ]
    # the tiny sliver in the second geometry may be converted to a
    # linestring during the overlay process due to floating point errors
    # on some platforms
    polys2 = [
        box(0, 0, 1, 1),
        box(1, 1, 2, 3).union(box(2, 2, 3, 2.00000000000000001)),
    ]
    df1 = GeoDataFrame({"left": [0, 1], "geometry": polys1})
    df2 = GeoDataFrame({"right": [0, 1], "geometry": polys2})
    result1 = overlay(df2, df1, keep_geom_type=True, how="difference")
    expected1 = GeoDataFrame(
        {
            "right": [1],
            "geometry": [box(1, 2, 2, 3)],
        },
    )
    assert_geodataframe_equal(result1, expected1)
@pytest.mark.parametrize("make_valid", [True, False])
def test_overlap_make_valid(make_valid):
    """Self-intersecting inputs are repaired iff ``make_valid`` is True."""
    bowtie = Polygon([(1, 1), (9, 9), (9, 1), (1, 9), (1, 1)])
    assert not bowtie.is_valid
    # buffer(0) is the classic trick to resolve the self-intersection
    fixed_bowtie = bowtie.buffer(0)
    assert fixed_bowtie.is_valid
    region = GeoDataFrame(
        {"col1": ["region"], "geometry": GeoSeries([box(0, 0, 10, 10)])}
    )
    df_bowtie = GeoDataFrame(
        {"col1": ["invalid", "valid"], "geometry": GeoSeries([bowtie, fixed_bowtie])}
    )
    if not make_valid:
        with pytest.raises(ValueError, match="1 invalid input geometries"):
            overlay(region, df_bowtie, make_valid=make_valid)
    else:
        repaired = overlay(region, df_bowtie, make_valid=make_valid)
        # both rows should now carry the repaired bowtie geometry
        assert repaired.at[0, "geometry"].equals(fixed_bowtie)
        assert repaired.at[1, "geometry"].equals(fixed_bowtie)
def test_empty_overlay_return_non_duplicated_columns():
    """An empty overlay still exposes suffixed columns from both inputs."""
    nybb = geopandas.read_file(geopandas.datasets.get_path("nybb"))
    shifted = nybb.copy()
    # push the copy far away so the intersection is guaranteed empty
    shifted.geometry = shifted.translate(20000000)
    result = geopandas.overlay(nybb, shifted)
    expected_columns = [
        "BoroCode_1",
        "BoroName_1",
        "Shape_Leng_1",
        "Shape_Area_1",
        "BoroCode_2",
        "BoroName_2",
        "Shape_Leng_2",
        "Shape_Area_2",
        "geometry",
    ]
    expected = GeoDataFrame(columns=expected_columns, crs=nybb.crs)
    assert_geodataframe_equal(result, expected, check_dtype=False)
def test_non_overlapping(how):
    """Overlay of two disjoint polygons for every supported operation."""
    p1 = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
    p2 = Polygon([(3, 3), (5, 3), (5, 5), (3, 5)])
    df1 = GeoDataFrame({"col1": [1], "geometry": [p1]})
    df2 = GeoDataFrame({"col2": [2], "geometry": [p2]})
    result = overlay(df1, df2, how=how)
    if how == "intersection":
        # nothing intersects -> empty frame, both attribute columns present
        expected = GeoDataFrame(
            {
                "col1": np.array([], dtype="int64"),
                "col2": np.array([], dtype="int64"),
                "geometry": [],
            },
            index=pd.Index([], dtype="object"),
        )
    elif how == "union":
        # both inputs survive; NaN fills the other side's attribute
        expected = GeoDataFrame(
            {
                "col1": [1, np.nan],
                "col2": [np.nan, 2],
                "geometry": [p1, p2],
            }
        )
    elif how == "identity":
        # only the left input survives (float col1 due to the NaN column)
        expected = GeoDataFrame(
            {
                "col1": [1.0],
                "col2": [np.nan],
                "geometry": [p1],
            }
        )
    elif how == "symmetric_difference":
        # for disjoint inputs this equals the union
        expected = GeoDataFrame(
            {
                "col1": [1, np.nan],
                "col2": [np.nan, 2],
                "geometry": [p1, p2],
            }
        )
    elif how == "difference":
        # left frame unchanged, right columns dropped
        expected = GeoDataFrame(
            {
                "col1": [1],
                "geometry": [p1],
            }
        )
    assert_geodataframe_equal(result, expected)
def test_no_intersection():
    # bounding boxes overlap, but the buffered points themselves never touch
    circles = GeoSeries([Point(i, i).buffer(0.1) for i in range(3)])
    left = GeoDataFrame({"foo": ["a", "b", "c"]}, geometry=circles)
    right = GeoDataFrame({"bar": ["1", "3", "5"]}, geometry=circles.translate(1))
    expected = GeoDataFrame(columns=["foo", "bar", "geometry"])
    result = overlay(left, right, how="intersection")
    assert_geodataframe_equal(result, expected, check_index_type=False)
class TestOverlayWikiExample:
    """Overlay operations checked against the classic GIS textbook/wiki
    example of two overlapping axis-aligned boxes."""
    def setup_method(self):
        # Layer A (upper-left box) and layer B (lower-right box) overlap
        # exactly in box(4, 2, 6, 4); every reference result below is
        # expressed in terms of that overlap square and the two L-shaped
        # remainders.
        self.layer_a = GeoDataFrame(geometry=[box(0, 2, 6, 6)])
        self.layer_b = GeoDataFrame(geometry=[box(4, 0, 10, 4)])
        self.intersection = GeoDataFrame(geometry=[box(4, 2, 6, 4)])
        self.union = GeoDataFrame(
            geometry=[
                box(4, 2, 6, 4),
                Polygon([(4, 2), (0, 2), (0, 6), (6, 6), (6, 4), (4, 4), (4, 2)]),
                Polygon([(10, 0), (4, 0), (4, 2), (6, 2), (6, 4), (10, 4), (10, 0)]),
            ]
        )
        self.a_difference_b = GeoDataFrame(
            geometry=[Polygon([(4, 2), (0, 2), (0, 6), (6, 6), (6, 4), (4, 4), (4, 2)])]
        )
        self.b_difference_a = GeoDataFrame(
            geometry=[
                Polygon([(10, 0), (4, 0), (4, 2), (6, 2), (6, 4), (10, 4), (10, 0)])
            ]
        )
        self.symmetric_difference = GeoDataFrame(
            geometry=[
                Polygon([(4, 2), (0, 2), (0, 6), (6, 6), (6, 4), (4, 4), (4, 2)]),
                Polygon([(10, 0), (4, 0), (4, 2), (6, 2), (6, 4), (10, 4), (10, 0)]),
            ]
        )
        self.a_identity_b = GeoDataFrame(
            geometry=[
                box(4, 2, 6, 4),
                Polygon([(4, 2), (0, 2), (0, 6), (6, 6), (6, 4), (4, 4), (4, 2)]),
            ]
        )
        self.b_identity_a = GeoDataFrame(
            geometry=[
                box(4, 2, 6, 4),
                Polygon([(10, 0), (4, 0), (4, 2), (6, 2), (6, 4), (10, 4), (10, 0)]),
            ]
        )
    def test_intersection(self):
        df_result = overlay(self.layer_a, self.layer_b, how="intersection")
        # geometric equality only; column layout is not compared here
        assert df_result.geom_equals(self.intersection).bool()
    def test_union(self):
        df_result = overlay(self.layer_a, self.layer_b, how="union")
        assert_geodataframe_equal(df_result, self.union)
    def test_a_difference_b(self):
        df_result = overlay(self.layer_a, self.layer_b, how="difference")
        assert_geodataframe_equal(df_result, self.a_difference_b)
    def test_b_difference_a(self):
        df_result = overlay(self.layer_b, self.layer_a, how="difference")
        assert_geodataframe_equal(df_result, self.b_difference_a)
    def test_symmetric_difference(self):
        df_result = overlay(self.layer_a, self.layer_b, how="symmetric_difference")
        assert_geodataframe_equal(df_result, self.symmetric_difference)
    def test_a_identity_b(self):
        df_result = overlay(self.layer_a, self.layer_b, how="identity")
        assert_geodataframe_equal(df_result, self.a_identity_b)
    def test_b_identity_a(self):
        # identity is not symmetric: swapping the inputs changes the result
        df_result = overlay(self.layer_b, self.layer_a, how="identity")
        assert_geodataframe_equal(df_result, self.b_identity_a)
| [
"pytest.mark.filterwarnings",
"shapely.geometry.box",
"shapely.geometry.Point",
"pandas.Index",
"numpy.array",
"shapely.geometry.Polygon",
"geopandas.overlay",
"pytest.fixture",
"pandas.testing.assert_frame_equal",
"pytest.xfail",
"geopandas.testing.assert_geodataframe_equal",
"pytest.skip",
... | [((1085, 1154), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['default-index', 'int-index', 'string-index']"}), "(params=['default-index', 'int-index', 'string-index'])\n", (1099, 1154), False, 'import pytest\n'), ((1394, 1496), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['union', 'intersection', 'difference', 'symmetric_difference', 'identity']"}), "(params=['union', 'intersection', 'difference',\n 'symmetric_difference', 'identity'])\n", (1408, 1496), False, 'import pytest\n'), ((1693, 1729), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[True, False]'}), '(params=[True, False])\n', (1707, 1729), False, 'import pytest\n'), ((3245, 3316), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:GeoSeries crs mismatch:UserWarning"""'], {}), "('ignore:GeoSeries crs mismatch:UserWarning')\n", (3271, 3316), False, 'import pytest\n'), ((9181, 9237), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""other_geometry"""', '[False, True]'], {}), "('other_geometry', [False, True])\n", (9204, 9237), False, 'import pytest\n'), ((13450, 13556), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geom_types"""', "['polys', 'poly_line', 'poly_point', 'line_poly', 'point_poly']"], {}), "('geom_types', ['polys', 'poly_line', 'poly_point',\n 'line_poly', 'point_poly'])\n", (13473, 13556), False, 'import pytest\n'), ((22836, 22888), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""make_valid"""', '[True, False]'], {}), "('make_valid', [True, False])\n", (22859, 22888), False, 'import pytest\n'), ((556, 579), 'packaging.version.Version', 'Version', (['pd.__version__'], {}), '(pd.__version__)\n', (563, 579), False, 'from packaging.version import Version\n'), ((583, 599), 'packaging.version.Version', 'Version', (['"""1.3.3"""'], {}), "('1.3.3')\n", (590, 599), False, 'from packaging.version import Version\n'), ((958, 1004), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'col1': [1, 2], 'geometry': s1}"], {}), 
"({'col1': [1, 2], 'geometry': s1})\n", (970, 1004), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((1015, 1061), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'col2': [1, 2], 'geometry': s2}"], {}), "({'col2': [1, 2], 'geometry': s2})\n", (1027, 1061), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((2067, 2093), 'geopandas.overlay', 'overlay', (['df1', 'df2'], {'how': 'how'}), '(df1, df2, how=how)\n', (2074, 2093), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((2894, 2962), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['result', 'expected'], {'check_column_type': '(False)'}), '(result, expected, check_column_type=False)\n', (2919, 2962), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((4126, 4159), 'geopandas.overlay', 'overlay', (['polydf', 'polydf2'], {'how': 'how'}), '(polydf, polydf2, how=how)\n', (4133, 4159), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((6538, 6628), 'pandas.testing.assert_series_equal', 'pd.testing.assert_series_equal', (['result.geometry.area', 'expected.geometry.area'], {}), '(result.geometry.area, expected.geometry.area,\n **kwargs)\n', (6568, 6628), True, 'import pandas as pd\n'), ((6643, 6737), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['result.geometry.bounds', 'expected.geometry.bounds'], {}), '(result.geometry.bounds, expected.geometry.\n bounds, **kwargs)\n', (6672, 6737), True, 'import pandas as pd\n'), ((7133, 7263), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['result', 'expected'], {'normalize': '(True)', 'check_crs': '(False)', 'check_column_type': '(False)', 'check_less_precise': '(True)'}), '(result, expected, normalize=True, check_crs=False,\n check_column_type=False, check_less_precise=True)\n', (7158, 7263), False, 'from geopandas.testing import 
assert_geodataframe_equal, assert_geoseries_equal\n'), ((8502, 8528), 'geopandas.overlay', 'overlay', (['df1', 'df2'], {'how': 'how'}), '(df1, df2, how=how)\n', (8509, 8528), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((9021, 9134), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['result', 'expected'], {'normalize': '(True)', 'check_column_type': '(False)', 'check_less_precise': '(True)'}), '(result, expected, normalize=True,\n check_column_type=False, check_less_precise=True)\n', (9046, 9134), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((9611, 9637), 'geopandas.overlay', 'overlay', (['df1', 'df2'], {'how': 'how'}), '(df1, df2, how=how)\n', (9618, 9637), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((9649, 9675), 'geopandas.overlay', 'overlay', (['df3', 'df2'], {'how': 'how'}), '(df3, df2, how=how)\n', (9656, 9675), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((10405, 10442), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['res1', 'res2'], {}), '(res1, res2)\n', (10430, 10442), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((10673, 10699), 'geopandas.overlay', 'overlay', (['df1', 'df2'], {'how': 'how'}), '(df1, df2, how=how)\n', (10680, 10699), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((10711, 10737), 'geopandas.overlay', 'overlay', (['df1', 'df4'], {'how': 'how'}), '(df1, df4, how=how)\n', (10718, 10737), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((10742, 10779), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['res1', 'res2'], {}), '(res1, res2)\n', (10767, 10779), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((11115, 11142), 'geopandas.overlay', 'overlay', 
(['df1', 'df2r'], {'how': 'how'}), '(df1, df2r, how=how)\n', (11122, 11142), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((11441, 11467), 'geopandas.overlay', 'overlay', (['df1', 'df2'], {'how': 'how'}), '(df1, df2, how=how)\n', (11448, 11467), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((11569, 11595), 'geopandas.overlay', 'overlay', (['df1', 'df2'], {'how': 'how'}), '(df1, df2, how=how)\n', (11576, 11595), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((12067, 12117), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'geometry': polys3, 'col3': [1, 2]}"], {}), "({'geometry': polys3, 'col3': [1, 2]})\n", (12079, 12117), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((12133, 12187), 'geopandas.GeoDataFrame', 'GeoDataFrame', (['[]'], {'columns': "['col1', 'col3', 'geometry']"}), "([], columns=['col1', 'col3', 'geometry'])\n", (12145, 12187), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((12201, 12218), 'geopandas.overlay', 'overlay', (['df1', 'df3'], {}), '(df1, df3)\n', (12208, 12218), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((12223, 12285), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['result', 'expected'], {'check_dtype': '(False)'}), '(result, expected, check_dtype=False)\n', (12248, 12285), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((12621, 12674), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'geometry': polys3, 'col3': [1, 2, 3]}"], {}), "({'geometry': polys3, 'col3': [1, 2, 3]})\n", (12633, 12674), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((12684, 12733), 'shapely.geometry.Polygon', 'Polygon', (['[(1, 1), (1, 3), (3, 3), (3, 1), (1, 1)]'], {}), '([(1, 1), (1, 3), (3, 3), (3, 1), (1, 1)])\n', (12691, 12733), False, 'from shapely.geometry 
import Point, Polygon, LineString, GeometryCollection, box\n'), ((12743, 12792), 'shapely.geometry.Polygon', 'Polygon', (['[(3, 3), (3, 5), (5, 5), (5, 3), (3, 3)]'], {}), '([(3, 3), (3, 5), (5, 5), (5, 3), (3, 3)])\n', (12750, 12792), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((12808, 12884), 'geopandas.GeoDataFrame', 'GeoDataFrame', (['[[1, 1, i1], [3, 2, i2]]'], {'columns': "['col3', 'col2', 'geometry']"}), "([[1, 1, i1], [3, 2, i2]], columns=['col3', 'col2', 'geometry'])\n", (12820, 12884), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((12912, 12950), 'geopandas.overlay', 'overlay', (['df3', 'df2'], {'keep_geom_type': '(True)'}), '(df3, df2, keep_geom_type=True)\n', (12919, 12950), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((12955, 12998), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['result', 'expected'], {}), '(result, expected)\n', (12980, 12998), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((13286, 13320), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'geometry': polys3}"], {}), "({'geometry': polys3})\n", (13298, 13320), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((15812, 15862), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'col1': [1, 2], 'geometry': polys1}"], {}), "({'col1': [1, 2], 'geometry': polys1})\n", (15824, 15862), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((16091, 16144), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'geometry': polys2, 'col2': [1, 2, 3]}"], {}), "({'geometry': polys2, 'col2': [1, 2, 3]})\n", (16103, 16144), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((16262, 16312), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'col3': [1, 2], 'geometry': lines1}"], {}), "({'col3': [1, 2], 'geometry': lines1})\n", (16274, 16312), 
False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((16379, 16430), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'col4': [1, 2], 'geometry': points1}"], {}), "({'col4': [1, 2], 'geometry': points1})\n", (16391, 16430), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((18267, 18317), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'col1': [1, 2], 'geometry': polys1}"], {}), "({'col1': [1, 2], 'geometry': polys1})\n", (18279, 18317), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((18494, 18543), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'col1': [1, 2], 'geometry': mixed}"], {}), "({'col1': [1, 2], 'geometry': mixed})\n", (18506, 18543), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((18902, 18947), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'col1': [2], 'geometry': gcol}"], {}), "({'col1': [2], 'geometry': gcol})\n", (18914, 18947), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((19118, 19168), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'col1': [1, 2], 'geometry': polys1}"], {}), "({'col1': [1, 2], 'geometry': polys1})\n", (19130, 19168), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((19703, 19741), 'geopandas.overlay', 'overlay', (['df1', 'df2'], {'keep_geom_type': '(True)'}), '(df1, df2, keep_geom_type=True)\n', (19710, 19741), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((19851, 19890), 'geopandas.overlay', 'overlay', (['df1', 'df2'], {'keep_geom_type': '(False)'}), '(df1, df2, keep_geom_type=False)\n', (19858, 19890), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((20240, 20290), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'left': [0, 1], 'geometry': polys1}"], {}), "({'left': [0, 1], 'geometry': polys1})\n", (20252, 20290), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, 
read_file\n'), ((20301, 20352), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'right': [0, 1], 'geometry': polys2}"], {}), "({'right': [0, 1], 'geometry': polys2})\n", (20313, 20352), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((20368, 20406), 'geopandas.overlay', 'overlay', (['df1', 'df2'], {'keep_geom_type': '(True)'}), '(df1, df2, keep_geom_type=True)\n', (20375, 20406), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((20584, 20629), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['result1', 'expected1'], {}), '(result1, expected1)\n', (20609, 20629), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((20645, 20684), 'geopandas.overlay', 'overlay', (['df1', 'df2'], {'keep_geom_type': '(False)'}), '(df1, df2, keep_geom_type=False)\n', (20652, 20684), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((20996, 21041), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['result1', 'expected1'], {}), '(result1, expected1)\n', (21021, 21041), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((21291, 21341), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'left': [0, 1], 'geometry': polys1}"], {}), "({'left': [0, 1], 'geometry': polys1})\n", (21303, 21341), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((21352, 21403), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'right': [0, 1], 'geometry': polys2}"], {}), "({'right': [0, 1], 'geometry': polys2})\n", (21364, 21403), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((21418, 21456), 'geopandas.overlay', 'overlay', (['df1', 'df2'], {'keep_geom_type': '(True)'}), '(df1, df2, keep_geom_type=True)\n', (21425, 21456), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((21615, 21660), 
'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['result1', 'expected1'], {}), '(result1, expected1)\n', (21640, 21660), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((21676, 21715), 'geopandas.overlay', 'overlay', (['df1', 'df2'], {'keep_geom_type': '(False)'}), '(df1, df2, keep_geom_type=False)\n', (21683, 21715), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((21992, 22037), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['result2', 'expected2'], {}), '(result2, expected2)\n', (22017, 22037), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((22471, 22521), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'left': [0, 1], 'geometry': polys1}"], {}), "({'left': [0, 1], 'geometry': polys1})\n", (22483, 22521), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((22532, 22583), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'right': [0, 1], 'geometry': polys2}"], {}), "({'right': [0, 1], 'geometry': polys2})\n", (22544, 22583), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((22599, 22655), 'geopandas.overlay', 'overlay', (['df2', 'df1'], {'keep_geom_type': '(True)', 'how': '"""difference"""'}), "(df2, df1, keep_geom_type=True, how='difference')\n", (22606, 22655), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((22787, 22832), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['result1', 'expected1'], {}), '(result1, expected1)\n', (22812, 22832), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((22943, 22992), 'shapely.geometry.Polygon', 'Polygon', (['[(1, 1), (9, 9), (9, 1), (1, 9), (1, 1)]'], {}), '([(1, 1), (9, 9), (9, 1), (1, 9), (1, 1)])\n', (22950, 22992), False, 'from shapely.geometry import Point, Polygon, LineString, 
GeometryCollection, box\n'), ((23901, 23931), 'geopandas.overlay', 'geopandas.overlay', (['nybb', 'nybb2'], {}), '(nybb, nybb2)\n', (23918, 23931), False, 'import geopandas\n'), ((23948, 24124), 'geopandas.GeoDataFrame', 'GeoDataFrame', ([], {'columns': "['BoroCode_1', 'BoroName_1', 'Shape_Leng_1', 'Shape_Area_1', 'BoroCode_2',\n 'BoroName_2', 'Shape_Leng_2', 'Shape_Area_2', 'geometry']", 'crs': 'nybb.crs'}), "(columns=['BoroCode_1', 'BoroName_1', 'Shape_Leng_1',\n 'Shape_Area_1', 'BoroCode_2', 'BoroName_2', 'Shape_Leng_2',\n 'Shape_Area_2', 'geometry'], crs=nybb.crs)\n", (23960, 24124), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((24263, 24325), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['result', 'expected'], {'check_dtype': '(False)'}), '(result, expected, check_dtype=False)\n', (24288, 24325), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((24368, 24409), 'shapely.geometry.Polygon', 'Polygon', (['[(0, 0), (2, 0), (2, 2), (0, 2)]'], {}), '([(0, 0), (2, 0), (2, 2), (0, 2)])\n', (24375, 24409), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((24419, 24460), 'shapely.geometry.Polygon', 'Polygon', (['[(3, 3), (5, 3), (5, 5), (3, 5)]'], {}), '([(3, 3), (5, 3), (5, 5), (3, 5)])\n', (24426, 24460), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((24471, 24516), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'col1': [1], 'geometry': [p1]}"], {}), "({'col1': [1], 'geometry': [p1]})\n", (24483, 24516), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((24527, 24572), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'col2': [2], 'geometry': [p2]}"], {}), "({'col2': [2], 'geometry': [p2]})\n", (24539, 24572), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((24586, 24612), 'geopandas.overlay', 'overlay', 
(['df1', 'df2'], {'how': 'how'}), '(df1, df2, how=how)\n', (24593, 24612), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((25700, 25743), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['result', 'expected'], {}), '(result, expected)\n', (25725, 25743), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((25905, 25956), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'foo': ['a', 'b', 'c']}"], {'geometry': 'gs'}), "({'foo': ['a', 'b', 'c']}, geometry=gs)\n", (25917, 25956), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((26049, 26097), 'geopandas.GeoDataFrame', 'GeoDataFrame', ([], {'columns': "['foo', 'bar', 'geometry']"}), "(columns=['foo', 'bar', 'geometry'])\n", (26061, 26097), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((26111, 26150), 'geopandas.overlay', 'overlay', (['gdf1', 'gdf2'], {'how': '"""intersection"""'}), "(gdf1, gdf2, how='intersection')\n", (26118, 26150), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((26155, 26222), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['result', 'expected'], {'check_index_type': '(False)'}), '(result, expected, check_index_type=False)\n', (26180, 26222), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((454, 479), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (469, 479), False, 'import os\n'), ((1611, 1664), 'pytest.xfail', 'pytest.xfail', (['"""Regression in pandas 1.3.3 (GH #2101)"""'], {}), "('Regression in pandas 1.3.3 (GH #2101)')\n", (1623, 1664), False, 'import pytest\n'), ((2464, 2554), 'pandas.concat', 'pd.concat', (['[expected_intersection, expected_difference]'], {'ignore_index': '(True)', 'sort': '(False)'}), '([expected_intersection, expected_difference], ignore_index=True,\n sort=False)\n', (2473, 
2554), True, 'import pandas as pd\n'), ((3044, 3070), 'geopandas.overlay', 'overlay', (['df2', 'df1'], {'how': 'how'}), '(df2, df1, how=how)\n', (3051, 3070), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((3173, 3241), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['result', 'expected'], {'check_column_type': '(False)'}), '(result, expected, check_column_type=False)\n', (3198, 3241), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((3368, 3403), 'geopandas.datasets.get_path', 'geopandas.datasets.get_path', (['"""nybb"""'], {}), "('nybb')\n", (3395, 3403), False, 'import geopandas\n'), ((4064, 4110), 'os.path.join', 'os.path.join', (['DATA', '"""nybb_qgis"""', '"""polydf2.shp"""'], {}), "(DATA, 'nybb_qgis', 'polydf2.shp')\n", (4076, 4110), False, 'import os\n'), ((8360, 8412), 'os.path.join', 'os.path.join', (['DATA', '"""overlap"""', '"""df1_overlap.geojson"""'], {}), "(DATA, 'overlap', 'df1_overlap.geojson')\n", (8372, 8412), False, 'import os\n'), ((8434, 8486), 'os.path.join', 'os.path.join', (['DATA', '"""overlap"""', '"""df2_overlap.geojson"""'], {}), "(DATA, 'overlap', 'df2_overlap.geojson')\n", (8446, 8486), False, 'import os\n'), ((8570, 8583), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (8581, 8583), False, 'import pytest\n'), ((10833, 10858), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10846, 10858), False, 'import pytest\n'), ((10868, 10900), 'geopandas.overlay', 'overlay', (['df1', 'df2'], {'how': '"""spandex"""'}), "(df1, df2, how='spandex')\n", (10875, 10900), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((10981, 11037), 'pytest.skip', 'pytest.skip', (['"""Difference uses columns from one df only."""'], {}), "('Difference uses columns from one df only.')\n", (10992, 11037), False, 'import pytest\n'), ((11290, 11324), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], 
{}), '(NotImplementedError)\n', (11303, 11324), False, 'import pytest\n'), ((11334, 11373), 'geopandas.overlay', 'overlay', (['df1', 'df2.geometry'], {'how': '"""union"""'}), "(df1, df2.geometry, how='union')\n", (11341, 11373), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((11726, 11789), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""CRS mismatch between the CRS"""'}), "(UserWarning, match='CRS mismatch between the CRS')\n", (11738, 11789), False, 'import pytest\n'), ((11799, 11825), 'geopandas.overlay', 'overlay', (['df1', 'df2'], {'how': 'how'}), '(df1, df2, how=how)\n', (11806, 11825), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((13331, 13398), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""`keep_geom_type=True` in overlay"""'}), "(UserWarning, match='`keep_geom_type=True` in overlay')\n", (13343, 13398), False, 'import pytest\n'), ((13408, 13446), 'geopandas.overlay', 'overlay', (['df2', 'df3'], {'keep_geom_type': 'None'}), '(df2, df3, keep_geom_type=None)\n', (13415, 13446), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((16479, 16536), 'geopandas.overlay', 'overlay', (['df1', 'df2'], {'how': 'how', 'keep_geom_type': 'keep_geom_type'}), '(df1, df2, how=how, keep_geom_type=keep_geom_type)\n', (16486, 16536), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((17692, 17845), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['result', 'expected'], {'normalize': '(True)', 'check_column_type': '(False)', 'check_less_precise': '(True)', 'check_crs': '(False)', 'check_dtype': '(False)'}), '(result, expected, normalize=True,\n check_column_type=False, check_less_precise=True, check_crs=False,\n check_dtype=False)\n', (17717, 17845), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((18553, 18587), 'pytest.raises', 'pytest.raises', 
(['NotImplementedError'], {}), '(NotImplementedError)\n', (18566, 18587), False, 'import pytest\n'), ((18597, 18639), 'geopandas.overlay', 'overlay', (['df1', 'dfmixed'], {'keep_geom_type': '(True)'}), '(df1, dfmixed, keep_geom_type=True)\n', (18604, 18639), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((19178, 19202), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (19191, 19202), False, 'import pytest\n'), ((19212, 19252), 'geopandas.overlay', 'overlay', (['dfcol', 'df1'], {'keep_geom_type': '(True)'}), '(dfcol, df1, keep_geom_type=True)\n', (19219, 19252), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((19337, 19383), 'os.path.join', 'os.path.join', (['DATA', '"""geom_type"""', '"""df1.geojson"""'], {}), "(DATA, 'geom_type', 'df1.geojson')\n", (19349, 19383), False, 'import os\n'), ((19405, 19451), 'os.path.join', 'os.path.join', (['DATA', '"""geom_type"""', '"""df2.geojson"""'], {}), "(DATA, 'geom_type', 'df2.geojson')\n", (19417, 19451), False, 'import os\n'), ((19463, 19530), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""`keep_geom_type=True` in overlay"""'}), "(UserWarning, match='`keep_geom_type=True` in overlay')\n", (19475, 19530), False, 'import pytest\n'), ((19555, 19593), 'geopandas.overlay', 'overlay', (['df1', 'df2'], {'keep_geom_type': 'None'}), '(df1, df2, keep_geom_type=None)\n', (19562, 19593), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((20064, 20079), 'shapely.geometry.box', 'box', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (20067, 20079), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((20159, 20174), 'shapely.geometry.box', 'box', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (20162, 20174), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((21110, 21125), 'shapely.geometry.box', 'box', 
(['(0)', '(1)', '(1)', '(3)'], {}), '(0, 1, 1, 3)\n', (21113, 21125), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((21127, 21146), 'shapely.geometry.box', 'box', (['(10)', '(10)', '(12)', '(12)'], {}), '(10, 10, 12, 12)\n', (21130, 21146), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((21171, 21244), 'shapely.geometry.Polygon', 'Polygon', (['[(1, 0), (3, 0), (3, 3), (1, 3), (1, 2), (2, 2), (2, 1), (1, 1)]'], {}), '([(1, 0), (3, 0), (3, 3), (1, 3), (1, 2), (2, 2), (2, 1), (1, 1)])\n', (21178, 21244), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((21254, 21273), 'shapely.geometry.box', 'box', (['(11)', '(11)', '(13)', '(13)'], {}), '(11, 11, 13, 13)\n', (21257, 21273), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((22136, 22151), 'shapely.geometry.box', 'box', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (22139, 22151), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((22161, 22176), 'shapely.geometry.box', 'box', (['(1)', '(1)', '(2)', '(2)'], {}), '(1, 1, 2, 2)\n', (22164, 22176), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((22372, 22387), 'shapely.geometry.box', 'box', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (22375, 22387), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((23353, 23399), 'geopandas.overlay', 'overlay', (['df1', 'df_bowtie'], {'make_valid': 'make_valid'}), '(df1, df_bowtie, make_valid=make_valid)\n', (23360, 23399), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((23779, 23814), 'geopandas.datasets.get_path', 'geopandas.datasets.get_path', (['"""nybb"""'], {}), "('nybb')\n", (23806, 23814), False, 'import geopandas\n'), ((27825, 27880), 'geopandas.overlay', 
'overlay', (['self.layer_a', 'self.layer_b'], {'how': '"""intersection"""'}), "(self.layer_a, self.layer_b, how='intersection')\n", (27832, 27880), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((27991, 28039), 'geopandas.overlay', 'overlay', (['self.layer_a', 'self.layer_b'], {'how': '"""union"""'}), "(self.layer_a, self.layer_b, how='union')\n", (27998, 28039), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((28048, 28096), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['df_result', 'self.union'], {}), '(df_result, self.union)\n', (28073, 28096), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((28153, 28206), 'geopandas.overlay', 'overlay', (['self.layer_a', 'self.layer_b'], {'how': '"""difference"""'}), "(self.layer_a, self.layer_b, how='difference')\n", (28160, 28206), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((28215, 28272), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['df_result', 'self.a_difference_b'], {}), '(df_result, self.a_difference_b)\n', (28240, 28272), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((28329, 28382), 'geopandas.overlay', 'overlay', (['self.layer_b', 'self.layer_a'], {'how': '"""difference"""'}), "(self.layer_b, self.layer_a, how='difference')\n", (28336, 28382), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((28391, 28448), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['df_result', 'self.b_difference_a'], {}), '(df_result, self.b_difference_a)\n', (28416, 28448), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((28511, 28574), 'geopandas.overlay', 'overlay', (['self.layer_a', 'self.layer_b'], {'how': '"""symmetric_difference"""'}), "(self.layer_a, self.layer_b, 
how='symmetric_difference')\n", (28518, 28574), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((28583, 28646), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['df_result', 'self.symmetric_difference'], {}), '(df_result, self.symmetric_difference)\n', (28608, 28646), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((28701, 28752), 'geopandas.overlay', 'overlay', (['self.layer_a', 'self.layer_b'], {'how': '"""identity"""'}), "(self.layer_a, self.layer_b, how='identity')\n", (28708, 28752), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((28761, 28816), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['df_result', 'self.a_identity_b'], {}), '(df_result, self.a_identity_b)\n', (28786, 28816), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((28871, 28922), 'geopandas.overlay', 'overlay', (['self.layer_b', 'self.layer_a'], {'how': '"""identity"""'}), "(self.layer_b, self.layer_a, how='identity')\n", (28878, 28922), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((28931, 28986), 'geopandas.testing.assert_geodataframe_equal', 'assert_geodataframe_equal', (['df_result', 'self.b_identity_a'], {}), '(df_result, self.b_identity_a)\n', (28956, 28986), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((678, 719), 'shapely.geometry.Polygon', 'Polygon', (['[(0, 0), (2, 0), (2, 2), (0, 2)]'], {}), '([(0, 0), (2, 0), (2, 2), (0, 2)])\n', (685, 719), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((733, 774), 'shapely.geometry.Polygon', 'Polygon', (['[(2, 2), (4, 2), (4, 4), (2, 4)]'], {}), '([(2, 2), (4, 2), (4, 4), (2, 4)])\n', (740, 774), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((834, 875), 
'shapely.geometry.Polygon', 'Polygon', (['[(1, 1), (3, 1), (3, 3), (1, 3)]'], {}), '([(1, 1), (3, 1), (3, 3), (1, 3)])\n', (841, 875), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((889, 930), 'shapely.geometry.Polygon', 'Polygon', (['[(3, 3), (5, 3), (5, 5), (3, 5)]'], {}), '([(3, 3), (5, 3), (5, 5), (3, 5)])\n', (896, 930), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((4453, 4502), 'os.path.join', 'os.path.join', (['DATA', '"""nybb_qgis"""', '"""qgis-union.shp"""'], {}), "(DATA, 'nybb_qgis', 'qgis-union.shp')\n", (4465, 4502), False, 'import os\n'), ((9958, 10045), 'geopandas.testing.assert_geoseries_equal', 'assert_geoseries_equal', (["res2['geometry']", "df3['geometry']"], {'check_series_type': '(False)'}), "(res2['geometry'], df3['geometry'], check_series_type\n =False)\n", (9980, 10045), False, 'from geopandas.testing import assert_geodataframe_equal, assert_geoseries_equal\n'), ((11927, 11976), 'shapely.geometry.Polygon', 'Polygon', (['[(-1, -1), (-3, -1), (-3, -3), (-1, -3)]'], {}), '([(-1, -1), (-3, -1), (-3, -3), (-1, -3)])\n', (11934, 11976), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((11990, 12039), 'shapely.geometry.Polygon', 'Polygon', (['[(-3, -3), (-5, -3), (-5, -5), (-3, -5)]'], {}), '([(-3, -3), (-5, -3), (-5, -5), (-3, -5)])\n', (11997, 12039), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((12440, 12481), 'shapely.geometry.Polygon', 'Polygon', (['[(1, 1), (3, 1), (3, 3), (1, 3)]'], {}), '([(1, 1), (3, 1), (3, 3), (1, 3)])\n', (12447, 12481), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((12495, 12538), 'shapely.geometry.Polygon', 'Polygon', (['[(-1, 1), (1, 1), (1, 3), (-1, 3)]'], {}), '([(-1, 1), (1, 1), (1, 3), (-1, 3)])\n', (12502, 12538), False, 'from shapely.geometry import Point, Polygon, 
LineString, GeometryCollection, box\n'), ((12552, 12593), 'shapely.geometry.Polygon', 'Polygon', (['[(3, 3), (5, 3), (5, 5), (3, 5)]'], {}), '([(3, 3), (5, 3), (5, 5), (3, 5)])\n', (12559, 12593), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((13105, 13146), 'shapely.geometry.Polygon', 'Polygon', (['[(1, 1), (3, 1), (3, 3), (1, 3)]'], {}), '([(1, 1), (3, 1), (3, 3), (1, 3)])\n', (13112, 13146), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((13160, 13203), 'shapely.geometry.Polygon', 'Polygon', (['[(-1, 1), (1, 1), (1, 3), (-1, 3)]'], {}), '([(-1, 1), (1, 1), (1, 3), (-1, 3)])\n', (13167, 13203), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((13217, 13258), 'shapely.geometry.Polygon', 'Polygon', (['[(3, 3), (5, 3), (5, 5), (3, 5)]'], {}), '([(3, 3), (5, 3), (5, 5), (3, 5)])\n', (13224, 13258), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((15688, 15729), 'shapely.geometry.Polygon', 'Polygon', (['[(1, 1), (3, 1), (3, 3), (1, 3)]'], {}), '([(1, 1), (3, 1), (3, 3), (1, 3)])\n', (15695, 15729), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((15743, 15784), 'shapely.geometry.Polygon', 'Polygon', (['[(3, 3), (5, 3), (5, 5), (3, 5)]'], {}), '([(3, 3), (5, 3), (5, 5), (3, 5)])\n', (15750, 15784), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((15910, 15951), 'shapely.geometry.Polygon', 'Polygon', (['[(1, 1), (3, 1), (3, 3), (1, 3)]'], {}), '([(1, 1), (3, 1), (3, 3), (1, 3)])\n', (15917, 15951), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((15965, 16008), 'shapely.geometry.Polygon', 'Polygon', (['[(-1, 1), (1, 1), (1, 3), (-1, 3)]'], {}), '([(-1, 1), (1, 1), (1, 3), (-1, 3)])\n', (15972, 16008), False, 'from shapely.geometry import Point, 
Polygon, LineString, GeometryCollection, box\n'), ((16022, 16063), 'shapely.geometry.Polygon', 'Polygon', (['[(3, 3), (5, 3), (5, 5), (3, 5)]'], {}), '([(3, 3), (5, 3), (5, 5), (3, 5)])\n', (16029, 16063), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((16178, 16214), 'shapely.geometry.LineString', 'LineString', (['[(2, 0), (2, 4), (6, 4)]'], {}), '([(2, 0), (2, 4), (6, 4)])\n', (16188, 16214), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((16216, 16244), 'shapely.geometry.LineString', 'LineString', (['[(0, 3), (6, 3)]'], {}), '([(0, 3), (6, 3)])\n', (16226, 16244), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((16338, 16351), 'shapely.geometry.Point', 'Point', (['(2, 2)'], {}), '((2, 2))\n', (16343, 16351), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((16353, 16366), 'shapely.geometry.Point', 'Point', (['(3, 3)'], {}), '((3, 3))\n', (16358, 16366), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((16590, 16647), 'geopandas.overlay', 'overlay', (['df1', 'df3'], {'how': 'how', 'keep_geom_type': 'keep_geom_type'}), '(df1, df3, how=how, keep_geom_type=keep_geom_type)\n', (16597, 16647), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((18143, 18184), 'shapely.geometry.Polygon', 'Polygon', (['[(1, 1), (3, 1), (3, 3), (1, 3)]'], {}), '([(1, 1), (3, 1), (3, 3), (1, 3)])\n', (18150, 18184), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((18198, 18239), 'shapely.geometry.Polygon', 'Polygon', (['[(3, 3), (5, 3), (5, 5), (3, 5)]'], {}), '([(3, 3), (5, 3), (5, 5), (3, 5)])\n', (18205, 18239), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((18363, 18404), 'shapely.geometry.Polygon', 'Polygon', (['[(1, 1), (3, 1), 
(3, 3), (1, 3)]'], {}), '([(1, 1), (3, 1), (3, 3), (1, 3)])\n', (18370, 18404), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((18418, 18462), 'shapely.geometry.LineString', 'LineString', (['[(3, 3), (5, 3), (5, 5), (3, 5)]'], {}), '([(3, 3), (5, 3), (5, 5), (3, 5)])\n', (18428, 18462), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((18994, 19035), 'shapely.geometry.Polygon', 'Polygon', (['[(1, 1), (3, 1), (3, 3), (1, 3)]'], {}), '([(1, 1), (3, 1), (3, 3), (1, 3)])\n', (19001, 19035), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((19049, 19090), 'shapely.geometry.Polygon', 'Polygon', (['[(3, 3), (5, 3), (5, 5), (3, 5)]'], {}), '([(3, 3), (5, 3), (5, 5), (3, 5)])\n', (19056, 19090), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((20111, 20126), 'shapely.geometry.box', 'box', (['(1)', '(3)', '(5)', '(5)'], {}), '(1, 3, 5, 5)\n', (20114, 20126), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((20206, 20221), 'shapely.geometry.box', 'box', (['(4)', '(1)', '(5)', '(4)'], {}), '(4, 1, 5, 4)\n', (20209, 20221), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((22419, 22436), 'shapely.geometry.box', 'box', (['(2)', '(2)', '(3)', '(2.0)'], {}), '(2, 2, 3, 2.0)\n', (22422, 22436), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((23264, 23297), 'geopandas.GeoSeries', 'GeoSeries', (['[bowtie, fixed_bowtie]'], {}), '([bowtie, fixed_bowtie])\n', (23273, 23297), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((23567, 23628), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""1 invalid input geometries"""'}), "(ValueError, match='1 invalid input geometries')\n", (23580, 23628), False, 'import pytest\n'), 
((23642, 23688), 'geopandas.overlay', 'overlay', (['df1', 'df_bowtie'], {'make_valid': 'make_valid'}), '(df1, df_bowtie, make_valid=make_valid)\n', (23649, 23688), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((24946, 25024), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'col1': [1, np.nan], 'col2': [np.nan, 2], 'geometry': [p1, p2]}"], {}), "({'col1': [1, np.nan], 'col2': [np.nan, 2], 'geometry': [p1, p2]})\n", (24958, 25024), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((16702, 16759), 'geopandas.overlay', 'overlay', (['df1', 'df4'], {'how': 'how', 'keep_geom_type': 'keep_geom_type'}), '(df1, df4, how=how, keep_geom_type=keep_geom_type)\n', (16709, 16759), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((18755, 18796), 'shapely.geometry.Polygon', 'Polygon', (['[(1, 1), (3, 1), (3, 3), (1, 3)]'], {}), '([(1, 1), (3, 1), (3, 3), (1, 3)])\n', (18762, 18796), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((18814, 18858), 'shapely.geometry.LineString', 'LineString', (['[(3, 3), (5, 3), (5, 5), (3, 5)]'], {}), '([(3, 3), (5, 3), (5, 5), (3, 5)])\n', (18824, 18858), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((20089, 20104), 'shapely.geometry.box', 'box', (['(1)', '(1)', '(3)', '(3)'], {}), '(1, 1, 3, 3)\n', (20092, 20104), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((20184, 20199), 'shapely.geometry.box', 'box', (['(3)', '(1)', '(4)', '(2)'], {}), '(3, 1, 4, 2)\n', (20187, 20199), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((20529, 20544), 'shapely.geometry.box', 'box', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (20532, 20544), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((20546, 20561), 'shapely.geometry.box', 
'box', (['(4)', '(3)', '(5)', '(4)'], {}), '(4, 3, 5, 4)\n', (20549, 20561), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((20830, 20845), 'shapely.geometry.box', 'box', (['(0)', '(0)', '(1)', '(1)'], {}), '(0, 0, 1, 1)\n', (20833, 20845), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((20863, 20874), 'shapely.geometry.Point', 'Point', (['(1)', '(1)'], {}), '(1, 1)\n', (20868, 20874), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((21573, 21592), 'shapely.geometry.box', 'box', (['(11)', '(11)', '(12)', '(12)'], {}), '(11, 11, 12, 12)\n', (21576, 21592), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((21936, 21955), 'shapely.geometry.box', 'box', (['(11)', '(11)', '(12)', '(12)'], {}), '(11, 11, 12, 12)\n', (21939, 21955), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((22397, 22412), 'shapely.geometry.box', 'box', (['(1)', '(1)', '(2)', '(3)'], {}), '(1, 1, 2, 3)\n', (22400, 22412), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((22747, 22762), 'shapely.geometry.box', 'box', (['(1)', '(2)', '(2)', '(3)'], {}), '(1, 2, 2, 3)\n', (22750, 22762), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((24715, 24742), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int64"""'}), "([], dtype='int64')\n", (24723, 24742), True, 'import numpy as np\n'), ((24768, 24795), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int64"""'}), "([], dtype='int64')\n", (24776, 24795), True, 'import numpy as np\n'), ((24862, 24890), 'pandas.Index', 'pd.Index', (['[]'], {'dtype': '"""object"""'}), "([], dtype='object')\n", (24870, 24890), True, 'import pandas as pd\n'), ((25157, 25222), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'col1': [1.0], 'col2': [np.nan], 
'geometry': [p1]}"], {}), "({'col1': [1.0], 'col2': [np.nan], 'geometry': [p1]})\n", (25169, 25222), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((16813, 16870), 'geopandas.overlay', 'overlay', (['df3', 'df1'], {'how': 'how', 'keep_geom_type': 'keep_geom_type'}), '(df3, df1, how=how, keep_geom_type=keep_geom_type)\n', (16820, 16870), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((23161, 23178), 'shapely.geometry.box', 'box', (['(0)', '(0)', '(10)', '(10)'], {}), '(0, 0, 10, 10)\n', (23164, 23178), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((25367, 25445), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'col1': [1, np.nan], 'col2': [np.nan, 2], 'geometry': [p1, p2]}"], {}), "({'col1': [1, np.nan], 'col2': [np.nan, 2], 'geometry': [p1, p2]})\n", (25379, 25445), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((25850, 25861), 'shapely.geometry.Point', 'Point', (['x', 'x'], {}), '(x, x)\n', (25855, 25861), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((26329, 26344), 'shapely.geometry.box', 'box', (['(0)', '(2)', '(6)', '(6)'], {}), '(0, 2, 6, 6)\n', (26332, 26344), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((26394, 26410), 'shapely.geometry.box', 'box', (['(4)', '(0)', '(10)', '(4)'], {}), '(4, 0, 10, 4)\n', (26397, 26410), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((26465, 26480), 'shapely.geometry.box', 'box', (['(4)', '(2)', '(6)', '(4)'], {}), '(4, 2, 6, 4)\n', (26468, 26480), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((26558, 26573), 'shapely.geometry.box', 'box', (['(4)', '(2)', '(6)', '(4)'], {}), '(4, 2, 6, 4)\n', (26561, 26573), False, 'from shapely.geometry import Point, Polygon, LineString, 
GeometryCollection, box\n'), ((26591, 26656), 'shapely.geometry.Polygon', 'Polygon', (['[(4, 2), (0, 2), (0, 6), (6, 6), (6, 4), (4, 4), (4, 2)]'], {}), '([(4, 2), (0, 2), (0, 6), (6, 6), (6, 4), (4, 4), (4, 2)])\n', (26598, 26656), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((26674, 26742), 'shapely.geometry.Polygon', 'Polygon', (['[(10, 0), (4, 0), (4, 2), (6, 2), (6, 4), (10, 4), (10, 0)]'], {}), '([(10, 0), (4, 0), (4, 2), (6, 2), (6, 4), (10, 4), (10, 0)])\n', (26681, 26742), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((26835, 26900), 'shapely.geometry.Polygon', 'Polygon', (['[(4, 2), (0, 2), (0, 6), (6, 6), (6, 4), (4, 4), (4, 2)]'], {}), '([(4, 2), (0, 2), (0, 6), (6, 6), (6, 4), (4, 4), (4, 2)])\n', (26842, 26900), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((26996, 27064), 'shapely.geometry.Polygon', 'Polygon', (['[(10, 0), (4, 0), (4, 2), (6, 2), (6, 4), (10, 4), (10, 0)]'], {}), '([(10, 0), (4, 0), (4, 2), (6, 2), (6, 4), (10, 4), (10, 0)])\n', (27003, 27064), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((27179, 27244), 'shapely.geometry.Polygon', 'Polygon', (['[(4, 2), (0, 2), (0, 6), (6, 6), (6, 4), (4, 4), (4, 2)]'], {}), '([(4, 2), (0, 2), (0, 6), (6, 6), (6, 4), (4, 4), (4, 2)])\n', (27186, 27244), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((27262, 27330), 'shapely.geometry.Polygon', 'Polygon', (['[(10, 0), (4, 0), (4, 2), (6, 2), (6, 4), (10, 4), (10, 0)]'], {}), '([(10, 0), (4, 0), (4, 2), (6, 2), (6, 4), (10, 4), (10, 0)])\n', (27269, 27330), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((27438, 27453), 'shapely.geometry.box', 'box', (['(4)', '(2)', '(6)', '(4)'], {}), '(4, 2, 6, 4)\n', (27441, 27453), False, 'from shapely.geometry import Point, 
Polygon, LineString, GeometryCollection, box\n'), ((27471, 27536), 'shapely.geometry.Polygon', 'Polygon', (['[(4, 2), (0, 2), (0, 6), (6, 6), (6, 4), (4, 4), (4, 2)]'], {}), '([(4, 2), (0, 2), (0, 6), (6, 6), (6, 4), (4, 4), (4, 2)])\n', (27478, 27536), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((27644, 27659), 'shapely.geometry.box', 'box', (['(4)', '(2)', '(6)', '(4)'], {}), '(4, 2, 6, 4)\n', (27647, 27659), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((27677, 27745), 'shapely.geometry.Polygon', 'Polygon', (['[(10, 0), (4, 0), (4, 2), (6, 2), (6, 4), (10, 4), (10, 0)]'], {}), '([(10, 0), (4, 0), (4, 2), (6, 2), (6, 4), (10, 4), (10, 0)])\n', (27684, 27745), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((16925, 16982), 'geopandas.overlay', 'overlay', (['df4', 'df1'], {'how': 'how', 'keep_geom_type': 'keep_geom_type'}), '(df4, df1, how=how, keep_geom_type=keep_geom_type)\n', (16932, 16982), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n'), ((20912, 20927), 'shapely.geometry.box', 'box', (['(4)', '(3)', '(5)', '(4)'], {}), '(4, 3, 5, 4)\n', (20915, 20927), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((20929, 20957), 'shapely.geometry.LineString', 'LineString', (['[(3, 1), (3, 2)]'], {}), '([(3, 1), (3, 2)])\n', (20939, 20957), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((21875, 21903), 'shapely.geometry.LineString', 'LineString', (['[(1, 2), (1, 3)]'], {}), '([(1, 2), (1, 3)])\n', (21885, 21903), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((21905, 21916), 'shapely.geometry.Point', 'Point', (['(1)', '(1)'], {}), '(1, 1)\n', (21910, 21916), False, 'from shapely.geometry import Point, Polygon, LineString, GeometryCollection, box\n'), ((25580, 
25625), 'geopandas.GeoDataFrame', 'GeoDataFrame', (["{'col1': [1], 'geometry': [p1]}"], {}), "({'col1': [1], 'geometry': [p1]})\n", (25592, 25625), False, 'from geopandas import GeoDataFrame, GeoSeries, overlay, read_file\n')] |
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Power flow data for IEEE 118 bus test case.
"""
from numpy import array
def case118():
"""Power flow data for IEEE 118 bus test case.
Please see L{caseformat} for details on the case file format.
This data was converted from IEEE Common Data Format
(ieee118cdf.txt) on 20-Sep-2004 by cdf2matp, rev. 1.11
See end of file for warnings generated during conversion.
Converted from IEEE CDF file from:
U{http://www.ee.washington.edu/research/pstca/}
    With baseKV data taken from the PSAP format file from the same site,
added manually on 10-Mar-2006.
08/25/93 UW ARCHIVE 100.0 1961 W IEEE 118 Bus Test Case
@return: Power flow data for IEEE 118 bus test case.
"""
ppc = {"version": '2'}
##----- Power Flow Data -----##
## system MVA base
ppc["baseMVA"] = 100.0
## bus data
# bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin
ppc["bus"] = array([
[1, 2, 51, 27, 0, 0, 1, 0.955, 10.67, 138, 1, 1.06, 0.94],
[2, 1, 20, 9, 0, 0, 1, 0.971, 11.22, 138, 1, 1.06, 0.94],
[3, 1, 39, 10, 0, 0, 1, 0.968, 11.56, 138, 1, 1.06, 0.94],
[4, 2, 39, 12, 0, 0, 1, 0.998, 15.28, 138, 1, 1.06, 0.94],
[5, 1, 0, 0, 0, -40, 1, 1.002, 15.73, 138, 1, 1.06, 0.94],
[6, 2, 52, 22, 0, 0, 1, 0.99, 13, 138, 1, 1.06, 0.94],
[7, 1, 19, 2, 0, 0, 1, 0.989, 12.56, 138, 1, 1.06, 0.94],
[8, 2, 28, 0, 0, 0, 1, 1.015, 20.77, 345, 1, 1.06, 0.94],
[9, 1, 0, 0, 0, 0, 1, 1.043, 28.02, 345, 1, 1.06, 0.94],
[10, 2, 0, 0, 0, 0, 1, 1.05, 35.61, 345, 1, 1.06, 0.94],
[11, 1, 70, 23, 0, 0, 1, 0.985, 12.72, 138, 1, 1.06, 0.94],
[12, 2, 47, 10, 0, 0, 1, 0.99, 12.2, 138, 1, 1.06, 0.94],
[13, 1, 34, 16, 0, 0, 1, 0.968, 11.35, 138, 1, 1.06, 0.94],
[14, 1, 14, 1, 0, 0, 1, 0.984, 11.5, 138, 1, 1.06, 0.94],
[15, 2, 90, 30, 0, 0, 1, 0.97, 11.23, 138, 1, 1.06, 0.94],
[16, 1, 25, 10, 0, 0, 1, 0.984, 11.91, 138, 1, 1.06, 0.94],
[17, 1, 11, 3, 0, 0, 1, 0.995, 13.74, 138, 1, 1.06, 0.94],
[18, 2, 60, 34, 0, 0, 1, 0.973, 11.53, 138, 1, 1.06, 0.94],
[19, 2, 45, 25, 0, 0, 1, 0.963, 11.05, 138, 1, 1.06, 0.94],
[20, 1, 18, 3, 0, 0, 1, 0.958, 11.93, 138, 1, 1.06, 0.94],
[21, 1, 14, 8, 0, 0, 1, 0.959, 13.52, 138, 1, 1.06, 0.94],
[22, 1, 10, 5, 0, 0, 1, 0.97, 16.08, 138, 1, 1.06, 0.94],
[23, 1, 7, 3, 0, 0, 1, 1, 21, 138, 1, 1.06, 0.94],
[24, 2, 13, 0, 0, 0, 1, 0.992, 20.89, 138, 1, 1.06, 0.94],
[25, 2, 0, 0, 0, 0, 1, 1.05, 27.93, 138, 1, 1.06, 0.94],
[26, 2, 0, 0, 0, 0, 1, 1.015, 29.71, 345, 1, 1.06, 0.94],
[27, 2, 71, 13, 0, 0, 1, 0.968, 15.35, 138, 1, 1.06, 0.94],
[28, 1, 17, 7, 0, 0, 1, 0.962, 13.62, 138, 1, 1.06, 0.94],
[29, 1, 24, 4, 0, 0, 1, 0.963, 12.63, 138, 1, 1.06, 0.94],
[30, 1, 0, 0, 0, 0, 1, 0.968, 18.79, 345, 1, 1.06, 0.94],
[31, 2, 43, 27, 0, 0, 1, 0.967, 12.75, 138, 1, 1.06, 0.94],
[32, 2, 59, 23, 0, 0, 1, 0.964, 14.8, 138, 1, 1.06, 0.94],
[33, 1, 23, 9, 0, 0, 1, 0.972, 10.63, 138, 1, 1.06, 0.94],
[34, 2, 59, 26, 0, 14, 1, 0.986, 11.3, 138, 1, 1.06, 0.94],
[35, 1, 33, 9, 0, 0, 1, 0.981, 10.87, 138, 1, 1.06, 0.94],
[36, 2, 31, 17, 0, 0, 1, 0.98, 10.87, 138, 1, 1.06, 0.94],
[37, 1, 0, 0, 0, -25, 1, 0.992, 11.77, 138, 1, 1.06, 0.94],
[38, 1, 0, 0, 0, 0, 1, 0.962, 16.91, 345, 1, 1.06, 0.94],
[39, 1, 27, 11, 0, 0, 1, 0.97, 8.41, 138, 1, 1.06, 0.94],
[40, 2, 66, 23, 0, 0, 1, 0.97, 7.35, 138, 1, 1.06, 0.94],
[41, 1, 37, 10, 0, 0, 1, 0.967, 6.92, 138, 1, 1.06, 0.94],
[42, 2, 96, 23, 0, 0, 1, 0.985, 8.53, 138, 1, 1.06, 0.94],
[43, 1, 18, 7, 0, 0, 1, 0.978, 11.28, 138, 1, 1.06, 0.94],
[44, 1, 16, 8, 0, 10, 1, 0.985, 13.82, 138, 1, 1.06, 0.94],
[45, 1, 53, 22, 0, 10, 1, 0.987, 15.67, 138, 1, 1.06, 0.94],
[46, 2, 28, 10, 0, 10, 1, 1.005, 18.49, 138, 1, 1.06, 0.94],
[47, 1, 34, 0, 0, 0, 1, 1.017, 20.73, 138, 1, 1.06, 0.94],
[48, 1, 20, 11, 0, 15, 1, 1.021, 19.93, 138, 1, 1.06, 0.94],
[49, 2, 87, 30, 0, 0, 1, 1.025, 20.94, 138, 1, 1.06, 0.94],
[50, 1, 17, 4, 0, 0, 1, 1.001, 18.9, 138, 1, 1.06, 0.94],
[51, 1, 17, 8, 0, 0, 1, 0.967, 16.28, 138, 1, 1.06, 0.94],
[52, 1, 18, 5, 0, 0, 1, 0.957, 15.32, 138, 1, 1.06, 0.94],
[53, 1, 23, 11, 0, 0, 1, 0.946, 14.35, 138, 1, 1.06, 0.94],
[54, 2, 113, 32, 0, 0, 1, 0.955, 15.26, 138, 1, 1.06, 0.94],
[55, 2, 63, 22, 0, 0, 1, 0.952, 14.97, 138, 1, 1.06, 0.94],
[56, 2, 84, 18, 0, 0, 1, 0.954, 15.16, 138, 1, 1.06, 0.94],
[57, 1, 12, 3, 0, 0, 1, 0.971, 16.36, 138, 1, 1.06, 0.94],
[58, 1, 12, 3, 0, 0, 1, 0.959, 15.51, 138, 1, 1.06, 0.94],
[59, 2, 277, 113, 0, 0, 1, 0.985, 19.37, 138, 1, 1.06, 0.94],
[60, 1, 78, 3, 0, 0, 1, 0.993, 23.15, 138, 1, 1.06, 0.94],
[61, 2, 0, 0, 0, 0, 1, 0.995, 24.04, 138, 1, 1.06, 0.94],
[62, 2, 77, 14, 0, 0, 1, 0.998, 23.43, 138, 1, 1.06, 0.94],
[63, 1, 0, 0, 0, 0, 1, 0.969, 22.75, 345, 1, 1.06, 0.94],
[64, 1, 0, 0, 0, 0, 1, 0.984, 24.52, 345, 1, 1.06, 0.94],
[65, 2, 0, 0, 0, 0, 1, 1.005, 27.65, 345, 1, 1.06, 0.94],
[66, 2, 39, 18, 0, 0, 1, 1.05, 27.48, 138, 1, 1.06, 0.94],
[67, 1, 28, 7, 0, 0, 1, 1.02, 24.84, 138, 1, 1.06, 0.94],
[68, 1, 0, 0, 0, 0, 1, 1.003, 27.55, 345, 1, 1.06, 0.94],
[69, 3, 0, 0, 0, 0, 1, 1.035, 30, 138, 1, 1.06, 0.94],
[70, 2, 66, 20, 0, 0, 1, 0.984, 22.58, 138, 1, 1.06, 0.94],
[71, 1, 0, 0, 0, 0, 1, 0.987, 22.15, 138, 1, 1.06, 0.94],
[72, 2, 12, 0, 0, 0, 1, 0.98, 20.98, 138, 1, 1.06, 0.94],
[73, 2, 6, 0, 0, 0, 1, 0.991, 21.94, 138, 1, 1.06, 0.94],
[74, 2, 68, 27, 0, 12, 1, 0.958, 21.64, 138, 1, 1.06, 0.94],
[75, 1, 47, 11, 0, 0, 1, 0.967, 22.91, 138, 1, 1.06, 0.94],
[76, 2, 68, 36, 0, 0, 1, 0.943, 21.77, 138, 1, 1.06, 0.94],
[77, 2, 61, 28, 0, 0, 1, 1.006, 26.72, 138, 1, 1.06, 0.94],
[78, 1, 71, 26, 0, 0, 1, 1.003, 26.42, 138, 1, 1.06, 0.94],
[79, 1, 39, 32, 0, 20, 1, 1.009, 26.72, 138, 1, 1.06, 0.94],
[80, 2, 130, 26, 0, 0, 1, 1.04, 28.96, 138, 1, 1.06, 0.94],
[81, 1, 0, 0, 0, 0, 1, 0.997, 28.1, 345, 1, 1.06, 0.94],
[82, 1, 54, 27, 0, 20, 1, 0.989, 27.24, 138, 1, 1.06, 0.94],
[83, 1, 20, 10, 0, 10, 1, 0.985, 28.42, 138, 1, 1.06, 0.94],
[84, 1, 11, 7, 0, 0, 1, 0.98, 30.95, 138, 1, 1.06, 0.94],
[85, 2, 24, 15, 0, 0, 1, 0.985, 32.51, 138, 1, 1.06, 0.94],
[86, 1, 21, 10, 0, 0, 1, 0.987, 31.14, 138, 1, 1.06, 0.94],
[87, 2, 0, 0, 0, 0, 1, 1.015, 31.4, 161, 1, 1.06, 0.94],
[88, 1, 48, 10, 0, 0, 1, 0.987, 35.64, 138, 1, 1.06, 0.94],
[89, 2, 0, 0, 0, 0, 1, 1.005, 39.69, 138, 1, 1.06, 0.94],
[90, 2, 163, 42, 0, 0, 1, 0.985, 33.29, 138, 1, 1.06, 0.94],
[91, 2, 10, 0, 0, 0, 1, 0.98, 33.31, 138, 1, 1.06, 0.94],
[92, 2, 65, 10, 0, 0, 1, 0.993, 33.8, 138, 1, 1.06, 0.94],
[93, 1, 12, 7, 0, 0, 1, 0.987, 30.79, 138, 1, 1.06, 0.94],
[94, 1, 30, 16, 0, 0, 1, 0.991, 28.64, 138, 1, 1.06, 0.94],
[95, 1, 42, 31, 0, 0, 1, 0.981, 27.67, 138, 1, 1.06, 0.94],
[96, 1, 38, 15, 0, 0, 1, 0.993, 27.51, 138, 1, 1.06, 0.94],
[97, 1, 15, 9, 0, 0, 1, 1.011, 27.88, 138, 1, 1.06, 0.94],
[98, 1, 34, 8, 0, 0, 1, 1.024, 27.4, 138, 1, 1.06, 0.94],
[99, 2, 42, 0, 0, 0, 1, 1.01, 27.04, 138, 1, 1.06, 0.94],
[100, 2, 37, 18, 0, 0, 1, 1.017, 28.03, 138, 1, 1.06, 0.94],
[101, 1, 22, 15, 0, 0, 1, 0.993, 29.61, 138, 1, 1.06, 0.94],
[102, 1, 5, 3, 0, 0, 1, 0.991, 32.3, 138, 1, 1.06, 0.94],
[103, 2, 23, 16, 0, 0, 1, 1.001, 24.44, 138, 1, 1.06, 0.94],
[104, 2, 38, 25, 0, 0, 1, 0.971, 21.69, 138, 1, 1.06, 0.94],
[105, 2, 31, 26, 0, 20, 1, 0.965, 20.57, 138, 1, 1.06, 0.94],
[106, 1, 43, 16, 0, 0, 1, 0.962, 20.32, 138, 1, 1.06, 0.94],
[107, 2, 50, 12, 0, 6, 1, 0.952, 17.53, 138, 1, 1.06, 0.94],
[108, 1, 2, 1, 0, 0, 1, 0.967, 19.38, 138, 1, 1.06, 0.94],
[109, 1, 8, 3, 0, 0, 1, 0.967, 18.93, 138, 1, 1.06, 0.94],
[110, 2, 39, 30, 0, 6, 1, 0.973, 18.09, 138, 1, 1.06, 0.94],
[111, 2, 0, 0, 0, 0, 1, 0.98, 19.74, 138, 1, 1.06, 0.94],
[112, 2, 68, 13, 0, 0, 1, 0.975, 14.99, 138, 1, 1.06, 0.94],
[113, 2, 6, 0, 0, 0, 1, 0.993, 13.74, 138, 1, 1.06, 0.94],
[114, 1, 8, 3, 0, 0, 1, 0.96, 14.46, 138, 1, 1.06, 0.94],
[115, 1, 22, 7, 0, 0, 1, 0.96, 14.46, 138, 1, 1.06, 0.94],
[116, 2, 184, 0, 0, 0, 1, 1.005, 27.12, 138, 1, 1.06, 0.94],
[117, 1, 20, 8, 0, 0, 1, 0.974, 10.67, 138, 1, 1.06, 0.94],
[118, 1, 33, 15, 0, 0, 1, 0.949, 21.92, 138, 1, 1.06, 0.94]
])
## generator data
# bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,
# Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf
ppc["gen"] = array([
[1, 0, 0, 15, -5, 0.955, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4, 0, 0, 300, -300, 0.998, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[6, 0, 0, 50, -13, 0.99, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[8, 0, 0, 300, -300, 1.015, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[10, 450, 0, 200, -147, 1.05, 100, 1, 550, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[12, 85, 0, 120, -35, 0.99, 100, 1, 185, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[15, 0, 0, 30, -10, 0.97, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[18, 0, 0, 50, -16, 0.973, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[19, 0, 0, 24, -8, 0.962, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[24, 0, 0, 300, -300, 0.992, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[25, 220, 0, 140, -47, 1.05, 100, 1, 320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[26, 314, 0, 1000, -1000, 1.015, 100, 1, 414, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[27, 0, 0, 300, -300, 0.968, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[31, 7, 0, 300, -300, 0.967, 100, 1, 107, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[32, 0, 0, 42, -14, 0.963, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[34, 0, 0, 24, -8, 0.984, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[36, 0, 0, 24, -8, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[40, 0, 0, 300, -300, 0.97, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[42, 0, 0, 300, -300, 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[46, 19, 0, 100, -100, 1.005, 100, 1, 119, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49, 204, 0, 210, -85, 1.025, 100, 1, 304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[54, 48, 0, 300, -300, 0.955, 100, 1, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[55, 0, 0, 23, -8, 0.952, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[56, 0, 0, 15, -8, 0.954, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[59, 155, 0, 180, -60, 0.985, 100, 1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[61, 160, 0, 300, -100, 0.995, 100, 1, 260, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[62, 0, 0, 20, -20, 0.998, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[65, 391, 0, 200, -67, 1.005, 100, 1, 491, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[66, 392, 0, 200, -67, 1.05, 100, 1, 492, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[69, 516.4, 0, 300, -300, 1.035, 100, 1, 805.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[70, 0, 0, 32, -10, 0.984, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[72, 0, 0, 100, -100, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[73, 0, 0, 100, -100, 0.991, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[74, 0, 0, 9, -6, 0.958, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[76, 0, 0, 23, -8, 0.943, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[77, 0, 0, 70, -20, 1.006, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[80, 477, 0, 280, -165, 1.04, 100, 1, 577, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[85, 0, 0, 23, -8, 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[87, 4, 0, 1000, -100, 1.015, 100, 1, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[89, 607, 0, 300, -210, 1.005, 100, 1, 707, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[90, 0, 0, 300, -300, 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[91, 0, 0, 100, -100, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[92, 0, 0, 9, -3, 0.99, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[99, 0, 0, 100, -100, 1.01, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[100, 252, 0, 155, -50, 1.017, 100, 1, 352, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[103, 40, 0, 40, -15, 1.01, 100, 1, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[104, 0, 0, 23, -8, 0.971, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[105, 0, 0, 23, -8, 0.965, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[107, 0, 0, 200, -200, 0.952, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[110, 0, 0, 23, -8, 0.973, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[111, 36, 0, 1000, -100, 0.98, 100, 1, 136, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[112, 0, 0, 1000, -100, 0.975, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[113, 0, 0, 200, -100, 0.993, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[116, 0, 0, 1000, -1000, 1.005, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
])
## branch data
# fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax
ppc["branch"] = array([
[1, 2, 0.0303, 0.0999, 0.0254, 9900, 0, 0, 0, 0, 1, -360, 360],
[1, 3, 0.0129, 0.0424, 0.01082, 9900, 0, 0, 0, 0, 1, -360, 360],
[4, 5, 0.00176, 0.00798, 0.0021, 9900, 0, 0, 0, 0, 1, -360, 360],
[3, 5, 0.0241, 0.108, 0.0284, 9900, 0, 0, 0, 0, 1, -360, 360],
[5, 6, 0.0119, 0.054, 0.01426, 9900, 0, 0, 0, 0, 1, -360, 360],
[6, 7, 0.00459, 0.0208, 0.0055, 9900, 0, 0, 0, 0, 1, -360, 360],
[8, 9, 0.00244, 0.0305, 1.162, 9900, 0, 0, 0, 0, 1, -360, 360],
[8, 5, 0, 0.0267, 0, 9900, 0, 0, 0.985, 0, 1, -360, 360],
[9, 10, 0.00258, 0.0322, 1.23, 9900, 0, 0, 0, 0, 1, -360, 360],
[4, 11, 0.0209, 0.0688, 0.01748, 9900, 0, 0, 0, 0, 1, -360, 360],
[5, 11, 0.0203, 0.0682, 0.01738, 9900, 0, 0, 0, 0, 1, -360, 360],
[11, 12, 0.00595, 0.0196, 0.00502, 9900, 0, 0, 0, 0, 1, -360, 360],
[2, 12, 0.0187, 0.0616, 0.01572, 9900, 0, 0, 0, 0, 1, -360, 360],
[3, 12, 0.0484, 0.16, 0.0406, 9900, 0, 0, 0, 0, 1, -360, 360],
[7, 12, 0.00862, 0.034, 0.00874, 9900, 0, 0, 0, 0, 1, -360, 360],
[11, 13, 0.02225, 0.0731, 0.01876, 9900, 0, 0, 0, 0, 1, -360, 360],
[12, 14, 0.0215, 0.0707, 0.01816, 9900, 0, 0, 0, 0, 1, -360, 360],
[13, 15, 0.0744, 0.2444, 0.06268, 9900, 0, 0, 0, 0, 1, -360, 360],
[14, 15, 0.0595, 0.195, 0.0502, 9900, 0, 0, 0, 0, 1, -360, 360],
[12, 16, 0.0212, 0.0834, 0.0214, 9900, 0, 0, 0, 0, 1, -360, 360],
[15, 17, 0.0132, 0.0437, 0.0444, 9900, 0, 0, 0, 0, 1, -360, 360],
[16, 17, 0.0454, 0.1801, 0.0466, 9900, 0, 0, 0, 0, 1, -360, 360],
[17, 18, 0.0123, 0.0505, 0.01298, 9900, 0, 0, 0, 0, 1, -360, 360],
[18, 19, 0.01119, 0.0493, 0.01142, 9900, 0, 0, 0, 0, 1, -360, 360],
[19, 20, 0.0252, 0.117, 0.0298, 9900, 0, 0, 0, 0, 1, -360, 360],
[15, 19, 0.012, 0.0394, 0.0101, 9900, 0, 0, 0, 0, 1, -360, 360],
[20, 21, 0.0183, 0.0849, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
[21, 22, 0.0209, 0.097, 0.0246, 9900, 0, 0, 0, 0, 1, -360, 360],
[22, 23, 0.0342, 0.159, 0.0404, 9900, 0, 0, 0, 0, 1, -360, 360],
[23, 24, 0.0135, 0.0492, 0.0498, 9900, 0, 0, 0, 0, 1, -360, 360],
[23, 25, 0.0156, 0.08, 0.0864, 9900, 0, 0, 0, 0, 1, -360, 360],
[26, 25, 0, 0.0382, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360],
[25, 27, 0.0318, 0.163, 0.1764, 9900, 0, 0, 0, 0, 1, -360, 360],
[27, 28, 0.01913, 0.0855, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
[28, 29, 0.0237, 0.0943, 0.0238, 9900, 0, 0, 0, 0, 1, -360, 360],
[30, 17, 0, 0.0388, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360],
[8, 30, 0.00431, 0.0504, 0.514, 9900, 0, 0, 0, 0, 1, -360, 360],
[26, 30, 0.00799, 0.086, 0.908, 9900, 0, 0, 0, 0, 1, -360, 360],
[17, 31, 0.0474, 0.1563, 0.0399, 9900, 0, 0, 0, 0, 1, -360, 360],
[29, 31, 0.0108, 0.0331, 0.0083, 9900, 0, 0, 0, 0, 1, -360, 360],
[23, 32, 0.0317, 0.1153, 0.1173, 9900, 0, 0, 0, 0, 1, -360, 360],
[31, 32, 0.0298, 0.0985, 0.0251, 9900, 0, 0, 0, 0, 1, -360, 360],
[27, 32, 0.0229, 0.0755, 0.01926, 9900, 0, 0, 0, 0, 1, -360, 360],
[15, 33, 0.038, 0.1244, 0.03194, 9900, 0, 0, 0, 0, 1, -360, 360],
[19, 34, 0.0752, 0.247, 0.0632, 9900, 0, 0, 0, 0, 1, -360, 360],
[35, 36, 0.00224, 0.0102, 0.00268, 9900, 0, 0, 0, 0, 1, -360, 360],
[35, 37, 0.011, 0.0497, 0.01318, 9900, 0, 0, 0, 0, 1, -360, 360],
[33, 37, 0.0415, 0.142, 0.0366, 9900, 0, 0, 0, 0, 1, -360, 360],
[34, 36, 0.00871, 0.0268, 0.00568, 9900, 0, 0, 0, 0, 1, -360, 360],
[34, 37, 0.00256, 0.0094, 0.00984, 9900, 0, 0, 0, 0, 1, -360, 360],
[38, 37, 0, 0.0375, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
[37, 39, 0.0321, 0.106, 0.027, 9900, 0, 0, 0, 0, 1, -360, 360],
[37, 40, 0.0593, 0.168, 0.042, 9900, 0, 0, 0, 0, 1, -360, 360],
[30, 38, 0.00464, 0.054, 0.422, 9900, 0, 0, 0, 0, 1, -360, 360],
[39, 40, 0.0184, 0.0605, 0.01552, 9900, 0, 0, 0, 0, 1, -360, 360],
[40, 41, 0.0145, 0.0487, 0.01222, 9900, 0, 0, 0, 0, 1, -360, 360],
[40, 42, 0.0555, 0.183, 0.0466, 9900, 0, 0, 0, 0, 1, -360, 360],
[41, 42, 0.041, 0.135, 0.0344, 9900, 0, 0, 0, 0, 1, -360, 360],
[43, 44, 0.0608, 0.2454, 0.06068, 9900, 0, 0, 0, 0, 1, -360, 360],
[34, 43, 0.0413, 0.1681, 0.04226, 9900, 0, 0, 0, 0, 1, -360, 360],
[44, 45, 0.0224, 0.0901, 0.0224, 9900, 0, 0, 0, 0, 1, -360, 360],
[45, 46, 0.04, 0.1356, 0.0332, 9900, 0, 0, 0, 0, 1, -360, 360],
[46, 47, 0.038, 0.127, 0.0316, 9900, 0, 0, 0, 0, 1, -360, 360],
[46, 48, 0.0601, 0.189, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[47, 49, 0.0191, 0.0625, 0.01604, 9900, 0, 0, 0, 0, 1, -360, 360],
[42, 49, 0.0715, 0.323, 0.086, 9900, 0, 0, 0, 0, 1, -360, 360],
[42, 49, 0.0715, 0.323, 0.086, 9900, 0, 0, 0, 0, 1, -360, 360],
[45, 49, 0.0684, 0.186, 0.0444, 9900, 0, 0, 0, 0, 1, -360, 360],
[48, 49, 0.0179, 0.0505, 0.01258, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 50, 0.0267, 0.0752, 0.01874, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 51, 0.0486, 0.137, 0.0342, 9900, 0, 0, 0, 0, 1, -360, 360],
[51, 52, 0.0203, 0.0588, 0.01396, 9900, 0, 0, 0, 0, 1, -360, 360],
[52, 53, 0.0405, 0.1635, 0.04058, 9900, 0, 0, 0, 0, 1, -360, 360],
[53, 54, 0.0263, 0.122, 0.031, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 54, 0.073, 0.289, 0.0738, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 54, 0.0869, 0.291, 0.073, 9900, 0, 0, 0, 0, 1, -360, 360],
[54, 55, 0.0169, 0.0707, 0.0202, 9900, 0, 0, 0, 0, 1, -360, 360],
[54, 56, 0.00275, 0.00955, 0.00732, 9900, 0, 0, 0, 0, 1, -360, 360],
[55, 56, 0.00488, 0.0151, 0.00374, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 57, 0.0343, 0.0966, 0.0242, 9900, 0, 0, 0, 0, 1, -360, 360],
[50, 57, 0.0474, 0.134, 0.0332, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 58, 0.0343, 0.0966, 0.0242, 9900, 0, 0, 0, 0, 1, -360, 360],
[51, 58, 0.0255, 0.0719, 0.01788, 9900, 0, 0, 0, 0, 1, -360, 360],
[54, 59, 0.0503, 0.2293, 0.0598, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 59, 0.0825, 0.251, 0.0569, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 59, 0.0803, 0.239, 0.0536, 9900, 0, 0, 0, 0, 1, -360, 360],
[55, 59, 0.04739, 0.2158, 0.05646, 9900, 0, 0, 0, 0, 1, -360, 360],
[59, 60, 0.0317, 0.145, 0.0376, 9900, 0, 0, 0, 0, 1, -360, 360],
[59, 61, 0.0328, 0.15, 0.0388, 9900, 0, 0, 0, 0, 1, -360, 360],
[60, 61, 0.00264, 0.0135, 0.01456, 9900, 0, 0, 0, 0, 1, -360, 360],
[60, 62, 0.0123, 0.0561, 0.01468, 9900, 0, 0, 0, 0, 1, -360, 360],
[61, 62, 0.00824, 0.0376, 0.0098, 9900, 0, 0, 0, 0, 1, -360, 360],
[63, 59, 0, 0.0386, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360],
[63, 64, 0.00172, 0.02, 0.216, 9900, 0, 0, 0, 0, 1, -360, 360],
[64, 61, 0, 0.0268, 0, 9900, 0, 0, 0.985, 0, 1, -360, 360],
[38, 65, 0.00901, 0.0986, 1.046, 9900, 0, 0, 0, 0, 1, -360, 360],
[64, 65, 0.00269, 0.0302, 0.38, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 66, 0.018, 0.0919, 0.0248, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 66, 0.018, 0.0919, 0.0248, 9900, 0, 0, 0, 0, 1, -360, 360],
[62, 66, 0.0482, 0.218, 0.0578, 9900, 0, 0, 0, 0, 1, -360, 360],
[62, 67, 0.0258, 0.117, 0.031, 9900, 0, 0, 0, 0, 1, -360, 360],
[65, 66, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
[66, 67, 0.0224, 0.1015, 0.02682, 9900, 0, 0, 0, 0, 1, -360, 360],
[65, 68, 0.00138, 0.016, 0.638, 9900, 0, 0, 0, 0, 1, -360, 360],
[47, 69, 0.0844, 0.2778, 0.07092, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 69, 0.0985, 0.324, 0.0828, 9900, 0, 0, 0, 0, 1, -360, 360],
[68, 69, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
[69, 70, 0.03, 0.127, 0.122, 9900, 0, 0, 0, 0, 1, -360, 360],
[24, 70, 0.00221, 0.4115, 0.10198, 9900, 0, 0, 0, 0, 1, -360, 360],
[70, 71, 0.00882, 0.0355, 0.00878, 9900, 0, 0, 0, 0, 1, -360, 360],
[24, 72, 0.0488, 0.196, 0.0488, 9900, 0, 0, 0, 0, 1, -360, 360],
[71, 72, 0.0446, 0.18, 0.04444, 9900, 0, 0, 0, 0, 1, -360, 360],
[71, 73, 0.00866, 0.0454, 0.01178, 9900, 0, 0, 0, 0, 1, -360, 360],
[70, 74, 0.0401, 0.1323, 0.03368, 9900, 0, 0, 0, 0, 1, -360, 360],
[70, 75, 0.0428, 0.141, 0.036, 9900, 0, 0, 0, 0, 1, -360, 360],
[69, 75, 0.0405, 0.122, 0.124, 9900, 0, 0, 0, 0, 1, -360, 360],
[74, 75, 0.0123, 0.0406, 0.01034, 9900, 0, 0, 0, 0, 1, -360, 360],
[76, 77, 0.0444, 0.148, 0.0368, 9900, 0, 0, 0, 0, 1, -360, 360],
[69, 77, 0.0309, 0.101, 0.1038, 9900, 0, 0, 0, 0, 1, -360, 360],
[75, 77, 0.0601, 0.1999, 0.04978, 9900, 0, 0, 0, 0, 1, -360, 360],
[77, 78, 0.00376, 0.0124, 0.01264, 9900, 0, 0, 0, 0, 1, -360, 360],
[78, 79, 0.00546, 0.0244, 0.00648, 9900, 0, 0, 0, 0, 1, -360, 360],
[77, 80, 0.017, 0.0485, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[77, 80, 0.0294, 0.105, 0.0228, 9900, 0, 0, 0, 0, 1, -360, 360],
[79, 80, 0.0156, 0.0704, 0.0187, 9900, 0, 0, 0, 0, 1, -360, 360],
[68, 81, 0.00175, 0.0202, 0.808, 9900, 0, 0, 0, 0, 1, -360, 360],
[81, 80, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
[77, 82, 0.0298, 0.0853, 0.08174, 9900, 0, 0, 0, 0, 1, -360, 360],
[82, 83, 0.0112, 0.03665, 0.03796, 9900, 0, 0, 0, 0, 1, -360, 360],
[83, 84, 0.0625, 0.132, 0.0258, 9900, 0, 0, 0, 0, 1, -360, 360],
[83, 85, 0.043, 0.148, 0.0348, 9900, 0, 0, 0, 0, 1, -360, 360],
[84, 85, 0.0302, 0.0641, 0.01234, 9900, 0, 0, 0, 0, 1, -360, 360],
[85, 86, 0.035, 0.123, 0.0276, 9900, 0, 0, 0, 0, 1, -360, 360],
[86, 87, 0.02828, 0.2074, 0.0445, 9900, 0, 0, 0, 0, 1, -360, 360],
[85, 88, 0.02, 0.102, 0.0276, 9900, 0, 0, 0, 0, 1, -360, 360],
[85, 89, 0.0239, 0.173, 0.047, 9900, 0, 0, 0, 0, 1, -360, 360],
[88, 89, 0.0139, 0.0712, 0.01934, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 90, 0.0518, 0.188, 0.0528, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 90, 0.0238, 0.0997, 0.106, 9900, 0, 0, 0, 0, 1, -360, 360],
[90, 91, 0.0254, 0.0836, 0.0214, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 92, 0.0099, 0.0505, 0.0548, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 92, 0.0393, 0.1581, 0.0414, 9900, 0, 0, 0, 0, 1, -360, 360],
[91, 92, 0.0387, 0.1272, 0.03268, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 93, 0.0258, 0.0848, 0.0218, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 94, 0.0481, 0.158, 0.0406, 9900, 0, 0, 0, 0, 1, -360, 360],
[93, 94, 0.0223, 0.0732, 0.01876, 9900, 0, 0, 0, 0, 1, -360, 360],
[94, 95, 0.0132, 0.0434, 0.0111, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 96, 0.0356, 0.182, 0.0494, 9900, 0, 0, 0, 0, 1, -360, 360],
[82, 96, 0.0162, 0.053, 0.0544, 9900, 0, 0, 0, 0, 1, -360, 360],
[94, 96, 0.0269, 0.0869, 0.023, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 97, 0.0183, 0.0934, 0.0254, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 98, 0.0238, 0.108, 0.0286, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 99, 0.0454, 0.206, 0.0546, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 100, 0.0648, 0.295, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[94, 100, 0.0178, 0.058, 0.0604, 9900, 0, 0, 0, 0, 1, -360, 360],
[95, 96, 0.0171, 0.0547, 0.01474, 9900, 0, 0, 0, 0, 1, -360, 360],
[96, 97, 0.0173, 0.0885, 0.024, 9900, 0, 0, 0, 0, 1, -360, 360],
[98, 100, 0.0397, 0.179, 0.0476, 9900, 0, 0, 0, 0, 1, -360, 360],
[99, 100, 0.018, 0.0813, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 101, 0.0277, 0.1262, 0.0328, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 102, 0.0123, 0.0559, 0.01464, 9900, 0, 0, 0, 0, 1, -360, 360],
[101, 102, 0.0246, 0.112, 0.0294, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 103, 0.016, 0.0525, 0.0536, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 104, 0.0451, 0.204, 0.0541, 9900, 0, 0, 0, 0, 1, -360, 360],
[103, 104, 0.0466, 0.1584, 0.0407, 9900, 0, 0, 0, 0, 1, -360, 360],
[103, 105, 0.0535, 0.1625, 0.0408, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 106, 0.0605, 0.229, 0.062, 9900, 0, 0, 0, 0, 1, -360, 360],
[104, 105, 0.00994, 0.0378, 0.00986, 9900, 0, 0, 0, 0, 1, -360, 360],
[105, 106, 0.014, 0.0547, 0.01434, 9900, 0, 0, 0, 0, 1, -360, 360],
[105, 107, 0.053, 0.183, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[105, 108, 0.0261, 0.0703, 0.01844, 9900, 0, 0, 0, 0, 1, -360, 360],
[106, 107, 0.053, 0.183, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[108, 109, 0.0105, 0.0288, 0.0076, 9900, 0, 0, 0, 0, 1, -360, 360],
[103, 110, 0.03906, 0.1813, 0.0461, 9900, 0, 0, 0, 0, 1, -360, 360],
[109, 110, 0.0278, 0.0762, 0.0202, 9900, 0, 0, 0, 0, 1, -360, 360],
[110, 111, 0.022, 0.0755, 0.02, 9900, 0, 0, 0, 0, 1, -360, 360],
[110, 112, 0.0247, 0.064, 0.062, 9900, 0, 0, 0, 0, 1, -360, 360],
[17, 113, 0.00913, 0.0301, 0.00768, 9900, 0, 0, 0, 0, 1, -360, 360],
[32, 113, 0.0615, 0.203, 0.0518, 9900, 0, 0, 0, 0, 1, -360, 360],
[32, 114, 0.0135, 0.0612, 0.01628, 9900, 0, 0, 0, 0, 1, -360, 360],
[27, 115, 0.0164, 0.0741, 0.01972, 9900, 0, 0, 0, 0, 1, -360, 360],
[114, 115, 0.0023, 0.0104, 0.00276, 9900, 0, 0, 0, 0, 1, -360, 360],
[68, 116, 0.00034, 0.00405, 0.164, 9900, 0, 0, 0, 0, 1, -360, 360],
[12, 117, 0.0329, 0.14, 0.0358, 9900, 0, 0, 0, 0, 1, -360, 360],
[75, 118, 0.0145, 0.0481, 0.01198, 9900, 0, 0, 0, 0, 1, -360, 360],
[76, 118, 0.0164, 0.0544, 0.01356, 9900, 0, 0, 0, 0, 1, -360, 360]
])
##----- OPF Data -----##
## generator cost data
# 1 startup shutdown n x1 y1 ... xn yn
# 2 startup shutdown n c(n-1) ... c0
ppc["gencost"] = array([
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0222222, 20, 0],
[2, 0, 0, 3, 0.117647, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0454545, 20, 0],
[2, 0, 0, 3, 0.0318471, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 1.42857, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.526316, 20, 0],
[2, 0, 0, 3, 0.0490196, 20, 0],
[2, 0, 0, 3, 0.208333, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0645161, 20, 0],
[2, 0, 0, 3, 0.0625, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0255754, 20, 0],
[2, 0, 0, 3, 0.0255102, 20, 0],
[2, 0, 0, 3, 0.0193648, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0209644, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 2.5, 20, 0],
[2, 0, 0, 3, 0.0164745, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0396825, 20, 0],
[2, 0, 0, 3, 0.25, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.277778, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.01, 40, 0]
])
return ppc
| [
"numpy.array"
] | [((1095, 8510), 'numpy.array', 'array', (['[[1, 2, 51, 27, 0, 0, 1, 0.955, 10.67, 138, 1, 1.06, 0.94], [2, 1, 20, 9, 0,\n 0, 1, 0.971, 11.22, 138, 1, 1.06, 0.94], [3, 1, 39, 10, 0, 0, 1, 0.968,\n 11.56, 138, 1, 1.06, 0.94], [4, 2, 39, 12, 0, 0, 1, 0.998, 15.28, 138, \n 1, 1.06, 0.94], [5, 1, 0, 0, 0, -40, 1, 1.002, 15.73, 138, 1, 1.06, \n 0.94], [6, 2, 52, 22, 0, 0, 1, 0.99, 13, 138, 1, 1.06, 0.94], [7, 1, 19,\n 2, 0, 0, 1, 0.989, 12.56, 138, 1, 1.06, 0.94], [8, 2, 28, 0, 0, 0, 1, \n 1.015, 20.77, 345, 1, 1.06, 0.94], [9, 1, 0, 0, 0, 0, 1, 1.043, 28.02, \n 345, 1, 1.06, 0.94], [10, 2, 0, 0, 0, 0, 1, 1.05, 35.61, 345, 1, 1.06, \n 0.94], [11, 1, 70, 23, 0, 0, 1, 0.985, 12.72, 138, 1, 1.06, 0.94], [12,\n 2, 47, 10, 0, 0, 1, 0.99, 12.2, 138, 1, 1.06, 0.94], [13, 1, 34, 16, 0,\n 0, 1, 0.968, 11.35, 138, 1, 1.06, 0.94], [14, 1, 14, 1, 0, 0, 1, 0.984,\n 11.5, 138, 1, 1.06, 0.94], [15, 2, 90, 30, 0, 0, 1, 0.97, 11.23, 138, 1,\n 1.06, 0.94], [16, 1, 25, 10, 0, 0, 1, 0.984, 11.91, 138, 1, 1.06, 0.94],\n [17, 1, 11, 3, 0, 0, 1, 0.995, 13.74, 138, 1, 1.06, 0.94], [18, 2, 60, \n 34, 0, 0, 1, 0.973, 11.53, 138, 1, 1.06, 0.94], [19, 2, 45, 25, 0, 0, 1,\n 0.963, 11.05, 138, 1, 1.06, 0.94], [20, 1, 18, 3, 0, 0, 1, 0.958, 11.93,\n 138, 1, 1.06, 0.94], [21, 1, 14, 8, 0, 0, 1, 0.959, 13.52, 138, 1, 1.06,\n 0.94], [22, 1, 10, 5, 0, 0, 1, 0.97, 16.08, 138, 1, 1.06, 0.94], [23, 1,\n 7, 3, 0, 0, 1, 1, 21, 138, 1, 1.06, 0.94], [24, 2, 13, 0, 0, 0, 1, \n 0.992, 20.89, 138, 1, 1.06, 0.94], [25, 2, 0, 0, 0, 0, 1, 1.05, 27.93, \n 138, 1, 1.06, 0.94], [26, 2, 0, 0, 0, 0, 1, 1.015, 29.71, 345, 1, 1.06,\n 0.94], [27, 2, 71, 13, 0, 0, 1, 0.968, 15.35, 138, 1, 1.06, 0.94], [28,\n 1, 17, 7, 0, 0, 1, 0.962, 13.62, 138, 1, 1.06, 0.94], [29, 1, 24, 4, 0,\n 0, 1, 0.963, 12.63, 138, 1, 1.06, 0.94], [30, 1, 0, 0, 0, 0, 1, 0.968, \n 18.79, 345, 1, 1.06, 0.94], [31, 2, 43, 27, 0, 0, 1, 0.967, 12.75, 138,\n 1, 1.06, 0.94], [32, 2, 59, 23, 0, 0, 1, 0.964, 14.8, 138, 1, 1.06, \n 0.94], [33, 1, 23, 9, 0, 0, 
1, 0.972, 10.63, 138, 1, 1.06, 0.94], [34, \n 2, 59, 26, 0, 14, 1, 0.986, 11.3, 138, 1, 1.06, 0.94], [35, 1, 33, 9, 0,\n 0, 1, 0.981, 10.87, 138, 1, 1.06, 0.94], [36, 2, 31, 17, 0, 0, 1, 0.98,\n 10.87, 138, 1, 1.06, 0.94], [37, 1, 0, 0, 0, -25, 1, 0.992, 11.77, 138,\n 1, 1.06, 0.94], [38, 1, 0, 0, 0, 0, 1, 0.962, 16.91, 345, 1, 1.06, 0.94\n ], [39, 1, 27, 11, 0, 0, 1, 0.97, 8.41, 138, 1, 1.06, 0.94], [40, 2, 66,\n 23, 0, 0, 1, 0.97, 7.35, 138, 1, 1.06, 0.94], [41, 1, 37, 10, 0, 0, 1, \n 0.967, 6.92, 138, 1, 1.06, 0.94], [42, 2, 96, 23, 0, 0, 1, 0.985, 8.53,\n 138, 1, 1.06, 0.94], [43, 1, 18, 7, 0, 0, 1, 0.978, 11.28, 138, 1, 1.06,\n 0.94], [44, 1, 16, 8, 0, 10, 1, 0.985, 13.82, 138, 1, 1.06, 0.94], [45,\n 1, 53, 22, 0, 10, 1, 0.987, 15.67, 138, 1, 1.06, 0.94], [46, 2, 28, 10,\n 0, 10, 1, 1.005, 18.49, 138, 1, 1.06, 0.94], [47, 1, 34, 0, 0, 0, 1, \n 1.017, 20.73, 138, 1, 1.06, 0.94], [48, 1, 20, 11, 0, 15, 1, 1.021, \n 19.93, 138, 1, 1.06, 0.94], [49, 2, 87, 30, 0, 0, 1, 1.025, 20.94, 138,\n 1, 1.06, 0.94], [50, 1, 17, 4, 0, 0, 1, 1.001, 18.9, 138, 1, 1.06, 0.94\n ], [51, 1, 17, 8, 0, 0, 1, 0.967, 16.28, 138, 1, 1.06, 0.94], [52, 1, \n 18, 5, 0, 0, 1, 0.957, 15.32, 138, 1, 1.06, 0.94], [53, 1, 23, 11, 0, 0,\n 1, 0.946, 14.35, 138, 1, 1.06, 0.94], [54, 2, 113, 32, 0, 0, 1, 0.955, \n 15.26, 138, 1, 1.06, 0.94], [55, 2, 63, 22, 0, 0, 1, 0.952, 14.97, 138,\n 1, 1.06, 0.94], [56, 2, 84, 18, 0, 0, 1, 0.954, 15.16, 138, 1, 1.06, \n 0.94], [57, 1, 12, 3, 0, 0, 1, 0.971, 16.36, 138, 1, 1.06, 0.94], [58, \n 1, 12, 3, 0, 0, 1, 0.959, 15.51, 138, 1, 1.06, 0.94], [59, 2, 277, 113,\n 0, 0, 1, 0.985, 19.37, 138, 1, 1.06, 0.94], [60, 1, 78, 3, 0, 0, 1, \n 0.993, 23.15, 138, 1, 1.06, 0.94], [61, 2, 0, 0, 0, 0, 1, 0.995, 24.04,\n 138, 1, 1.06, 0.94], [62, 2, 77, 14, 0, 0, 1, 0.998, 23.43, 138, 1, \n 1.06, 0.94], [63, 1, 0, 0, 0, 0, 1, 0.969, 22.75, 345, 1, 1.06, 0.94],\n [64, 1, 0, 0, 0, 0, 1, 0.984, 24.52, 345, 1, 1.06, 0.94], [65, 2, 0, 0,\n 0, 0, 1, 1.005, 27.65, 345, 1, 1.06, 
0.94], [66, 2, 39, 18, 0, 0, 1, \n 1.05, 27.48, 138, 1, 1.06, 0.94], [67, 1, 28, 7, 0, 0, 1, 1.02, 24.84, \n 138, 1, 1.06, 0.94], [68, 1, 0, 0, 0, 0, 1, 1.003, 27.55, 345, 1, 1.06,\n 0.94], [69, 3, 0, 0, 0, 0, 1, 1.035, 30, 138, 1, 1.06, 0.94], [70, 2, \n 66, 20, 0, 0, 1, 0.984, 22.58, 138, 1, 1.06, 0.94], [71, 1, 0, 0, 0, 0,\n 1, 0.987, 22.15, 138, 1, 1.06, 0.94], [72, 2, 12, 0, 0, 0, 1, 0.98, \n 20.98, 138, 1, 1.06, 0.94], [73, 2, 6, 0, 0, 0, 1, 0.991, 21.94, 138, 1,\n 1.06, 0.94], [74, 2, 68, 27, 0, 12, 1, 0.958, 21.64, 138, 1, 1.06, 0.94\n ], [75, 1, 47, 11, 0, 0, 1, 0.967, 22.91, 138, 1, 1.06, 0.94], [76, 2, \n 68, 36, 0, 0, 1, 0.943, 21.77, 138, 1, 1.06, 0.94], [77, 2, 61, 28, 0, \n 0, 1, 1.006, 26.72, 138, 1, 1.06, 0.94], [78, 1, 71, 26, 0, 0, 1, 1.003,\n 26.42, 138, 1, 1.06, 0.94], [79, 1, 39, 32, 0, 20, 1, 1.009, 26.72, 138,\n 1, 1.06, 0.94], [80, 2, 130, 26, 0, 0, 1, 1.04, 28.96, 138, 1, 1.06, \n 0.94], [81, 1, 0, 0, 0, 0, 1, 0.997, 28.1, 345, 1, 1.06, 0.94], [82, 1,\n 54, 27, 0, 20, 1, 0.989, 27.24, 138, 1, 1.06, 0.94], [83, 1, 20, 10, 0,\n 10, 1, 0.985, 28.42, 138, 1, 1.06, 0.94], [84, 1, 11, 7, 0, 0, 1, 0.98,\n 30.95, 138, 1, 1.06, 0.94], [85, 2, 24, 15, 0, 0, 1, 0.985, 32.51, 138,\n 1, 1.06, 0.94], [86, 1, 21, 10, 0, 0, 1, 0.987, 31.14, 138, 1, 1.06, \n 0.94], [87, 2, 0, 0, 0, 0, 1, 1.015, 31.4, 161, 1, 1.06, 0.94], [88, 1,\n 48, 10, 0, 0, 1, 0.987, 35.64, 138, 1, 1.06, 0.94], [89, 2, 0, 0, 0, 0,\n 1, 1.005, 39.69, 138, 1, 1.06, 0.94], [90, 2, 163, 42, 0, 0, 1, 0.985, \n 33.29, 138, 1, 1.06, 0.94], [91, 2, 10, 0, 0, 0, 1, 0.98, 33.31, 138, 1,\n 1.06, 0.94], [92, 2, 65, 10, 0, 0, 1, 0.993, 33.8, 138, 1, 1.06, 0.94],\n [93, 1, 12, 7, 0, 0, 1, 0.987, 30.79, 138, 1, 1.06, 0.94], [94, 1, 30, \n 16, 0, 0, 1, 0.991, 28.64, 138, 1, 1.06, 0.94], [95, 1, 42, 31, 0, 0, 1,\n 0.981, 27.67, 138, 1, 1.06, 0.94], [96, 1, 38, 15, 0, 0, 1, 0.993, \n 27.51, 138, 1, 1.06, 0.94], [97, 1, 15, 9, 0, 0, 1, 1.011, 27.88, 138, \n 1, 1.06, 0.94], [98, 1, 34, 8, 0, 0, 1, 1.024, 
27.4, 138, 1, 1.06, 0.94\n ], [99, 2, 42, 0, 0, 0, 1, 1.01, 27.04, 138, 1, 1.06, 0.94], [100, 2, \n 37, 18, 0, 0, 1, 1.017, 28.03, 138, 1, 1.06, 0.94], [101, 1, 22, 15, 0,\n 0, 1, 0.993, 29.61, 138, 1, 1.06, 0.94], [102, 1, 5, 3, 0, 0, 1, 0.991,\n 32.3, 138, 1, 1.06, 0.94], [103, 2, 23, 16, 0, 0, 1, 1.001, 24.44, 138,\n 1, 1.06, 0.94], [104, 2, 38, 25, 0, 0, 1, 0.971, 21.69, 138, 1, 1.06, \n 0.94], [105, 2, 31, 26, 0, 20, 1, 0.965, 20.57, 138, 1, 1.06, 0.94], [\n 106, 1, 43, 16, 0, 0, 1, 0.962, 20.32, 138, 1, 1.06, 0.94], [107, 2, 50,\n 12, 0, 6, 1, 0.952, 17.53, 138, 1, 1.06, 0.94], [108, 1, 2, 1, 0, 0, 1,\n 0.967, 19.38, 138, 1, 1.06, 0.94], [109, 1, 8, 3, 0, 0, 1, 0.967, 18.93,\n 138, 1, 1.06, 0.94], [110, 2, 39, 30, 0, 6, 1, 0.973, 18.09, 138, 1, \n 1.06, 0.94], [111, 2, 0, 0, 0, 0, 1, 0.98, 19.74, 138, 1, 1.06, 0.94],\n [112, 2, 68, 13, 0, 0, 1, 0.975, 14.99, 138, 1, 1.06, 0.94], [113, 2, 6,\n 0, 0, 0, 1, 0.993, 13.74, 138, 1, 1.06, 0.94], [114, 1, 8, 3, 0, 0, 1, \n 0.96, 14.46, 138, 1, 1.06, 0.94], [115, 1, 22, 7, 0, 0, 1, 0.96, 14.46,\n 138, 1, 1.06, 0.94], [116, 2, 184, 0, 0, 0, 1, 1.005, 27.12, 138, 1, \n 1.06, 0.94], [117, 1, 20, 8, 0, 0, 1, 0.974, 10.67, 138, 1, 1.06, 0.94],\n [118, 1, 33, 15, 0, 0, 1, 0.949, 21.92, 138, 1, 1.06, 0.94]]'], {}), '([[1, 2, 51, 27, 0, 0, 1, 0.955, 10.67, 138, 1, 1.06, 0.94], [2, 1, 20,\n 9, 0, 0, 1, 0.971, 11.22, 138, 1, 1.06, 0.94], [3, 1, 39, 10, 0, 0, 1, \n 0.968, 11.56, 138, 1, 1.06, 0.94], [4, 2, 39, 12, 0, 0, 1, 0.998, 15.28,\n 138, 1, 1.06, 0.94], [5, 1, 0, 0, 0, -40, 1, 1.002, 15.73, 138, 1, 1.06,\n 0.94], [6, 2, 52, 22, 0, 0, 1, 0.99, 13, 138, 1, 1.06, 0.94], [7, 1, 19,\n 2, 0, 0, 1, 0.989, 12.56, 138, 1, 1.06, 0.94], [8, 2, 28, 0, 0, 0, 1, \n 1.015, 20.77, 345, 1, 1.06, 0.94], [9, 1, 0, 0, 0, 0, 1, 1.043, 28.02, \n 345, 1, 1.06, 0.94], [10, 2, 0, 0, 0, 0, 1, 1.05, 35.61, 345, 1, 1.06, \n 0.94], [11, 1, 70, 23, 0, 0, 1, 0.985, 12.72, 138, 1, 1.06, 0.94], [12,\n 2, 47, 10, 0, 0, 1, 0.99, 12.2, 138, 1, 1.06, 0.94], 
[13, 1, 34, 16, 0,\n 0, 1, 0.968, 11.35, 138, 1, 1.06, 0.94], [14, 1, 14, 1, 0, 0, 1, 0.984,\n 11.5, 138, 1, 1.06, 0.94], [15, 2, 90, 30, 0, 0, 1, 0.97, 11.23, 138, 1,\n 1.06, 0.94], [16, 1, 25, 10, 0, 0, 1, 0.984, 11.91, 138, 1, 1.06, 0.94],\n [17, 1, 11, 3, 0, 0, 1, 0.995, 13.74, 138, 1, 1.06, 0.94], [18, 2, 60, \n 34, 0, 0, 1, 0.973, 11.53, 138, 1, 1.06, 0.94], [19, 2, 45, 25, 0, 0, 1,\n 0.963, 11.05, 138, 1, 1.06, 0.94], [20, 1, 18, 3, 0, 0, 1, 0.958, 11.93,\n 138, 1, 1.06, 0.94], [21, 1, 14, 8, 0, 0, 1, 0.959, 13.52, 138, 1, 1.06,\n 0.94], [22, 1, 10, 5, 0, 0, 1, 0.97, 16.08, 138, 1, 1.06, 0.94], [23, 1,\n 7, 3, 0, 0, 1, 1, 21, 138, 1, 1.06, 0.94], [24, 2, 13, 0, 0, 0, 1, \n 0.992, 20.89, 138, 1, 1.06, 0.94], [25, 2, 0, 0, 0, 0, 1, 1.05, 27.93, \n 138, 1, 1.06, 0.94], [26, 2, 0, 0, 0, 0, 1, 1.015, 29.71, 345, 1, 1.06,\n 0.94], [27, 2, 71, 13, 0, 0, 1, 0.968, 15.35, 138, 1, 1.06, 0.94], [28,\n 1, 17, 7, 0, 0, 1, 0.962, 13.62, 138, 1, 1.06, 0.94], [29, 1, 24, 4, 0,\n 0, 1, 0.963, 12.63, 138, 1, 1.06, 0.94], [30, 1, 0, 0, 0, 0, 1, 0.968, \n 18.79, 345, 1, 1.06, 0.94], [31, 2, 43, 27, 0, 0, 1, 0.967, 12.75, 138,\n 1, 1.06, 0.94], [32, 2, 59, 23, 0, 0, 1, 0.964, 14.8, 138, 1, 1.06, \n 0.94], [33, 1, 23, 9, 0, 0, 1, 0.972, 10.63, 138, 1, 1.06, 0.94], [34, \n 2, 59, 26, 0, 14, 1, 0.986, 11.3, 138, 1, 1.06, 0.94], [35, 1, 33, 9, 0,\n 0, 1, 0.981, 10.87, 138, 1, 1.06, 0.94], [36, 2, 31, 17, 0, 0, 1, 0.98,\n 10.87, 138, 1, 1.06, 0.94], [37, 1, 0, 0, 0, -25, 1, 0.992, 11.77, 138,\n 1, 1.06, 0.94], [38, 1, 0, 0, 0, 0, 1, 0.962, 16.91, 345, 1, 1.06, 0.94\n ], [39, 1, 27, 11, 0, 0, 1, 0.97, 8.41, 138, 1, 1.06, 0.94], [40, 2, 66,\n 23, 0, 0, 1, 0.97, 7.35, 138, 1, 1.06, 0.94], [41, 1, 37, 10, 0, 0, 1, \n 0.967, 6.92, 138, 1, 1.06, 0.94], [42, 2, 96, 23, 0, 0, 1, 0.985, 8.53,\n 138, 1, 1.06, 0.94], [43, 1, 18, 7, 0, 0, 1, 0.978, 11.28, 138, 1, 1.06,\n 0.94], [44, 1, 16, 8, 0, 10, 1, 0.985, 13.82, 138, 1, 1.06, 0.94], [45,\n 1, 53, 22, 0, 10, 1, 0.987, 15.67, 138, 1, 1.06, 
0.94], [46, 2, 28, 10,\n 0, 10, 1, 1.005, 18.49, 138, 1, 1.06, 0.94], [47, 1, 34, 0, 0, 0, 1, \n 1.017, 20.73, 138, 1, 1.06, 0.94], [48, 1, 20, 11, 0, 15, 1, 1.021, \n 19.93, 138, 1, 1.06, 0.94], [49, 2, 87, 30, 0, 0, 1, 1.025, 20.94, 138,\n 1, 1.06, 0.94], [50, 1, 17, 4, 0, 0, 1, 1.001, 18.9, 138, 1, 1.06, 0.94\n ], [51, 1, 17, 8, 0, 0, 1, 0.967, 16.28, 138, 1, 1.06, 0.94], [52, 1, \n 18, 5, 0, 0, 1, 0.957, 15.32, 138, 1, 1.06, 0.94], [53, 1, 23, 11, 0, 0,\n 1, 0.946, 14.35, 138, 1, 1.06, 0.94], [54, 2, 113, 32, 0, 0, 1, 0.955, \n 15.26, 138, 1, 1.06, 0.94], [55, 2, 63, 22, 0, 0, 1, 0.952, 14.97, 138,\n 1, 1.06, 0.94], [56, 2, 84, 18, 0, 0, 1, 0.954, 15.16, 138, 1, 1.06, \n 0.94], [57, 1, 12, 3, 0, 0, 1, 0.971, 16.36, 138, 1, 1.06, 0.94], [58, \n 1, 12, 3, 0, 0, 1, 0.959, 15.51, 138, 1, 1.06, 0.94], [59, 2, 277, 113,\n 0, 0, 1, 0.985, 19.37, 138, 1, 1.06, 0.94], [60, 1, 78, 3, 0, 0, 1, \n 0.993, 23.15, 138, 1, 1.06, 0.94], [61, 2, 0, 0, 0, 0, 1, 0.995, 24.04,\n 138, 1, 1.06, 0.94], [62, 2, 77, 14, 0, 0, 1, 0.998, 23.43, 138, 1, \n 1.06, 0.94], [63, 1, 0, 0, 0, 0, 1, 0.969, 22.75, 345, 1, 1.06, 0.94],\n [64, 1, 0, 0, 0, 0, 1, 0.984, 24.52, 345, 1, 1.06, 0.94], [65, 2, 0, 0,\n 0, 0, 1, 1.005, 27.65, 345, 1, 1.06, 0.94], [66, 2, 39, 18, 0, 0, 1, \n 1.05, 27.48, 138, 1, 1.06, 0.94], [67, 1, 28, 7, 0, 0, 1, 1.02, 24.84, \n 138, 1, 1.06, 0.94], [68, 1, 0, 0, 0, 0, 1, 1.003, 27.55, 345, 1, 1.06,\n 0.94], [69, 3, 0, 0, 0, 0, 1, 1.035, 30, 138, 1, 1.06, 0.94], [70, 2, \n 66, 20, 0, 0, 1, 0.984, 22.58, 138, 1, 1.06, 0.94], [71, 1, 0, 0, 0, 0,\n 1, 0.987, 22.15, 138, 1, 1.06, 0.94], [72, 2, 12, 0, 0, 0, 1, 0.98, \n 20.98, 138, 1, 1.06, 0.94], [73, 2, 6, 0, 0, 0, 1, 0.991, 21.94, 138, 1,\n 1.06, 0.94], [74, 2, 68, 27, 0, 12, 1, 0.958, 21.64, 138, 1, 1.06, 0.94\n ], [75, 1, 47, 11, 0, 0, 1, 0.967, 22.91, 138, 1, 1.06, 0.94], [76, 2, \n 68, 36, 0, 0, 1, 0.943, 21.77, 138, 1, 1.06, 0.94], [77, 2, 61, 28, 0, \n 0, 1, 1.006, 26.72, 138, 1, 1.06, 0.94], [78, 1, 71, 26, 0, 0, 1, 
1.003,\n 26.42, 138, 1, 1.06, 0.94], [79, 1, 39, 32, 0, 20, 1, 1.009, 26.72, 138,\n 1, 1.06, 0.94], [80, 2, 130, 26, 0, 0, 1, 1.04, 28.96, 138, 1, 1.06, \n 0.94], [81, 1, 0, 0, 0, 0, 1, 0.997, 28.1, 345, 1, 1.06, 0.94], [82, 1,\n 54, 27, 0, 20, 1, 0.989, 27.24, 138, 1, 1.06, 0.94], [83, 1, 20, 10, 0,\n 10, 1, 0.985, 28.42, 138, 1, 1.06, 0.94], [84, 1, 11, 7, 0, 0, 1, 0.98,\n 30.95, 138, 1, 1.06, 0.94], [85, 2, 24, 15, 0, 0, 1, 0.985, 32.51, 138,\n 1, 1.06, 0.94], [86, 1, 21, 10, 0, 0, 1, 0.987, 31.14, 138, 1, 1.06, \n 0.94], [87, 2, 0, 0, 0, 0, 1, 1.015, 31.4, 161, 1, 1.06, 0.94], [88, 1,\n 48, 10, 0, 0, 1, 0.987, 35.64, 138, 1, 1.06, 0.94], [89, 2, 0, 0, 0, 0,\n 1, 1.005, 39.69, 138, 1, 1.06, 0.94], [90, 2, 163, 42, 0, 0, 1, 0.985, \n 33.29, 138, 1, 1.06, 0.94], [91, 2, 10, 0, 0, 0, 1, 0.98, 33.31, 138, 1,\n 1.06, 0.94], [92, 2, 65, 10, 0, 0, 1, 0.993, 33.8, 138, 1, 1.06, 0.94],\n [93, 1, 12, 7, 0, 0, 1, 0.987, 30.79, 138, 1, 1.06, 0.94], [94, 1, 30, \n 16, 0, 0, 1, 0.991, 28.64, 138, 1, 1.06, 0.94], [95, 1, 42, 31, 0, 0, 1,\n 0.981, 27.67, 138, 1, 1.06, 0.94], [96, 1, 38, 15, 0, 0, 1, 0.993, \n 27.51, 138, 1, 1.06, 0.94], [97, 1, 15, 9, 0, 0, 1, 1.011, 27.88, 138, \n 1, 1.06, 0.94], [98, 1, 34, 8, 0, 0, 1, 1.024, 27.4, 138, 1, 1.06, 0.94\n ], [99, 2, 42, 0, 0, 0, 1, 1.01, 27.04, 138, 1, 1.06, 0.94], [100, 2, \n 37, 18, 0, 0, 1, 1.017, 28.03, 138, 1, 1.06, 0.94], [101, 1, 22, 15, 0,\n 0, 1, 0.993, 29.61, 138, 1, 1.06, 0.94], [102, 1, 5, 3, 0, 0, 1, 0.991,\n 32.3, 138, 1, 1.06, 0.94], [103, 2, 23, 16, 0, 0, 1, 1.001, 24.44, 138,\n 1, 1.06, 0.94], [104, 2, 38, 25, 0, 0, 1, 0.971, 21.69, 138, 1, 1.06, \n 0.94], [105, 2, 31, 26, 0, 20, 1, 0.965, 20.57, 138, 1, 1.06, 0.94], [\n 106, 1, 43, 16, 0, 0, 1, 0.962, 20.32, 138, 1, 1.06, 0.94], [107, 2, 50,\n 12, 0, 6, 1, 0.952, 17.53, 138, 1, 1.06, 0.94], [108, 1, 2, 1, 0, 0, 1,\n 0.967, 19.38, 138, 1, 1.06, 0.94], [109, 1, 8, 3, 0, 0, 1, 0.967, 18.93,\n 138, 1, 1.06, 0.94], [110, 2, 39, 30, 0, 6, 1, 0.973, 18.09, 138, 1, \n 
1.06, 0.94], [111, 2, 0, 0, 0, 0, 1, 0.98, 19.74, 138, 1, 1.06, 0.94],\n [112, 2, 68, 13, 0, 0, 1, 0.975, 14.99, 138, 1, 1.06, 0.94], [113, 2, 6,\n 0, 0, 0, 1, 0.993, 13.74, 138, 1, 1.06, 0.94], [114, 1, 8, 3, 0, 0, 1, \n 0.96, 14.46, 138, 1, 1.06, 0.94], [115, 1, 22, 7, 0, 0, 1, 0.96, 14.46,\n 138, 1, 1.06, 0.94], [116, 2, 184, 0, 0, 0, 1, 1.005, 27.12, 138, 1, \n 1.06, 0.94], [117, 1, 20, 8, 0, 0, 1, 0.974, 10.67, 138, 1, 1.06, 0.94],\n [118, 1, 33, 15, 0, 0, 1, 0.949, 21.92, 138, 1, 1.06, 0.94]])\n', (1100, 8510), False, 'from numpy import array\n'), ((9222, 13724), 'numpy.array', 'array', (['[[1, 0, 0, 15, -5, 0.955, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [4, 0, 0, 300, -300, 0.998, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0], [6, 0, 0, 50, -13, 0.99, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0], [8, 0, 0, 300, -300, 1.015, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0], [10, 450, 0, 200, -147, 1.05, 100, 1, 550, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0], [12, 85, 0, 120, -35, 0.99, 100, 1, 185, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [15, 0, 0, 30, -10, 0.97, 100, 1, 100, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [18, 0, 0, 50, -16, 0.973, 100, 1, \n 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [19, 0, 0, 24, -8, 0.962, 100,\n 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [24, 0, 0, 300, -300, \n 0.992, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [25, 220, 0, \n 140, -47, 1.05, 100, 1, 320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [26, \n 314, 0, 1000, -1000, 1.015, 100, 1, 414, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0], [27, 0, 0, 300, -300, 0.968, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0], [31, 7, 0, 300, -300, 0.967, 100, 1, 107, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0], [32, 0, 0, 42, -14, 0.963, 100, 1, 100, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0], [34, 0, 0, 24, -8, 0.984, 100, 1, 100, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0], [36, 0, 0, 24, -8, 0.98, 100, 1, 100, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [40, 0, 0, 300, -300, 0.97, 
100, 1, 100,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [42, 0, 0, 300, -300, 0.985, 100, \n 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [46, 19, 0, 100, -100, \n 1.005, 100, 1, 119, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [49, 204, 0, \n 210, -85, 1.025, 100, 1, 304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [54,\n 48, 0, 300, -300, 0.955, 100, 1, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0], [55, 0, 0, 23, -8, 0.952, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0], [56, 0, 0, 15, -8, 0.954, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0], [59, 155, 0, 180, -60, 0.985, 100, 1, 255, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0], [61, 160, 0, 300, -100, 0.995, 100, 1, 260, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0], [62, 0, 0, 20, -20, 0.998, 100, 1, 100, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 391, 0, 200, -67, 1.005, 100, 1,\n 491, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [66, 392, 0, 200, -67, 1.05, \n 100, 1, 492, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [69, 516.4, 0, 300, -\n 300, 1.035, 100, 1, 805.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [70, 0,\n 0, 32, -10, 0.984, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [\n 72, 0, 0, 100, -100, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0], [73, 0, 0, 100, -100, 0.991, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0], [74, 0, 0, 9, -6, 0.958, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0], [76, 0, 0, 23, -8, 0.943, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0], [77, 0, 0, 70, -20, 1.006, 100, 1, 100, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0], [80, 477, 0, 280, -165, 1.04, 100, 1, 577, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [85, 0, 0, 23, -8, 0.985, 100, 1, 100, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [87, 4, 0, 1000, -100, 1.015, 100, 1,\n 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [89, 607, 0, 300, -210, 1.005,\n 100, 1, 707, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [90, 0, 0, 300, -300,\n 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [91, 0, 0, 100,\n -100, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0], [92, 0, 0,\n 9, -3, 0.99, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [99, 0, \n 0, 100, -100, 1.01, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [\n 100, 252, 0, 155, -50, 1.017, 100, 1, 352, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0], [103, 40, 0, 40, -15, 1.01, 100, 1, 140, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0], [104, 0, 0, 23, -8, 0.971, 100, 1, 100, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0], [105, 0, 0, 23, -8, 0.965, 100, 1, 100, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0], [107, 0, 0, 200, -200, 0.952, 100, 1, 100, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [110, 0, 0, 23, -8, 0.973, 100, 1, \n 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [111, 36, 0, 1000, -100, 0.98,\n 100, 1, 136, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [112, 0, 0, 1000, -\n 100, 0.975, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [113, 0, \n 0, 200, -100, 0.993, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [116, 0, 0, 1000, -1000, 1.005, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0]]'], {}), '([[1, 0, 0, 15, -5, 0.955, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0], [4, 0, 0, 300, -300, 0.998, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0], [6, 0, 0, 50, -13, 0.99, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0], [8, 0, 0, 300, -300, 1.015, 100, 1, 100, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0], [10, 450, 0, 200, -147, 1.05, 100, 1, 550, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [12, 85, 0, 120, -35, 0.99, 100, 1, 185,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [15, 0, 0, 30, -10, 0.97, 100, 1, \n 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [18, 0, 0, 50, -16, 0.973, \n 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [19, 0, 0, 24, -8, \n 0.962, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [24, 0, 0, 300,\n -300, 0.992, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [25, 220,\n 0, 140, -47, 1.05, 100, 1, 320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [\n 26, 314, 0, 1000, -1000, 1.015, 100, 1, 414, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0], [27, 0, 0, 300, -300, 0.968, 100, 1, 
100, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0], [31, 7, 0, 300, -300, 0.967, 100, 1, 107, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0], [32, 0, 0, 42, -14, 0.963, 100, 1, 100, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0], [34, 0, 0, 24, -8, 0.984, 100, 1, 100, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [36, 0, 0, 24, -8, 0.98, 100, 1, 100, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [40, 0, 0, 300, -300, 0.97, 100, 1, \n 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [42, 0, 0, 300, -300, 0.985, \n 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [46, 19, 0, 100, -100,\n 1.005, 100, 1, 119, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [49, 204, 0, \n 210, -85, 1.025, 100, 1, 304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [54,\n 48, 0, 300, -300, 0.955, 100, 1, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0], [55, 0, 0, 23, -8, 0.952, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0], [56, 0, 0, 15, -8, 0.954, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0], [59, 155, 0, 180, -60, 0.985, 100, 1, 255, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0], [61, 160, 0, 300, -100, 0.995, 100, 1, 260, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0], [62, 0, 0, 20, -20, 0.998, 100, 1, 100, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 391, 0, 200, -67, 1.005, 100, 1,\n 491, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [66, 392, 0, 200, -67, 1.05, \n 100, 1, 492, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [69, 516.4, 0, 300, -\n 300, 1.035, 100, 1, 805.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [70, 0,\n 0, 32, -10, 0.984, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [\n 72, 0, 0, 100, -100, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0], [73, 0, 0, 100, -100, 0.991, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0], [74, 0, 0, 9, -6, 0.958, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0], [76, 0, 0, 23, -8, 0.943, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0], [77, 0, 0, 70, -20, 1.006, 100, 1, 100, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0], [80, 477, 0, 280, -165, 1.04, 100, 1, 577, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [85, 0, 0, 
23, -8, 0.985, 100, 1, 100, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [87, 4, 0, 1000, -100, 1.015, 100, 1,\n 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [89, 607, 0, 300, -210, 1.005,\n 100, 1, 707, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [90, 0, 0, 300, -300,\n 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [91, 0, 0, 100,\n -100, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [92, 0, 0,\n 9, -3, 0.99, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [99, 0, \n 0, 100, -100, 1.01, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [\n 100, 252, 0, 155, -50, 1.017, 100, 1, 352, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0], [103, 40, 0, 40, -15, 1.01, 100, 1, 140, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0], [104, 0, 0, 23, -8, 0.971, 100, 1, 100, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0], [105, 0, 0, 23, -8, 0.965, 100, 1, 100, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0], [107, 0, 0, 200, -200, 0.952, 100, 1, 100, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [110, 0, 0, 23, -8, 0.973, 100, 1, \n 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [111, 36, 0, 1000, -100, 0.98,\n 100, 1, 136, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [112, 0, 0, 1000, -\n 100, 0.975, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [113, 0, \n 0, 200, -100, 0.993, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [116, 0, 0, 1000, -1000, 1.005, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0]])\n', (9227, 13724), False, 'from numpy import array\n'), ((14026, 27054), 'numpy.array', 'array', (['[[1, 2, 0.0303, 0.0999, 0.0254, 9900, 0, 0, 0, 0, 1, -360, 360], [1, 3, \n 0.0129, 0.0424, 0.01082, 9900, 0, 0, 0, 0, 1, -360, 360], [4, 5, \n 0.00176, 0.00798, 0.0021, 9900, 0, 0, 0, 0, 1, -360, 360], [3, 5, \n 0.0241, 0.108, 0.0284, 9900, 0, 0, 0, 0, 1, -360, 360], [5, 6, 0.0119, \n 0.054, 0.01426, 9900, 0, 0, 0, 0, 1, -360, 360], [6, 7, 0.00459, 0.0208,\n 0.0055, 9900, 0, 0, 0, 0, 1, -360, 360], [8, 9, 0.00244, 0.0305, 1.162,\n 9900, 0, 0, 0, 0, 1, -360, 360], [8, 5, 0, 0.0267, 0, 9900, 0, 0, 0.985,\n 0, 1, -360, 
360], [9, 10, 0.00258, 0.0322, 1.23, 9900, 0, 0, 0, 0, 1, -\n 360, 360], [4, 11, 0.0209, 0.0688, 0.01748, 9900, 0, 0, 0, 0, 1, -360, \n 360], [5, 11, 0.0203, 0.0682, 0.01738, 9900, 0, 0, 0, 0, 1, -360, 360],\n [11, 12, 0.00595, 0.0196, 0.00502, 9900, 0, 0, 0, 0, 1, -360, 360], [2,\n 12, 0.0187, 0.0616, 0.01572, 9900, 0, 0, 0, 0, 1, -360, 360], [3, 12, \n 0.0484, 0.16, 0.0406, 9900, 0, 0, 0, 0, 1, -360, 360], [7, 12, 0.00862,\n 0.034, 0.00874, 9900, 0, 0, 0, 0, 1, -360, 360], [11, 13, 0.02225, \n 0.0731, 0.01876, 9900, 0, 0, 0, 0, 1, -360, 360], [12, 14, 0.0215, \n 0.0707, 0.01816, 9900, 0, 0, 0, 0, 1, -360, 360], [13, 15, 0.0744, \n 0.2444, 0.06268, 9900, 0, 0, 0, 0, 1, -360, 360], [14, 15, 0.0595, \n 0.195, 0.0502, 9900, 0, 0, 0, 0, 1, -360, 360], [12, 16, 0.0212, 0.0834,\n 0.0214, 9900, 0, 0, 0, 0, 1, -360, 360], [15, 17, 0.0132, 0.0437, \n 0.0444, 9900, 0, 0, 0, 0, 1, -360, 360], [16, 17, 0.0454, 0.1801, \n 0.0466, 9900, 0, 0, 0, 0, 1, -360, 360], [17, 18, 0.0123, 0.0505, \n 0.01298, 9900, 0, 0, 0, 0, 1, -360, 360], [18, 19, 0.01119, 0.0493, \n 0.01142, 9900, 0, 0, 0, 0, 1, -360, 360], [19, 20, 0.0252, 0.117, \n 0.0298, 9900, 0, 0, 0, 0, 1, -360, 360], [15, 19, 0.012, 0.0394, 0.0101,\n 9900, 0, 0, 0, 0, 1, -360, 360], [20, 21, 0.0183, 0.0849, 0.0216, 9900,\n 0, 0, 0, 0, 1, -360, 360], [21, 22, 0.0209, 0.097, 0.0246, 9900, 0, 0, \n 0, 0, 1, -360, 360], [22, 23, 0.0342, 0.159, 0.0404, 9900, 0, 0, 0, 0, \n 1, -360, 360], [23, 24, 0.0135, 0.0492, 0.0498, 9900, 0, 0, 0, 0, 1, -\n 360, 360], [23, 25, 0.0156, 0.08, 0.0864, 9900, 0, 0, 0, 0, 1, -360, \n 360], [26, 25, 0, 0.0382, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360], [25, \n 27, 0.0318, 0.163, 0.1764, 9900, 0, 0, 0, 0, 1, -360, 360], [27, 28, \n 0.01913, 0.0855, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360], [28, 29, \n 0.0237, 0.0943, 0.0238, 9900, 0, 0, 0, 0, 1, -360, 360], [30, 17, 0, \n 0.0388, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360], [8, 30, 0.00431, 0.0504,\n 0.514, 9900, 0, 0, 0, 0, 1, -360, 360], [26, 30, 0.00799, 
0.086, 0.908,\n 9900, 0, 0, 0, 0, 1, -360, 360], [17, 31, 0.0474, 0.1563, 0.0399, 9900,\n 0, 0, 0, 0, 1, -360, 360], [29, 31, 0.0108, 0.0331, 0.0083, 9900, 0, 0,\n 0, 0, 1, -360, 360], [23, 32, 0.0317, 0.1153, 0.1173, 9900, 0, 0, 0, 0,\n 1, -360, 360], [31, 32, 0.0298, 0.0985, 0.0251, 9900, 0, 0, 0, 0, 1, -\n 360, 360], [27, 32, 0.0229, 0.0755, 0.01926, 9900, 0, 0, 0, 0, 1, -360,\n 360], [15, 33, 0.038, 0.1244, 0.03194, 9900, 0, 0, 0, 0, 1, -360, 360],\n [19, 34, 0.0752, 0.247, 0.0632, 9900, 0, 0, 0, 0, 1, -360, 360], [35, \n 36, 0.00224, 0.0102, 0.00268, 9900, 0, 0, 0, 0, 1, -360, 360], [35, 37,\n 0.011, 0.0497, 0.01318, 9900, 0, 0, 0, 0, 1, -360, 360], [33, 37, \n 0.0415, 0.142, 0.0366, 9900, 0, 0, 0, 0, 1, -360, 360], [34, 36, \n 0.00871, 0.0268, 0.00568, 9900, 0, 0, 0, 0, 1, -360, 360], [34, 37, \n 0.00256, 0.0094, 0.00984, 9900, 0, 0, 0, 0, 1, -360, 360], [38, 37, 0, \n 0.0375, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360], [37, 39, 0.0321, 0.106,\n 0.027, 9900, 0, 0, 0, 0, 1, -360, 360], [37, 40, 0.0593, 0.168, 0.042, \n 9900, 0, 0, 0, 0, 1, -360, 360], [30, 38, 0.00464, 0.054, 0.422, 9900, \n 0, 0, 0, 0, 1, -360, 360], [39, 40, 0.0184, 0.0605, 0.01552, 9900, 0, 0,\n 0, 0, 1, -360, 360], [40, 41, 0.0145, 0.0487, 0.01222, 9900, 0, 0, 0, 0,\n 1, -360, 360], [40, 42, 0.0555, 0.183, 0.0466, 9900, 0, 0, 0, 0, 1, -\n 360, 360], [41, 42, 0.041, 0.135, 0.0344, 9900, 0, 0, 0, 0, 1, -360, \n 360], [43, 44, 0.0608, 0.2454, 0.06068, 9900, 0, 0, 0, 0, 1, -360, 360],\n [34, 43, 0.0413, 0.1681, 0.04226, 9900, 0, 0, 0, 0, 1, -360, 360], [44,\n 45, 0.0224, 0.0901, 0.0224, 9900, 0, 0, 0, 0, 1, -360, 360], [45, 46, \n 0.04, 0.1356, 0.0332, 9900, 0, 0, 0, 0, 1, -360, 360], [46, 47, 0.038, \n 0.127, 0.0316, 9900, 0, 0, 0, 0, 1, -360, 360], [46, 48, 0.0601, 0.189,\n 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360], [47, 49, 0.0191, 0.0625, \n 0.01604, 9900, 0, 0, 0, 0, 1, -360, 360], [42, 49, 0.0715, 0.323, 0.086,\n 9900, 0, 0, 0, 0, 1, -360, 360], [42, 49, 0.0715, 0.323, 0.086, 9900, 0,\n 0, 0, 
0, 1, -360, 360], [45, 49, 0.0684, 0.186, 0.0444, 9900, 0, 0, 0, \n 0, 1, -360, 360], [48, 49, 0.0179, 0.0505, 0.01258, 9900, 0, 0, 0, 0, 1,\n -360, 360], [49, 50, 0.0267, 0.0752, 0.01874, 9900, 0, 0, 0, 0, 1, -360,\n 360], [49, 51, 0.0486, 0.137, 0.0342, 9900, 0, 0, 0, 0, 1, -360, 360],\n [51, 52, 0.0203, 0.0588, 0.01396, 9900, 0, 0, 0, 0, 1, -360, 360], [52,\n 53, 0.0405, 0.1635, 0.04058, 9900, 0, 0, 0, 0, 1, -360, 360], [53, 54, \n 0.0263, 0.122, 0.031, 9900, 0, 0, 0, 0, 1, -360, 360], [49, 54, 0.073, \n 0.289, 0.0738, 9900, 0, 0, 0, 0, 1, -360, 360], [49, 54, 0.0869, 0.291,\n 0.073, 9900, 0, 0, 0, 0, 1, -360, 360], [54, 55, 0.0169, 0.0707, 0.0202,\n 9900, 0, 0, 0, 0, 1, -360, 360], [54, 56, 0.00275, 0.00955, 0.00732, \n 9900, 0, 0, 0, 0, 1, -360, 360], [55, 56, 0.00488, 0.0151, 0.00374, \n 9900, 0, 0, 0, 0, 1, -360, 360], [56, 57, 0.0343, 0.0966, 0.0242, 9900,\n 0, 0, 0, 0, 1, -360, 360], [50, 57, 0.0474, 0.134, 0.0332, 9900, 0, 0, \n 0, 0, 1, -360, 360], [56, 58, 0.0343, 0.0966, 0.0242, 9900, 0, 0, 0, 0,\n 1, -360, 360], [51, 58, 0.0255, 0.0719, 0.01788, 9900, 0, 0, 0, 0, 1, -\n 360, 360], [54, 59, 0.0503, 0.2293, 0.0598, 9900, 0, 0, 0, 0, 1, -360, \n 360], [56, 59, 0.0825, 0.251, 0.0569, 9900, 0, 0, 0, 0, 1, -360, 360],\n [56, 59, 0.0803, 0.239, 0.0536, 9900, 0, 0, 0, 0, 1, -360, 360], [55, \n 59, 0.04739, 0.2158, 0.05646, 9900, 0, 0, 0, 0, 1, -360, 360], [59, 60,\n 0.0317, 0.145, 0.0376, 9900, 0, 0, 0, 0, 1, -360, 360], [59, 61, 0.0328,\n 0.15, 0.0388, 9900, 0, 0, 0, 0, 1, -360, 360], [60, 61, 0.00264, 0.0135,\n 0.01456, 9900, 0, 0, 0, 0, 1, -360, 360], [60, 62, 0.0123, 0.0561, \n 0.01468, 9900, 0, 0, 0, 0, 1, -360, 360], [61, 62, 0.00824, 0.0376, \n 0.0098, 9900, 0, 0, 0, 0, 1, -360, 360], [63, 59, 0, 0.0386, 0, 9900, 0,\n 0, 0.96, 0, 1, -360, 360], [63, 64, 0.00172, 0.02, 0.216, 9900, 0, 0, 0,\n 0, 1, -360, 360], [64, 61, 0, 0.0268, 0, 9900, 0, 0, 0.985, 0, 1, -360,\n 360], [38, 65, 0.00901, 0.0986, 1.046, 9900, 0, 0, 0, 0, 1, -360, 360],\n [64, 65, 
0.00269, 0.0302, 0.38, 9900, 0, 0, 0, 0, 1, -360, 360], [49, \n 66, 0.018, 0.0919, 0.0248, 9900, 0, 0, 0, 0, 1, -360, 360], [49, 66, \n 0.018, 0.0919, 0.0248, 9900, 0, 0, 0, 0, 1, -360, 360], [62, 66, 0.0482,\n 0.218, 0.0578, 9900, 0, 0, 0, 0, 1, -360, 360], [62, 67, 0.0258, 0.117,\n 0.031, 9900, 0, 0, 0, 0, 1, -360, 360], [65, 66, 0, 0.037, 0, 9900, 0, \n 0, 0.935, 0, 1, -360, 360], [66, 67, 0.0224, 0.1015, 0.02682, 9900, 0, \n 0, 0, 0, 1, -360, 360], [65, 68, 0.00138, 0.016, 0.638, 9900, 0, 0, 0, \n 0, 1, -360, 360], [47, 69, 0.0844, 0.2778, 0.07092, 9900, 0, 0, 0, 0, 1,\n -360, 360], [49, 69, 0.0985, 0.324, 0.0828, 9900, 0, 0, 0, 0, 1, -360, \n 360], [68, 69, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360], [69, \n 70, 0.03, 0.127, 0.122, 9900, 0, 0, 0, 0, 1, -360, 360], [24, 70, \n 0.00221, 0.4115, 0.10198, 9900, 0, 0, 0, 0, 1, -360, 360], [70, 71, \n 0.00882, 0.0355, 0.00878, 9900, 0, 0, 0, 0, 1, -360, 360], [24, 72, \n 0.0488, 0.196, 0.0488, 9900, 0, 0, 0, 0, 1, -360, 360], [71, 72, 0.0446,\n 0.18, 0.04444, 9900, 0, 0, 0, 0, 1, -360, 360], [71, 73, 0.00866, \n 0.0454, 0.01178, 9900, 0, 0, 0, 0, 1, -360, 360], [70, 74, 0.0401, \n 0.1323, 0.03368, 9900, 0, 0, 0, 0, 1, -360, 360], [70, 75, 0.0428, \n 0.141, 0.036, 9900, 0, 0, 0, 0, 1, -360, 360], [69, 75, 0.0405, 0.122, \n 0.124, 9900, 0, 0, 0, 0, 1, -360, 360], [74, 75, 0.0123, 0.0406, \n 0.01034, 9900, 0, 0, 0, 0, 1, -360, 360], [76, 77, 0.0444, 0.148, \n 0.0368, 9900, 0, 0, 0, 0, 1, -360, 360], [69, 77, 0.0309, 0.101, 0.1038,\n 9900, 0, 0, 0, 0, 1, -360, 360], [75, 77, 0.0601, 0.1999, 0.04978, 9900,\n 0, 0, 0, 0, 1, -360, 360], [77, 78, 0.00376, 0.0124, 0.01264, 9900, 0, \n 0, 0, 0, 1, -360, 360], [78, 79, 0.00546, 0.0244, 0.00648, 9900, 0, 0, \n 0, 0, 1, -360, 360], [77, 80, 0.017, 0.0485, 0.0472, 9900, 0, 0, 0, 0, \n 1, -360, 360], [77, 80, 0.0294, 0.105, 0.0228, 9900, 0, 0, 0, 0, 1, -\n 360, 360], [79, 80, 0.0156, 0.0704, 0.0187, 9900, 0, 0, 0, 0, 1, -360, \n 360], [68, 81, 0.00175, 0.0202, 0.808, 9900, 
0, 0, 0, 0, 1, -360, 360],\n [81, 80, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360], [77, 82, \n 0.0298, 0.0853, 0.08174, 9900, 0, 0, 0, 0, 1, -360, 360], [82, 83, \n 0.0112, 0.03665, 0.03796, 9900, 0, 0, 0, 0, 1, -360, 360], [83, 84, \n 0.0625, 0.132, 0.0258, 9900, 0, 0, 0, 0, 1, -360, 360], [83, 85, 0.043,\n 0.148, 0.0348, 9900, 0, 0, 0, 0, 1, -360, 360], [84, 85, 0.0302, 0.0641,\n 0.01234, 9900, 0, 0, 0, 0, 1, -360, 360], [85, 86, 0.035, 0.123, 0.0276,\n 9900, 0, 0, 0, 0, 1, -360, 360], [86, 87, 0.02828, 0.2074, 0.0445, 9900,\n 0, 0, 0, 0, 1, -360, 360], [85, 88, 0.02, 0.102, 0.0276, 9900, 0, 0, 0,\n 0, 1, -360, 360], [85, 89, 0.0239, 0.173, 0.047, 9900, 0, 0, 0, 0, 1, -\n 360, 360], [88, 89, 0.0139, 0.0712, 0.01934, 9900, 0, 0, 0, 0, 1, -360,\n 360], [89, 90, 0.0518, 0.188, 0.0528, 9900, 0, 0, 0, 0, 1, -360, 360],\n [89, 90, 0.0238, 0.0997, 0.106, 9900, 0, 0, 0, 0, 1, -360, 360], [90, \n 91, 0.0254, 0.0836, 0.0214, 9900, 0, 0, 0, 0, 1, -360, 360], [89, 92, \n 0.0099, 0.0505, 0.0548, 9900, 0, 0, 0, 0, 1, -360, 360], [89, 92, \n 0.0393, 0.1581, 0.0414, 9900, 0, 0, 0, 0, 1, -360, 360], [91, 92, \n 0.0387, 0.1272, 0.03268, 9900, 0, 0, 0, 0, 1, -360, 360], [92, 93, \n 0.0258, 0.0848, 0.0218, 9900, 0, 0, 0, 0, 1, -360, 360], [92, 94, \n 0.0481, 0.158, 0.0406, 9900, 0, 0, 0, 0, 1, -360, 360], [93, 94, 0.0223,\n 0.0732, 0.01876, 9900, 0, 0, 0, 0, 1, -360, 360], [94, 95, 0.0132, \n 0.0434, 0.0111, 9900, 0, 0, 0, 0, 1, -360, 360], [80, 96, 0.0356, 0.182,\n 0.0494, 9900, 0, 0, 0, 0, 1, -360, 360], [82, 96, 0.0162, 0.053, 0.0544,\n 9900, 0, 0, 0, 0, 1, -360, 360], [94, 96, 0.0269, 0.0869, 0.023, 9900, \n 0, 0, 0, 0, 1, -360, 360], [80, 97, 0.0183, 0.0934, 0.0254, 9900, 0, 0,\n 0, 0, 1, -360, 360], [80, 98, 0.0238, 0.108, 0.0286, 9900, 0, 0, 0, 0, \n 1, -360, 360], [80, 99, 0.0454, 0.206, 0.0546, 9900, 0, 0, 0, 0, 1, -\n 360, 360], [92, 100, 0.0648, 0.295, 0.0472, 9900, 0, 0, 0, 0, 1, -360, \n 360], [94, 100, 0.0178, 0.058, 0.0604, 9900, 0, 0, 0, 0, 1, -360, 360],\n [95, 
96, 0.0171, 0.0547, 0.01474, 9900, 0, 0, 0, 0, 1, -360, 360], [96,\n 97, 0.0173, 0.0885, 0.024, 9900, 0, 0, 0, 0, 1, -360, 360], [98, 100, \n 0.0397, 0.179, 0.0476, 9900, 0, 0, 0, 0, 1, -360, 360], [99, 100, 0.018,\n 0.0813, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360], [100, 101, 0.0277, \n 0.1262, 0.0328, 9900, 0, 0, 0, 0, 1, -360, 360], [92, 102, 0.0123, \n 0.0559, 0.01464, 9900, 0, 0, 0, 0, 1, -360, 360], [101, 102, 0.0246, \n 0.112, 0.0294, 9900, 0, 0, 0, 0, 1, -360, 360], [100, 103, 0.016, \n 0.0525, 0.0536, 9900, 0, 0, 0, 0, 1, -360, 360], [100, 104, 0.0451, \n 0.204, 0.0541, 9900, 0, 0, 0, 0, 1, -360, 360], [103, 104, 0.0466, \n 0.1584, 0.0407, 9900, 0, 0, 0, 0, 1, -360, 360], [103, 105, 0.0535, \n 0.1625, 0.0408, 9900, 0, 0, 0, 0, 1, -360, 360], [100, 106, 0.0605, \n 0.229, 0.062, 9900, 0, 0, 0, 0, 1, -360, 360], [104, 105, 0.00994, \n 0.0378, 0.00986, 9900, 0, 0, 0, 0, 1, -360, 360], [105, 106, 0.014, \n 0.0547, 0.01434, 9900, 0, 0, 0, 0, 1, -360, 360], [105, 107, 0.053, \n 0.183, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360], [105, 108, 0.0261, \n 0.0703, 0.01844, 9900, 0, 0, 0, 0, 1, -360, 360], [106, 107, 0.053, \n 0.183, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360], [108, 109, 0.0105, \n 0.0288, 0.0076, 9900, 0, 0, 0, 0, 1, -360, 360], [103, 110, 0.03906, \n 0.1813, 0.0461, 9900, 0, 0, 0, 0, 1, -360, 360], [109, 110, 0.0278, \n 0.0762, 0.0202, 9900, 0, 0, 0, 0, 1, -360, 360], [110, 111, 0.022, \n 0.0755, 0.02, 9900, 0, 0, 0, 0, 1, -360, 360], [110, 112, 0.0247, 0.064,\n 0.062, 9900, 0, 0, 0, 0, 1, -360, 360], [17, 113, 0.00913, 0.0301, \n 0.00768, 9900, 0, 0, 0, 0, 1, -360, 360], [32, 113, 0.0615, 0.203, \n 0.0518, 9900, 0, 0, 0, 0, 1, -360, 360], [32, 114, 0.0135, 0.0612, \n 0.01628, 9900, 0, 0, 0, 0, 1, -360, 360], [27, 115, 0.0164, 0.0741, \n 0.01972, 9900, 0, 0, 0, 0, 1, -360, 360], [114, 115, 0.0023, 0.0104, \n 0.00276, 9900, 0, 0, 0, 0, 1, -360, 360], [68, 116, 0.00034, 0.00405, \n 0.164, 9900, 0, 0, 0, 0, 1, -360, 360], [12, 117, 0.0329, 0.14, 0.0358,\n 9900, 
0, 0, 0, 0, 1, -360, 360], [75, 118, 0.0145, 0.0481, 0.01198, \n 9900, 0, 0, 0, 0, 1, -360, 360], [76, 118, 0.0164, 0.0544, 0.01356, \n 9900, 0, 0, 0, 0, 1, -360, 360]]'], {}), '([[1, 2, 0.0303, 0.0999, 0.0254, 9900, 0, 0, 0, 0, 1, -360, 360], [1, \n 3, 0.0129, 0.0424, 0.01082, 9900, 0, 0, 0, 0, 1, -360, 360], [4, 5, \n 0.00176, 0.00798, 0.0021, 9900, 0, 0, 0, 0, 1, -360, 360], [3, 5, \n 0.0241, 0.108, 0.0284, 9900, 0, 0, 0, 0, 1, -360, 360], [5, 6, 0.0119, \n 0.054, 0.01426, 9900, 0, 0, 0, 0, 1, -360, 360], [6, 7, 0.00459, 0.0208,\n 0.0055, 9900, 0, 0, 0, 0, 1, -360, 360], [8, 9, 0.00244, 0.0305, 1.162,\n 9900, 0, 0, 0, 0, 1, -360, 360], [8, 5, 0, 0.0267, 0, 9900, 0, 0, 0.985,\n 0, 1, -360, 360], [9, 10, 0.00258, 0.0322, 1.23, 9900, 0, 0, 0, 0, 1, -\n 360, 360], [4, 11, 0.0209, 0.0688, 0.01748, 9900, 0, 0, 0, 0, 1, -360, \n 360], [5, 11, 0.0203, 0.0682, 0.01738, 9900, 0, 0, 0, 0, 1, -360, 360],\n [11, 12, 0.00595, 0.0196, 0.00502, 9900, 0, 0, 0, 0, 1, -360, 360], [2,\n 12, 0.0187, 0.0616, 0.01572, 9900, 0, 0, 0, 0, 1, -360, 360], [3, 12, \n 0.0484, 0.16, 0.0406, 9900, 0, 0, 0, 0, 1, -360, 360], [7, 12, 0.00862,\n 0.034, 0.00874, 9900, 0, 0, 0, 0, 1, -360, 360], [11, 13, 0.02225, \n 0.0731, 0.01876, 9900, 0, 0, 0, 0, 1, -360, 360], [12, 14, 0.0215, \n 0.0707, 0.01816, 9900, 0, 0, 0, 0, 1, -360, 360], [13, 15, 0.0744, \n 0.2444, 0.06268, 9900, 0, 0, 0, 0, 1, -360, 360], [14, 15, 0.0595, \n 0.195, 0.0502, 9900, 0, 0, 0, 0, 1, -360, 360], [12, 16, 0.0212, 0.0834,\n 0.0214, 9900, 0, 0, 0, 0, 1, -360, 360], [15, 17, 0.0132, 0.0437, \n 0.0444, 9900, 0, 0, 0, 0, 1, -360, 360], [16, 17, 0.0454, 0.1801, \n 0.0466, 9900, 0, 0, 0, 0, 1, -360, 360], [17, 18, 0.0123, 0.0505, \n 0.01298, 9900, 0, 0, 0, 0, 1, -360, 360], [18, 19, 0.01119, 0.0493, \n 0.01142, 9900, 0, 0, 0, 0, 1, -360, 360], [19, 20, 0.0252, 0.117, \n 0.0298, 9900, 0, 0, 0, 0, 1, -360, 360], [15, 19, 0.012, 0.0394, 0.0101,\n 9900, 0, 0, 0, 0, 1, -360, 360], [20, 21, 0.0183, 0.0849, 0.0216, 9900,\n 0, 0, 0, 0, 1, 
-360, 360], [21, 22, 0.0209, 0.097, 0.0246, 9900, 0, 0, \n 0, 0, 1, -360, 360], [22, 23, 0.0342, 0.159, 0.0404, 9900, 0, 0, 0, 0, \n 1, -360, 360], [23, 24, 0.0135, 0.0492, 0.0498, 9900, 0, 0, 0, 0, 1, -\n 360, 360], [23, 25, 0.0156, 0.08, 0.0864, 9900, 0, 0, 0, 0, 1, -360, \n 360], [26, 25, 0, 0.0382, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360], [25, \n 27, 0.0318, 0.163, 0.1764, 9900, 0, 0, 0, 0, 1, -360, 360], [27, 28, \n 0.01913, 0.0855, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360], [28, 29, \n 0.0237, 0.0943, 0.0238, 9900, 0, 0, 0, 0, 1, -360, 360], [30, 17, 0, \n 0.0388, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360], [8, 30, 0.00431, 0.0504,\n 0.514, 9900, 0, 0, 0, 0, 1, -360, 360], [26, 30, 0.00799, 0.086, 0.908,\n 9900, 0, 0, 0, 0, 1, -360, 360], [17, 31, 0.0474, 0.1563, 0.0399, 9900,\n 0, 0, 0, 0, 1, -360, 360], [29, 31, 0.0108, 0.0331, 0.0083, 9900, 0, 0,\n 0, 0, 1, -360, 360], [23, 32, 0.0317, 0.1153, 0.1173, 9900, 0, 0, 0, 0,\n 1, -360, 360], [31, 32, 0.0298, 0.0985, 0.0251, 9900, 0, 0, 0, 0, 1, -\n 360, 360], [27, 32, 0.0229, 0.0755, 0.01926, 9900, 0, 0, 0, 0, 1, -360,\n 360], [15, 33, 0.038, 0.1244, 0.03194, 9900, 0, 0, 0, 0, 1, -360, 360],\n [19, 34, 0.0752, 0.247, 0.0632, 9900, 0, 0, 0, 0, 1, -360, 360], [35, \n 36, 0.00224, 0.0102, 0.00268, 9900, 0, 0, 0, 0, 1, -360, 360], [35, 37,\n 0.011, 0.0497, 0.01318, 9900, 0, 0, 0, 0, 1, -360, 360], [33, 37, \n 0.0415, 0.142, 0.0366, 9900, 0, 0, 0, 0, 1, -360, 360], [34, 36, \n 0.00871, 0.0268, 0.00568, 9900, 0, 0, 0, 0, 1, -360, 360], [34, 37, \n 0.00256, 0.0094, 0.00984, 9900, 0, 0, 0, 0, 1, -360, 360], [38, 37, 0, \n 0.0375, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360], [37, 39, 0.0321, 0.106,\n 0.027, 9900, 0, 0, 0, 0, 1, -360, 360], [37, 40, 0.0593, 0.168, 0.042, \n 9900, 0, 0, 0, 0, 1, -360, 360], [30, 38, 0.00464, 0.054, 0.422, 9900, \n 0, 0, 0, 0, 1, -360, 360], [39, 40, 0.0184, 0.0605, 0.01552, 9900, 0, 0,\n 0, 0, 1, -360, 360], [40, 41, 0.0145, 0.0487, 0.01222, 9900, 0, 0, 0, 0,\n 1, -360, 360], [40, 42, 0.0555, 0.183, 
0.0466, 9900, 0, 0, 0, 0, 1, -\n 360, 360], [41, 42, 0.041, 0.135, 0.0344, 9900, 0, 0, 0, 0, 1, -360, \n 360], [43, 44, 0.0608, 0.2454, 0.06068, 9900, 0, 0, 0, 0, 1, -360, 360],\n [34, 43, 0.0413, 0.1681, 0.04226, 9900, 0, 0, 0, 0, 1, -360, 360], [44,\n 45, 0.0224, 0.0901, 0.0224, 9900, 0, 0, 0, 0, 1, -360, 360], [45, 46, \n 0.04, 0.1356, 0.0332, 9900, 0, 0, 0, 0, 1, -360, 360], [46, 47, 0.038, \n 0.127, 0.0316, 9900, 0, 0, 0, 0, 1, -360, 360], [46, 48, 0.0601, 0.189,\n 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360], [47, 49, 0.0191, 0.0625, \n 0.01604, 9900, 0, 0, 0, 0, 1, -360, 360], [42, 49, 0.0715, 0.323, 0.086,\n 9900, 0, 0, 0, 0, 1, -360, 360], [42, 49, 0.0715, 0.323, 0.086, 9900, 0,\n 0, 0, 0, 1, -360, 360], [45, 49, 0.0684, 0.186, 0.0444, 9900, 0, 0, 0, \n 0, 1, -360, 360], [48, 49, 0.0179, 0.0505, 0.01258, 9900, 0, 0, 0, 0, 1,\n -360, 360], [49, 50, 0.0267, 0.0752, 0.01874, 9900, 0, 0, 0, 0, 1, -360,\n 360], [49, 51, 0.0486, 0.137, 0.0342, 9900, 0, 0, 0, 0, 1, -360, 360],\n [51, 52, 0.0203, 0.0588, 0.01396, 9900, 0, 0, 0, 0, 1, -360, 360], [52,\n 53, 0.0405, 0.1635, 0.04058, 9900, 0, 0, 0, 0, 1, -360, 360], [53, 54, \n 0.0263, 0.122, 0.031, 9900, 0, 0, 0, 0, 1, -360, 360], [49, 54, 0.073, \n 0.289, 0.0738, 9900, 0, 0, 0, 0, 1, -360, 360], [49, 54, 0.0869, 0.291,\n 0.073, 9900, 0, 0, 0, 0, 1, -360, 360], [54, 55, 0.0169, 0.0707, 0.0202,\n 9900, 0, 0, 0, 0, 1, -360, 360], [54, 56, 0.00275, 0.00955, 0.00732, \n 9900, 0, 0, 0, 0, 1, -360, 360], [55, 56, 0.00488, 0.0151, 0.00374, \n 9900, 0, 0, 0, 0, 1, -360, 360], [56, 57, 0.0343, 0.0966, 0.0242, 9900,\n 0, 0, 0, 0, 1, -360, 360], [50, 57, 0.0474, 0.134, 0.0332, 9900, 0, 0, \n 0, 0, 1, -360, 360], [56, 58, 0.0343, 0.0966, 0.0242, 9900, 0, 0, 0, 0,\n 1, -360, 360], [51, 58, 0.0255, 0.0719, 0.01788, 9900, 0, 0, 0, 0, 1, -\n 360, 360], [54, 59, 0.0503, 0.2293, 0.0598, 9900, 0, 0, 0, 0, 1, -360, \n 360], [56, 59, 0.0825, 0.251, 0.0569, 9900, 0, 0, 0, 0, 1, -360, 360],\n [56, 59, 0.0803, 0.239, 0.0536, 9900, 0, 0, 0, 0, 
1, -360, 360], [55, \n 59, 0.04739, 0.2158, 0.05646, 9900, 0, 0, 0, 0, 1, -360, 360], [59, 60,\n 0.0317, 0.145, 0.0376, 9900, 0, 0, 0, 0, 1, -360, 360], [59, 61, 0.0328,\n 0.15, 0.0388, 9900, 0, 0, 0, 0, 1, -360, 360], [60, 61, 0.00264, 0.0135,\n 0.01456, 9900, 0, 0, 0, 0, 1, -360, 360], [60, 62, 0.0123, 0.0561, \n 0.01468, 9900, 0, 0, 0, 0, 1, -360, 360], [61, 62, 0.00824, 0.0376, \n 0.0098, 9900, 0, 0, 0, 0, 1, -360, 360], [63, 59, 0, 0.0386, 0, 9900, 0,\n 0, 0.96, 0, 1, -360, 360], [63, 64, 0.00172, 0.02, 0.216, 9900, 0, 0, 0,\n 0, 1, -360, 360], [64, 61, 0, 0.0268, 0, 9900, 0, 0, 0.985, 0, 1, -360,\n 360], [38, 65, 0.00901, 0.0986, 1.046, 9900, 0, 0, 0, 0, 1, -360, 360],\n [64, 65, 0.00269, 0.0302, 0.38, 9900, 0, 0, 0, 0, 1, -360, 360], [49, \n 66, 0.018, 0.0919, 0.0248, 9900, 0, 0, 0, 0, 1, -360, 360], [49, 66, \n 0.018, 0.0919, 0.0248, 9900, 0, 0, 0, 0, 1, -360, 360], [62, 66, 0.0482,\n 0.218, 0.0578, 9900, 0, 0, 0, 0, 1, -360, 360], [62, 67, 0.0258, 0.117,\n 0.031, 9900, 0, 0, 0, 0, 1, -360, 360], [65, 66, 0, 0.037, 0, 9900, 0, \n 0, 0.935, 0, 1, -360, 360], [66, 67, 0.0224, 0.1015, 0.02682, 9900, 0, \n 0, 0, 0, 1, -360, 360], [65, 68, 0.00138, 0.016, 0.638, 9900, 0, 0, 0, \n 0, 1, -360, 360], [47, 69, 0.0844, 0.2778, 0.07092, 9900, 0, 0, 0, 0, 1,\n -360, 360], [49, 69, 0.0985, 0.324, 0.0828, 9900, 0, 0, 0, 0, 1, -360, \n 360], [68, 69, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360], [69, \n 70, 0.03, 0.127, 0.122, 9900, 0, 0, 0, 0, 1, -360, 360], [24, 70, \n 0.00221, 0.4115, 0.10198, 9900, 0, 0, 0, 0, 1, -360, 360], [70, 71, \n 0.00882, 0.0355, 0.00878, 9900, 0, 0, 0, 0, 1, -360, 360], [24, 72, \n 0.0488, 0.196, 0.0488, 9900, 0, 0, 0, 0, 1, -360, 360], [71, 72, 0.0446,\n 0.18, 0.04444, 9900, 0, 0, 0, 0, 1, -360, 360], [71, 73, 0.00866, \n 0.0454, 0.01178, 9900, 0, 0, 0, 0, 1, -360, 360], [70, 74, 0.0401, \n 0.1323, 0.03368, 9900, 0, 0, 0, 0, 1, -360, 360], [70, 75, 0.0428, \n 0.141, 0.036, 9900, 0, 0, 0, 0, 1, -360, 360], [69, 75, 0.0405, 0.122, \n 0.124, 
9900, 0, 0, 0, 0, 1, -360, 360], [74, 75, 0.0123, 0.0406, \n 0.01034, 9900, 0, 0, 0, 0, 1, -360, 360], [76, 77, 0.0444, 0.148, \n 0.0368, 9900, 0, 0, 0, 0, 1, -360, 360], [69, 77, 0.0309, 0.101, 0.1038,\n 9900, 0, 0, 0, 0, 1, -360, 360], [75, 77, 0.0601, 0.1999, 0.04978, 9900,\n 0, 0, 0, 0, 1, -360, 360], [77, 78, 0.00376, 0.0124, 0.01264, 9900, 0, \n 0, 0, 0, 1, -360, 360], [78, 79, 0.00546, 0.0244, 0.00648, 9900, 0, 0, \n 0, 0, 1, -360, 360], [77, 80, 0.017, 0.0485, 0.0472, 9900, 0, 0, 0, 0, \n 1, -360, 360], [77, 80, 0.0294, 0.105, 0.0228, 9900, 0, 0, 0, 0, 1, -\n 360, 360], [79, 80, 0.0156, 0.0704, 0.0187, 9900, 0, 0, 0, 0, 1, -360, \n 360], [68, 81, 0.00175, 0.0202, 0.808, 9900, 0, 0, 0, 0, 1, -360, 360],\n [81, 80, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360], [77, 82, \n 0.0298, 0.0853, 0.08174, 9900, 0, 0, 0, 0, 1, -360, 360], [82, 83, \n 0.0112, 0.03665, 0.03796, 9900, 0, 0, 0, 0, 1, -360, 360], [83, 84, \n 0.0625, 0.132, 0.0258, 9900, 0, 0, 0, 0, 1, -360, 360], [83, 85, 0.043,\n 0.148, 0.0348, 9900, 0, 0, 0, 0, 1, -360, 360], [84, 85, 0.0302, 0.0641,\n 0.01234, 9900, 0, 0, 0, 0, 1, -360, 360], [85, 86, 0.035, 0.123, 0.0276,\n 9900, 0, 0, 0, 0, 1, -360, 360], [86, 87, 0.02828, 0.2074, 0.0445, 9900,\n 0, 0, 0, 0, 1, -360, 360], [85, 88, 0.02, 0.102, 0.0276, 9900, 0, 0, 0,\n 0, 1, -360, 360], [85, 89, 0.0239, 0.173, 0.047, 9900, 0, 0, 0, 0, 1, -\n 360, 360], [88, 89, 0.0139, 0.0712, 0.01934, 9900, 0, 0, 0, 0, 1, -360,\n 360], [89, 90, 0.0518, 0.188, 0.0528, 9900, 0, 0, 0, 0, 1, -360, 360],\n [89, 90, 0.0238, 0.0997, 0.106, 9900, 0, 0, 0, 0, 1, -360, 360], [90, \n 91, 0.0254, 0.0836, 0.0214, 9900, 0, 0, 0, 0, 1, -360, 360], [89, 92, \n 0.0099, 0.0505, 0.0548, 9900, 0, 0, 0, 0, 1, -360, 360], [89, 92, \n 0.0393, 0.1581, 0.0414, 9900, 0, 0, 0, 0, 1, -360, 360], [91, 92, \n 0.0387, 0.1272, 0.03268, 9900, 0, 0, 0, 0, 1, -360, 360], [92, 93, \n 0.0258, 0.0848, 0.0218, 9900, 0, 0, 0, 0, 1, -360, 360], [92, 94, \n 0.0481, 0.158, 0.0406, 9900, 0, 0, 0, 0, 1, 
-360, 360], [93, 94, 0.0223,\n 0.0732, 0.01876, 9900, 0, 0, 0, 0, 1, -360, 360], [94, 95, 0.0132, \n 0.0434, 0.0111, 9900, 0, 0, 0, 0, 1, -360, 360], [80, 96, 0.0356, 0.182,\n 0.0494, 9900, 0, 0, 0, 0, 1, -360, 360], [82, 96, 0.0162, 0.053, 0.0544,\n 9900, 0, 0, 0, 0, 1, -360, 360], [94, 96, 0.0269, 0.0869, 0.023, 9900, \n 0, 0, 0, 0, 1, -360, 360], [80, 97, 0.0183, 0.0934, 0.0254, 9900, 0, 0,\n 0, 0, 1, -360, 360], [80, 98, 0.0238, 0.108, 0.0286, 9900, 0, 0, 0, 0, \n 1, -360, 360], [80, 99, 0.0454, 0.206, 0.0546, 9900, 0, 0, 0, 0, 1, -\n 360, 360], [92, 100, 0.0648, 0.295, 0.0472, 9900, 0, 0, 0, 0, 1, -360, \n 360], [94, 100, 0.0178, 0.058, 0.0604, 9900, 0, 0, 0, 0, 1, -360, 360],\n [95, 96, 0.0171, 0.0547, 0.01474, 9900, 0, 0, 0, 0, 1, -360, 360], [96,\n 97, 0.0173, 0.0885, 0.024, 9900, 0, 0, 0, 0, 1, -360, 360], [98, 100, \n 0.0397, 0.179, 0.0476, 9900, 0, 0, 0, 0, 1, -360, 360], [99, 100, 0.018,\n 0.0813, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360], [100, 101, 0.0277, \n 0.1262, 0.0328, 9900, 0, 0, 0, 0, 1, -360, 360], [92, 102, 0.0123, \n 0.0559, 0.01464, 9900, 0, 0, 0, 0, 1, -360, 360], [101, 102, 0.0246, \n 0.112, 0.0294, 9900, 0, 0, 0, 0, 1, -360, 360], [100, 103, 0.016, \n 0.0525, 0.0536, 9900, 0, 0, 0, 0, 1, -360, 360], [100, 104, 0.0451, \n 0.204, 0.0541, 9900, 0, 0, 0, 0, 1, -360, 360], [103, 104, 0.0466, \n 0.1584, 0.0407, 9900, 0, 0, 0, 0, 1, -360, 360], [103, 105, 0.0535, \n 0.1625, 0.0408, 9900, 0, 0, 0, 0, 1, -360, 360], [100, 106, 0.0605, \n 0.229, 0.062, 9900, 0, 0, 0, 0, 1, -360, 360], [104, 105, 0.00994, \n 0.0378, 0.00986, 9900, 0, 0, 0, 0, 1, -360, 360], [105, 106, 0.014, \n 0.0547, 0.01434, 9900, 0, 0, 0, 0, 1, -360, 360], [105, 107, 0.053, \n 0.183, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360], [105, 108, 0.0261, \n 0.0703, 0.01844, 9900, 0, 0, 0, 0, 1, -360, 360], [106, 107, 0.053, \n 0.183, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360], [108, 109, 0.0105, \n 0.0288, 0.0076, 9900, 0, 0, 0, 0, 1, -360, 360], [103, 110, 0.03906, \n 0.1813, 0.0461, 9900, 0, 
0, 0, 0, 1, -360, 360], [109, 110, 0.0278, \n 0.0762, 0.0202, 9900, 0, 0, 0, 0, 1, -360, 360], [110, 111, 0.022, \n 0.0755, 0.02, 9900, 0, 0, 0, 0, 1, -360, 360], [110, 112, 0.0247, 0.064,\n 0.062, 9900, 0, 0, 0, 0, 1, -360, 360], [17, 113, 0.00913, 0.0301, \n 0.00768, 9900, 0, 0, 0, 0, 1, -360, 360], [32, 113, 0.0615, 0.203, \n 0.0518, 9900, 0, 0, 0, 0, 1, -360, 360], [32, 114, 0.0135, 0.0612, \n 0.01628, 9900, 0, 0, 0, 0, 1, -360, 360], [27, 115, 0.0164, 0.0741, \n 0.01972, 9900, 0, 0, 0, 0, 1, -360, 360], [114, 115, 0.0023, 0.0104, \n 0.00276, 9900, 0, 0, 0, 0, 1, -360, 360], [68, 116, 0.00034, 0.00405, \n 0.164, 9900, 0, 0, 0, 0, 1, -360, 360], [12, 117, 0.0329, 0.14, 0.0358,\n 9900, 0, 0, 0, 0, 1, -360, 360], [75, 118, 0.0145, 0.0481, 0.01198, \n 9900, 0, 0, 0, 0, 1, -360, 360], [76, 118, 0.0164, 0.0544, 0.01356, \n 9900, 0, 0, 0, 0, 1, -360, 360]])\n', (14031, 27054), False, 'from numpy import array\n'), ((27907, 29538), 'numpy.array', 'array', (['[[2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, \n 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.0222222, 20, 0], [2, \n 0, 0, 3, 0.117647, 20, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01,\n 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0,\n 3, 0.0454545, 20, 0], [2, 0, 0, 3, 0.0318471, 20, 0], [2, 0, 0, 3, 0.01,\n 40, 0], [2, 0, 0, 3, 1.42857, 20, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0,\n 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0\n ], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.526316, 20, 0], [2, 0, 0, \n 3, 0.0490196, 20, 0], [2, 0, 0, 3, 0.208333, 20, 0], [2, 0, 0, 3, 0.01,\n 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.0645161, 20, 0], [2, \n 0, 0, 3, 0.0625, 20, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, \n 0.0255754, 20, 0], [2, 0, 0, 3, 0.0255102, 20, 0], [2, 0, 0, 3, \n 0.0193648, 20, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0],\n [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01,\n 
40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.0209644, 20, 0], [2, \n 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 2.5, 20, 0], [2, 0, 0, 3, 0.0164745,\n 20, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0,\n 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.0396825, 20,\n 0], [2, 0, 0, 3, 0.25, 20, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, \n 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2,\n 0, 0, 3, 0.277778, 20, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01,\n 40, 0], [2, 0, 0, 3, 0.01, 40, 0]]'], {}), '([[2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, \n 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.0222222, 20, 0],\n [2, 0, 0, 3, 0.117647, 20, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, \n 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2,\n 0, 0, 3, 0.0454545, 20, 0], [2, 0, 0, 3, 0.0318471, 20, 0], [2, 0, 0, 3,\n 0.01, 40, 0], [2, 0, 0, 3, 1.42857, 20, 0], [2, 0, 0, 3, 0.01, 40, 0],\n [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01,\n 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.526316, 20, 0], [2, 0,\n 0, 3, 0.0490196, 20, 0], [2, 0, 0, 3, 0.208333, 20, 0], [2, 0, 0, 3, \n 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.0645161, 20, 0],\n [2, 0, 0, 3, 0.0625, 20, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, \n 0.0255754, 20, 0], [2, 0, 0, 3, 0.0255102, 20, 0], [2, 0, 0, 3, \n 0.0193648, 20, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0],\n [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01,\n 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.0209644, 20, 0], [2, \n 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 2.5, 20, 0], [2, 0, 0, 3, 0.0164745,\n 20, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0,\n 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.0396825, 20,\n 0], [2, 0, 0, 3, 0.25, 20, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, \n 0.01, 40, 0], [2, 0, 0, 3, 0.01, 
40, 0], [2, 0, 0, 3, 0.01, 40, 0], [2,\n 0, 0, 3, 0.277778, 20, 0], [2, 0, 0, 3, 0.01, 40, 0], [2, 0, 0, 3, 0.01,\n 40, 0], [2, 0, 0, 3, 0.01, 40, 0]])\n', (27912, 29538), False, 'from numpy import array\n')] |
from typing import Union, Container
from itertools import chain
import numpy as np
import scipy.sparse as sp
modALinput = Union[list, np.ndarray, sp.csr_matrix]


def data_vstack(blocks: Container) -> modALinput:
    """
    Stack vertically both sparse and dense arrays.

    Args:
        blocks: Sequence of modALinput objects.

    Returns:
        New sequence of vertically stacked elements.

    Raises:
        TypeError: If the element type of ``blocks`` is not supported.
    """
    if isinstance(blocks[0], np.ndarray):
        return np.concatenate(blocks)
    elif isinstance(blocks[0], list):
        if isinstance(blocks[0][0], np.ndarray):
            # Multi-output case: stack the j-th array of every block together,
            # preserving one concatenated array per output position.
            return [np.concatenate([blocks[i][j] for i in range(len(blocks))])
                    for j in range(len(blocks[0]))]
        # Flatten one level: [[a, b], [c]] -> [a, b, c].
        # (The previous `list(chain(blocks))` chained a *single* iterable and
        # therefore returned the blocks unstacked.)
        return list(chain.from_iterable(blocks))
    elif sp.issparse(blocks[0]):
        return sp.vstack(blocks)
    else:
        try:
            return np.concatenate(blocks)
        except Exception as exc:
            raise TypeError(
                '%s datatype is not supported' % type(blocks[0])) from exc
| [
"itertools.chain",
"scipy.sparse.issparse",
"scipy.sparse.vstack",
"numpy.concatenate"
] | [((466, 488), 'numpy.concatenate', 'np.concatenate', (['blocks'], {}), '(blocks)\n', (480, 488), True, 'import numpy as np\n'), ((731, 753), 'scipy.sparse.issparse', 'sp.issparse', (['blocks[0]'], {}), '(blocks[0])\n', (742, 753), True, 'import scipy.sparse as sp\n'), ((707, 720), 'itertools.chain', 'chain', (['blocks'], {}), '(blocks)\n', (712, 720), False, 'from itertools import chain\n'), ((770, 787), 'scipy.sparse.vstack', 'sp.vstack', (['blocks'], {}), '(blocks)\n', (779, 787), True, 'import scipy.sparse as sp\n'), ((830, 852), 'numpy.concatenate', 'np.concatenate', (['blocks'], {}), '(blocks)\n', (844, 852), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
"""Defines the ModelAnalysis class."""
import json
import numpy as np
import pandas as pd
from pathlib import Path
import pickle
import warnings
from responsibleai._input_processing import _convert_to_list
from responsibleai._interfaces import ModelAnalysisData, Dataset
from responsibleai._internal.constants import\
ManagerNames, Metadata, SKLearn
from responsibleai._managers.counterfactual_manager import (
CounterfactualManager)
from responsibleai._managers.error_analysis_manager import ErrorAnalysisManager
from responsibleai._managers.explainer_manager import ExplainerManager
from responsibleai._managers.causal_manager import CausalManager
from responsibleai.exceptions import UserConfigValidationException
from responsibleai.modelanalysis.constants import ModelTask
_DTYPES = 'dtypes'
_TRAIN = 'train'
_TEST = 'test'
_TARGET_COLUMN = 'target_column'
_TASK_TYPE = 'task_type'
_MODEL = Metadata.MODEL
_MODEL_PKL = _MODEL + '.pkl'
_SERIALIZER = 'serializer'
_CLASSES = 'classes'
_MANAGERS = 'managers'
_CATEGORICAL_FEATURES = 'categorical_features'
_TRAN_LABELS = 'train_labels'
_META_JSON = Metadata.META_JSON
class ModelAnalysis(object):
"""Defines the top-level Model Analysis API.
Use ModelAnalysis to analyze errors, explain the most important
features, compute counterfactuals and run causal analysis in a
single API.
:param model: The model to compute RAI insights for.
A model that implements sklearn.predict or sklearn.predict_proba
or function that accepts a 2d ndarray.
:type model: object
:param train: The training dataset including the label column.
:type train: pandas.DataFrame
:param test: The test dataset including the label column.
:type test: pandas.DataFrame
:param target_column: The name of the label column.
:type target_column: str
:param task_type: The task to run, can be `classification` or
`regression`.
:type task_type: str
:param categorical_features: The categorical feature names.
:type categorical_features: list[str]
:param train_labels: The class labels in the training dataset
:type train_labels: ndarray
:param serializer: Picklable custom serializer with save and load
methods for custom model serialization.
The save method writes the model to file given a parent directory.
The load method returns the deserialized model from the same
parent directory.
:type serializer: object
"""
    def __init__(self, model, train, test, target_column,
                 task_type, categorical_features=None, train_labels=None,
                 serializer=None):
        """Defines the top-level Model Analysis API.
        Use ModelAnalysis to analyze errors, explain the most important
        features, compute counterfactuals and run causal analysis in a
        single API.
        :param model: The model to compute RAI insights for.
            A model that implements sklearn.predict or sklearn.predict_proba
            or function that accepts a 2d ndarray.
        :type model: object
        :param train: The training dataset including the label column.
        :type train: pandas.DataFrame
        :param test: The test dataset including the label column.
        :type test: pandas.DataFrame
        :param target_column: The name of the label column.
        :type target_column: str
        :param task_type: The task to run, can be `classification` or
            `regression`.
        :type task_type: str
        :param categorical_features: The categorical feature names.
        :type categorical_features: list[str]
        :param train_labels: The class labels in the training dataset
        :type train_labels: ndarray
        :param serializer: Picklable custom serializer with save and load
            methods defined for model that is not serializable. The save
            method returns a dictionary state and load method returns the
            model.
        :type serializer: object
        """
        # Fail fast on inconsistent inputs before any manager is built.
        self._validate_model_analysis_input_parameters(
            model=model, train=train, test=test,
            target_column=target_column, task_type=task_type,
            categorical_features=categorical_features,
            train_labels=train_labels,
            serializer=serializer)
        self.model = model
        self.train = train
        self.test = test
        self.target_column = target_column
        self.task_type = task_type
        self.categorical_features = categorical_features
        self._serializer = serializer
        self._train_labels = train_labels
        # Resolve the class labels once up front (None for regression).
        self._classes = ModelAnalysis._get_classes(
            task_type=self.task_type,
            train=self.train,
            target_column=self.target_column,
            train_labels=self._train_labels
        )
        self._causal_manager = CausalManager(
            train, test, target_column, task_type, categorical_features)
        self._counterfactual_manager = CounterfactualManager(
            model=model, train=train, test=test,
            target_column=target_column, task_type=task_type,
            categorical_features=categorical_features)
        self._error_analysis_manager = ErrorAnalysisManager(
            model, test, target_column,
            categorical_features)
        self._explainer_manager = ExplainerManager(
            model, train, test,
            target_column,
            self._classes,
            categorical_features=categorical_features)
        # Kept in a list so compute()/list()/get() can iterate uniformly.
        self._managers = [self._causal_manager,
                          self._counterfactual_manager,
                          self._error_analysis_manager,
                          self._explainer_manager]
@staticmethod
def _get_classes(task_type, train, target_column, train_labels):
if task_type == ModelTask.CLASSIFICATION:
if train_labels is None:
return train[target_column].unique()
else:
return train_labels
else:
return None
    def _validate_model_analysis_input_parameters(
            self, model, train, test, target_column,
            task_type, categorical_features=None, train_labels=None,
            serializer=None):
        """
        Validate the inputs for ModelAnalysis class.
        :param model: The model to compute RAI insights for.
            A model that implements sklearn.predict or sklearn.predict_proba
            or function that accepts a 2d ndarray.
        :type model: object
        :param train: The training dataset including the label column.
        :type train: pandas.DataFrame
        :param test: The test dataset including the label column.
        :type test: pandas.DataFrame
        :param target_column: The name of the label column.
        :type target_column: str
        :param task_type: The task to run, can be `classification` or
            `regression`.
        :type task_type: str
        :param categorical_features: The categorical feature names.
        :type categorical_features: list[str]
        :param train_labels: The class labels in the training dataset
        :type train_labels: ndarray
        :param serializer: Picklable custom serializer with save and load
            methods defined for model that is not serializable. The save
            method returns a dictionary state and load method returns the
            model.
        :type serializer: object
        :raises UserConfigValidationException: If any check below fails.
        """
        # 1. The task type must be one of the supported values.
        valid_tasks = [
            ModelTask.CLASSIFICATION.value,
            ModelTask.REGRESSION.value
        ]
        if task_type not in valid_tasks:
            message = (f"Unsupported task type '{task_type}'. "
                       f"Should be one of {valid_tasks}")
            raise UserConfigValidationException(message)
        # 2. A missing model is allowed but degrades functionality, so only
        #    warn instead of raising.
        if model is None:
            warnings.warn(
                'INVALID-MODEL-WARNING: No valid model is supplied. '
                'The explanations, error analysis and counterfactuals '
                'may not work')
        # 3. A custom serializer must expose save()/load() and must itself
        #    be picklable so the analysis state can be persisted.
        if serializer is not None:
            if not hasattr(serializer, 'save'):
                raise UserConfigValidationException(
                    'The serializer does not implement save()')
            if not hasattr(serializer, 'load'):
                raise UserConfigValidationException(
                    'The serializer does not implement load()')
            try:
                pickle.dumps(serializer)
            except Exception:
                raise UserConfigValidationException(
                    'The serializer should be serializable via pickle')
        # 4. DataFrame-specific checks; skipped entirely for other input
        #    containers.
        if isinstance(train, pd.DataFrame) and isinstance(test, pd.DataFrame):
            # Train and test must share the exact same column set.
            if len(set(train.columns) - set(test.columns)) != 0 or \
                    len(set(test.columns) - set(train.columns)):
                raise UserConfigValidationException(
                    'The features in train and test data do not match')
            # The target column must exist in both splits.
            if target_column not in list(train.columns) or \
                    target_column not in list(test.columns):
                raise UserConfigValidationException(
                    'Target name {0} not present in train/test data'.format(
                        target_column)
                )
            # Categorical features may not include the target and must all
            # exist in the training data.
            if categorical_features is not None and \
                    len(categorical_features) > 0:
                if target_column in categorical_features:
                    raise UserConfigValidationException(
                        'Found target name {0} in '
                        'categorical feature list'.format(
                            target_column)
                    )
                difference_set = set(categorical_features) - set(train.columns)
                if len(difference_set) > 0:
                    message = ("Feature names in categorical_features "
                               "do not exist in train data: "
                               f"{list(difference_set)}")
                    raise UserConfigValidationException(message)
            # For classification, user-supplied labels must match the
            # distinct target values in both train and test data exactly.
            if train_labels is not None and task_type == \
                    ModelTask.CLASSIFICATION:
                if len(set(train[target_column].unique()) -
                        set(train_labels)) != 0 or \
                        len(set(train_labels) -
                            set(train[target_column].unique())) != 0:
                    raise UserConfigValidationException(
                        'The train labels and distinct values in '
                        'target (train data) do not match')
                if len(set(test[target_column].unique()) -
                        set(train_labels)) != 0 or \
                        len(set(train_labels) -
                            set(test[target_column].unique())) != 0:
                    raise UserConfigValidationException(
                        'The train labels and distinct values in '
                        'target (test data) do not match')
            # 5. Smoke-test the model on a single row of each split to
            #    verify it supports the prediction API the managers need.
            if model is not None:
                # Pick one row from train and test data
                small_train_data = train.iloc[0:1].drop(
                    [target_column], axis=1)
                small_test_data = test.iloc[0:1].drop(
                    [target_column], axis=1)
                # Run predict() of the model
                try:
                    model.predict(small_train_data)
                    model.predict(small_test_data)
                except Exception:
                    raise UserConfigValidationException(
                        'The model passed cannot be used for'
                        ' getting predictions via predict()'
                    )
                # Run predict_proba() of the model
                if task_type == ModelTask.CLASSIFICATION:
                    try:
                        model.predict_proba(small_train_data)
                        model.predict_proba(small_test_data)
                    except Exception:
                        raise UserConfigValidationException(
                            'The model passed cannot be used for'
                            ' getting predictions via predict_proba()'
                        )
@property
def causal(self) -> CausalManager:
"""Get the causal manager.
:return: The causal manager.
:rtype: CausalManager
"""
return self._causal_manager
@property
def counterfactual(self) -> CounterfactualManager:
"""Get the counterfactual manager.
:return: The counterfactual manager.
:rtype: CounterfactualManager
"""
return self._counterfactual_manager
@property
def error_analysis(self) -> ErrorAnalysisManager:
"""Get the error analysis manager.
:return: The error analysis manager.
:rtype: ErrorAnalysisManager
"""
return self._error_analysis_manager
@property
def explainer(self) -> ExplainerManager:
"""Get the explainer manager.
:return: The explainer manager.
:rtype: ExplainerManager
"""
return self._explainer_manager
def compute(self):
"""Calls compute on each of the managers."""
for manager in self._managers:
manager.compute()
def list(self):
"""List information about each of the managers.
:return: Information about each of the managers.
:rtype: dict
"""
configs = {}
for manager in self._managers:
configs[manager.name] = manager.list()
return configs
def get(self):
"""List information about each of the managers.
:return: Information about each of the managers.
:rtype: dict
"""
configs = {}
for manager in self._managers:
configs[manager.name] = manager.get()
return configs
def get_data(self):
"""Get all data as ModelAnalysisData object
:return: Model Analysis Data
:rtype: ModelAnalysisData
"""
data = ModelAnalysisData()
data.dataset = self._get_dataset()
data.modelExplanationData = self.explainer.get_data()
data.errorAnalysisData = self.error_analysis.get_data()
data.causalAnalysisData = self.causal.get_data()
data.counterfactualData = self.counterfactual.get_data()
return data
def _get_dataset(self):
dashboard_dataset = Dataset()
dashboard_dataset.task_type = self.task_type
dashboard_dataset.categorical_features = self.categorical_features
dashboard_dataset.class_names = _convert_to_list(
self._classes)
predicted_y = None
feature_length = None
dataset: pd.DataFrame = self.test.drop(
[self.target_column], axis=1)
if isinstance(dataset, pd.DataFrame) and hasattr(dataset, 'columns'):
self._dataframeColumns = dataset.columns
try:
list_dataset = _convert_to_list(dataset)
except Exception as ex:
raise ValueError(
"Unsupported dataset type") from ex
if dataset is not None and self.model is not None:
try:
predicted_y = self.model.predict(dataset)
except Exception as ex:
msg = "Model does not support predict method for given"
"dataset type"
raise ValueError(msg) from ex
try:
predicted_y = _convert_to_list(predicted_y)
except Exception as ex:
raise ValueError(
"Model prediction output of unsupported type,") from ex
if predicted_y is not None:
if(self.task_type == "classification" and
dashboard_dataset.class_names is not None):
predicted_y = [dashboard_dataset.class_names.index(
y) for y in predicted_y]
dashboard_dataset.predicted_y = predicted_y
row_length = 0
if list_dataset is not None:
row_length, feature_length = np.shape(list_dataset)
if row_length > 100000:
raise ValueError(
"Exceeds maximum number of rows"
"for visualization (100000)")
if feature_length > 1000:
raise ValueError("Exceeds maximum number of features for"
" visualization (1000). Please regenerate the"
" explanation using fewer features or"
" initialize the dashboard without passing a"
" dataset.")
dashboard_dataset.features = list_dataset
true_y = self.test[self.target_column]
if true_y is not None and len(true_y) == row_length:
if(self.task_type == "classification" and
dashboard_dataset.class_names is not None):
true_y = [dashboard_dataset.class_names.index(
y) for y in true_y]
dashboard_dataset.true_y = _convert_to_list(true_y)
features = dataset.columns
if features is not None:
features = _convert_to_list(features)
if feature_length is not None and len(features) != feature_length:
raise ValueError("Feature vector length mismatch:"
" feature names length differs"
" from local explanations dimension")
dashboard_dataset.feature_names = features
dashboard_dataset.target_column = self.target_column
if (self.model is not None and
hasattr(self.model, SKLearn.PREDICT_PROBA) and
self.model.predict_proba is not None and
dataset is not None):
try:
probability_y = self.model.predict_proba(dataset)
except Exception as ex:
raise ValueError("Model does not support predict_proba method"
" for given dataset type,") from ex
try:
probability_y = _convert_to_list(probability_y)
except Exception as ex:
raise ValueError(
"Model predict_proba output of unsupported type,") from ex
dashboard_dataset.probability_y = probability_y
return dashboard_dataset
def _write_to_file(self, file_path, content):
"""Save the string content to the given file path.
:param file_path: The file path to save the content to.
:type file_path: str
:param content: The string content to save.
:type content: str
"""
with open(file_path, 'w') as file:
file.write(content)
    def save(self, path):
        """Save the ModelAnalysis to the given path.

        Serializes the train/test data, metadata, each manager, and the
        model (via pickle or a user-supplied serializer) under ``path``.

        :param path: The directory path to save the ModelAnalysis to.
        :type path: str
        """
        top_dir = Path(path)
        # save each of the individual managers
        # (each manager owns a subdirectory named after itself)
        for manager in self._managers:
            manager._save(top_dir / manager.name)
        # save current state
        # dtypes are stored next to the JSON so they can be restored on load
        dtypes = self.train.dtypes.astype(str).to_dict()
        self._write_to_file(top_dir / (_TRAIN + _DTYPES),
                            json.dumps(dtypes))
        self._write_to_file(top_dir / _TRAIN, self.train.to_json())
        dtypes = self.test.dtypes.astype(str).to_dict()
        self._write_to_file(top_dir / (_TEST + _DTYPES),
                            json.dumps(dtypes))
        self._write_to_file(top_dir / _TEST, self.test.to_json())
        # persist the scalar configuration as JSON metadata
        meta = {
            _TARGET_COLUMN: self.target_column,
            _TASK_TYPE: self.task_type,
            _CATEGORICAL_FEATURES: self.categorical_features,
            _TRAN_LABELS: self._train_labels
        }
        with open(top_dir / _META_JSON, 'w') as file:
            json.dump(meta, file)
        if self._serializer is not None:
            # save the model
            self._serializer.save(self.model, top_dir)
            # save the serializer
            with open(top_dir / _SERIALIZER, 'wb') as file:
                pickle.dump(self._serializer, file)
        else:
            if self.model is not None:
                # without a custom serializer the model must support pickling
                has_setstate = hasattr(self.model, '__setstate__')
                has_getstate = hasattr(self.model, '__getstate__')
                if not (has_setstate and has_getstate):
                    raise ValueError(
                        "Model must be picklable or a custom serializer must"
                        " be specified")
            with open(top_dir / _MODEL_PKL, 'wb') as file:
                pickle.dump(self.model, file)
    @staticmethod
    def load(path):
        """Load the ModelAnalysis from the given path.

        Reconstructs an instance previously persisted with ``save``.

        :param path: The directory path to load the ModelAnalysis from.
        :type path: str
        """
        # create the ModelAnalysis without any properties using the __new__
        # function, similar to pickle
        inst = ModelAnalysis.__new__(ModelAnalysis)
        top_dir = Path(path)
        # load current state
        # restore train/test data with their original dtypes
        with open(top_dir / (_TRAIN + _DTYPES), 'r') as file:
            types = json.load(file)
        with open(top_dir / _TRAIN, 'r') as file:
            train = pd.read_json(file, dtype=types)
        inst.__dict__[_TRAIN] = train
        with open(top_dir / (_TEST + _DTYPES), 'r') as file:
            types = json.load(file)
        with open(top_dir / _TEST, 'r') as file:
            test = pd.read_json(file, dtype=types)
        inst.__dict__[_TEST] = test
        # restore the scalar configuration from the JSON metadata
        with open(top_dir / _META_JSON, 'r') as meta_file:
            meta = meta_file.read()
        meta = json.loads(meta)
        inst.__dict__[_TARGET_COLUMN] = meta[_TARGET_COLUMN]
        inst.__dict__[_TASK_TYPE] = meta[_TASK_TYPE]
        inst.__dict__[_CATEGORICAL_FEATURES] = meta[_CATEGORICAL_FEATURES]
        inst.__dict__['_' + _TRAN_LABELS] = meta[_TRAN_LABELS]
        # classes are recomputed from the restored training data
        inst.__dict__['_' + _CLASSES] = ModelAnalysis._get_classes(
            task_type=meta[_TASK_TYPE],
            train=train,
            target_column=meta[_TARGET_COLUMN],
            train_labels=meta[_TRAN_LABELS]
        )
        # restore the model, via the custom serializer if one was saved
        serializer_path = top_dir / _SERIALIZER
        if serializer_path.exists():
            with open(serializer_path, 'rb') as file:
                serializer = pickle.load(file)
            inst.__dict__['_' + _SERIALIZER] = serializer
            inst.__dict__[_MODEL] = serializer.load(top_dir)
        else:
            inst.__dict__['_' + _SERIALIZER] = None
            with open(top_dir / _MODEL_PKL, 'rb') as file:
                inst.__dict__[_MODEL] = pickle.load(file)
        # load each of the individual managers
        manager_map = {
            ManagerNames.CAUSAL: CausalManager,
            ManagerNames.COUNTERFACTUAL: CounterfactualManager,
            ManagerNames.ERROR_ANALYSIS: ErrorAnalysisManager,
            ManagerNames.EXPLAINER: ExplainerManager,
        }
        managers = []
        for manager_name, manager_class in manager_map.items():
            full_name = f'_{manager_name}_manager'
            manager_dir = top_dir / manager_name
            manager = manager_class._load(manager_dir, inst)
            inst.__dict__[full_name] = manager
            managers.append(manager)
        inst.__dict__['_' + _MANAGERS] = managers
        return inst
| [
"pickle.dumps",
"responsibleai.exceptions.UserConfigValidationException",
"responsibleai._interfaces.ModelAnalysisData",
"pathlib.Path",
"json.dumps",
"warnings.warn",
"pandas.read_json",
"responsibleai._managers.counterfactual_manager.CounterfactualManager",
"json.loads",
"responsibleai._managers... | [((4939, 5013), 'responsibleai._managers.causal_manager.CausalManager', 'CausalManager', (['train', 'test', 'target_column', 'task_type', 'categorical_features'], {}), '(train, test, target_column, task_type, categorical_features)\n', (4952, 5013), False, 'from responsibleai._managers.causal_manager import CausalManager\n'), ((5067, 5228), 'responsibleai._managers.counterfactual_manager.CounterfactualManager', 'CounterfactualManager', ([], {'model': 'model', 'train': 'train', 'test': 'test', 'target_column': 'target_column', 'task_type': 'task_type', 'categorical_features': 'categorical_features'}), '(model=model, train=train, test=test, target_column=\n target_column, task_type=task_type, categorical_features=\n categorical_features)\n', (5088, 5228), False, 'from responsibleai._managers.counterfactual_manager import CounterfactualManager\n'), ((5296, 5366), 'responsibleai._managers.error_analysis_manager.ErrorAnalysisManager', 'ErrorAnalysisManager', (['model', 'test', 'target_column', 'categorical_features'], {}), '(model, test, target_column, categorical_features)\n', (5316, 5366), False, 'from responsibleai._managers.error_analysis_manager import ErrorAnalysisManager\n'), ((5427, 5540), 'responsibleai._managers.explainer_manager.ExplainerManager', 'ExplainerManager', (['model', 'train', 'test', 'target_column', 'self._classes'], {'categorical_features': 'categorical_features'}), '(model, train, test, target_column, self._classes,\n categorical_features=categorical_features)\n', (5443, 5540), False, 'from responsibleai._managers.explainer_manager import ExplainerManager\n'), ((14117, 14136), 'responsibleai._interfaces.ModelAnalysisData', 'ModelAnalysisData', ([], {}), '()\n', (14134, 14136), False, 'from responsibleai._interfaces import ModelAnalysisData, Dataset\n'), ((14505, 14514), 'responsibleai._interfaces.Dataset', 'Dataset', ([], {}), '()\n', (14512, 14514), False, 'from responsibleai._interfaces import ModelAnalysisData, 
Dataset\n'), ((14683, 14714), 'responsibleai._input_processing._convert_to_list', '_convert_to_list', (['self._classes'], {}), '(self._classes)\n', (14699, 14714), False, 'from responsibleai._input_processing import _convert_to_list\n'), ((19064, 19074), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (19068, 19074), False, 'from pathlib import Path\n'), ((21170, 21180), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (21174, 21180), False, 'from pathlib import Path\n'), ((21791, 21807), 'json.loads', 'json.loads', (['meta'], {}), '(meta)\n', (21801, 21807), False, 'import json\n'), ((7839, 7877), 'responsibleai.exceptions.UserConfigValidationException', 'UserConfigValidationException', (['message'], {}), '(message)\n', (7868, 7877), False, 'from responsibleai.exceptions import UserConfigValidationException\n'), ((7917, 8060), 'warnings.warn', 'warnings.warn', (['"""INVALID-MODEL-WARNING: No valid model is supplied. The explanations, error analysis and counterfactuals may not work"""'], {}), "(\n 'INVALID-MODEL-WARNING: No valid model is supplied. 
The explanations, error analysis and counterfactuals may not work'\n )\n", (7930, 8060), False, 'import warnings\n'), ((15049, 15074), 'responsibleai._input_processing._convert_to_list', '_convert_to_list', (['dataset'], {}), '(dataset)\n', (15065, 15074), False, 'from responsibleai._input_processing import _convert_to_list\n'), ((16156, 16178), 'numpy.shape', 'np.shape', (['list_dataset'], {}), '(list_dataset)\n', (16164, 16178), True, 'import numpy as np\n'), ((17160, 17184), 'responsibleai._input_processing._convert_to_list', '_convert_to_list', (['true_y'], {}), '(true_y)\n', (17176, 17184), False, 'from responsibleai._input_processing import _convert_to_list\n'), ((17278, 17304), 'responsibleai._input_processing._convert_to_list', '_convert_to_list', (['features'], {}), '(features)\n', (17294, 17304), False, 'from responsibleai._input_processing import _convert_to_list\n'), ((19383, 19401), 'json.dumps', 'json.dumps', (['dtypes'], {}), '(dtypes)\n', (19393, 19401), False, 'import json\n'), ((19612, 19630), 'json.dumps', 'json.dumps', (['dtypes'], {}), '(dtypes)\n', (19622, 19630), False, 'import json\n'), ((19986, 20007), 'json.dump', 'json.dump', (['meta', 'file'], {}), '(meta, file)\n', (19995, 20007), False, 'import json\n'), ((21292, 21307), 'json.load', 'json.load', (['file'], {}), '(file)\n', (21301, 21307), False, 'import json\n'), ((21378, 21409), 'pandas.read_json', 'pd.read_json', (['file'], {'dtype': 'types'}), '(file, dtype=types)\n', (21390, 21409), True, 'import pandas as pd\n'), ((21529, 21544), 'json.load', 'json.load', (['file'], {}), '(file)\n', (21538, 21544), False, 'import json\n'), ((21613, 21644), 'pandas.read_json', 'pd.read_json', (['file'], {'dtype': 'types'}), '(file, dtype=types)\n', (21625, 21644), True, 'import pandas as pd\n'), ((8212, 8285), 'responsibleai.exceptions.UserConfigValidationException', 'UserConfigValidationException', (['"""The serializer does not implement save()"""'], {}), "('The serializer does not implement 
save()')\n", (8241, 8285), False, 'from responsibleai.exceptions import UserConfigValidationException\n'), ((8378, 8451), 'responsibleai.exceptions.UserConfigValidationException', 'UserConfigValidationException', (['"""The serializer does not implement load()"""'], {}), "('The serializer does not implement load()')\n", (8407, 8451), False, 'from responsibleai.exceptions import UserConfigValidationException\n'), ((8507, 8531), 'pickle.dumps', 'pickle.dumps', (['serializer'], {}), '(serializer)\n', (8519, 8531), False, 'import pickle\n'), ((8923, 9009), 'responsibleai.exceptions.UserConfigValidationException', 'UserConfigValidationException', (['"""The features in train and test data do not match"""'], {}), "(\n 'The features in train and test data do not match')\n", (8952, 9009), False, 'from responsibleai.exceptions import UserConfigValidationException\n'), ((15555, 15584), 'responsibleai._input_processing._convert_to_list', '_convert_to_list', (['predicted_y'], {}), '(predicted_y)\n', (15571, 15584), False, 'from responsibleai._input_processing import _convert_to_list\n'), ((18216, 18247), 'responsibleai._input_processing._convert_to_list', '_convert_to_list', (['probability_y'], {}), '(probability_y)\n', (18232, 18247), False, 'from responsibleai._input_processing import _convert_to_list\n'), ((20243, 20278), 'pickle.dump', 'pickle.dump', (['self._serializer', 'file'], {}), '(self._serializer, file)\n', (20254, 20278), False, 'import pickle\n'), ((20754, 20783), 'pickle.dump', 'pickle.dump', (['self.model', 'file'], {}), '(self.model, file)\n', (20765, 20783), False, 'import pickle\n'), ((22464, 22481), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (22475, 22481), False, 'import pickle\n'), ((22766, 22783), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (22777, 22783), False, 'import pickle\n'), ((8584, 8670), 'responsibleai.exceptions.UserConfigValidationException', 'UserConfigValidationException', (['"""The serializer should be 
serializable via pickle"""'], {}), "(\n 'The serializer should be serializable via pickle')\n", (8613, 8670), False, 'from responsibleai.exceptions import UserConfigValidationException\n'), ((10076, 10114), 'responsibleai.exceptions.UserConfigValidationException', 'UserConfigValidationException', (['message'], {}), '(message)\n', (10105, 10114), False, 'from responsibleai.exceptions import UserConfigValidationException\n'), ((10477, 10587), 'responsibleai.exceptions.UserConfigValidationException', 'UserConfigValidationException', (['"""The train labels and distinct values in target (train data) do not match"""'], {}), "(\n 'The train labels and distinct values in target (train data) do not match')\n", (10506, 10587), False, 'from responsibleai.exceptions import UserConfigValidationException\n'), ((10890, 10999), 'responsibleai.exceptions.UserConfigValidationException', 'UserConfigValidationException', (['"""The train labels and distinct values in target (test data) do not match"""'], {}), "(\n 'The train labels and distinct values in target (test data) do not match')\n", (10919, 10999), False, 'from responsibleai.exceptions import UserConfigValidationException\n'), ((11570, 11677), 'responsibleai.exceptions.UserConfigValidationException', 'UserConfigValidationException', (['"""The model passed cannot be used for getting predictions via predict()"""'], {}), "(\n 'The model passed cannot be used for getting predictions via predict()')\n", (11599, 11677), False, 'from responsibleai.exceptions import UserConfigValidationException\n'), ((12072, 12190), 'responsibleai.exceptions.UserConfigValidationException', 'UserConfigValidationException', (['"""The model passed cannot be used for getting predictions via predict_proba()"""'], {}), "(\n 'The model passed cannot be used for getting predictions via predict_proba()'\n )\n", (12101, 12190), False, 'from responsibleai.exceptions import UserConfigValidationException\n')] |
"""
This module defines a class used for evaluating coordinate transformations at null shell junctions.
"""
import numpy as np
import interpolators as interp
from helpers import *
class active_slice:
    """
    Class for handling shell and corner slicing of SSS regions. Given the region and the slice parameters,
    reference arrays are created for all desired transformations, which are then defined by interpolation
    and extrapolation of the reference arrays.
    Unlike passive_slice, this will actively obtain new functions U(udl),V(vdl) based on the inputs U0,V0, rather
    than just reading the existing ones.
    Designed to simultaneously incorporate both shell and corner junctions. Input determines which behavior will
    take effect. Offers protection from bad inputs in most cases. The exception is that the correct ublocks and
    vblocks have to be given or everything will come out whacky.
    Slice Location:
        The location of the slice is determined by the values r0,u0,v0. These describe a point with coordinates
        (u0,v0) in some block, and with radius r0. The values are forced to be self-consistent by the algorithm.
        In corner junction mode, this is the corner point. Otherwise, it just describes a point on the shell.
        In shell mode, r0 and (u0 xor v0) can be nan, in which case the parameters just specify the shell.
        To fully specify the point we need to know what block it's in. So correct ublock and vblock args are needed.
    Inputs:
        reg = The region being sliced.
        ublocks = List of indices (corresponding to reg.blocks) for the blocks containing the u=u0 slice.
                  None (the default) is treated as an empty list.
        vblocks = List of indices (corresponding to reg.blocks) for the blocks containing the v=v0 slice.
                  None (the default) is treated as an empty list.
        r0 = Radius of the junction corner point, or of the specified point on the shell in shell mode.
        u0 = Slice location coordinate.
        v0 = Slice location coordinate.
        U0 = Function U0(r) at v=v0, or None. When None, U(udl) = udl.
        V0 = Function V0(r) at u=u0, or None. When None, V(vdl) = vdl.
        mu = Extrapolation parameter. When mu=0, extrapolation is just translation. When mu->inf, extrapolation
             is smooth linear. (See interpolators module).
        r_refs = Optional reference radii. None (the default) is treated as an empty list.
    Methods:
        Provides various coordinate transformation methods, which can be used by a region.
    Attributes:
        Input parameters as well as various reference arrays.
    """
    def __init__(self, reg, ublocks=None, vblocks=None, r0=np.nan, u0=np.nan,
                 v0=np.nan, U0=None, V0=None, mu=0., r_refs=None):
        # BUGFIX: the list arguments previously used mutable defaults
        # (ublocks=[], ...). Use None sentinels and normalize here so a
        # shared default list can never leak between instances.
        ublocks = [] if ublocks is None else ublocks
        vblocks = [] if vblocks is None else vblocks
        r_refs = [] if r_refs is None else r_refs
        ## process and store input values
        self.reg = reg
        self.ublocks, self.vblocks = bcon(reg, ublocks), bcon(reg, vblocks)
        self.r0, self.u0, self.v0 = set_ruv0(reg, r0=r0, u0=u0, v0=v0)
        self.U0, self.V0 = U0, V0
        self.mu = float(mu)
        ## get ref arrays
        self.r = get_r_ref(self.reg, r_refs, self.r0)
        self.r, self.uvdl_u0, self.uvdl_v0 = uvdl_of_r_at_uv0(
            self.r, self.reg, ublocks=self.ublocks, vblocks=self.vblocks,
            u0=self.u0, v0=self.v0)
        self.U_v0 = U_of_udl_at_v0(self.r, self.uvdl_v0[0], self.U0)
        self.V_u0 = V_of_vdl_at_u0(self.r, self.uvdl_u0[1], self.V0)
        ## use interpolated functions to get UV of ref arrays
        ## (only meaningful in corner-junction mode: finite r0 + both block lists)
        if np.isfinite(r0) and len(ublocks) > 0 and len(vblocks) > 0:
            self.UV_u0 = self.UV_of_uvdl(self.uvdl_u0)
            self.UV_v0 = self.UV_of_uvdl(self.uvdl_v0)
    """
    Coordinate transformation methods.
    """
    def U_of_udl_at_v0(self, udl):
        """
        Evaluate the function U(udl) = U(r(udl,vdl0)) by interpolating from stored reference values.
        Smooth interpolation and extrapolation handled by interpolators module.
        """
        return interp.interp_with_smooth_extrap(udl, self.uvdl_v0[0], self.U_v0, mu=self.mu)
    def V_of_vdl_at_u0(self, vdl):
        """
        Evaluate the function V(vdl) = V(r(udl0,vdl)) by interpolating from stored reference values.
        Smooth interpolation and extrapolation handled by interpolators module.
        """
        return interp.interp_with_smooth_extrap(vdl, self.uvdl_u0[1], self.V_u0, mu=self.mu)
    def UV_of_uvdl(self, uvdl):
        """
        Combine U(udl) and V(vdl) into UV(uvdl).
        """
        U_temp = self.U_of_udl_at_v0(uvdl[0])
        V_temp = self.V_of_vdl_at_u0(uvdl[1])
        UV_temp = np.array([U_temp, V_temp])
        return UV_temp
    def U_of_r_at_v0(self, r):
        """
        Evaluate the function U(r) at v0 by interpolating from stored reference values.
        Smooth interpolation and extrapolation handled by interpolators module.
        Setting mu to nan because this should not be extrapolated.
        """
        return interp.interp_with_smooth_extrap(r, self.r, self.U_v0, mu=np.nan)
    def V_of_r_at_u0(self, r):
        """
        Evaluate the function V(r) at u0 by interpolating from stored reference values.
        Smooth interpolation and extrapolation handled by interpolators module.
        Setting mu to nan because this should not be extrapolated.
        """
        return interp.interp_with_smooth_extrap(r, self.r, self.V_u0, mu=np.nan)
| [
"numpy.array",
"numpy.isfinite",
"interpolators.interp_with_smooth_extrap"
] | [((4644, 4721), 'interpolators.interp_with_smooth_extrap', 'interp.interp_with_smooth_extrap', (['udl', 'self.uvdl_v0[0]', 'self.U_v0'], {'mu': 'self.mu'}), '(udl, self.uvdl_v0[0], self.U_v0, mu=self.mu)\n', (4676, 4721), True, 'import interpolators as interp\n'), ((4945, 5022), 'interpolators.interp_with_smooth_extrap', 'interp.interp_with_smooth_extrap', (['vdl', 'self.uvdl_u0[1]', 'self.V_u0'], {'mu': 'self.mu'}), '(vdl, self.uvdl_u0[1], self.V_u0, mu=self.mu)\n', (4977, 5022), True, 'import interpolators as interp\n'), ((5200, 5226), 'numpy.array', 'np.array', (['[U_temp, V_temp]'], {}), '([U_temp, V_temp])\n', (5208, 5226), True, 'import numpy as np\n'), ((5510, 5575), 'interpolators.interp_with_smooth_extrap', 'interp.interp_with_smooth_extrap', (['r', 'self.r', 'self.U_v0'], {'mu': 'np.nan'}), '(r, self.r, self.U_v0, mu=np.nan)\n', (5542, 5575), True, 'import interpolators as interp\n'), ((5843, 5908), 'interpolators.interp_with_smooth_extrap', 'interp.interp_with_smooth_extrap', (['r', 'self.r', 'self.V_u0'], {'mu': 'np.nan'}), '(r, self.r, self.V_u0, mu=np.nan)\n', (5875, 5908), True, 'import interpolators as interp\n'), ((4227, 4242), 'numpy.isfinite', 'np.isfinite', (['r0'], {}), '(r0)\n', (4238, 4242), True, 'import numpy as np\n')] |
import struct
import xml.etree.ElementTree as ETree
from collections import defaultdict
import mne
import numpy as np
from pyxdf import load_xdf, match_streaminfos, resolve_streams
from pyxdf.pyxdf import open_xdf, _read_varlen_int
def read_raw_xdf(fname, stream_id, srate="effective", prefix_markers=False, *args,
                 **kwargs):
    """Read XDF file.

    Parameters
    ----------
    fname : str
        Name of the XDF file.
    stream_id : int
        ID (number) of the stream to load.
    srate : {"nominal", "effective"}
        Use either nominal or effective sampling rate.
    prefix_markers : bool
        Whether or not to prefix markers with their corresponding stream ID.

    Returns
    -------
    raw : mne.io.Raw
        XDF file data.

    Raises
    ------
    ValueError
        If ``srate`` is neither "nominal" nor "effective".
    IOError
        If the requested stream ID is not present in the file.
    RuntimeError
        If the requested stream is an irregularly sampled marker stream.
    """
    if srate not in ("nominal", "effective"):
        raise ValueError(f"The 'srate' parameter must be either 'nominal' or 'effective' "
                         f"(got {srate}).")
    streams, _ = load_xdf(fname)
    for stream in streams:
        if stream["info"]["stream_id"] == stream_id:
            break  # stream found
    else:  # stream not found
        raise IOError(f"Stream ID {stream_id} not found.")
    if float(stream["info"]["nominal_srate"][0]) == 0:
        raise RuntimeError("Importing a marker stream is not supported, try importing a "
                           "regularly sampled stream instead.")
    n_chans = int(stream["info"]["channel_count"][0])
    fs = float(np.array(stream["info"][f"{srate}_srate"]).item())
    # CLEANUP: the former "types" list was collected but never used.
    labels, units = [], []
    try:
        for ch in stream["info"]["desc"][0]["channels"][0]["channel"]:
            labels.append(str(ch["label"][0]))
            units.append(ch["unit"][0] if ch["unit"] else "NA")
    except (TypeError, IndexError):  # no channel labels found
        pass
    if not labels:  # fall back to numeric channel names
        labels = [str(n) for n in range(n_chans)]
    if not units:
        units = ["NA" for _ in range(n_chans)]
    info = mne.create_info(ch_names=labels, sfreq=fs, ch_types="eeg")
    # convert from microvolts to volts if necessary
    scale = np.array([1e-6 if u in ("microvolt", "microvolts") else 1 for u in units])
    raw = mne.io.RawArray((stream["time_series"] * scale).T, info)
    raw._filenames = [fname]
    first_samp = stream["time_stamps"][0]
    # Attach every marker stream in the file as annotations.
    markers = match_streaminfos(resolve_streams(fname), [{"type": "Markers"}])
    for marker_id in markers:  # renamed: used to shadow the stream_id argument
        for stream in streams:
            if stream["info"]["stream_id"] == marker_id:
                break  # guaranteed to match: marker_id comes from this file
        onsets = stream["time_stamps"] - first_samp
        prefix = f"{marker_id}-" if prefix_markers else ""
        descriptions = [f"{prefix}{item}" for sub in stream["time_series"] for item in sub]
        raw.annotations.append(onsets, [0] * len(onsets), descriptions)
    return raw
def get_xml(fname):
    """Get XML stream headers and footers from all streams.

    Parameters
    ----------
    fname : str
        Name of the XDF file.

    Returns
    -------
    xml : dict
        XML stream headers and footers, keyed by stream ID and chunk tag.
    """
    collected = defaultdict(dict)
    with open_xdf(fname) as fobj:
        while True:
            try:
                chunk_len = _read_varlen_int(fobj)
            except EOFError:
                return collected
            chunk_tag = struct.unpack('<H', fobj.read(2))[0]
            if chunk_tag not in (2, 3, 4, 6):
                # chunk carries no stream ID; skip remaining contents
                fobj.seek(chunk_len - 2, 1)
                continue
            sid = struct.unpack("<I", fobj.read(4))[0]
            if chunk_tag in (2, 6):  # parse StreamHeader/StreamFooter chunk
                payload = fobj.read(chunk_len - 6).decode()
                collected[sid][chunk_tag] = ETree.fromstring(payload)
            else:  # skip remaining chunk contents
                fobj.seek(chunk_len - 6, 1)
def list_chunks(fname):
    """List all chunks contained in an XDF file.

    Listing chunks summarizes the content of the XDF file. Because this function does not
    attempt to parse the data, this also works for corrupted files.

    Parameters
    ----------
    fname : str
        Name of the XDF file.

    Returns
    -------
    chunks : list
        List of dicts containing a short summary for each chunk.
    """
    with open_xdf(fname) as f:
        chunks = []
        # Walk the file chunk by chunk; the list is returned when the
        # variable-length size field hits end-of-file.
        while True:
            try:
                nbytes = _read_varlen_int(f)
            except EOFError:
                return chunks
            chunk = {"nbytes": nbytes}
            tag = struct.unpack('<H', f.read(2))[0]
            chunk["tag"] = tag
            if tag == 1:
                # tag 1: file header, payload is plain XML text
                chunk["content"] = f.read(nbytes - 2).decode()
            elif tag == 5:
                # tag 5: boundary chunk; content is its fixed magic UUID
                chunk["content"] = ("0x43 0xA5 0x46 0xDC 0xCB 0xF5 0x41 0x0F 0xB3 0x0E "
                                    "0xD5 0x46 0x73 0x83 0xCB 0xE4")
                f.seek(chunk["nbytes"] - 2, 1)  # skip remaining chunk contents
            elif tag in [2, 6]:  # XML
                # tags 2/6: stream header/footer, XML payload after stream ID
                chunk["stream_id"] = struct.unpack("<I", f.read(4))[0]
                chunk["content"] = f.read(chunk["nbytes"] - 6).decode().replace("\t", " ")
            elif tag == 4:
                # tag 4: clock offset, two little-endian doubles after stream ID
                chunk["stream_id"] = struct.unpack("<I", f.read(4))[0]
                collection_time = struct.unpack("<d", f.read(8))[0]
                offset_value = struct.unpack("<d", f.read(8))[0]
                chunk["content"] = (f"Collection time: {collection_time}\n"
                                    f"Offset value: {offset_value}")
            elif tag == 3:
                # tag 3: samples; do not parse, just note the payload size
                chunk["stream_id"] = struct.unpack("<I", f.read(4))[0]
                remainder = chunk["nbytes"] - 6
                chunk["content"] = f"<BINARY DATA ({remainder} Bytes)>"
                f.seek(remainder, 1)  # skip remaining chunk contents
            else:
                f.seek(chunk["nbytes"] - 2, 1)  # skip remaining chunk contents
            chunks.append(chunk)
| [
"pyxdf.pyxdf._read_varlen_int",
"mne.create_info",
"pyxdf.pyxdf.open_xdf",
"pyxdf.load_xdf",
"numpy.array",
"collections.defaultdict",
"xml.etree.ElementTree.fromstring",
"mne.io.RawArray",
"pyxdf.resolve_streams"
] | [((979, 994), 'pyxdf.load_xdf', 'load_xdf', (['fname'], {}), '(fname)\n', (987, 994), False, 'from pyxdf import load_xdf, match_streaminfos, resolve_streams\n'), ((2049, 2107), 'mne.create_info', 'mne.create_info', ([], {'ch_names': 'labels', 'sfreq': 'fs', 'ch_types': '"""eeg"""'}), "(ch_names=labels, sfreq=fs, ch_types='eeg')\n", (2064, 2107), False, 'import mne\n'), ((2172, 2249), 'numpy.array', 'np.array', (["[(1e-06 if u in ('microvolt', 'microvolts') else 1) for u in units]"], {}), "([(1e-06 if u in ('microvolt', 'microvolts') else 1) for u in units])\n", (2180, 2249), True, 'import numpy as np\n'), ((2257, 2313), 'mne.io.RawArray', 'mne.io.RawArray', (["(stream['time_series'] * scale).T", 'info'], {}), "((stream['time_series'] * scale).T, info)\n", (2272, 2313), False, 'import mne\n'), ((2417, 2439), 'pyxdf.resolve_streams', 'resolve_streams', (['fname'], {}), '(fname)\n', (2432, 2439), False, 'from pyxdf import load_xdf, match_streaminfos, resolve_streams\n'), ((3150, 3165), 'pyxdf.pyxdf.open_xdf', 'open_xdf', (['fname'], {}), '(fname)\n', (3158, 3165), False, 'from pyxdf.pyxdf import open_xdf, _read_varlen_int\n'), ((3186, 3203), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3197, 3203), False, 'from collections import defaultdict\n'), ((4313, 4328), 'pyxdf.pyxdf.open_xdf', 'open_xdf', (['fname'], {}), '(fname)\n', (4321, 4328), False, 'from pyxdf.pyxdf import open_xdf, _read_varlen_int\n'), ((1477, 1519), 'numpy.array', 'np.array', (["stream['info'][f'{srate}_srate']"], {}), "(stream['info'][f'{srate}_srate'])\n", (1485, 1519), True, 'import numpy as np\n'), ((3266, 3285), 'pyxdf.pyxdf._read_varlen_int', '_read_varlen_int', (['f'], {}), '(f)\n', (3282, 3285), False, 'from pyxdf.pyxdf import open_xdf, _read_varlen_int\n'), ((4417, 4436), 'pyxdf.pyxdf._read_varlen_int', '_read_varlen_int', (['f'], {}), '(f)\n', (4433, 4436), False, 'from pyxdf.pyxdf import open_xdf, _read_varlen_int\n'), ((3666, 3690), 
'xml.etree.ElementTree.fromstring', 'ETree.fromstring', (['string'], {}), '(string)\n', (3682, 3690), True, 'import xml.etree.ElementTree as ETree\n')] |
"""
Predict state-level electricity demand.
Using hourly electricity demand reported at the balancing authority and utility
level in the FERC 714, and service territories for utilities and balancing
autorities inferred from the counties served by each utility, and the utilities
that make up each balancing authority in the EIA 861, estimate the total hourly
electricity demand for each US state.
This analysis uses the total electricity sales by state reported in the EIA 861
as a scaling factor to ensure that the magnitude of electricity sales is
roughly correct, and obtains the shape of the demand curve from the hourly
planning area demand reported in the FERC 714.
The compilation of historical service territories based on the EIA 861 data is
somewhat manual and could certainly be improved, but overall the results seem
reasonable. Additional predictive spatial variables will be required to obtain
more granular electricity demand estimates (e.g. at the county level).
Currently the script takes no arguments and simply runs a predefined analysis
across all states and all years for which both EIA 861 and FERC 714 data are
available, and outputs the results as a CSV in
PUDL_DIR/local/state-demand/demand.csv
"""
import argparse
import datetime
import logging
import pathlib
import sys
from typing import Any, Dict, Iterable, List, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sqlalchemy as sa
import pudl.analysis.timeseries_cleaning
import pudl.output.pudltabl
import pudl.workspace.setup
logger = logging.getLogger(__name__)
# --- Constants --- #
STATES: List[Dict[str, Union[str, int]]] = [
{'name': 'Alabama', 'code': 'AL', 'fips': '01'},
{'name': 'Alaska', 'code': 'AK', 'fips': '02'},
{'name': 'Arizona', 'code': 'AZ', 'fips': '04'},
{'name': 'Arkansas', 'code': 'AR', 'fips': '05'},
{'name': 'California', 'code': 'CA', 'fips': '06'},
{'name': 'Colorado', 'code': 'CO', 'fips': '08'},
{'name': 'Connecticut', 'code': 'CT', 'fips': '09'},
{'name': 'Delaware', 'code': 'DE', 'fips': '10'},
{'name': 'District of Columbia', 'code': 'DC', 'fips': '11'},
{'name': 'Florida', 'code': 'FL', 'fips': '12'},
{'name': 'Georgia', 'code': 'GA', 'fips': '13'},
{'name': 'Hawaii', 'code': 'HI', 'fips': '15'},
{'name': 'Idaho', 'code': 'ID', 'fips': '16'},
{'name': 'Illinois', 'code': 'IL', 'fips': '17'},
{'name': 'Indiana', 'code': 'IN', 'fips': '18'},
{'name': 'Iowa', 'code': 'IA', 'fips': '19'},
{'name': 'Kansas', 'code': 'KS', 'fips': '20'},
{'name': 'Kentucky', 'code': 'KY', 'fips': '21'},
{'name': 'Louisiana', 'code': 'LA', 'fips': '22'},
{'name': 'Maine', 'code': 'ME', 'fips': '23'},
{'name': 'Maryland', 'code': 'MD', 'fips': '24'},
{'name': 'Massachusetts', 'code': 'MA', 'fips': '25'},
{'name': 'Michigan', 'code': 'MI', 'fips': '26'},
{'name': 'Minnesota', 'code': 'MN', 'fips': '27'},
{'name': 'Mississippi', 'code': 'MS', 'fips': '28'},
{'name': 'Missouri', 'code': 'MO', 'fips': '29'},
{'name': 'Montana', 'code': 'MT', 'fips': '30'},
{'name': 'Nebraska', 'code': 'NE', 'fips': '31'},
{'name': 'Nevada', 'code': 'NV', 'fips': '32'},
{'name': 'New Hampshire', 'code': 'NH', 'fips': '33'},
{'name': 'New Jersey', 'code': 'NJ', 'fips': '34'},
{'name': 'New Mexico', 'code': 'NM', 'fips': '35'},
{'name': 'New York', 'code': 'NY', 'fips': '36'},
{'name': 'North Carolina', 'code': 'NC', 'fips': '37'},
{'name': 'North Dakota', 'code': 'ND', 'fips': '38'},
{'name': 'Ohio', 'code': 'OH', 'fips': '39'},
{'name': 'Oklahoma', 'code': 'OK', 'fips': '40'},
{'name': 'Oregon', 'code': 'OR', 'fips': '41'},
{'name': 'Pennsylvania', 'code': 'PA', 'fips': '42'},
{'name': 'Rhode Island', 'code': 'RI', 'fips': '44'},
{'name': 'South Carolina', 'code': 'SC', 'fips': '45'},
{'name': 'South Dakota', 'code': 'SD', 'fips': '46'},
{'name': 'Tennessee', 'code': 'TN', 'fips': '47'},
{'name': 'Texas', 'code': 'TX', 'fips': '48'},
{'name': 'Utah', 'code': 'UT', 'fips': '49'},
{'name': 'Vermont', 'code': 'VT', 'fips': '50'},
{'name': 'Virginia', 'code': 'VA', 'fips': '51'},
{'name': 'Washington', 'code': 'WA', 'fips': '53'},
{'name': 'West Virginia', 'code': 'WV', 'fips': '54'},
{'name': 'Wisconsin', 'code': 'WI', 'fips': '55'},
{'name': 'Wyoming', 'code': 'WY', 'fips': '56'},
{'name': 'American Samoa', 'code': 'AS', 'fips': '60'},
{'name': 'Guam', 'code': 'GU', 'fips': '66'},
{'name': 'Northern Mariana Islands', 'code': 'MP', 'fips': '69'},
{'name': 'Puerto Rico', 'code': 'PR', 'fips': '72'},
{'name': 'Virgin Islands', 'code': 'VI', 'fips': '78'},
]
"""
Attributes of US states and territories.
* `name` (str): Full name.
* `code` (str): US Postal Service (USPS) two-letter alphabetic code.
* `fips` (int): Federal Information Processing Standard (FIPS) code.
"""
STANDARD_UTC_OFFSETS: Dict[str, str] = {
'Pacific/Honolulu': -10,
'America/Anchorage': -9,
'America/Los_Angeles': -8,
'America/Denver': -7,
'America/Chicago': -6,
'America/New_York': -5,
'America/Halifax': -4,
}
"""
Hour offset from Coordinated Universal Time (UTC) by time zone.
Time zones are canonical names (e.g. 'America/Denver') from tzdata
(https://www.iana.org/time-zones) mapped to their standard-time UTC offset.
"""
UTC_OFFSETS: Dict[str, int] = {
'HST': -10,
'AKST': -9,
'AKDT': -8,
'PST': -8,
'PDT': -7,
'MST': -7,
'MDT': -6,
'CST': -6,
'CDT': -5,
'EST': -5,
'EDT': -4,
'AST': -4,
'ADT': -3,
}
"""
Hour offset from Coordinated Universal Time (UTC) by time zone.
Time zones are either standard or daylight-savings time zone abbreviations (e.g. 'MST').
"""
# --- Helpers --- #
def lookup_state(state: Union[str, int]) -> dict:
    """
    Look up a US state or territory by one of its identifiers.
    Args:
        state: Full state name, two-letter USPS abbreviation, or FIPS code.
            String matching is case-insensitive.
    Returns:
        State identifiers (keys `name`, `code`, and `fips`).
    Examples:
        >>> lookup_state('alabama')
        {'name': 'Alabama', 'code': 'AL', 'fips': '01'}
        >>> lookup_state('AL')
        {'name': 'Alabama', 'code': 'AL', 'fips': '01'}
        >>> lookup_state(1)
        {'name': 'Alabama', 'code': 'AL', 'fips': '01'}
    """
    # Anything castable to an integer ("02", "2", 2.0, np.int64(2), ...)
    # is treated as a FIPS code.
    try:
        fips = int(state)
    except ValueError:
        # Not numeric: a two-character string is a USPS code, otherwise a name.
        key = 'code' if len(state) == 2 else 'name'
        index = {entry[key].lower(): entry for entry in STATES}
        return index[state.lower()]
    index = {entry['fips']: entry for entry in STATES}
    return index[str(fips).zfill(2)]
def local_to_utc(local: pd.Series, tz: Iterable, **kwargs: Any) -> pd.Series:
    """
    Convert local times to UTC.
    Args:
        local: Local times (tz-naive `datetime64[ns]`).
        tz: For each time, a timezone (see :meth:`DatetimeIndex.tz_localize`)
            or UTC offset in hours (`int` or `float`).
        kwargs: Optional arguments to :meth:`DatetimeIndex.tz_localize`.
    Returns:
        UTC times (tz-naive `datetime64[ns]`).
    Examples:
        >>> s = pd.Series([pd.Timestamp(2020, 1, 1), pd.Timestamp(2020, 1, 1)])
        >>> local_to_utc(s, [-7, -6])
        0   2020-01-01 07:00:00
        1   2020-01-01 06:00:00
        dtype: datetime64[ns]
        >>> local_to_utc(s, ['America/Denver', 'America/Chicago'])
        0   2020-01-01 07:00:00
        1   2020-01-01 06:00:00
        dtype: datetime64[ns]
    """
    def convert(group: pd.Series) -> pd.Series:
        # Numeric group labels are UTC offsets in hours; others are tz names.
        zone = group.name
        if isinstance(zone, (int, float)):
            zone = datetime.timezone(datetime.timedelta(hours=zone))
        return group.dt.tz_localize(zone, **kwargs).dt.tz_convert(None)

    # Group by timezone so each group can be localized with a single tz.
    return local.groupby(tz).transform(convert)
def utc_to_local(utc: pd.Series, tz: Iterable) -> pd.Series:
    """
    Convert UTC times to local.
    Args:
        utc: UTC times (tz-naive `datetime64[ns]` or `datetime64[ns, UTC]`).
        tz: For each time, a timezone (see :meth:`DatetimeIndex.tz_localize`)
            or UTC offset in hours (`int` or `float`).
    Returns:
        Local times (tz-naive `datetime64[ns]`).
    Examples:
        >>> s = pd.Series([pd.Timestamp(2020, 1, 1), pd.Timestamp(2020, 1, 1)])
        >>> utc_to_local(s, [-7, -6])
        0   2019-12-31 17:00:00
        1   2019-12-31 18:00:00
        dtype: datetime64[ns]
        >>> utc_to_local(s, ['America/Denver', 'America/Chicago'])
        0   2019-12-31 17:00:00
        1   2019-12-31 18:00:00
        dtype: datetime64[ns]
    """
    # Make the input tz-aware (UTC) if it is naive.
    if utc.dt.tz is None:
        utc = utc.dt.tz_localize('UTC')

    def convert(group: pd.Series) -> pd.Series:
        # Numeric group labels are UTC offsets in hours; others are tz names.
        zone = group.name
        if isinstance(zone, (int, float)):
            zone = datetime.timezone(datetime.timedelta(hours=zone))
        return group.dt.tz_convert(zone).dt.tz_localize(None)

    return utc.groupby(tz).transform(convert)
# --- Datasets: References --- #
def load_ventyx_hourly_state_demand(path: str) -> pd.DataFrame:
    """
    Read and format Ventyx hourly state-level demand.
    After manual corrections of the listed time zone, ambiguous time zone issues remain.
    Below is a list of transmission zones (by `Transmission Zone ID`)
    with one or more missing timestamps at transitions to or from daylight-savings:
    * 615253 (Indiana)
    * 615261 (Michigan)
    * 615352 (Wisconsin)
    * 615357 (Missouri)
    * 615377 (Saskatchewan)
    * 615401 (Minnesota, Wisconsin)
    * 615516 (Missouri)
    * 615529 (Oklahoma)
    * 615603 (Idaho, Washington)
    * 1836089 (California)
    Args:
        path: Path to the data file (published as 'state_level_load_2007_2018.csv').
    Returns:
        Dataframe with hourly state-level demand.
        * `state_id_fips`: FIPS code of US state.
        * `utc_datetime`: UTC time of the start of each hour.
        * `demand_mwh`: Hourly demand in MWh.
    """
    # Only the four columns used below are read from the (large) CSV.
    df = pd.read_csv(
        path,
        usecols=[
            'State/Province',
            'Local Datetime (Hour Ending)',
            'Time Zone',
            'Estimated State Load MW - Sum',
        ]
    )
    df.rename(columns={
        'State/Province': 'state',
        'Local Datetime (Hour Ending)': 'datetime',
        'Estimated State Load MW - Sum': 'demand_mwh',
        'Time Zone': 'tz',
    }, inplace=True)
    # Convert state name to FIPS codes and keep only data for US states
    # (Canadian provinces map to NaN and are dropped).
    fips = {x['name']: x['fips'] for x in STATES}
    df['state_id_fips'] = df['state'].map(fips)
    df = df[~df['state_id_fips'].isnull()]
    # Parse datetime
    df['datetime'] = pd.to_datetime(df['datetime'], format='%m/%d/%Y %H:%M')
    # Correct timezone errors: Wyoming is in the Mountain time zone,
    # but some rows report PST.
    mask = df['state'].eq('Wyoming') & df['tz'].eq('PST')
    df.loc[mask, 'tz'] = 'MST'
    # Sum by local time and timezone
    df = df.groupby(
        ['state_id_fips', 'datetime', 'tz'], as_index=False
    )['demand_mwh'].sum()
    # Convert local times to UTC
    df['utc_datetime'] = local_to_utc(df['datetime'], df['tz'].map(UTC_OFFSETS))
    # Sum by UTC time (a state can span several timezones)
    df = df.groupby(
        ['state_id_fips', 'utc_datetime'], as_index=False
    )['demand_mwh'].sum()
    # Roll back one hour to convert hour-ending to hour-starting
    df['utc_datetime'] -= pd.Timedelta(hours=1)
    return df
# --- Datasets: FERC 714 hourly demand --- #
def load_ferc714_hourly_demand_matrix(
    pudl_out: pudl.output.pudltabl.PudlTabl
) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """
    Read and format FERC 714 hourly demand into matrix form.
    Args:
        pudl_out: Used to access
            :meth:`pudl.output.pudltabl.PudlTabl.demand_hourly_pa_ferc714`.
    Returns:
        Hourly demand as a matrix with a `datetime` row index
        (e.g. '2006-01-01 00:00:00', ..., '2019-12-31 23:00:00')
        in local time ignoring daylight-savings,
        and a `respondent_id_ferc714` column index (e.g. 101, ..., 329).
        A second Dataframe lists the UTC offset in hours
        of each `respondent_id_ferc714` and reporting `year` (int).
    """
    demand = pudl_out.demand_hourly_pa_ferc714().copy()
    # Convert UTC to local time (ignoring daylight savings)
    demand['utc_offset'] = demand['timezone'].map(STANDARD_UTC_OFFSETS)
    demand['datetime'] = utc_to_local(demand['utc_datetime'], demand['utc_offset'])
    # Pivot to demand matrix: timestamps x respondents
    matrix = demand.pivot(
        index='datetime', columns='respondent_id_ferc714', values='demand_mwh'
    )
    # List timezone by year for each respondent
    demand['year'] = demand['report_date'].dt.year
    # NOTE(review): assumes each respondent reports a single timezone per
    # year -- `first()` silently keeps the first offset if there are several.
    utc_offset = demand.groupby(
        ['respondent_id_ferc714', 'year'], as_index=False
    )['utc_offset'].first()
    return matrix, utc_offset
def clean_ferc714_hourly_demand_matrix(df: pd.DataFrame) -> pd.DataFrame:
    """
    Detect and null anomalous values in FERC 714 hourly demand matrix.
    .. note::
        Takes about 10 minutes.
    Args:
        df: FERC 714 hourly demand matrix,
            as described in :func:`load_ferc714_hourly_demand_matrix`.
    Returns:
        Copy of `df` with anomalous values replaced by nulls.
    """
    series = pudl.analysis.timeseries_cleaning.Timeseries(df)
    # Flag anomalies, then export the timeseries with flagged values nulled.
    series.flag_ruggles()
    return series.to_dataframe(copy=False)
def filter_ferc714_hourly_demand_matrix(
    df: pd.DataFrame,
    min_data: int = 100,
    min_data_fraction: float = 0.9,
) -> pd.DataFrame:
    """
    Filter incomplete years from FERC 714 hourly demand matrix.
    Nulls respondent-years with too few data and
    drops respondents with no data across all years.
    Args:
        df: FERC 714 hourly demand matrix,
            as described in :func:`load_ferc714_hourly_demand_matrix`.
        min_data: Minimum number of non-null hours in a year.
        min_data_fraction: Minimum fraction of non-null hours between the first and last
            non-null hour in a year.
    Returns:
        Hourly demand matrix `df` modified in-place.
    """
    # Identify respondent-years where data coverage is below thresholds
    has_data = ~df.isnull()
    # Hours between the first and last non-null hour in each respondent-year
    coverage = (
        # Last timestamp with demand in year
        has_data[::-1].groupby(df.index.year[::-1]).idxmax() -
        # First timestamp with demand in year
        has_data.groupby(df.index.year).idxmax()
    ).apply(lambda x: 1 + x.dt.days * 24 + x.dt.seconds / 3600, axis=1)
    fraction = has_data.groupby(df.index.year).sum() / coverage
    short = coverage.lt(min_data)
    bad = fraction.gt(0) & fraction.lt(min_data_fraction)
    # Set all values in short or bad respondent-years to null
    mask = (short | bad).loc[df.index.year]
    mask.index = df.index
    df[mask] = np.nan
    # Report nulled respondent-years
    # (loop variable renamed so it no longer shadows the `mask` used above)
    for flags, msg in [
        (short, 'Nulled short respondent-years (below min_data)'),
        (bad, 'Nulled bad respondent-years (below min_data_fraction)'),
    ]:
        row, col = flags.values.nonzero()
        report = (
            pd.DataFrame({'id': flags.columns[col], 'year': flags.index[row]})
            .groupby('id')['year'].apply(lambda x: np.sort(x))
        )
        # Use None to disable the column-width limit: the -1 sentinel was
        # deprecated in pandas 1.0 and removed in later releases.
        with pd.option_context('display.max_colwidth', None):
            logger.info(f'{msg}:\n{report}')
    # Drop respondents with no data
    blank = df.columns[df.isnull().all()].tolist()
    df.drop(columns=blank, inplace=True)
    # Report dropped respondents (with no data)
    logger.info(f'Dropped blank respondents: {blank}')
    return df
def impute_ferc714_hourly_demand_matrix(df: pd.DataFrame) -> pd.DataFrame:
    """
    Impute null values in FERC 714 hourly demand matrix.
    Imputation is performed separately for each calendar year,
    using only the respondents reporting data in that year.
    .. note::
        Takes about 15 minutes.
    Args:
        df: FERC 714 hourly demand matrix,
            as described in :func:`load_ferc714_hourly_demand_matrix`.
    Returns:
        Copy of `df` with imputed values.
    """
    imputed = []
    for year, annual in df.groupby(df.index.year):
        logger.info(f'Imputing year {year}')
        # Restrict to respondents with at least some data this year
        reporting = annual.columns[~annual.isnull().all()]
        ts = pudl.analysis.timeseries_cleaning.Timeseries(annual[reporting])
        imputed.append(ts.to_dataframe(ts.impute(method='tnn'), copy=False))
    return pd.concat(imputed)
def melt_ferc714_hourly_demand_matrix(
    df: pd.DataFrame, tz: pd.DataFrame
) -> pd.DataFrame:
    """
    Melt FERC 714 hourly demand matrix to long format.
    Args:
        df: FERC 714 hourly demand matrix,
            as described in :func:`load_ferc714_hourly_demand_matrix`.
        tz: FERC 714 respondent time zones,
            as described in :func:`load_ferc714_hourly_demand_matrix`.
    Returns:
        Long-format hourly demand with columns
        `respondent_id_ferc714`, report `year` (int), `utc_datetime`, and `demand_mwh`.
    """
    # Matrix (timestamps x respondents) -> long format
    out = df.melt(value_name='demand_mwh', ignore_index=False).reset_index()
    # Attach each respondent-year's UTC offset and convert local times to UTC
    out['year'] = out['datetime'].dt.year
    out = out.merge(tz, on=['respondent_id_ferc714', 'year'])
    out['utc_datetime'] = local_to_utc(out['datetime'], out['utc_offset'])
    return out.drop(columns=['utc_offset', 'datetime'])
# --- Datasets: Counties --- #
def load_ferc714_county_assignments(
    pudl_out: pudl.output.pudltabl.PudlTabl
) -> pd.DataFrame:
    """
    Load FERC 714 county assignments.
    Args:
        pudl_out: PUDL database extractor.
    Returns:
        Dataframe with columns
        `respondent_id_ferc714`, report `year` (int), and `county_id_fips`.
    """
    respondents = pudl.output.ferc714.Respondents(pudl_out)
    columns = ['respondent_id_ferc714', 'county_id_fips', 'report_date']
    df = respondents.fipsify()[columns]
    # Drop rows where county is blank or a duplicate
    df = df[df['county_id_fips'].notnull()].drop_duplicates()
    # Report year instead of full report date
    df['year'] = df['report_date'].dt.year
    return df.drop(columns=['report_date'])
def load_counties(
    pudl_out: pudl.output.pudltabl.PudlTabl, pudl_settings: dict
) -> pd.DataFrame:
    """
    Load county attributes.
    Args:
        pudl_out: PUDL database extractor.
        pudl_settings: PUDL settings.
    Returns:
        Dataframe with columns `county_id_fips` and `population`.
    """
    # Census DP1 county layer: geoid10 is the county FIPS code,
    # dp0010001 is the total population.
    layer = pudl.output.censusdp1tract.get_layer(
        layer="county",
        pudl_settings=pudl_settings
    )
    counties = layer[["geoid10", "dp0010001"]]
    return counties.rename(
        columns={"geoid10": "county_id_fips", "dp0010001": "population"}
    )
# --- Allocation --- #
def load_eia861_state_total_sales(
    pudl_out: pudl.output.pudltabl.PudlTabl
) -> pd.DataFrame:
    """
    Read and format EIA 861 sales by state and year.
    Args:
        pudl_out: Used to access
            :meth:`pudl.output.pudltabl.PudlTabl.sales_eia861`.
    Returns:
        Dataframe with columns `state_id_fips`, `year`, `demand_mwh`.
    """
    sales = pudl_out.sales_eia861()
    sales = sales.groupby(['state', 'report_date'], as_index=False)['sales_mwh'].sum()
    # Report year instead of full report date
    sales['year'] = sales['report_date'].dt.year
    # Map state USPS abbreviations to FIPS codes
    code_to_fips = {x['code']: x['fips'] for x in STATES}
    sales['state_id_fips'] = sales['state'].map(code_to_fips)
    sales.rename(columns={'sales_mwh': 'demand_mwh'}, inplace=True)
    # Drop records with zero sales
    sales = sales[sales['demand_mwh'] > 0]
    return sales[['state_id_fips', 'year', 'demand_mwh']]
def predict_state_hourly_demand(
    demand: pd.DataFrame,
    counties: pd.DataFrame,
    assignments: pd.DataFrame,
    state_totals: pd.DataFrame = None,
    mean_overlaps: bool = False,
) -> pd.DataFrame:
    """
    Predict state hourly demand.
    Args:
        demand: Hourly demand timeseries, with columns
            `respondent_id_ferc714`, report `year`, `utc_datetime`, and `demand_mwh`.
        counties: Counties, with columns `county_id_fips` and `population`.
        assignments: County assignments for demand respondents,
            with columns `respondent_id_ferc714`, `year`, and `county_id_fips`.
        state_totals: Total annual demand by state,
            with columns `state_id_fips`, `year`, and `demand_mwh`.
            If provided, the predicted hourly demand is scaled to match these totals.
        mean_overlaps: Whether to mean the demands predicted for a county
            in cases when a county is assigned to multiple respondents.
            By default, demands are summed.
    Returns:
        Dataframe with columns
        `state_id_fips`, `utc_datetime`, `demand_mwh`, and
        (if `state_totals` was provided) `scaled_demand_mwh`.
    """
    # Respondent-years with non-zero total demand
    annual = demand.groupby(
        ['respondent_id_ferc714', 'year'], as_index=False
    )['demand_mwh'].sum()
    has_demand = annual[annual['demand_mwh'] > 0][['respondent_id_ferc714', 'year']]
    # State of each county (first two digits of the county FIPS code)
    counties = counties.copy()
    counties['state_id_fips'] = counties['county_id_fips'].str[:2]
    # Counties served by each respondent-year with demand
    merged = (
        assignments
        .merge(has_demand, on=['respondent_id_ferc714', 'year'])
        .merge(counties, on=['county_id_fips'])
    )
    # County weight: share of the respondent-year total population
    # TODO: Use more county attributes in the calculation of their weights
    pop_totals = merged.groupby(
        ['respondent_id_ferc714', 'year']
    )['population'].transform('sum')
    merged['weight'] = merged['population'] / pop_totals
    if mean_overlaps:
        # Average (rather than sum) over respondents claiming the same county
        occurrences = merged.groupby(
            ['county_id_fips', 'year']
        )['county_id_fips'].transform('count')
        merged['weight'] = merged['weight'] / occurrences
    # Share of each respondent-year demand allocated to each state
    weights = merged.groupby(
        ['respondent_id_ferc714', 'year', 'state_id_fips'], as_index=False
    )['weight'].sum()
    # Allocate hourly demand to states
    out = weights.merge(demand, on=['respondent_id_ferc714', 'year'])
    out['demand_mwh'] = out['demand_mwh'] * out['weight']
    # Optionally scale estimates to match reported annual state totals
    if state_totals is not None:
        current = out.groupby(
            ['state_id_fips', 'year'], as_index=False
        )['demand_mwh'].sum()
        scale = current.merge(state_totals, on=['state_id_fips', 'year'])
        scale['scale'] = scale['demand_mwh_y'] / scale['demand_mwh_x']
        out = out.merge(scale[['state_id_fips', 'year', 'scale']])
        out['scaled_demand_mwh'] = out['demand_mwh'] * out['scale']
    # Sum demand by state by matching UTC time
    fields = [f for f in ['demand_mwh', 'scaled_demand_mwh'] if f in out]
    return out.groupby(['state_id_fips', 'utc_datetime'], as_index=False)[fields].sum()
def plot_demand_timeseries(
    a: pd.DataFrame,
    b: pd.DataFrame = None,
    window: int = 168,
    title: str = None,
    path: str = None,
) -> None:
    """
    Make a timeseries plot of predicted and reference demand.
    Args:
        a: Predicted demand with columns `utc_datetime` and any of
            `demand_mwh` (in grey) and `scaled_demand_mwh` (in orange).
        b: Reference demand with columns `utc_datetime` and `demand_mwh` (in red).
        window: Width of window (in rows) to use to compute rolling means,
            or `None` to plot raw values.
        title: Plot title.
        path: Plot path. If provided, the figure is saved to file and closed.
    """
    plt.figure(figsize=(16, 8))
    # Predicted series (raw and scaled, whichever are present)
    for column, color in (('demand_mwh', 'grey'), ('scaled_demand_mwh', 'orange')):
        if column in a:
            values = a[column].rolling(window).mean() if window else a[column]
            plt.plot(
                a['utc_datetime'], values, color=color, alpha=0.5,
                label=f'Predicted ({column})',
            )
    # Reference series
    if b is not None:
        values = b['demand_mwh'].rolling(window).mean() if window else b['demand_mwh']
        plt.plot(
            b['utc_datetime'], values, color='red', alpha=0.5,
            label='Reference (demand_mwh)',
        )
    if title:
        plt.title(title)
    plt.ylabel('Demand (MWh)')
    plt.legend()
    if path:
        plt.savefig(path, bbox_inches='tight')
        plt.close()
def plot_demand_scatter(
    a: pd.DataFrame,
    b: pd.DataFrame,
    title: str = None,
    path: str = None,
) -> None:
    """
    Make a scatter plot comparing predicted and reference demand.
    Args:
        a: Predicted demand with columns `utc_datetime` and any of
            `demand_mwh` (in grey) and `scaled_demand_mwh` (in orange).
        b: Reference demand with columns `utc_datetime` and `demand_mwh`.
            Every element in `utc_datetime` must match the one in `a`.
        title: Plot title.
        path: Plot path. If provided, the figure is saved to file and closed.
    Raises:
        ValueError: Datetime columns do not match.
    """
    if not a['utc_datetime'].equals(b['utc_datetime']):
        raise ValueError('Datetime columns do not match')
    plt.figure(figsize=(8, 8))
    plt.gca().set_aspect('equal')
    # 1:1 line for visual reference
    plt.axline((0, 0), (1, 1), linestyle=':', color='grey')
    for column, color in (('demand_mwh', 'grey'), ('scaled_demand_mwh', 'orange')):
        if column in a:
            plt.scatter(
                b['demand_mwh'], a[column], c=color, s=0.1, alpha=0.5,
                label=f'Prediction ({column})',
            )
    if title:
        plt.title(title)
    plt.xlabel('Reference (MWh)')
    plt.ylabel('Predicted (MWh)')
    plt.legend()
    if path:
        plt.savefig(path, bbox_inches='tight')
        plt.close()
def compare_state_demand(
    a: pd.DataFrame, b: pd.DataFrame, scaled: bool = True
) -> pd.DataFrame:
    """
    Compute statistics comparing predicted and reference demand.
    Statistics are computed for each year.
    Args:
        a: Predicted demand with columns `utc_datetime` and either
            `demand_mwh` (if `scaled=False`) or `scaled_demand_mwh` (if `scaled=True`).
        b: Reference demand with columns `utc_datetime` and `demand_mwh`.
            Every element in `utc_datetime` must match the one in `a`.
    Returns:
        Dataframe with columns `year`,
        `rmse` (root mean square error), and `mae` (mean absolute error).
    Raises:
        ValueError: Datetime columns do not match.
    """
    if not a['utc_datetime'].equals(b['utc_datetime']):
        raise ValueError('Datetime columns do not match')
    field = 'scaled_demand_mwh' if scaled else 'demand_mwh'
    df = pd.DataFrame({
        'year': a['utc_datetime'].dt.year,
        'diff': a[field] - b['demand_mwh'],
    })
    # Use named aggregation: passing a dict to SeriesGroupBy.agg for renaming
    # was removed in pandas 1.0 (raises SpecificationError).
    return (
        df.groupby('year')['diff']
        .agg(
            rmse=lambda x: np.sqrt(np.mean(x ** 2)),
            mae=lambda x: np.mean(np.abs(x)),
        )
        .reset_index()
    )
# --- Parse Command Line Args --- #
def parse_command_line(argv):
    """Skeletal command line argument parser to provide a help message.
    Args:
        argv: Command-line arguments, with the program name at index 0.
    Returns:
        An (empty) :class:`argparse.Namespace`.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    arguments = argv[1:]
    return parser.parse_args(arguments)
# --- Example usage --- #
def main():
    """Predict state demand.
    End-to-end pipeline: load and clean FERC 714 hourly demand, allocate it
    to states using county populations, scale to EIA 861 annual state totals,
    and write results and diagnostics under PUDL_DIR/local/state-demand/.
    """
    # --- Connect to PUDL logger --- #
    logger = logging.getLogger('pudl')
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        '%(asctime)s [%(levelname)8s] %(name)s:%(lineno)s %(message)s'
    ))
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    # --- Parse command line args --- #
    # No arguments are consumed; this only provides a --help message.
    _ = parse_command_line(sys.argv)
    # --- Connect to PUDL database --- #
    pudl_settings = pudl.workspace.setup.get_defaults()
    pudl_engine = sa.create_engine(pudl_settings["pudl_db"])
    pudl_out = pudl.output.pudltabl.PudlTabl(pudl_engine)
    # --- Prepare FERC 714 hourly demand --- #
    # NOTE: the clean and impute steps each take on the order of 10-15 minutes.
    df, tz = load_ferc714_hourly_demand_matrix(pudl_out)
    df = clean_ferc714_hourly_demand_matrix(df)
    df = filter_ferc714_hourly_demand_matrix(df, min_data=100, min_data_fraction=0.9)
    df = impute_ferc714_hourly_demand_matrix(df)
    demand = melt_ferc714_hourly_demand_matrix(df, tz)
    # --- Predict demand --- #
    counties = load_counties(pudl_out, pudl_settings)
    assignments = load_ferc714_county_assignments(pudl_out)
    state_totals = load_eia861_state_total_sales(pudl_out)
    prediction = predict_state_hourly_demand(
        demand,
        counties=counties,
        assignments=assignments,
        state_totals=state_totals,
        mean_overlaps=False
    )
    # --- Export results --- #
    local_dir = pathlib.Path(pudl_settings['data_dir']) / 'local'
    ventyx_path = local_dir / 'ventyx/state_level_load_2007_2018.csv'
    base_dir = local_dir / 'state-demand'
    base_dir.mkdir(parents=True, exist_ok=True)
    demand_path = base_dir / 'demand.csv'
    stats_path = base_dir / 'demand-stats.csv'
    timeseries_dir = base_dir / 'timeseries'
    timeseries_dir.mkdir(parents=True, exist_ok=True)
    scatter_dir = base_dir / 'scatter'
    scatter_dir.mkdir(parents=True, exist_ok=True)
    # Write predicted hourly state demand
    prediction.to_csv(
        demand_path, index=False, date_format='%Y%m%dT%H', float_format='%.1f'
    )
    # Load Ventyx as reference if available
    reference = None
    if ventyx_path.exists():
        reference = load_ventyx_hourly_state_demand(ventyx_path)
    # Plots and statistics
    stats = []
    for fips in prediction['state_id_fips'].unique():
        state = lookup_state(fips)
        # Filter demand by state
        a = prediction.query(f"state_id_fips == '{fips}'")
        b = None
        title = f'{state["fips"]}: {state["name"]} ({state["code"]})'
        plot_name = f'{state["fips"]}-{state["name"]}.png'
        if reference is not None:
            b = reference.query(f"state_id_fips == '{fips}'")
        # Save timeseries plot
        plot_demand_timeseries(
            a, b=b, window=168, title=title, path=timeseries_dir / plot_name
        )
        if b is None or b.empty:
            continue
        # Align predicted and reference demand on their common timestamps
        a = a.set_index('utc_datetime')
        b = b.set_index('utc_datetime')
        index = a.index.intersection(b.index)
        a = a.loc[index].reset_index()
        b = b.loc[index].reset_index()
        # Compute statistics
        stat = compare_state_demand(a, b, scaled=True)
        stat['state_id_fips'] = fips
        stats.append(stat)
        # Save scatter plot
        plot_demand_scatter(a, b=b, title=title, path=scatter_dir / plot_name)
    # Write statistics
    if reference is not None:
        pd.concat(stats, ignore_index=True).to_csv(
            stats_path, index=False, float_format='%.1f'
        )
# Entry point when run as a script; main() returns None, so the exit code is 0.
if __name__ == '__main__':
    sys.exit(main())
| [
"logging.getLogger",
"logging.StreamHandler",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"pandas.option_context",
"datetime.timedelta",
"pandas.to_datetime",
"argparse.ArgumentParser",
"pathlib.Path",
"matplotlib.pyplot.xlabel",
"sqlalchemy.create_engine",
"matplotlib.pyplot.plot",
"nump... | [((1566, 1593), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1583, 1593), False, 'import logging\n'), ((9996, 10123), 'pandas.read_csv', 'pd.read_csv', (['path'], {'usecols': "['State/Province', 'Local Datetime (Hour Ending)', 'Time Zone',\n 'Estimated State Load MW - Sum']"}), "(path, usecols=['State/Province', 'Local Datetime (Hour Ending)',\n 'Time Zone', 'Estimated State Load MW - Sum'])\n", (10007, 10123), True, 'import pandas as pd\n'), ((10670, 10725), 'pandas.to_datetime', 'pd.to_datetime', (["df['datetime']"], {'format': '"""%m/%d/%Y %H:%M"""'}), "(df['datetime'], format='%m/%d/%Y %H:%M')\n", (10684, 10725), True, 'import pandas as pd\n'), ((11321, 11342), 'pandas.Timedelta', 'pd.Timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (11333, 11342), True, 'import pandas as pd\n'), ((16319, 16337), 'pandas.concat', 'pd.concat', (['results'], {}), '(results)\n', (16328, 16337), True, 'import pandas as pd\n'), ((23739, 23766), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 8)'}), '(figsize=(16, 8))\n', (23749, 23766), True, 'import matplotlib.pyplot as plt\n'), ((24405, 24431), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Demand (MWh)"""'], {}), "('Demand (MWh)')\n", (24415, 24431), True, 'import matplotlib.pyplot as plt\n'), ((24436, 24448), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (24446, 24448), True, 'import matplotlib.pyplot as plt\n'), ((25314, 25340), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (25324, 25340), True, 'import matplotlib.pyplot as plt\n'), ((25379, 25434), 'matplotlib.pyplot.axline', 'plt.axline', (['(0, 0)', '(1, 1)'], {'linestyle': '""":"""', 'color': '"""grey"""'}), "((0, 0), (1, 1), linestyle=':', color='grey')\n", (25389, 25434), True, 'import matplotlib.pyplot as plt\n'), ((25797, 25826), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Reference (MWh)"""'], {}), "('Reference (MWh)')\n", (25807, 25826), 
True, 'import matplotlib.pyplot as plt\n'), ((25831, 25860), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Predicted (MWh)"""'], {}), "('Predicted (MWh)')\n", (25841, 25860), True, 'import matplotlib.pyplot as plt\n'), ((25865, 25877), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (25875, 25877), True, 'import matplotlib.pyplot as plt\n'), ((26868, 26958), 'pandas.DataFrame', 'pd.DataFrame', (["{'year': a['utc_datetime'].dt.year, 'diff': a[field] - b['demand_mwh']}"], {}), "({'year': a['utc_datetime'].dt.year, 'diff': a[field] - b[\n 'demand_mwh']})\n", (26880, 26958), True, 'import pandas as pd\n'), ((27313, 27357), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (27336, 27357), False, 'import argparse\n'), ((27523, 27548), 'logging.getLogger', 'logging.getLogger', (['"""pudl"""'], {}), "('pudl')\n", (27540, 27548), False, 'import logging\n'), ((27563, 27586), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (27584, 27586), False, 'import logging\n'), ((27969, 28011), 'sqlalchemy.create_engine', 'sa.create_engine', (["pudl_settings['pudl_db']"], {}), "(pudl_settings['pudl_db'])\n", (27985, 28011), True, 'import sqlalchemy as sa\n'), ((24008, 24097), 'matplotlib.pyplot.plot', 'plt.plot', (["a['utc_datetime']", 'y'], {'color': 'color', 'alpha': '(0.5)', 'label': 'f"""Predicted ({field})"""'}), "(a['utc_datetime'], y, color=color, alpha=0.5, label=\n f'Predicted ({field})')\n", (24016, 24097), True, 'import matplotlib.pyplot as plt\n'), ((24253, 24344), 'matplotlib.pyplot.plot', 'plt.plot', (["b['utc_datetime']", 'y'], {'color': '"""red"""', 'alpha': '(0.5)', 'label': '"""Reference (demand_mwh)"""'}), "(b['utc_datetime'], y, color='red', alpha=0.5, label=\n 'Reference (demand_mwh)')\n", (24261, 24344), True, 'import matplotlib.pyplot as plt\n'), ((24384, 24400), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (24393, 24400), True, 'import 
matplotlib.pyplot as plt\n'), ((24470, 24508), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'bbox_inches': '"""tight"""'}), "(path, bbox_inches='tight')\n", (24481, 24508), True, 'import matplotlib.pyplot as plt\n'), ((24517, 24528), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (24526, 24528), True, 'import matplotlib.pyplot as plt\n'), ((25574, 25675), 'matplotlib.pyplot.scatter', 'plt.scatter', (["b['demand_mwh']", 'a[field]'], {'c': 'color', 's': '(0.1)', 'alpha': '(0.5)', 'label': 'f"""Prediction ({field})"""'}), "(b['demand_mwh'], a[field], c=color, s=0.1, alpha=0.5, label=\n f'Prediction ({field})')\n", (25585, 25675), True, 'import matplotlib.pyplot as plt\n'), ((25776, 25792), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (25785, 25792), True, 'import matplotlib.pyplot as plt\n'), ((25899, 25937), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'bbox_inches': '"""tight"""'}), "(path, bbox_inches='tight')\n", (25910, 25937), True, 'import matplotlib.pyplot as plt\n'), ((25946, 25957), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (25955, 25957), True, 'import matplotlib.pyplot as plt\n'), ((27612, 27698), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s [%(levelname)8s] %(name)s:%(lineno)s %(message)s"""'], {}), "(\n '%(asctime)s [%(levelname)8s] %(name)s:%(lineno)s %(message)s')\n", (27629, 27698), False, 'import logging\n'), ((28860, 28899), 'pathlib.Path', 'pathlib.Path', (["pudl_settings['data_dir']"], {}), "(pudl_settings['data_dir'])\n", (28872, 28899), False, 'import pathlib\n'), ((15151, 15196), 'pandas.option_context', 'pd.option_context', (['"""display.max_colwidth"""', '(-1)'], {}), "('display.max_colwidth', -1)\n", (15168, 15196), True, 'import pandas as pd\n'), ((25345, 25354), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (25352, 25354), True, 'import matplotlib.pyplot as plt\n'), ((15116, 15126), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (15123, 15126), 
True, 'import numpy as np\n'), ((30897, 30932), 'pandas.concat', 'pd.concat', (['stats'], {'ignore_index': '(True)'}), '(stats, ignore_index=True)\n', (30906, 30932), True, 'import pandas as pd\n'), ((27073, 27087), 'numpy.sum', 'np.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (27079, 27087), True, 'import numpy as np\n'), ((27129, 27138), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (27135, 27138), True, 'import numpy as np\n'), ((15000, 15064), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': mask.columns[col], 'year': mask.index[row]}"], {}), "({'id': mask.columns[col], 'year': mask.index[row]})\n", (15012, 15064), True, 'import pandas as pd\n'), ((7752, 7784), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'x.name'}), '(hours=x.name)\n', (7770, 7784), False, 'import datetime\n'), ((8854, 8886), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'x.name'}), '(hours=x.name)\n', (8872, 8886), False, 'import datetime\n')] |
"""
pysteps.timeseries.autoregression
=================================
Methods related to autoregressive AR(p) models.
.. autosummary::
:toctree: ../generated/
adjust_lag2_corrcoef1
adjust_lag2_corrcoef2
ar_acf
estimate_ar_params_ols
estimate_ar_params_ols_localized
estimate_ar_params_yw
estimate_ar_params_yw_localized
estimate_var_params_ols
estimate_var_params_ols_localized
estimate_var_params_yw
iterate_ar_model
iterate_var_model
"""
import numpy as np
from scipy.special import binom
from scipy import linalg as la
from scipy import ndimage
def adjust_lag2_corrcoef1(gamma_1, gamma_2):
    """Clamp the lag-2 temporal autocorrelation coefficient into the range
    that yields a stationary AR(2) process when the parameters are estimated
    from the Yule-Walker equations.

    Parameters
    ----------
    gamma_1 : float
        Lag-1 temporal autocorrelation coeffient.
    gamma_2 : float
        Lag-2 temporal autocorrelation coeffient.

    Returns
    -------
    out : float
        The adjusted lag-2 correlation coefficient.
    """
    # Stationarity requires gamma_2 > 2*gamma_1^2 - 1; the small epsilon
    # keeps the value strictly inside the admissible region.
    lower_bound = 2 * gamma_1 * gamma_1 - 1 + 1e-10
    upper_bound = 1 - 1e-10
    return np.minimum(np.maximum(gamma_2, lower_bound), upper_bound)
def adjust_lag2_corrcoef2(gamma_1, gamma_2):
    """Adjust the lag-2 temporal autocorrelation coefficient so that the
    AR(2) process estimated from the Yule-Walker equations is stationary.
    This variant enforces two lower bounds derived from the stationarity
    region of the (gamma_1, gamma_2) plane.

    Parameters
    ----------
    gamma_1 : float
        Lag-1 temporal autocorrelation coeffient.
    gamma_2 : float
        Lag-2 temporal autocorrelation coeffient.

    Returns
    -------
    out : float
        The adjusted lag-2 correlation coefficient.
    """
    # First bound depends on both input coefficients; the second depends on
    # gamma_1 only. Taking the running maximum reproduces the sequential
    # clamping of the original formulation.
    bound_a = 2 * gamma_1 * gamma_2 - 1
    bound_b = (3 * gamma_1 ** 2 - 2 + 2 * (1 - gamma_1 ** 2) ** 1.5) / gamma_1 ** 2
    return np.maximum(np.maximum(gamma_2, bound_a), bound_b)
def ar_acf(gamma, n=None):
    """Compute theoretical autocorrelation function (ACF) from the AR(p) model
    with lag-l, l=1,2,...,p temporal autocorrelation coefficients.

    Parameters
    ----------
    gamma : array-like
        Array of length p containing the lag-l, l=1,2,...p, temporal
        autocorrelation coefficients.
        The correlation coefficients are assumed to be in ascending
        order with respect to time lag.
    n : int
        Desired length of ACF array. Must be greater than len(gamma).

    Returns
    -------
    out : list
        List containing the ACF values.
    """
    ar_order = len(gamma)
    if n == ar_order or n is None:
        return gamma
    elif n < ar_order:
        raise ValueError(
            "n=%i, but must be larger than the order of the AR process %i"
            % (n, ar_order)
        )
    # AR coefficients without the trailing innovation-term parameter.
    phi = estimate_ar_params_yw(gamma)[:-1]
    # Work on a plain list: the previous gamma.copy() kept the input type,
    # so an ndarray input crashed on acf.append() below.
    acf = list(gamma)
    for t in range(0, n - ar_order):
        # Most recent p ACF values, most recent first, to align with phi.
        gammas = np.array(acf[t : t + ar_order][::-1])
        # Yule-Walker recursion for the next ACF value.
        acf.append(np.sum(gammas * phi))
    return acf
def estimate_ar_params_ols(
    x, p, d=0, check_stationarity=True, include_constant_term=False, h=0, lam=0.0
):
    r"""Fit an autoregressive AR(p) model

    :math:`x_{k+1}=c+\phi_1 x_k+\phi_2 x_{k-1}+\dots+\phi_p x_{k-p}+\phi_{p+1}\epsilon`

    with ordinary least squares (OLS). For :math:`d\geq 1` the parameters are
    estimated from the d times differenced series and mapped back to the
    integrated model.

    Parameters
    ----------
    x : array_like
        Array of shape (n,...) with a time series of length n=p+d+h+1. The
        trailing dimensions (the samples) are flattened.
    p : int
        The order of the model.
    d : {0,1}
        The order of differencing to apply to the time series.
    check_stationarity : bool
        Check the stationarity of the estimated model.
    include_constant_term : bool
        Include the constant term :math:`c` to the model.
    h : int
        If h>0, use a history of length h in addition to the minimal number
        of time steps n=p+d+1.
    lam : float
        If lam>0, regularize the regression with a ridge penalty.

    Returns
    -------
    out : list
        The estimated parameters. If include_constant_term is True, the
        constant term :math:`c` is prepended to the list.

    Notes
    -----
    The innovation term parameter :math:`\phi_{p+1}` is only estimated for
    p<=2; otherwise it is set to zero.
    """
    n = x.shape[0]
    if n != p + d + h + 1:
        raise ValueError(
            "n = %d, p = %d, d = %d, h = %d, but n = p+d+h+1 = %d required"
            % (n, p, d, h, p + d + h + 1)
        )
    if x.ndim > 1:
        # Flatten the sample dimensions.
        x = x.reshape((n, np.prod(x.shape[1:])))
    if d not in (0, 1):
        raise ValueError("d = %d, but 0 or 1 required" % d)
    if d == 1:
        x = np.diff(x, axis=0)
        n -= d
    targets = x[p:, :]
    # NOTE(review): for h > 0 the targets have h+1 rows while only one
    # regressor column per sample is built below — verify the intended
    # handling of the history parameter h.
    regressors = []
    for sample in range(x.shape[1]):
        for t in range(p - 1, n - 1 - h):
            row = np.hstack([x[t - lag, sample] for lag in range(p)])
            if include_constant_term:
                row = np.hstack([[1], row])
            regressors.append(row)
    Z = np.column_stack(regressors)
    # Ridge-regularized normal equations: b = X Z^T (Z Z^T + lam I)^-1.
    gram = np.dot(Z, Z.T) + lam * np.eye(Z.shape[0])
    b = np.dot(np.dot(targets, Z.T), np.linalg.inv(gram)).flatten()
    if include_constant_term:
        c = b[0]
        phi = list(b[1:])
    else:
        phi = list(b)
    # Closed-form innovation parameter, available for p <= 2 only.
    if p == 1:
        phi_pert = np.sqrt(1.0 - phi[0] * phi[0])
    elif p == 2:
        phi_pert = np.sqrt(
            (1.0 + phi[1]) * ((1.0 - phi[1]) ** 2.0 - phi[0] ** 2.0) / (1.0 - phi[1])
        )
    else:
        phi_pert = 0.0
    if check_stationarity and not test_ar_stationarity(phi):
        # The error message mentions estimate_ar_params_yw in the original
        # code; preserved verbatim.
        raise RuntimeError(
            "Error in estimate_ar_params_yw: " "nonstationary AR(p) process"
        )
    if d == 1:
        phi_out = _compute_differenced_model_params(phi, p, 1, 1)
    else:
        phi_out = phi
    phi_out.append(phi_pert)
    if include_constant_term:
        phi_out.insert(0, c)
    return phi_out
def estimate_ar_params_ols_localized(
    x,
    p,
    window_radius,
    d=0,
    include_constant_term=False,
    h=0,
    lam=0.0,
    window="gaussian",
):
    r"""Estimate the parameters of a localized AR(p) model

    :math:`x_{k+1,i}=c_i+\phi_{1,i}x_{k,i}+\phi_{2,i}x_{k-1,i}+\dots+\phi_{p,i}x_{k-p,i}+\phi_{p+1,i}\epsilon`

    by using ordinary least squares (OLS), where :math:`i` denote spatial
    coordinates with arbitrary dimension. If :math:`d\geq 1`, the parameters
    are estimated for a d times differenced time series that is integrated back
    to the original one by summation of the differences.

    Parameters
    ----------
    x : array_like
        Array of shape (n,...) containing a time series of length n=p+d+h+1.
        The remaining dimensions are flattened. The rows and columns of x
        represent time steps and samples, respectively.
    p : int
        The order of the model.
    window_radius : float
        Radius of the moving window. If window is 'gaussian', window_radius is
        the standard deviation of the Gaussian filter. If window is 'uniform',
        the size of the window is 2*window_radius+1.
    d : {0,1}
        The order of differencing to apply to the time series.
    include_constant_term : bool
        Include the constant term :math:`c_i` to the model.
    h : int
        If h>0, the fitting is done by using a history of length h in addition
        to the minimal required number of time steps n=p+d+1.
    lam : float
        If lam>0, the regression is regularized by adding a penalty term
        (i.e. ridge regression).
    window : {"gaussian", "uniform"}
        The weight function to use for the moving window. Applicable if
        window_radius < np.inf. Defaults to 'gaussian'.

    Returns
    -------
    out : list
        List of length p+1 containing the AR(p) parameter fields for for the
        lag-p terms and the innovation term. The parameter fields have the same
        shape as the elements of gamma. Nan values are assigned, where the
        sample size for estimating the parameters is too small. If
        include_constant_term is True, the constant term :math:`c_i` is added
        to the beginning of the list.

    Notes
    -----
    Estimation of the innovation term parameter :math:`\phi_{p+1}` is currently
    implemented for p<=2. If p > 2, :math:`\phi_{p+1}` is set to a zero array.
    """
    n = x.shape[0]
    # Exactly p lags + d differencing steps + h history steps + 1 target.
    if n != p + d + h + 1:
        raise ValueError(
            "n = %d, p = %d, d = %d, h = %d, but n = p+d+h+1 = %d required"
            % (n, p, d, h, p + d + h + 1)
        )
    # NOTE(review): unlike estimate_ar_params_ols, d is not validated here —
    # values other than 0/1 are silently mishandled.
    if d == 1:
        x = np.diff(x, axis=0)
        n -= d
    # Select the spatial smoothing kernel that realizes the moving window.
    if window == "gaussian":
        convol_filter = ndimage.gaussian_filter
    else:
        convol_filter = ndimage.uniform_filter
    # For the uniform filter the argument is the full window width; for the
    # Gaussian filter it is the standard deviation.
    if window == "uniform":
        window_size = 2 * window_radius + 1
    else:
        window_size = window_radius
    # XZ accumulates the spatially windowed cross moments between the target
    # x[p+j] and each lagged regressor (the right-hand side of the local
    # normal equations).
    XZ = np.zeros(np.hstack([[p], x.shape[1:]]))
    for i in range(p):
        for j in range(h + 1):
            tmp = convol_filter(
                x[p + j, :] * x[p - 1 - i + j, :], window_size, mode="constant"
            )
            XZ[i, :] += tmp
    if include_constant_term:
        # Windowed sums of the targets, used for the intercept row.
        v = 0.0
        for i in range(h + 1):
            v += convol_filter(x[p + i, :], window_size, mode="constant")
        XZ = np.vstack([v[np.newaxis, :], XZ])
    # Z2 accumulates the windowed Gram matrix of the regressors; with the
    # constant term it gains one extra row/column for the intercept.
    if not include_constant_term:
        Z2 = np.zeros(np.hstack([[p, p], x.shape[1:]]))
        for i in range(p):
            for j in range(p):
                for k in range(h + 1):
                    tmp = convol_filter(
                        x[p - 1 - i + k, :] * x[p - 1 - j + k, :],
                        window_size,
                        mode="constant",
                    )
                    Z2[i, j, :] += tmp
    else:
        Z2 = np.zeros(np.hstack([[p + 1, p + 1], x.shape[1:]]))
        # Windowed count of contributing samples for the intercept entry.
        Z2[0, 0, :] = convol_filter(np.ones(x.shape[1:]), window_size, mode="constant")
        for i in range(p):
            for j in range(h + 1):
                tmp = convol_filter(x[p - 1 - i + j, :], window_size, mode="constant")
                Z2[0, i + 1, :] += tmp
                Z2[i + 1, 0, :] += tmp
        for i in range(p):
            for j in range(p):
                for k in range(h + 1):
                    tmp = convol_filter(
                        x[p - 1 - i + k, :] * x[p - 1 - j + k, :],
                        window_size,
                        mode="constant",
                    )
                    Z2[i, j, :] += tmp
    m = np.prod(x.shape[1:])
    phi = np.empty(np.hstack([[p], m]))
    if include_constant_term:
        c = np.empty(m)
    # Flatten the spatial dimensions so the normal equations can be solved
    # independently for each grid point.
    XZ = XZ.reshape(np.hstack([[XZ.shape[0]], m]))
    Z2 = Z2.reshape(np.hstack([[Z2.shape[0], Z2.shape[1]], m]))
    for i in range(m):
        try:
            # Ridge-regularized solve of the local normal equations.
            b = np.dot(XZ[:, i], np.linalg.inv(Z2[:, :, i] + lam * np.eye(Z2.shape[0])))
            if not include_constant_term:
                phi[:, i] = b
            else:
                phi[:, i] = b[1:]
                c[i] = b[0]
        except np.linalg.LinAlgError:
            # Singular local system (too few effective samples in window).
            phi[:, i] = np.nan
            if include_constant_term:
                c[i] = np.nan
    # Closed-form innovation parameter, available for p <= 2 only.
    if p == 1:
        phi_pert = np.sqrt(1.0 - phi[0, :] * phi[0, :])
    elif p == 2:
        phi_pert = np.sqrt(
            (1.0 + phi[1, :])
            * ((1.0 - phi[1, :]) ** 2.0 - phi[0, :] ** 2.0)
            / (1.0 - phi[1, :])
        )
    else:
        phi_pert = np.zeros(m)
    phi = list(phi.reshape(np.hstack([[phi.shape[0]], x.shape[1:]])))
    if d == 1:
        # Map the differenced-model parameters back to the integrated model.
        phi = _compute_differenced_model_params(phi, p, 1, 1)
    phi.append(phi_pert.reshape(x.shape[1:]))
    if include_constant_term:
        phi.insert(0, c.reshape(x.shape[1:]))
    return phi
def estimate_ar_params_yw(gamma, d=0, check_stationarity=True):
    r"""Solve the Yule-Walker equations to obtain the parameters of an AR(p)
    model

    :math:`x_{k+1}=\phi_1 x_k+\phi_2 x_{k-1}+\dots+\phi_p x_{k-p}+\phi_{p+1}\epsilon`

    from a given set of autocorrelation coefficients.

    Parameters
    ----------
    gamma : array_like
        Array of length p containing the lag-l temporal autocorrelation
        coefficients for l=1,2,...p, in ascending order with respect to
        time lag.
    d : {0,1}
        The order of differencing. If d=1, gamma is assumed to be computed
        from the differenced time series and the estimated parameters are
        mapped to the integrated ARI(p,d) model.
    check_stationarity : bool
        If True, a RuntimeError is raised when the estimated process is
        nonstationary.

    Returns
    -------
    out : ndarray
        Array of length p+1 containing the AR(p) parameters for the lag-p
        terms followed by the innovation term parameter.

    Notes
    -----
    To estimate the parameters of an integrated ARI(p,d) model, compute the
    correlation coefficients gamma by calling
    :py:func:`pysteps.timeseries.correlation.temporal_autocorrelation` with d>0.
    """
    if d not in (0, 1):
        raise ValueError("d = %d, but 0 or 1 required" % d)
    p = len(gamma)
    # Augmented correlation sequence: gamma_0 = 1 followed by the lags.
    g = np.hstack([[1.0], gamma])
    # Toeplitz system of the Yule-Walker equations.
    G = np.array([np.roll(g[:-1], shift) for shift in range(p)])
    phi = np.linalg.solve(G, g[1:].flatten())
    # A stationary AR(p) process requires all roots of the characteristic
    # polynomial to lie inside the unit circle.
    if check_stationarity and not test_ar_stationarity(phi):
        raise RuntimeError(
            "Error in estimate_ar_params_yw: " "nonstationary AR(p) process"
        )
    residual = 1.0
    for lag in range(p):
        residual -= gamma[lag] * phi[lag]
    phi_pert = np.sqrt(residual)
    # A negative residual has no real square root; fall back to zero.
    if not np.isfinite(phi_pert):
        phi_pert = 0.0
    if d == 1:
        phi = _compute_differenced_model_params(phi, p, 1, 1)
    phi_out = np.empty(len(phi) + 1)
    phi_out[:-1] = phi
    phi_out[-1] = phi_pert
    return phi_out
def estimate_ar_params_yw_localized(gamma, d=0):
    r"""Solve the Yule-Walker equations independently at each grid point to
    obtain the parameters of a localized AR(p) model

    :math:`x_{k+1,i}=\phi_{1,i}x_{k,i}+\phi_{2,i}x_{k-1,i}+\dots+\phi_{p,i}x_{k-p,i}+\phi_{p+1}\epsilon`

    where :math:`i` denote spatial coordinates with arbitrary dimension.

    Parameters
    ----------
    gamma : array_like
        List of lag-l temporal autocorrelation coefficient fields for
        l=1,2,...p, in ascending order with respect to time lag.
    d : {0,1}
        The order of differencing. If d=1, gamma is assumed to be computed
        from the differenced time series and the estimated parameters are
        mapped to the integrated model.

    Returns
    -------
    out : list
        List of length p+1 containing the AR(p) parameter fields for the
        lag-p terms and the innovation term, each with the same shape as the
        elements of gamma.

    Notes
    -----
    To estimate the parameters of an integrated ARI(p,d) model, compute the
    correlation coefficients gamma by calling
    :py:func:`pysteps.timeseries.correlation.temporal_autocorrelation` with d>0
    and window_radius<np.inf.
    """
    field_shape = gamma[0].shape
    for corr in gamma[1:]:
        if corr.shape != field_shape:
            raise ValueError(
                "the correlation coefficient fields gamma have mismatching shapes"
            )
    if d not in (0, 1):
        raise ValueError("d = %d, but 0 or 1 required" % d)
    p = len(gamma)
    num_pixels = np.prod(field_shape)
    gamma_flat = [corr.flatten() for corr in gamma]
    phi = np.empty((p, num_pixels))
    for px in range(num_pixels):
        # Per-pixel Yule-Walker system (same structure as the non-localized
        # estimator).
        g = np.hstack([[1.0], [gamma_flat[k][px] for k in range(p)]])
        G = np.array([np.roll(g[:-1], shift) for shift in range(p)])
        try:
            phi[:, px] = np.linalg.solve(G, g[1:].flatten())
        except np.linalg.LinAlgError:
            # Singular local system: mark the pixel as undefined.
            phi[:, px] = np.nan
    residual = 1.0
    for lag in range(p):
        residual = residual - gamma_flat[lag] * phi[lag]
    # Innovation parameter field; may contain nan where residual < 0.
    phi_pert = np.sqrt(residual)
    if d == 1:
        phi = _compute_differenced_model_params(phi, p, 1, 1)
    phi_out = np.empty((len(phi) + 1, num_pixels))
    phi_out[: len(phi), :] = phi
    phi_out[-1, :] = phi_pert
    return list(phi_out.reshape(np.hstack([[len(phi_out)], field_shape])))
def estimate_var_params_ols(
    x, p, d=0, check_stationarity=True, include_constant_term=False, h=0, lam=0.0
):
    r"""Estimate the parameters of a vector autoregressive VAR(p) model

    :math:`\mathbf{x}_{k+1}=\mathbf{c}+\mathbf{\Phi}_1\mathbf{x}_k+
    \mathbf{\Phi}_2\mathbf{x}_{k-1}+\dots+\mathbf{\Phi}_p\mathbf{x}_{k-p}+
    \mathbf{\Phi}_{p+1}\mathbf{\epsilon}`

    by using ordinary least squares (OLS). If :math:`d\geq 1`, the parameters
    are estimated for a d times differenced time series that is integrated back
    to the original one by summation of the differences.

    Parameters
    ----------
    x : array_like
        Array of shape (n, q, :) containing a time series of length n=p+d+h+1
        with q-dimensional variables. The remaining dimensions are flattened.
        The remaining dimensions starting from the third one represent the
        samples.
    p : int
        The order of the model.
    d : {0,1}
        The order of differencing to apply to the time series.
    check_stationarity : bool
        If True, the stationarity of the resulting VAR(p) process is tested. An
        exception is thrown if the process is not stationary.
    include_constant_term : bool
        Include the constant term :math:`\mathbf{c}` to the model.
    h : int
        If h>0, the fitting is done by using a history of length h in addition
        to the minimal required number of time steps n=p+d+1.
    lam : float
        If lam>0, the regression is regularized by adding a penalty term
        (i.e. ridge regression).

    Returns
    -------
    out : list
        The estimated parameter matrices :math:`\mathbf{\Phi}_1,\mathbf{\Phi}_2,
        \dots,\mathbf{\Phi}_{p+1}`. If include_constant_term is True, the
        constant term :math:`\mathbf{c}` is added to the beginning of the list.

    Notes
    -----
    Estimation of the innovation parameter :math:`\mathbf{\Phi}_{p+1}` is not
    currently implemented, and it is set to a zero matrix.
    """
    q = x.shape[1]
    n = x.shape[0]
    # Exactly p lags + d differencing steps + h history steps + 1 target.
    if n != p + d + h + 1:
        raise ValueError(
            "n = %d, p = %d, d = %d, h = %d, but n = p+d+h+1 = %d required"
            % (n, p, d, h, p + d + h + 1)
        )
    if d not in [0, 1]:
        raise ValueError("d = %d, but 0 or 1 required" % d)
    if d == 1:
        x = np.diff(x, axis=0)
        n -= d
    # Flatten the sample dimensions into a single trailing axis.
    x = x.reshape((n, q, np.prod(x.shape[2:])))
    # X stacks the target vectors (one q-column per sample and time step).
    X = []
    for i in range(x.shape[2]):
        for j in range(p + h, n):
            x_ = x[j, :, i]
            X.append(x_.reshape((q, 1)))
    X = np.hstack(X)
    # Z stacks the corresponding lagged-state regressor vectors.
    # NOTE(review): for h>0 the target x[p+h] is paired with regressors
    # ending at lag p-1, i.e. with a gap of h steps — confirm the intended
    # semantics of the history parameter.
    Z = []
    for i in range(x.shape[2]):
        for j in range(p - 1, n - 1 - h):
            z_ = np.vstack([x[j - k, :, i].reshape((q, 1)) for k in range(p)])
            if include_constant_term:
                z_ = np.vstack([[1], z_])
            Z.append(z_)
    Z = np.column_stack(Z)
    # Ridge-regularized OLS solution of the normal equations.
    B = np.dot(np.dot(X, Z.T), np.linalg.inv(np.dot(Z, Z.T) + lam * np.eye(Z.shape[0])))
    phi = []
    if include_constant_term:
        c = B[:, 0]
        for i in range(p):
            phi.append(B[:, i * q + 1 : (i + 1) * q + 1])
    else:
        for i in range(p):
            phi.append(B[:, i * q : (i + 1) * q])
    if check_stationarity:
        # Build the companion (block) matrix of the VAR(p) process and test
        # its spectral radius. The 0.999 threshold is slightly stricter than
        # the unit-circle criterion used by test_var_stationarity.
        M = np.zeros((p * q, p * q))
        for i in range(p):
            M[0:q, i * q : (i + 1) * q] = phi[i]
        for i in range(1, p):
            M[i * q : (i + 1) * q, (i - 1) * q : i * q] = np.eye(q, q)
        r, v = np.linalg.eig(M)
        if np.any(np.abs(r) > 0.999):
            raise RuntimeError(
                "Error in estimate_var_params_ols: " "nonstationary VAR(p) process"
            )
    if d == 1:
        # Map the differenced-model parameters back to the integrated model.
        phi = _compute_differenced_model_params(phi, p, q, 1)
    if include_constant_term:
        phi.insert(0, c)
    # Innovation parameter estimation is not implemented; append zeros.
    phi.append(np.zeros((q, q)))
    return phi
def estimate_var_params_ols_localized(
    x,
    p,
    window_radius,
    d=0,
    include_constant_term=False,
    h=0,
    lam=0.0,
    window="gaussian",
):
    r"""Estimate the parameters of a vector autoregressive VAR(p) model

    :math:`\mathbf{x}_{k+1,i}=\mathbf{c}_i+\mathbf{\Phi}_{1,i}\mathbf{x}_{k,i}+
    \mathbf{\Phi}_{2,i}\mathbf{x}_{k-1,i}+\dots+\mathbf{\Phi}_{p,i}
    \mathbf{x}_{k-p,i}+\mathbf{\Phi}_{p+1,i}\mathbf{\epsilon}`

    by using ordinary least squares (OLS), where :math:`i` denote spatial
    coordinates with arbitrary dimension. If :math:`d\geq 1`, the parameters
    are estimated for a d times differenced time series that is integrated back
    to the original one by summation of the differences.

    Parameters
    ----------
    x : array_like
        Array of shape (n, q, :) containing a time series of length n=p+d+h+1
        with q-dimensional variables. The remaining dimensions are flattened.
        The remaining dimensions starting from the third one represent the
        samples.
    p : int
        The order of the model.
    window_radius : float
        Radius of the moving window. If window is 'gaussian', window_radius is
        the standard deviation of the Gaussian filter. If window is 'uniform',
        the size of the window is 2*window_radius+1.
    d : {0,1}
        The order of differencing to apply to the time series.
    include_constant_term : bool
        Include the constant term :math:`\mathbf{c}` to the model.
    h : int
        If h>0, the fitting is done by using a history of length h in addition
        to the minimal required number of time steps n=p+d+1.
    lam : float
        If lam>0, the regression is regularized by adding a penalty term
        (i.e. ridge regression).
    window : {"gaussian", "uniform"}
        The weight function to use for the moving window. Applicable if
        window_radius < np.inf. Defaults to 'gaussian'.

    Returns
    -------
    out : list
        The estimated parameter matrices :math:`\mathbf{\Phi}_{1,i},
        \mathbf{\Phi}_{2,i},\dots,\mathbf{\Phi}_{p+1,i}`. If
        include_constant_term is True, the constant term :math:`\mathbf{c}_i` is
        added to the beginning of the list. Each element of the list is a matrix
        of shape (x.shape[2:], q, q).

    Notes
    -----
    Estimation of the innovation parameter :math:`\mathbf{\Phi}_{p+1}` is not
    currently implemented, and it is set to a zero matrix.
    """
    q = x.shape[1]
    n = x.shape[0]
    # Exactly p lags + d differencing steps + h history steps + 1 target.
    if n != p + d + h + 1:
        raise ValueError(
            "n = %d, p = %d, d = %d, h = %d, but n = p+d+h+1 = %d required"
            % (n, p, d, h, p + d + h + 1)
        )
    # NOTE(review): unlike estimate_var_params_ols, d is not validated here.
    if d == 1:
        x = np.diff(x, axis=0)
        n -= d
    # Select the spatial smoothing kernel that realizes the moving window.
    if window == "gaussian":
        convol_filter = ndimage.gaussian_filter
    else:
        convol_filter = ndimage.uniform_filter
    # For the uniform filter the argument is the full window width; for the
    # Gaussian filter it is the standard deviation.
    if window == "uniform":
        window_size = 2 * window_radius + 1
    else:
        window_size = window_radius
    # XZ accumulates the spatially windowed cross moments between the target
    # components x[p+l, i] and the lagged regressor components (the
    # right-hand side of the local normal equations).
    XZ = np.zeros(np.hstack([[q, p * q], x.shape[2:]]))
    for i in range(q):
        for k in range(p):
            for j in range(q):
                for l in range(h + 1):
                    tmp = convol_filter(
                        x[p + l, i, :] * x[p - 1 - k + l, j, :],
                        window_size,
                        mode="constant",
                    )
                    XZ[i, k * q + j, :] += tmp
    if include_constant_term:
        # Windowed sums of the target components, used for the intercept.
        v = np.zeros(np.hstack([[q], x.shape[2:]]))
        for i in range(q):
            for j in range(h + 1):
                v[i, :] += convol_filter(x[p + j, i, :], window_size, mode="constant")
        XZ = np.hstack([v[:, np.newaxis, :], XZ])
    # Z2 accumulates the windowed Gram matrix of the regressors; with the
    # constant term it gains one extra row/column for the intercept.
    if not include_constant_term:
        Z2 = np.zeros(np.hstack([[p * q, p * q], x.shape[2:]]))
        for i in range(p):
            for j in range(q):
                for k in range(p):
                    for l in range(q):
                        for m in range(h + 1):
                            tmp = convol_filter(
                                x[p - 1 - i + m, j, :] * x[p - 1 - k + m, l, :],
                                window_size,
                                mode="constant",
                            )
                            Z2[i * q + j, k * q + l, :] += tmp
    else:
        Z2 = np.zeros(np.hstack([[p * q + 1, p * q + 1], x.shape[2:]]))
        # Windowed count of contributing samples for the intercept entry.
        Z2[0, 0, :] = convol_filter(np.ones(x.shape[2:]), window_size, mode="constant")
        for i in range(p):
            for j in range(q):
                for k in range(h + 1):
                    tmp = convol_filter(
                        x[p - 1 - i + k, j, :], window_size, mode="constant"
                    )
                    Z2[0, i * q + j + 1, :] += tmp
                    Z2[i * q + j + 1, 0, :] += tmp
        for i in range(p):
            for j in range(q):
                for k in range(p):
                    for l in range(q):
                        for m in range(h + 1):
                            tmp = convol_filter(
                                x[p - 1 - i + m, j, :] * x[p - 1 - k + m, l, :],
                                window_size,
                                mode="constant",
                            )
                            Z2[i * q + j + 1, k * q + l + 1, :] += tmp
    m = np.prod(x.shape[2:])
    if include_constant_term:
        c = np.empty((m, q))
    # Flatten the spatial dimensions so the normal equations can be solved
    # independently for each grid point.
    XZ = XZ.reshape((XZ.shape[0], XZ.shape[1], m))
    Z2 = Z2.reshape((Z2.shape[0], Z2.shape[1], m))
    phi = np.empty((p, m, q, q))
    for i in range(m):
        try:
            # Ridge-regularized solve of the local normal equations.
            B = np.dot(
                XZ[:, :, i], np.linalg.inv(Z2[:, :, i] + lam * np.eye(Z2.shape[0]))
            )
            for k in range(p):
                if not include_constant_term:
                    phi[k, i, :, :] = B[:, k * q : (k + 1) * q]
                else:
                    phi[k, i, :, :] = B[:, k * q + 1 : (k + 1) * q + 1]
            if include_constant_term:
                c[i, :] = B[:, 0]
        except np.linalg.LinAlgError:
            # Singular local system (too few effective samples in window).
            phi[:, i, :, :] = np.nan
            if include_constant_term:
                c[i, :] = np.nan
    # Restore the original spatial dimensions around the q x q matrices.
    phi_out = [
        phi[i].reshape(np.hstack([x.shape[2:], [q, q]])) for i in range(len(phi))
    ]
    if d == 1:
        # Map the differenced-model parameters back to the integrated model.
        phi_out = _compute_differenced_model_params(phi_out, p, q, 1)
    # Innovation parameter estimation is not implemented; append zeros.
    phi_out.append(np.zeros(phi_out[0].shape))
    if include_constant_term:
        phi_out.insert(0, c.reshape(np.hstack([x.shape[2:], [q]])))
    return phi_out
def estimate_var_params_yw(gamma, d=0, check_stationarity=True):
    r"""Solve the multivariate Yule-Walker equations to obtain the parameters
    of a VAR(p) model

    :math:`\mathbf{x}_{k+1}=\mathbf{\Phi}_1\mathbf{x}_k+
    \mathbf{\Phi}_2\mathbf{x}_{k-1}+\dots+\mathbf{\Phi}_p\mathbf{x}_{k-p}+
    \mathbf{\Phi}_{p+1}\mathbf{\epsilon}`

    from the given correlation matrices
    :math:`\mathbf{\Gamma}_0,\mathbf{\Gamma}_1,\dots,\mathbf{\Gamma}_n`, n=p.

    Parameters
    ----------
    gamma : list
        List of correlation matrices; use
        :py:func:`pysteps.timeseries.correlation.temporal_autocorrelation_multivariate`
        with window_radius=np.inf to obtain them.
    d : {0,1}
        The order of differencing. If d=1, gamma is assumed to be computed
        from the differenced time series and the estimated parameters are
        mapped to the integrated model.
    check_stationarity : bool
        If True, a RuntimeError is raised when the estimated VAR(p) process
        is nonstationary.

    Returns
    -------
    out : list
        List of VAR(p) coefficient matrices :math:`\mathbf{\Phi}_1,
        \mathbf{\Phi}_2,\dots\mathbf{\Phi}_{p+1}`, where the last matrix
        corresponds to the innovation term.

    Notes
    -----
    Estimation of the innovation parameter :math:`\mathbf{\Phi}_{p+1}` is not
    currently implemented, and it is set to a zero matrix.
    """
    p = len(gamma) - 1
    q = gamma[0].shape[0]
    for idx, corr in enumerate(gamma):
        if corr.shape[0] != q or corr.shape[1] != q:
            raise ValueError(
                "dimension mismatch: gamma[%d].shape=%s, but (%d,%d) expected"
                % (idx, str(corr.shape), q, q)
            )
    if d not in (0, 1):
        raise ValueError("d = %d, but 0 or 1 required" % d)
    # Assemble the block-Toeplitz Yule-Walker system.
    A = np.empty((p * q, p * q))
    for row in range(p):
        for col in range(p):
            block = gamma[abs(row - col)]
            if row > col:
                block = block.T
            A[row * q : (row + 1) * q, col * q : (col + 1) * q] = block
    rhs = np.vstack([gamma[k].T for k in range(1, p + 1)])
    sol = np.linalg.solve(A, rhs)
    phi = [sol[k * q : (k + 1) * q, :] for k in range(p)]
    if check_stationarity and not test_var_stationarity(phi):
        raise RuntimeError(
            "Error in estimate_var_params_yw: " "nonstationary VAR(p) process"
        )
    if d == 1:
        phi = _compute_differenced_model_params(phi, p, q, 1)
    # Innovation parameter estimation is not implemented; append zeros.
    phi.append(np.zeros(phi[0].shape))
    return phi
def estimate_var_params_yw_localized(gamma, d=0):
    r"""Estimate the parameters of a vector autoregressive VAR(p) model

    :math:`\mathbf{x}_{k+1,i}=\mathbf{\Phi}_{1,i}\mathbf{x}_{k,i}+
    \mathbf{\Phi}_{2,i}\mathbf{x}_{k-1,i}+\dots+\mathbf{\Phi}_{p,i}
    \mathbf{x}_{k-p,i}+\mathbf{\Phi}_{p+1,i}\mathbf{\epsilon}`

    from the Yule-Walker equations by using the given correlation matrices,
    where :math:`i` denote spatial coordinates with arbitrary dimension.

    Parameters
    ----------
    gamma : list
        List of correlation matrix fields of shape (spatial dims..., q, q).
        To obtain these matrices, use
        :py:func:`pysteps.timeseries.correlation.temporal_autocorrelation_multivariate`
        with window_radius<np.inf.
    d : {0,1}
        The order of differencing. If d=1, the correlation coefficients gamma
        are assumed to be computed from the differenced time series, which is
        also done for the resulting parameter estimates.

    Returns
    -------
    out : list
        The estimated parameter fields :math:`\mathbf{\Phi}_{1,i},
        \mathbf{\Phi}_{2,i},\dots,\mathbf{\Phi}_{p+1,i}`. Each element of the
        list has the same shape as those in gamma.

    Notes
    -----
    Estimation of the innovation parameter :math:`\mathbf{\Phi}_{p+1}` is not
    currently implemented, and it is set to a zero matrix.
    """
    p = len(gamma) - 1
    # The two trailing dimensions hold the q x q correlation matrices; using
    # shape[-1] (instead of the previous shape[2]) keeps this correct for any
    # number of spatial dimensions.
    q = gamma[0].shape[-1]
    n = np.prod(gamma[0].shape[:-2])
    for i in range(1, len(gamma)):
        if gamma[i].shape != gamma[0].shape:
            raise ValueError(
                "dimension mismatch: gamma[%d].shape=%s, but %s expected"
                % (i, str(gamma[i].shape), str(gamma[0].shape))
            )
    if d not in [0, 1]:
        raise ValueError("d = %d, but 0 or 1 required" % d)
    # Flatten the spatial dimensions so each grid point is solved on its own.
    gamma_1d = [g.reshape((n, q, q)) for g in gamma]
    phi_out = [np.zeros([n, q, q]) for i in range(p)]
    for k in range(n):
        # Block-Toeplitz Yule-Walker system for grid point k.
        a = np.empty((p * q, p * q))
        for i in range(p):
            for j in range(p):
                a_tmp = gamma_1d[abs(i - j)][k, :]
                if i > j:
                    a_tmp = a_tmp.T
                a[i * q : (i + 1) * q, j * q : (j + 1) * q] = a_tmp
        b = np.vstack([gamma_1d[i][k, :].T for i in range(1, p + 1)])
        x = np.linalg.solve(a, b)
        for i in range(p):
            phi_out[i][k, :, :] = x[i * q : (i + 1) * q, :]
    for i in range(len(phi_out)):
        phi_out[i] = phi_out[i].reshape(np.hstack([gamma[0].shape[:-2], [q, q]]))
    if d == 1:
        # Map the differenced-model parameters back to the integrated model.
        # Pass the actual matrix dimension q: the previous hard-coded value 1
        # made _compute_differenced_model_params treat the q x q parameter
        # fields as scalars, adding the differencing correction to every
        # matrix element instead of only the diagonal.
        phi_out = _compute_differenced_model_params(phi_out, p, q, 1)
    # Innovation parameter estimation is not implemented; append zeros.
    phi_out.append(np.zeros(gamma[0].shape))
    return phi_out
def iterate_ar_model(x, phi, eps=None):
    r"""Apply an AR(p) model

    :math:`x_{k+1}=\phi_1 x_k+\phi_2 x_{k-1}+\dots+\phi_p x_{k-p}+\phi_{p+1}\epsilon`

    to a time series :math:`x_k`.

    Parameters
    ----------
    x : array_like
        Array of shape (n,...), n>=p, containing a time series of a input variable
        x. The elements of x along the first dimension are assumed to be in
        ascending order by time, and the time intervals are assumed to be regular.
    phi : list
        List or array of length p+1 specifying the parameters of the AR(p) model.
        The parameters are in ascending order by increasing time lag, and the
        last element is the parameter corresponding to the innovation term eps.
    eps : array_like
        Optional innovation term for the AR(p) process. The shape of eps is
        expected to be a scalar or x.shape[1:] if len(x.shape)>1. If eps is
        None, the innovation term is not added.

    Returns
    -------
    out : ndarray
        Array of the same shape as x, where the oldest time step has been
        dropped and the newly computed value appended.
    """
    if x.shape[0] < len(phi) - 1:
        raise ValueError(
            "dimension mismatch between x and phi: x.shape[0]=%d, len(phi)=%d"
            % (x.shape[0], len(phi))
        )
    x_simple_shape = len(x.shape) == 1
    if x_simple_shape:
        # Work with a trailing singleton sample axis internally.
        x = x[:, np.newaxis]
    # Scalar eps (shape ()) is allowed per the docstring; the previous check
    # rejected it (or crashed on plain Python numbers lacking .shape).
    if eps is not None and np.shape(eps) not in ((), x.shape[1:]):
        raise ValueError(
            "dimension mismatch between x and eps: x.shape[1:]=%s, eps.shape=%s"
            % (str(x.shape[1:]), str(np.shape(eps)))
        )
    x_new = 0.0
    p = len(phi) - 1
    for i in range(p):
        x_new += phi[i] * x[-(i + 1), :]
    if eps is not None:
        x_new += phi[-1] * eps
    if x_simple_shape:
        # Strip the helper axis so a 1-D input yields a 1-D output. The
        # previous np.hstack([x[1:], [x_new]]) concatenated 2-D arrays along
        # axis 1, producing a wrongly shaped result (or raising for n > 2).
        return np.hstack([x[1:, 0], x_new])
    else:
        return np.concatenate([x[1:, :], x_new[np.newaxis, :]])
def iterate_var_model(x, phi, eps=None):
    r"""Apply a VAR(p) model

    :math:`\mathbf{x}_{k+1}=\mathbf{\Phi}_1\mathbf{x}_k+\mathbf{\Phi}_2
    \mathbf{x}_{k-1}+\dots+\mathbf{\Phi}_p\mathbf{x}_{k-p}+
    \mathbf{\Phi}_{p+1}\mathbf{\epsilon}`

    to a q-variate time series :math:`\mathbf{x}_k`.

    Parameters
    ----------
    x : array_like
        Array of shape (n,q,...), n>=p, containing a q-variate time series of a
        input variable x. The elements of x along the first dimension are
        assumed to be in ascending order by time, and the time intervals are
        assumed to be regular.
    phi : list
        List of parameter matrices :math:`\mathbf{\Phi}_1,\mathbf{\Phi}_2,\dots,
        \mathbf{\Phi}_{p+1}`.
    eps : array_like
        Optional innovation term for the AR(p) process. The shape of eps is
        expected to be (x.shape[1],) or (x.shape[1],x.shape[2:]) if
        len(x.shape)>2. If eps is None, the innovation term is not added.

    Returns
    -------
    out : ndarray
        Array of the same shape as x, where the oldest time step has been
        dropped and the newly computed state appended.
    """
    if x.shape[0] < len(phi) - 1:
        # The previous message printed x.shape[1] while claiming x.shape[0].
        raise ValueError(
            "dimension mismatch between x and phi: x.shape[0]=%d, len(phi)=%d"
            % (x.shape[0], len(phi))
        )
    phi_shape = phi[0].shape
    if phi_shape[-1] != phi_shape[-2]:
        raise ValueError(
            "phi[0].shape = %s, but the last two dimensions are expected to be equal"
            % str(phi_shape)
        )
    for i in range(1, len(phi)):
        if phi[i].shape != phi_shape:
            raise ValueError("dimension mismatch between parameter matrices phi")
    x_simple_shape = len(x.shape) == 2
    if x_simple_shape:
        # Work with a trailing singleton sample axis internally.
        x = x[:, :, np.newaxis]
    x_new = np.zeros(x.shape[1:])
    p = len(phi) - 1
    for l in range(p):
        x_new += np.einsum("...ij,j...->i...", phi[l], x[-(l + 1), :])
    if eps is not None:
        # The model equation applies Phi_{p+1} to eps exactly once; the
        # previous implementation computed np.dot(np.dot(phi[-1], phi[-1]),
        # eps), i.e. multiplied by the innovation matrix twice.
        innovation = np.dot(phi[-1], eps)
        if innovation.shape != x_new.shape:
            # Align a (q,)-shaped innovation with the internal (q, 1) state.
            innovation = innovation.reshape(x_new.shape)
        x_new += innovation
    if x_simple_shape:
        return np.vstack([x[1:, :, 0], x_new[:, 0]])
    else:
        x_new = x_new.reshape(x.shape[1:])
        return np.concatenate([x[1:, :], x_new[np.newaxis, :, :]], axis=0)
def test_ar_stationarity(phi):
r"""Test stationarity of an AR(p) process. That is, test that the roots of
the equation :math:`x^p-\phi_1*x^{p-1}-\dots-\phi_p` lie inside the unit
circle.
Parameters
----------
phi : list
List of AR(p) parameters :math:`\phi_1,\phi_2,\dots,\phi_p`.
Returns
-------
out : bool
True/False if the process is/is not stationary.
"""
r = np.array(
[
np.abs(r_)
for r_ in np.roots([1.0 if i == 0 else -phi[i] for i in range(len(phi))])
]
)
return False if np.any(r >= 1) else True
def test_var_stationarity(phi):
r"""Test stationarity of an AR(p) process. That is, test that the moduli of
the eigenvalues of the companion matrix lie inside the unit circle.
Parameters
----------
phi : list
List of VAR(p) parameter matrices :math:`\mathbf{\Phi}_1,\mathbf{\Phi}_2,
\dots,\mathbf{\Phi}_p`.
Returns
-------
out : bool
True/False if the process is/is not stationary.
"""
q = phi[0].shape
for i in range(1, len(phi)):
if phi[i].shape != q:
raise ValueError("dimension mismatch between parameter matrices phi")
p = len(phi)
q = phi[0].shape[0]
M = np.zeros((p * q, p * q))
for i in range(p):
M[0:q, i * q : (i + 1) * q] = phi[i]
for i in range(1, p):
M[i * q : (i + 1) * q, (i - 1) * q : i * q] = np.eye(q, q)
r = np.linalg.eig(M)[0]
return False if np.any(np.abs(r) >= 1) else True
def _compute_differenced_model_params(phi, p, q, d):
phi_out = []
for i in range(p + d):
if q > 1:
if len(phi[0].shape) == 2:
phi_out.append(np.zeros((q, q)))
else:
phi_out.append(np.zeros(phi[0].shape))
else:
phi_out.append(0.0)
for i in range(1, d + 1):
if q > 1:
phi_out[i - 1] -= binom(d, i) * (-1) ** i * np.eye(q)
else:
phi_out[i - 1] -= binom(d, i) * (-1) ** i
for i in range(1, p + 1):
phi_out[i - 1] += phi[i - 1]
for i in range(1, p + 1):
for j in range(1, d + 1):
phi_out[i + j - 1] += phi[i - 1] * binom(d, j) * (-1) ** j
return phi_out
| [
"numpy.prod",
"numpy.sqrt",
"numpy.hstack",
"numpy.column_stack",
"numpy.array",
"numpy.isfinite",
"numpy.einsum",
"numpy.diff",
"numpy.dot",
"numpy.empty",
"numpy.vstack",
"numpy.concatenate",
"numpy.maximum",
"numpy.abs",
"numpy.eye",
"numpy.linalg.eig",
"numpy.ones",
"numpy.any"... | [((1142, 1196), 'numpy.maximum', 'np.maximum', (['gamma_2', '(2 * gamma_1 * gamma_1 - 1 + 1e-10)'], {}), '(gamma_2, 2 * gamma_1 * gamma_1 - 1 + 1e-10)\n', (1152, 1196), True, 'import numpy as np\n'), ((1211, 1241), 'numpy.minimum', 'np.minimum', (['gamma_2', '(1 - 1e-10)'], {}), '(gamma_2, 1 - 1e-10)\n', (1221, 1241), True, 'import numpy as np\n'), ((1807, 1853), 'numpy.maximum', 'np.maximum', (['gamma_2', '(2 * gamma_1 * gamma_2 - 1)'], {}), '(gamma_2, 2 * gamma_1 * gamma_2 - 1)\n', (1817, 1853), True, 'import numpy as np\n'), ((1868, 1962), 'numpy.maximum', 'np.maximum', (['gamma_2', '((3 * gamma_1 ** 2 - 2 + 2 * (1 - gamma_1 ** 2) ** 1.5) / gamma_1 ** 2)'], {}), '(gamma_2, (3 * gamma_1 ** 2 - 2 + 2 * (1 - gamma_1 ** 2) ** 1.5) /\n gamma_1 ** 2)\n', (1878, 1962), True, 'import numpy as np\n'), ((5630, 5648), 'numpy.column_stack', 'np.column_stack', (['Z'], {}), '(Z)\n', (5645, 5648), True, 'import numpy as np\n'), ((11118, 11138), 'numpy.prod', 'np.prod', (['x.shape[1:]'], {}), '(x.shape[1:])\n', (11125, 11138), True, 'import numpy as np\n'), ((13803, 13828), 'numpy.hstack', 'np.hstack', (['[[1.0], gamma]'], {}), '([[1.0], gamma])\n', (13812, 13828), True, 'import numpy as np\n'), ((13908, 13919), 'numpy.array', 'np.array', (['G'], {}), '(G)\n', (13916, 13919), True, 'import numpy as np\n'), ((14404, 14414), 'numpy.sqrt', 'np.sqrt', (['c'], {}), '(c)\n', (14411, 14414), True, 'import numpy as np\n'), ((16527, 16550), 'numpy.prod', 'np.prod', (['gamma[0].shape'], {}), '(gamma[0].shape)\n', (16534, 16550), True, 'import numpy as np\n'), ((16626, 16642), 'numpy.empty', 'np.empty', (['(p, n)'], {}), '((p, n))\n', (16634, 16642), True, 'import numpy as np\n'), ((17108, 17118), 'numpy.sqrt', 'np.sqrt', (['c'], {}), '(c)\n', (17115, 17118), True, 'import numpy as np\n'), ((19957, 19969), 'numpy.hstack', 'np.hstack', (['X'], {}), '(X)\n', (19966, 19969), True, 'import numpy as np\n'), ((20248, 20266), 'numpy.column_stack', 'np.column_stack', (['Z'], {}), 
'(Z)\n', (20263, 20266), True, 'import numpy as np\n'), ((26561, 26581), 'numpy.prod', 'np.prod', (['x.shape[2:]'], {}), '(x.shape[2:])\n', (26568, 26581), True, 'import numpy as np\n'), ((26754, 26776), 'numpy.empty', 'np.empty', (['(p, m, q, q)'], {}), '((p, m, q, q))\n', (26762, 26776), True, 'import numpy as np\n'), ((29943, 29967), 'numpy.empty', 'np.empty', (['(p * q, p * q)'], {}), '((p * q, p * q))\n', (29951, 29967), True, 'import numpy as np\n'), ((30240, 30261), 'numpy.linalg.solve', 'np.linalg.solve', (['a', 'b'], {}), '(a, b)\n', (30255, 30261), True, 'import numpy as np\n'), ((32416, 32444), 'numpy.prod', 'np.prod', (['gamma[0].shape[:-2]'], {}), '(gamma[0].shape[:-2])\n', (32423, 32444), True, 'import numpy as np\n'), ((37154, 37175), 'numpy.zeros', 'np.zeros', (['x.shape[1:]'], {}), '(x.shape[1:])\n', (37162, 37175), True, 'import numpy as np\n'), ((38865, 38889), 'numpy.zeros', 'np.zeros', (['(p * q, p * q)'], {}), '((p * q, p * q))\n', (38873, 38889), True, 'import numpy as np\n'), ((3101, 3121), 'numpy.sum', 'np.sum', (['(gammas * phi)'], {}), '(gammas * phi)\n', (3107, 3121), True, 'import numpy as np\n'), ((5315, 5333), 'numpy.diff', 'np.diff', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (5322, 5333), True, 'import numpy as np\n'), ((5674, 5692), 'numpy.dot', 'np.dot', (['x_lhs', 'Z.T'], {}), '(x_lhs, Z.T)\n', (5680, 5692), True, 'import numpy as np\n'), ((5918, 5948), 'numpy.sqrt', 'np.sqrt', (['(1.0 - phi[0] * phi[0])'], {}), '(1.0 - phi[0] * phi[0])\n', (5925, 5948), True, 'import numpy as np\n'), ((9187, 9205), 'numpy.diff', 'np.diff', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (9194, 9205), True, 'import numpy as np\n'), ((9494, 9523), 'numpy.hstack', 'np.hstack', (['[[p], x.shape[1:]]'], {}), '([[p], x.shape[1:]])\n', (9503, 9523), True, 'import numpy as np\n'), ((9899, 9932), 'numpy.vstack', 'np.vstack', (['[v[np.newaxis, :], XZ]'], {}), '([v[np.newaxis, :], XZ])\n', (9908, 9932), True, 'import numpy as np\n'), ((11158, 11177), 
'numpy.hstack', 'np.hstack', (['[[p], m]'], {}), '([[p], m])\n', (11167, 11177), True, 'import numpy as np\n'), ((11221, 11232), 'numpy.empty', 'np.empty', (['m'], {}), '(m)\n', (11229, 11232), True, 'import numpy as np\n'), ((11253, 11282), 'numpy.hstack', 'np.hstack', (['[[XZ.shape[0]], m]'], {}), '([[XZ.shape[0]], m])\n', (11262, 11282), True, 'import numpy as np\n'), ((11304, 11346), 'numpy.hstack', 'np.hstack', (['[[Z2.shape[0], Z2.shape[1]], m]'], {}), '([[Z2.shape[0], Z2.shape[1]], m])\n', (11313, 11346), True, 'import numpy as np\n'), ((11798, 11834), 'numpy.sqrt', 'np.sqrt', (['(1.0 - phi[0, :] * phi[0, :])'], {}), '(1.0 - phi[0, :] * phi[0, :])\n', (11805, 11834), True, 'import numpy as np\n'), ((14552, 14573), 'numpy.isfinite', 'np.isfinite', (['phi_pert'], {}), '(phi_pert)\n', (14563, 14573), True, 'import numpy as np\n'), ((16840, 16851), 'numpy.array', 'np.array', (['G'], {}), '(G)\n', (16848, 16851), True, 'import numpy as np\n'), ((19719, 19737), 'numpy.diff', 'np.diff', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (19726, 19737), True, 'import numpy as np\n'), ((20283, 20297), 'numpy.dot', 'np.dot', (['X', 'Z.T'], {}), '(X, Z.T)\n', (20289, 20297), True, 'import numpy as np\n'), ((20633, 20657), 'numpy.zeros', 'np.zeros', (['(p * q, p * q)'], {}), '((p * q, p * q))\n', (20641, 20657), True, 'import numpy as np\n'), ((20851, 20867), 'numpy.linalg.eig', 'np.linalg.eig', (['M'], {}), '(M)\n', (20864, 20867), True, 'import numpy as np\n'), ((21186, 21202), 'numpy.zeros', 'np.zeros', (['(q, q)'], {}), '((q, q))\n', (21194, 21202), True, 'import numpy as np\n'), ((23944, 23962), 'numpy.diff', 'np.diff', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (23951, 23962), True, 'import numpy as np\n'), ((24251, 24287), 'numpy.hstack', 'np.hstack', (['[[q, p * q], x.shape[2:]]'], {}), '([[q, p * q], x.shape[2:]])\n', (24260, 24287), True, 'import numpy as np\n'), ((24907, 24943), 'numpy.hstack', 'np.hstack', (['[v[:, np.newaxis, :], XZ]'], {}), '([v[:, np.newaxis, :], 
XZ])\n', (24916, 24943), True, 'import numpy as np\n'), ((26624, 26640), 'numpy.empty', 'np.empty', (['(m, q)'], {}), '((m, q))\n', (26632, 26640), True, 'import numpy as np\n'), ((27598, 27624), 'numpy.zeros', 'np.zeros', (['phi_out[0].shape'], {}), '(phi_out[0].shape)\n', (27606, 27624), True, 'import numpy as np\n'), ((30639, 30661), 'numpy.zeros', 'np.zeros', (['phi[0].shape'], {}), '(phi[0].shape)\n', (30647, 30661), True, 'import numpy as np\n'), ((32862, 32881), 'numpy.zeros', 'np.zeros', (['[n, q, q]'], {}), '([n, q, q])\n', (32870, 32881), True, 'import numpy as np\n'), ((32937, 32961), 'numpy.empty', 'np.empty', (['(p * q, p * q)'], {}), '((p * q, p * q))\n', (32945, 32961), True, 'import numpy as np\n'), ((33284, 33305), 'numpy.linalg.solve', 'np.linalg.solve', (['a', 'b'], {}), '(a, b)\n', (33299, 33305), True, 'import numpy as np\n'), ((33615, 33639), 'numpy.zeros', 'np.zeros', (['gamma[0].shape'], {}), '(gamma[0].shape)\n', (33623, 33639), True, 'import numpy as np\n'), ((35363, 35390), 'numpy.hstack', 'np.hstack', (['[x[1:], [x_new]]'], {}), '([x[1:], [x_new]])\n', (35372, 35390), True, 'import numpy as np\n'), ((35416, 35464), 'numpy.concatenate', 'np.concatenate', (['[x[1:, :], x_new[np.newaxis, :]]'], {}), '([x[1:, :], x_new[np.newaxis, :]])\n', (35430, 35464), True, 'import numpy as np\n'), ((37238, 37291), 'numpy.einsum', 'np.einsum', (['"""...ij,j...->i..."""', 'phi[l]', 'x[-(l + 1), :]'], {}), "('...ij,j...->i...', phi[l], x[-(l + 1), :])\n", (37247, 37291), True, 'import numpy as np\n'), ((37411, 37448), 'numpy.vstack', 'np.vstack', (['[x[1:, :, 0], x_new[:, 0]]'], {}), '([x[1:, :, 0], x_new[:, 0]])\n', (37420, 37448), True, 'import numpy as np\n'), ((37517, 37576), 'numpy.concatenate', 'np.concatenate', (['[x[1:, :], x_new[np.newaxis, :, :]]'], {'axis': '(0)'}), '([x[1:, :], x_new[np.newaxis, :, :]], axis=0)\n', (37531, 37576), True, 'import numpy as np\n'), ((38172, 38186), 'numpy.any', 'np.any', (['(r >= 1)'], {}), '(r >= 1)\n', (38178, 
38186), True, 'import numpy as np\n'), ((39039, 39051), 'numpy.eye', 'np.eye', (['q', 'q'], {}), '(q, q)\n', (39045, 39051), True, 'import numpy as np\n'), ((39060, 39076), 'numpy.linalg.eig', 'np.linalg.eig', (['M'], {}), '(M)\n', (39073, 39076), True, 'import numpy as np\n'), ((5985, 6071), 'numpy.sqrt', 'np.sqrt', (['((1.0 + phi[1]) * ((1.0 - phi[1]) ** 2.0 - phi[0] ** 2.0) / (1.0 - phi[1]))'], {}), '((1.0 + phi[1]) * ((1.0 - phi[1]) ** 2.0 - phi[0] ** 2.0) / (1.0 -\n phi[1]))\n', (5992, 6071), True, 'import numpy as np\n'), ((9990, 10022), 'numpy.hstack', 'np.hstack', (['[[p, p], x.shape[1:]]'], {}), '([[p, p], x.shape[1:]])\n', (9999, 10022), True, 'import numpy as np\n'), ((10400, 10440), 'numpy.hstack', 'np.hstack', (['[[p + 1, p + 1], x.shape[1:]]'], {}), '([[p + 1, p + 1], x.shape[1:]])\n', (10409, 10440), True, 'import numpy as np\n'), ((10478, 10498), 'numpy.ones', 'np.ones', (['x.shape[1:]'], {}), '(x.shape[1:])\n', (10485, 10498), True, 'import numpy as np\n'), ((11871, 11969), 'numpy.sqrt', 'np.sqrt', (['((1.0 + phi[1, :]) * ((1.0 - phi[1, :]) ** 2.0 - phi[0, :] ** 2.0) / (1.0 -\n phi[1, :]))'], {}), '((1.0 + phi[1, :]) * ((1.0 - phi[1, :]) ** 2.0 - phi[0, :] ** 2.0) /\n (1.0 - phi[1, :]))\n', (11878, 11969), True, 'import numpy as np\n'), ((12041, 12052), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (12049, 12052), True, 'import numpy as np\n'), ((12081, 12121), 'numpy.hstack', 'np.hstack', (['[[phi.shape[0]], x.shape[1:]]'], {}), '([[phi.shape[0]], x.shape[1:]])\n', (12090, 12121), True, 'import numpy as np\n'), ((13880, 13898), 'numpy.roll', 'np.roll', (['g[:-1]', 'j'], {}), '(g[:-1], j)\n', (13887, 13898), True, 'import numpy as np\n'), ((19779, 19799), 'numpy.prod', 'np.prod', (['x.shape[2:]'], {}), '(x.shape[2:])\n', (19786, 19799), True, 'import numpy as np\n'), ((20823, 20835), 'numpy.eye', 'np.eye', (['q', 'q'], {}), '(q, q)\n', (20829, 20835), True, 'import numpy as np\n'), ((24714, 24743), 'numpy.hstack', 'np.hstack', (['[[q], 
x.shape[2:]]'], {}), '([[q], x.shape[2:]])\n', (24723, 24743), True, 'import numpy as np\n'), ((25001, 25041), 'numpy.hstack', 'np.hstack', (['[[p * q, p * q], x.shape[2:]]'], {}), '([[p * q, p * q], x.shape[2:]])\n', (25010, 25041), True, 'import numpy as np\n'), ((25571, 25619), 'numpy.hstack', 'np.hstack', (['[[p * q + 1, p * q + 1], x.shape[2:]]'], {}), '([[p * q + 1, p * q + 1], x.shape[2:]])\n', (25580, 25619), True, 'import numpy as np\n'), ((25657, 25677), 'numpy.ones', 'np.ones', (['x.shape[2:]'], {}), '(x.shape[2:])\n', (25664, 25677), True, 'import numpy as np\n'), ((27428, 27460), 'numpy.hstack', 'np.hstack', (['[x.shape[2:], [q, q]]'], {}), '([x.shape[2:], [q, q]])\n', (27437, 27460), True, 'import numpy as np\n'), ((33469, 33509), 'numpy.hstack', 'np.hstack', (['[gamma[0].shape[:-2], [q, q]]'], {}), '([gamma[0].shape[:-2], [q, q]])\n', (33478, 33509), True, 'import numpy as np\n'), ((37341, 37365), 'numpy.dot', 'np.dot', (['phi[-1]', 'phi[-1]'], {}), '(phi[-1], phi[-1])\n', (37347, 37365), True, 'import numpy as np\n'), ((38038, 38048), 'numpy.abs', 'np.abs', (['r_'], {}), '(r_)\n', (38044, 38048), True, 'import numpy as np\n'), ((5179, 5199), 'numpy.prod', 'np.prod', (['x.shape[1:]'], {}), '(x.shape[1:])\n', (5186, 5199), True, 'import numpy as np\n'), ((5576, 5596), 'numpy.hstack', 'np.hstack', (['[[1], z_]'], {}), '([[1], z_])\n', (5585, 5596), True, 'import numpy as np\n'), ((5708, 5722), 'numpy.dot', 'np.dot', (['Z', 'Z.T'], {}), '(Z, Z.T)\n', (5714, 5722), True, 'import numpy as np\n'), ((16808, 16826), 'numpy.roll', 'np.roll', (['g[:-1]', 'k'], {}), '(g[:-1], k)\n', (16815, 16826), True, 'import numpy as np\n'), ((20194, 20214), 'numpy.vstack', 'np.vstack', (['[[1], z_]'], {}), '([[1], z_])\n', (20203, 20214), True, 'import numpy as np\n'), ((20313, 20327), 'numpy.dot', 'np.dot', (['Z', 'Z.T'], {}), '(Z, Z.T)\n', (20319, 20327), True, 'import numpy as np\n'), ((20887, 20896), 'numpy.abs', 'np.abs', (['r'], {}), '(r)\n', (20893, 20896), True, 
'import numpy as np\n'), ((27692, 27721), 'numpy.hstack', 'np.hstack', (['[x.shape[2:], [q]]'], {}), '([x.shape[2:], [q]])\n', (27701, 27721), True, 'import numpy as np\n'), ((39108, 39117), 'numpy.abs', 'np.abs', (['r'], {}), '(r)\n', (39114, 39117), True, 'import numpy as np\n'), ((39563, 39572), 'numpy.eye', 'np.eye', (['q'], {}), '(q)\n', (39569, 39572), True, 'import numpy as np\n'), ((39617, 39628), 'scipy.special.binom', 'binom', (['d', 'i'], {}), '(d, i)\n', (39622, 39628), False, 'from scipy.special import binom\n'), ((5731, 5749), 'numpy.eye', 'np.eye', (['Z.shape[0]'], {}), '(Z.shape[0])\n', (5737, 5749), True, 'import numpy as np\n'), ((16977, 16987), 'numpy.ones', 'np.ones', (['p'], {}), '(p)\n', (16984, 16987), True, 'import numpy as np\n'), ((20336, 20354), 'numpy.eye', 'np.eye', (['Z.shape[0]'], {}), '(Z.shape[0])\n', (20342, 20354), True, 'import numpy as np\n'), ((39321, 39337), 'numpy.zeros', 'np.zeros', (['(q, q)'], {}), '((q, q))\n', (39329, 39337), True, 'import numpy as np\n'), ((39388, 39410), 'numpy.zeros', 'np.zeros', (['phi[0].shape'], {}), '(phi[0].shape)\n', (39396, 39410), True, 'import numpy as np\n'), ((39537, 39548), 'scipy.special.binom', 'binom', (['d', 'i'], {}), '(d, i)\n', (39542, 39548), False, 'from scipy.special import binom\n'), ((39819, 39830), 'scipy.special.binom', 'binom', (['d', 'j'], {}), '(d, j)\n', (39824, 39830), False, 'from scipy.special import binom\n'), ((11452, 11471), 'numpy.eye', 'np.eye', (['Z2.shape[0]'], {}), '(Z2.shape[0])\n', (11458, 11471), True, 'import numpy as np\n'), ((26900, 26919), 'numpy.eye', 'np.eye', (['Z2.shape[0]'], {}), '(Z2.shape[0])\n', (26906, 26919), True, 'import numpy as np\n')] |
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np
#import pandas as pd ### For future manipulations
#import scipy as sp ### For future manipulations
#import matplotlib.pyplot as plt #### Uncomment and use if you would like to see the traiing dataset length frequency plots
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing, cross_validation, neighbors
#from sklearn.decomposition import PCA ### Uncomment if planning to do dimensionality reduction
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import accuracy_score, precision_score
from sklearn.preprocessing import OneHotEncoder
from keras.layers import Input
from keras.layers import Conv1D, MaxPooling1D
from keras.layers import Flatten, Dense, UpSampling1D
from keras.models import Model
from keras.optimizers import SGD
from keras.layers import concatenate
from numpy import zeros, newaxis
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Convolution1D, Dropout, BatchNormalization
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.regularizers import l2
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
import matplotlib.pyplot as plt
dir = os.path.dirname(os.path.realpath(__file__))
file=open('post_epi_hela_m1A_signals_1mer_20190324.txt','r')
u_labels=[]
### lists of train m5c labels and signals
m_labels=[]
m_signals=[]
um_labels=[]
um_signals=[]
### lists of test m5c labels and signals-will be helpful to do a test hold
mt_labels=[]
mt_signals=[]
### control input data length and signal length by changing len(m_labels) and len(s_len) value below
m_median = []
c_median = []
em_median = []
mm_median = []
cm_median = []
for i in file:
i1=i.split( )
s_len=i1[3].split('_')
if i1[1][2:][:1] == 'A' and len(m_labels) < 12001 and len(s_len) <= 1000:
#print(i1[1][2:][:1])
#print(i1[3])
ms=i1[3].split('_')
msm=list(map(int, ms))
#print(msm)
#print(np.median(msm))
# print(i1[1][:5])
# if np.median(msm) >= 543:
m_labels.append('m')
u_labels.append(i1[1][1:][:1]+'m'+i1[1][3:][:1])
mm_median.append(np.median(msm))
m_signals.append(i1[3].split('_'))
elif i1[1][2:][:1] != 'A' and len(m_labels) < 4001 and len(s_len) <= 1000:
#print(i1[1][2:][:1])
#print(i1[3])
um_labels.append('e')
um_signals.append(i1[3].split('_'))
elif i1[1][2:][:1] == 'A' and len(m_labels) >= 43001 and len(mt_labels) <= 150 and len(s_len) <= 1000:
#print(i1[1][2:][:1])
#print(i1[3])
mt_labels.append('m')
mt_signals.append(i1[3].split('_'))
uni=len(set(u_labels))
#print(len(u_labels))
#print(uni)
#print('length of m5c training data : '+str(len(m_labels))) #### uncomment to print length of m5c training data
################################## Following section loads Unmodified A,T,G,C from input file ##########################################
file1=open('control_hela_m1A_m6A_signals_201903_24.txt','r')
#loss_out=open('loss_out.txt','a')
### lists of A,T,G,C labels and signals
A_labels=[]
A_signals=[]
uA_labels=[]
### lists of C labels and signals for test dataset hold
At_labels=[]
At_signals=[]
### control input data length and signal length by changing len(X_labels) and len(X_len) value below
for i in file1:
i1=i.split( )
s_len=i1[3].split('_')
if i1[1][2:][:1] == 'A' and len(A_labels) < len(m_labels) and len(s_len) <= 1000:
#print(i1[1][2:][:1])
#print(i1[3])
cs=i1[3].split('_')
csm=list(map(int, cs))
#print(np.median(csm))
# if np.median(csm) <= 540:
cm_median.append(np.median(csm))
A_labels.append('A')
A_signals.append(i1[3].split('_'))
uA_labels.append(i1[1][1:][:1]+'A'+i1[1][3:][:1])
#print(i1[1][1:][:1]+'C'+i1[1][3:][:1])
#print(i1[1][:5])
elif i1[1][2:][:1] == 'A' and len(A_labels) >= 43001 and len(At_labels) <= 301 and len(s_len) <= 1000:
#print(i1[1][2:][:1])
#print(i1[3])
At_labels.append('C')
At_signals.append(i1[3].split('_'))
signals=m_signals+A_signals
labels=m_labels+A_labels
print(len(m_signals))
print(len(A_signals))
### uncomment if planning to use hold data for testing
# t_signals=mt_signals+Ct_signals
# t_labels=mt_labels+Ct_labels
X = [list(map(int, i)) for i in signals]
#test_X=[list(map(int, i)) for i in t_signals]
####### Normaliazation ########
#XN=[]
#for i in X:
# ads=[]
# for n in i:
# norm=(n-np.mean(i))/np.std(i)
# ads.append(norm)
# XN.append(ads)
# #print(len(ads))
#X=XN
### Transform labels data from text to oneHot encoder
le=preprocessing.LabelEncoder()
le.fit(labels)
Y=le.transform(labels)
###Uncomment to print label classes
#print('\n'+"Print Classes of train data set: "+'\n')
#print(le.classes_)
# integer encode
label_encoder = preprocessing.LabelEncoder()
integer_encoded = label_encoder.fit_transform(labels)
#print(integer_encoded)
# binary encode
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
Y = onehot_encoder.fit_transform(integer_encoded)
### Post Pad sequences to a length of 3000
X=pad_sequences(X, padding="post", maxlen=1000)
### crosschecking code for signal length #########
# asd=[]
# for i in X:
# asd.append(len(i))
# print('Max_length of signal: '+str(max(asd)))
# test_X=pad_sequences(test_X, padding="post",maxlen=max(asd))
# asd1=[]
# for i in X:
# asd1.append(len(i))
# print('Max_length of mt_signal: '+str(max(asd1)))
#print(signals)
#print(labels)
print("Length of dataset labels",len(labels))
print("Length of dataset signals",len(signals))
X = X[:, :, newaxis]
X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y,test_size=0.2)
input_img=Input(shape=(1000,1))
print(X_train.shape)
regularization_rate=0.0001
weightinit = 'lecun_uniform'
fc_hidden_nodes = 256
outputdim = 2
metrics=['accuracy']
learning_rate=0.001
callbacks = []
model = Sequential()
model.add(
BatchNormalization(
input_shape=(1000,1)))
# for filter_number, f_s1, in zip(filters, f_s):
# model.add(Convolution1D(filter_number, f_s1, kernel_size=3, padding='same',
# kernel_regularizer=l2(regularization_rate),
# kernel_initializer=weightinit))
# model.add(BatchNormalization())
# model.add(Activation('relu'))
###1
model.add(Convolution1D(32, kernel_size=5, padding='same',
kernel_regularizer=l2(regularization_rate),
kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling1D(pool_size=2, strides=1, padding='valid'))
###2
model.add(Dropout(0.5))
model.add(Convolution1D(64, kernel_size=5, padding='same',
kernel_regularizer=l2(regularization_rate),
kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling1D(pool_size=2, strides=1, padding='valid'))
###3
#model.add(Dropout(0.5))
#model.add(Convolution1D(128, kernel_size=5, padding='same',
# kernel_regularizer=l2(regularization_rate),
# kernel_initializer=weightinit))
#model.add(BatchNormalization())
#model.add(Activation('relu'))
#model.add(MaxPooling1D(pool_size=2, strides=1, padding='valid'))
###4
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(units=fc_hidden_nodes,
kernel_regularizer=l2(regularization_rate),
kernel_initializer=weightinit)) # Fully connected layer
model.add(Activation('relu')) # Relu activation
model.add(Dropout(0.5))
model.add(Dense(units=outputdim, kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation("softmax")) # Final classification layer
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=learning_rate),
metrics=metrics)
model.summary()
#Save Model=ON
history = model.fit(X_train, Y_train,
batch_size=50,
epochs=200,
verbose=1,
validation_data=(X_test, Y_test),shuffle=True,callbacks=[TensorBoard(log_dir=dir+'/autoencoder200_20190324')])
score = model.evaluate(X_test, Y_test, verbose=0)
model.save(dir+'/0.6_prob_keras_m1A_v2_200_20190324.h5') # creates a HDF5 file 'my_model.h5'
#print loss and accuracy
print('Test loss:', score[0])
print('Test accuracy:', score[1])
| [
"keras.optimizers.Adam",
"keras.layers.MaxPooling1D",
"sklearn.preprocessing.LabelEncoder",
"numpy.median",
"keras.layers.Flatten",
"sklearn.preprocessing.OneHotEncoder",
"keras.models.Sequential",
"os.path.realpath",
"keras.layers.BatchNormalization",
"keras.layers.Input",
"keras.callbacks.Tens... | [((4869, 4897), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (4895, 4897), False, 'from sklearn import preprocessing, cross_validation, neighbors\n'), ((5081, 5109), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (5107, 5109), False, 'from sklearn import preprocessing, cross_validation, neighbors\n'), ((5222, 5249), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (5235, 5249), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((5413, 5458), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['X'], {'padding': '"""post"""', 'maxlen': '(1000)'}), "(X, padding='post', maxlen=1000)\n", (5426, 5458), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((5956, 6010), 'sklearn.cross_validation.train_test_split', 'cross_validation.train_test_split', (['X', 'Y'], {'test_size': '(0.2)'}), '(X, Y, test_size=0.2)\n', (5989, 6010), False, 'from sklearn import preprocessing, cross_validation, neighbors\n'), ((6022, 6044), 'keras.layers.Input', 'Input', ([], {'shape': '(1000, 1)'}), '(shape=(1000, 1))\n', (6027, 6044), False, 'from keras.layers import Input\n'), ((6223, 6235), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (6233, 6235), False, 'from keras.models import Sequential\n'), ((1348, 1374), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1364, 1374), False, 'import os\n'), ((6251, 6292), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'input_shape': '(1000, 1)'}), '(input_shape=(1000, 1))\n', (6269, 6292), False, 'from keras.layers import Dense, Activation, Flatten, Convolution1D, Dropout, BatchNormalization\n'), ((6844, 6864), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (6862, 6864), False, 'from keras.layers import Dense, Activation, Flatten, Convolution1D, Dropout, 
BatchNormalization\n'), ((6876, 6894), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6886, 6894), False, 'from keras.layers import Dense, Activation, Flatten, Convolution1D, Dropout, BatchNormalization\n'), ((6906, 6959), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)', 'strides': '(1)', 'padding': '"""valid"""'}), "(pool_size=2, strides=1, padding='valid')\n", (6918, 6959), False, 'from keras.layers import Conv1D, MaxPooling1D\n'), ((6976, 6988), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (6983, 6988), False, 'from keras.layers import Dense, Activation, Flatten, Convolution1D, Dropout, BatchNormalization\n'), ((7183, 7203), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7201, 7203), False, 'from keras.layers import Dense, Activation, Flatten, Convolution1D, Dropout, BatchNormalization\n'), ((7215, 7233), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7225, 7233), False, 'from keras.layers import Dense, Activation, Flatten, Convolution1D, Dropout, BatchNormalization\n'), ((7245, 7298), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)', 'strides': '(1)', 'padding': '"""valid"""'}), "(pool_size=2, strides=1, padding='valid')\n", (7257, 7298), False, 'from keras.layers import Conv1D, MaxPooling1D\n'), ((7662, 7674), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (7669, 7674), False, 'from keras.layers import Dense, Activation, Flatten, Convolution1D, Dropout, BatchNormalization\n'), ((7687, 7696), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (7694, 7696), False, 'from keras.layers import Dense, Activation, Flatten, Convolution1D, Dropout, BatchNormalization\n'), ((7880, 7898), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7890, 7898), False, 'from keras.layers import Dense, Activation, Flatten, Convolution1D, Dropout, BatchNormalization\n'), ((7930, 7942), 
'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (7937, 7942), False, 'from keras.layers import Dense, Activation, Flatten, Convolution1D, Dropout, BatchNormalization\n'), ((7954, 8007), 'keras.layers.Dense', 'Dense', ([], {'units': 'outputdim', 'kernel_initializer': 'weightinit'}), '(units=outputdim, kernel_initializer=weightinit)\n', (7959, 8007), False, 'from keras.layers import Dense, Activation, Flatten, Convolution1D, Dropout, BatchNormalization\n'), ((8019, 8039), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (8037, 8039), False, 'from keras.layers import Dense, Activation, Flatten, Convolution1D, Dropout, BatchNormalization\n'), ((8051, 8072), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (8061, 8072), False, 'from keras.layers import Dense, Activation, Flatten, Convolution1D, Dropout, BatchNormalization\n'), ((8178, 8200), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (8182, 8200), False, 'from keras.optimizers import Adam\n'), ((2300, 2314), 'numpy.median', 'np.median', (['msm'], {}), '(msm)\n', (2309, 2314), True, 'import numpy as np\n'), ((3837, 3851), 'numpy.median', 'np.median', (['csm'], {}), '(csm)\n', (3846, 3851), True, 'import numpy as np\n'), ((6753, 6776), 'keras.regularizers.l2', 'l2', (['regularization_rate'], {}), '(regularization_rate)\n', (6755, 6776), False, 'from keras.regularizers import l2\n'), ((7092, 7115), 'keras.regularizers.l2', 'l2', (['regularization_rate'], {}), '(regularization_rate)\n', (7094, 7115), False, 'from keras.regularizers import l2\n'), ((7772, 7795), 'keras.regularizers.l2', 'l2', (['regularization_rate'], {}), '(regularization_rate)\n', (7774, 7795), False, 'from keras.regularizers import l2\n'), ((8441, 8494), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': "(dir + '/autoencoder200_20190324')"}), "(log_dir=dir + '/autoencoder200_20190324')\n", (8452, 8494), False, 'from 
keras.callbacks import TensorBoard\n')] |
"""Test capabilities for optimization.
This module contains a host of models and functions often used for testing optimization
algorithms.
"""
import sys
import numpy as np
import pandas as pd
from scipy.optimize import rosen
def ackley(x, a=20, b=0.2, c=2 * np.pi):
    r"""Ackley function.

    .. math::
        f(x) = -a \exp{\left(-b \sqrt{\frac{1}{p} \sum_{i=1}^p x_i^2}\right)}
        - \exp{\left(\frac{1}{p} \sum_{i=1}^p \cos(c x_i)\right)} + a + \exp(1)

    Parameters
    ----------
    x : array_like
        Input domain with dimension :math:`p`, which
        is usually evaluated on the hypercube
        :math:`x_i \in [-32.768, 32.768]` for all :math:`i = 1, \dots, p`.
    a : float, optional
        Depth of the central funnel. The default value is 20.
    b : float, optional
        Decay rate of the radial term. The default value is 0.2.
    c : float, optional
        Frequency of the cosine modulation. The default value is :math:`2\pi`.

    Returns
    -------
    float
        Output domain

    Notes
    -----
    This function was proposed by Ackley (1987) and is a standard test case
    in the evolutionary-computation literature. It is characterized by an
    almost flat outer region and a central hole or peak where modulations
    become increasingly influential. The function has its global minimum
    :math:`f(x) = 0` at :math:`x = \begin{pmatrix}0 & \dots & 0
    \end{pmatrix}^T`.

    .. figure:: ../../docs/_static/images/fig-ackley.png
       :align: center

    Examples
    --------
    >>> from temfpy.optimization import ackley
    >>> import numpy as np
    >>>
    >>> x = [0, 0]
    >>> y = ackley(x)
    >>> np.testing.assert_almost_equal(y, 0)
    """
    p = len(x)
    # Exponential of the (negative, scaled) root-mean-square distance from 0.
    radial_term = -a * np.exp(-b * np.sqrt(np.sum(np.square(x)) / p))
    # Exponential of the mean cosine modulation.
    cosine_term = -np.exp(np.sum(np.cos(np.multiply(c, x))) / p)
    # The two constants shift the global minimum value to exactly 0.
    return radial_term + cosine_term + a + np.exp(1)
def rastrigin(x, a=10):
    r"""Rastrigin function.

    .. math::
        f(x) = a p + \sum_{i=1}^p \left(x_i^2 - a \cos(2\pi x_i)\right)

    Parameters
    ----------
    x : array_like
        Input domain with dimension :math:`p`.
        It is usually evaluated on the hypercube
        :math:`x_i\in [-5.12, 5.12]` for all :math:`i = 1, \dots, p`.
    a : float, optional
        Amplitude of the cosine modulation. The default value is 10.

    Returns
    -------
    float
        Output domain

    Notes
    -----
    The function was first proposed by Rastrigin in [R1974]_.
    It produces frequent local minima, as it is highly multimodal.
    However, the location of the minima are regularly distributed.
    For every value of :math:`a`, the function has its global minimum
    :math:`f(x) = 0` at :math:`x = \begin{pmatrix}0 & \dots & 0 \end{pmatrix}^T`.

    .. figure:: ../../docs/_static/images/fig-rastrigin.png
       :align: center

    References
    ----------
    .. [R1974] Rastrigin, L. A. (1974).
       *Systems of extremal control*.
       Moscow, Russia.

    Examples
    --------
    >>> from temfpy.optimization import rastrigin
    >>> import numpy as np
    >>>
    >>> x = [0, 0]
    >>> y = rastrigin(x)
    >>> np.testing.assert_almost_equal(y, 0)
    """
    x = np.asarray(x)
    # Use `a` in the cosine term as well (the standard Rastrigin definition).
    # The previous hard-coded 10 shifted the global minimum away from 0
    # whenever a != 10; with `a` in both terms, f(0) = a*p - a*p = 0 always.
    rslt = a * len(x) + np.sum(np.square(x) - a * np.cos(2 * np.pi * x))
    return rslt
def rosenbrock(x):
    r"""Rosenbrock function.

    .. math::
        f(x) = \sum^{p-1}_{i = 1} \left[100(x_{i+1}-x_i^2)^2 + (1-x_i^2) \right]

    Parameters
    ----------
    x : array_like
        Input domain with dimension :math:`p > 1`.

    Returns
    -------
    float
        Output domain

    Notes
    -----
    Often called Rosenbrock's valley or banana function because of its
    shape; the global minimum lies at
    :math:`x = \begin{pmatrix}1 & \dots & 1 \end{pmatrix}^T`.
    Delegates the actual evaluation to :func:`scipy.optimize.rosen`.

    Examples
    --------
    >>> from temfpy.optimization import rosenbrock
    >>> import numpy as np
    >>>
    >>> x = [1, 1]
    >>> y = rosenbrock(x)
    >>> np.testing.assert_almost_equal(y, 0)
    """
    # Guard clauses: reject non-array-like input, then inputs that are too
    # short for the pairwise sum to be defined.
    array_like = (list, tuple, pd.core.series.Series, np.ndarray)
    if not isinstance(x, array_like):
        sys.exit("The parameter x must be an array like object.")
    if len(x) < 2:
        sys.exit("The input array x must be at least of length 2.")
    return rosen(x)
def carlberg(x, a, b):
    r"""Carlberg function.

    .. math::
        f(x) = \frac{1}{2}\sum_{i=1}^p a_i (x_i - 1)^2 + b \left[p -
        \sum_{i=1}^p \cos(2 \pi(x_i-1)) \right]

    Parameters
    ----------
    x : array_like
        Input vector with dimension :math:`p`.
    a : array_like
        Input vector with dimension :math:`p`.
    b : float
        Amplitude of the oscillatory term; must not be negative.

    Returns
    -------
    float
        Output domain

    Notes
    -----
    Widely spread values in ``a`` make the quadratic part ill-conditioned.
    With :math:`b = 0` the function is smooth and convex; for :math:`b > 0`
    it gains many local minima. Either way the global minimum sits at
    :math:`x = \begin{pmatrix}1 & \dots & 1 \end{pmatrix}^T`.

    Examples
    --------
    >>> from temfpy.optimization import carlberg
    >>> import numpy as np
    >>>
    >>> x = [1, 1]
    >>> a = [1, 1]
    >>> b = 1
    >>> y = carlberg(x,a,b)
    >>> np.testing.assert_almost_equal(y, 0)
    """
    if b < 0:
        sys.exit("Input parameter b must not be smaller than zero.")

    x_arr = np.atleast_1d(x)
    a_arr = np.atleast_1d(a)
    p = len(x_arr)
    shifted = x_arr - np.ones(p)

    # Smooth, possibly ill-conditioned quadratic bowl centred at 1.
    quadratic = 0.5 * np.sum(a_arr * shifted ** 2)
    # Oscillatory "noise" term; vanishes identically when b == 0.
    oscillation = b * p - b * np.sum(np.cos(2 * np.pi * shifted))
    return quadratic + oscillation
| [
"numpy.multiply",
"numpy.ones",
"numpy.square",
"numpy.exp",
"sys.exit",
"scipy.optimize.rosen",
"numpy.atleast_1d"
] | [((5187, 5195), 'scipy.optimize.rosen', 'rosen', (['x'], {}), '(x)\n', (5192, 5195), False, 'from scipy.optimize import rosen\n'), ((5007, 5064), 'sys.exit', 'sys.exit', (['"""The parameter x must be an array like object."""'], {}), "('The parameter x must be an array like object.')\n", (5015, 5064), False, 'import sys\n'), ((7042, 7102), 'sys.exit', 'sys.exit', (['"""Input parameter b must not be smaller than zero."""'], {}), "('Input parameter b must not be smaller than zero.')\n", (7050, 7102), False, 'import sys\n'), ((7117, 7133), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (7130, 7133), True, 'import numpy as np\n'), ((7135, 7151), 'numpy.atleast_1d', 'np.atleast_1d', (['a'], {}), '(a)\n', (7148, 7151), True, 'import numpy as np\n'), ((2217, 2226), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (2223, 2226), True, 'import numpy as np\n'), ((5113, 5172), 'sys.exit', 'sys.exit', (['"""The input array x must be at least of length 2."""'], {}), "('The input array x must be at least of length 2.')\n", (5121, 5172), False, 'import sys\n'), ((3701, 3718), 'numpy.multiply', 'np.multiply', (['x', 'x'], {}), '(x, x)\n', (3712, 3718), True, 'import numpy as np\n'), ((2347, 2364), 'numpy.multiply', 'np.multiply', (['c', 'x'], {}), '(c, x)\n', (2358, 2364), True, 'import numpy as np\n'), ((7229, 7247), 'numpy.ones', 'np.ones', (['dimension'], {}), '(dimension)\n', (7236, 7247), True, 'import numpy as np\n'), ((7330, 7348), 'numpy.ones', 'np.ones', (['dimension'], {}), '(dimension)\n', (7337, 7348), True, 'import numpy as np\n'), ((3737, 3758), 'numpy.multiply', 'np.multiply', (['np.pi', 'x'], {}), '(np.pi, x)\n', (3748, 3758), True, 'import numpy as np\n'), ((2275, 2287), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (2284, 2287), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.distributions import Normal
from torch import distributions
from torch.nn.parameter import Parameter
import ipdb
from sklearn import cluster, datasets, mixture
from sklearn.preprocessing import StandardScaler
from flows.flow_helpers import *
from nflows.flows.base import Flow
from nflows.distributions.normal import StandardNormal
from nflows.transforms.base import CompositeTransform
from nflows.transforms.autoregressive import MaskedAffineAutoregressiveTransform
from nflows.transforms.permutations import ReversePermutation
from nflows.flows import realnvp
from e2cnn import gspaces
from e2cnn import nn as enn
import flows.layers.base as base_layers
import flows.layers as layers
# Registry mapping CLI-style activation names to their layer classes.
# Mixes stock torch activations with Lipschitz-aware ones from
# flows.layers.base.
ACTIVATION_FNS = {
    'relu': torch.nn.ReLU,
    'tanh': torch.nn.Tanh,
    'elu': torch.nn.ELU,
    'selu': torch.nn.SELU,
    'fullsort': base_layers.FullSort,
    'maxmin': base_layers.MaxMin,
    'swish': base_layers.Swish,
    'lcube': base_layers.LipschitzCube,
}
# Registry of e2cnn symmetry groups acting on the plane, keyed by the
# string accepted on the command line (looked up as GROUPS[args.group]
# below). N is the order of the discrete rotation subgroup; N=-1 with
# maximum_frequency gives the continuous SO(2)/O(2) groups.
GROUPS = {
    'fliprot16': gspaces.FlipRot2dOnR2(N=16),
    'fliprot12': gspaces.FlipRot2dOnR2(N=12),
    'fliprot8': gspaces.FlipRot2dOnR2(N=8),
    'fliprot4': gspaces.FlipRot2dOnR2(N=4),
    'fliprot2': gspaces.FlipRot2dOnR2(N=2),
    'flip': gspaces.Flip2dOnR2(),
    'rot16': gspaces.Rot2dOnR2(N=16),
    'rot12': gspaces.Rot2dOnR2(N=12),
    'rot8': gspaces.Rot2dOnR2(N=8),
    'rot4': gspaces.Rot2dOnR2(N=4),
    'rot2': gspaces.Rot2dOnR2(N=2),
    'so2': gspaces.Rot2dOnR2(N=-1, maximum_frequency=10),
    'o2': gspaces.FlipRot2dOnR2(N=-1, maximum_frequency=10),
}
def standard_normal_logprob(z):
    """Elementwise log-density of a standard normal evaluated at ``z``.

    ``z`` is expected to be a torch tensor (uses ``z.pow``). Implements
    log N(z; 0, 1) = -0.5 * log(2*pi) - z^2 / 2.

    NOTE(review): ``math`` is not imported in this module's visible header;
    presumably it arrives via ``from flows.flow_helpers import *`` — verify.
    """
    return -0.5 * math.log(2 * math.pi) - z.pow(2) / 2
def add_padding(args, x, nvals=256):
    """Append ``args.padding`` noise channels to the image batch ``x``.

    Returns a ``(padded_x, logpu)`` pair where ``logpu`` is the per-sample
    log-density of the padding noise, shaped ``(B, 1)``.

    Theoretically, padding should've been added before the add_noise
    preprocessing; ``nvals`` accounts for the preprocessing that happens
    before padding is added.
    """
    if args.padding > 0:
        pad_shape = (x.shape[0], args.padding, x.shape[2], x.shape[3])
        if args.padding_dist == 'uniform':
            pad = x.new_empty(pad_shape).uniform_()
            # Uniform noise on [0, 1) has log-density 0 everywhere.
            logpu = torch.zeros_like(pad).sum([1, 2, 3]).view(-1, 1)
            return torch.cat([x, pad / nvals], dim=1), logpu
        if args.padding_dist == 'gaussian':
            pad = x.new_empty(pad_shape).normal_(nvals / 2, nvals / 8)
            logpu = normal_logprob(pad, nvals / 2, math.log(nvals / 8)).sum([1, 2, 3]).view(-1, 1)
            return torch.cat([x, pad / nvals], dim=1), logpu
        raise ValueError()
    # No stochastic padding: optionally duplicate the channels, and the
    # padding log-density is identically zero.
    if args.double_padding:
        x = x.repeat(1, 2, 1, 1)
    return x, torch.zeros(x.shape[0], 1).to(x)
# Reference: https://github.com/ritheshkumar95/pytorch-normalizing-flows/blob/master/modules.py
# Clamping bounds / numerical-stability constants carried over from the
# reference implementation; not referenced in the visible portion of this
# file — presumably used by code elsewhere in the module.
LOG_SIG_MAX = 2
LOG_SIG_MIN = -20
epsilon = 1e-6
# Module-level default device, chosen once at import time.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def weights_init_(m):
    """Initialise ``Linear``-named modules: Xavier-uniform weights, zero bias.

    Intended for use with ``nn.Module.apply``; modules whose class name does
    not contain ``'Linear'`` are left untouched.
    """
    if 'Linear' in m.__class__.__name__:
        torch.nn.init.xavier_uniform_(m.weight, gain=1)
        torch.nn.init.constant_(m.bias, 0)
def toy_flow(args, n_blocks, input_dim, hidden_dim, num_layers):
    """Build a small masked-autoregressive flow (nflows) for toy data.

    Each of the ``num_layers`` steps is a reverse permutation followed by a
    masked affine autoregressive transform; the base distribution is a
    standard normal of dimension ``input_dim``. ``args`` and ``n_blocks``
    are unused here but kept for signature parity with the other builders.
    """
    transforms = []
    for _ in range(num_layers):
        transforms.append(ReversePermutation(features=input_dim))
        transforms.append(
            MaskedAffineAutoregressiveTransform(
                features=input_dim, hidden_features=hidden_dim))
    return Flow(CompositeTransform(transforms), StandardNormal(shape=[input_dim]))
def package_realnvp(args, n_blocks, input_dim, hidden_dim, num_layers):
    """Thin wrapper around nflows' ``SimpleRealNVP``.

    ``args`` is unused; it is accepted only so every flow builder in this
    module shares the same call signature.
    """
    return realnvp.SimpleRealNVP(
        features=input_dim,
        hidden_features=hidden_dim,
        num_layers=num_layers,
        num_blocks_per_layer=n_blocks,
    )
# All code below this line is taken from
# https://github.com/kamenbliznashki/normalizing_flows/blob/master/maf.py
## Taken from: https://github.com/senya-ashukha/real-nvp-pytorch/blob/master/real-nvp-pytorch.ipynb
class FlowSequential(nn.Sequential):
    """Container for layers of a normalizing flow.

    Each child layer's ``forward(x, y)`` / ``inverse(u, y)`` must return an
    ``(output, log_abs_det_jacobian)`` pair; this container chains them and
    accumulates the log-abs-det terms.
    """

    def forward(self, x, y):
        """Apply layers front-to-back, summing their log|det J| terms."""
        total_logdet = 0
        for layer in self:
            x, logdet = layer(x, y)
            total_logdet = total_logdet + logdet
        return x, total_logdet

    def inverse(self, u, y):
        """Apply layer inverses back-to-front, summing their log|det J| terms."""
        total_logdet = 0
        for layer in reversed(self):
            u, logdet = layer.inverse(u, y)
            total_logdet = total_logdet + logdet
        return u, total_logdet
# --------------------
# Models
# --------------------
class MAFRealNVP(nn.Module):
    """RealNVP flow built from masked linear coupling layers.

    Parameters
    ----------
    args : namespace
        Unused here; kept for interface parity with the other flow builders.
    n_blocks : int
        Number of coupling layers.
    input_size : int
        Dimensionality of the data.
    hidden_size : int
        Width of each coupling network's hidden layers.
    n_hidden : int
        Number of hidden layers per coupling network.
    radius : torch.Tensor, optional
        Stored on the module; not used by the methods visible here.
    cond_label_size : int, optional
        Size of the conditioning label vector, forwarded to each coupling layer.
    batch_norm : bool, optional
        Present for API compatibility; batch-norm insertion is commented out.
    """

    def __init__(self, args, n_blocks, input_size, hidden_size, n_hidden,
                 radius=torch.Tensor([0]), cond_label_size=None, batch_norm=False):
        super().__init__()

        # Base distribution parameters for calculating log prob under the model.
        self.register_buffer('base_dist_mean', torch.zeros(input_size))
        self.register_buffer('base_dist_var', torch.ones(input_size))
        self.p_z = StandardNormal
        self.radius = radius

        # Construct the stack of coupling layers, flipping the binary mask
        # between consecutive blocks so every dimension gets transformed.
        modules = []
        mask = torch.arange(input_size).float() % 2
        for i in range(n_blocks):
            modules += [LinearMaskedCoupling(input_size, hidden_size, n_hidden, mask, cond_label_size)]
            mask = 1 - mask
            # modules += batch_norm * [BatchNorm(input_size)]
        self.net = FlowSequential(*modules)

    @property
    def base_dist(self):
        """Diagonal-Gaussian base distribution over latent space.

        BUG FIX: the original returned ``D.Normal(...)``, but no ``D`` alias
        is imported in this module (only ``from torch import distributions``),
        so accessing this property raised ``NameError``.
        NOTE(review): ``Normal``'s second argument is the *scale* (std-dev)
        while the buffer is named ``base_dist_var``; the values are 1.0 so
        they coincide — verify intent.
        """
        return distributions.Normal(self.base_dist_mean, self.base_dist_var)

    def forward(self, x, y=None):
        """Map data to latent space; returns ``(u, sum_log_abs_det_jacobians)``."""
        return self.net(x, y)

    def inverse(self, u, y=None):
        """Map latent samples back to data space."""
        return self.net.inverse(u, y)

    def log_prob(self, inputs, y=None):
        """Per-sample log-density of ``inputs`` under the flow."""
        u, sum_log_abs_det_jacobians = self.forward(inputs, y)
        return torch.sum(self.base_dist.log_prob(u) + sum_log_abs_det_jacobians, dim=1)
## Taken from: https://github.com/senya-ashukha/real-nvp-pytorch/blob/master/real-nvp-pytorch.ipynb
class RealNVP(nn.Module):
    """RealNVP normalizing flow over image tensors with checkerboard masks.

    Scale (``s``) and translation (``t``) subnetworks come from
    ``create_real_nvp_blocks`` (imported via ``flows.flow_helpers``); the
    e2cnn group machinery is set up here for the equivariance evaluation in
    ``compute_avg_test_loss``.
    """
    def __init__(self, args, n_blocks, input_size, hidden_size, n_hidden,
                 layer_type='Conv'):
        super(RealNVP, self).__init__()
        # input_size is unpacked as (batch, channels, height, width);
        # the batch entry is discarded.
        _, self.c, self.h, self.w = input_size[:]
        # mask_size = self.c * self.h * self.w
        # mask = torch.arange(mask_size).float() % 2
        self.group_action_type = GROUPS[args.group]
        self.out_fiber = args.out_fiber
        self.field_type = args.field_type
        self.group_card = len(list(self.group_action_type.testing_elements))
        self.input_type = enn.FieldType(self.group_action_type, self.c*[self.group_action_type.trivial_repr])
        self.n_blocks = int(n_blocks)
        self.n_hidden = n_hidden
        self.layer_type = layer_type
        # 0/1 checkerboard over the (h, w) grid.
        checkerboard = [[((i % 2) + j) % 2 for j in range(self.w)] for i in range(self.h)]
        mask = torch.tensor(checkerboard).float()
        # Reshape to (1, 1, height, width) for broadcasting with tensors of shape (B, C, H, W)
        mask = mask.view(1, 1, self.h, self.w)
        self.dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # Stack the mask with its complement so consecutive coupling blocks
        # use alternating checkerboards; assumes n_blocks is even.
        i_mask = 1 - mask
        mask = torch.vstack([mask,i_mask]).repeat(int(self.n_blocks/2), 1, 1, 1)
        self.p_z = StandardNormal
        self.s, self.t = create_real_nvp_blocks(self.c, hidden_size,
                                              self.n_blocks, n_hidden, layer_type)
        # Frozen parameter so the masks move with the module across devices.
        self.mask = nn.Parameter(mask, requires_grad=False)

    def inverse(self, z, logpz=None):
        """Latent -> data map. With ``logpz`` given, returns (z, -log|det J|)."""
        z = z.view(-1, self.c, self.h, self.w)
        log_det_J, x = z.new_zeros(z.shape[0]), z
        for i in range(0,self.n_blocks):
            # Masked-in part passes through; masked-out part is affinely
            # transformed using s/t computed from the masked-in part.
            x_ = x*self.mask[i]
            s = self.s[i](x_)
            t = self.t[i](x_)
            x = x_ + (1 - self.mask[i]) * (x * torch.exp(s) + t)
            log_det_J += ((1-self.mask[i])*s).sum(dim=(1,2,3)) # log det dx/du
        return x.squeeze() if logpz is None else (z, -1*log_det_J.view(-1,1))

    def forward(self, x, inverse=False):
        """Data -> latent map; returns (z, log|det J|) per sample."""
        if inverse:
            return self.inverse(x)
        log_det_J, z = x.new_zeros(x.shape[0]), x
        # Blocks are applied in reverse order so forward is the exact
        # inverse of ``inverse`` above.
        for i in reversed(range(0,self.n_blocks)):
            z_ = self.mask[i] * z
            s = self.s[i](z_)
            t = self.t[i](z_)
            z = (1 - self.mask[i]) * (z - t) * torch.exp(-s) + z_
            log_det_J -= ((1-self.mask[i])*s).sum(dim=(1,2,3))
        return z.squeeze(), log_det_J.view(-1, 1)

    def log_prob(self, inputs, beta=1.):
        """Return (log p(x), log p(z), -delta_logp), each shaped (B, 1).

        ``beta`` scales the change-of-variables term (annealing weight).
        """
        z, delta_logp = self.forward(inputs)
        logpz = standard_normal_logprob(z).view(z.size(0), -1).sum(1, keepdim=True)
        logpx = logpz - beta * delta_logp
        return logpx, logpz, -1*delta_logp
        # p_z = self.p_z([inputs.shape[-1]])
        # return p_z.log_prob(z) + logp

    def compute_avg_test_loss(self, args, r2_act, data, beta=1.):
        """Bits/dim averaged over all group elements applied to ``data``.

        NOTE(review): the local ``input_type`` is assigned but never used
        (``self.input_type`` is used instead), and ``.cuda()`` is
        hard-coded — this path assumes a GPU is available; verify.
        """
        _, c, h, w = data.shape
        input_type = enn.FieldType(r2_act, self.c*[r2_act.trivial_repr])
        bits_per_dim, logits_tensor = torch.zeros(1).to(data), torch.zeros(args.n_classes).to(data)
        logpz, delta_logp = torch.zeros(1).to(data), torch.zeros(1).to(data)
        logpx_list = []
        data = enn.GeometricTensor(data.cpu(), self.input_type)
        # nvals is the number of discrete levels assumed by the dequantized
        # input preprocessing.
        if args.dataset == 'celeba_5bit':
            nvals = 32
        elif args.dataset == 'celebahq':
            nvals = 2**args.nbits
        else:
            nvals = 256
        for g in r2_act.testing_elements:
            x_transformed = data.transform(g).tensor.view(-1, c, h, w).cuda()
            padded_inputs, logpu = add_padding(args, x_transformed, nvals)
            _, logpz, delta_logp = self.log_prob(padded_inputs)
            # log p(x)
            logpx = logpz - beta * delta_logp - np.log(nvals) * (
                args.imagesize * args.imagesize * (args.im_dim + args.padding)
            ) - logpu
            logpx_list.append(logpx)
        logpx_total = torch.vstack(logpx_list)
        bits_per_dim = -torch.mean(logpx_total) / (args.imagesize *
                                                    args.imagesize * args.im_dim) / np.log(2)
        return bits_per_dim

    def compute_loss(self, args, inputs, beta=1., do_test=False):
        """Training/eval objective: bits/dim plus diagnostics.

        Returns ``(bits_per_dim, logits_tensor, logpz, delta_logp, _)``;
        ``logits_tensor`` stays zero here (no classification head).
        """
        if do_test:
            return self.compute_avg_test_loss(args, self.group_action_type, inputs)
        bits_per_dim, logits_tensor = torch.zeros(1).to(inputs), torch.zeros(args.n_classes).to(inputs)
        logpz, delta_logp = torch.zeros(1).to(inputs), torch.zeros(1).to(inputs)
        if args.dataset == 'celeba_5bit':
            nvals = 32
        elif args.dataset == 'celebahq':
            nvals = 2**args.nbits
        else:
            nvals = 256
        padded_inputs, logpu = add_padding(args, inputs, nvals)
        _, logpz, delta_logp = self.log_prob(padded_inputs, beta)
        # log p(x)
        logpx = logpz - beta * delta_logp - np.log(nvals) * (
            args.imagesize * args.imagesize * (args.im_dim + args.padding)
        ) - logpu
        bits_per_dim = -torch.mean(logpx) / (args.imagesize *
                                              args.imagesize * args.im_dim) / np.log(2)
        logpz = torch.mean(logpz).detach()
        delta_logp = torch.mean(-delta_logp).detach()
        return bits_per_dim, logits_tensor, logpz, delta_logp, _

    def sample(self, batchSize):
        """Draw samples by pushing base-distribution draws through ``inverse``.

        NOTE(review): ``self.prior`` is never assigned in ``__init__`` as
        visible here — calling this as written would raise; confirm where
        ``prior`` is expected to be set.
        """
        # TODO: Update this method for edge_index
        z = self.prior.sample((batchSize, 1))
        logp = self.prior.log_prob(z)
        x = self.inverse(z)
        return x
| [
"nflows.flows.base.Flow",
"torch.nn.init.constant_",
"numpy.log",
"torch.exp",
"e2cnn.gspaces.Flip2dOnR2",
"nflows.transforms.base.CompositeTransform",
"torch.cuda.is_available",
"torch.arange",
"e2cnn.gspaces.Rot2dOnR2",
"nflows.transforms.autoregressive.MaskedAffineAutoregressiveTransform",
"t... | [((1086, 1113), 'e2cnn.gspaces.FlipRot2dOnR2', 'gspaces.FlipRot2dOnR2', ([], {'N': '(16)'}), '(N=16)\n', (1107, 1113), False, 'from e2cnn import gspaces\n'), ((1132, 1159), 'e2cnn.gspaces.FlipRot2dOnR2', 'gspaces.FlipRot2dOnR2', ([], {'N': '(12)'}), '(N=12)\n', (1153, 1159), False, 'from e2cnn import gspaces\n'), ((1177, 1203), 'e2cnn.gspaces.FlipRot2dOnR2', 'gspaces.FlipRot2dOnR2', ([], {'N': '(8)'}), '(N=8)\n', (1198, 1203), False, 'from e2cnn import gspaces\n'), ((1221, 1247), 'e2cnn.gspaces.FlipRot2dOnR2', 'gspaces.FlipRot2dOnR2', ([], {'N': '(4)'}), '(N=4)\n', (1242, 1247), False, 'from e2cnn import gspaces\n'), ((1265, 1291), 'e2cnn.gspaces.FlipRot2dOnR2', 'gspaces.FlipRot2dOnR2', ([], {'N': '(2)'}), '(N=2)\n', (1286, 1291), False, 'from e2cnn import gspaces\n'), ((1305, 1325), 'e2cnn.gspaces.Flip2dOnR2', 'gspaces.Flip2dOnR2', ([], {}), '()\n', (1323, 1325), False, 'from e2cnn import gspaces\n'), ((1340, 1363), 'e2cnn.gspaces.Rot2dOnR2', 'gspaces.Rot2dOnR2', ([], {'N': '(16)'}), '(N=16)\n', (1357, 1363), False, 'from e2cnn import gspaces\n'), ((1378, 1401), 'e2cnn.gspaces.Rot2dOnR2', 'gspaces.Rot2dOnR2', ([], {'N': '(12)'}), '(N=12)\n', (1395, 1401), False, 'from e2cnn import gspaces\n'), ((1415, 1437), 'e2cnn.gspaces.Rot2dOnR2', 'gspaces.Rot2dOnR2', ([], {'N': '(8)'}), '(N=8)\n', (1432, 1437), False, 'from e2cnn import gspaces\n'), ((1451, 1473), 'e2cnn.gspaces.Rot2dOnR2', 'gspaces.Rot2dOnR2', ([], {'N': '(4)'}), '(N=4)\n', (1468, 1473), False, 'from e2cnn import gspaces\n'), ((1487, 1509), 'e2cnn.gspaces.Rot2dOnR2', 'gspaces.Rot2dOnR2', ([], {'N': '(2)'}), '(N=2)\n', (1504, 1509), False, 'from e2cnn import gspaces\n'), ((1522, 1567), 'e2cnn.gspaces.Rot2dOnR2', 'gspaces.Rot2dOnR2', ([], {'N': '(-1)', 'maximum_frequency': '(10)'}), '(N=-1, maximum_frequency=10)\n', (1539, 1567), False, 'from e2cnn import gspaces\n'), ((1579, 1628), 'e2cnn.gspaces.FlipRot2dOnR2', 'gspaces.FlipRot2dOnR2', ([], {'N': '(-1)', 'maximum_frequency': '(10)'}), '(N=-1, 
maximum_frequency=10)\n', (1600, 1628), False, 'from e2cnn import gspaces\n'), ((3244, 3277), 'nflows.distributions.normal.StandardNormal', 'StandardNormal', ([], {'shape': '[input_dim]'}), '(shape=[input_dim])\n', (3258, 3277), False, 'from nflows.distributions.normal import StandardNormal\n'), ((3585, 3615), 'nflows.transforms.base.CompositeTransform', 'CompositeTransform', (['transforms'], {}), '(transforms)\n', (3603, 3615), False, 'from nflows.transforms.base import CompositeTransform\n'), ((3628, 3654), 'nflows.flows.base.Flow', 'Flow', (['transform', 'base_dist'], {}), '(transform, base_dist)\n', (3632, 3654), False, 'from nflows.flows.base import Flow\n'), ((3755, 3882), 'nflows.flows.realnvp.SimpleRealNVP', 'realnvp.SimpleRealNVP', ([], {'features': 'input_dim', 'hidden_features': 'hidden_dim', 'num_layers': 'num_layers', 'num_blocks_per_layer': 'n_blocks'}), '(features=input_dim, hidden_features=hidden_dim,\n num_layers=num_layers, num_blocks_per_layer=n_blocks)\n', (3776, 3882), False, 'from nflows.flows import realnvp\n'), ((2926, 2951), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2949, 2951), False, 'import torch\n'), ((3071, 3118), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['m.weight'], {'gain': '(1)'}), '(m.weight, gain=1)\n', (3100, 3118), False, 'import torch\n'), ((3127, 3161), 'torch.nn.init.constant_', 'torch.nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (3150, 3161), False, 'import torch\n'), ((5139, 5156), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (5151, 5156), False, 'import torch\n'), ((6977, 7067), 'e2cnn.nn.FieldType', 'enn.FieldType', (['self.group_action_type', '(self.c * [self.group_action_type.trivial_repr])'], {}), '(self.group_action_type, self.c * [self.group_action_type.\n trivial_repr])\n', (6990, 7067), True, 'from e2cnn import nn as enn\n'), ((7847, 7886), 'torch.nn.Parameter', 'nn.Parameter', (['mask'], {'requires_grad': '(False)'}), '(mask, 
requires_grad=False)\n', (7859, 7886), True, 'import torch.nn as nn\n'), ((9312, 9365), 'e2cnn.nn.FieldType', 'enn.FieldType', (['r2_act', '(self.c * [r2_act.trivial_repr])'], {}), '(r2_act, self.c * [r2_act.trivial_repr])\n', (9325, 9365), True, 'from e2cnn import nn as enn\n'), ((10318, 10342), 'torch.vstack', 'torch.vstack', (['logpx_list'], {}), '(logpx_list)\n', (10330, 10342), False, 'import torch\n'), ((3356, 3394), 'nflows.transforms.permutations.ReversePermutation', 'ReversePermutation', ([], {'features': 'input_dim'}), '(features=input_dim)\n', (3374, 3394), False, 'from nflows.transforms.permutations import ReversePermutation\n'), ((3422, 3510), 'nflows.transforms.autoregressive.MaskedAffineAutoregressiveTransform', 'MaskedAffineAutoregressiveTransform', ([], {'features': 'input_dim', 'hidden_features': 'hidden_dim'}), '(features=input_dim, hidden_features=\n hidden_dim)\n', (3457, 3510), False, 'from nflows.transforms.autoregressive import MaskedAffineAutoregressiveTransform\n'), ((5346, 5369), 'torch.zeros', 'torch.zeros', (['input_size'], {}), '(input_size)\n', (5357, 5369), False, 'import torch\n'), ((5417, 5439), 'torch.ones', 'torch.ones', (['input_size'], {}), '(input_size)\n', (5427, 5439), False, 'import torch\n'), ((10488, 10497), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (10494, 10497), True, 'import numpy as np\n'), ((11507, 11516), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (11513, 11516), True, 'import numpy as np\n'), ((2178, 2210), 'torch.cat', 'torch.cat', (['[x, u / nvals]'], {'dim': '(1)'}), '([x, u / nvals], dim=1)\n', (2187, 2210), False, 'import torch\n'), ((7275, 7301), 'torch.tensor', 'torch.tensor', (['checkerboard'], {}), '(checkerboard)\n', (7287, 7301), False, 'import torch\n'), ((7494, 7519), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7517, 7519), False, 'import torch\n'), ((7573, 7601), 'torch.vstack', 'torch.vstack', (['[mask, i_mask]'], {}), '([mask, i_mask])\n', (7585, 7601), False, 
'import torch\n'), ((11534, 11551), 'torch.mean', 'torch.mean', (['logpz'], {}), '(logpz)\n', (11544, 11551), False, 'import torch\n'), ((11582, 11605), 'torch.mean', 'torch.mean', (['(-delta_logp)'], {}), '(-delta_logp)\n', (11592, 11605), False, 'import torch\n'), ((2488, 2520), 'torch.cat', 'torch.cat', (['[x, u / nvals]'], {'dim': '(1)'}), '([x, u / nvals], dim=1)\n', (2497, 2520), False, 'import torch\n'), ((5567, 5591), 'torch.arange', 'torch.arange', (['input_size'], {}), '(input_size)\n', (5579, 5591), False, 'import torch\n'), ((8719, 8732), 'torch.exp', 'torch.exp', (['(-s)'], {}), '(-s)\n', (8728, 8732), False, 'import torch\n'), ((9402, 9416), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (9413, 9416), False, 'import torch\n'), ((9427, 9454), 'torch.zeros', 'torch.zeros', (['args.n_classes'], {}), '(args.n_classes)\n', (9438, 9454), False, 'import torch\n'), ((9492, 9506), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (9503, 9506), False, 'import torch\n'), ((9517, 9531), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (9528, 9531), False, 'import torch\n'), ((10367, 10390), 'torch.mean', 'torch.mean', (['logpx_total'], {}), '(logpx_total)\n', (10377, 10390), False, 'import torch\n'), ((10736, 10750), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (10747, 10750), False, 'import torch\n'), ((10763, 10790), 'torch.zeros', 'torch.zeros', (['args.n_classes'], {}), '(args.n_classes)\n', (10774, 10790), False, 'import torch\n'), ((10830, 10844), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (10841, 10844), False, 'import torch\n'), ((10857, 10871), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (10868, 10871), False, 'import torch\n'), ((11257, 11270), 'numpy.log', 'np.log', (['nvals'], {}), '(nvals)\n', (11263, 11270), True, 'import numpy as np\n'), ((11392, 11409), 'torch.mean', 'torch.mean', (['logpx'], {}), '(logpx)\n', (11402, 11409), False, 'import torch\n'), ((2654, 2680), 'torch.zeros', 'torch.zeros', 
(['x.shape[0]', '(1)'], {}), '(x.shape[0], 1)\n', (2665, 2680), False, 'import torch\n'), ((2715, 2741), 'torch.zeros', 'torch.zeros', (['x.shape[0]', '(1)'], {}), '(x.shape[0], 1)\n', (2726, 2741), False, 'import torch\n'), ((10139, 10152), 'numpy.log', 'np.log', (['nvals'], {}), '(nvals)\n', (10145, 10152), True, 'import numpy as np\n'), ((2112, 2131), 'torch.zeros_like', 'torch.zeros_like', (['u'], {}), '(u)\n', (2128, 2131), False, 'import torch\n'), ((8203, 8215), 'torch.exp', 'torch.exp', (['s'], {}), '(s)\n', (8212, 8215), False, 'import torch\n')] |
"""Age-Fitness selection
This module implements the Age-Fitness selection algorithm that defines
the selection used in the Age-Fitness evolutionary algorithm module.
This module expects to be used in conjunction with the
``RandomIndividualVariation`` module that wraps the ``VarOr`` module.
"""
import numpy as np
from .selection import Selection
from ..util.argument_validation import argument_validation
class AgeFitness(Selection):
"""Age-Fitness selection
Parameters
----------
selection_size : int
The size of the group of individuals to be randomly compared. The size
must be an integer greater than 1.
"""
WORST_CASE_FACTOR = 50
@argument_validation(selection_size={">=": 2})
def __init__(self, selection_size=2):
self._selection_size = selection_size
self._selected_indices = []
self._population_index_array = np.array([])
self._selection_attempts = 0
def __call__(self, population, target_population_size):
"""Performs Age-Fitness selection on a population. If ``selection_size``
is larger than the population, the population size is used as the
``selection_size``.
Parameters
----------
population : list of chromosomes
The population on which to perform selection
target_population_size : int
The size of the new population after selection. It will never be
the case that the new population will have a size smaller than the
target population. However, it *is* possible to for the new
population to be larger than ``target_population_size``.
Returns
-------
list of chromosomes :
The chromosomes not selected for removal
Raises
------
ValueError
If the ``target_population_size`` is larger than the intial
`population`
"""
if target_population_size > len(population):
raise ValueError("Target population size should\
be less than initial population")
n_removed = 0
start_pop_size = len(population)
target_removal = start_pop_size - target_population_size
selection_attempts = 0
while n_removed < target_removal and \
selection_attempts < start_pop_size * self.WORST_CASE_FACTOR:
inds = self._get_unique_rand_indices(start_pop_size - n_removed)
to_remove = self._find_inds_for_removal(inds, population,
target_removal - n_removed)
self._swap_removals_to_end(population, to_remove, n_removed)
n_removed += len(to_remove)
selection_attempts += 1
new_pop_size = start_pop_size - n_removed
return population[:new_pop_size]
def _get_unique_rand_indices(self, max_int):
if self._selection_size >= max_int:
return list(range(max_int))
if self._selection_size < 5:
return self._dumb_selection(max_int)
return np.random.choice(max_int, self._selection_size, replace=False)
def _dumb_selection(self, max_int):
inds = set(np.random.randint(max_int, size=self._selection_size))
while len(inds) < self._selection_size:
inds.add(np.random.randint(max_int))
return list(inds)
def _find_inds_for_removal(self, inds, population, num_removals_needed):
if self._selection_size == 2:
return self._streamlined_pair_removal(inds[0], inds[1], population)
removal_set = set()
for i, ind_a in enumerate(inds[:-1]):
if ind_a not in removal_set:
for ind_b in inds[i+1:]:
if ind_b not in removal_set:
self._update_removal_set(population, ind_a, ind_b,
removal_set)
if len(removal_set) >= num_removals_needed:
return list(removal_set)
return list(removal_set)
def _streamlined_pair_removal(self, indv_index_1, indv_index_2,
population):
indv_1 = population[indv_index_1]
indv_2 = population[indv_index_2]
if np.isnan(indv_1.fitness):
return [indv_index_1]
if np.isnan(indv_2.fitness):
return [indv_index_2]
if self._first_not_dominated(indv_1, indv_2):
return [indv_index_2]
if self._first_not_dominated(indv_2, indv_1):
return [indv_index_1]
return []
def _update_removal_set(self, population, indv_index_1,
indv_index_2, removal_set):
indv_1 = population[indv_index_1]
indv_2 = population[indv_index_2]
if np.isnan(indv_1.fitness):
removal_set.add(indv_index_1)
elif np.isnan(indv_2.fitness):
removal_set.add(indv_index_2)
elif self._first_not_dominated(indv_1, indv_2):
removal_set.add(indv_index_2)
elif self._first_not_dominated(indv_2, indv_1):
removal_set.add(indv_index_1)
@staticmethod
def _first_not_dominated(first_indv, second_indv):
# This code block can be used to force equivalency of bingocpp and
# bingo that may otherwise diverge because of truncation or small math
# differences.
# rel_fitness_diff = (first_indv.fitness - second_indv.fitness) \
# / (first_indv.fitness + second_indv.fitness)
# return not (first_indv.genetic_age > second_indv.genetic_age or
# rel_fitness_diff > 1e-15)
return not (first_indv.genetic_age > second_indv.genetic_age or
first_indv.fitness > second_indv.fitness)
def _swap_removals_to_end(self, population, inds_to_remove, num_removed):
for i, ind in enumerate(sorted(inds_to_remove, reverse=True)):
self._swap(population, ind, -(i+num_removed+1))
@staticmethod
def _swap(array, index_1, index_2):
array[index_1], array[index_2] = array[index_2], array[index_1]
| [
"numpy.random.choice",
"numpy.array",
"numpy.random.randint",
"numpy.isnan"
] | [((896, 908), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (904, 908), True, 'import numpy as np\n'), ((3105, 3167), 'numpy.random.choice', 'np.random.choice', (['max_int', 'self._selection_size'], {'replace': '(False)'}), '(max_int, self._selection_size, replace=False)\n', (3121, 3167), True, 'import numpy as np\n'), ((4311, 4335), 'numpy.isnan', 'np.isnan', (['indv_1.fitness'], {}), '(indv_1.fitness)\n', (4319, 4335), True, 'import numpy as np\n'), ((4382, 4406), 'numpy.isnan', 'np.isnan', (['indv_2.fitness'], {}), '(indv_2.fitness)\n', (4390, 4406), True, 'import numpy as np\n'), ((4849, 4873), 'numpy.isnan', 'np.isnan', (['indv_1.fitness'], {}), '(indv_1.fitness)\n', (4857, 4873), True, 'import numpy as np\n'), ((3228, 3281), 'numpy.random.randint', 'np.random.randint', (['max_int'], {'size': 'self._selection_size'}), '(max_int, size=self._selection_size)\n', (3245, 3281), True, 'import numpy as np\n'), ((4930, 4954), 'numpy.isnan', 'np.isnan', (['indv_2.fitness'], {}), '(indv_2.fitness)\n', (4938, 4954), True, 'import numpy as np\n'), ((3352, 3378), 'numpy.random.randint', 'np.random.randint', (['max_int'], {}), '(max_int)\n', (3369, 3378), True, 'import numpy as np\n')] |
#! /usr/bin/env python
import os
import logging
import numpy
from timeit import default_timer as timer
import pandas
from metax import __version__
from metax import Logging
from metax import Exceptions
from metax import Utilities
from metax.predixcan import MultiPrediXcanAssociation
from metax.predixcan import Utilities as MultiPrediXcanUtilities, Simulations as MultiPredixcanSimulations
def run(args):
    """Run Multi-PrediXcan simulations for each gene and save the results.

    Writes up to three files derived from ``args.output_prefix``:
    ``__mt_results.txt`` (multi-tissue associations sorted by p-value),
    ``__additional.txt`` (per-gene extra stats), and ``__p_results.txt``
    (single-tissue PrediXcan results, only if any were produced).
    Skips everything if the results file already exists.

    Changes vs. original: removed the unused ``start = timer()`` and
    ``folder, prefix = os.path.split(...)`` assignments (dead locals), and
    used idiomatic truthiness for the predixcan results list.
    """
    results_name = args.output_prefix + "__mt_results.txt"
    predixcan_results_name = args.output_prefix + "__p_results.txt"
    additional_name = args.output_prefix + "__additional.txt"
    if os.path.exists(results_name):
        logging.info("%s already exists, you have to move it or delete it if you want it done again", results_name)
        return

    # for reproducibility
    numpy.random.seed(100)

    results = []
    additional = []
    predixcan_results = []
    n_max = args.max_n_results

    logging.info("Acquiring context")
    with MultiPredixcanSimulations.context_from_args(args) as context:
        logging.info("processing")
        _c, _cp, _e = context.get_mp_simulation(None)
        for i, gene in enumerate(context.get_genes()):
            if n_max and i + 1 > n_max:
                logging.info("Max runs met")
                break
            logging.log(9, "%d Gene %s", i, gene)
            r, add, p = MultiPredixcanSimulations.simulate(gene, context)
            if r is None:
                # Gene could not be simulated; skip it but keep going.
                logging.log(9, "%s could not be simulated", gene)
                continue
            results.append(r)
            additional.append(add)
            if p is not None:
                predixcan_results.append(p)

    results = MultiPrediXcanAssociation.dataframe_from_results(results, _c).sort_values(by="pvalue")
    additional = pandas.concat(additional)

    Utilities.ensure_requisite_folders(results_name)
    Utilities.save_dataframe(results, results_name)
    Utilities.save_dataframe(additional, additional_name)
    if predixcan_results:
        predixcan_results = pandas.concat(predixcan_results)
        Utilities.save_dataframe(predixcan_results, predixcan_results_name)

    logging.info("Finished")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='MPSimulation.py')
parser.add_argument("--expression_folder", help="Folder with predicted gene expressions. (plain text file format)")
parser.add_argument("--expression_pattern", help="Patterns to select expression files", default=None)
parser.add_argument("--input_phenos_file", help="Text file (or gzip-compressed) where one column will be used as phenotype")
parser.add_argument("--simulation_type", help="What kind of genotype to simulate: [random, combination, simple]")
parser.add_argument("--output_prefix", help="File where stuff will be saved.")
parser.add_argument("--verbosity", help="Log verbosity level. 1 is everything being logged. 10 is only high level messages, above 10 will hardly log anything", default = "10")
parser.add_argument("--throw", action="store_true", help="Throw exception on error", default=False)
parser.add_argument("--code_999", help="values of -999 in expression are to be ignored", action="store_true", default=False)
parser.add_argument("--mode", help="Type of regression. Can be: {}".format(MultiPrediXcanAssociation.MTPMode.K_MODES), default=MultiPrediXcanAssociation.MTPMode.K_LINEAR)
parser.add_argument("--pc_condition_number", help="Principal components condition number", type=int)
parser.add_argument("--pc_eigen_ratio", help="Principal components filter, cutoff at proportion to max eigenvalue", type=float)
parser.add_argument("--standardize_expression", help="Standardise input predicted expressions.", action="store_true", default=False)
parser.add_argument("--only_truth", help="Run Multi-PrediXcan only with selected causal models.", action="store_true", default=False)
parser.add_argument("--simulation_parameters", help="Depends on particular scheme", action="append", nargs=2)
parser.add_argument("--do_predixcan", help="Also compute predixcan association", action="store_true", default=False)
parser.add_argument("--max_n_results", help="Optional. If provided, run up to as many analysis", type=int)
args = parser.parse_args()
Logging.configureLogging(int(args.verbosity))
if args.throw:
run(args)
else:
try:
run(args)
except Exceptions.ReportableException as e:
logging.error(e.msg)
except Exception as e:
logging.info("Unexpected error: %s" % str(e)) | [
"os.path.exists",
"argparse.ArgumentParser",
"timeit.default_timer",
"metax.Utilities.ensure_requisite_folders",
"metax.Utilities.save_dataframe",
"os.path.split",
"metax.predixcan.Simulations.context_from_args",
"logging.log",
"metax.predixcan.MultiPrediXcanAssociation.dataframe_from_results",
"p... | [((422, 429), 'timeit.default_timer', 'timer', ([], {}), '()\n', (427, 429), True, 'from timeit import default_timer as timer\n'), ((452, 485), 'os.path.split', 'os.path.split', (['args.output_prefix'], {}), '(args.output_prefix)\n', (465, 485), False, 'import os\n'), ((683, 711), 'os.path.exists', 'os.path.exists', (['results_name'], {}), '(results_name)\n', (697, 711), False, 'import os\n'), ((874, 896), 'numpy.random.seed', 'numpy.random.seed', (['(100)'], {}), '(100)\n', (891, 896), False, 'import numpy\n'), ((998, 1031), 'logging.info', 'logging.info', (['"""Acquiring context"""'], {}), "('Acquiring context')\n", (1010, 1031), False, 'import logging\n'), ((1850, 1875), 'pandas.concat', 'pandas.concat', (['additional'], {}), '(additional)\n', (1863, 1875), False, 'import pandas\n'), ((1881, 1929), 'metax.Utilities.ensure_requisite_folders', 'Utilities.ensure_requisite_folders', (['results_name'], {}), '(results_name)\n', (1915, 1929), False, 'from metax import Utilities\n'), ((1934, 1981), 'metax.Utilities.save_dataframe', 'Utilities.save_dataframe', (['results', 'results_name'], {}), '(results, results_name)\n', (1958, 1981), False, 'from metax import Utilities\n'), ((1986, 2039), 'metax.Utilities.save_dataframe', 'Utilities.save_dataframe', (['additional', 'additional_name'], {}), '(additional, additional_name)\n', (2010, 2039), False, 'from metax import Utilities\n'), ((2213, 2237), 'logging.info', 'logging.info', (['"""Finished"""'], {}), "('Finished')\n", (2225, 2237), False, 'import logging\n'), ((2299, 2353), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MPSimulation.py"""'}), "(description='MPSimulation.py')\n", (2322, 2353), False, 'import argparse\n'), ((721, 838), 'logging.info', 'logging.info', (['"""%s already exists, you have to move it or delete it if you want it done again"""', 'results_name'], {}), "(\n '%s already exists, you have to move it or delete it if you want it done again'\n , results_name)\n", 
(733, 838), False, 'import logging\n'), ((1041, 1090), 'metax.predixcan.Simulations.context_from_args', 'MultiPredixcanSimulations.context_from_args', (['args'], {}), '(args)\n', (1084, 1090), True, 'from metax.predixcan import Utilities as MultiPrediXcanUtilities, Simulations as MultiPredixcanSimulations\n'), ((1111, 1137), 'logging.info', 'logging.info', (['"""processing"""'], {}), "('processing')\n", (1123, 1137), False, 'import logging\n'), ((2100, 2132), 'pandas.concat', 'pandas.concat', (['predixcan_results'], {}), '(predixcan_results)\n', (2113, 2132), False, 'import pandas\n'), ((2141, 2208), 'metax.Utilities.save_dataframe', 'Utilities.save_dataframe', (['predixcan_results', 'predixcan_results_name'], {}), '(predixcan_results, predixcan_results_name)\n', (2165, 2208), False, 'from metax import Utilities\n'), ((1362, 1399), 'logging.log', 'logging.log', (['(9)', '"""%d Gene %s"""', 'i', 'gene'], {}), "(9, '%d Gene %s', i, gene)\n", (1373, 1399), False, 'import logging\n'), ((1424, 1473), 'metax.predixcan.Simulations.simulate', 'MultiPredixcanSimulations.simulate', (['gene', 'context'], {}), '(gene, context)\n', (1458, 1473), True, 'from metax.predixcan import Utilities as MultiPrediXcanUtilities, Simulations as MultiPredixcanSimulations\n'), ((1746, 1807), 'metax.predixcan.MultiPrediXcanAssociation.dataframe_from_results', 'MultiPrediXcanAssociation.dataframe_from_results', (['results', '_c'], {}), '(results, _c)\n', (1794, 1807), False, 'from metax.predixcan import MultiPrediXcanAssociation\n'), ((1299, 1327), 'logging.info', 'logging.info', (['"""Max runs met"""'], {}), "('Max runs met')\n", (1311, 1327), False, 'import logging\n'), ((1516, 1565), 'logging.log', 'logging.log', (['(9)', '"""%s could not be simulated"""', 'gene'], {}), "(9, '%s could not be simulated', gene)\n", (1527, 1565), False, 'import logging\n'), ((4587, 4607), 'logging.error', 'logging.error', (['e.msg'], {}), '(e.msg)\n', (4600, 4607), False, 'import logging\n')] |
import numpy as np
import skimage.transform
import pandas as pd
import cv2
from scipy.ndimage.interpolation import map_coordinates
# from scipy.ndimage.filters import gaussian_filter
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter
# Function to distort image
def elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):
    """Apply a random affine warp followed by a smoothed random displacement.

    alpha scales the displacement strength, sigma smooths the displacement
    field, and alpha_affine bounds the random jitter of the affine anchor
    points. The array must be 3-D (the meshgrid below indexes shape[2]).
    """
    if random_state is None:
        random_state = np.random.RandomState(None)
    shape = image.shape
    shape_size = shape[:2]  # (height, width), without the channel axis
    # Random affine: pick three anchor points around the image center and
    # perturb them uniformly by up to +/- alpha_affine pixels.
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    # warpAffine wants (width, height); reflect at borders to avoid black edges
    image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
    # Per-pixel displacements: uniform noise in [-1, 1), Gaussian-smoothed,
    # then scaled by alpha. No displacement along the channel axis (dz = 0).
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dz = np.zeros_like(dx)
    # Flattened sample coordinates (row, col, channel) for map_coordinates
    x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))
return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape) | [
"cv2.warpAffine",
"numpy.reshape",
"numpy.arange",
"scipy.ndimage.interpolation.map_coordinates",
"cv2.getAffineTransform",
"numpy.zeros_like",
"numpy.float32",
"numpy.random.RandomState"
] | [((587, 728), 'numpy.float32', 'np.float32', (['[center_square + square_size, [center_square[0] + square_size, \n center_square[1] - square_size], center_square - square_size]'], {}), '([center_square + square_size, [center_square[0] + square_size, \n center_square[1] - square_size], center_square - square_size])\n', (597, 728), True, 'import numpy as np\n'), ((826, 860), 'cv2.getAffineTransform', 'cv2.getAffineTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (848, 860), False, 'import cv2\n'), ((870, 947), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', 'shape_size[::-1]'], {'borderMode': 'cv2.BORDER_REFLECT_101'}), '(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)\n', (884, 947), False, 'import cv2\n'), ((1103, 1120), 'numpy.zeros_like', 'np.zeros_like', (['dx'], {}), '(dx)\n', (1116, 1120), True, 'import numpy as np\n'), ((408, 435), 'numpy.random.RandomState', 'np.random.RandomState', (['None'], {}), '(None)\n', (429, 435), True, 'import numpy as np\n'), ((515, 537), 'numpy.float32', 'np.float32', (['shape_size'], {}), '(shape_size)\n', (525, 537), True, 'import numpy as np\n'), ((1145, 1164), 'numpy.arange', 'np.arange', (['shape[1]'], {}), '(shape[1])\n', (1154, 1164), True, 'import numpy as np\n'), ((1166, 1185), 'numpy.arange', 'np.arange', (['shape[0]'], {}), '(shape[0])\n', (1175, 1185), True, 'import numpy as np\n'), ((1187, 1206), 'numpy.arange', 'np.arange', (['shape[2]'], {}), '(shape[2])\n', (1196, 1206), True, 'import numpy as np\n'), ((1219, 1246), 'numpy.reshape', 'np.reshape', (['(y + dy)', '(-1, 1)'], {}), '(y + dy, (-1, 1))\n', (1229, 1246), True, 'import numpy as np\n'), ((1246, 1273), 'numpy.reshape', 'np.reshape', (['(x + dx)', '(-1, 1)'], {}), '(x + dx, (-1, 1))\n', (1256, 1273), True, 'import numpy as np\n'), ((1273, 1295), 'numpy.reshape', 'np.reshape', (['z', '(-1, 1)'], {}), '(z, (-1, 1))\n', (1283, 1295), True, 'import numpy as np\n'), ((1305, 1361), 'scipy.ndimage.interpolation.map_coordinates', 
'map_coordinates', (['image', 'indices'], {'order': '(1)', 'mode': '"""reflect"""'}), "(image, indices, order=1, mode='reflect')\n", (1320, 1361), False, 'from scipy.ndimage.interpolation import map_coordinates\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 19, 2018
@author: <NAME>
This contains functions to calculate recombination rates. More types of recombination will be
added later.
"""
import numpy as np
from numba import jit
class Recombo():
    """Container for recombination-rate arrays and their computation."""

    def __init__(self, params):
        # One slot per grid cell; rates are filled from index 1 onward.
        self.R_Langevin = np.zeros(params.num_cell)

    # NOTE(review): the @jit decorator was dropped. numba cannot compile an
    # instance method in nopython mode (untyped `self`), and the object-mode
    # fallback it previously triggered is deprecated/removed in current
    # numba, so the decorator produced warnings (or errors) with no speedup.
    def compute_R_Langevin(self, R_Langevin, n, p, N, k_rec, n1, p1):
        '''
        Computes the bimolecular Langevin recombination rate.

        Inputs:
            R_Langevin: preallocated output array; passed explicitly (rather
                than read from self) for speed. Mutated in place.
            n: electron density
            p: hole density
            N: density of states scaling factor
            k_rec: recombination coefficient
            n1: N_LUMO*exp(-(E_LUMO - Et)/(k_B T)), electrons in the LUMO band
                when the electron quasi-Fermi energy equals the trap energy Et
            p1: N_HOMO*exp(-(Et - E_HOMO)/(k_B T)), holes in the HOMO band
                when the hole quasi-Fermi energy equals Et

        Output: R_Langevin recombination rate array, indexed from 1.
        '''
        R_Langevin[1:] = k_rec*(N*N*n[1:]*p[1:] - n1*p1)

        # Negative recombination values are unphysical: clamp them to zero
        # in place. (The previous `for val in R_Langevin: val = 0` loop only
        # rebound the loop variable and never modified the array.)
        R_Langevin[R_Langevin < 0] = 0
return R_Langevin | [
"numpy.zeros"
] | [((308, 333), 'numpy.zeros', 'np.zeros', (['params.num_cell'], {}), '(params.num_cell)\n', (316, 333), True, 'import numpy as np\n')] |
import math
from typing import Any, Callable, Dict, Iterator, List
import numpy as np
from toolz import itertoolz
def get_cosine_learning_rates(lr_min: float, lr_max: float, f: float, N: int):
    """Build a cosine-annealed learning-rate schedule of frequency `f`.

    Produces `N` values in `[lr_min, lr_max]`: step `i` gets weight
    `0.5 * (1 + cos(2*pi*f*i/N))`, so the schedule starts at `lr_max` and
    oscillates `f` full cycles over the `N` steps.
    """
    span = lr_max - lr_min

    def _lr_at(step: int) -> float:
        # Cosine weight in [0, 1] at fractional frequency f * step / N.
        weight = 0.5 * (1 + math.cos(2 * math.pi * (f * step / N)))
        return lr_min + weight * span

    return [_lr_at(step) for step in range(N)]
def batchify(
    data: np.ndarray, batch_size: int, func: Callable[[np.ndarray], np.ndarray] = None
) -> Iterator[np.ndarray]:
    """Yield successive batches of `data`, each optionally passed through `func`.

    Args:
        data (np.ndarray): NumPy array of items to batchify.
        batch_size (int): Batch size; must be between 1 and `len(data)`.
        func (Callable[[np.ndarray], np.ndarray], optional): Transform applied
            to every emitted batch. Defaults to the identity.

    Returns:
        Iterator[np.ndarray]: Generator over the (transformed) batches; the
        last batch may be shorter than `batch_size`.

    Raises:
        ValueError: if `batch_size` is not an int in `[1, len(data)]`.
    """
    size_ok = isinstance(batch_size, int) and 1 <= batch_size <= len(data)
    if not size_ok:
        raise ValueError(f"Batch size must be an int in [1, {data.shape[0]}].")
    transform = (lambda chunk: chunk) if func is None else func
    total = len(data)
    start = 0
    while start < total:
        yield transform(data[start : min(start + batch_size, total)])
        start += batch_size
def split_data(
    data: List[Any], train_f: float, test_f: float, shuffle: bool = False
) -> Dict[str, List[Any]]:
    """Split `data` into `train` / `test` / `valid` partitions.

    Sizes are `int(len(data) * train_f)` and `int(len(data) * test_f)` (i.e.
    truncated), and `valid` receives everything left over, so the three parts
    always cover `data` exactly once. If `shuffle` is True a random
    permutation of `data` is split instead.

    Args:
        data (List[Any]): Any collection of items to be split.
        train_f (float): Train size fraction (between 0 and 1).
        test_f (float): Test size fraction (between 0 and 1).
        shuffle (bool): Whether to split a random permutation of `data`.

    Returns:
        Dict[str, List[Any]]: Keys are {train, test, valid} with the splits.
    """
    # stdlib replacement for toolz.itertoolz.take: identical take-from-the-
    # front semantics, no third-party dependency on this code path.
    from itertools import islice

    n = len(data)
    # A generator keeps its offset internally, so consecutive islice calls
    # consume consecutive, non-overlapping prefixes.
    if shuffle:
        rand_idx = np.random.permutation(n)
        gen = (data[i] for i in rand_idx)
    else:
        gen = (x for x in data)
    return {
        "train": list(islice(gen, int(n * train_f))),  # take first
        "test": list(islice(gen, int(n * test_f))),  # take next
        "valid": list(gen),  # take remaining
    }
if __name__ == "__main__":
    # Demo: print a 100-step schedule sweeping two cosine cycles (f=2)
    # between lr_max=1e-3 and lr_min=1e-5.
    lrs = get_cosine_learning_rates(lr_min=1e-5, lr_max=1e-3, f=2, N=100)
    for lr in lrs:
        print(f"{lr:.7f}")
| [
"math.cos",
"numpy.random.permutation"
] | [((2382, 2406), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (2403, 2406), True, 'import numpy as np\n'), ((452, 480), 'math.cos', 'math.cos', (['(2 * math.pi * freq)'], {}), '(2 * math.pi * freq)\n', (460, 480), False, 'import math\n')] |
# -*- coding: utf-8 -*-
"""
Functions for estimating electricity prices, eeg levies, remunerations and other components, based on customer type and annual demand
@author: Abuzar and Shakhawat
"""
from typing import ValuesView
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
from scipy.interpolate import InterpolatedUnivariateSpline
def calculate_mean_price(customer_type, val_yearly_demand):
"""
Parameters
----------
customer_type : Type of customer, differentiated between household and industrial customers
total_demand : yearly electricity demand for household customers in KWh/y and for industrial customers in MWh/y
Returns
-------
mean_price: average price for the customer for the next year in cents/kWh
"""
    def plotting(x, y, title, x_label, y_label, name_plot):
        """Plot y over x with labeled axes and save the figure to `name_plot`."""
        fig = plt.figure()
        values = x  # tick labels reuse the x values themselves
        plt.plot (x,y)
        plt.title(title)
        plt.xlabel(x_label)
        plt.ylabel(y_label)
        plt.xticks(x,values)
        # second xticks call only rotates the labels set above
        plt.xticks(rotation = 45)
        fig.savefig(name_plot, dpi=fig.dpi)
    def haupt_tarif(data):
        """Return the HT (peak-tariff) price factor from an hourly price file.

        Peak hours are 8-19 on weekdays; the factor is the mean peak price
        divided by the overall yearly mean price.
        """
        #haupt_tarrif = df_with_data
        df_with_data = pd.read_excel(data)
        yearly_mean = df_with_data.price.mean()
        haupt_tarrif = df_with_data[df_with_data["hour"].isin([8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]) & df_with_data["Day"].isin(['Wednesday', 'Thursday', 'Friday', 'Monday', 'Tuesday'])]
        # NOTE(review): the drop below mutates only the local df_with_data,
        # which is not used again after this point -- likely dead code.
        cond = df_with_data['hour'].isin(haupt_tarrif['hour'])
        df_with_data.drop(haupt_tarrif[cond].index, inplace = True)
        ht_factor = haupt_tarrif.price.mean()/yearly_mean
        return ht_factor
    def neben_tarif(data):
        """Return the NT (off-peak-tariff) price factor from an hourly price file.

        Off-peak hours are 1-7 and 20-24 on weekdays plus all weekend hours;
        the factor is the mean off-peak price divided by the yearly mean.
        """
        #neben_tarrif = df_with_data
        df_with_data = pd.read_excel(data)
        yearly_mean = df_with_data.price.mean()
        neben_tarrif = df_with_data[(df_with_data["hour"].isin([1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23, 24]) & df_with_data["Day"].isin(['Wednesday', 'Thursday', 'Friday', 'Monday', 'Tuesday'])) |(df_with_data["Day"].isin(['Saturday', 'Sunday']))]
        # NOTE(review): .head() returns a preview that is discarded -- no effect.
        neben_tarrif.head()
        # NOTE(review): the drop below mutates only the local df_with_data,
        # which is not used again after this point -- likely dead code.
        cond = df_with_data['hour'].isin(neben_tarrif['hour'])
        df_with_data.drop(neben_tarrif[cond].index, inplace = True)
        nt_factor = neben_tarrif.price.mean()/yearly_mean
        return nt_factor
ht_factor = haupt_tarif("ht_nt_price.xlsx")
nt_factor = neben_tarif("ht_nt_price.xlsx")
#industrial 2000 - 20000 MWh
industrie_prices_without_VAT = pd.read_excel(r'Energiepreisentwicklung.xlsx',sheet_name='5.8.3 Strom - € - Industrie', skiprows = 5, nrows = 26, index_col = 0)
industrie_prices_without_VAT = industrie_prices_without_VAT.iloc[:,0]
industrie_prices_without_VAT = industrie_prices_without_VAT.reset_index()
industrie_prices_without_VAT["index"]= industrie_prices_without_VAT["index"].str.slice(start = 5)
industrie_prices_without_VAT.columns = ["year","price"]
industrie_prices_without_VAT = industrie_prices_without_VAT.set_index("year")
industrie_prices_without_VAT.index = industrie_prices_without_VAT.index.astype(str)
industrie_prices_without_VAT.index = pd.to_datetime(industrie_prices_without_VAT.index, errors='ignore')
industrie_prices_without_VAT = industrie_prices_without_VAT.astype(float)
industrie_prices_without_VAT = industrie_prices_without_VAT.resample('12M').mean()
industrie_prices_without_VAT.index = industrie_prices_without_VAT.index.astype(str)
industrie_prices_without_VAT.index= industrie_prices_without_VAT.index.str.slice(start = 0, stop = -6)
ht_industrie_prices_without_VAT = industrie_prices_without_VAT.price * ht_factor
nt_industrie_prices_without_VAT = industrie_prices_without_VAT.price * nt_factor
ht_industrie_prices_without_VAT = ht_industrie_prices_without_VAT.reset_index()
nt_industrie_prices_without_VAT = nt_industrie_prices_without_VAT.reset_index()
industrie_prices_without_VAT = industrie_prices_without_VAT.reset_index()
industrie_prices_without_VAT = industrie_prices_without_VAT[industrie_prices_without_VAT.year >= str(2016)]
#industrial prices > 150000 MWh/y
v_big_industrial_prices_BDEW = {'year': range(2019,2021), 'price': [3.77,3.05]}
v_big_industrial_prices_BDEW = pd.DataFrame(data=v_big_industrial_prices_BDEW)
v_big_industrial_prices_BDEW
#industrial prices between 70000-150000 MWh/y
big_industrial_prices_BDEW = {'year': range(2016,2021), 'price': [8.37, 9.96, 8.96, 9.28, 10.07]}
big_industrial_prices_BDEW = pd.DataFrame(data=big_industrial_prices_BDEW)
big_industrial_prices_BDEW
#industrial prices between 20000-70000 MWh/y
mid_industrie_prices = pd.read_excel(r'mid_size_industrial_prices.xlsx')
mid_industrie_prices.columns = ['year', 'price']
mid_industrie_prices
#household electricity prices between 2500-5000 KWh/y
household_prices_without_VAT = pd.read_excel(r'Energiepreisentwicklung.xlsx',sheet_name='5.8.2 Strom - € - Haushalte', skiprows = 5, nrows = 26, index_col = 0)
household_prices_without_VAT = household_prices_without_VAT.iloc[:,0]
household_prices_without_VAT = household_prices_without_VAT.reset_index()
household_prices_without_VAT["index"]= household_prices_without_VAT["index"].str.slice(start = 5)
household_prices_without_VAT.columns = ["year","price"]
household_prices_without_VAT = household_prices_without_VAT.set_index("year")
household_prices_without_VAT.index = household_prices_without_VAT.index.astype(str)
household_prices_without_VAT.index = pd.to_datetime(household_prices_without_VAT.index, errors='ignore')
household_prices_without_VAT = household_prices_without_VAT.astype(float)
household_prices_without_VAT = household_prices_without_VAT.resample('12M').mean()
household_prices_without_VAT.index = household_prices_without_VAT.index.astype(str)
household_prices_without_VAT.index= household_prices_without_VAT.index.str.slice(start = 0, stop = -6)
household_prices_without_VAT = household_prices_without_VAT[6:].reset_index()
household_prices_without_VAT = household_prices_without_VAT[household_prices_without_VAT.year >= str(2016)]
household_prices_without_VAT
if ((customer_type == 0) & ((val_yearly_demand >= 2500) & (val_yearly_demand <= 5000))):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
# ht_industrie_prices_without_VAT = household_prices
ht_household_prices_without_VAT = household_prices_without_VAT
ht_household_prices_without_VAT["year"] = ht_household_prices_without_VAT["year"].astype(int)
ht_year = ht_household_prices_without_VAT["year"]
ht_price = ht_household_prices_without_VAT["price"] * ht_factor
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
nt_household_prices_without_VAT = household_prices_without_VAT
nt_household_prices_without_VAT["year"] = nt_household_prices_without_VAT["year"].astype(int)
nt_year = nt_household_prices_without_VAT["year"]
nt_price = nt_household_prices_without_VAT["price"] * nt_factor
nt_new_year = np.append(nt_year, 2021)
nt_new_price = np.append(nt_price, val2)
print(nt_new_year)
print(nt_new_price)
plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val_ht_nt == 0):
val1 = input("Enter yearly mean price for electricity: ")
val1 = float(val1)
yt_household_prices_without_VAT = household_prices_without_VAT
yt_household_prices_without_VAT["year"] = yt_household_prices_without_VAT["year"].astype(int)
yt_year = yt_household_prices_without_VAT["year"]
yt_price = yt_household_prices_without_VAT["price"]
yt_new_year = np.append(yt_year, 2021)
yt_new_price = np.append(yt_price, (val1))
print(yt_new_year)
print(yt_new_price)
plotting(yt_new_year, yt_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val == 1):
yt_household_prices_without_VAT = household_prices_without_VAT
yt_household_prices_without_VAT["year"] = yt_household_prices_without_VAT["year"].astype(int)
yt_year = yt_household_prices_without_VAT["year"]
yt_price = yt_household_prices_without_VAT["price"]
f = interpolate.interp1d(yt_year, yt_price, fill_value = "extrapolate")
p_2021 = f(2021)
yt_new_year = np.append(yt_year, 2021)
yt_new_price = np.append(yt_price, (f(2021)))
# ht_new_price = ht_new_price * ht_factor
print(yt_new_year)
print(yt_new_price)
plotting(yt_new_year, yt_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif ((customer_type == 1) & (val_yearly_demand > 0) & (val_yearly_demand < 2000)):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
ht_household_prices_without_VAT = household_prices_without_VAT
ht_household_prices_without_VAT["year"] = ht_household_prices_without_VAT["year"].astype(int)
ht_year = ht_household_prices_without_VAT["year"]
ht_price = ht_household_prices_without_VAT["price"] * ht_factor
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
nt_industrie_prices_without_VAT = household_prices_without_VAT
nt_industrie_prices_without_VAT["year"] = nt_industrie_prices_without_VAT["year"].astype(int)
nt_year = nt_industrie_prices_without_VAT["year"]
nt_price = nt_industrie_prices_without_VAT["price"] * nt_factor
nt_new_year = np.append(nt_year, 2021)
nt_new_price = np.append(nt_price, val2)
print(nt_new_year)
print(nt_new_price)
plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val_ht_nt == 0):
val1 = input("Enter yearly mean price for electricity: ")
val1 = float(val1)
ht_industrie_prices_without_VAT = household_prices_without_VAT
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (val1))
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val == 1):
# val1 = input("Enter your preferred price: ")
# val1 = float(val1)
ht_industrie_prices_without_VAT = household_prices_without_VAT
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
f = interpolate.interp1d(ht_year, ht_price, fill_value = "extrapolate")
p_2021 = f(2021)
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (f(2021)))
ht_new_price = ht_new_price
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif ((customer_type == 1) & (val_yearly_demand >= 2000) & (val_yearly_demand <= 20000)):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"] * ht_factor
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
nt_industrie_prices_without_VAT["year"] = nt_industrie_prices_without_VAT["year"].astype(int)
nt_year = nt_industrie_prices_without_VAT["year"]
nt_price = nt_industrie_prices_without_VAT["price"]
nt_new_year = np.append(nt_year, 2021)
nt_new_price = np.append(nt_price * nt_factor, val2)
print(nt_new_year)
print(nt_new_price)
plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val_ht_nt == 0):
val1 = input("Enter yearly mean price for electricity: ")
val1 = float(val1)
ht_industrie_prices_without_VAT = industrie_prices_without_VAT
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (val1))
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val == 1):
# val1 = input("Enter your preferred price: ")
# val1 = float(val1)
ht_industrie_prices_without_VAT = industrie_prices_without_VAT
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
f = interpolate.interp1d(ht_year, ht_price, fill_value = "extrapolate")
p_2021 = f(2021)
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (f(2021)))
ht_new_price = ht_new_price
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif ((customer_type == 1) & (val_yearly_demand > 20000) & (val_yearly_demand <= 70000)):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
ht_industrie_prices_without_VAT = mid_industrie_prices
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"] * ht_factor
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
nt_industrie_prices_without_VAT = mid_industrie_prices
nt_industrie_prices_without_VAT["year"] = nt_industrie_prices_without_VAT["year"].astype(int)
nt_year = nt_industrie_prices_without_VAT["year"]
nt_price = nt_industrie_prices_without_VAT["price"] * nt_factor
nt_new_year = np.append(nt_year, 2021)
nt_new_price = np.append(nt_price, val2)
print(nt_new_year)
print(nt_new_price)
plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val_ht_nt == 0):
val1 = input("Enter yearly mean price for electricity: ")
val1 = float(val1)
ht_industrie_prices_without_VAT = mid_industrie_prices
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (val1))
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val == 1):
ht_industrie_prices_without_VAT = mid_industrie_prices
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
f = interpolate.interp1d(ht_year, ht_price, fill_value = "extrapolate")
p_2021 = f(2021)
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (f(2021)))
ht_new_price = ht_new_price
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif ((customer_type == 1) & (val_yearly_demand > 70000) & (val_yearly_demand <= 150000)):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
ht_industrie_prices_without_VAT = big_industrial_prices_BDEW
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"] * ht_factor
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
nt_industrie_prices_without_VAT = big_industrial_prices_BDEW
nt_industrie_prices_without_VAT["year"] = nt_industrie_prices_without_VAT["year"].astype(int)
nt_year = nt_industrie_prices_without_VAT["year"]
nt_price = nt_industrie_prices_without_VAT["price"] * nt_factor
nt_new_year = np.append(nt_year, 2021)
nt_new_price = np.append(nt_price, val2)
print(nt_new_year)
print(nt_new_price)
plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val_ht_nt == 0):
val1 = input("Enter yearly mean price for electricity: ")
val1 = float(val1)
ht_industrie_prices_without_VAT = big_industrial_prices_BDEW
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
#nt_industrie_prices_without_VAT = big_industrial_prices_BDEW
#nt_industrie_prices_without_VAT["year"] = nt_industrie_prices_without_VAT["year"].astype(int)
#nt_year = nt_industrie_prices_without_VAT["year"]
#nt_price = nt_industrie_prices_without_VAT["price"] * nt_factor
#nt_new_year = np.append(nt_year, 2021)
#nt_new_price = np.append(nt_price, (val1))
#print(nt_new_year)
#print(nt_new_price)
# plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
#plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val == 1):
ht_industrie_prices_without_VAT = big_industrial_prices_BDEW
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
f = interpolate.interp1d(ht_year, ht_price, fill_value = "extrapolate")
p_2021 = f(2021)
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (f(2021)))
ht_new_price = ht_new_price
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif ((customer_type == 1) & (val_yearly_demand > 150000)):
print("Do you already know your electricty price?")
#print("Yes = 1 / No = 2")
print("Yes = 0 / No = 1")
#choose = 0
val = input("Enter your value: ")
val = int(val)
if (val == 0):
print("Do you have a fixed electricity price or HT/NT price structure?")
val_ht_nt = input("Enter 0 (zero) for yearly mean price and Enter 1 for HT/NT price structure: ")
val_ht_nt = int(val_ht_nt)
if (val_ht_nt == 1):
val1 = input("Enter HT value: ")
val1 = float(val1)
val2 = input("Enter NT value: ")
val2 = float(val2)
ht_industrie_prices_without_VAT = v_big_industrial_prices_BDEW
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"] * ht_factor
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
nt_industrie_prices_without_VAT = v_big_industrial_prices_BDEW
nt_industrie_prices_without_VAT["year"] = nt_industrie_prices_without_VAT["year"].astype(int)
nt_year = nt_industrie_prices_without_VAT["year"]
nt_price = nt_industrie_prices_without_VAT["price"] * nt_factor
nt_new_year = np.append(nt_year, 2021)
nt_new_price = np.append(nt_price, val2)
print(nt_new_year)
print(nt_new_price)
plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val_ht_nt == 0):
val1 = input("Enter yearly mean price for electricity: ")
val1 = float(val1)
ht_industrie_prices_without_VAT = v_big_industrial_prices_BDEW
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, val1)
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
#nt_industrie_prices_without_VAT = v_big_industrial_prices_BDEW
#nt_industrie_prices_without_VAT["year"] = nt_industrie_prices_without_VAT["year"].astype(int)
#nt_year = nt_industrie_prices_without_VAT["year"]
#nt_price = nt_industrie_prices_without_VAT["price"] * nt_factor
#nt_new_year = np.append(nt_year, 2021)
#nt_new_price = np.append(nt_price, (val1*nt_factor))
#print(nt_new_year)
#print(nt_new_price)
# plotting(ht_new_year, ht_new_price, "HT Price", "Year", "Price", "images/HT Price.png")
#plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
elif (val == 1):
ht_industrie_prices_without_VAT = v_big_industrial_prices_BDEW
ht_industrie_prices_without_VAT["year"] = ht_industrie_prices_without_VAT["year"].astype(int)
ht_year = ht_industrie_prices_without_VAT["year"]
ht_price = ht_industrie_prices_without_VAT["price"]
f = interpolate.interp1d(ht_year, ht_price, fill_value = "extrapolate")
p_2021 = f(2021)
ht_new_year = np.append(ht_year, 2021)
ht_new_price = np.append(ht_price, (f(2021)))
ht_new_price = ht_new_price
print(ht_new_year)
print(ht_new_price)
plotting(ht_new_year, ht_new_price, "Price", "Year", "Price", "images/Price.png")
# plotting(nt_new_year, nt_new_price, "NT Price", "Year", "Price", "images/NT Price.png")
# Interactive entry point: ask for the customer category and the yearly
# electricity demand, then run the price estimation for that combination.
print("Which type of Customer category you have?")
print("Enter 0 (zero) for Household customers and 1 (one) for Industrial customers.")
customer_category = int(input("Please enter your value: "))
if customer_category == 0:
    # Household customers state their demand in kWh per year.
    print("What is your yearly electricty demand (in KWh/y)?")
    yearly_demand = float(input("Please enter your value: "))
elif customer_category == 1:
    # Industrial customers state their demand in MWh per year.
    yearly_demand = float(input("What is your yearly electricty demand in (MWh/y)? "))
calculate_mean_price(customer_category, yearly_demand)
| [
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"scipy.interpolate.interp1d",
"numpy.append",
"matplotlib.pyplot.figure",
"pandas.read_excel",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"pandas.to_datetime"
] | [((2519, 2646), 'pandas.read_excel', 'pd.read_excel', (['"""Energiepreisentwicklung.xlsx"""'], {'sheet_name': '"""5.8.3 Strom - € - Industrie"""', 'skiprows': '(5)', 'nrows': '(26)', 'index_col': '(0)'}), "('Energiepreisentwicklung.xlsx', sheet_name=\n '5.8.3 Strom - € - Industrie', skiprows=5, nrows=26, index_col=0)\n", (2532, 2646), True, 'import pandas as pd\n'), ((3174, 3241), 'pandas.to_datetime', 'pd.to_datetime', (['industrie_prices_without_VAT.index'], {'errors': '"""ignore"""'}), "(industrie_prices_without_VAT.index, errors='ignore')\n", (3188, 3241), True, 'import pandas as pd\n'), ((4289, 4336), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'v_big_industrial_prices_BDEW'}), '(data=v_big_industrial_prices_BDEW)\n', (4301, 4336), True, 'import pandas as pd\n'), ((4556, 4601), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'big_industrial_prices_BDEW'}), '(data=big_industrial_prices_BDEW)\n', (4568, 4601), True, 'import pandas as pd\n'), ((4711, 4759), 'pandas.read_excel', 'pd.read_excel', (['"""mid_size_industrial_prices.xlsx"""'], {}), "('mid_size_industrial_prices.xlsx')\n", (4724, 4759), True, 'import pandas as pd\n'), ((4939, 5066), 'pandas.read_excel', 'pd.read_excel', (['"""Energiepreisentwicklung.xlsx"""'], {'sheet_name': '"""5.8.2 Strom - € - Haushalte"""', 'skiprows': '(5)', 'nrows': '(26)', 'index_col': '(0)'}), "('Energiepreisentwicklung.xlsx', sheet_name=\n '5.8.2 Strom - € - Haushalte', skiprows=5, nrows=26, index_col=0)\n", (4952, 5066), True, 'import pandas as pd\n'), ((5594, 5661), 'pandas.to_datetime', 'pd.to_datetime', (['household_prices_without_VAT.index'], {'errors': '"""ignore"""'}), "(household_prices_without_VAT.index, errors='ignore')\n", (5608, 5661), True, 'import pandas as pd\n'), ((898, 910), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (908, 910), True, 'import matplotlib.pyplot as plt\n'), ((938, 952), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (946, 952), True, 'import 
matplotlib.pyplot as plt\n'), ((961, 977), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (970, 977), True, 'import matplotlib.pyplot as plt\n'), ((986, 1005), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (996, 1005), True, 'import matplotlib.pyplot as plt\n'), ((1014, 1033), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (1024, 1033), True, 'import matplotlib.pyplot as plt\n'), ((1042, 1063), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', 'values'], {}), '(x, values)\n', (1052, 1063), True, 'import matplotlib.pyplot as plt\n'), ((1071, 1094), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (1081, 1094), True, 'import matplotlib.pyplot as plt\n'), ((1231, 1250), 'pandas.read_excel', 'pd.read_excel', (['data'], {}), '(data)\n', (1244, 1250), True, 'import pandas as pd\n'), ((1797, 1816), 'pandas.read_excel', 'pd.read_excel', (['data'], {}), '(data)\n', (1810, 1816), True, 'import pandas as pd\n'), ((7468, 7492), 'numpy.append', 'np.append', (['ht_year', '(2021)'], {}), '(ht_year, 2021)\n', (7477, 7492), True, 'import numpy as np\n'), ((7524, 7549), 'numpy.append', 'np.append', (['ht_price', 'val1'], {}), '(ht_price, val1)\n', (7533, 7549), True, 'import numpy as np\n'), ((8198, 8222), 'numpy.append', 'np.append', (['nt_year', '(2021)'], {}), '(nt_year, 2021)\n', (8207, 8222), True, 'import numpy as np\n'), ((8254, 8279), 'numpy.append', 'np.append', (['nt_price', 'val2'], {}), '(nt_price, val2)\n', (8263, 8279), True, 'import numpy as np\n'), ((9662, 9727), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['yt_year', 'yt_price'], {'fill_value': '"""extrapolate"""'}), "(yt_year, yt_price, fill_value='extrapolate')\n", (9682, 9727), False, 'from scipy import interpolate\n'), ((9786, 9810), 'numpy.append', 'np.append', (['yt_year', '(2021)'], {}), '(yt_year, 2021)\n', (9795, 9810), True, 'import numpy as np\n'), ((8954, 8978), 
'numpy.append', 'np.append', (['yt_year', '(2021)'], {}), '(yt_year, 2021)\n', (8963, 8978), True, 'import numpy as np\n'), ((9010, 9035), 'numpy.append', 'np.append', (['yt_price', 'val1'], {}), '(yt_price, val1)\n', (9019, 9035), True, 'import numpy as np\n'), ((11309, 11333), 'numpy.append', 'np.append', (['ht_year', '(2021)'], {}), '(ht_year, 2021)\n', (11318, 11333), True, 'import numpy as np\n'), ((11365, 11390), 'numpy.append', 'np.append', (['ht_price', 'val1'], {}), '(ht_price, val1)\n', (11374, 11390), True, 'import numpy as np\n'), ((12039, 12063), 'numpy.append', 'np.append', (['nt_year', '(2021)'], {}), '(nt_year, 2021)\n', (12048, 12063), True, 'import numpy as np\n'), ((12095, 12120), 'numpy.append', 'np.append', (['nt_price', 'val2'], {}), '(nt_price, val2)\n', (12104, 12120), True, 'import numpy as np\n'), ((13597, 13662), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['ht_year', 'ht_price'], {'fill_value': '"""extrapolate"""'}), "(ht_year, ht_price, fill_value='extrapolate')\n", (13617, 13662), False, 'from scipy import interpolate\n'), ((13721, 13745), 'numpy.append', 'np.append', (['ht_year', '(2021)'], {}), '(ht_year, 2021)\n', (13730, 13745), True, 'import numpy as np\n'), ((12796, 12820), 'numpy.append', 'np.append', (['ht_year', '(2021)'], {}), '(ht_year, 2021)\n', (12805, 12820), True, 'import numpy as np\n'), ((12852, 12877), 'numpy.append', 'np.append', (['ht_price', 'val1'], {}), '(ht_price, val1)\n', (12861, 12877), True, 'import numpy as np\n'), ((15157, 15181), 'numpy.append', 'np.append', (['ht_year', '(2021)'], {}), '(ht_year, 2021)\n', (15166, 15181), True, 'import numpy as np\n'), ((15213, 15238), 'numpy.append', 'np.append', (['ht_price', 'val1'], {}), '(ht_price, val1)\n', (15222, 15238), True, 'import numpy as np\n'), ((15796, 15820), 'numpy.append', 'np.append', (['nt_year', '(2021)'], {}), '(nt_year, 2021)\n', (15805, 15820), True, 'import numpy as np\n'), ((15852, 15889), 'numpy.append', 'np.append', (['(nt_price * 
nt_factor)', 'val2'], {}), '(nt_price * nt_factor, val2)\n', (15861, 15889), True, 'import numpy as np\n'), ((17366, 17431), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['ht_year', 'ht_price'], {'fill_value': '"""extrapolate"""'}), "(ht_year, ht_price, fill_value='extrapolate')\n", (17386, 17431), False, 'from scipy import interpolate\n'), ((17490, 17514), 'numpy.append', 'np.append', (['ht_year', '(2021)'], {}), '(ht_year, 2021)\n', (17499, 17514), True, 'import numpy as np\n'), ((16565, 16589), 'numpy.append', 'np.append', (['ht_year', '(2021)'], {}), '(ht_year, 2021)\n', (16574, 16589), True, 'import numpy as np\n'), ((16621, 16646), 'numpy.append', 'np.append', (['ht_price', 'val1'], {}), '(ht_price, val1)\n', (16630, 16646), True, 'import numpy as np\n'), ((18997, 19021), 'numpy.append', 'np.append', (['ht_year', '(2021)'], {}), '(ht_year, 2021)\n', (19006, 19021), True, 'import numpy as np\n'), ((19053, 19078), 'numpy.append', 'np.append', (['ht_price', 'val1'], {}), '(ht_price, val1)\n', (19062, 19078), True, 'import numpy as np\n'), ((19719, 19743), 'numpy.append', 'np.append', (['nt_year', '(2021)'], {}), '(nt_year, 2021)\n', (19728, 19743), True, 'import numpy as np\n'), ((19775, 19800), 'numpy.append', 'np.append', (['nt_price', 'val2'], {}), '(nt_price, val2)\n', (19784, 19800), True, 'import numpy as np\n'), ((21181, 21246), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['ht_year', 'ht_price'], {'fill_value': '"""extrapolate"""'}), "(ht_year, ht_price, fill_value='extrapolate')\n", (21201, 21246), False, 'from scipy import interpolate\n'), ((21305, 21329), 'numpy.append', 'np.append', (['ht_year', '(2021)'], {}), '(ht_year, 2021)\n', (21314, 21329), True, 'import numpy as np\n'), ((20467, 20491), 'numpy.append', 'np.append', (['ht_year', '(2021)'], {}), '(ht_year, 2021)\n', (20476, 20491), True, 'import numpy as np\n'), ((20523, 20548), 'numpy.append', 'np.append', (['ht_price', 'val1'], {}), '(ht_price, val1)\n', (20532, 20548), 
True, 'import numpy as np\n'), ((22820, 22844), 'numpy.append', 'np.append', (['ht_year', '(2021)'], {}), '(ht_year, 2021)\n', (22829, 22844), True, 'import numpy as np\n'), ((22876, 22901), 'numpy.append', 'np.append', (['ht_price', 'val1'], {}), '(ht_price, val1)\n', (22885, 22901), True, 'import numpy as np\n'), ((23548, 23572), 'numpy.append', 'np.append', (['nt_year', '(2021)'], {}), '(nt_year, 2021)\n', (23557, 23572), True, 'import numpy as np\n'), ((23604, 23629), 'numpy.append', 'np.append', (['nt_price', 'val2'], {}), '(nt_price, val2)\n', (23613, 23629), True, 'import numpy as np\n'), ((25753, 25818), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['ht_year', 'ht_price'], {'fill_value': '"""extrapolate"""'}), "(ht_year, ht_price, fill_value='extrapolate')\n", (25773, 25818), False, 'from scipy import interpolate\n'), ((25877, 25901), 'numpy.append', 'np.append', (['ht_year', '(2021)'], {}), '(ht_year, 2021)\n', (25886, 25901), True, 'import numpy as np\n'), ((24302, 24326), 'numpy.append', 'np.append', (['ht_year', '(2021)'], {}), '(ht_year, 2021)\n', (24311, 24326), True, 'import numpy as np\n'), ((24358, 24383), 'numpy.append', 'np.append', (['ht_price', 'val1'], {}), '(ht_price, val1)\n', (24367, 24383), True, 'import numpy as np\n'), ((27369, 27393), 'numpy.append', 'np.append', (['ht_year', '(2021)'], {}), '(ht_year, 2021)\n', (27378, 27393), True, 'import numpy as np\n'), ((27425, 27450), 'numpy.append', 'np.append', (['ht_price', 'val1'], {}), '(ht_price, val1)\n', (27434, 27450), True, 'import numpy as np\n'), ((28099, 28123), 'numpy.append', 'np.append', (['nt_year', '(2021)'], {}), '(nt_year, 2021)\n', (28108, 28123), True, 'import numpy as np\n'), ((28155, 28180), 'numpy.append', 'np.append', (['nt_price', 'val2'], {}), '(nt_price, val2)\n', (28164, 28180), True, 'import numpy as np\n'), ((30321, 30386), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['ht_year', 'ht_price'], {'fill_value': '"""extrapolate"""'}), "(ht_year, 
ht_price, fill_value='extrapolate')\n", (30341, 30386), False, 'from scipy import interpolate\n'), ((30445, 30469), 'numpy.append', 'np.append', (['ht_year', '(2021)'], {}), '(ht_year, 2021)\n', (30454, 30469), True, 'import numpy as np\n'), ((28856, 28880), 'numpy.append', 'np.append', (['ht_year', '(2021)'], {}), '(ht_year, 2021)\n', (28865, 28880), True, 'import numpy as np\n'), ((28912, 28937), 'numpy.append', 'np.append', (['ht_price', 'val1'], {}), '(ht_price, val1)\n', (28921, 28937), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import tensorflow as tf
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Conv2D
from keras.layers import Flatten
from keras.layers import MaxPooling2D
from keras.layers import Dropout
from keras.backend import tensorflow_backend
from keras.utils.np_utils import to_categorical
from keras.losses import categorical_crossentropy
from keras.optimizers import Adam
def CNN_Model(input_shape, output_dim):
    """Assemble a small CNN classifier as a Keras functional ``Model``.

    Architecture: two 3x3 convolutions (32 then 64 filters, ReLU),
    2x2 max pooling, dropout, a 128-unit dense layer, dropout again,
    and a softmax output over ``output_dim`` classes.
    """
    net_input = Input(shape=input_shape)
    layer_stack = (
        Conv2D(filters=32, kernel_size=3, activation='relu'),
        Conv2D(filters=64, kernel_size=3, activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(rate=0.25),
        Flatten(),
        Dense(units=128, activation='relu'),
        Dropout(rate=0.25),
        Dense(units=output_dim, activation='softmax'),
    )
    # Thread the input tensor through every layer in order.
    net_output = net_input
    for layer in layer_stack:
        net_output = layer(net_output)
    return Model(net_input, net_output)
# main
if __name__ == "__main__":
    # GPU configuration: grow GPU memory on demand instead of reserving it all.
    config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    session = tf.Session(config=config)
    tensorflow_backend.set_session(session)
    # Fix random seeds so weight init and shuffling are reproducible.
    np.random.seed(1)
    tf.set_random_seed(1)
    # Dataset / model parameters (MNIST: 28x28 grayscale images, 10 digit classes).
    n_classes = 10
    n_channels = 1
    img_width = 28
    img_height = 28
    # Load the dataset (downloads MNIST on first use).
    print('Loading the dataset...')
    from keras.datasets import mnist as dataset
    (X_train, Y_train_int), (X_test, Y_test_int) = dataset.load_data()
    # Insert a channel axis, move it last (NHWC layout), scale pixels to [0, 1].
    X_train = X_train[:, np.newaxis].transpose((0, 2, 3, 1)).astype('float32') / 255.0
    X_test = X_test[:, np.newaxis].transpose((0, 2, 3, 1)).astype('float32') / 255.0
    # One-hot encode integer labels for the softmax / cross-entropy loss.
    Y_train = to_categorical(Y_train_int, num_classes=n_classes)
    Y_test = to_categorical(Y_test_int, num_classes=n_classes)
    # Training.
    print('Train a NN model...')
    ## define the model and optimizer
    input_shape = (img_width, img_height, n_channels)
    model = CNN_Model(input_shape, n_classes)
    optimizer = Adam()
    model.compile(optimizer=optimizer, loss=categorical_crossentropy, metrics=['accuracy'])
    ## train for 2 epochs with mini-batches of 100 samples
    history = model.fit(X_train, Y_train, batch_size=100,
                        epochs=2, shuffle=True, initial_epoch=0)
    # Evaluation: argmax over class probabilities yields the predicted labels.
    Y_train_pred = model.predict(X_train)
    Y_train_pred = Y_train_pred.argmax(axis=1)
    Y_test_pred = model.predict(X_test)
    Y_test_pred = Y_test_pred.argmax(axis=1)
    print('Training score for a NN classifier: \t{0}'.format(
        metrics.accuracy_score(Y_train_int, Y_train_pred)))
    print('Test score for a NN classifier: \t{0}'.format(
        metrics.accuracy_score(Y_test_int, Y_test_pred)))
    print('Training classification report for a NN classifier\n{0}\n'.format(
        metrics.classification_report(Y_train_int, Y_train_pred)))
    print('Test classification report for a NN classifier\n{0}\n'.format(
        metrics.classification_report(Y_test_int, Y_test_pred)))
| [
"keras.optimizers.Adam",
"keras.layers.Conv2D",
"keras.backend.tensorflow_backend.set_session",
"keras.layers.Flatten",
"keras.datasets.mnist.load_data",
"keras.layers.MaxPooling2D",
"sklearn.metrics.classification_report",
"tensorflow.Session",
"keras.layers.Input",
"keras.utils.np_utils.to_categ... | [((637, 661), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (642, 661), False, 'from keras.layers import Input\n'), ((1073, 1088), 'keras.models.Model', 'Model', (['__x', '__y'], {}), '(__x, __y)\n', (1078, 1088), False, 'from keras.models import Model\n'), ((1237, 1262), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1247, 1262), True, 'import tensorflow as tf\n'), ((1267, 1306), 'keras.backend.tensorflow_backend.set_session', 'tensorflow_backend.set_session', (['session'], {}), '(session)\n', (1297, 1306), False, 'from keras.backend import tensorflow_backend\n'), ((1331, 1348), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1345, 1348), True, 'import numpy as np\n'), ((1353, 1374), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), '(1)\n', (1371, 1374), True, 'import tensorflow as tf\n'), ((1629, 1648), 'keras.datasets.mnist.load_data', 'dataset.load_data', ([], {}), '()\n', (1646, 1648), True, 'from keras.datasets import mnist as dataset\n'), ((1835, 1885), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['Y_train_int'], {'num_classes': 'n_classes'}), '(Y_train_int, num_classes=n_classes)\n', (1849, 1885), False, 'from keras.utils.np_utils import to_categorical\n'), ((1899, 1948), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['Y_test_int'], {'num_classes': 'n_classes'}), '(Y_test_int, num_classes=n_classes)\n', (1913, 1948), False, 'from keras.utils.np_utils import to_categorical\n'), ((2128, 2134), 'keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (2132, 2134), False, 'from keras.optimizers import Adam\n'), ((672, 724), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=3, activation='relu')\n", (678, 724), False, 'from keras.layers import Conv2D\n'), ((740, 792), 'keras.layers.Conv2D', 'Conv2D', ([], 
{'filters': '(64)', 'kernel_size': '(3)', 'activation': '"""relu"""'}), "(filters=64, kernel_size=3, activation='relu')\n", (746, 792), False, 'from keras.layers import Conv2D\n'), ((808, 838), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (820, 838), False, 'from keras.layers import MaxPooling2D\n'), ((854, 872), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.25)'}), '(rate=0.25)\n', (861, 872), False, 'from keras.layers import Dropout\n'), ((888, 897), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (895, 897), False, 'from keras.layers import Flatten\n'), ((913, 948), 'keras.layers.Dense', 'Dense', ([], {'units': '(128)', 'activation': '"""relu"""'}), "(units=128, activation='relu')\n", (918, 948), False, 'from keras.layers import Dense\n'), ((964, 982), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.25)'}), '(rate=0.25)\n', (971, 982), False, 'from keras.layers import Dropout\n'), ((998, 1043), 'keras.layers.Dense', 'Dense', ([], {'units': 'output_dim', 'activation': '"""softmax"""'}), "(units=output_dim, activation='softmax')\n", (1003, 1043), False, 'from keras.layers import Dense\n'), ((1189, 1221), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (1202, 1221), True, 'import tensorflow as tf\n'), ((2620, 2669), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['Y_train_int', 'Y_train_pred'], {}), '(Y_train_int, Y_train_pred)\n', (2642, 2669), False, 'from sklearn import metrics\n'), ((2738, 2785), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['Y_test_int', 'Y_test_pred'], {}), '(Y_test_int, Y_test_pred)\n', (2760, 2785), False, 'from sklearn import metrics\n'), ((2874, 2930), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['Y_train_int', 'Y_train_pred'], {}), '(Y_train_int, Y_train_pred)\n', (2903, 2930), False, 'from sklearn import metrics\n'), ((3015, 3069), 
'sklearn.metrics.classification_report', 'metrics.classification_report', (['Y_test_int', 'Y_test_pred'], {}), '(Y_test_int, Y_test_pred)\n', (3044, 3069), False, 'from sklearn import metrics\n')] |
# straight from 08-Designing-Kalman-Filters
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
from kf_book.book_plots import plot_measurements, plot_filter
from filterpy.stats import plot_covariance_ellipse
from filterpy.kalman import KalmanFilter
from scipy.linalg import block_diag
from filterpy.common import Q_discrete_white_noise
import book_format
book_format.set_style()
class PosSensor(object):
    """Simulated 2D position sensor for a target moving at constant
    velocity, with additive Gaussian measurement noise.
    """

    def __init__(self, pos=(0, 0), vel=(0, 0), noise_std=1.):
        self.vel = vel                # (vx, vy) displacement per time step
        self.noise_std = noise_std    # std dev of the additive noise
        self.pos = [pos[0], pos[1]]   # current true position (mutable)

    def read(self):
        """Advance the target one time step and return a noisy position."""
        vx, vy = self.vel
        self.pos[0] += vx
        self.pos[1] += vy
        return [coord + randn() * self.noise_std for coord in self.pos]
# Noise levels shared by the sensor simulation and the filter design below.
R_std = 0.35  # measurement noise std dev fed to PosSensor and kf.R — units follow the sensor; presumably meters, TODO confirm
Q_std = 0.04  # process noise std dev used to build the filter's Q matrix
def tracker1():
    """Build a 4-state (x, vx, y, vy) constant-velocity Kalman filter.

    The filter observes position only; the measurement matrix converts
    the state's position components into the sensor's units via the
    1/0.3048 factor. Uses the module-level ``R_std`` / ``Q_std`` noise
    constants.
    """
    dt = 1.0  # time step
    kf = KalmanFilter(dim_x=4, dim_z=2)
    # Constant-velocity state-transition model.
    kf.F = np.array([[1, dt, 0, 0],
                     [0, 1, 0, 0],
                     [0, 0, 1, dt],
                     [0, 0, 0, 1]])
    kf.u = 0.
    # Observe (x, y); 1/0.3048 is the meters-to-feet conversion factor.
    unit_factor = 1 / 0.3048
    kf.H = np.array([[unit_factor, 0, 0, 0],
                     [0, 0, unit_factor, 0]])
    # Measurement noise covariance.
    kf.R = np.eye(2) * R_std ** 2
    # Process noise: independent discrete white noise per axis.
    axis_q = Q_discrete_white_noise(dim=2, dt=dt, var=Q_std ** 2)
    kf.Q = block_diag(axis_q, axis_q)
    # Initial state (at rest at the origin) and a large initial covariance.
    kf.x = np.array([[0, 0, 0, 0]]).T
    kf.P = np.eye(4) * 500.
    return kf
# Simulate robot movement: collect N noisy position readings from the sensor.
N = 30
sensor = PosSensor((0, 0), (2, .2), noise_std=R_std)
zs = np.array([sensor.read() for _ in range(N)])
# Run the filter over the whole measurement batch at once.
robot_tracker = tracker1()
mu, cov, _, _ = robot_tracker.batch_filter(zs)
for x, P in zip(mu, cov):
    # covariance of x and y
    # NOTE(review): rebinding `cov` here shadows the batch covariance list;
    # harmless because the zip iterator was created before the loop, but
    # confusing — consider a different name for the 2x2 slice.
    cov = np.array([[P[0, 0], P[2, 0]],
                    [P[0, 2], P[2, 2]]])
    mean = (x[0, 0], x[2, 0])
    plot_covariance_ellipse(mean, cov=cov, fc='g', std=3, alpha=0.5)
# plot results
zs *= .3048  # convert to meters
plt.clf()
plot_filter(mu[:, 0], mu[:, 2])
plot_measurements(zs[:, 0], zs[:, 1])
plt.ylim(-3, 5)
plt.xlim(0, 20)
plt.legend(loc=2)
print(zs)
plt.show()
| [
"kf_book.book_plots.plot_measurements",
"numpy.eye",
"kf_book.book_plots.plot_filter",
"matplotlib.pyplot.clf",
"filterpy.kalman.KalmanFilter",
"book_format.set_style",
"filterpy.stats.plot_covariance_ellipse",
"numpy.array",
"filterpy.common.Q_discrete_white_noise",
"scipy.linalg.block_diag",
"... | [((386, 409), 'book_format.set_style', 'book_format.set_style', ([], {}), '()\n', (407, 409), False, 'import book_format\n'), ((1933, 1942), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1940, 1942), True, 'import matplotlib.pyplot as plt\n'), ((1943, 1974), 'kf_book.book_plots.plot_filter', 'plot_filter', (['mu[:, 0]', 'mu[:, 2]'], {}), '(mu[:, 0], mu[:, 2])\n', (1954, 1974), False, 'from kf_book.book_plots import plot_measurements, plot_filter\n'), ((1975, 2012), 'kf_book.book_plots.plot_measurements', 'plot_measurements', (['zs[:, 0]', 'zs[:, 1]'], {}), '(zs[:, 0], zs[:, 1])\n', (1992, 2012), False, 'from kf_book.book_plots import plot_measurements, plot_filter\n'), ((2013, 2028), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-3)', '(5)'], {}), '(-3, 5)\n', (2021, 2028), True, 'import matplotlib.pyplot as plt\n'), ((2029, 2044), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(20)'], {}), '(0, 20)\n', (2037, 2044), True, 'import matplotlib.pyplot as plt\n'), ((2045, 2062), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (2055, 2062), True, 'import matplotlib.pyplot as plt\n'), ((2073, 2083), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2081, 2083), True, 'import matplotlib.pyplot as plt\n'), ((855, 885), 'filterpy.kalman.KalmanFilter', 'KalmanFilter', ([], {'dim_x': '(4)', 'dim_z': '(2)'}), '(dim_x=4, dim_z=2)\n', (867, 885), False, 'from filterpy.kalman import KalmanFilter\n'), ((929, 997), 'numpy.array', 'np.array', (['[[1, dt, 0, 0], [0, 1, 0, 0], [0, 0, 1, dt], [0, 0, 0, 1]]'], {}), '([[1, dt, 0, 0], [0, 1, 0, 0], [0, 0, 1, dt], [0, 0, 0, 1]])\n', (937, 997), True, 'import numpy as np\n'), ((1111, 1167), 'numpy.array', 'np.array', (['[[1 / 0.3048, 0, 0, 0], [0, 0, 1 / 0.3048, 0]]'], {}), '([[1 / 0.3048, 0, 0, 0], [0, 0, 1 / 0.3048, 0]])\n', (1119, 1167), True, 'import numpy as np\n'), ((1242, 1294), 'filterpy.common.Q_discrete_white_noise', 'Q_discrete_white_noise', ([], {'dim': '(2)', 'dt': 'dt', 'var': 
'(Q_std ** 2)'}), '(dim=2, dt=dt, var=Q_std ** 2)\n', (1264, 1294), False, 'from filterpy.common import Q_discrete_white_noise\n'), ((1311, 1327), 'scipy.linalg.block_diag', 'block_diag', (['q', 'q'], {}), '(q, q)\n', (1321, 1327), False, 'from scipy.linalg import block_diag\n'), ((1714, 1764), 'numpy.array', 'np.array', (['[[P[0, 0], P[2, 0]], [P[0, 2], P[2, 2]]]'], {}), '([[P[0, 0], P[2, 0]], [P[0, 2], P[2, 2]]])\n', (1722, 1764), True, 'import numpy as np\n'), ((1819, 1883), 'filterpy.stats.plot_covariance_ellipse', 'plot_covariance_ellipse', (['mean'], {'cov': 'cov', 'fc': '"""g"""', 'std': '(3)', 'alpha': '(0.5)'}), "(mean, cov=cov, fc='g', std=3, alpha=0.5)\n", (1842, 1883), False, 'from filterpy.stats import plot_covariance_ellipse\n'), ((1211, 1220), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1217, 1220), True, 'import numpy as np\n'), ((1344, 1368), 'numpy.array', 'np.array', (['[[0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0]])\n', (1352, 1368), True, 'import numpy as np\n'), ((1387, 1396), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1393, 1396), True, 'import numpy as np\n'), ((713, 720), 'numpy.random.randn', 'randn', ([], {}), '()\n', (718, 720), False, 'from numpy.random import randn\n'), ((769, 776), 'numpy.random.randn', 'randn', ([], {}), '()\n', (774, 776), False, 'from numpy.random import randn\n')] |
###############################################################################
# IMPORT STATEMENTS ###########################################################
###############################################################################
import numpy as np
from tudatpy.kernel import constants
from tudatpy.kernel.interface import spice_interface
from tudatpy.kernel.simulation import environment_setup
from tudatpy.kernel.simulation import propagation_setup
from tudatpy.kernel.astro import conversion
def main():
# Load spice kernels.
spice_interface.load_standard_kernels()
# Set simulation start and end epochs.
simulation_start_epoch = 0.0
simulation_end_epoch = constants.JULIAN_DAY
###########################################################################
# CREATE ENVIRONMENT ######################################################
###########################################################################
# Create default body settings for selected celestial bodies
bodies_to_create = ["Sun", "Earth", "Moon", "Mars", "Venus"]
# Create default body settings for bodies_to_create, with "Earth"/"J2000" as
# global frame origin and orientation. This environment will only be valid
# in the indicated time range
# [simulation_start_epoch --- simulation_end_epoch]
body_settings = environment_setup.get_default_body_settings(
bodies_to_create,
simulation_start_epoch,
simulation_end_epoch,
"Earth","J2000")
# Create system of selected celestial bodies
bodies = environment_setup.create_system_of_bodies(body_settings)
###########################################################################
# CREATE VEHICLE ##########################################################
###########################################################################
# Create vehicle objects.
bodies.create_empty_body( "Delfi-C3" )
bodies.get_body( "Delfi-C3").set_constant_mass(400.0)
# Create aerodynamic coefficient interface settings, and add to vehicle
reference_area = 4.0
drag_coefficient = 1.2
aero_coefficient_settings = environment_setup.aerodynamic_coefficients.constant(
reference_area,[drag_coefficient,0,0],
are_coefficients_in_aerodynamic_frame=True,
are_coefficients_in_negative_axis_direction=True
)
environment_setup.add_aerodynamic_coefficient_interface(
bodies, "Delfi-C3", aero_coefficient_settings );
# Create radiation pressure settings, and add to vehicle
reference_area_radiation = 4.0
radiation_pressure_coefficient = 1.2
occulting_bodies = ["Earth"]
radiation_pressure_settings = environment_setup.radiation_pressure.cannonball(
"Sun", reference_area_radiation, radiation_pressure_coefficient, occulting_bodies
)
environment_setup.add_radiation_pressure_interface(
bodies, "Delfi-C3", radiation_pressure_settings );
###########################################################################
# CREATE ACCELERATIONS ####################################################
###########################################################################
# Define bodies that are propagated.
bodies_to_propagate = ["Delfi-C3"]
# Define central bodies.
central_bodies = ["Earth"]
# Define accelerations acting on Delfi-C3 by Sun and Earth.
accelerations_settings_delfi_c3 = dict(
Sun=
[
propagation_setup.acceleration.cannon_ball_radiation_pressure_(),
propagation_setup.acceleration.point_mass_gravity()
],
Earth=
[
propagation_setup.acceleration.spherical_harmonic_gravity(5, 5),
propagation_setup.acceleration.aerodynamic()
])
# Define point mass accelerations acting on Delfi-C3 by all other bodies.
for other in set(bodies_to_create).difference({"Sun", "Earth"}):
accelerations_settings_delfi_c3[other] = [
propagation_setup.acceleration.point_mass_gravity()]
# Create global accelerations settings dictionary.
acceleration_settings = {"Delfi-C3": accelerations_settings_delfi_c3}
# Create acceleration models.
acceleration_models = propagation_setup.create_acceleration_models(
bodies,
acceleration_settings,
bodies_to_propagate,
central_bodies)
###########################################################################
# CREATE PROPAGATION SETTINGS #############################################
###########################################################################
# Set initial conditions for the Asterix satellite that will be
# propagated in this simulation. The initial conditions are given in
# Keplerian elements and later on converted to Cartesian elements.
earth_gravitational_parameter = bodies.get_body( "Earth" ).gravitational_parameter
initial_state = conversion.keplerian_to_cartesian(
gravitational_parameter=earth_gravitational_parameter,
semi_major_axis=7500.0E3,
eccentricity=0.1,
inclination=np.deg2rad(85.3),
argument_of_periapsis=np.deg2rad(235.7),
longitude_of_ascending_node=np.deg2rad(23.4),
true_anomaly=np.deg2rad(139.87)
)
# Define list of dependent variables to save.
dependent_variables_to_save = [
propagation_setup.dependent_variable.total_acceleration(
"Delfi-C3"
),
propagation_setup.dependent_variable.keplerian_state(
"Delfi-C3", "Earth"
),
propagation_setup.dependent_variable.latitude(
"Delfi-C3", "Earth"
),
propagation_setup.dependent_variable.longitude(
"Delfi-C3", "Earth"
)
]
# Create propagation settings.
propagator_settings = propagation_setup.propagator.translational(
central_bodies,
acceleration_models,
bodies_to_propagate,
initial_state,
simulation_end_epoch,
output_variables = dependent_variables_to_save
)
# Create numerical integrator settings.
fixed_step_size = 10.0
integrator_settings = propagation_setup.integrator.runge_kutta_4(
simulation_start_epoch,
fixed_step_size
)
###########################################################################
# PROPAGATE ORBIT #########################################################
###########################################################################
# Create simulation object and propagate dynamics.
dynamics_simulator = propagation_setup.SingleArcDynamicsSimulator(
bodies, integrator_settings, propagator_settings)
states = dynamics_simulator.state_history
dependent_variables = dynamics_simulator.dependent_variable_history
###########################################################################
# PRINT INITIAL AND FINAL STATES ##########################################
###########################################################################
print(
f"""
Single Earth-Orbiting Satellite Example.
The initial position vector of Delfi-C3 is [km]: \n{
states[simulation_start_epoch][:3] / 1E3}
The initial velocity vector of Delfi-C3 is [km/s]: \n{
states[simulation_start_epoch][3:] / 1E3}
After {simulation_end_epoch} seconds the position vector of Delfi-C3 is [km]: \n{
states[simulation_end_epoch][:3] / 1E3}
And the velocity vector of Delfi-C3 is [km/s]: \n{
states[simulation_end_epoch][3:] / 1E3}
"""
)
# Final statement (not required, though good practice in a __main__).
return 0
if __name__ == "__main__":
main()
| [
"tudatpy.kernel.simulation.propagation_setup.dependent_variable.latitude",
"tudatpy.kernel.simulation.propagation_setup.acceleration.spherical_harmonic_gravity",
"tudatpy.kernel.simulation.propagation_setup.acceleration.point_mass_gravity",
"tudatpy.kernel.simulation.propagation_setup.dependent_variable.total... | [((548, 587), 'tudatpy.kernel.interface.spice_interface.load_standard_kernels', 'spice_interface.load_standard_kernels', ([], {}), '()\n', (585, 587), False, 'from tudatpy.kernel.interface import spice_interface\n'), ((1359, 1488), 'tudatpy.kernel.simulation.environment_setup.get_default_body_settings', 'environment_setup.get_default_body_settings', (['bodies_to_create', 'simulation_start_epoch', 'simulation_end_epoch', '"""Earth"""', '"""J2000"""'], {}), "(bodies_to_create,\n simulation_start_epoch, simulation_end_epoch, 'Earth', 'J2000')\n", (1402, 1488), False, 'from tudatpy.kernel.simulation import environment_setup\n'), ((1580, 1636), 'tudatpy.kernel.simulation.environment_setup.create_system_of_bodies', 'environment_setup.create_system_of_bodies', (['body_settings'], {}), '(body_settings)\n', (1621, 1636), False, 'from tudatpy.kernel.simulation import environment_setup\n'), ((2171, 2367), 'tudatpy.kernel.simulation.environment_setup.aerodynamic_coefficients.constant', 'environment_setup.aerodynamic_coefficients.constant', (['reference_area', '[drag_coefficient, 0, 0]'], {'are_coefficients_in_aerodynamic_frame': '(True)', 'are_coefficients_in_negative_axis_direction': '(True)'}), '(reference_area, [\n drag_coefficient, 0, 0], are_coefficients_in_aerodynamic_frame=True,\n are_coefficients_in_negative_axis_direction=True)\n', (2222, 2367), False, 'from tudatpy.kernel.simulation import environment_setup\n'), ((2390, 2496), 'tudatpy.kernel.simulation.environment_setup.add_aerodynamic_coefficient_interface', 'environment_setup.add_aerodynamic_coefficient_interface', (['bodies', '"""Delfi-C3"""', 'aero_coefficient_settings'], {}), "(bodies, 'Delfi-C3',\n aero_coefficient_settings)\n", (2445, 2496), False, 'from tudatpy.kernel.simulation import environment_setup\n'), ((2717, 2851), 'tudatpy.kernel.simulation.environment_setup.radiation_pressure.cannonball', 
'environment_setup.radiation_pressure.cannonball', (['"""Sun"""', 'reference_area_radiation', 'radiation_pressure_coefficient', 'occulting_bodies'], {}), "('Sun',\n reference_area_radiation, radiation_pressure_coefficient, occulting_bodies)\n", (2764, 2851), False, 'from tudatpy.kernel.simulation import environment_setup\n'), ((2866, 2969), 'tudatpy.kernel.simulation.environment_setup.add_radiation_pressure_interface', 'environment_setup.add_radiation_pressure_interface', (['bodies', '"""Delfi-C3"""', 'radiation_pressure_settings'], {}), "(bodies, 'Delfi-C3',\n radiation_pressure_settings)\n", (2916, 2969), False, 'from tudatpy.kernel.simulation import environment_setup\n'), ((4278, 4394), 'tudatpy.kernel.simulation.propagation_setup.create_acceleration_models', 'propagation_setup.create_acceleration_models', (['bodies', 'acceleration_settings', 'bodies_to_propagate', 'central_bodies'], {}), '(bodies, acceleration_settings,\n bodies_to_propagate, central_bodies)\n', (4322, 4394), False, 'from tudatpy.kernel.simulation import propagation_setup\n'), ((5848, 6039), 'tudatpy.kernel.simulation.propagation_setup.propagator.translational', 'propagation_setup.propagator.translational', (['central_bodies', 'acceleration_models', 'bodies_to_propagate', 'initial_state', 'simulation_end_epoch'], {'output_variables': 'dependent_variables_to_save'}), '(central_bodies,\n acceleration_models, bodies_to_propagate, initial_state,\n simulation_end_epoch, output_variables=dependent_variables_to_save)\n', (5890, 6039), False, 'from tudatpy.kernel.simulation import propagation_setup\n'), ((6185, 6272), 'tudatpy.kernel.simulation.propagation_setup.integrator.runge_kutta_4', 'propagation_setup.integrator.runge_kutta_4', (['simulation_start_epoch', 'fixed_step_size'], {}), '(simulation_start_epoch,\n fixed_step_size)\n', (6227, 6272), False, 'from tudatpy.kernel.simulation import propagation_setup\n'), ((6613, 6711), 'tudatpy.kernel.simulation.propagation_setup.SingleArcDynamicsSimulator', 
'propagation_setup.SingleArcDynamicsSimulator', (['bodies', 'integrator_settings', 'propagator_settings'], {}), '(bodies, integrator_settings,\n propagator_settings)\n', (6657, 6711), False, 'from tudatpy.kernel.simulation import propagation_setup\n'), ((5425, 5492), 'tudatpy.kernel.simulation.propagation_setup.dependent_variable.total_acceleration', 'propagation_setup.dependent_variable.total_acceleration', (['"""Delfi-C3"""'], {}), "('Delfi-C3')\n", (5480, 5492), False, 'from tudatpy.kernel.simulation import propagation_setup\n'), ((5517, 5590), 'tudatpy.kernel.simulation.propagation_setup.dependent_variable.keplerian_state', 'propagation_setup.dependent_variable.keplerian_state', (['"""Delfi-C3"""', '"""Earth"""'], {}), "('Delfi-C3', 'Earth')\n", (5569, 5590), False, 'from tudatpy.kernel.simulation import propagation_setup\n'), ((5608, 5674), 'tudatpy.kernel.simulation.propagation_setup.dependent_variable.latitude', 'propagation_setup.dependent_variable.latitude', (['"""Delfi-C3"""', '"""Earth"""'], {}), "('Delfi-C3', 'Earth')\n", (5653, 5674), False, 'from tudatpy.kernel.simulation import propagation_setup\n'), ((5692, 5759), 'tudatpy.kernel.simulation.propagation_setup.dependent_variable.longitude', 'propagation_setup.dependent_variable.longitude', (['"""Delfi-C3"""', '"""Earth"""'], {}), "('Delfi-C3', 'Earth')\n", (5738, 5759), False, 'from tudatpy.kernel.simulation import propagation_setup\n'), ((4034, 4085), 'tudatpy.kernel.simulation.propagation_setup.acceleration.point_mass_gravity', 'propagation_setup.acceleration.point_mass_gravity', ([], {}), '()\n', (4083, 4085), False, 'from tudatpy.kernel.simulation import propagation_setup\n'), ((5163, 5179), 'numpy.deg2rad', 'np.deg2rad', (['(85.3)'], {}), '(85.3)\n', (5173, 5179), True, 'import numpy as np\n'), ((5211, 5228), 'numpy.deg2rad', 'np.deg2rad', (['(235.7)'], {}), '(235.7)\n', (5221, 5228), True, 'import numpy as np\n'), ((5266, 5282), 'numpy.deg2rad', 'np.deg2rad', (['(23.4)'], {}), '(23.4)\n', (5276, 
5282), True, 'import numpy as np\n'), ((5305, 5323), 'numpy.deg2rad', 'np.deg2rad', (['(139.87)'], {}), '(139.87)\n', (5315, 5323), True, 'import numpy as np\n'), ((3512, 3576), 'tudatpy.kernel.simulation.propagation_setup.acceleration.cannon_ball_radiation_pressure_', 'propagation_setup.acceleration.cannon_ball_radiation_pressure_', ([], {}), '()\n', (3574, 3576), False, 'from tudatpy.kernel.simulation import propagation_setup\n'), ((3590, 3641), 'tudatpy.kernel.simulation.propagation_setup.acceleration.point_mass_gravity', 'propagation_setup.acceleration.point_mass_gravity', ([], {}), '()\n', (3639, 3641), False, 'from tudatpy.kernel.simulation import propagation_setup\n'), ((3690, 3753), 'tudatpy.kernel.simulation.propagation_setup.acceleration.spherical_harmonic_gravity', 'propagation_setup.acceleration.spherical_harmonic_gravity', (['(5)', '(5)'], {}), '(5, 5)\n', (3747, 3753), False, 'from tudatpy.kernel.simulation import propagation_setup\n'), ((3767, 3811), 'tudatpy.kernel.simulation.propagation_setup.acceleration.aerodynamic', 'propagation_setup.acceleration.aerodynamic', ([], {}), '()\n', (3809, 3811), False, 'from tudatpy.kernel.simulation import propagation_setup\n')] |
import numpy as np
from tmtoolkit.topicmod.evaluate import metric_coherence_gensim
class GldaTrainer:
def __init__(self, model, data):
self.model = model.model
self.data = data
self.vocab = model.vocab
self.seed_topics = model.seed_topics
def train(self):
self.model.fit(self.data, seed_topics=self.seed_topics, seed_confidence=0.75)
def evaluate(self, data, corpus):
score_lst = metric_coherence_gensim(measure='c_v',
top_n=10,
topic_word_distrib=np.array(self.model.topic_word_),
dtm=np.array(data),
vocab=np.array(self.vocab),
texts=corpus)
avg_score = np.mean(score_lst)
return score_lst, avg_score
def generate_topics(self):
n_top_words = 10
topic_word = self.model.topic_word_
for i, topic_dist in enumerate(topic_word):
topic_words = np.array(self.vocab)[np.argsort(topic_dist)][:-(n_top_words+1):-1]
print('Topic {}: {}'.format(i, ' '.join(topic_words)))
| [
"numpy.argsort",
"numpy.mean",
"numpy.array"
] | [((848, 866), 'numpy.mean', 'np.mean', (['score_lst'], {}), '(score_lst)\n', (855, 866), True, 'import numpy as np\n'), ((600, 632), 'numpy.array', 'np.array', (['self.model.topic_word_'], {}), '(self.model.topic_word_)\n', (608, 632), True, 'import numpy as np\n'), ((682, 696), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (690, 696), True, 'import numpy as np\n'), ((748, 768), 'numpy.array', 'np.array', (['self.vocab'], {}), '(self.vocab)\n', (756, 768), True, 'import numpy as np\n'), ((1082, 1102), 'numpy.array', 'np.array', (['self.vocab'], {}), '(self.vocab)\n', (1090, 1102), True, 'import numpy as np\n'), ((1103, 1125), 'numpy.argsort', 'np.argsort', (['topic_dist'], {}), '(topic_dist)\n', (1113, 1125), True, 'import numpy as np\n')] |
from ctapipe.image.muon import muon_ring_finder
import numpy as np
import astropy.units as u
from ctapipe.instrument import CameraGeometry
from functools import partial
from ctapipe.image import toymodel, tailcuts_clean
def test_ChaudhuriKunduRingFitter_old():
fitter = muon_ring_finder.ChaudhuriKunduRingFitter(parent=None)
points = np.linspace(-100, 100, 200)
x, y = np.meshgrid(points, points) * u.deg
weight = np.zeros(x.shape)
c_x = 50 * u.deg
c_y = 20 * u.deg
r = np.sqrt((x - c_x)**2 + (y - c_y)**2)
min_r = 10 * u.deg
max_r = 20 * u.deg
weight[(r > min_r) & (r < max_r)] = 1
output = fitter.fit(x, y, weight)
lim_p = 0.05 * u.deg
lim_r = 1 * u.deg
rad_a = 0.5 * (max_r + min_r)
assert abs(output.ring_center_x - c_x) < lim_p
assert abs(output.ring_center_y - c_y) < lim_p
assert abs(output.ring_radius - rad_a) < lim_r
def test_ChaudhuriKunduRingFitterHline():
fitter = muon_ring_finder.ChaudhuriKunduRingFitter(parent=None)
x = np.linspace(20, 30, 10) * u.deg # Make linear array in x
y = np.full_like(x, 15) # Fill y array of same size with y
weight = np.ones(x.shape) # Fill intensity array with value
output = fitter.fit(x, y, weight)
# TODO in muon_ring_fitter decide what to do if unreconstructable
# ... add Status Flag?
assert output.ring_radius is not np.NaN
assert output.ring_phi is not np.NaN
assert output.ring_inclination is not np.NaN
def test_ChaudhuriKunduRingFitter():
geom = CameraGeometry.from_name('HESS-I')
ring_rad = np.deg2rad(1. * u.deg) * 15. # make sure this is in camera coordinates
ring_width = np.deg2rad(0.05 * u.deg) * 15.
geom_pixall = np.empty(geom.pix_x.shape + (2,))
geom_pixall[..., 0] = geom.pix_x.value
geom_pixall[..., 1] = geom.pix_y.value
# image = generate_muon_model(geom_pixall, ring_rad, ring_width, 0.3, 0.2)
muon_model = partial(toymodel.generate_muon_model, radius=ring_rad.value,
width=ring_width.value, centre_x=-0.2, centre_y=-0.3)
toymodel_image, toy_signal, toy_noise = \
toymodel.make_toymodel_shower_image(geom, muon_model)
clean_toy_mask = tailcuts_clean(geom, toymodel_image,
boundary_thresh=5, picture_thresh=10)
# camera_coord = CameraFrame(x=x,y=y,z=np.zeros(x.shape)*u.m,
# focal_length = event.inst.optical_foclen[telid], rotation=geom.pix_rotation)
muonring = muon_ring_finder.ChaudhuriKunduRingFitter(None)
x = np.rad2deg((geom.pix_x.value / 15.) * u.rad) # .value
y = np.rad2deg((geom.pix_y.value / 15.) * u.rad) # .value
muonringparam = muonring.fit(x, y, toymodel_image * clean_toy_mask)
dist = np.sqrt(np.power(x - muonringparam.ring_center_x, 2)
+ np.power(y - muonringparam.ring_center_y, 2))
ring_dist = np.abs(dist - muonringparam.ring_radius)
muonringparam = muonring.fit(x, y, toymodel_image * (ring_dist <
muonringparam.ring_radius * 0.4))
dist = np.sqrt(np.power(x - muonringparam.ring_center_x, 2) +
np.power(y - muonringparam.ring_center_y, 2))
ring_dist = np.abs(dist - muonringparam.ring_radius)
muonringparam = muonring.fit(x, y, toymodel_image * (ring_dist <
muonringparam.ring_radius * 0.4))
print('Fitted ring radius', muonringparam.ring_radius, 'c.f.', ring_rad)
print('Fitted ring centre', muonringparam.ring_center_x, muonringparam.ring_center_y)
assert muonringparam.ring_radius is not ring_rad # .value
assert muonringparam.ring_center_x is not -0.2
assert muonringparam.ring_center_y is not -0.3
if __name__ == '__main__':
test_ChaudhuriKunduRingFitter_old()
test_ChaudhuriKunduRingFitterHline()
test_ChaudhuriKunduRingFitter()
| [
"numpy.abs",
"numpy.sqrt",
"numpy.full_like",
"numpy.ones",
"ctapipe.instrument.CameraGeometry.from_name",
"numpy.power",
"ctapipe.image.muon.muon_ring_finder.ChaudhuriKunduRingFitter",
"numpy.linspace",
"numpy.zeros",
"numpy.empty",
"functools.partial",
"ctapipe.image.tailcuts_clean",
"nump... | [((277, 331), 'ctapipe.image.muon.muon_ring_finder.ChaudhuriKunduRingFitter', 'muon_ring_finder.ChaudhuriKunduRingFitter', ([], {'parent': 'None'}), '(parent=None)\n', (318, 331), False, 'from ctapipe.image.muon import muon_ring_finder\n'), ((346, 373), 'numpy.linspace', 'np.linspace', (['(-100)', '(100)', '(200)'], {}), '(-100, 100, 200)\n', (357, 373), True, 'import numpy as np\n'), ((435, 452), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (443, 452), True, 'import numpy as np\n'), ((505, 545), 'numpy.sqrt', 'np.sqrt', (['((x - c_x) ** 2 + (y - c_y) ** 2)'], {}), '((x - c_x) ** 2 + (y - c_y) ** 2)\n', (512, 545), True, 'import numpy as np\n'), ((964, 1018), 'ctapipe.image.muon.muon_ring_finder.ChaudhuriKunduRingFitter', 'muon_ring_finder.ChaudhuriKunduRingFitter', ([], {'parent': 'None'}), '(parent=None)\n', (1005, 1018), False, 'from ctapipe.image.muon import muon_ring_finder\n'), ((1095, 1114), 'numpy.full_like', 'np.full_like', (['x', '(15)'], {}), '(x, 15)\n', (1107, 1114), True, 'import numpy as np\n'), ((1177, 1193), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (1184, 1193), True, 'import numpy as np\n'), ((1560, 1594), 'ctapipe.instrument.CameraGeometry.from_name', 'CameraGeometry.from_name', (['"""HESS-I"""'], {}), "('HESS-I')\n", (1584, 1594), False, 'from ctapipe.instrument import CameraGeometry\n'), ((1749, 1782), 'numpy.empty', 'np.empty', (['(geom.pix_x.shape + (2,))'], {}), '(geom.pix_x.shape + (2,))\n', (1757, 1782), True, 'import numpy as np\n'), ((1966, 2085), 'functools.partial', 'partial', (['toymodel.generate_muon_model'], {'radius': 'ring_rad.value', 'width': 'ring_width.value', 'centre_x': '(-0.2)', 'centre_y': '(-0.3)'}), '(toymodel.generate_muon_model, radius=ring_rad.value, width=\n ring_width.value, centre_x=-0.2, centre_y=-0.3)\n', (1973, 2085), False, 'from functools import partial\n'), ((2161, 2214), 'ctapipe.image.toymodel.make_toymodel_shower_image', 'toymodel.make_toymodel_shower_image', 
(['geom', 'muon_model'], {}), '(geom, muon_model)\n', (2196, 2214), False, 'from ctapipe.image import toymodel, tailcuts_clean\n'), ((2237, 2311), 'ctapipe.image.tailcuts_clean', 'tailcuts_clean', (['geom', 'toymodel_image'], {'boundary_thresh': '(5)', 'picture_thresh': '(10)'}), '(geom, toymodel_image, boundary_thresh=5, picture_thresh=10)\n', (2251, 2311), False, 'from ctapipe.image import toymodel, tailcuts_clean\n'), ((2513, 2560), 'ctapipe.image.muon.muon_ring_finder.ChaudhuriKunduRingFitter', 'muon_ring_finder.ChaudhuriKunduRingFitter', (['None'], {}), '(None)\n', (2554, 2560), False, 'from ctapipe.image.muon import muon_ring_finder\n'), ((2570, 2613), 'numpy.rad2deg', 'np.rad2deg', (['(geom.pix_x.value / 15.0 * u.rad)'], {}), '(geom.pix_x.value / 15.0 * u.rad)\n', (2580, 2613), True, 'import numpy as np\n'), ((2633, 2676), 'numpy.rad2deg', 'np.rad2deg', (['(geom.pix_y.value / 15.0 * u.rad)'], {}), '(geom.pix_y.value / 15.0 * u.rad)\n', (2643, 2676), True, 'import numpy as np\n'), ((2909, 2949), 'numpy.abs', 'np.abs', (['(dist - muonringparam.ring_radius)'], {}), '(dist - muonringparam.ring_radius)\n', (2915, 2949), True, 'import numpy as np\n'), ((3258, 3298), 'numpy.abs', 'np.abs', (['(dist - muonringparam.ring_radius)'], {}), '(dist - muonringparam.ring_radius)\n', (3264, 3298), True, 'import numpy as np\n'), ((386, 413), 'numpy.meshgrid', 'np.meshgrid', (['points', 'points'], {}), '(points, points)\n', (397, 413), True, 'import numpy as np\n'), ((1028, 1051), 'numpy.linspace', 'np.linspace', (['(20)', '(30)', '(10)'], {}), '(20, 30, 10)\n', (1039, 1051), True, 'import numpy as np\n'), ((1611, 1634), 'numpy.deg2rad', 'np.deg2rad', (['(1.0 * u.deg)'], {}), '(1.0 * u.deg)\n', (1621, 1634), True, 'import numpy as np\n'), ((1700, 1724), 'numpy.deg2rad', 'np.deg2rad', (['(0.05 * u.deg)'], {}), '(0.05 * u.deg)\n', (1710, 1724), True, 'import numpy as np\n'), ((2781, 2825), 'numpy.power', 'np.power', (['(x - muonringparam.ring_center_x)', '(2)'], {}), '(x - 
muonringparam.ring_center_x, 2)\n', (2789, 2825), True, 'import numpy as np\n'), ((2847, 2891), 'numpy.power', 'np.power', (['(y - muonringparam.ring_center_y)', '(2)'], {}), '(y - muonringparam.ring_center_y, 2)\n', (2855, 2891), True, 'import numpy as np\n'), ((3130, 3174), 'numpy.power', 'np.power', (['(x - muonringparam.ring_center_x)', '(2)'], {}), '(x - muonringparam.ring_center_x, 2)\n', (3138, 3174), True, 'import numpy as np\n'), ((3196, 3240), 'numpy.power', 'np.power', (['(y - muonringparam.ring_center_y)', '(2)'], {}), '(y - muonringparam.ring_center_y, 2)\n', (3204, 3240), True, 'import numpy as np\n')] |
import numpy as np
import random
def nuclear_norm_alpha_generation(num_models, **params):
return np.array(
[0]
+ [
2 ** x
for x in np.linspace(
start=params["options"][0],
stop=params["options"][1],
num=(num_models - 1),
)
]
)
def hidden_size_generation(num_models, **params):
return random.choices(
list(
{
int(2 ** x)
for x in np.arange(
params["options"][0], params["options"][1], params["step"]
)
}
),
k=num_models,
)
| [
"numpy.linspace",
"numpy.arange"
] | [((177, 268), 'numpy.linspace', 'np.linspace', ([], {'start': "params['options'][0]", 'stop': "params['options'][1]", 'num': '(num_models - 1)'}), "(start=params['options'][0], stop=params['options'][1], num=\n num_models - 1)\n", (188, 268), True, 'import numpy as np\n'), ((505, 574), 'numpy.arange', 'np.arange', (["params['options'][0]", "params['options'][1]", "params['step']"], {}), "(params['options'][0], params['options'][1], params['step'])\n", (514, 574), True, 'import numpy as np\n')] |
# Copyright (c) 2020 <NAME> & <NAME>
# FEniCS Project
# SPDX-License-Identifier: MIT
import libtab
import numpy
import pytest
import sympy
from .test_lagrange import sympy_disc_lagrange
def sympy_nedelec(celltype, n):
x = sympy.Symbol("x")
y = sympy.Symbol("y")
z = sympy.Symbol("z")
from sympy import S
topology = libtab.topology(celltype)
geometry = S(libtab.geometry(celltype).astype(int))
dummy = [sympy.Symbol("DUMMY1"), sympy.Symbol("DUMMY2"), sympy.Symbol("DUMMY3")]
funcs = []
if celltype == libtab.CellType.triangle:
tdim = 2
for i in range(n):
for j in range(n - i):
for d in range(2):
funcs += [[x**j * y**i if k == d else 0 for k in range(2)]]
for i in range(n):
funcs += [[x ** (n - 1 - i) * y ** (i + 1),
-x ** (n - i) * y ** i]]
mat = numpy.empty((len(funcs), len(funcs)), dtype=object)
# edge tangents
if n == 1:
edge_basis = [sympy.Integer(1)]
else:
edge_basis = sympy_disc_lagrange(libtab.CellType.interval, n - 1)
edge_basis = [a.subs(x, dummy[0]) for a in edge_basis]
for i, f in enumerate(funcs):
j = 0
for edge in topology[1]:
edge_geom = [geometry[t, :] for t in edge]
tangent = edge_geom[1] - edge_geom[0]
norm = sympy.sqrt(sum(i ** 2 for i in tangent))
tangent = [i / norm for i in tangent]
param = [(1 - dummy[0]) * a + dummy[0] * b for a, b in zip(edge_geom[0], edge_geom[1])]
for g in edge_basis:
integrand = sum((f_i * v_i) for f_i, v_i in zip(f, tangent))
integrand = integrand.subs(x, param[0]).subs(y, param[1])
integrand *= g * norm
mat[i, j] = integrand.integrate((dummy[0], 0, 1))
j += 1
# interior dofs
if n > 1:
if n == 2:
face_basis = [sympy.Integer(1)]
else:
face_basis = sympy_disc_lagrange(libtab.CellType.triangle, n - 2)
for i, f in enumerate(funcs):
j = n * 3
for g in face_basis:
for vec in [(1, 0), (0, 1)]:
integrand = sum((f_i * v_i) for f_i, v_i in zip(f, vec)) * g
mat[i, j] = integrand.integrate((x, 0, 1 - y)).integrate((y, 0, 1))
j += 1
elif celltype == libtab.CellType.tetrahedron:
tdim = 3
for i in range(n):
for j in range(n - i):
for k in range(n - i - j):
for d in range(3):
funcs += [[x**k * y**j * z**i if m == d else 0 for m in range(3)]]
if n == 1:
funcs += [[y, -x, sympy.Integer(0)], [z, sympy.Integer(0), -x], [sympy.Integer(0), z, -y]]
elif n == 2:
funcs += [
[y ** 2, -x * y, sympy.Integer(0)],
[x * y, -x ** 2, sympy.Integer(0)],
[z * y, -z * x, sympy.Integer(0)],
[sympy.Integer(0), y * z, -y ** 2],
[sympy.Integer(0), z ** 2, -z * y],
[sympy.Integer(0), x * z, -x * y],
[x * z, sympy.Integer(0), -x ** 2],
[z ** 2, sympy.Integer(0), -z * x],
]
elif n == 3:
funcs += [
[x ** 2 * y, -x ** 3, sympy.Integer(0)],
[x ** 2 * z, sympy.Integer(0), -x ** 3],
[sympy.Integer(0), x ** 2 * z, -x ** 2 * y],
[x * y ** 2, -x ** 2 * y, sympy.Integer(0)],
[2 * x * y * z, -x ** 2 * z, -x ** 2 * y],
[sympy.Integer(0), x * y * z, -x * y ** 2],
[x * z ** 2, sympy.Integer(0), -x ** 2 * z],
[sympy.Integer(0), x * z ** 2, -x * y * z],
[y ** 3, -x * y ** 2, sympy.Integer(0)],
[9 * y ** 2 * z, -4 * x * y * z, -5 * x * y ** 2],
[sympy.Integer(0), y ** 2 * z, -y ** 3],
[9 * y * z ** 2, -5 * x * z ** 2, -4 * x * y * z],
[sympy.Integer(0), y * z ** 2, -y ** 2 * z],
[z ** 3, sympy.Integer(0), -x * z ** 2],
[sympy.Integer(0), z ** 3, -y * z ** 2],
]
else:
raise NotImplementedError
mat = numpy.empty((len(funcs), len(funcs)), dtype=object)
# edge tangents
if n == 1:
edge_basis = [sympy.Integer(1)]
else:
edge_basis = sympy_disc_lagrange(libtab.CellType.interval, n - 1)
edge_basis = [a.subs(x, dummy[0]) for a in edge_basis]
for i, f in enumerate(funcs):
j = 0
for edge in topology[1]:
edge_geom = [geometry[t, :] for t in edge]
tangent = edge_geom[1] - edge_geom[0]
norm = sympy.sqrt(sum(i ** 2 for i in tangent))
tangent = [i / norm for i in tangent]
param = [(1 - dummy[0]) * a + dummy[0] * b for a, b in zip(edge_geom[0], edge_geom[1])]
for g in edge_basis:
integrand = sum((f_i * v_i) for f_i, v_i in zip(f, tangent))
integrand = integrand.subs(x, param[0]).subs(y, param[1]).subs(z, param[2])
integrand *= g * norm
mat[i, j] = integrand.integrate((dummy[0], 0, 1))
j += 1
# face dofs
if n > 1:
if n == 2:
face_basis = [sympy.Integer(1)]
else:
face_basis = sympy_disc_lagrange(libtab.CellType.triangle, n - 2)
face_basis = [a.subs(x, dummy[0]).subs(y, dummy[1]) for a in face_basis]
for i, f in enumerate(funcs):
j = n * 6
for face in topology[2]:
face_geom = [geometry[t, :] for t in face]
axes = [face_geom[1] - face_geom[0], face_geom[2] - face_geom[0]]
norm = sympy.sqrt(sum(i**2 for i in
[axes[0][1] * axes[1][2] - axes[0][2] * axes[1][1],
axes[0][2] * axes[1][0] - axes[0][0] * axes[1][2],
axes[0][0] * axes[1][1] - axes[0][1] * axes[1][0]]))
scaled_axes = []
for a in axes:
axisnorm = sympy.sqrt(sum(k**2 for k in a))
scaled_axes.append([k / axisnorm for k in a])
param = [a + dummy[0] * b + dummy[1] * c for a, b, c in zip(face_geom[0], *axes)]
for g in face_basis:
for vec in scaled_axes:
integrand = sum(f_i * v_i for f_i, v_i in zip(f, vec))
integrand = integrand.subs(x, param[0]).subs(y, param[1]).subs(z, param[2])
integrand *= g * norm
mat[i, j] = integrand.integrate((dummy[0], 0, 1 - dummy[1])).integrate((dummy[1], 0, 1))
j += 1
# interior dofs
if n > 2:
if n == 3:
interior_basis = [sympy.Integer(1)]
else:
interior_basis = sympy_disc_lagrange(libtab.CellType.tetrahedron, n - 3)
for i, f in enumerate(funcs):
j = n * 6 + 4 * n * (n - 1)
for g in interior_basis:
for vec in [(1, 0, 0), (0, 1, 0), (0, 0, 1)]:
integrand = sum(f_i * v_i for f_i, v_i in zip(f, vec))
integrand *= g
mat[i, j] = integrand.integrate((x, 0, 1 - y - z)).integrate((y, 0, 1 - z)).integrate((z, 0, 1))
j += 1
mat = sympy.Matrix(mat)
mat = mat.inv()
g = []
for dim in range(tdim):
for r in range(mat.shape[0]):
g += [sum([v * funcs[i][dim] for i, v in enumerate(mat.row(r))])]
return g
@pytest.mark.parametrize("order", [1, 2, 3])
def test_tri(order):
celltype = libtab.CellType.triangle
g = sympy_nedelec(celltype, order)
x = sympy.Symbol("x")
y = sympy.Symbol("y")
nedelec = libtab.Nedelec("triangle", order)
pts = libtab.create_lattice(celltype, 6, libtab.LatticeType.equispaced, True)
nderiv = 3
wtab = nedelec.tabulate(nderiv, pts)
for kx in range(nderiv):
for ky in range(0, nderiv - kx):
wsym = numpy.zeros_like(wtab[0])
for i in range(len(g)):
wd = sympy.diff(g[i], x, kx, y, ky)
for j, p in enumerate(pts):
wsym[j, i] = wd.subs([(x, p[0]), (y, p[1])])
assert(numpy.isclose(wtab[libtab.index(kx, ky)], wsym).all())
@pytest.mark.parametrize("order", [1, 2, 3])
def test_tet(order):
celltype = libtab.CellType.tetrahedron
g = sympy_nedelec(celltype, order)
x = sympy.Symbol("x")
y = sympy.Symbol("y")
z = sympy.Symbol("z")
nedelec = libtab.Nedelec("tetrahedron", order)
pts = libtab.create_lattice(celltype, 6, libtab.LatticeType.equispaced, True)
nderiv = 1
wtab = nedelec.tabulate(nderiv, pts)
for k in range(nderiv + 1):
for q in range(k + 1):
for kx in range(q + 1):
ky = q - kx
kz = k - q
wsym = numpy.zeros_like(wtab[0])
for i in range(len(g)):
wd = sympy.diff(g[i], x, kx, y, ky, z, kz)
for j, p in enumerate(pts):
wsym[j, i] = wd.subs([(x, p[0]),
(y, p[1]),
(z, p[2])])
assert(numpy.isclose(wtab[libtab.index(kx, ky, kz)], wsym).all())
@pytest.mark.parametrize("order", [1, 2, 3, 4])
def test_dof_permutations_triangle(order):
nedelec = libtab.Nedelec("triangle", order)
permuted = {}
if order == 2:
# Reflect 2 DOFs on edges
permuted[0] = {0: 1, 1: 0}
permuted[1] = {2: 3, 3: 2}
permuted[2] = {4: 5, 5: 4}
elif order == 3:
# Reflect 3 DOFs on edges
permuted[0] = {0: 2, 2: 0}
permuted[1] = {3: 5, 5: 3}
permuted[2] = {6: 8, 8: 6}
elif order == 4:
# Reflect 4 DOFs on edges
permuted[0] = {0: 3, 1: 2, 2: 1, 3: 0}
permuted[1] = {4: 7, 5: 6, 6: 5, 7: 4}
permuted[2] = {8: 11, 9: 10, 10: 9, 11: 8}
base_perms = nedelec.base_permutations
assert len(base_perms) == 3
for i, perm in enumerate(base_perms):
actual = numpy.zeros_like(perm)
for row in range(perm.shape[0]):
for k in range(perm.shape[1]):
if i in permuted and k in permuted[i]:
col = permuted[i][k]
else:
col = k
if row == k:
if order * i <= row < order * (i + 1):
actual[row, col] = -1
else:
actual[row, col] = 1
assert numpy.allclose(perm, actual)
@pytest.mark.parametrize("order", [1, 2, 3, 4])
def test_dof_permutations_tetrahedron(order):
nedelec = libtab.Nedelec("tetrahedron", order)
permuted = {}
if order == 2:
# Reflect 2 DOFs on edges
for i in range(6):
permuted[i] = {2 * i: 2 * i + 1, 2 * i + 1: 2 * i}
elif order == 3:
# Reflect 3 DOFs on edges
for i in range(6):
permuted[i] = {3 * i: 3 * i + 2, 3 * i + 2: 3 * i}
# Rotate and reflect 3*2 DOFs on faces
for i in range(4):
permuted[6 + 2 * i] = {
18 + 6 * i: 22 + 6 * i, 19 + 6 * i: 23 + 6 * i, 20 + 6 * i: 18 + 6 * i,
21 + 6 * i: 19 + 6 * i, 22 + 6 * i: 20 + 6 * i, 23 + 6 * i: 21 + 6 * i}
permuted[7 + 2 * i] = {
20 + 6 * i: 22 + 6 * i, 21 + 6 * i: 23 + 6 * i,
22 + 6 * i: 20 + 6 * i, 23 + 6 * i: 21 + 6 * i}
elif order == 4:
# Reflect 3 DOFs on edges
for i in range(6):
permuted[i] = {4 * i + j: 4 * i + 3 - j for j in range(4)}
# Rotate and reflect 6*2 DOFs on faces
for i in range(4):
permuted[6 + 2 * i] = {
24 + a + 12 * i: 24 + b + 12 * i for a, b in {
0: 10, 1: 11, 2: 6, 3: 7, 4: 0, 5: 1, 6: 8,
7: 9, 8: 2, 9: 3, 10: 4, 11: 5}.items()}
permuted[7 + 2 * i] = {
24 + a + 12 * i: 24 + b + 12 * i for a, b in {
2: 6, 3: 7, 4: 10, 5: 11, 6: 2, 7: 3, 10: 4, 11: 5}.items()}
base_perms = nedelec.base_permutations
assert len(base_perms) == 14
# Test edge flips
for i, perm in enumerate(base_perms[:6]):
actual = numpy.zeros_like(perm)
for row in range(perm.shape[0]):
for k in range(perm.shape[1]):
if i in permuted and k in permuted[i]:
col = permuted[i][k]
else:
col = k
if row == k:
if order * i <= row < order * (i + 1):
actual[row, col] = -1
else:
actual[row, col] = 1
assert numpy.allclose(perm, actual)
# TODO: write good test for face rotations
| [
"sympy.Symbol",
"libtab.create_lattice",
"numpy.allclose",
"libtab.geometry",
"sympy.Integer",
"libtab.index",
"sympy.Matrix",
"pytest.mark.parametrize",
"libtab.topology",
"sympy.diff",
"libtab.Nedelec",
"numpy.zeros_like"
] | [((8124, 8167), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', '[1, 2, 3]'], {}), "('order', [1, 2, 3])\n", (8147, 8167), False, 'import pytest\n'), ((8897, 8940), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', '[1, 2, 3]'], {}), "('order', [1, 2, 3])\n", (8920, 8940), False, 'import pytest\n'), ((9926, 9972), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', '[1, 2, 3, 4]'], {}), "('order', [1, 2, 3, 4])\n", (9949, 9972), False, 'import pytest\n'), ((11243, 11289), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', '[1, 2, 3, 4]'], {}), "('order', [1, 2, 3, 4])\n", (11266, 11289), False, 'import pytest\n'), ((229, 246), 'sympy.Symbol', 'sympy.Symbol', (['"""x"""'], {}), "('x')\n", (241, 246), False, 'import sympy\n'), ((255, 272), 'sympy.Symbol', 'sympy.Symbol', (['"""y"""'], {}), "('y')\n", (267, 272), False, 'import sympy\n'), ((281, 298), 'sympy.Symbol', 'sympy.Symbol', (['"""z"""'], {}), "('z')\n", (293, 298), False, 'import sympy\n'), ((339, 364), 'libtab.topology', 'libtab.topology', (['celltype'], {}), '(celltype)\n', (354, 364), False, 'import libtab\n'), ((7914, 7931), 'sympy.Matrix', 'sympy.Matrix', (['mat'], {}), '(mat)\n', (7926, 7931), False, 'import sympy\n'), ((8276, 8293), 'sympy.Symbol', 'sympy.Symbol', (['"""x"""'], {}), "('x')\n", (8288, 8293), False, 'import sympy\n'), ((8302, 8319), 'sympy.Symbol', 'sympy.Symbol', (['"""y"""'], {}), "('y')\n", (8314, 8319), False, 'import sympy\n'), ((8334, 8367), 'libtab.Nedelec', 'libtab.Nedelec', (['"""triangle"""', 'order'], {}), "('triangle', order)\n", (8348, 8367), False, 'import libtab\n'), ((8378, 8449), 'libtab.create_lattice', 'libtab.create_lattice', (['celltype', '(6)', 'libtab.LatticeType.equispaced', '(True)'], {}), '(celltype, 6, libtab.LatticeType.equispaced, True)\n', (8399, 8449), False, 'import libtab\n'), ((9052, 9069), 'sympy.Symbol', 'sympy.Symbol', (['"""x"""'], {}), "('x')\n", (9064, 9069), 
False, 'import sympy\n'), ((9078, 9095), 'sympy.Symbol', 'sympy.Symbol', (['"""y"""'], {}), "('y')\n", (9090, 9095), False, 'import sympy\n'), ((9104, 9121), 'sympy.Symbol', 'sympy.Symbol', (['"""z"""'], {}), "('z')\n", (9116, 9121), False, 'import sympy\n'), ((9136, 9172), 'libtab.Nedelec', 'libtab.Nedelec', (['"""tetrahedron"""', 'order'], {}), "('tetrahedron', order)\n", (9150, 9172), False, 'import libtab\n'), ((9184, 9255), 'libtab.create_lattice', 'libtab.create_lattice', (['celltype', '(6)', 'libtab.LatticeType.equispaced', '(True)'], {}), '(celltype, 6, libtab.LatticeType.equispaced, True)\n', (9205, 9255), False, 'import libtab\n'), ((10030, 10063), 'libtab.Nedelec', 'libtab.Nedelec', (['"""triangle"""', 'order'], {}), "('triangle', order)\n", (10044, 10063), False, 'import libtab\n'), ((11350, 11386), 'libtab.Nedelec', 'libtab.Nedelec', (['"""tetrahedron"""', 'order'], {}), "('tetrahedron', order)\n", (11364, 11386), False, 'import libtab\n'), ((434, 456), 'sympy.Symbol', 'sympy.Symbol', (['"""DUMMY1"""'], {}), "('DUMMY1')\n", (446, 456), False, 'import sympy\n'), ((458, 480), 'sympy.Symbol', 'sympy.Symbol', (['"""DUMMY2"""'], {}), "('DUMMY2')\n", (470, 480), False, 'import sympy\n'), ((482, 504), 'sympy.Symbol', 'sympy.Symbol', (['"""DUMMY3"""'], {}), "('DUMMY3')\n", (494, 504), False, 'import sympy\n'), ((10737, 10759), 'numpy.zeros_like', 'numpy.zeros_like', (['perm'], {}), '(perm)\n', (10753, 10759), False, 'import numpy\n'), ((11211, 11239), 'numpy.allclose', 'numpy.allclose', (['perm', 'actual'], {}), '(perm, actual)\n', (11225, 11239), False, 'import numpy\n'), ((12938, 12960), 'numpy.zeros_like', 'numpy.zeros_like', (['perm'], {}), '(perm)\n', (12954, 12960), False, 'import numpy\n'), ((13411, 13439), 'numpy.allclose', 'numpy.allclose', (['perm', 'actual'], {}), '(perm, actual)\n', (13425, 13439), False, 'import numpy\n'), ((8596, 8621), 'numpy.zeros_like', 'numpy.zeros_like', (['wtab[0]'], {}), '(wtab[0])\n', (8612, 8621), False, 'import 
numpy\n'), ((382, 407), 'libtab.geometry', 'libtab.geometry', (['celltype'], {}), '(celltype)\n', (397, 407), False, 'import libtab\n'), ((1028, 1044), 'sympy.Integer', 'sympy.Integer', (['(1)'], {}), '(1)\n', (1041, 1044), False, 'import sympy\n'), ((8679, 8709), 'sympy.diff', 'sympy.diff', (['g[i]', 'x', 'kx', 'y', 'ky'], {}), '(g[i], x, kx, y, ky)\n', (8689, 8709), False, 'import sympy\n'), ((9491, 9516), 'numpy.zeros_like', 'numpy.zeros_like', (['wtab[0]'], {}), '(wtab[0])\n', (9507, 9516), False, 'import numpy\n'), ((2064, 2080), 'sympy.Integer', 'sympy.Integer', (['(1)'], {}), '(1)\n', (2077, 2080), False, 'import sympy\n'), ((4588, 4604), 'sympy.Integer', 'sympy.Integer', (['(1)'], {}), '(1)\n', (4601, 4604), False, 'import sympy\n'), ((9582, 9619), 'sympy.diff', 'sympy.diff', (['g[i]', 'x', 'kx', 'y', 'ky', 'z', 'kz'], {}), '(g[i], x, kx, y, ky, z, kz)\n', (9592, 9619), False, 'import sympy\n'), ((2897, 2913), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (2910, 2913), False, 'import sympy\n'), ((2920, 2936), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (2933, 2936), False, 'import sympy\n'), ((2944, 2960), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (2957, 2960), False, 'import sympy\n'), ((5635, 5651), 'sympy.Integer', 'sympy.Integer', (['(1)'], {}), '(1)\n', (5648, 5651), False, 'import sympy\n'), ((7314, 7330), 'sympy.Integer', 'sympy.Integer', (['(1)'], {}), '(1)\n', (7327, 7330), False, 'import sympy\n'), ((3047, 3063), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (3060, 3063), False, 'import sympy\n'), ((3099, 3115), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (3112, 3115), False, 'import sympy\n'), ((3150, 3166), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (3163, 3166), False, 'import sympy\n'), ((3186, 3202), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (3199, 3202), False, 'import sympy\n'), ((3238, 3254), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), 
'(0)\n', (3251, 3254), False, 'import sympy\n'), ((3290, 3306), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (3303, 3306), False, 'import sympy\n'), ((3348, 3364), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (3361, 3364), False, 'import sympy\n'), ((3401, 3417), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (3414, 3417), False, 'import sympy\n'), ((8858, 8878), 'libtab.index', 'libtab.index', (['kx', 'ky'], {}), '(kx, ky)\n', (8870, 8878), False, 'import libtab\n'), ((3524, 3540), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (3537, 3540), False, 'import sympy\n'), ((3572, 3588), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (3585, 3588), False, 'import sympy\n'), ((3617, 3633), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (3630, 3633), False, 'import sympy\n'), ((3703, 3719), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (3716, 3719), False, 'import sympy\n'), ((3798, 3814), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (3811, 3814), False, 'import sympy\n'), ((3870, 3886), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (3883, 3886), False, 'import sympy\n'), ((3919, 3935), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (3932, 3935), False, 'import sympy\n'), ((4000, 4016), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (4013, 4016), False, 'import sympy\n'), ((4103, 4119), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (4116, 4119), False, 'import sympy\n'), ((4227, 4243), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (4240, 4243), False, 'import sympy\n'), ((4296, 4312), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (4309, 4312), False, 'import sympy\n'), ((4345, 4361), 'sympy.Integer', 'sympy.Integer', (['(0)'], {}), '(0)\n', (4358, 4361), False, 'import sympy\n'), ((9883, 9907), 'libtab.index', 'libtab.index', (['kx', 'ky', 'kz'], {}), '(kx, ky, kz)\n', (9895, 9907), False, 'import libtab\n')] |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Feb 7, 2013
@author: alfoa
This python module performs the loading of
data from csv files
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
import numpy as np
from scipy.interpolate import interp1d
import copy
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from utils import utils
import MessageHandler
#Internal Modules End--------------------------------------------------------------------------------
class CsvLoader(MessageHandler.MessageUser):
"""
Class aimed to load the CSV files
"""
def __init__(self,messageHandler):
"""
Constructor
@ In, messageHandler, MessageHandler, the message handler
@ Out, None
"""
self.allOutParam = False # all output parameters?
self.allFieldNames = []
self.type = 'CsvLoader'
self.printTag = self.type
self.messageHandler = messageHandler
def loadCsvFile(self,myFile):
"""
Function to load a csv file into a numpy array (2D)
It also retrieves the headers
The format of the csv must be:
STRING,STRING,STRING,STRING
FLOAT ,FLOAT ,FLOAT ,FLOAT
...
FLOAT ,FLOAT ,FLOAT ,FLOAT
@ In, fileIn, string, Input file name (absolute path)
@ Out, data, numpy.ndarray, the loaded data
"""
# open file
myFile.open(mode='rb')
# read the field names
head = myFile.readline().decode()
self.allFieldNames = head.split(',')
for index in range(len(self.allFieldNames)):
self.allFieldNames[index] = self.allFieldNames[index].strip()
# load the table data (from the csv file) into a numpy nd array
data = np.loadtxt(myFile,dtype='float',delimiter=',',ndmin=2,skiprows=1)
# close file
myFile.close()
return data
def getAllFieldNames(self):
"""
Function to get all field names found in the csv file
@ In, None
@ Out, allFieldNames, list, list of field names (headers)
"""
return self.allFieldNames
| [
"numpy.loadtxt"
] | [((2722, 2791), 'numpy.loadtxt', 'np.loadtxt', (['myFile'], {'dtype': '"""float"""', 'delimiter': '""","""', 'ndmin': '(2)', 'skiprows': '(1)'}), "(myFile, dtype='float', delimiter=',', ndmin=2, skiprows=1)\n", (2732, 2791), True, 'import numpy as np\n')] |
import sys
if sys.version_info[0] == 2:
import Tkinter as tk
from tkFileDialog import askdirectory
else:
import tkinter as tk
from tkinter.filedialog import askdirectory
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import os
### set size for figures
x_size = 7
y_size = 5
#############################################################################
############################## SAVE_FREQ ####################################
#############################################################################
def save_freq(arg0,arg2,arg3,arg4):
path_file = askdirectory()
path_file = os.path.join(path_file,'freq_perc.txt')
f = open(path_file,'w')
f.write('Frequencies for file %s\n\n' % (arg0))
f.write('Filter_round Frequency Period Percentage\n\n')
for i in range(len(arg2)):
f.write('%d %.8f %.2f %.2f\n' % (arg2[i],1.0 / arg3[i],arg3[i],arg4[i]))
f.close()
#############################################################################
#############################################################################
############################## DETREND_PLOT #################################
#############################################################################
def detrend_plot(main_win,arg0,arg1,arg2):
ticks = np.arange(0,len(arg0),len(arg0) / 7,dtype = int)
t = np.arange(1,len(arg0) + 1,dtype = int)
time = np.array(arg2,dtype = str)
ticks_vec = t[ticks]
time_label = time[ticks]
def save_fig():
path_tot = askdirectory()
plt.rc('text',usetex = True)
plt.rc('font',family = 'serif')
plt.plot(t,arg0 + arg1,'r',label = 'original')
if not np.isscalar(arg1):
plt.plot(t,arg1,'k',label = 'trend')
plt.plot(t,arg0,'b',label = 'detrended')
plt.legend(loc = 0)
plt.xlim(float(entries[0].get()),float(entries[1].get()))
plt.ylim(float(entries[2].get()),float(entries[3].get()))
plt.xlabel(entries[4].get())
plt.ylabel(entries[5].get())
plt.title(entries[6].get())
plt.xticks(ticks,time_label)
plt.margins(0.2)
plt.subplots_adjust(bottom = 0.2)
plt.savefig(os.path.join(path_tot,'ts.pdf'))
plt.close()
def screen_fig():
fig_ts = Figure(figsize = (x_size,y_size))
a = fig_ts.add_subplot(111)
a.plot(t,arg0 + arg1,'r',label = 'original')
if not np.isscalar(arg1):
a.plot(t,arg1,'k',label = 'trend')
a.plot(t,arg0,'b',label = 'detrended')
a.legend(loc = 0)
a.set_xlim(float(entries[0].get()),float(entries[1].get()))
a.set_ylim(float(entries[2].get()),float(entries[3].get()))
a.set_xlabel(entries[4].get(),fontsize = 15)
a.set_ylabel(entries[5].get(),fontsize = 15)
a.set_title(entries[6].get(),fontsize = 15)
a.set_xticks(ticks_vec)
a.set_xticklabels(time_label)
fig_ts.tight_layout()
canvas = FigureCanvasTkAgg(fig_ts,master = frame_1)
canvas.get_tk_widget().grid(row = 0,column = 0)
canvas.draw()
def reset_fig():
for i in range(len(entries)):
entries[i].delete(0,tk.END)
entries[i].insert(0,values[i])
screen_fig()
top = tk.Toplevel(main_win)
top.geometry("%dx%d" % (int(main_win.winfo_screenwidth() * 0.93 * 0.85),
int(main_win.winfo_screenheight() * 0.65)))
top.wm_title("Time Series")
top.resizable(width = False,height = False)
frame_1 = tk.Frame(top)
frame_1.grid(row = 0,column = 0)
frame_2 = tk.Frame(top)
frame_2.grid(row = 0,column = 1)
names = ["X Limit (left)","X Limit (right)","Y Limit (bottom)","Y Limit (top)","X Label","Y Label","Title"]
if not np.isscalar(arg1):
values = [t[0],t[-1],np.min([np.min(arg0[~np.isnan(arg0)]),np.min(arg1[~np.isnan(arg1)]),
np.min(arg0[~np.isnan(arg0)] + arg1[~np.isnan(arg1)])]) - 1.0,
np.max([np.max(arg0[~np.isnan(arg0)]),np.max(arg1[~np.isnan(arg1)]),
np.max(arg0[~np.isnan(arg0)] + arg1[~np.isnan(arg1)])]) + 1.0,'t','$X_t$',
'Time Series']
else:
values = [t[0],t[-1],np.min(arg0[~np.isnan(arg0)]) - 1.0,np.max(arg0[~np.isnan(arg0)]) + 1.0,
't','$X_t$','Time Series']
entries = []
for i in range(len(names)):
tk.Label(frame_2,text = names[i],font = "Verdana 13 bold").grid(row = 2 * i,column = 0,
padx = int(main_win.winfo_screenwidth() * 0.01))
entries.append(tk.Entry(frame_2,width = 18))
entries[-1].insert(0,values[i])
entries[-1].grid(row = 2 * i,column = 1)
for i in range(len(names)):
tk.Label(frame_2,text = "").grid(row = 2 * i + 1,column = 0)
screen_fig()
tk.Button(frame_2,text = "Replot",font = "Verdana 13 bold",command = screen_fig).grid(row = 2 * len(names),column = 0)
tk.Button(frame_2,text = "Save",font = "Verdana 13 bold",command = save_fig).grid(row = 2 * len(names),column = 1)
tk.Label(frame_2,text = "").grid(row = 2 * len(names) + 1,column = 0)
tk.Button(frame_2,text = "Reset",font = "Verdana 13 bold",command = reset_fig).grid(row = 2 * len(names) + 2,column = 0)
#############################################################################
#############################################################################
############################## SPECTRUM_PLOT ################################
#############################################################################
def spectrum_plot(main_win,arg0,arg1,arg2):
def save_fig():
path_tot = askdirectory()
plt.rc('text',usetex = True)
plt.rc('font',family = 'serif')
plt.plot(arg1,arg0,'b')
if arg2 != 0:
plt.plot((arg1[0],arg1[-1]),(arg2,arg2),'r')
plt.xlabel(entries[0].get())
plt.ylabel(entries[1].get())
plt.xlim(float(entries[2].get()),float(entries[3].get()))
plt.ylim(float(entries[4].get()),float(entries[5].get()))
plt.title(entries[6].get())
plt.savefig(os.path.join(path_tot,'spectrum_in.pdf'))
plt.close()
def screen_fig():
fig_ts = Figure(figsize = (x_size,y_size))
a = fig_ts.add_subplot(111)
a.plot(arg1,arg0,'b')
if arg2 != 0:
a.plot((arg1[0],arg1[-1]),(arg2,arg2),'r')
a.set_xlabel(entries[0].get(),fontsize = 15)
a.set_ylabel(entries[1].get(),fontsize = 15)
a.set_xlim(float(entries[2].get()),float(entries[3].get()))
a.set_ylim(float(entries[4].get()),float(entries[5].get()))
a.set_title(entries[6].get(),fontsize = 15)
fig_ts.tight_layout()
canvas = FigureCanvasTkAgg(fig_ts,master = frame_1)
canvas.get_tk_widget().grid(row = 0,column = 0)
canvas.draw()
def reset_fig():
for i in range(len(entries)):
entries[i].delete(0,tk.END)
entries[i].insert(0,values[i])
screen_fig()
top = tk.Toplevel(main_win)
top.geometry("%dx%d" % (int(main_win.winfo_screenwidth() * 0.93 * 0.85),
int(main_win.winfo_screenheight() * 0.65)))
if arg2 != 0:
top.wm_title("Spectrum")
else:
top.wm_title("Spectrum of residuals")
top.resizable(width = False,height = False)
frame_1 = tk.Frame(top)
frame_1.grid(row = 0,column = 0)
frame_2 = tk.Frame(top)
frame_2.grid(row = 0,column = 1)
names = ["X Label","Y Label","X Limit (left)","X Limit (right)","Y Limit (bottom)","Y Limit (top)","Title"]
if arg2 != 0:
values = ['$\\nu$','$P(\\nu)$',0,arg1[-1],0,np.max(arg0) + 10.0,'LS spectrum (initial)']
else:
values = ['$\\nu$','$P(\\nu)$',0,arg1[-1],0,np.max(arg0) + 10.0,'LS spectrum of residuals']
entries = []
for i in range(len(names)):
tk.Label(frame_2,text = names[i],font = "Verdana 13 bold").grid(row = 2 * i,column = 0,
padx = int(main_win.winfo_screenwidth() * 0.01))
entries.append(tk.Entry(frame_2,width = 18))
entries[-1].insert(0,values[i])
entries[-1].grid(row = 2 * i,column = 1)
for i in range(len(names)):
tk.Label(frame_2,text = "").grid(row = 2 * i + 1,column = 0)
screen_fig()
tk.Button(frame_2,text = "Replot",font = "Verdana 13 bold",command = screen_fig).grid(row = 2 * len(names),column = 0)
tk.Button(frame_2,text = "Save",font = "Verdana 13 bold",command = save_fig).grid(row = 2 * len(names),column = 1)
tk.Label(frame_2,text = "").grid(row = 2 * len(names) + 1,column = 0)
tk.Button(frame_2,text = "Reset",font = "Verdana 13 bold",command = reset_fig).grid(row = 2 * len(names) + 2,column = 0)
#############################################################################
#############################################################################
############################## RES_PLOT #####################################
#############################################################################
def res_plot(main_win,arg0,arg1,arg2,arg3):
ticks = np.arange(0,len(arg0),len(arg0) / 7,dtype = int)
t = np.arange(1,len(arg0) + 1,dtype = int)
time = np.array(arg3,dtype = str)
ticks_vec = t[ticks]
time_label = time[ticks]
pn_norm_notnan = arg2[~np.isnan(arg2)]
outlier_lim = 3.0
num_outliers_max = len(pn_norm_notnan[pn_norm_notnan > outlier_lim])
num_outliers_min = len(pn_norm_notnan[pn_norm_notnan < -outlier_lim])
num_outliers = num_outliers_max + num_outliers_min
def save_fig():
path_tot = askdirectory()
plt.figure(figsize = (12,9))
plt.rc('text',usetex = True)
plt.rc('font',family = 'serif')
plt.subplot(2,1,1)
plt.plot(t,arg0)
plt.xlim(int(entries[0].get()),int(entries[1].get()))
plt.ylim(float(entries[5].get()),float(entries[6].get()))
plt.xticks(ticks,'')
plt.ylabel(entries[2].get())
plt.title(entries[4].get())
plt.margins(0.2)
plt.subplots_adjust(hspace = 0.0)
plt.subplot(2,1,2)
sigma = '%.2f' % arg1
if int(matplotlib.__version__.split('.')[0]) == 2:
plt.bar(t,arg2,width = 10,label = 'num outl = ' + str(num_outliers))
else:
plt.bar(t,arg2,width = 0.1,label = 'num outl = ' + str(num_outliers))
plt.plot((t[0],t[-1]),(outlier_lim,outlier_lim),'r',label = '$\sigma$ = ' + sigma)
plt.plot((t[0],t[-1]),(-outlier_lim,-outlier_lim),'r')
plt.legend(loc = 0)
plt.xlim(int(entries[0].get()),int(entries[1].get()))
plt.ylim(float(entries[7].get()),float(entries[8].get()))
plt.xticks(ticks,time_label)
plt.ylabel(entries[3].get())
plt.margins(0.2)
plt.subplots_adjust(hspace = 0.0)
plt.savefig(os.path.join(path_tot,'res.pdf'))
plt.close()
def screen_fig():
fig_ts = Figure(figsize = (x_size,y_size))
a = fig_ts.add_subplot(211)
a.plot(t,arg0)
a.set_xlim(int(entries[0].get()),int(entries[1].get()))
a.set_ylim(float(entries[5].get()),float(entries[6].get()))
a.set_xticks(ticks_vec)
a.set_xticklabels('')
a.set_ylabel(entries[2].get(),fontsize = 15)
a.set_title(entries[4].get(),fontsize = 15)
b = fig_ts.add_subplot(212)
sigma = '%.2f' % arg1
if int(matplotlib.__version__.split('.')[0]) == 2:
b.bar(t,arg2,width = 10,label = 'num outl = ' + str(num_outliers))
else:
b.bar(t,arg2,width = 0.1,label = 'num outl = ' + str(num_outliers))
b.plot((t[0],t[-1]),(outlier_lim,outlier_lim),'r',label = '$\sigma$ = ' + sigma)
b.plot((t[0],t[-1]),(-outlier_lim,-outlier_lim),'r')
b.legend(loc = 0)
b.set_xlim(int(entries[0].get()),int(entries[1].get()))
b.set_ylim(float(entries[7].get()),float(entries[8].get()))
b.set_xticks(ticks)
b.set_xticklabels(time_label)
b.set_ylabel(entries[3].get(),fontsize = 15)
fig_ts.tight_layout()
canvas = FigureCanvasTkAgg(fig_ts,master = frame_1)
canvas.get_tk_widget().grid(row = 0,column = 0)
canvas.draw()
def reset_fig():
for i in range(len(entries)):
entries[i].delete(0,tk.END)
entries[i].insert(0,values[i])
screen_fig()
top = tk.Toplevel(main_win)
top.geometry("%dx%d" % (int(main_win.winfo_screenwidth() * 0.93 * 0.85),
int(main_win.winfo_screenheight() * 0.65)))
top.wm_title("Residuals")
top.resizable(width = False,height = False)
frame_1 = tk.Frame(top)
frame_1.grid(row = 0,column = 0)
frame_2 = tk.Frame(top)
frame_2.grid(row = 0,column = 1)
names = ["X Limit (left)","X Limit (right)","Y Label (top)","Y Label (bottom)","Title",
"Y1 Limit (bottom)","Y1 Limit (top)","Y2 Limit (bottom)","Y2 Limit (top)"]
values = [t[0],t[-1],'$N_t$','$N_t^{norm}$','Residuals / Normalised residuals',np.min(arg0[~np.isnan(arg0)]) - 10.0,
np.max(arg0[~np.isnan(arg0)]) + 10.0,np.min(arg2[~np.isnan(arg0)]) - 1.0,np.max(arg2[~np.isnan(arg0)]) + 1.0]
entries = []
for i in range(len(names)):
tk.Label(frame_2,text = names[i],font = "Verdana 13 bold").grid(row = 2 * i,column = 0,
padx = int(main_win.winfo_screenwidth() * 0.01))
entries.append(tk.Entry(frame_2,width = 18))
entries[-1].insert(0,values[i])
entries[-1].grid(row = 2 * i,column = 1)
for i in range(len(names)):
tk.Label(frame_2,text = "").grid(row = 2 * i + 1,column = 0)
screen_fig()
tk.Button(frame_2,text = "Replot",font = "Verdana 13 bold",command = screen_fig).grid(row = 2 * len(names),column = 0)
tk.Button(frame_2,text = "Save",font = "Verdana 13 bold",command = save_fig).grid(row = 2 * len(names),column = 1)
tk.Label(frame_2,text = "").grid(row = 2 * len(names) + 1,column = 0)
tk.Button(frame_2,text = "Reset",font = "Verdana 13 bold",command = reset_fig).grid(row = 2 * len(names) + 2,column = 0)
#############################################################################
#############################################################################
############################## DFA_PLOT #####################################
#############################################################################
def dfa_plot(main_win,arg0,arg1,arg2,arg3):
def save_fig():
path_tot = askdirectory()
plt.rc('text',usetex = True)
plt.rc('font',family = 'serif')
plt.plot(np.log(arg0),np.log(arg1),'o',label = '$H$ = ' + arg3)
plt.plot(np.log(arg0),arg2,'r')
plt.legend(loc = 0)
plt.xlim(float(entries[0].get()),float(entries[1].get()))
plt.ylim(float(entries[2].get()),float(entries[3].get()))
plt.xlabel(entries[4].get())
plt.ylabel(entries[5].get())
plt.title(entries[6].get())
plt.savefig(os.path.join(path_tot,'dfa.pdf'))
plt.close()
def screen_fig():
fig_ts = Figure(figsize = (x_size,y_size))
a = fig_ts.add_subplot(111)
a.plot(np.log(arg0),np.log(arg1),'o',label = '$H$ = ' + arg3)
a.plot(np.log(arg0),arg2,'r')
a.legend(loc = 0)
a.set_xlim(float(entries[0].get()),float(entries[1].get()))
a.set_ylim(float(entries[2].get()),float(entries[3].get()))
a.set_xlabel(entries[4].get())
a.set_ylabel(entries[5].get())
a.set_title(entries[6].get())
fig_ts.tight_layout()
canvas = FigureCanvasTkAgg(fig_ts,master = frame_1)
canvas.get_tk_widget().grid(row = 0,column = 0)
canvas.draw()
def reset_fig():
for i in range(len(entries)):
entries[i].delete(0,tk.END)
entries[i].insert(0,values[i])
screen_fig()
top = tk.Toplevel(main_win)
top.geometry("%dx%d" % (int(main_win.winfo_screenwidth() * 0.93 * 0.85),
int(main_win.winfo_screenheight() * 0.65)))
top.wm_title("DFA")
top.resizable(width = False,height = False)
frame_1 = tk.Frame(top)
frame_1.grid(row = 0,column = 0)
frame_2 = tk.Frame(top)
frame_2.grid(row = 0,column = 1)
names = ["X Limit (left)","X Limit (right)","Y Limit (bottom)","Y Limit (top)","X Label","Y Label","Title"]
values = [np.log(arg0[0]) - 0.3,np.log(arg0[-1]) + 0.3,np.min(np.log(arg1)) - 1.0,np.max(np.log(arg1)) + 1.0,
'log$(F(n))$','log$(n)$','DFA fit']
entries = []
for i in range(len(names)):
tk.Label(frame_2,text = names[i],font = "Verdana 13 bold").grid(row = 2 * i,column = 0,
padx = int(main_win.winfo_screenwidth() * 0.01))
entries.append(tk.Entry(frame_2,width = 18))
entries[-1].insert(0,values[i])
entries[-1].grid(row = 2 * i,column = 1)
for i in range(len(names)):
tk.Label(frame_2,text = "").grid(row = 2 * i + 1,column = 0)
screen_fig()
tk.Button(frame_2,text = "Replot",font = "Verdana 13 bold",command = screen_fig).grid(row = 2 * len(names),column = 0)
tk.Button(frame_2,text = "Save",font = "Verdana 13 bold",command = save_fig).grid(row = 2 * len(names),column = 1)
tk.Label(frame_2,text = "").grid(row = 2 * len(names) + 1,column = 0)
tk.Button(frame_2,text = "Reset",font = "Verdana 13 bold",command = reset_fig).grid(row = 2 * len(names) + 2,column = 0)
#############################################################################
#############################################################################
############################## MDFA_PLOT ####################################
#############################################################################
def mdfa_plot(main_win,arg0,arg1,arg2,arg3,arg4,arg5,arg6,arg7):
def save_fig():
path_tot = askdirectory()
plt.figure(figsize = (11,11))
plt.rc('text',usetex = True)
plt.rc('font',family = 'serif')
plt.subplot(2,2,1)
plt.plot(np.log(arg0),np.log(arg1[0,:]),'b.')
plt.plot(np.log(arg0),arg2[:,0],'b',label = 'q = -3')
plt.plot(np.log(arg0),np.log(arg1[50,:]),'r.')
plt.plot(np.log(arg0),arg2[:,50],'r',label = 'q = 0')
plt.plot(np.log(arg0),np.log(arg1[-1,:]),'g.')
plt.plot(np.log(arg0),arg2[:,-1],'g',label = 'q = 3')
plt.legend(loc = 0)
plt.xlim(float(entries[0].get()),float(entries[1].get()))
plt.ylim(float(entries[2].get()),float(entries[3].get()))
plt.xlabel(entries[4].get())
plt.ylabel(entries[5].get())
plt.title(entries[6].get())
plt.margins(0.2)
plt.subplots_adjust(bottom = 0.2)
plt.subplot(2,2,2)
plt.plot(arg3,arg4,'b',label = 'h(q)')
plt.plot((arg3[0],arg3[-1]),(arg5,arg5),'k',label = 'H')
plt.legend(loc = 0)
plt.xlim(float(entries[7].get()),float(entries[8].get()))
plt.ylim(float(entries[9].get()),float(entries[10].get()))
plt.xlabel(entries[11].get())
plt.ylabel(entries[12].get())
plt.title(entries[13].get())
plt.margins(0.2)
plt.subplots_adjust(bottom = 0.2)
plt.subplot(2,2,3)
plt.plot(arg6,arg7,'b')
plt.xlim(float(entries[14].get()),float(entries[15].get()))
plt.ylim(float(entries[16].get()),float(entries[17].get()))
plt.xlabel(entries[18].get())
plt.ylabel(entries[19].get())
plt.title(entries[20].get())
plt.margins(0.2)
plt.subplots_adjust(bottom = 0.2)
plt.savefig(os.path.join(path_tot,'mdfa.pdf'))
plt.close()
def screen_fig():
fig_ts = Figure(figsize = (x_size,y_size))
a = fig_ts.add_subplot(221)
a.plot(np.log(arg0),np.log(arg1[0,:]),'b.')
a.plot(np.log(arg0),arg2[:,0],'b',label = 'q = -3')
a.plot(np.log(arg0),np.log(arg1[50,:]),'r.')
a.plot(np.log(arg0),arg2[:,50],'r',label = 'q = 0')
a.plot(np.log(arg0),np.log(arg1[-1,:]),'g.')
a.plot(np.log(arg0),arg2[:,-1],'g',label = 'q = 3')
a.legend(loc = 0)
a.set_xlim(float(entries[0].get()),float(entries[1].get()))
a.set_ylim(float(entries[2].get()),float(entries[3].get()))
a.set_xlabel(entries[4].get())
a.set_ylabel(entries[5].get())
a.set_title(entries[6].get())
b = fig_ts.add_subplot(222)
b.plot(arg3,arg4,'b',label = 'H(q)')
b.plot((arg3[0],arg3[-1]),(arg5,arg5),'k',label = 'H')
b.legend(loc = 0)
b.set_xlim(float(entries[7].get()),float(entries[8].get()))
b.set_ylim(float(entries[9].get()),float(entries[10].get()))
b.set_xlabel(entries[11].get())
b.set_ylabel(entries[12].get())
b.set_title(entries[13].get())
c = fig_ts.add_subplot(223)
c.plot(arg6,arg7,'b')
c.set_xlim(float(entries[14].get()),float(entries[15].get()))
c.set_ylim(float(entries[16].get()),float(entries[17].get()))
c.set_xlabel(entries[18].get())
c.set_ylabel(entries[19].get())
c.set_title(entries[20].get())
fig_ts.tight_layout()
canvas = FigureCanvasTkAgg(fig_ts,master = frame_1)
canvas.get_tk_widget().grid(row = 0,column = 0)
canvas.draw()
def reset_fig():
    # Restore every entry field to its default value, then redraw.
    for entry,default in zip(entries,values):
        entry.delete(0,tk.END)
        entry.insert(0,default)
    screen_fig()
# --- build the MFDFA result window ----------------------------------
top = tk.Toplevel(main_win)
# size the window relative to the screen resolution
top.geometry("%dx%d" % (int(main_win.winfo_screenwidth() * 0.93 * 0.85),
             int(main_win.winfo_screenheight() * 0.75)))
top.wm_title("MFDFA")
top.resizable(width = False,height = False)
# frame_1 hosts the embedded matplotlib canvas, frame_2 the entry fields
frame_1 = tk.Frame(top)
frame_1.grid(row = 0,column = 0)
frame_2 = tk.Frame(top)
frame_2.grid(row = 0,column = 1)
# labels for the 21 entry fields: 7 per subplot (x/y limits, x/y labels, title)
names = ["X1 Limit (left)","X1 Limit (right)","Y1 Limit (bottom)","Y1 Limit (top)","X1 Label","Y1 Label","Title1",
         "X2 Limit (left)","X2 Limit (right)","Y2 Limit (bottom)","Y2 Limit (top)","X2 Label","Y2 Label","Title2",
         "X3 Limit (left)","X3 Limit (right)","Y3 Limit (bottom)","Y3 Limit (top)","X3 Label","Y3 Label","Title3"]
# default values: axis ranges derived from the data (padded by fixed
# margins), followed by the default axis labels and titles for each panel
values = [np.log(arg0[0]),np.log(arg0[-1]),np.min([np.min(np.log(arg1[0,:])),np.min(arg2[:,0]),
          np.min(np.log(arg1[50,:])),np.min(arg2[:,50]),np.min(np.log(arg1[-1,:])),np.min(arg2[:,-1])]) - 1.0,
          np.max([np.max(np.log(arg1[0,:])),np.max(arg2[:,0]),np.max(np.log(arg1[50,:])),np.max(arg2[:,50]),
          np.max(np.log(arg1[-1,:])),np.max(arg2[:,-1])]) + 1.0,'log(n)','log(F(n))','MDFA fit',
          arg3[0],arg3[-1],np.min(arg4) - 0.1,np.max(arg4) + 0.1,'q','H(q)','Generalised Hurst exponent',
          np.min(arg6) - 0.2,np.max(arg6) + 0.2,np.min(arg7) - 0.2,1.2,'$\\alpha$','$f(\\alpha)$',
          'Singularity spectrum']
entries = []
# one label + pre-filled entry per field; closures above read this list
for i in range(len(names)):
    tk.Label(frame_2,text = names[i],font = "Verdana 13 bold").grid(row = i,column = 0,
            padx = int(main_win.winfo_screenwidth() * 0.01))
    entries.append(tk.Entry(frame_2,width = 18))
    entries[-1].insert(0,values[i])
    entries[-1].grid(row = i,column = 1)
screen_fig()
# control buttons below the entry grid
tk.Label(frame_2,text = "").grid(row = len(names),column = 0)
tk.Button(frame_2,text = "Replot",font = "Verdana 13 bold",command = screen_fig).grid(row = len(names) + 1,column = 0)
tk.Button(frame_2,text = "Save",font = "Verdana 13 bold",command = save_fig).grid(row = len(names) + 1,column = 1)
tk.Button(frame_2,text = "Reset",font = "Verdana 13 bold",command = reset_fig).grid(row = len(names) + 2,column = 0)
#############################################################################
#############################################################################
############################## MFDFA2_PLOT ##################################
#############################################################################
def MFDFA2_plot(main_win,arg0,arg1,arg2,arg3,arg4,arg5):
    """Display the MFDFA2 results in a new Toplevel window.

    Top panel: the local Hurst exponent series ``arg0`` on a black
    background with reference lines at 0.5, 1.0 and 1.5.  Bottom panel:
    the probability distribution of H_t (``arg1`` vs ``arg2`` — presumably
    abscissa/ordinate, TODO confirm against the caller) together with its
    fit ``arg3``; ``arg4``/``arg5`` are the mean/std already formatted as
    strings for the legend labels.

    The window provides entry fields to adjust axis limits/labels/titles,
    plus Replot / Save / Reset buttons; ``save_fig`` writes the current
    figure to <chosen directory>/MFDFA2.pdf.
    """
    def set_black_bg(ax):
        # set_axis_bgcolor() was removed in matplotlib 2.2; prefer the
        # modern API when present instead of parsing the version string
        # (the original "major version == 2" test crashed on matplotlib >= 3
        # by falling through to the removed set_axis_bgcolor call).
        if hasattr(ax,'set_facecolor'):
            ax.set_facecolor('black')
        else:
            ax.set_axis_bgcolor('black')
    def save_fig():
        # Re-render both panels with LaTeX text and save them as a PDF.
        path_tot = askdirectory()
        plt.figure(figsize = (12,9))
        plt.rc('text',usetex = True)
        plt.rc('font',family = 'serif')
        plt.subplot(2,1,1)
        set_black_bg(plt.gca())
        plt.plot(arg0,'y')
        # reference levels for the local Hurst exponent
        plt.plot(0.5 * np.ones((len(arg0),)),'w')
        plt.plot(np.ones((len(arg0),)),'m')
        plt.plot(1.5 * np.ones((len(arg0),)),'r')
        plt.xlim(float(entries[0].get()),float(entries[1].get()))
        plt.ylim(float(entries[2].get()),float(entries[3].get()))
        plt.xlabel(entries[4].get())
        plt.ylabel(entries[5].get())
        plt.title(entries[6].get())
        plt.margins(0.2)
        plt.subplots_adjust(hspace = 0.3)
        plt.subplot(2,1,2)
        # raw strings: '\m' / '\s' are invalid escape sequences in plain
        # literals (the string values themselves are unchanged)
        plt.plot(arg1,arg2,'b',label = r'$\mu$ = ' + arg4)
        plt.plot(arg1,arg3,'r',linewidth = 2.0,label = r'$\sigma$ = ' + arg5)
        plt.legend(loc = 0)
        plt.xlim(float(entries[7].get()),float(entries[8].get()))
        plt.ylim(float(entries[9].get()),float(entries[10].get()))
        # NOTE(review): entries[11] (shown as "X2 Label" in the GUI) feeds
        # ylabel and entries[12] ("Y2 Label") feeds xlabel; kept as-is since
        # the default values match this wiring -- only the GUI captions look
        # swapped.
        plt.ylabel(entries[11].get())
        plt.xlabel(entries[12].get())
        plt.title(entries[13].get())
        plt.margins(0.2)
        plt.subplots_adjust(hspace = 0.3)
        plt.savefig(os.path.join(path_tot,'MFDFA2.pdf'))
        plt.close()
    def screen_fig():
        # Redraw the embedded Tk figure from the current entry values.
        fig_ts = Figure(figsize = (x_size,y_size))
        a = fig_ts.add_subplot(211)
        set_black_bg(fig_ts.gca())
        a.plot(arg0,'y')
        a.plot(0.5 * np.ones((len(arg0),)),'w')
        a.plot(np.ones((len(arg0),)),'m')
        a.plot(1.5 * np.ones((len(arg0),)),'r')
        a.set_xlim(float(entries[0].get()),float(entries[1].get()))
        a.set_ylim(float(entries[2].get()),float(entries[3].get()))
        a.set_xlabel(entries[4].get())
        a.set_ylabel(entries[5].get())
        a.set_title(entries[6].get())
        b = fig_ts.add_subplot(212)
        b.plot(arg1,arg2,'b',label = r'$\mu$ = ' + arg4)
        b.plot(arg1,arg3,'r',linewidth = 2.0,label = r'$\sigma$ = ' + arg5)
        b.legend(loc = 0)
        b.set_xlim(float(entries[7].get()),float(entries[8].get()))
        b.set_ylim(float(entries[9].get()),float(entries[10].get()))
        b.set_ylabel(entries[11].get())
        b.set_xlabel(entries[12].get())
        b.set_title(entries[13].get())
        fig_ts.tight_layout()
        canvas = FigureCanvasTkAgg(fig_ts,master = frame_1)
        canvas.get_tk_widget().grid(row = 0,column = 0)
        canvas.draw()
    def reset_fig():
        # Restore the default entry values, then redraw.
        for i in range(len(entries)):
            entries[i].delete(0,tk.END)
            entries[i].insert(0,values[i])
        screen_fig()
    top = tk.Toplevel(main_win)
    top.geometry("%dx%d" % (int(main_win.winfo_screenwidth() * 0.93 * 0.85),
                 int(main_win.winfo_screenheight() * 0.65)))
    # was "DFA" (copy-paste from the DFA window) -- this window shows the
    # MFDFA2 results and saves MFDFA2.pdf
    top.wm_title("MFDFA2")
    top.resizable(width = False,height = False)
    # frame_1 hosts the embedded matplotlib canvas, frame_2 the entry fields
    frame_1 = tk.Frame(top)
    frame_1.grid(row = 0,column = 0)
    frame_2 = tk.Frame(top)
    frame_2.grid(row = 0,column = 1)
    # labels for the 14 entry fields: 7 per panel (x/y limits, labels, title)
    names = ["X1 Limit (left)","X1 Limit (right)","Y1 Limit (bottom)","Y1 Limit (top)","X1 Label (top)",
             "Y1 Label (top)","Title1 (top)","X2 Limit (left)","X2 Limit (right)","Y2 Limit (bottom)",
             "Y2 Limit (top)","X2 Label (top)","Y2 Label (bottom)","Title2 (bottom)"]
    # default values: data-derived axis ranges, then labels and titles
    values = [0,len(arg0),0,3,'time','$H_t$','local Hurst exponent',np.min(arg1) - 0.2,np.max(arg1) + 0.2,
              0,np.max(arg2) * 11 / 10,'P($H_t$)','$H_t$','Prob distr of $H_t$']
    entries = []
    # one label + pre-filled entry per field; closures above read this list
    for i in range(len(names)):
        tk.Label(frame_2,text = names[i],font = "Verdana 13 bold").grid(row = i,column = 0,
                padx = int(main_win.winfo_screenwidth() * 0.01))
        entries.append(tk.Entry(frame_2,width = 18))
        entries[-1].insert(0,values[i])
        entries[-1].grid(row = i,column = 1)
    screen_fig()
    # control buttons below the entry grid
    tk.Label(frame_2,text = "").grid(row = len(names),column = 0)
    tk.Button(frame_2,text = "Replot",font = "Verdana 13 bold",command = screen_fig).grid(row = len(names) + 1,column = 0)
    tk.Button(frame_2,text = "Save",font = "Verdana 13 bold",command = save_fig).grid(row = len(names) + 1,column = 1)
    tk.Label(frame_2,text = "").grid(row = len(names) + 2,column = 0)
    tk.Button(frame_2,text = "Reset",font = "Verdana 13 bold",command = reset_fig).grid(row = len(names) + 3,column = 0)
#############################################################################
| [
"tkinter.filedialog.askdirectory",
"matplotlib.__version__.split",
"numpy.log",
"tkinter.Button",
"numpy.array",
"tkinter.Label",
"tkinter.Frame",
"matplotlib.pyplot.margins",
"tkinter.Entry",
"numpy.isscalar",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.min",
... | [((223, 246), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (237, 246), False, 'import matplotlib\n'), ((730, 744), 'tkinter.filedialog.askdirectory', 'askdirectory', ([], {}), '()\n', (742, 744), False, 'from tkinter.filedialog import askdirectory\n'), ((761, 801), 'os.path.join', 'os.path.join', (['path_file', '"""freq_perc.txt"""'], {}), "(path_file, 'freq_perc.txt')\n", (773, 801), False, 'import os\n'), ((1546, 1571), 'numpy.array', 'np.array', (['arg2'], {'dtype': 'str'}), '(arg2, dtype=str)\n', (1554, 1571), True, 'import numpy as np\n'), ((3433, 3454), 'tkinter.Toplevel', 'tk.Toplevel', (['main_win'], {}), '(main_win)\n', (3444, 3454), True, 'import tkinter as tk\n'), ((3703, 3716), 'tkinter.Frame', 'tk.Frame', (['top'], {}), '(top)\n', (3711, 3716), True, 'import tkinter as tk\n'), ((3773, 3786), 'tkinter.Frame', 'tk.Frame', (['top'], {}), '(top)\n', (3781, 3786), True, 'import tkinter as tk\n'), ((7302, 7323), 'tkinter.Toplevel', 'tk.Toplevel', (['main_win'], {}), '(main_win)\n', (7313, 7323), True, 'import tkinter as tk\n'), ((7643, 7656), 'tkinter.Frame', 'tk.Frame', (['top'], {}), '(top)\n', (7651, 7656), True, 'import tkinter as tk\n'), ((7713, 7726), 'tkinter.Frame', 'tk.Frame', (['top'], {}), '(top)\n', (7721, 7726), True, 'import tkinter as tk\n'), ((9548, 9573), 'numpy.array', 'np.array', (['arg3'], {'dtype': 'str'}), '(arg3, dtype=str)\n', (9556, 9573), True, 'import numpy as np\n'), ((12775, 12796), 'tkinter.Toplevel', 'tk.Toplevel', (['main_win'], {}), '(main_win)\n', (12786, 12796), True, 'import tkinter as tk\n'), ((13043, 13056), 'tkinter.Frame', 'tk.Frame', (['top'], {}), '(top)\n', (13051, 13056), True, 'import tkinter as tk\n'), ((13113, 13126), 'tkinter.Frame', 'tk.Frame', (['top'], {}), '(top)\n', (13121, 13126), True, 'import tkinter as tk\n'), ((16351, 16372), 'tkinter.Toplevel', 'tk.Toplevel', (['main_win'], {}), '(main_win)\n', (16362, 16372), True, 'import tkinter as tk\n'), ((16613, 16626), 
'tkinter.Frame', 'tk.Frame', (['top'], {}), '(top)\n', (16621, 16626), True, 'import tkinter as tk\n'), ((16683, 16696), 'tkinter.Frame', 'tk.Frame', (['top'], {}), '(top)\n', (16691, 16696), True, 'import tkinter as tk\n'), ((22004, 22025), 'tkinter.Toplevel', 'tk.Toplevel', (['main_win'], {}), '(main_win)\n', (22015, 22025), True, 'import tkinter as tk\n'), ((22268, 22281), 'tkinter.Frame', 'tk.Frame', (['top'], {}), '(top)\n', (22276, 22281), True, 'import tkinter as tk\n'), ((22338, 22351), 'tkinter.Frame', 'tk.Frame', (['top'], {}), '(top)\n', (22346, 22351), True, 'import tkinter as tk\n'), ((27589, 27610), 'tkinter.Toplevel', 'tk.Toplevel', (['main_win'], {}), '(main_win)\n', (27600, 27610), True, 'import tkinter as tk\n'), ((27851, 27864), 'tkinter.Frame', 'tk.Frame', (['top'], {}), '(top)\n', (27859, 27864), True, 'import tkinter as tk\n'), ((27921, 27934), 'tkinter.Frame', 'tk.Frame', (['top'], {}), '(top)\n', (27929, 27934), True, 'import tkinter as tk\n'), ((1671, 1685), 'tkinter.filedialog.askdirectory', 'askdirectory', ([], {}), '()\n', (1683, 1685), False, 'from tkinter.filedialog import askdirectory\n'), ((1694, 1721), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (1700, 1721), True, 'import matplotlib.pyplot as plt\n'), ((1731, 1761), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (1737, 1761), True, 'import matplotlib.pyplot as plt\n'), ((1771, 1818), 'matplotlib.pyplot.plot', 'plt.plot', (['t', '(arg0 + arg1)', '"""r"""'], {'label': '"""original"""'}), "(t, arg0 + arg1, 'r', label='original')\n", (1779, 1818), True, 'import matplotlib.pyplot as plt\n'), ((1962, 1979), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (1972, 1979), True, 'import matplotlib.pyplot as plt\n'), ((2232, 2261), 'matplotlib.pyplot.xticks', 'plt.xticks', (['ticks', 'time_label'], {}), '(ticks, time_label)\n', (2242, 2261), True, 
'import matplotlib.pyplot as plt\n'), ((2269, 2285), 'matplotlib.pyplot.margins', 'plt.margins', (['(0.2)'], {}), '(0.2)\n', (2280, 2285), True, 'import matplotlib.pyplot as plt\n'), ((2294, 2325), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.2)'}), '(bottom=0.2)\n', (2313, 2325), True, 'import matplotlib.pyplot as plt\n'), ((2389, 2400), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2398, 2400), True, 'import matplotlib.pyplot as plt\n'), ((2441, 2473), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(x_size, y_size)'}), '(figsize=(x_size, y_size))\n', (2447, 2473), False, 'from matplotlib.figure import Figure\n'), ((3133, 3174), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig_ts'], {'master': 'frame_1'}), '(fig_ts, master=frame_1)\n', (3150, 3174), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((3947, 3964), 'numpy.isscalar', 'np.isscalar', (['arg1'], {}), '(arg1)\n', (3958, 3964), True, 'import numpy as np\n'), ((5909, 5923), 'tkinter.filedialog.askdirectory', 'askdirectory', ([], {}), '()\n', (5921, 5923), False, 'from tkinter.filedialog import askdirectory\n'), ((5932, 5959), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (5938, 5959), True, 'import matplotlib.pyplot as plt\n'), ((5969, 5999), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (5975, 5999), True, 'import matplotlib.pyplot as plt\n'), ((6009, 6034), 'matplotlib.pyplot.plot', 'plt.plot', (['arg1', 'arg0', '"""b"""'], {}), "(arg1, arg0, 'b')\n", (6017, 6034), True, 'import matplotlib.pyplot as plt\n'), ((6424, 6435), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6433, 6435), True, 'import matplotlib.pyplot as plt\n'), ((6480, 6512), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(x_size, y_size)'}), '(figsize=(x_size, y_size))\n', (6486, 6512), 
False, 'from matplotlib.figure import Figure\n'), ((6998, 7039), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig_ts'], {'master': 'frame_1'}), '(fig_ts, master=frame_1)\n', (7015, 7039), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((9945, 9959), 'tkinter.filedialog.askdirectory', 'askdirectory', ([], {}), '()\n', (9957, 9959), False, 'from tkinter.filedialog import askdirectory\n'), ((9968, 9995), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 9)'}), '(figsize=(12, 9))\n', (9978, 9995), True, 'import matplotlib.pyplot as plt\n'), ((10005, 10032), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (10011, 10032), True, 'import matplotlib.pyplot as plt\n'), ((10042, 10072), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (10048, 10072), True, 'import matplotlib.pyplot as plt\n'), ((10087, 10107), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (10098, 10107), True, 'import matplotlib.pyplot as plt\n'), ((10114, 10131), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'arg0'], {}), '(t, arg0)\n', (10122, 10131), True, 'import matplotlib.pyplot as plt\n'), ((10267, 10288), 'matplotlib.pyplot.xticks', 'plt.xticks', (['ticks', '""""""'], {}), "(ticks, '')\n", (10277, 10288), True, 'import matplotlib.pyplot as plt\n'), ((10369, 10385), 'matplotlib.pyplot.margins', 'plt.margins', (['(0.2)'], {}), '(0.2)\n', (10380, 10385), True, 'import matplotlib.pyplot as plt\n'), ((10394, 10425), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.0)'}), '(hspace=0.0)\n', (10413, 10425), True, 'import matplotlib.pyplot as plt\n'), ((10441, 10461), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (10452, 10461), True, 'import matplotlib.pyplot as plt\n'), ((10734, 10825), 
'matplotlib.pyplot.plot', 'plt.plot', (['(t[0], t[-1])', '(outlier_lim, outlier_lim)', '"""r"""'], {'label': "('$\\\\sigma$ = ' + sigma)"}), "((t[0], t[-1]), (outlier_lim, outlier_lim), 'r', label=\n '$\\\\sigma$ = ' + sigma)\n", (10742, 10825), True, 'import matplotlib.pyplot as plt\n'), ((10825, 10883), 'matplotlib.pyplot.plot', 'plt.plot', (['(t[0], t[-1])', '(-outlier_lim, -outlier_lim)', '"""r"""'], {}), "((t[0], t[-1]), (-outlier_lim, -outlier_lim), 'r')\n", (10833, 10883), True, 'import matplotlib.pyplot as plt\n'), ((10888, 10905), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (10898, 10905), True, 'import matplotlib.pyplot as plt\n'), ((11044, 11073), 'matplotlib.pyplot.xticks', 'plt.xticks', (['ticks', 'time_label'], {}), '(ticks, time_label)\n', (11054, 11073), True, 'import matplotlib.pyplot as plt\n'), ((11118, 11134), 'matplotlib.pyplot.margins', 'plt.margins', (['(0.2)'], {}), '(0.2)\n', (11129, 11134), True, 'import matplotlib.pyplot as plt\n'), ((11143, 11174), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.0)'}), '(hspace=0.0)\n', (11162, 11174), True, 'import matplotlib.pyplot as plt\n'), ((11240, 11251), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11249, 11251), True, 'import matplotlib.pyplot as plt\n'), ((11292, 11324), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(x_size, y_size)'}), '(figsize=(x_size, y_size))\n', (11298, 11324), False, 'from matplotlib.figure import Figure\n'), ((12475, 12516), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig_ts'], {'master': 'frame_1'}), '(fig_ts, master=frame_1)\n', (12492, 12516), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((14956, 14970), 'tkinter.filedialog.askdirectory', 'askdirectory', ([], {}), '()\n', (14968, 14970), False, 'from tkinter.filedialog import askdirectory\n'), ((14979, 15006), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], 
{'usetex': '(True)'}), "('text', usetex=True)\n", (14985, 15006), True, 'import matplotlib.pyplot as plt\n'), ((15016, 15046), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (15022, 15046), True, 'import matplotlib.pyplot as plt\n'), ((15168, 15185), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (15178, 15185), True, 'import matplotlib.pyplot as plt\n'), ((15492, 15503), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15501, 15503), True, 'import matplotlib.pyplot as plt\n'), ((15548, 15580), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(x_size, y_size)'}), '(figsize=(x_size, y_size))\n', (15554, 15580), False, 'from matplotlib.figure import Figure\n'), ((16051, 16092), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig_ts'], {'master': 'frame_1'}), '(fig_ts, master=frame_1)\n', (16068, 16092), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((18398, 18412), 'tkinter.filedialog.askdirectory', 'askdirectory', ([], {}), '()\n', (18410, 18412), False, 'from tkinter.filedialog import askdirectory\n'), ((18421, 18449), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(11, 11)'}), '(figsize=(11, 11))\n', (18431, 18449), True, 'import matplotlib.pyplot as plt\n'), ((18459, 18486), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (18465, 18486), True, 'import matplotlib.pyplot as plt\n'), ((18496, 18526), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (18502, 18526), True, 'import matplotlib.pyplot as plt\n'), ((18537, 18557), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (18548, 18557), True, 'import matplotlib.pyplot as plt\n'), ((18914, 18931), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (18924, 
18931), True, 'import matplotlib.pyplot as plt\n'), ((19184, 19200), 'matplotlib.pyplot.margins', 'plt.margins', (['(0.2)'], {}), '(0.2)\n', (19195, 19200), True, 'import matplotlib.pyplot as plt\n'), ((19209, 19240), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.2)'}), '(bottom=0.2)\n', (19228, 19240), True, 'import matplotlib.pyplot as plt\n'), ((19252, 19272), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (19263, 19272), True, 'import matplotlib.pyplot as plt\n'), ((19279, 19318), 'matplotlib.pyplot.plot', 'plt.plot', (['arg3', 'arg4', '"""b"""'], {'label': '"""h(q)"""'}), "(arg3, arg4, 'b', label='h(q)')\n", (19287, 19318), True, 'import matplotlib.pyplot as plt\n'), ((19326, 19385), 'matplotlib.pyplot.plot', 'plt.plot', (['(arg3[0], arg3[-1])', '(arg5, arg5)', '"""k"""'], {'label': '"""H"""'}), "((arg3[0], arg3[-1]), (arg5, arg5), 'k', label='H')\n", (19334, 19385), True, 'import matplotlib.pyplot as plt\n'), ((19391, 19408), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (19401, 19408), True, 'import matplotlib.pyplot as plt\n'), ((19665, 19681), 'matplotlib.pyplot.margins', 'plt.margins', (['(0.2)'], {}), '(0.2)\n', (19676, 19681), True, 'import matplotlib.pyplot as plt\n'), ((19690, 19721), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.2)'}), '(bottom=0.2)\n', (19709, 19721), True, 'import matplotlib.pyplot as plt\n'), ((19733, 19753), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (19744, 19753), True, 'import matplotlib.pyplot as plt\n'), ((19760, 19785), 'matplotlib.pyplot.plot', 'plt.plot', (['arg6', 'arg7', '"""b"""'], {}), "(arg6, arg7, 'b')\n", (19768, 19785), True, 'import matplotlib.pyplot as plt\n'), ((20041, 20057), 'matplotlib.pyplot.margins', 'plt.margins', (['(0.2)'], {}), '(0.2)\n', (20052, 20057), True, 'import matplotlib.pyplot as plt\n'), ((20066, 20097), 
'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.2)'}), '(bottom=0.2)\n', (20085, 20097), True, 'import matplotlib.pyplot as plt\n'), ((20164, 20175), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (20173, 20175), True, 'import matplotlib.pyplot as plt\n'), ((20216, 20248), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(x_size, y_size)'}), '(figsize=(x_size, y_size))\n', (20222, 20248), False, 'from matplotlib.figure import Figure\n'), ((21704, 21745), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig_ts'], {'master': 'frame_1'}), '(fig_ts, master=frame_1)\n', (21721, 21745), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((22760, 22775), 'numpy.log', 'np.log', (['arg0[0]'], {}), '(arg0[0])\n', (22766, 22775), True, 'import numpy as np\n'), ((22776, 22792), 'numpy.log', 'np.log', (['arg0[-1]'], {}), '(arg0[-1])\n', (22782, 22792), True, 'import numpy as np\n'), ((24685, 24699), 'tkinter.filedialog.askdirectory', 'askdirectory', ([], {}), '()\n', (24697, 24699), False, 'from tkinter.filedialog import askdirectory\n'), ((24708, 24735), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 9)'}), '(figsize=(12, 9))\n', (24718, 24735), True, 'import matplotlib.pyplot as plt\n'), ((24745, 24772), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (24751, 24772), True, 'import matplotlib.pyplot as plt\n'), ((24782, 24812), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (24788, 24812), True, 'import matplotlib.pyplot as plt\n'), ((24823, 24843), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (24834, 24843), True, 'import matplotlib.pyplot as plt\n'), ((24855, 24864), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (24862, 24864), True, 'import matplotlib.pyplot as plt\n'), ((25025, 25044), 
'matplotlib.pyplot.plot', 'plt.plot', (['arg0', '"""y"""'], {}), "(arg0, 'y')\n", (25033, 25044), True, 'import matplotlib.pyplot as plt\n'), ((25438, 25454), 'matplotlib.pyplot.margins', 'plt.margins', (['(0.2)'], {}), '(0.2)\n', (25449, 25454), True, 'import matplotlib.pyplot as plt\n'), ((25463, 25494), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.3)'}), '(hspace=0.3)\n', (25482, 25494), True, 'import matplotlib.pyplot as plt\n'), ((25506, 25526), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (25517, 25526), True, 'import matplotlib.pyplot as plt\n'), ((25533, 25584), 'matplotlib.pyplot.plot', 'plt.plot', (['arg1', 'arg2', '"""b"""'], {'label': "('$\\\\mu$ = ' + arg4)"}), "(arg1, arg2, 'b', label='$\\\\mu$ = ' + arg4)\n", (25541, 25584), True, 'import matplotlib.pyplot as plt\n'), ((25591, 25660), 'matplotlib.pyplot.plot', 'plt.plot', (['arg1', 'arg3', '"""r"""'], {'linewidth': '(2.0)', 'label': "('$\\\\sigma$ = ' + arg5)"}), "(arg1, arg3, 'r', linewidth=2.0, label='$\\\\sigma$ = ' + arg5)\n", (25599, 25660), True, 'import matplotlib.pyplot as plt\n'), ((25668, 25685), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (25678, 25685), True, 'import matplotlib.pyplot as plt\n'), ((25942, 25958), 'matplotlib.pyplot.margins', 'plt.margins', (['(0.2)'], {}), '(0.2)\n', (25953, 25958), True, 'import matplotlib.pyplot as plt\n'), ((25967, 25998), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.3)'}), '(hspace=0.3)\n', (25986, 25998), True, 'import matplotlib.pyplot as plt\n'), ((26067, 26078), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (26076, 26078), True, 'import matplotlib.pyplot as plt\n'), ((26119, 26151), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(x_size, y_size)'}), '(figsize=(x_size, y_size))\n', (26125, 26151), False, 'from matplotlib.figure import Figure\n'), ((27289, 27330), 
'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig_ts'], {'master': 'frame_1'}), '(fig_ts, master=frame_1)\n', (27306, 27330), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n'), ((1833, 1850), 'numpy.isscalar', 'np.isscalar', (['arg1'], {}), '(arg1)\n', (1844, 1850), True, 'import numpy as np\n'), ((1864, 1901), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'arg1', '"""k"""'], {'label': '"""trend"""'}), "(t, arg1, 'k', label='trend')\n", (1872, 1901), True, 'import matplotlib.pyplot as plt\n'), ((1913, 1954), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'arg0', '"""b"""'], {'label': '"""detrended"""'}), "(t, arg0, 'b', label='detrended')\n", (1921, 1954), True, 'import matplotlib.pyplot as plt\n'), ((2348, 2380), 'os.path.join', 'os.path.join', (['path_tot', '"""ts.pdf"""'], {}), "(path_tot, 'ts.pdf')\n", (2360, 2380), False, 'import os\n'), ((2579, 2596), 'numpy.isscalar', 'np.isscalar', (['arg1'], {}), '(arg1)\n', (2590, 2596), True, 'import numpy as np\n'), ((4831, 4858), 'tkinter.Entry', 'tk.Entry', (['frame_2'], {'width': '(18)'}), '(frame_2, width=18)\n', (4839, 4858), True, 'import tkinter as tk\n'), ((5072, 5149), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Replot"""', 'font': '"""Verdana 13 bold"""', 'command': 'screen_fig'}), "(frame_2, text='Replot', font='Verdana 13 bold', command=screen_fig)\n", (5081, 5149), True, 'import tkinter as tk\n'), ((5195, 5268), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Save"""', 'font': '"""Verdana 13 bold"""', 'command': 'save_fig'}), "(frame_2, text='Save', font='Verdana 13 bold', command=save_fig)\n", (5204, 5268), True, 'import tkinter as tk\n'), ((5314, 5340), 'tkinter.Label', 'tk.Label', (['frame_2'], {'text': '""""""'}), "(frame_2, text='')\n", (5322, 5340), True, 'import tkinter as tk\n'), ((5388, 5463), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Reset"""', 'font': '"""Verdana 13 bold"""', 'command': 'reset_fig'}), 
"(frame_2, text='Reset', font='Verdana 13 bold', command=reset_fig)\n", (5397, 5463), True, 'import tkinter as tk\n'), ((6067, 6115), 'matplotlib.pyplot.plot', 'plt.plot', (['(arg1[0], arg1[-1])', '(arg2, arg2)', '"""r"""'], {}), "((arg1[0], arg1[-1]), (arg2, arg2), 'r')\n", (6075, 6115), True, 'import matplotlib.pyplot as plt\n'), ((6374, 6415), 'os.path.join', 'os.path.join', (['path_tot', '"""spectrum_in.pdf"""'], {}), "(path_tot, 'spectrum_in.pdf')\n", (6386, 6415), False, 'import os\n'), ((8390, 8417), 'tkinter.Entry', 'tk.Entry', (['frame_2'], {'width': '(18)'}), '(frame_2, width=18)\n', (8398, 8417), True, 'import tkinter as tk\n'), ((8631, 8708), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Replot"""', 'font': '"""Verdana 13 bold"""', 'command': 'screen_fig'}), "(frame_2, text='Replot', font='Verdana 13 bold', command=screen_fig)\n", (8640, 8708), True, 'import tkinter as tk\n'), ((8754, 8827), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Save"""', 'font': '"""Verdana 13 bold"""', 'command': 'save_fig'}), "(frame_2, text='Save', font='Verdana 13 bold', command=save_fig)\n", (8763, 8827), True, 'import tkinter as tk\n'), ((8873, 8899), 'tkinter.Label', 'tk.Label', (['frame_2'], {'text': '""""""'}), "(frame_2, text='')\n", (8881, 8899), True, 'import tkinter as tk\n'), ((8947, 9022), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Reset"""', 'font': '"""Verdana 13 bold"""', 'command': 'reset_fig'}), "(frame_2, text='Reset', font='Verdana 13 bold', command=reset_fig)\n", (8956, 9022), True, 'import tkinter as tk\n'), ((9661, 9675), 'numpy.isnan', 'np.isnan', (['arg2'], {}), '(arg2)\n', (9669, 9675), True, 'import numpy as np\n'), ((11198, 11231), 'os.path.join', 'os.path.join', (['path_tot', '"""res.pdf"""'], {}), "(path_tot, 'res.pdf')\n", (11210, 11231), False, 'import os\n'), ((13878, 13905), 'tkinter.Entry', 'tk.Entry', (['frame_2'], {'width': '(18)'}), '(frame_2, width=18)\n', (13886, 13905), True, 'import tkinter as 
tk\n'), ((14119, 14196), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Replot"""', 'font': '"""Verdana 13 bold"""', 'command': 'screen_fig'}), "(frame_2, text='Replot', font='Verdana 13 bold', command=screen_fig)\n", (14128, 14196), True, 'import tkinter as tk\n'), ((14242, 14315), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Save"""', 'font': '"""Verdana 13 bold"""', 'command': 'save_fig'}), "(frame_2, text='Save', font='Verdana 13 bold', command=save_fig)\n", (14251, 14315), True, 'import tkinter as tk\n'), ((14361, 14387), 'tkinter.Label', 'tk.Label', (['frame_2'], {'text': '""""""'}), "(frame_2, text='')\n", (14369, 14387), True, 'import tkinter as tk\n'), ((14435, 14510), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Reset"""', 'font': '"""Verdana 13 bold"""', 'command': 'reset_fig'}), "(frame_2, text='Reset', font='Verdana 13 bold', command=reset_fig)\n", (14444, 14510), True, 'import tkinter as tk\n'), ((15065, 15077), 'numpy.log', 'np.log', (['arg0'], {}), '(arg0)\n', (15071, 15077), True, 'import numpy as np\n'), ((15078, 15090), 'numpy.log', 'np.log', (['arg1'], {}), '(arg1)\n', (15084, 15090), True, 'import numpy as np\n'), ((15137, 15149), 'numpy.log', 'np.log', (['arg0'], {}), '(arg0)\n', (15143, 15149), True, 'import numpy as np\n'), ((15450, 15483), 'os.path.join', 'os.path.join', (['path_tot', '"""dfa.pdf"""'], {}), "(path_tot, 'dfa.pdf')\n", (15462, 15483), False, 'import os\n'), ((15633, 15645), 'numpy.log', 'np.log', (['arg0'], {}), '(arg0)\n', (15639, 15645), True, 'import numpy as np\n'), ((15646, 15658), 'numpy.log', 'np.log', (['arg1'], {}), '(arg1)\n', (15652, 15658), True, 'import numpy as np\n'), ((15703, 15715), 'numpy.log', 'np.log', (['arg0'], {}), '(arg0)\n', (15709, 15715), True, 'import numpy as np\n'), ((16860, 16875), 'numpy.log', 'np.log', (['arg0[0]'], {}), '(arg0[0])\n', (16866, 16875), True, 'import numpy as np\n'), ((16882, 16898), 'numpy.log', 'np.log', (['arg0[-1]'], {}), '(arg0[-1])\n', 
(16888, 16898), True, 'import numpy as np\n'), ((17299, 17326), 'tkinter.Entry', 'tk.Entry', (['frame_2'], {'width': '(18)'}), '(frame_2, width=18)\n', (17307, 17326), True, 'import tkinter as tk\n'), ((17540, 17617), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Replot"""', 'font': '"""Verdana 13 bold"""', 'command': 'screen_fig'}), "(frame_2, text='Replot', font='Verdana 13 bold', command=screen_fig)\n", (17549, 17617), True, 'import tkinter as tk\n'), ((17663, 17736), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Save"""', 'font': '"""Verdana 13 bold"""', 'command': 'save_fig'}), "(frame_2, text='Save', font='Verdana 13 bold', command=save_fig)\n", (17672, 17736), True, 'import tkinter as tk\n'), ((17782, 17808), 'tkinter.Label', 'tk.Label', (['frame_2'], {'text': '""""""'}), "(frame_2, text='')\n", (17790, 17808), True, 'import tkinter as tk\n'), ((17856, 17931), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Reset"""', 'font': '"""Verdana 13 bold"""', 'command': 'reset_fig'}), "(frame_2, text='Reset', font='Verdana 13 bold', command=reset_fig)\n", (17865, 17931), True, 'import tkinter as tk\n'), ((18573, 18585), 'numpy.log', 'np.log', (['arg0'], {}), '(arg0)\n', (18579, 18585), True, 'import numpy as np\n'), ((18586, 18604), 'numpy.log', 'np.log', (['arg1[0, :]'], {}), '(arg1[0, :])\n', (18592, 18604), True, 'import numpy as np\n'), ((18627, 18639), 'numpy.log', 'np.log', (['arg0'], {}), '(arg0)\n', (18633, 18639), True, 'import numpy as np\n'), ((18689, 18701), 'numpy.log', 'np.log', (['arg0'], {}), '(arg0)\n', (18695, 18701), True, 'import numpy as np\n'), ((18702, 18721), 'numpy.log', 'np.log', (['arg1[50, :]'], {}), '(arg1[50, :])\n', (18708, 18721), True, 'import numpy as np\n'), ((18744, 18756), 'numpy.log', 'np.log', (['arg0'], {}), '(arg0)\n', (18750, 18756), True, 'import numpy as np\n'), ((18806, 18818), 'numpy.log', 'np.log', (['arg0'], {}), '(arg0)\n', (18812, 18818), True, 'import numpy as np\n'), ((18819, 
18838), 'numpy.log', 'np.log', (['arg1[-1, :]'], {}), '(arg1[-1, :])\n', (18825, 18838), True, 'import numpy as np\n'), ((18861, 18873), 'numpy.log', 'np.log', (['arg0'], {}), '(arg0)\n', (18867, 18873), True, 'import numpy as np\n'), ((20121, 20155), 'os.path.join', 'os.path.join', (['path_tot', '"""mdfa.pdf"""'], {}), "(path_tot, 'mdfa.pdf')\n", (20133, 20155), False, 'import os\n'), ((20301, 20313), 'numpy.log', 'np.log', (['arg0'], {}), '(arg0)\n', (20307, 20313), True, 'import numpy as np\n'), ((20314, 20332), 'numpy.log', 'np.log', (['arg1[0, :]'], {}), '(arg1[0, :])\n', (20320, 20332), True, 'import numpy as np\n'), ((20353, 20365), 'numpy.log', 'np.log', (['arg0'], {}), '(arg0)\n', (20359, 20365), True, 'import numpy as np\n'), ((20413, 20425), 'numpy.log', 'np.log', (['arg0'], {}), '(arg0)\n', (20419, 20425), True, 'import numpy as np\n'), ((20426, 20445), 'numpy.log', 'np.log', (['arg1[50, :]'], {}), '(arg1[50, :])\n', (20432, 20445), True, 'import numpy as np\n'), ((20466, 20478), 'numpy.log', 'np.log', (['arg0'], {}), '(arg0)\n', (20472, 20478), True, 'import numpy as np\n'), ((20526, 20538), 'numpy.log', 'np.log', (['arg0'], {}), '(arg0)\n', (20532, 20538), True, 'import numpy as np\n'), ((20539, 20558), 'numpy.log', 'np.log', (['arg1[-1, :]'], {}), '(arg1[-1, :])\n', (20545, 20558), True, 'import numpy as np\n'), ((20579, 20591), 'numpy.log', 'np.log', (['arg0'], {}), '(arg0)\n', (20585, 20591), True, 'import numpy as np\n'), ((23206, 23218), 'numpy.min', 'np.min', (['arg4'], {}), '(arg4)\n', (23212, 23218), True, 'import numpy as np\n'), ((23225, 23237), 'numpy.max', 'np.max', (['arg4'], {}), '(arg4)\n', (23231, 23237), True, 'import numpy as np\n'), ((23299, 23311), 'numpy.min', 'np.min', (['arg6'], {}), '(arg6)\n', (23305, 23311), True, 'import numpy as np\n'), ((23318, 23330), 'numpy.max', 'np.max', (['arg6'], {}), '(arg6)\n', (23324, 23330), True, 'import numpy as np\n'), ((23337, 23349), 'numpy.min', 'np.min', (['arg7'], {}), '(arg7)\n', (23343, 
23349), True, 'import numpy as np\n'), ((23711, 23738), 'tkinter.Entry', 'tk.Entry', (['frame_2'], {'width': '(18)'}), '(frame_2, width=18)\n', (23719, 23738), True, 'import tkinter as tk\n'), ((23847, 23873), 'tkinter.Label', 'tk.Label', (['frame_2'], {'text': '""""""'}), "(frame_2, text='')\n", (23855, 23873), True, 'import tkinter as tk\n'), ((23913, 23990), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Replot"""', 'font': '"""Verdana 13 bold"""', 'command': 'screen_fig'}), "(frame_2, text='Replot', font='Verdana 13 bold', command=screen_fig)\n", (23922, 23990), True, 'import tkinter as tk\n'), ((24036, 24109), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Save"""', 'font': '"""Verdana 13 bold"""', 'command': 'save_fig'}), "(frame_2, text='Save', font='Verdana 13 bold', command=save_fig)\n", (24045, 24109), True, 'import tkinter as tk\n'), ((24155, 24230), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Reset"""', 'font': '"""Verdana 13 bold"""', 'command': 'reset_fig'}), "(frame_2, text='Reset', font='Verdana 13 bold', command=reset_fig)\n", (24164, 24230), True, 'import tkinter as tk\n'), ((26022, 26058), 'os.path.join', 'os.path.join', (['path_tot', '"""MFDFA2.pdf"""'], {}), "(path_tot, 'MFDFA2.pdf')\n", (26034, 26058), False, 'import os\n'), ((28334, 28346), 'numpy.min', 'np.min', (['arg1'], {}), '(arg1)\n', (28340, 28346), True, 'import numpy as np\n'), ((28353, 28365), 'numpy.max', 'np.max', (['arg1'], {}), '(arg1)\n', (28359, 28365), True, 'import numpy as np\n'), ((28739, 28766), 'tkinter.Entry', 'tk.Entry', (['frame_2'], {'width': '(18)'}), '(frame_2, width=18)\n', (28747, 28766), True, 'import tkinter as tk\n'), ((28875, 28901), 'tkinter.Label', 'tk.Label', (['frame_2'], {'text': '""""""'}), "(frame_2, text='')\n", (28883, 28901), True, 'import tkinter as tk\n'), ((28941, 29018), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Replot"""', 'font': '"""Verdana 13 bold"""', 'command': 'screen_fig'}), "(frame_2, 
text='Replot', font='Verdana 13 bold', command=screen_fig)\n", (28950, 29018), True, 'import tkinter as tk\n'), ((29064, 29137), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Save"""', 'font': '"""Verdana 13 bold"""', 'command': 'save_fig'}), "(frame_2, text='Save', font='Verdana 13 bold', command=save_fig)\n", (29073, 29137), True, 'import tkinter as tk\n'), ((29183, 29209), 'tkinter.Label', 'tk.Label', (['frame_2'], {'text': '""""""'}), "(frame_2, text='')\n", (29191, 29209), True, 'import tkinter as tk\n'), ((29253, 29328), 'tkinter.Button', 'tk.Button', (['frame_2'], {'text': '"""Reset"""', 'font': '"""Verdana 13 bold"""', 'command': 'reset_fig'}), "(frame_2, text='Reset', font='Verdana 13 bold', command=reset_fig)\n", (29262, 29328), True, 'import tkinter as tk\n'), ((4599, 4655), 'tkinter.Label', 'tk.Label', (['frame_2'], {'text': 'names[i]', 'font': '"""Verdana 13 bold"""'}), "(frame_2, text=names[i], font='Verdana 13 bold')\n", (4607, 4655), True, 'import tkinter as tk\n'), ((4990, 5016), 'tkinter.Label', 'tk.Label', (['frame_2'], {'text': '""""""'}), "(frame_2, text='')\n", (4998, 5016), True, 'import tkinter as tk\n'), ((7946, 7958), 'numpy.max', 'np.max', (['arg0'], {}), '(arg0)\n', (7952, 7958), True, 'import numpy as np\n'), ((8053, 8065), 'numpy.max', 'np.max', (['arg0'], {}), '(arg0)\n', (8059, 8065), True, 'import numpy as np\n'), ((8158, 8214), 'tkinter.Label', 'tk.Label', (['frame_2'], {'text': 'names[i]', 'font': '"""Verdana 13 bold"""'}), "(frame_2, text=names[i], font='Verdana 13 bold')\n", (8166, 8214), True, 'import tkinter as tk\n'), ((8549, 8575), 'tkinter.Label', 'tk.Label', (['frame_2'], {'text': '""""""'}), "(frame_2, text='')\n", (8557, 8575), True, 'import tkinter as tk\n'), ((13646, 13702), 'tkinter.Label', 'tk.Label', (['frame_2'], {'text': 'names[i]', 'font': '"""Verdana 13 bold"""'}), "(frame_2, text=names[i], font='Verdana 13 bold')\n", (13654, 13702), True, 'import tkinter as tk\n'), ((14037, 14063), 'tkinter.Label', 
'tk.Label', (['frame_2'], {'text': '""""""'}), "(frame_2, text='')\n", (14045, 14063), True, 'import tkinter as tk\n'), ((16912, 16924), 'numpy.log', 'np.log', (['arg1'], {}), '(arg1)\n', (16918, 16924), True, 'import numpy as np\n'), ((16939, 16951), 'numpy.log', 'np.log', (['arg1'], {}), '(arg1)\n', (16945, 16951), True, 'import numpy as np\n'), ((17067, 17123), 'tkinter.Label', 'tk.Label', (['frame_2'], {'text': 'names[i]', 'font': '"""Verdana 13 bold"""'}), "(frame_2, text=names[i], font='Verdana 13 bold')\n", (17075, 17123), True, 'import tkinter as tk\n'), ((17458, 17484), 'tkinter.Label', 'tk.Label', (['frame_2'], {'text': '""""""'}), "(frame_2, text='')\n", (17466, 17484), True, 'import tkinter as tk\n'), ((23483, 23539), 'tkinter.Label', 'tk.Label', (['frame_2'], {'text': 'names[i]', 'font': '"""Verdana 13 bold"""'}), "(frame_2, text=names[i], font='Verdana 13 bold')\n", (23491, 23539), True, 'import tkinter as tk\n'), ((28389, 28401), 'numpy.max', 'np.max', (['arg2'], {}), '(arg2)\n', (28395, 28401), True, 'import numpy as np\n'), ((28511, 28567), 'tkinter.Label', 'tk.Label', (['frame_2'], {'text': 'names[i]', 'font': '"""Verdana 13 bold"""'}), "(frame_2, text=names[i], font='Verdana 13 bold')\n", (28519, 28567), True, 'import tkinter as tk\n'), ((10505, 10538), 'matplotlib.__version__.split', 'matplotlib.__version__.split', (['"""."""'], {}), "('.')\n", (10533, 10538), False, 'import matplotlib\n'), ((11766, 11799), 'matplotlib.__version__.split', 'matplotlib.__version__.split', (['"""."""'], {}), "('.')\n", (11794, 11799), False, 'import matplotlib\n'), ((22827, 22845), 'numpy.min', 'np.min', (['arg2[:, 0]'], {}), '(arg2[:, 0])\n', (22833, 22845), True, 'import numpy as np\n'), ((22887, 22906), 'numpy.min', 'np.min', (['arg2[:, 50]'], {}), '(arg2[:, 50])\n', (22893, 22906), True, 'import numpy as np\n'), ((22933, 22952), 'numpy.min', 'np.min', (['arg2[:, -1]'], {}), '(arg2[:, -1])\n', (22939, 22952), True, 'import numpy as np\n'), ((23009, 23027), 
'numpy.max', 'np.max', (['arg2[:, 0]'], {}), '(arg2[:, 0])\n', (23015, 23027), True, 'import numpy as np\n'), ((23054, 23073), 'numpy.max', 'np.max', (['arg2[:, 50]'], {}), '(arg2[:, 50])\n', (23060, 23073), True, 'import numpy as np\n'), ((23115, 23134), 'numpy.max', 'np.max', (['arg2[:, -1]'], {}), '(arg2[:, -1])\n', (23121, 23134), True, 'import numpy as np\n'), ((24880, 24913), 'matplotlib.__version__.split', 'matplotlib.__version__.split', (['"""."""'], {}), "('.')\n", (24908, 24913), False, 'import matplotlib\n'), ((26230, 26263), 'matplotlib.__version__.split', 'matplotlib.__version__.split', (['"""."""'], {}), "('.')\n", (26258, 26263), False, 'import matplotlib\n'), ((13440, 13454), 'numpy.isnan', 'np.isnan', (['arg0'], {}), '(arg0)\n', (13448, 13454), True, 'import numpy as np\n'), ((13492, 13506), 'numpy.isnan', 'np.isnan', (['arg0'], {}), '(arg0)\n', (13500, 13506), True, 'import numpy as np\n'), ((13529, 13543), 'numpy.isnan', 'np.isnan', (['arg0'], {}), '(arg0)\n', (13537, 13543), True, 'import numpy as np\n'), ((13565, 13579), 'numpy.isnan', 'np.isnan', (['arg0'], {}), '(arg0)\n', (13573, 13579), True, 'import numpy as np\n'), ((22808, 22826), 'numpy.log', 'np.log', (['arg1[0, :]'], {}), '(arg1[0, :])\n', (22814, 22826), True, 'import numpy as np\n'), ((22867, 22886), 'numpy.log', 'np.log', (['arg1[50, :]'], {}), '(arg1[50, :])\n', (22873, 22886), True, 'import numpy as np\n'), ((22913, 22932), 'numpy.log', 'np.log', (['arg1[-1, :]'], {}), '(arg1[-1, :])\n', (22919, 22932), True, 'import numpy as np\n'), ((22990, 23008), 'numpy.log', 'np.log', (['arg1[0, :]'], {}), '(arg1[0, :])\n', (22996, 23008), True, 'import numpy as np\n'), ((23034, 23053), 'numpy.log', 'np.log', (['arg1[50, :]'], {}), '(arg1[50, :])\n', (23040, 23053), True, 'import numpy as np\n'), ((23095, 23114), 'numpy.log', 'np.log', (['arg1[-1, :]'], {}), '(arg1[-1, :])\n', (23101, 23114), True, 'import numpy as np\n'), ((4437, 4451), 'numpy.isnan', 'np.isnan', (['arg0'], {}), '(arg0)\n', 
(4445, 4451), True, 'import numpy as np\n'), ((4473, 4487), 'numpy.isnan', 'np.isnan', (['arg0'], {}), '(arg0)\n', (4481, 4487), True, 'import numpy as np\n'), ((4016, 4030), 'numpy.isnan', 'np.isnan', (['arg0'], {}), '(arg0)\n', (4024, 4030), True, 'import numpy as np\n'), ((4046, 4060), 'numpy.isnan', 'np.isnan', (['arg1'], {}), '(arg1)\n', (4054, 4060), True, 'import numpy as np\n'), ((4203, 4217), 'numpy.isnan', 'np.isnan', (['arg0'], {}), '(arg0)\n', (4211, 4217), True, 'import numpy as np\n'), ((4233, 4247), 'numpy.isnan', 'np.isnan', (['arg1'], {}), '(arg1)\n', (4241, 4247), True, 'import numpy as np\n'), ((4114, 4128), 'numpy.isnan', 'np.isnan', (['arg0'], {}), '(arg0)\n', (4122, 4128), True, 'import numpy as np\n'), ((4138, 4152), 'numpy.isnan', 'np.isnan', (['arg1'], {}), '(arg1)\n', (4146, 4152), True, 'import numpy as np\n'), ((4290, 4304), 'numpy.isnan', 'np.isnan', (['arg0'], {}), '(arg0)\n', (4298, 4304), True, 'import numpy as np\n'), ((4314, 4328), 'numpy.isnan', 'np.isnan', (['arg1'], {}), '(arg1)\n', (4322, 4328), True, 'import numpy as np\n')] |
"""classicML的优化器."""
import os
from time import time
import numpy as np
from classicML import _cml_precision
if os.environ['CLASSICML_ENGINE'] == 'CC':
from classicML.backend.cc.activations import relu
from classicML.backend.cc.activations import sigmoid
from classicML.backend.cc.activations import softmax
from classicML.backend.cc.callbacks import History
from classicML.backend.cc._utils import ProgressBar
from classicML.backend.cc.ops import cc_calculate_error as calculate_error
from classicML.backend.cc.ops import cc_clip_alpha as clip_alpha
from classicML.backend.cc.ops import cc_select_second_alpha as select_second_alpha
else:
from classicML.backend.python.activations import relu
from classicML.backend.python.activations import sigmoid
from classicML.backend.python.activations import softmax
from classicML.backend.python.callbacks import History
from classicML.backend.python._utils import ProgressBar
from classicML.backend.python.ops import calculate_error
from classicML.backend.python.ops import clip_alpha
from classicML.backend.python.ops import select_second_alpha
# Version identifier of this backend module.
__version__ = 'backend.python.optimizers.0.1.a1'
def _get_optimizer_parameters(args, kwargs):
"""获取优化器的额外参数.
Arguments:
args: *args元组.
kwargs: **kwargs字典.
Returns:
额外的参数列表.
"""
parameters = ['verbose', 'loss', 'metric', 'callbacks']
for index, arg in enumerate(args):
parameters[index] = arg
for kwarg in kwargs:
for index, parameter in enumerate(parameters):
if str(kwarg) == parameter:
parameters[index] = kwargs[kwarg]
return parameters
def _record_callbacks(callbacks, loss_value, metric_value):
"""记录callbacks数据.
Arguments:
callbacks: list, callbacks列表.
loss_value: float, 当前的损失值.
metric_value: float, 当前的评估值.
"""
for callback in callbacks:
if isinstance(callback, History):
callback(loss_value, metric_value)
else:
pass
class Optimizer(object):
    """Base class for all optimizers.

    Attributes:
        name: str, default=None,
            Name of the optimizer.
        _progressbar: classicML.backend.python.utils.ProgressBar,
            Progress bar (created by subclasses when verbose is enabled).
    """
    def __init__(self, name=None):
        """
        Arguments:
            name: str, default=None,
                Name of the optimizer.
        """
        self.name, self._progressbar = name, None
    def __call__(self, x, y, epochs, parameters, *args, **kwargs):
        """Callable interface; simply delegates to ``run``.

        Arguments:
            x: numpy.ndarray, array-like, feature data.
            y: numpy.ndarray, array-like, labels.
            epochs: int, default=1, number of training epochs.
            parameters: numpy.ndarray, the model's parameter matrix.
        """
        return self.run(x, y, epochs, parameters, *args, **kwargs)
    def run(self, x, y, epochs, parameters, *args, **kwargs):
        """Run the optimizer to optimize the parameters.

        Subclasses must override this method.

        Arguments:
            x: numpy.ndarray, array-like, feature data.
            y: numpy.ndarray, array-like, labels.
            epochs: int, default=1, number of training epochs.
            parameters: numpy.ndarray, the model's parameter matrix.

        Raises:
            NotImplementedError: always, on the base class.
        """
        raise NotImplementedError
    def _update_parameters(self, parameters, grad, *args, **kwargs):
        """Update the model parameters.

        Subclasses must override this method.

        Arguments:
            parameters: numpy.ndarray, the model's parameter matrix.
            grad: numpy.ndarray, the optimizer's current gradient matrix.

        Raises:
            NotImplementedError: always, on the base class.
        """
        raise NotImplementedError
class StochasticGradientDescent(Optimizer):
    """Stochastic gradient descent optimizer.
    Notes:
        - To fix the random seed for reproducibility,
          set the model's random seed to a constant integer
          when the model is instantiated.
    """
    def __init__(self, name='stochastic_gradient_descent', learning_rate=1e-2):
        """
        Arguments:
            name: str, default='stochastic_gradient_descent',
                Name of the optimizer.
            learning_rate: float, default=1e-2,
                Learning rate of the optimizer.
        """
        super(StochasticGradientDescent, self).__init__(name=name)
        self.learning_rate = learning_rate
    def run(self, x, y, epochs, parameters, *args, **kwargs):
        """Run the optimizer to optimize the parameters.
        Arguments:
            x: numpy.ndarray, array-like, feature data.
            y: numpy.ndarray, array-like, labels.
            epochs: int, default=1, number of training epochs.
            parameters: numpy.ndarray, the model's parameter matrix.
            args:
                verbose: bool, whether to display logging information.
                loss: str, or classicML.losses.Loss instance,
                    the loss function used by the model.
                metric: str, or classicML.metrics.Metric instance,
                    the metric used by the model.
                callbacks: list,
                    recorders of intermediate training data.
        Returns:
            The model's parameter matrix.
        """
        verbose, loss, metric, callbacks = _get_optimizer_parameters(args, kwargs)
        num_of_features = x.shape[0]
        if verbose is True:
            # Instantiate the progress bar.
            self._progressbar = ProgressBar(epochs, loss, metric)
        for epoch in range(1, epochs + 1):
            current = time()
            # Randomly pick a single sample.
            random_index = np.random.randint(0, num_of_features)
            # Forward pass.
            y_pred, caches = self.forward(x[[random_index], :], parameters)
            # Backward pass.
            # Notes: y_true is a scalar here.
            grad = self.backward(y_pred, np.asarray([y[random_index]]), caches)
            # Update the parameters.
            parameters = self._update_parameters(parameters, grad)
            # Everything below is for visualization only and does not
            # take part in the actual optimization.
            if self._progressbar or callbacks is not None:
                # Updating on a single sample makes the prediction's shape
                # inconsistent with the labels, so one extra forward pass
                # over the full data is needed.
                y_pred, _ = self.forward(x, parameters)
                loss_value = loss(y_pred, y)
                metric_value = metric(y_pred, y)
                # Refresh the progress bar.
                if self._progressbar:
                    self._progressbar(epoch, current, loss_value, metric_value)
                # Record the callbacks.
                if callbacks:
                    _record_callbacks(callbacks, loss_value, metric_value)
        return parameters
    def _update_parameters(self, parameters, grad, *args, **kwargs):
        """Update the model parameters.
        Arguments:
            parameters: numpy.ndarray, the model's parameter matrix.
            grad: numpy.ndarray, the optimizer's current gradient matrix.
        Returns:
            The model's parameter matrix.
        """
        num_of_matrix_ops = len(parameters) // 2
        for i in range(1, num_of_matrix_ops + 1):
            parameters['w' + str(i)] -= self.learning_rate * grad['dw' + str(i)]
            parameters['b' + str(i)] -= self.learning_rate * grad['db' + str(i)]
        return parameters
    @staticmethod
    def forward(x, parameters):
        """Forward pass of the optimizer.
        Arguments:
            x: numpy.ndarray, array-like, feature data.
            parameters: numpy.ndarray, the model's parameter matrix.
        Returns:
            The predicted labels (as probabilities) and the cached
            parameter matrices.
        """
        num_of_matrix_ops = len(parameters) // 2
        caches = []
        a = x  # Except for the first (input) layer, ``a`` holds activated outputs.
        for i in range(1, num_of_matrix_ops):
            # Extract the parameters.
            w = parameters['w' + str(i)]
            b = parameters['b' + str(i)]
            x = a  # Use the previous layer's activation as this layer's input.
            # Compute the output and apply the activation.
            z = np.matmul(x, w.T) + b
            a = relu(z)
            # Save the parameter cache.
            cache = (x, w, a)
            caches.append(cache)
        # The output layer uses a different activation; handle it separately.
        w_output = parameters['w' + str(num_of_matrix_ops)]
        b_output = parameters['b' + str(num_of_matrix_ops)]
        z = np.matmul(a, w_output.T) + b_output
        if w_output.shape[0] == 1:
            y_pred = sigmoid(z)
        else:
            y_pred = softmax(z)
        cache = (a, w_output, y_pred)
        caches.append(cache)
        return y_pred, caches
    @staticmethod
    def backward(y_pred, y_true, caches):
        """Backward pass of the optimizer.
        Arguments:
            y_pred: numpy.ndarray, the predicted labels (as probabilities).
            y_true: numpy.ndarray, the ground-truth labels.
            caches: numpy.ndarray, the parameter caches.
        Returns:
            Dict of the optimizer's current gradient matrices.
        """
        num_of_caches = len(caches)
        grad = {}
        if y_true.ndim == 1:
            y_true = y_true.reshape(-1, 1)
        # Extract the cache.
        (x, w, a) = caches[num_of_caches - 1]
        if y_true.shape[1] == 1:
            # In binary classification, a zero label in y_true would make
            # the gradient incomputable.  To work around this, the labels
            # are normalized into (-inf, 0) and (0, inf); in practice the
            # values never reach infinity and stay within 10.
            y_true = -(y_true / y_pred - (1 - y_true) / (1 - y_pred))
            da = sigmoid.diff(y_pred, a, y_true)
        else:
            da = softmax.diff(y_true, a)
        # Compute and save the gradients of the output layer.
        dw, db, da_ = StochasticGradientDescent._get_grad(da, y_true, caches[num_of_caches - 1])
        grad['dw' + str(num_of_caches)] = dw
        grad['db' + str(num_of_caches)] = db
        grad['da' + str(num_of_caches)] = da_
        for cache in range(num_of_caches - 1, 0, -1):
            (x, w, a) = caches[cache - 1]
            output = grad.get('da' + str(cache + 1))
            da = relu.diff(output, a)
            # Compute and save the gradients of the hidden layers.
            dw, db, da_ = StochasticGradientDescent._get_grad(da, y_true, caches[cache - 1])
            grad['dw' + str(cache)] = dw
            grad['db' + str(cache)] = db
            grad['da' + str(cache)] = da_
        return grad
    @staticmethod
    def _get_grad(da, y_true, cache):
        """Compute the gradients.
        Arguments:
            da: numpy.ndarray, gradient of the output tensor.
            y_true: numpy.ndarray, the ground-truth labels.
            cache: numpy.ndarray, the parameter cache.
        Returns:
            The weight gradient, the bias gradient and the gradient of
            the output tensor.
        """
        # Extract the cache.
        (x, w, a) = cache
        dw = np.matmul(da.T, x) / y_true.shape[0]
        db = np.sum(da, axis=0, keepdims=True) / y_true.shape[0]
        da_ = np.matmul(da, w)
        return dw, db, da_
class Adam(StochasticGradientDescent):
    """Adaptive moment estimation optimizer.
    References:
        - [Kingma et al., 2014](https://arxiv.org/abs/1412.6980)
          Implemented following Algorithm 1.
    Notes:
        - To fix the random seed for reproducibility,
          set the model's random seed to a constant integer
          when the model is instantiated.
        - Implemented on top of stochastic gradient descent as the base
          optimization algorithm.
        - The hyper-parameter epsilon is set to 1e-7 as in other machine
          learning frameworks, not the 1e-8 of the original paper.
    """
    def __init__(self,
                 name='adam',
                 learning_rate=1e-3,
                 beta_1=0.9,
                 beta_2=0.999,
                 epsilon=1e-7):
        """
        Arguments:
            name: str, default='adam',
                Name of the optimizer.
            learning_rate: float, default=1e-3,
                Learning rate of the optimizer.
            beta_1: float, default=0.9,
                Decay rate of the first-moment estimate.
            beta_2: float, default=0.999,
                Decay rate of the second-moment estimate.
            epsilon: float, default=1e-7,
                Small constant for numerical stability.
        """
        super(Adam, self).__init__(name=name)
        self.learning_rate = learning_rate
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon
    def run(self, x, y, epochs, parameters, *args, **kwargs):
        """Run the optimizer to optimize the parameters.
        Arguments:
            x: numpy.ndarray, array-like, feature data.
            y: numpy.ndarray, array-like, labels.
            epochs: int, default=1, number of training epochs.
            parameters: numpy.ndarray, the model's parameter matrix.
            args:
                verbose: bool, whether to display logging information.
                loss: str, or classicML.losses.Loss instance,
                    the loss function used by the model.
                metric: str, or classicML.metrics.Metric instance,
                    the metric used by the model.
                callbacks: list,
                    recorders of intermediate training data.
        Returns:
            The model's parameter matrix.
        """
        verbose, loss, metric, callbacks = _get_optimizer_parameters(args, kwargs)
        num_of_features = x.shape[0]
        # Initialize Adam's moment vectors.
        m, v = self._initializer(parameters)
        if verbose is True:
            # Instantiate the progress bar.
            self._progressbar = ProgressBar(epochs, loss, metric)
        for epoch in range(1, epochs + 1):
            current = time()
            # Randomly pick a single sample.
            random_index = np.random.randint(0, num_of_features)
            # Forward pass.
            y_pred, caches = self.forward(x[[random_index], :], parameters)
            # Backward pass.
            # Notes: y_true is a scalar here.
            grad = self.backward(y_pred, np.asarray([y[random_index]]), caches)
            # Update the parameters.
            parameters, m, v = self._update_parameters(parameters, grad, m, v, epoch)
            # Everything below is for visualization only and does not
            # take part in the actual optimization.
            if self._progressbar or callbacks is not None:
                # Updating on a single sample makes the prediction's shape
                # inconsistent with the labels, so one extra forward pass
                # over the full data is needed.
                y_pred, _ = self.forward(x, parameters)
                loss_value = loss(y_pred, y)
                metric_value = metric(y_pred, y)
                # Refresh the progress bar.
                if self._progressbar:
                    self._progressbar(epoch, current, loss_value, metric_value)
                # Record the callbacks.
                if callbacks:
                    _record_callbacks(callbacks, loss_value, metric_value)
        return parameters
    def _update_parameters(self, parameters, grad, *args, **kwargs):
        """Update the model parameters.
        Arguments:
            parameters: numpy.ndarray, the model's parameter matrix.
            grad: numpy.ndarray, the optimizer's current gradient matrix.
            args:
                moment_vector_1: dict of numpy.ndarray,
                    the first-moment vector.
                moment_vector_2: dict of numpy.ndarray,
                    the second-moment vector.
                epoch: int, the current epoch.
        Returns:
            The model's parameter matrix.
        """
        m, v, epoch = args[0], args[1], args[2]
        num_of_matrix_ops = len(parameters) // 2
        for i in range(1, num_of_matrix_ops + 1):
            # Update the (biased) first-moment estimate.
            m['dw' + str(i)] = self.beta_1 * m['dw' + str(i)] + (1 - self.beta_1) * grad['dw' + str(i)]
            m['db' + str(i)] = self.beta_1 * m['db' + str(i)] + (1 - self.beta_1) * grad['db' + str(i)]
            # Update the (biased) second-moment estimate.
            v['dw' + str(i)] = self.beta_2 * v['dw' + str(i)] + (1 - self.beta_2) * (grad['dw' + str(i)] ** 2)
            v['db' + str(i)] = self.beta_2 * v['db' + str(i)] + (1 - self.beta_2) * (grad['db' + str(i)] ** 2)
            # Bias-correct the first-moment estimate.
            m_hat_w = m['dw' + str(i)] / (1 - np.power(self.beta_1, epoch))
            m_hat_b = m['db' + str(i)] / (1 - np.power(self.beta_1, epoch))
            # Bias-correct the second-moment estimate.
            v_hat_w = v['dw' + str(i)] / (1 - np.power(self.beta_2, epoch))
            v_hat_b = v['db' + str(i)] / (1 - np.power(self.beta_2, epoch))
            # Update the parameters.
            parameters['w' + str(i)] -= self.learning_rate * m_hat_w / (np.sqrt(v_hat_w) + self.epsilon)
            parameters['b' + str(i)] -= self.learning_rate * m_hat_b / (np.sqrt(v_hat_b) + self.epsilon)
        return parameters, m, v
    @staticmethod
    def _initializer(parameters):
        """Initialize the first- and second-moment vectors; Algorithm 1
        initializes everything to zero.
        Arguments:
            parameters: numpy.ndarray, the model's parameter matrix.
        Returns:
            Dicts of the first- and second-moment vectors.
        """
        num_of_matrix_ops = len(parameters) // 2
        moment_vector_1 = {}
        moment_vector_2 = {}
        for i in range(1, num_of_matrix_ops + 1):
            moment_vector_1['dw' + str(i)] = np.zeros(parameters['w' + str(i)].shape)
            moment_vector_1['db' + str(i)] = np.zeros(parameters['b' + str(i)].shape)
            moment_vector_2['dw' + str(i)] = np.zeros(parameters['w' + str(i)].shape)
            moment_vector_2['db' + str(i)] = np.zeros(parameters['b' + str(i)].shape)
        return moment_vector_1, moment_vector_2
class GradientDescent(Optimizer):
    """Gradient descent optimizer.
    """
    def __init__(self, name='gradient_descent', learning_rate=1e-2):
        """
        Arguments:
            name: str, default='gradient_descent',
                Name of the optimizer.
            learning_rate: float, default=1e-2,
                Learning rate of the optimizer.
        """
        super(GradientDescent, self).__init__(name=name)
        self.learning_rate = learning_rate
    def run(self, x, y, epochs, beta, *args, **kwargs):
        """Run the optimizer to optimize the parameters.
        Arguments:
            x: numpy.ndarray, array-like, feature data.
            y: numpy.ndarray, array-like, labels.
            epochs: int, default=1, number of training epochs.
            beta: numpy.ndarray, the model's parameter matrix.
            args:
                verbose: bool, whether to display logging information.
                loss: str, or classicML.losses.Loss instance,
                    the loss function used by the model.
                metric: str, or classicML.metrics.Metric instance,
                    the metric used by the model.
        Returns:
            The model's parameter matrix.
        """
        verbose, loss, metric, callbacks = _get_optimizer_parameters(args, kwargs)
        if verbose is True:
            # Set up the progress bar.
            self._progressbar = ProgressBar(epochs, loss, metric)
        for epoch in range(1, epochs + 1):
            tic = time()
            # One optimization step: forward pass, backward pass,
            # then the parameter update.
            y_pred, x_hat = self.forward(x, beta)
            grad = self.backward(y_pred, y, x_hat)
            beta = self._update_parameters(beta, grad)
            # Everything below is for visualization only and does not
            # take part in the actual optimization.
            if self._progressbar or callbacks is not None:
                # The log-likelihood loss has a different call signature.
                if loss.name == 'log_likelihood':
                    loss_value = loss(y, beta, x_hat)
                else:
                    loss_value = loss(y_pred, y)
                metric_value = metric(y_pred, y)
                # Record the callbacks.
                if callbacks:
                    _record_callbacks(callbacks, loss_value, metric_value)
                # Refresh the progress bar.
                if self._progressbar:
                    self._progressbar(epoch, tic, loss_value, metric_value)
        return beta
    def _update_parameters(self, beta, grad, *args, **kwargs):
        """Update the model parameters with one gradient step.
        Arguments:
            beta: numpy.ndarray, the model's parameter matrix
                (updated in place).
            grad: numpy.ndarray, the optimizer's current gradient matrix.
        Returns:
            The model's parameter matrix.
        """
        beta -= self.learning_rate * grad
        return beta
    @staticmethod
    def forward(x, parameters):
        """Forward pass of the optimizer.
        Arguments:
            x: numpy.ndarray, array-like, feature data.
            parameters: numpy.ndarray, the model's parameter matrix.
        Returns:
            The predicted labels (as probabilities) and the augmented
            (x; 1) feature matrix.
        """
        ones_column = np.ones((x.shape[0], 1), dtype=_cml_precision.float)
        x_hat = np.c_[x, ones_column]
        logits = np.matmul(x_hat, parameters.reshape(-1, 1))
        return sigmoid(logits), x_hat
    @staticmethod
    def backward(y_pred, y_true, x_hat):
        """Backward pass of the optimizer.
        Arguments:
            y_pred: numpy.ndarray, the predicted labels (as probabilities).
            y_true: numpy.ndarray, the ground-truth labels.
            x_hat: numpy.ndarray, the augmented feature matrix.
        Returns:
            The optimizer's current gradient matrix.
        """
        residual = _cml_precision.float(y_true.reshape(-1, 1) - y_pred)
        return np.sum(-x_hat * residual, axis=0).reshape(-1, 1)
class NewtonMethod(Optimizer):
    """Newton's method optimizer.
    """
    def __init__(self, name='newton_method'):
        """
        Arguments:
            name: str, default='newton_method',
                Name of the optimizer.
        """
        super(NewtonMethod, self).__init__(name=name)
    def run(self, x, y, epochs, beta, *args, **kwargs):
        """Run the optimizer to optimize the parameters.
        Arguments:
            x: numpy.ndarray, array-like, feature data.
            y: numpy.ndarray, array-like, labels.
            epochs: int, default=1, number of training epochs.
            beta: numpy.ndarray, the model's parameter matrix.
            args:
                verbose: bool, whether to display logging information.
                loss: str, or classicML.losses.Loss instance,
                    the loss function used by the model.
                metric: str, or classicML.metrics.Metric instance,
                    the metric used by the model.
        Returns:
            The model's parameter matrix.
        """
        verbose, loss, metric, callbacks = _get_optimizer_parameters(args, kwargs)
        if verbose:
            # Instantiate the progress bar.
            self._progressbar = ProgressBar(epochs, loss, metric)
        for epoch in range(1, epochs + 1):
            current = time()
            # Forward pass.
            y_pred, x_hat = self.forward(x, beta)
            # Backward pass.
            grad = self.backward(y_pred, y, x_hat)
            # Solve for the Hessian matrix.
            hessian = self._get_hessian_matrix(y_pred, x_hat)
            # Update the parameters.
            beta = self._update_parameters(beta, grad, hessian)
            # Everything below is for visualization only and does not
            # take part in the actual optimization.
            if self._progressbar or callbacks is not None:
                if loss.name == 'log_likelihood':
                    loss_value = loss(y, beta, x_hat)
                else:
                    loss_value = loss(y_pred, y)
                metric_value = metric(y_pred, y)
                # Refresh the progress bar.
                if self._progressbar:
                    self._progressbar(epoch, current, loss_value, metric_value)
                # Record the callbacks.
                if callbacks:
                    _record_callbacks(callbacks, loss_value, metric_value)
        return beta
    def _update_parameters(self, beta, grad, *args, **kwargs):
        """Update the model parameters with one Newton step.
        Arguments:
            beta: numpy.ndarray, the model's parameter matrix.
            grad: numpy.ndarray, the optimizer's current gradient matrix.
            args:
                hessian: numpy.ndarray, the Hessian matrix.
        Returns:
            The model's parameter matrix.
        """
        hessian = args[0]
        beta -= np.matmul(np.linalg.inv(hessian), grad)
        return beta
    @staticmethod
    def forward(x, parameters):
        """Forward pass of the optimizer.
        Arguments:
            x: numpy.ndarray, array-like, feature data.
            parameters: numpy.ndarray, the model's parameter matrix.
        Returns:
            The predicted labels (as probabilities) and the augmented
            (x; 1) feature matrix.
        """
        number_of_sample = x.shape[0]
        parameters = parameters.reshape(-1, 1)
        x_hat = np.c_[x, np.ones((number_of_sample, 1), dtype=_cml_precision.float)]
        cache = np.matmul(x_hat, parameters)
        y_pred = sigmoid(cache)
        return y_pred, x_hat
    @staticmethod
    def backward(y_pred, y_true, x_hat):
        """Backward pass of the optimizer.
        Arguments:
            y_pred: numpy.ndarray, the predicted labels (as probabilities).
            y_true: numpy.ndarray, the ground-truth labels.
            x_hat: numpy.ndarray, the augmented feature matrix.
        Returns:
            The optimizer's current gradient matrix.
        """
        y_true = y_true.reshape(-1, 1)
        error = _cml_precision.float(y_true - y_pred)
        grad = np.sum((-x_hat * error), axis=0)
        grad = grad.reshape(-1, 1)
        return grad
    @staticmethod
    def _get_hessian_matrix(y_pred, x_hat):
        """Compute the Hessian matrix (second derivatives).
        Arguments:
            y_pred: numpy.ndarray, the predicted labels (as probabilities).
            x_hat: numpy.ndarray, the augmented feature matrix.
        Returns:
            The Hessian matrix.
        """
        # Broadcasting builds a diagonal matrix with entries p_i * (1 - p_i).
        P = np.eye(x_hat.shape[0]) * y_pred * (1 - y_pred)
        H = np.matmul(np.matmul(x_hat.T, P), x_hat)
        return H
class RadialBasisFunctionOptimizer(Optimizer):
    """Radial basis function optimizer.
    """
    def __init__(self, name='rbf', learning_rate=1e-2):
        """
        Arguments:
            name: str, default='rbf',
                Name of the optimizer.
            learning_rate: float, default=1e-2,
                Learning rate of the optimizer.
        """
        super(RadialBasisFunctionOptimizer, self).__init__(name=name)
        self.learning_rate = learning_rate
    def run(self, x, y, epochs, parameters, *args, **kwargs):
        """Run the optimizer to optimize the parameters.
        Arguments:
            x: numpy.ndarray, array-like, feature data.
            y: numpy.ndarray, array-like, labels.
            epochs: int, default=1, number of training epochs.
            parameters: numpy.ndarray, the model's parameter matrix.
            args:
                verbose: bool, whether to display logging information.
                loss: str, or classicML.losses.Loss instance,
                    the loss function used by the model.
                metric: str, or classicML.metrics.Metric instance,
                    the metric used by the model.
                callbacks: list,
                    recorders of intermediate training data.
        Returns:
            The model's parameter matrix.
        """
        verbose, loss, metric, callbacks = _get_optimizer_parameters(args, kwargs)
        if verbose is True:
            # Instantiate the progress bar.
            self._progressbar = ProgressBar(epochs, loss, metric)
        for epoch in range(1, epochs+1):
            current = time()
            # Forward pass.
            y_pred, cache = self.forward(x, parameters)
            # Backward pass.
            grad = self.backward(y_pred, y, cache)
            # Update the parameters.
            parameters = self._update_parameters(parameters, grad)
            # Everything below is for visualization only and does not
            # take part in the actual optimization.
            if self._progressbar or callbacks is not None:
                loss_value = loss(y_pred, y)
                metric_value = metric(y_pred, y)
                # Refresh the progress bar.
                if self._progressbar:
                    self._progressbar(epoch, current, loss_value, metric_value)
                # Record the callbacks.
                if callbacks:
                    _record_callbacks(callbacks, loss_value, metric_value)
        return parameters
    def _update_parameters(self, parameters, grad, *args, **kwargs):
        """Update the model parameters.
        Arguments:
            parameters: numpy.ndarray, the model's parameter matrix.
            grad: numpy.ndarray, the optimizer's current gradient matrix.
        Returns:
            The model's parameter matrix.
        """
        parameters['w'] -= self.learning_rate * grad['dw']
        parameters['b'] -= self.learning_rate * grad['db']
        parameters['beta'] -= self.learning_rate * grad['dbeta']
        return parameters
    @staticmethod
    def forward(x, parameters):
        """Forward pass of the optimizer.
        Arguments:
            x: numpy.ndarray, array-like, feature data.
            parameters: numpy.ndarray, the model's parameter matrix.
        Returns:
            The predicted labels (as probabilities) and the cached
            parameter matrices.
        """
        number_of_sample = x.shape[0]
        # Extract the parameters.
        w, b, c, beta = parameters['w'], parameters['b'], parameters['c'], parameters['beta']
        # Number of hidden-layer units.
        units = c.shape[0]
        # Initialize the Gaussian radial basis function.
        rho = np.zeros([number_of_sample, units], dtype=_cml_precision.float)
        x_ci = np.zeros([number_of_sample, units], dtype=_cml_precision.float)
        for unit in range(units):
            # Squared Euclidean distance to this unit's center.
            x_ci[:, unit] = np.linalg.norm(x - c[[unit], ], axis=1) ** 2
            rho[:, unit] = np.exp(-beta[0, unit] * x_ci[:, unit])
        y_pred = np.matmul(rho, w.T) + b
        cache = (rho, x_ci, w, beta)
        return y_pred, cache
    @staticmethod
    def backward(y_pred, y_true, cache):
        """Backward pass of the optimizer.
        Arguments:
            y_pred: numpy.ndarray, the predicted labels (as probabilities).
            y_true: numpy.ndarray, the ground-truth labels.
            cache: numpy.ndarray, the parameter cache.
        Returns:
            The optimizer's current gradient matrices.
        """
        (rho, x_ci, w, beta) = cache
        grad = {}
        dy = y_pred - y_true  # Although named ``dy``, this is really the error between the prediction and the ground truth.
        dw = np.matmul(dy.T, rho) / y_true.shape[0]
        db = np.sum(dy, axis=0, keepdims=True) / y_true.shape[0]
        # NOTE(review): standard backprop would propagate ``dy`` here rather
        # than ``y_pred`` (dL/drho = dy @ w) -- verify this is intentional.
        drho = np.matmul(y_pred, w)
        dbeta = np.sum(drho * rho * (-x_ci), axis=0, keepdims=True) / y_true.shape[0]
        grad['dw'] = dw
        grad['db'] = db
        grad['dbeta'] = dbeta
        return grad
class SequentialMinimalOptimization(Optimizer):
"""序列最小最优化算法. SMO算法是一种启发式算法,
即每次优化两个变量, 使之满足KKT条件; 不断迭代, 最后使得全部变量满足KKT条件.
整个SMO算法包括: 求解两个变量的二次规划问题和选择变量的启发式方法.
Attributes:
alphas: numpy.ndarray,
拉格朗日乘子数组.
non_bound_alphas: numpy.ndarray,
非边界拉格朗日乘子.(硬间隔下和非零拉格朗日乘子一样, 软间隔是非零拉格朗日乘子的子集.)
error_cache: numpy.ndarray,
KKT条件的违背值缓存.
non_zero_alphas: numpy.ndarray,
非零拉格朗日乘子.
b: float, default=0,
偏置项.
C: float, default=None
软间隔正则化系数.
kernel: str, classicML.kernel.Kernels 实例, default=None
分类器使用的核函数.
tol: float, default=None
停止训练的最大误差值.
epochs: int, default=None
最大的训练轮数, 如果是-1则表示需要所有的样本满足条件时,
才能停止训练, 即没有限制.
References:
- [李航, 2012.] 统计学习方法 P124~P131
"""
    def __init__(self, name='SMO'):
        """
        Arguments:
            name: str, default='SMO',
                Name of the optimizer.
        """
        super(SequentialMinimalOptimization, self).__init__(name=name)
        # All of the state below is populated lazily in ``run``.
        self.alphas = None  # Lagrange multipliers.
        self.non_bound_alphas = None  # Non-bound Lagrange multipliers.
        self.error_cache = None  # Cache of KKT-condition violation values.
        self.non_zero_alphas = None  # Mask of non-zero Lagrange multipliers.
        self.b = None  # Bias term.
        self.C = None  # Soft-margin regularization coefficient.
        self.kernel = None  # Kernel function used by the classifier.
        self.tol = None  # Maximum error tolerance for stopping training.
        self.epochs = None  # Maximum number of training epochs (-1 means no limit).
def run(self, x, y, *args, **kwargs):
"""运行优化器优化参数.
Arguments:
x: numpy.ndarray, array-like, 特征数据.
y: numpy.ndarray, array-like, 标签.
args:
C: float,
软间隔正则化系数.
kernel: str, classicML.kernels.Kernel 实例,
分类器使用的核函数.
tol: float,
停止训练的最大误差值.
epochs: int,
最大的训练轮数, 如果是-1则表示需要所有的样本满足条件时,
才能停止训练, 即没有限制.
Returns:
分类器的支持向量下标数组, 支持向量数组, 拉格朗日乘子数组, 支持向量对应的标签数组和偏置项.
"""
self.C, self.kernel, self.tol, self.epochs = args[0], args[1], args[2], args[3]
epoch = 0
number_of_sample = x.shape[0]
entire_flag = True
# 初始化为numpy.ndarray数组, 在Python下更新后会自动转换数据类型为numpy.ndarray,
# 但是, CC在第一次输入的时候为float, 这里强制为numpy.ndarray用以解决CC下不能动态类型输入.
self.b = np.asarray([0.0], dtype=_cml_precision.float)
# 初始化返回参数, 在未知样本的数量时无法初始化.
self.alphas = np.zeros((number_of_sample, ), dtype=_cml_precision.float)
self.non_bound_alphas = np.zeros((number_of_sample, ), dtype=_cml_precision.float)
self.error_cache = np.zeros((number_of_sample, ), dtype=_cml_precision.float)
self.non_zero_alphas = np.zeros((number_of_sample, ), dtype=bool)
while (self.epochs == -1) or (epoch < self.epochs):
pair_of_alpha_changed = 0
epoch += 1 # 更新迭代次数, 在代码块结尾更新有可能因为提前返回而计数不准确.
if entire_flag: # 第一次必须全部全部遍历一遍, 因为初始值全部为零.
for sample in range(number_of_sample):
pair_of_alpha_changed += self._update_parameters(x, y, sample, number_of_sample)
else:
non_bound_index = self.non_bound_alphas.nonzero()[0]
for sample in non_bound_index:
pair_of_alpha_changed += self._update_parameters(x, y, sample, number_of_sample)
if entire_flag is True:
# 遍历所有样本还是没有更新, 就退出循环.
if pair_of_alpha_changed == 0:
support = self.non_zero_alphas.nonzero()[0]
support_vector = x[support]
support_alpha = self.alphas[self.non_zero_alphas]
support_y = y[support]
return support, support_vector, support_alpha, support_y, self.b
entire_flag = False
elif pair_of_alpha_changed == 0:
entire_flag = True
# `numpy.nonzero`获取索引产生的新数组, 类型默认为pure int(参与numpy自动转换为np.int64),
support = _cml_precision.int(self.non_zero_alphas.nonzero()[0])
support_vector = x[support]
support_alpha = self.alphas[self.non_zero_alphas]
support_y = y[support]
return support, support_vector, support_alpha, support_y, self.b
def _update_parameters(self, x, y, *args, **kwargs):
"""SMO算法的内循环(更新参数的具体实现), 寻找第二个要更新的变量alpha_j, 并进行更新.
Arguments:
x: numpy.ndarray, array-like, 特征数据.
y: numpy.ndarray, array-like, 标签.
args:
i: int, 第i个样本.
number_of_sample: int, 样本的总数.
Returns:
更新是否成功的标记.
"""
i, number_of_sample = args[0], args[1]
y_i = y[i, :]
alpha_i = self.alphas[i]
# 提取违背值, 保存为缓存(可能要实时计算).
if self.non_bound_alphas[i]:
error_i = self.error_cache[i]
else:
error_i = calculate_error(x, y, i, self.kernel, self.alphas, self.non_zero_alphas, self.b)
# 更新的变化量的绝对值要大于tol, 且alpha要满足软间隔C的条件限制.
# TODO(<NAME>, tag:code): 直接使用绝对值会导致异常, 但是这样就能正常运行.
if ((y_i * error_i < -self.tol) and (alpha_i < self.C)) or ((y_i * error_i > self.tol) and (0 < alpha_i)):
# 存在非边界拉格朗日乘子.
if np.sum(self.non_bound_alphas) > 0:
j, error_j = select_second_alpha(error_i, self.error_cache, self.non_bound_alphas)
if self._update_alpha(x, y, i, j, error_i, error_j):
return 1
# 试图逐元素强制更新.
for j in np.random.permutation(number_of_sample):
error_j = calculate_error(x, y, j, self.kernel, self.alphas, self.non_zero_alphas, self.b)
if self._update_alpha(x, y, i, j, error_i, error_j):
return 1
return 0
def _update_alpha(self, x, y, i, j, error_i, error_j):
"""更新拉格朗日乘子.
Arguments:
x: numpy.ndarray, array-like, 特征数据.
y: numpy.ndarray, array-like, 标签.
i: int, 第i个样本.
j: int, 第j个样本.
error_i: float, 第i个样本的违背值.
error_j: float, 第j个样本的违背值.
Returns:
是否更新成功.
"""
if i == j:
return False
alpha_i_old = self.alphas[i].copy()
alpha_j_old = self.alphas[j].copy()
y_i = y[i, 0]
y_j = y[j, 0]
if y_i != y_j:
low = _cml_precision.float(max(0.0, alpha_j_old - alpha_i_old))
high = _cml_precision.float(min(self.C, self.C + alpha_j_old - alpha_i_old))
else:
low = _cml_precision.float(max(0.0, alpha_i_old + alpha_j_old - self.C))
high = _cml_precision.float(min(self.C, alpha_i_old + alpha_j_old))
if low == high: # 二维空间对角线重合.
return False
x_i = x[[i], :]
x_j = x[[j], :]
kappa_ii = self.kernel(x_i, x_i)
kappa_ij = self.kernel(x_i, x_j)
kappa_ji = self.kernel(x_j, x_i)
kappa_jj = self.kernel(x_j, x_j)
eta = _cml_precision.float(kappa_ii + kappa_jj - 2 * kappa_ij)
if eta <= 0: # 2-范数小于零.
return False
alpha_j_new = alpha_j_old + y_j * (error_i - error_j) / eta
alpha_j_new = clip_alpha(alpha_j_new, low, high)
if np.abs(alpha_j_new - alpha_j_old) < 1e-5: # 更新幅度过小.
return False
alpha_i_new = _cml_precision.float(alpha_i_old + y_i * y_j * (alpha_j_old - alpha_j_new))
# 两个变量优化后要重新计算偏置.
b_i = (-error_i - y_i * kappa_ii * (alpha_i_new - alpha_i_old)
- y_j * kappa_ji * (alpha_j_new - alpha_j_old) + self.b)
b_j = (-error_j - y_i * kappa_ij * (alpha_i_new - alpha_i_old)
- y_j * kappa_jj * (alpha_j_new - alpha_j_old) + self.b)
self.b = (b_i + b_j) / 2 # 这里采用的周志华机器学习中的方法, 求解平均值更为鲁棒.
self.alphas[i] = alpha_i_new
self.alphas[j] = alpha_j_new
# 更新缓存.
self._update_error_cache(x, y)
self._update_alpha_cache(alpha_i_new, i)
self._update_alpha_cache(alpha_j_new, j)
return True
def _update_error_cache(self, x, y):
"""更新违背值缓存.
Arguments:
x: numpy.ndarray, array-like, 特征数据.
y: numpy.ndarray, array-like, 标签.
"""
for i in self.non_bound_alphas.nonzero()[0]:
self.error_cache[i] = calculate_error(x, y, i, self.kernel, self.alphas, self.non_zero_alphas, self.b)
def _update_alpha_cache(self, alpha, index):
"""更新拉格朗日乘子缓存.
Arguments:
alpha: float, 拉格朗日乘子.
index: int, 拉格朗日乘子的下标.
"""
self.non_zero_alphas[index] = (alpha > 0)
self.non_bound_alphas[index] = int(0 < alpha < self.C)
# Aliases.
SGD = StochasticGradientDescent
SMO = SequentialMinimalOptimization
| [
"classicML.backend.python.activations.relu.diff",
"numpy.sqrt",
"classicML._cml_precision.float",
"classicML.backend.python.activations.relu",
"numpy.linalg.norm",
"classicML.backend.python.ops.clip_alpha",
"numpy.asarray",
"numpy.exp",
"numpy.matmul",
"classicML.backend.python._utils.ProgressBar"... | [((9553, 9569), 'numpy.matmul', 'np.matmul', (['da', 'w'], {}), '(da, w)\n', (9562, 9569), True, 'import numpy as np\n'), ((17995, 18023), 'numpy.matmul', 'np.matmul', (['x_hat', 'parameters'], {}), '(x_hat, parameters)\n', (18004, 18023), True, 'import numpy as np\n'), ((18041, 18055), 'classicML.backend.python.activations.sigmoid', 'sigmoid', (['cache'], {}), '(cache)\n', (18048, 18055), False, 'from classicML.backend.python.activations import sigmoid\n'), ((18428, 18465), 'classicML._cml_precision.float', '_cml_precision.float', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (18448, 18465), False, 'from classicML import _cml_precision\n'), ((18481, 18511), 'numpy.sum', 'np.sum', (['(-x_hat * error)'], {'axis': '(0)'}), '(-x_hat * error, axis=0)\n', (18487, 18511), True, 'import numpy as np\n'), ((21386, 21414), 'numpy.matmul', 'np.matmul', (['x_hat', 'parameters'], {}), '(x_hat, parameters)\n', (21395, 21414), True, 'import numpy as np\n'), ((21432, 21446), 'classicML.backend.python.activations.sigmoid', 'sigmoid', (['cache'], {}), '(cache)\n', (21439, 21446), False, 'from classicML.backend.python.activations import sigmoid\n'), ((21819, 21856), 'classicML._cml_precision.float', '_cml_precision.float', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (21839, 21856), False, 'from classicML import _cml_precision\n'), ((21872, 21902), 'numpy.sum', 'np.sum', (['(-x_hat * error)'], {'axis': '(0)'}), '(-x_hat * error, axis=0)\n', (21878, 21902), True, 'import numpy as np\n'), ((25336, 25399), 'numpy.zeros', 'np.zeros', (['[number_of_sample, units]'], {'dtype': '_cml_precision.float'}), '([number_of_sample, units], dtype=_cml_precision.float)\n', (25344, 25399), True, 'import numpy as np\n'), ((25415, 25478), 'numpy.zeros', 'np.zeros', (['[number_of_sample, units]'], {'dtype': '_cml_precision.float'}), '([number_of_sample, units], dtype=_cml_precision.float)\n', (25423, 25478), True, 'import numpy as np\n'), 
((26292, 26312), 'numpy.matmul', 'np.matmul', (['y_pred', 'w'], {}), '(y_pred, w)\n', (26301, 26312), True, 'import numpy as np\n'), ((28669, 28714), 'numpy.asarray', 'np.asarray', (['[0.0]'], {'dtype': '_cml_precision.float'}), '([0.0], dtype=_cml_precision.float)\n', (28679, 28714), True, 'import numpy as np\n'), ((28772, 28829), 'numpy.zeros', 'np.zeros', (['(number_of_sample,)'], {'dtype': '_cml_precision.float'}), '((number_of_sample,), dtype=_cml_precision.float)\n', (28780, 28829), True, 'import numpy as np\n'), ((28863, 28920), 'numpy.zeros', 'np.zeros', (['(number_of_sample,)'], {'dtype': '_cml_precision.float'}), '((number_of_sample,), dtype=_cml_precision.float)\n', (28871, 28920), True, 'import numpy as np\n'), ((28949, 29006), 'numpy.zeros', 'np.zeros', (['(number_of_sample,)'], {'dtype': '_cml_precision.float'}), '((number_of_sample,), dtype=_cml_precision.float)\n', (28957, 29006), True, 'import numpy as np\n'), ((29039, 29080), 'numpy.zeros', 'np.zeros', (['(number_of_sample,)'], {'dtype': 'bool'}), '((number_of_sample,), dtype=bool)\n', (29047, 29080), True, 'import numpy as np\n'), ((33321, 33377), 'classicML._cml_precision.float', '_cml_precision.float', (['(kappa_ii + kappa_jj - 2 * kappa_ij)'], {}), '(kappa_ii + kappa_jj - 2 * kappa_ij)\n', (33341, 33377), False, 'from classicML import _cml_precision\n'), ((33527, 33561), 'classicML.backend.python.ops.clip_alpha', 'clip_alpha', (['alpha_j_new', 'low', 'high'], {}), '(alpha_j_new, low, high)\n', (33537, 33561), False, 'from classicML.backend.python.ops import clip_alpha\n'), ((33675, 33750), 'classicML._cml_precision.float', '_cml_precision.float', (['(alpha_i_old + y_i * y_j * (alpha_j_old - alpha_j_new))'], {}), '(alpha_i_old + y_i * y_j * (alpha_j_old - alpha_j_new))\n', (33695, 33750), False, 'from classicML import _cml_precision\n'), ((4787, 4820), 'classicML.backend.python._utils.ProgressBar', 'ProgressBar', (['epochs', 'loss', 'metric'], {}), '(epochs, loss, metric)\n', (4798, 4820), 
False, 'from classicML.backend.python._utils import ProgressBar\n'), ((4887, 4893), 'time.time', 'time', ([], {}), '()\n', (4891, 4893), False, 'from time import time\n'), ((4942, 4979), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_of_features'], {}), '(0, num_of_features)\n', (4959, 4979), True, 'import numpy as np\n'), ((7087, 7094), 'classicML.backend.python.activations.relu', 'relu', (['z'], {}), '(z)\n', (7091, 7094), False, 'from classicML.backend.python.activations import relu\n'), ((7340, 7364), 'numpy.matmul', 'np.matmul', (['a', 'w_output.T'], {}), '(a, w_output.T)\n', (7349, 7364), True, 'import numpy as np\n'), ((7432, 7442), 'classicML.backend.python.activations.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (7439, 7442), False, 'from classicML.backend.python.activations import sigmoid\n'), ((7478, 7488), 'classicML.backend.python.activations.softmax', 'softmax', (['z'], {}), '(z)\n', (7485, 7488), False, 'from classicML.backend.python.activations import softmax\n'), ((8313, 8344), 'classicML.backend.python.activations.sigmoid.diff', 'sigmoid.diff', (['y_pred', 'a', 'y_true'], {}), '(y_pred, a, y_true)\n', (8325, 8344), False, 'from classicML.backend.python.activations import sigmoid\n'), ((8376, 8399), 'classicML.backend.python.activations.softmax.diff', 'softmax.diff', (['y_true', 'a'], {}), '(y_true, a)\n', (8388, 8399), False, 'from classicML.backend.python.activations import softmax\n'), ((8820, 8840), 'classicML.backend.python.activations.relu.diff', 'relu.diff', (['output', 'a'], {}), '(output, a)\n', (8829, 8840), False, 'from classicML.backend.python.activations import relu\n'), ((9437, 9455), 'numpy.matmul', 'np.matmul', (['da.T', 'x'], {}), '(da.T, x)\n', (9446, 9455), True, 'import numpy as np\n'), ((9487, 9520), 'numpy.sum', 'np.sum', (['da'], {'axis': '(0)', 'keepdims': '(True)'}), '(da, axis=0, keepdims=True)\n', (9493, 9520), True, 'import numpy as np\n'), ((11588, 11621), 'classicML.backend.python._utils.ProgressBar', 
'ProgressBar', (['epochs', 'loss', 'metric'], {}), '(epochs, loss, metric)\n', (11599, 11621), False, 'from classicML.backend.python._utils import ProgressBar\n'), ((11688, 11694), 'time.time', 'time', ([], {}), '()\n', (11692, 11694), False, 'from time import time\n'), ((11743, 11780), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_of_features'], {}), '(0, num_of_features)\n', (11760, 11780), True, 'import numpy as np\n'), ((16352, 16385), 'classicML.backend.python._utils.ProgressBar', 'ProgressBar', (['epochs', 'loss', 'metric'], {}), '(epochs, loss, metric)\n', (16363, 16385), False, 'from classicML.backend.python._utils import ProgressBar\n'), ((16450, 16456), 'time.time', 'time', ([], {}), '()\n', (16454, 16456), False, 'from time import time\n'), ((19545, 19578), 'classicML.backend.python._utils.ProgressBar', 'ProgressBar', (['epochs', 'loss', 'metric'], {}), '(epochs, loss, metric)\n', (19556, 19578), False, 'from classicML.backend.python._utils import ProgressBar\n'), ((19645, 19651), 'time.time', 'time', ([], {}), '()\n', (19649, 19651), False, 'from time import time\n'), ((20896, 20918), 'numpy.linalg.inv', 'np.linalg.inv', (['hessian'], {}), '(hessian)\n', (20909, 20918), True, 'import numpy as np\n'), ((22289, 22310), 'numpy.matmul', 'np.matmul', (['x_hat.T', 'P'], {}), '(x_hat.T, P)\n', (22298, 22310), True, 'import numpy as np\n'), ((23561, 23594), 'classicML.backend.python._utils.ProgressBar', 'ProgressBar', (['epochs', 'loss', 'metric'], {}), '(epochs, loss, metric)\n', (23572, 23594), False, 'from classicML.backend.python._utils import ProgressBar\n'), ((23659, 23665), 'time.time', 'time', ([], {}), '()\n', (23663, 23665), False, 'from time import time\n'), ((25613, 25651), 'numpy.exp', 'np.exp', (['(-beta[0, unit] * x_ci[:, unit])'], {}), '(-beta[0, unit] * x_ci[:, unit])\n', (25619, 25651), True, 'import numpy as np\n'), ((25670, 25689), 'numpy.matmul', 'np.matmul', (['rho', 'w.T'], {}), '(rho, w.T)\n', (25679, 25689), True, 'import 
numpy as np\n'), ((26173, 26193), 'numpy.matmul', 'np.matmul', (['dy.T', 'rho'], {}), '(dy.T, rho)\n', (26182, 26193), True, 'import numpy as np\n'), ((26225, 26258), 'numpy.sum', 'np.sum', (['dy'], {'axis': '(0)', 'keepdims': '(True)'}), '(dy, axis=0, keepdims=True)\n', (26231, 26258), True, 'import numpy as np\n'), ((26329, 26378), 'numpy.sum', 'np.sum', (['(drho * rho * -x_ci)'], {'axis': '(0)', 'keepdims': '(True)'}), '(drho * rho * -x_ci, axis=0, keepdims=True)\n', (26335, 26378), True, 'import numpy as np\n'), ((31216, 31301), 'classicML.backend.python.ops.calculate_error', 'calculate_error', (['x', 'y', 'i', 'self.kernel', 'self.alphas', 'self.non_zero_alphas', 'self.b'], {}), '(x, y, i, self.kernel, self.alphas, self.non_zero_alphas, self.b\n )\n', (31231, 31301), False, 'from classicML.backend.python.ops import calculate_error\n'), ((31842, 31881), 'numpy.random.permutation', 'np.random.permutation', (['number_of_sample'], {}), '(number_of_sample)\n', (31863, 31881), True, 'import numpy as np\n'), ((33574, 33607), 'numpy.abs', 'np.abs', (['(alpha_j_new - alpha_j_old)'], {}), '(alpha_j_new - alpha_j_old)\n', (33580, 33607), True, 'import numpy as np\n'), ((34654, 34739), 'classicML.backend.python.ops.calculate_error', 'calculate_error', (['x', 'y', 'i', 'self.kernel', 'self.alphas', 'self.non_zero_alphas', 'self.b'], {}), '(x, y, i, self.kernel, self.alphas, self.non_zero_alphas, self.b\n )\n', (34669, 34739), False, 'from classicML.backend.python.ops import calculate_error\n'), ((5173, 5202), 'numpy.asarray', 'np.asarray', (['[y[random_index]]'], {}), '([y[random_index]])\n', (5183, 5202), True, 'import numpy as np\n'), ((7049, 7066), 'numpy.matmul', 'np.matmul', (['x', 'w.T'], {}), '(x, w.T)\n', (7058, 7066), True, 'import numpy as np\n'), ((11974, 12003), 'numpy.asarray', 'np.asarray', (['[y[random_index]]'], {}), '([y[random_index]])\n', (11984, 12003), True, 'import numpy as np\n'), ((17919, 17977), 'numpy.ones', 'np.ones', (['(number_of_sample, 1)'], 
{'dtype': '_cml_precision.float'}), '((number_of_sample, 1), dtype=_cml_precision.float)\n', (17926, 17977), True, 'import numpy as np\n'), ((21310, 21368), 'numpy.ones', 'np.ones', (['(number_of_sample, 1)'], {'dtype': '_cml_precision.float'}), '((number_of_sample, 1), dtype=_cml_precision.float)\n', (21317, 21368), True, 'import numpy as np\n'), ((22220, 22242), 'numpy.eye', 'np.eye', (['x_hat.shape[0]'], {}), '(x_hat.shape[0])\n', (22226, 22242), True, 'import numpy as np\n'), ((25541, 25579), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - c[[unit],])'], {'axis': '(1)'}), '(x - c[[unit],], axis=1)\n', (25555, 25579), True, 'import numpy as np\n'), ((31563, 31592), 'numpy.sum', 'np.sum', (['self.non_bound_alphas'], {}), '(self.non_bound_alphas)\n', (31569, 31592), True, 'import numpy as np\n'), ((31627, 31696), 'classicML.backend.python.ops.select_second_alpha', 'select_second_alpha', (['error_i', 'self.error_cache', 'self.non_bound_alphas'], {}), '(error_i, self.error_cache, self.non_bound_alphas)\n', (31646, 31696), False, 'from classicML.backend.python.ops import select_second_alpha\n'), ((31909, 31994), 'classicML.backend.python.ops.calculate_error', 'calculate_error', (['x', 'y', 'j', 'self.kernel', 'self.alphas', 'self.non_zero_alphas', 'self.b'], {}), '(x, y, j, self.kernel, self.alphas, self.non_zero_alphas, self.b\n )\n', (31924, 31994), False, 'from classicML.backend.python.ops import calculate_error\n'), ((13917, 13945), 'numpy.power', 'np.power', (['self.beta_1', 'epoch'], {}), '(self.beta_1, epoch)\n', (13925, 13945), True, 'import numpy as np\n'), ((13993, 14021), 'numpy.power', 'np.power', (['self.beta_1', 'epoch'], {}), '(self.beta_1, epoch)\n', (14001, 14021), True, 'import numpy as np\n'), ((14093, 14121), 'numpy.power', 'np.power', (['self.beta_2', 'epoch'], {}), '(self.beta_2, epoch)\n', (14101, 14121), True, 'import numpy as np\n'), ((14169, 14197), 'numpy.power', 'np.power', (['self.beta_2', 'epoch'], {}), '(self.beta_2, epoch)\n', (14177, 
14197), True, 'import numpy as np\n'), ((14290, 14306), 'numpy.sqrt', 'np.sqrt', (['v_hat_w'], {}), '(v_hat_w)\n', (14297, 14306), True, 'import numpy as np\n'), ((14395, 14411), 'numpy.sqrt', 'np.sqrt', (['v_hat_b'], {}), '(v_hat_b)\n', (14402, 14411), True, 'import numpy as np\n')] |
"""Unit test for the :func:`esmvalcore.preprocessor._units` function"""
import unittest
import cf_units
import iris
import numpy as np
import tests
from esmvalcore.preprocessor._units import convert_units
class Test(tests.Test):
"""Test class for _units"""
def setUp(self):
"""Prepare tests"""
coord_sys = iris.coord_systems.GeogCS(iris.fileformats.pp.EARTH_RADIUS)
self.data2 = np.array([[0., 1.], [2., 3.]])
lons2 = iris.coords.DimCoord([1.5, 2.5],
standard_name='longitude',
bounds=[[1., 2.], [2., 3.]],
units='degrees_east',
coord_system=coord_sys)
lats2 = iris.coords.DimCoord([1.5, 2.5],
standard_name='latitude',
bounds=[[1., 2.], [2., 3.]],
units='degrees_north',
coord_system=coord_sys)
coords_spec3 = [(lats2, 0), (lons2, 1)]
self.arr = iris.cube.Cube(self.data2, units='K',
dim_coords_and_dims=coords_spec3)
def test_convert_incompatible_units(self):
"""Test conversion to incompatible units."""
self.assertRaises(ValueError, convert_units, self.arr, 'm')
def test_convert_compatible_units(self):
"""Test conversion to compatible units."""
result = convert_units(self.arr, 'degC')
expected_data = np.array([[-273.15, -272.15], [-271.15, -270.15]])
expected_units = cf_units.Unit('degC')
self.assertEqual(result.units, expected_units)
self.assert_array_equal(result.data, expected_data)
if __name__ == '__main__':
unittest.main()
| [
"iris.coords.DimCoord",
"cf_units.Unit",
"numpy.array",
"iris.coord_systems.GeogCS",
"esmvalcore.preprocessor._units.convert_units",
"unittest.main",
"iris.cube.Cube"
] | [((1804, 1819), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1817, 1819), False, 'import unittest\n'), ((336, 395), 'iris.coord_systems.GeogCS', 'iris.coord_systems.GeogCS', (['iris.fileformats.pp.EARTH_RADIUS'], {}), '(iris.fileformats.pp.EARTH_RADIUS)\n', (361, 395), False, 'import iris\n'), ((417, 451), 'numpy.array', 'np.array', (['[[0.0, 1.0], [2.0, 3.0]]'], {}), '([[0.0, 1.0], [2.0, 3.0]])\n', (425, 451), True, 'import numpy as np\n'), ((464, 607), 'iris.coords.DimCoord', 'iris.coords.DimCoord', (['[1.5, 2.5]'], {'standard_name': '"""longitude"""', 'bounds': '[[1.0, 2.0], [2.0, 3.0]]', 'units': '"""degrees_east"""', 'coord_system': 'coord_sys'}), "([1.5, 2.5], standard_name='longitude', bounds=[[1.0, \n 2.0], [2.0, 3.0]], units='degrees_east', coord_system=coord_sys)\n", (484, 607), False, 'import iris\n'), ((763, 906), 'iris.coords.DimCoord', 'iris.coords.DimCoord', (['[1.5, 2.5]'], {'standard_name': '"""latitude"""', 'bounds': '[[1.0, 2.0], [2.0, 3.0]]', 'units': '"""degrees_north"""', 'coord_system': 'coord_sys'}), "([1.5, 2.5], standard_name='latitude', bounds=[[1.0, \n 2.0], [2.0, 3.0]], units='degrees_north', coord_system=coord_sys)\n", (783, 906), False, 'import iris\n'), ((1113, 1184), 'iris.cube.Cube', 'iris.cube.Cube', (['self.data2'], {'units': '"""K"""', 'dim_coords_and_dims': 'coords_spec3'}), "(self.data2, units='K', dim_coords_and_dims=coords_spec3)\n", (1127, 1184), False, 'import iris\n'), ((1502, 1533), 'esmvalcore.preprocessor._units.convert_units', 'convert_units', (['self.arr', '"""degC"""'], {}), "(self.arr, 'degC')\n", (1515, 1533), False, 'from esmvalcore.preprocessor._units import convert_units\n'), ((1558, 1608), 'numpy.array', 'np.array', (['[[-273.15, -272.15], [-271.15, -270.15]]'], {}), '([[-273.15, -272.15], [-271.15, -270.15]])\n', (1566, 1608), True, 'import numpy as np\n'), ((1634, 1655), 'cf_units.Unit', 'cf_units.Unit', (['"""degC"""'], {}), "('degC')\n", (1647, 1655), False, 'import cf_units\n')] |
#Python 3.4
#PySide 1.2.4
#PyOpenGL 3.1.0
import sys
import numpy as np
from ctypes import sizeof, c_float, c_void_p
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtOpenGL import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from OpenGL.GL.shaders import compileShader, compileProgram
class Window(QDialog):
def __init__(self, parent=None):
"""
This is a top-level window for displaying the OpenGL graphics.
Use this as a container of the QGLWidget for better extensibility.
We can add other widgets to interact with the graphics in the future.
"""
super().__init__(parent)
self.setupUI()
def setupUI(self):
self.setWindowTitle("OpenGL")
self.setWindowFlags(Qt.WindowMinMaxButtonsHint)
self.resize(800, 600)
self.glWidget = GLWidget(self)
self.layout = QGridLayout(self)
self.layout.addWidget(self.glWidget, 0, 0, 1, 1)
class GLWidget(QGLWidget):
def __init__(self, parent=None):
"""
QGLWidget provides functionality for displaying OpenGL graphics integrated into a Qt application.
Reimplement 3 virtual functions in the subclass:
1 paintGL() -- Renders the OpenGL scene.
2 resizeGL() -- Sets up the OpenGL viewport, projection, ect. Gets called whenever the widget is resized.
3 initializeGL() -- Sets up the OpenGL rendering context.
"""
super().__init__(parent)
self.parent = parent
#set focus to catch keyPressEvent
self.setFocus()
#Define parameters
self.NumVAOs = 1
self.NumBuffers = 1
self.vPosition = 0
self.NumVertices = 6
self.float_size = sizeof(c_float)
def initializeGL(self):
#Initialize vertex-array objects
self.VAOs = glGenVertexArrays(self.NumVAOs) #return a name for use as VAO
glBindVertexArray(self.VAOs) #create VAO and assign the name
self.vertices = np.array([
-0.90, -0.90,
0.85, -0.90,
-0.90, 0.85,
0.90, -0.85,
0.90, 0.90,
-0.85, 0.90], dtype='float32')
#Allocate vertex-buffer objects
self.VBOs = glGenBuffers(self.NumBuffers) #return a name for use as VBO
glBindBuffer(GL_ARRAY_BUFFER, self.VBOs) #create VBO and assign the name
#Load data into a buffer object
glBufferData(GL_ARRAY_BUFFER, self.vertices, GL_STATIC_DRAW) #transfer vertex data from objects into buffer
#Initialize vertex and fragment shaders
strVertexShader = """
#version 430 core //version of OpenGL Shading Language to use
layout(location = 0) in vec4 vPosition; //shader variable
void main() {
gl_Position = vPosition; //special vertex-shader output
}
"""
strFragmentShader = """
#version 430 core
out vec4 fColor;
void main() {
fColor = vec4(0.0, 0.0, 1.0, 1.0); //RGB color space, ranging from [0, 1]
}
"""
self.program = compileProgram(
compileShader(strVertexShader, GL_VERTEX_SHADER),
compileShader(strFragmentShader, GL_FRAGMENT_SHADER)
)
#Associate variables in a vertex shader with data stored in VBO
stride = 2 * self.float_size
offset = c_void_p(0 * self.float_size)
glVertexAttribPointer(self.vPosition, 2, GL_FLOAT, GL_FALSE, stride, offset)
glEnableVertexAttribArray(self.vPosition)
def paintGL(self):
glClear(GL_COLOR_BUFFER_BIT)
glUseProgram(self.program)
glBindVertexArray(self.VAOs) #select the vertex array as vertex data
glDrawArrays(GL_TRIANGLES, 0, self.NumVertices)
def resizeGL(self, width, height):
glViewport(0, 0, width, height)
self.updateGL()
def keyPressEvent(self, event):
pass
def mousePressEvent(self, event):
pass
def mouseMoveEvent(self, event):
pass
def wheelEvent(self, event):
pass
def main():
app = QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
main() | [
"numpy.array",
"OpenGL.GL.shaders.compileShader",
"ctypes.c_void_p",
"ctypes.sizeof"
] | [((1772, 1787), 'ctypes.sizeof', 'sizeof', (['c_float'], {}), '(c_float)\n', (1778, 1787), False, 'from ctypes import sizeof, c_float, c_void_p\n'), ((2057, 2159), 'numpy.array', 'np.array', (['[-0.9, -0.9, 0.85, -0.9, -0.9, 0.85, 0.9, -0.85, 0.9, 0.9, -0.85, 0.9]'], {'dtype': '"""float32"""'}), "([-0.9, -0.9, 0.85, -0.9, -0.9, 0.85, 0.9, -0.85, 0.9, 0.9, -0.85, \n 0.9], dtype='float32')\n", (2065, 2159), True, 'import numpy as np\n'), ((3497, 3526), 'ctypes.c_void_p', 'c_void_p', (['(0 * self.float_size)'], {}), '(0 * self.float_size)\n', (3505, 3526), False, 'from ctypes import sizeof, c_float, c_void_p\n'), ((3241, 3289), 'OpenGL.GL.shaders.compileShader', 'compileShader', (['strVertexShader', 'GL_VERTEX_SHADER'], {}), '(strVertexShader, GL_VERTEX_SHADER)\n', (3254, 3289), False, 'from OpenGL.GL.shaders import compileShader, compileProgram\n'), ((3303, 3355), 'OpenGL.GL.shaders.compileShader', 'compileShader', (['strFragmentShader', 'GL_FRAGMENT_SHADER'], {}), '(strFragmentShader, GL_FRAGMENT_SHADER)\n', (3316, 3355), False, 'from OpenGL.GL.shaders import compileShader, compileProgram\n')] |
import sys
sys.path.append("../")
from pathlib import Path
import numpy as np
import healpy as hp
import torch
import gpytorch
import pyro.distributions as dist
from scipy.stats import poisson
from scipy.optimize import minimize
import utils.create_mask as cm
from utils.psf_correction import PSFCorrection
from utils.utils import make_dirs
from models.psf import KingPSF
from models.scd import dnds
from models.gp_regression import HealpixGPRegressionModel
from simulations.simulate_ps import SimulateMap
class ModelConstructor:
def __init__(self, data_dir=None, r_outer=20.0, nside=128, theta_poiss=[0.5, 0.1, 0.5, 0.0, 8.0, 4.0], theta_ps=[1.5, 20.0, 1.8, -20.0, 20.0, 0.1], num_inducing=200, guide_name="MVN", n_exp=1, dif_sim="mO", dif_fit="p6", gp_mean="zero", run_name="test", save_dir=None, kernel="matern52", mask_ps=True, mask_plane=True, outputscale_prior=None, lengthscale_prior=None, learn_inducing_locations=True, inducing_strategy="uniform", poiss_only=True, plot_inducing=True):
""" High-level interface to construct Pyro model with GP component.
:param data_dir: Directory where data is stored. Defaults to '../data/'.
:param r_outer: Outer radius of mask. Defaults to 20.
:param nside: Healpix nside parameter. Defaults to 128.
:param theta_poiss: Norms of Poissonian templates ["bub", "iso", "psc", "gce", "dif_pibrem", "dif_ics"]. Defaults to [0.5, 0.1, 0.5, 0.0, 8.0, 4.0].
:param theta_ps: Non-Poissonian GCE template parameters for 2-break SCD, NPTFit convention. Defaults to [1.5, 20.0, 1.8, -20.0, 20.0, 0.1].
:param num_inducing: Number of variational GP inducing points. Defaults to 200.
:param guide_name: Guide type for template parameters. Defaults to "MVN".
"MVN": Multivariate normal guide
"IAF": Inverse autoregressive flow guide
"ConditionalIAF": Inverse autoregressive flow guide with NN conditioned on GP summary parameters
:param n_exp: Number of exposure regions for NPTF likelihood. Defaults to 1.
:param dif_sim: Diffuse model used for simulation. "Model O" hard-coded at the moment. Defaults to "mO".
:param dif_fit: Diffuse model used for fit. "p6" hard-coded at the moment. Defaults to "p6".
:param gp_mean: GP mean specification, ["zero", "constant"]. Defaults to "zero".
:param run_name: Tag of run. Defaults to "test".
:param save_dir: Directory in which to save model state. By default `save` in current dir. Defaults to None.
:param kernel: GP kernel. One of ["matern52", "matern32", "matern12", "rbf]. Defaults to "matern52".
:param mask_ps: Whether to mask resolved PSs in ROI. Defaults to True.
:param mask_plane: Whether to mask the plane in ROI. Defaults to True.
:param outputscale_prior: (mean, variance) of outputscale prior of GP kernel. Defaults to None.
:param lengthscale_prior: (mean, variance) of lengthscale prior of GP kernel. Defaults to None.
:param learn_inducing_locations: Whether to learn positions of inducing points. Defaults to True.
:param inducing_strategy: Inducing points specification, either "uniform" or "all", latter setting all ROI pixels as inducing points. Defaults to "uniform".
:param poiss_only: Whether to use only Poissonian likelihood (no PS templates). Defaults to True.
:param plot_inducing: Whether to plot a healpix map of inducing point locations. Defaults to True.
"""
self.nside = nside
self.num_inducing = num_inducing
self.guide_name = guide_name
self.n_exp = n_exp
self.r_outer = r_outer
self.theta_ps = theta_ps
self.theta_poiss = theta_poiss
self.gp_mean = gp_mean
self.kernel = kernel
self.poiss_only = poiss_only
self.inducing_strategy = inducing_strategy
self.learn_inducing_locations = learn_inducing_locations
self.outputscale_prior = outputscale_prior
self.lengthscale_prior = lengthscale_prior
self.mask_ps = mask_ps
self.mask_plane = mask_plane
self.dif_fit = dif_fit
self.dif_sim = dif_sim
self.run_name = run_name
self.plot_inducing = plot_inducing
self.n_pix = hp.nside2npix(self.nside)
if self.lengthscale_prior is not None:
mu, sigma = self.lengthscale_prior
self.lengthscale_prior = gpytorch.priors.NormalPrior(mu, sigma)
if self.outputscale_prior is not None:
mu, sigma = self.outputscale_prior
self.outputscale_prior = gpytorch.priors.NormalPrior(mu, sigma)
if not data_dir:
data_dir = str(Path(__file__).parent / "../data/")
self.save_dir = save_dir
if not self.save_dir:
self.save_dir = str(Path(__file__).parent / "../inference/save/")
self.save_dir += "/" + run_name + "/"
make_dirs([self.save_dir])
self.load_data(data_dir)
self.construct_mask()
self.load_psf()
self.simulate()
self.do_scipy_fit(self.data)
self.construct_model()
def do_scipy_fit(self, data):
""" SciPy fit to data to get norms of templates
"""
def log_like(theta, data):
""" Bin-wise Poisson likelihood
"""
# The parameters are the overall norms of each template
A_bub, A_iso, A_psc, A_gce, A_dif = theta
# Model is the sum of templates with free normalizations
mu = (A_bub * self.temp_bub + A_iso * self.temp_iso + A_psc * self.temp_psc + A_gce * self.temp_gce + A_dif * self.temp_dif_fit)[~self.mask]
# Bin-wise Poisson sum
return np.sum(poisson.logpmf(data, mu))
self.opt = minimize(lambda theta: -log_like(theta, data[~self.mask]), x0=[1.0] * 5, bounds=[[0, 30]] * 5, method="L-BFGS-B")
def load_data(self, data_dir):
    """Load the Fermi counts/exposure maps and all spatial templates.

    Every map is read from ``data_dir`` and regraded to ``self.nside``
    (``power=-2`` so that total counts are preserved). Also builds the
    exposure-corrected, mean-normalized PS templates and the 3FGL mask.
    """
    def regrade(rel_path):
        # Load one map and bring it to the working resolution.
        return hp.ud_grade(np.load(data_dir + rel_path), nside_out=self.nside, power=-2)

    # Data and exposure.
    self.fermi_counts = regrade("/fermi_data/fermidata_counts.npy")
    self.fermi_exp = regrade("/fermi_data/fermidata_exposure.npy")

    # Poissonian spatial templates.
    self.temp_bub = regrade("/fermi_data/template_bub.npy")
    self.temp_dsk = regrade("/fermi_data/template_dsk.npy")
    self.temp_psc = regrade("/fermi_data/template_psc.npy")
    self.temp_iso = regrade("/fermi_data/template_iso.npy")
    self.temp_gce = regrade("/fermi_data/template_gce.npy")
    self.temp_p6 = regrade("/fermi_data/template_dif.npy")

    # Model O diffuse components.
    self.temp_mO_ics = regrade("/fermi_data/ModelO_r25_q1_ics.npy")
    self.temp_mO_pibrem = regrade("/fermi_data/ModelO_r25_q1_pibrem.npy")
    self.temp_m0_tot = self.temp_mO_ics + self.temp_mO_pibrem

    # Model A diffuse components.
    self.temp_mA_ics = regrade("/modelA/modelA_ics.npy")
    self.temp_mA_pibrem = regrade("/modelA/modelA_brempi0.npy")

    # NOTE(review): these comparisons use "m0" (zero) while ``simulate``
    # compares ``dif_sim`` against "mO" (capital O) -- confirm which
    # spelling callers actually pass.
    if self.dif_sim == "p6":
        self.temp_dif_sim = self.temp_p6
    elif self.dif_sim == "m0":
        self.temp_dif_sim = self.temp_m0_tot
    if self.dif_fit == "p6":
        self.temp_dif_fit = self.temp_p6
    elif self.dif_fit == "m0":
        self.temp_dif_fit = self.temp_m0_tot

    # Exposure-corrected point-source templates, normalized to unit mean.
    self.temp_gce_ps = self.temp_gce / self.fermi_exp
    self.temp_gce_ps /= np.mean(self.temp_gce_ps)
    self.temp_dsk_ps = self.temp_dsk / self.fermi_exp
    self.temp_dsk_ps /= np.mean(self.temp_dsk_ps)

    # 3FGL point-source mask (stored as counts; nonzero means masked).
    self.ps_mask = hp.ud_grade(np.load(data_dir + "/mask_3fgl_0p8deg.npy") > 0, nside_out=self.nside)
def construct_mask(self):
    """Build the total analysis mask (plane cut plus outer ROI radius).

    When ``self.mask_ps`` is set, the 3FGL point-source mask is applied
    on top of the geometric cuts.
    """
    geometric = dict(nside=self.nside, band_mask=self.mask_plane,
                     band_mask_range=2, mask_ring=True, inner=0,
                     outer=self.r_outer)
    if self.mask_ps:
        self.mask = cm.make_mask_total(custom_mask=self.ps_mask, **geometric)
    else:
        self.mask = cm.make_mask_total(**geometric)
def load_psf(self):
    """Load the Fermi PSF correction, using a hard-coded King PSF at 2 GeV."""
    self.kp = KingPSF()
    correction = PSFCorrection(delay_compute=True)
    correction.psf_tag = "Fermi_PSF_2GeV"
    correction.psf_samples = 10000
    correction.psf_r_func = lambda r: self.kp.psf_fermi_r(r)
    # Presumably ~10x the characteristic King radius scale -- TODO confirm.
    correction.sample_psf_max = 10.0 * self.kp.spe * (self.kp.score + self.kp.stail) / 2.0
    correction.make_or_load_psf_corr()
    self.f_ary = correction.f_ary
    self.df_rho_div_f_ary = correction.df_rho_div_f_ary
def simulate(self):
    """Simulate a counts map containing Poissonian emission and point sources."""
    # Source-count distribution evaluated on a log grid of expected counts.
    s_ary = torch.logspace(-2, 2, 1000)
    theta_ps_sim = torch.tensor(self.theta_ps).clone()
    theta_ps_sim[0] *= self.n_pix  # scale the SCD amplitude by the pixel count
    dnds_ary = dnds(s_ary, theta_ps_sim)

    # NOTE(review): this branch tests "mO" (capital O) while ``load_data``
    # uses "m0" (zero) -- confirm the intended spelling.
    base = [self.temp_bub, self.temp_iso, self.temp_psc, self.temp_gce]
    if self.dif_sim == "p6":
        temp_sim_list = base + [self.temp_p6]
    elif self.dif_sim == "mO":
        temp_sim_list = base + [self.temp_mO_pibrem, self.temp_mO_ics]
    else:
        temp_sim_list = []

    # Fermi King PSF, hard-coded via self.kp.
    self.sim = SimulateMap(temp_sim_list, self.theta_poiss, [s_ary],
                           [dnds_ary.detach().numpy()], [self.temp_gce_ps],
                           lambda r: self.kp.psf_fermi_r(r), nside=self.nside)
    self.data = self.sim.create_map()
def construct_model(self):
    """Assemble the Pyro GP-regression model on the unmasked pixels."""
    def normal(mu, sigma):
        # Normal prior from scalar tensors.
        return dist.Normal(torch.tensor(mu), torch.tensor(sigma))

    def uniform(lo, hi):
        # Uniform prior from scalar tensors.
        return dist.Uniform(torch.tensor(lo), torch.tensor(hi))

    # Angular coordinates of every pixel center, in radians.
    pix = np.arange(hp.nside2npix(self.nside))
    angles = np.radians(hp.pix2ang(self.nside, pix, lonlat=True))
    X = torch.tensor(angles).float().T
    Y = torch.tensor(self.data).float()

    # Mask to obtain the "training" data.
    self.train_x = X[~self.mask]
    self.train_y = Y[~self.mask]

    # Poisson-only regression carries no PS templates.
    ps_temps = [] if self.poiss_only else [self.temp_gce_ps, self.temp_dsk_ps]

    # The same prior list is used for both PS populations (GCE and disk).
    ps_prior = [uniform(0.0, 0.5), uniform(11.0, 20.0), uniform(1.1, 1.99),
                uniform(-10.0, 1.99), uniform(1.0, 40.0), uniform(0.01, 0.1)]

    self.model = HealpixGPRegressionModel(
        exposure_map=self.fermi_exp,
        nexp=self.n_exp,
        # Diffuse template scaled by its best-fit norm from the SciPy fit.
        temp_dif=self.opt.x[-1] * self.temp_dif_fit,
        poiss_temps=[self.temp_bub, self.temp_iso, self.temp_psc, self.temp_gce],
        poiss_labels=["bub", "iso", "psc", "gce"],
        poiss_priors=[normal(0.5, 0.1), normal(0.1, 0.02), normal(0.5, 0.1), uniform(0.0, 1.5)],
        poiss_log_priors=[0.0, 0.0, 0.0, 0.0],
        ps_temps=ps_temps,
        ps_labels=["gce", "dsk"],
        ps_priors=2 * [ps_prior],
        ps_log_priors=torch.tensor(2 * [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]),
        relative_breaks=True,
        mask=self.mask,
        name_prefix=self.run_name,
        num_inducing=self.num_inducing,
        nside=self.nside,
        guide_name=self.guide_name,
        no_gp=False,
        f_ary=self.f_ary,
        df_rho_div_f_ary=self.df_rho_div_f_ary,
        gp_mean=self.gp_mean,
        kernel=self.kernel,
        lengthscale_prior=self.lengthscale_prior,
        outputscale_prior=self.outputscale_prior,
        inducing_strategy=self.inducing_strategy,
        learn_inducing_locations=self.learn_inducing_locations,
        plot_inducing=self.plot_inducing,
    )
| [
"numpy.mean",
"models.psf.KingPSF",
"pathlib.Path",
"utils.create_mask.make_mask_total",
"torch.logspace",
"models.scd.dnds",
"torch.tensor",
"utils.utils.make_dirs",
"gpytorch.priors.NormalPrior",
"scipy.stats.poisson.logpmf",
"healpy.nside2npix",
"utils.psf_correction.PSFCorrection",
"nump... | [((12, 34), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (27, 34), False, 'import sys\n'), ((4388, 4413), 'healpy.nside2npix', 'hp.nside2npix', (['self.nside'], {}), '(self.nside)\n', (4401, 4413), True, 'import healpy as hp\n'), ((5041, 5067), 'utils.utils.make_dirs', 'make_dirs', (['[self.save_dir]'], {}), '([self.save_dir])\n', (5050, 5067), False, 'from utils.utils import make_dirs\n'), ((8185, 8210), 'numpy.mean', 'np.mean', (['self.temp_gce_ps'], {}), '(self.temp_gce_ps)\n', (8192, 8210), True, 'import numpy as np\n'), ((8298, 8323), 'numpy.mean', 'np.mean', (['self.temp_dsk_ps'], {}), '(self.temp_dsk_ps)\n', (8305, 8323), True, 'import numpy as np\n'), ((9038, 9047), 'models.psf.KingPSF', 'KingPSF', ([], {}), '()\n', (9045, 9047), False, 'from models.psf import KingPSF\n'), ((9067, 9100), 'utils.psf_correction.PSFCorrection', 'PSFCorrection', ([], {'delay_compute': '(True)'}), '(delay_compute=True)\n', (9080, 9100), False, 'from utils.psf_correction import PSFCorrection\n'), ((9565, 9592), 'torch.logspace', 'torch.logspace', (['(-2)', '(2)', '(1000)'], {}), '(-2, 2, 1000)\n', (9579, 9592), False, 'import torch\n'), ((9796, 9821), 'models.scd.dnds', 'dnds', (['s_ary', 'theta_ps_sim'], {}), '(s_ary, theta_ps_sim)\n', (9800, 9821), False, 'from models.scd import dnds\n'), ((4546, 4584), 'gpytorch.priors.NormalPrior', 'gpytorch.priors.NormalPrior', (['mu', 'sigma'], {}), '(mu, sigma)\n', (4573, 4584), False, 'import gpytorch\n'), ((4717, 4755), 'gpytorch.priors.NormalPrior', 'gpytorch.priors.NormalPrior', (['mu', 'sigma'], {}), '(mu, sigma)\n', (4744, 4755), False, 'import gpytorch\n'), ((6181, 6235), 'numpy.load', 'np.load', (["(data_dir + '/fermi_data/fermidata_counts.npy')"], {}), "(data_dir + '/fermi_data/fermidata_counts.npy')\n", (6188, 6235), True, 'import numpy as np\n'), ((6306, 6362), 'numpy.load', 'np.load', (["(data_dir + '/fermi_data/fermidata_exposure.npy')"], {}), "(data_dir + 
'/fermi_data/fermidata_exposure.npy')\n", (6313, 6362), True, 'import numpy as np\n'), ((6458, 6508), 'numpy.load', 'np.load', (["(data_dir + '/fermi_data/template_bub.npy')"], {}), "(data_dir + '/fermi_data/template_bub.npy')\n", (6465, 6508), True, 'import numpy as np\n'), ((6578, 6628), 'numpy.load', 'np.load', (["(data_dir + '/fermi_data/template_dsk.npy')"], {}), "(data_dir + '/fermi_data/template_dsk.npy')\n", (6585, 6628), True, 'import numpy as np\n'), ((6698, 6748), 'numpy.load', 'np.load', (["(data_dir + '/fermi_data/template_psc.npy')"], {}), "(data_dir + '/fermi_data/template_psc.npy')\n", (6705, 6748), True, 'import numpy as np\n'), ((6818, 6868), 'numpy.load', 'np.load', (["(data_dir + '/fermi_data/template_iso.npy')"], {}), "(data_dir + '/fermi_data/template_iso.npy')\n", (6825, 6868), True, 'import numpy as np\n'), ((6938, 6988), 'numpy.load', 'np.load', (["(data_dir + '/fermi_data/template_gce.npy')"], {}), "(data_dir + '/fermi_data/template_gce.npy')\n", (6945, 6988), True, 'import numpy as np\n'), ((7057, 7107), 'numpy.load', 'np.load', (["(data_dir + '/fermi_data/template_dif.npy')"], {}), "(data_dir + '/fermi_data/template_dif.npy')\n", (7064, 7107), True, 'import numpy as np\n'), ((7199, 7254), 'numpy.load', 'np.load', (["(data_dir + '/fermi_data/ModelO_r25_q1_ics.npy')"], {}), "(data_dir + '/fermi_data/ModelO_r25_q1_ics.npy')\n", (7206, 7254), True, 'import numpy as np\n'), ((7330, 7388), 'numpy.load', 'np.load', (["(data_dir + '/fermi_data/ModelO_r25_q1_pibrem.npy')"], {}), "(data_dir + '/fermi_data/ModelO_r25_q1_pibrem.npy')\n", (7337, 7388), True, 'import numpy as np\n'), ((7547, 7591), 'numpy.load', 'np.load', (["(data_dir + '/modelA/modelA_ics.npy')"], {}), "(data_dir + '/modelA/modelA_ics.npy')\n", (7554, 7591), True, 'import numpy as np\n'), ((7667, 7715), 'numpy.load', 'np.load', (["(data_dir + '/modelA/modelA_brempi0.npy')"], {}), "(data_dir + '/modelA/modelA_brempi0.npy')\n", (7674, 7715), True, 'import numpy as np\n'), ((8601, 
8762), 'utils.create_mask.make_mask_total', 'cm.make_mask_total', ([], {'nside': 'self.nside', 'band_mask': 'self.mask_plane', 'band_mask_range': '(2)', 'mask_ring': '(True)', 'inner': '(0)', 'outer': 'self.r_outer', 'custom_mask': 'self.ps_mask'}), '(nside=self.nside, band_mask=self.mask_plane,\n band_mask_range=2, mask_ring=True, inner=0, outer=self.r_outer,\n custom_mask=self.ps_mask)\n', (8619, 8762), True, 'import utils.create_mask as cm\n'), ((8793, 8924), 'utils.create_mask.make_mask_total', 'cm.make_mask_total', ([], {'nside': 'self.nside', 'band_mask': 'self.mask_plane', 'band_mask_range': '(2)', 'mask_ring': '(True)', 'inner': '(0)', 'outer': 'self.r_outer'}), '(nside=self.nside, band_mask=self.mask_plane,\n band_mask_range=2, mask_ring=True, inner=0, outer=self.r_outer)\n', (8811, 8924), True, 'import utils.create_mask as cm\n'), ((5855, 5879), 'scipy.stats.poisson.logpmf', 'poisson.logpmf', (['data', 'mu'], {}), '(data, mu)\n', (5869, 5879), False, 'from scipy.stats import poisson\n'), ((8383, 8426), 'numpy.load', 'np.load', (["(data_dir + '/mask_3fgl_0p8deg.npy')"], {}), "(data_dir + '/mask_3fgl_0p8deg.npy')\n", (8390, 8426), True, 'import numpy as np\n'), ((9646, 9673), 'torch.tensor', 'torch.tensor', (['self.theta_ps'], {}), '(self.theta_ps)\n', (9658, 9673), False, 'import torch\n'), ((10807, 10830), 'torch.tensor', 'torch.tensor', (['self.data'], {}), '(self.data)\n', (10819, 10830), False, 'import torch\n'), ((12212, 12262), 'torch.tensor', 'torch.tensor', (['(2 * [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])'], {}), '(2 * [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n', (12224, 12262), False, 'import torch\n'), ((4809, 4823), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (4813, 4823), False, 'from pathlib import Path\n'), ((4941, 4955), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (4945, 4955), False, 'from pathlib import Path\n'), ((11521, 11538), 'torch.tensor', 'torch.tensor', (['(0.5)'], {}), '(0.5)\n', (11533, 11538), False, 'import 
torch\n'), ((11540, 11557), 'torch.tensor', 'torch.tensor', (['(0.1)'], {}), '(0.1)\n', (11552, 11557), False, 'import torch\n'), ((11572, 11589), 'torch.tensor', 'torch.tensor', (['(0.1)'], {}), '(0.1)\n', (11584, 11589), False, 'import torch\n'), ((11591, 11609), 'torch.tensor', 'torch.tensor', (['(0.02)'], {}), '(0.02)\n', (11603, 11609), False, 'import torch\n'), ((11624, 11641), 'torch.tensor', 'torch.tensor', (['(0.5)'], {}), '(0.5)\n', (11636, 11641), False, 'import torch\n'), ((11643, 11660), 'torch.tensor', 'torch.tensor', (['(0.1)'], {}), '(0.1)\n', (11655, 11660), False, 'import torch\n'), ((11676, 11693), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (11688, 11693), False, 'import torch\n'), ((11695, 11712), 'torch.tensor', 'torch.tensor', (['(1.5)'], {}), '(1.5)\n', (11707, 11712), False, 'import torch\n'), ((11877, 11894), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (11889, 11894), False, 'import torch\n'), ((11896, 11913), 'torch.tensor', 'torch.tensor', (['(0.5)'], {}), '(0.5)\n', (11908, 11913), False, 'import torch\n'), ((11929, 11947), 'torch.tensor', 'torch.tensor', (['(11.0)'], {}), '(11.0)\n', (11941, 11947), False, 'import torch\n'), ((11949, 11967), 'torch.tensor', 'torch.tensor', (['(20.0)'], {}), '(20.0)\n', (11961, 11967), False, 'import torch\n'), ((11983, 12000), 'torch.tensor', 'torch.tensor', (['(1.1)'], {}), '(1.1)\n', (11995, 12000), False, 'import torch\n'), ((12002, 12020), 'torch.tensor', 'torch.tensor', (['(1.99)'], {}), '(1.99)\n', (12014, 12020), False, 'import torch\n'), ((12036, 12055), 'torch.tensor', 'torch.tensor', (['(-10.0)'], {}), '(-10.0)\n', (12048, 12055), False, 'import torch\n'), ((12057, 12075), 'torch.tensor', 'torch.tensor', (['(1.99)'], {}), '(1.99)\n', (12069, 12075), False, 'import torch\n'), ((12091, 12108), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (12103, 12108), False, 'import torch\n'), ((12110, 12128), 'torch.tensor', 'torch.tensor', (['(40.0)'], {}), 
'(40.0)\n', (12122, 12128), False, 'import torch\n'), ((12144, 12162), 'torch.tensor', 'torch.tensor', (['(0.01)'], {}), '(0.01)\n', (12156, 12162), False, 'import torch\n'), ((12164, 12181), 'torch.tensor', 'torch.tensor', (['(0.1)'], {}), '(0.1)\n', (12176, 12181), False, 'import torch\n'), ((10742, 10767), 'healpy.nside2npix', 'hp.nside2npix', (['self.nside'], {}), '(self.nside)\n', (10755, 10767), True, 'import healpy as hp\n')] |
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import optimizee
import os
# Resolve <parent of cwd>/datasets; MNIST data is expected underneath it.
get_path = os.getcwd()
parent_directory, _tail = os.path.split(get_path)
datasets = os.path.join(parent_directory, "datasets")
class MnistLinearModel(optimizee.Optimizee):
    """An MLP on the MNIST dataset, exposed as an optimizee.

    The network weights are flattened into a single parameter vector of
    length ``x_dim``; ``loss`` evaluates softmax cross-entropy on pre-fed
    mini-batches and ``test_optimizee`` evaluates loss/accuracy on the
    full MNIST test split.
    """
    # Class-level cache so the MNIST files are read from disk only once.
    mnist = None

    def __init__(self, activation='sigmoid', n_batches=128, n_h=20, n_l=1, initial_param_scale=0.1,
                 add_dropout=False):
        """
        :param activation: hidden nonlinearity: 'sigmoid', 'relu', 'elu' or 'tanh'.
        :param n_batches: mini-batch size.
        :param n_h: hidden-layer width.
        :param n_l: number of hidden layers; 0 means plain logistic regression.
        :param initial_param_scale: stddev of the Gaussian weight initialization.
        :param add_dropout: if True, apply dropout (keep prob 0.5) after each hidden layer.
        """
        optimizee.Optimizee.__init__(self)
        self.activation = activation
        self.n_batches = n_batches
        self.n_l = n_l
        self.n_h = n_h
        self.initial_param_scale = initial_param_scale
        self.add_dropout = add_dropout
        if n_l == 0:
            # Logistic regression: one 784x10 weight matrix plus a bias.
            self.x_dim = 784 * 10 + 10
        else:
            # 784->n_h input layer, (n_l - 1) n_h->n_h layers, n_h->10 output.
            self.x_dim = 784 * n_h + n_h + (n_h * n_h + n_h) * (n_l - 1) + n_h * 10 + 10

    def _build_dataset(self):
        # Lazily load MNIST once and share it across all instances.
        if MnistLinearModel.mnist is None:
            data_path = os.path.join(datasets, "MNIST_data/")
            MnistLinearModel.mnist = input_data.read_data_sets(data_path, one_hot=True)
        self.mnist = MnistLinearModel.mnist

    def build(self):
        """Create the training placeholders (iterations x batch x features)."""
        self._build_dataset()
        self.x = tf.placeholder(tf.float32, [None, None, 784])
        self.y_ = tf.placeholder(tf.float32, [None, None, 10])

    def build_test(self):
        """Create the test placeholders (batch x features)."""
        self._build_dataset()
        self.x_test = tf.placeholder(tf.float32, [None, 784])
        self.y_test = tf.placeholder(tf.float32, [None, 10])

    def get_x_dim(self):
        """Return the dimension of the flattened parameter vector."""
        return self.x_dim

    def get_initial_x(self, seed=None):
        """Draw an initial parameter vector from a zero-mean Gaussian.

        NOTE(review): ``seed`` is currently ignored (the seeding call is
        commented out) -- confirm whether reproducible inits are required.
        """
        # np.random.seed(seed)  # TODO seed
        para = np.random.normal(size=[self.x_dim], scale=self.initial_param_scale)
        return para  # initial weights of optimizee

    def next_internal_feed_dict(self):
        """This optimizee has no internal placeholders to feed."""
        return {}

    def next_feed_dict(self, n_iterations):
        """Sample ``n_iterations`` training mini-batches for the placeholders."""
        x_data = np.zeros([n_iterations, self.n_batches, 784])
        y_data = np.zeros([n_iterations, self.n_batches, 10])
        for i in range(n_iterations):
            x_data[i], y_data[i] = self.mnist.train.next_batch(self.n_batches, shuffle=True)  # TODO shuffle = True
        return {self.x: x_data, self.y_: y_data}

    def feed_dict_test(self):
        """Bind the full MNIST test split to the test placeholders."""
        x_test_data = self.mnist.test.images
        y_test_data = self.mnist.test.labels
        return {self.x_test: x_test_data, self.y_test: y_test_data}

    def inference(self, x, inputs):
        """Run the network on ``inputs`` using flat parameter vector ``x``.

        Returns the pre-softmax logits.
        """
        self.start_get_weights(x)
        if self.n_l > 0:
            w1 = self.get_weights([784, self.n_h])
            b1 = self.get_weights([self.n_h])
            w2 = self.get_weights([self.n_h, 10])
            b2 = self.get_weights([10])
            wl = [self.get_weights([self.n_h, self.n_h]) for k in range(self.n_l - 1)]
            bl = [self.get_weights([self.n_h]) for k in range(self.n_l - 1)]

            def act(x):
                # Hidden-layer nonlinearity selected by self.activation.
                if self.activation == 'sigmoid':
                    return tf.sigmoid(x)
                elif self.activation == 'relu':
                    return tf.nn.relu(x)
                elif self.activation == 'elu':
                    return tf.nn.elu(x)
                elif self.activation == 'tanh':
                    return tf.tanh(x)

            last = tf.matmul(inputs, w1) + b1
            last = act(last)
            for k in range(self.n_l - 1):
                last = tf.matmul(last, wl[k]) + bl[k]
                if self.add_dropout:
                    last = tf.nn.dropout(last, 0.5)
                last = act(last)
            pred = tf.matmul(last, w2) + b2
        else:
            w = self.get_weights([784, 10])
            b = self.get_weights([10])
            # BUG FIX: this previously read ``tf.matmul(input, w)``, passing the
            # Python builtin ``input`` instead of the ``inputs`` argument, so
            # the n_l == 0 path could never work.
            pred = tf.matmul(inputs, w) + b
        return pred

    def loss(self, i, x):
        """Mean softmax cross-entropy of mini-batch ``i`` under parameters ``x``."""
        last = self.inference(x, self.x[i])
        return 1.00 * tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=last, labels=self.y_[i]))

    def test_optimizee(self, x):
        """Evaluate parameters ``x`` on the test placeholders.

        Returns ``[mean cross-entropy loss, top-1 accuracy, top-2 accuracy]``.
        """
        pred_activation = self.inference(x, self.x_test)
        predction = tf.argmax(pred_activation, axis=1)
        label = tf.argmax(self.y_test, axis=1)
        correct_prediction = tf.equal(predction, label)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        correct_prediction_2 = tf.nn.in_top_k(pred_activation, label, 2)
        accuracy_2 = tf.reduce_mean(tf.cast(correct_prediction_2, tf.float32))
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred_activation, labels=self.y_test))
        return [loss, accuracy, accuracy_2]
def forward():
    """Evaluate a freshly initialized MnistLinearModel on the MNIST test set.

    Returns:
        Tuple ``(loss, accuracy)`` evaluated at random initial parameters.
    """
    forward_net = MnistLinearModel()
    forward_net.build_test()
    test_feed = forward_net.feed_dict_test()
    x = tf.placeholder(dtype=tf.float32, shape=[None])
    para = forward_net.get_initial_x()
    # BUG FIX: test_optimizee returns three tensors (loss, top-1 accuracy,
    # top-2 accuracy); the previous two-way unpacking raised a ValueError.
    fx, accu, _accu_top2 = forward_net.test_optimizee(x)
    feed_dict = forward_net.next_internal_feed_dict()
    feed_dict.update(test_feed)
    feed_dict.update({x: para})
    # Context manager ensures the session is closed (previously it leaked).
    with tf.Session() as sess:
        f, a = sess.run([fx, accu], feed_dict=feed_dict)
    print(f, a)
    return f, a
def main(_):
    """Entry point for ``tf.app.run``: run one forward pass on GPU 0."""
    os.environ["CUDA_VISIBLE_DEVICES"] = str(0)
    graph = tf.Graph()
    gpu_options = tf.GPUOptions(allow_growth=True)
    config = tf.ConfigProto(gpu_options=gpu_options)
    with graph.as_default():
        with tf.Session(config=config, graph=graph) as session:
            f, a = forward()
if __name__ == '__main__':
tf.app.run() | [
"tensorflow.equal",
"tensorflow.nn.elu",
"tensorflow.tanh",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.nn.dropout",
"tensorflow.cast",
"tensorflow.GPUOptions",
"tensorflow.app.run",
"tensorflow.Graph",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflo... | [((141, 152), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (150, 152), False, 'import os\n'), ((210, 252), 'os.path.join', 'os.path.join', (['parent_directory', '"""datasets"""'], {}), "(parent_directory, 'datasets')\n", (222, 252), False, 'import os\n'), ((172, 195), 'os.path.split', 'os.path.split', (['get_path'], {}), '(get_path)\n', (185, 195), False, 'import os\n'), ((4791, 4837), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None]'}), '(dtype=tf.float32, shape=[None])\n', (4805, 4837), True, 'import tensorflow as tf\n'), ((5046, 5058), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5056, 5058), True, 'import tensorflow as tf\n'), ((5171, 5181), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5179, 5181), True, 'import tensorflow as tf\n'), ((5248, 5280), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (5261, 5280), True, 'import tensorflow as tf\n'), ((5471, 5483), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (5481, 5483), True, 'import tensorflow as tf\n'), ((498, 532), 'optimizee.Optimizee.__init__', 'optimizee.Optimizee.__init__', (['self'], {}), '(self)\n', (526, 532), False, 'import optimizee\n'), ((1247, 1292), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, 784]'], {}), '(tf.float32, [None, None, 784])\n', (1261, 1292), True, 'import tensorflow as tf\n'), ((1311, 1355), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, 10]'], {}), '(tf.float32, [None, None, 10])\n', (1325, 1355), True, 'import tensorflow as tf\n'), ((1435, 1474), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 784]'], {}), '(tf.float32, [None, 784])\n', (1449, 1474), True, 'import tensorflow as tf\n'), ((1497, 1535), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 10]'], {}), '(tf.float32, [None, 10])\n', (1511, 1535), True, 'import tensorflow as tf\n'), ((1689, 1756), 
'numpy.random.normal', 'np.random.normal', ([], {'size': '[self.x_dim]', 'scale': 'self.initial_param_scale'}), '(size=[self.x_dim], scale=self.initial_param_scale)\n', (1705, 1756), True, 'import numpy as np\n'), ((1929, 1974), 'numpy.zeros', 'np.zeros', (['[n_iterations, self.n_batches, 784]'], {}), '([n_iterations, self.n_batches, 784])\n', (1937, 1974), True, 'import numpy as np\n'), ((1992, 2036), 'numpy.zeros', 'np.zeros', (['[n_iterations, self.n_batches, 10]'], {}), '([n_iterations, self.n_batches, 10])\n', (2000, 2036), True, 'import numpy as np\n'), ((4136, 4170), 'tensorflow.argmax', 'tf.argmax', (['pred_activation'], {'axis': '(1)'}), '(pred_activation, axis=1)\n', (4145, 4170), True, 'import tensorflow as tf\n'), ((4187, 4217), 'tensorflow.argmax', 'tf.argmax', (['self.y_test'], {'axis': '(1)'}), '(self.y_test, axis=1)\n', (4196, 4217), True, 'import tensorflow as tf\n'), ((4247, 4273), 'tensorflow.equal', 'tf.equal', (['predction', 'label'], {}), '(predction, label)\n', (4255, 4273), True, 'import tensorflow as tf\n'), ((4380, 4421), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['pred_activation', 'label', '(2)'], {}), '(pred_activation, label, 2)\n', (4394, 4421), True, 'import tensorflow as tf\n'), ((1008, 1045), 'os.path.join', 'os.path.join', (['datasets', '"""MNIST_data/"""'], {}), "(datasets, 'MNIST_data/')\n", (1020, 1045), False, 'import os\n'), ((1083, 1133), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['data_path'], {'one_hot': '(True)'}), '(data_path, one_hot=True)\n', (1108, 1133), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((4308, 4347), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (4315, 4347), True, 'import tensorflow as tf\n'), ((4458, 4499), 'tensorflow.cast', 'tf.cast', (['correct_prediction_2', 'tf.float32'], {}), '(correct_prediction_2, tf.float32)\n', (4465, 4499), True, 'import 
tensorflow as tf\n'), ((4531, 4619), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'pred_activation', 'labels': 'self.y_test'}), '(logits=pred_activation, labels=self\n .y_test)\n', (4570, 4619), True, 'import tensorflow as tf\n'), ((3364, 3385), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'w1'], {}), '(inputs, w1)\n', (3373, 3385), True, 'import tensorflow as tf\n'), ((3659, 3678), 'tensorflow.matmul', 'tf.matmul', (['last', 'w2'], {}), '(last, w2)\n', (3668, 3678), True, 'import tensorflow as tf\n'), ((3800, 3819), 'tensorflow.matmul', 'tf.matmul', (['input', 'w'], {}), '(input, w)\n', (3809, 3819), True, 'import tensorflow as tf\n'), ((3952, 4023), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'last', 'labels': 'self.y_[i]'}), '(logits=last, labels=self.y_[i])\n', (3991, 4023), True, 'import tensorflow as tf\n'), ((3068, 3081), 'tensorflow.sigmoid', 'tf.sigmoid', (['x'], {}), '(x)\n', (3078, 3081), True, 'import tensorflow as tf\n'), ((3486, 3508), 'tensorflow.matmul', 'tf.matmul', (['last', 'wl[k]'], {}), '(last, wl[k])\n', (3495, 3508), True, 'import tensorflow as tf\n'), ((3581, 3605), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['last', '(0.5)'], {}), '(last, 0.5)\n', (3594, 3605), True, 'import tensorflow as tf\n'), ((5342, 5381), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (5356, 5381), True, 'import tensorflow as tf\n'), ((3157, 3170), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (3167, 3170), True, 'import tensorflow as tf\n'), ((3245, 3257), 'tensorflow.nn.elu', 'tf.nn.elu', (['x'], {}), '(x)\n', (3254, 3257), True, 'import tensorflow as tf\n'), ((3333, 3343), 'tensorflow.tanh', 'tf.tanh', (['x'], {}), '(x)\n', (3340, 3343), True, 'import tensorflow as tf\n')] |
import os
import dataset
import util.tree_model
import uic.view_dataset
import numpy as np
import dataset.budgetary_consistency
from core import Core
from dataset import Dataset, Analysis, ExportVariant, DatasetHeaderC
from dataset.budgetary_consistency import BudgetaryConsistency
from typing import Sequence, NamedTuple, List, Dict, Tuple, Iterator, Union, Optional, cast
from gui.progress import Worker, Cancelled
from util.codec import FileOut, FileIn, namedtupleC, strC, numpyC, listC
from util.codec_progress import CodecProgress, listCP, oneCP
from PyQt5.QtWidgets import QDialog, QTreeWidgetItem, QHeaderView
class Subject(NamedTuple):
    """One experimental subject: a name plus price and amount matrices.

    ``prices`` and ``amounts`` have one row per observation and one column
    per alternative (see ``load_from_csv``).
    """
    name : str
    prices : np.ndarray
    amounts : np.ndarray

# Binary codec for Subject: the name as a string, both matrices as float32.
SubjectC = namedtupleC(Subject, strC, numpyC(np.float32), numpyC(np.float32))
class RowNode(util.tree_model.Node):
    """Leaf node showing one observation: prices, quantities and expenditure."""

    def __init__(self, parent_node, row: int, prices: np.ndarray, amounts: np.ndarray) -> None:
        price_cells = ['%.2f' % p for p in prices]
        amount_cells = ['%g' % x for x in amounts]
        expenditure = '%.2f' % np.dot(prices, amounts)  # total spend p . q
        util.tree_model.Node.__init__(
            self, parent_node, row,
            fields=[''] + price_cells + amount_cells + [expenditure]
        )
class SubjectNode(util.tree_model.Node):
    """Tree node for one subject; lazily expands into per-observation rows."""

    def __init__(self, parent_node, row: int, subject: Subject) -> None:
        self.subject = subject
        n_observations = subject.prices.shape[0]
        util.tree_model.Node.__init__(
            self, parent_node, row,
            fields=(subject.name,),
            child_count=n_observations,
        )

    def create_child(self, row: int) -> RowNode:
        """Build the RowNode for observation ``row`` of this subject."""
        return RowNode(self, row, self.subject.prices[row], self.subject.amounts[row])
class RootNode(util.tree_model.RootNode):
    """Root of the subject tree; one SubjectNode child per subject."""

    def __init__(self, subjects: List[Subject]) -> None:
        util.tree_model.RootNode.__init__(self, len(subjects))
        self.subjects = subjects

    def create_child(self, row: int) -> SubjectNode:
        """Build the node for ``self.subjects[row]``."""
        return SubjectNode(self, row, self.subjects[row])
class Budgetary(Dataset):
    """A budgetary dataset: per-subject price/quantity observations.

    Supports viewing in a Qt tree dialog, a consistency analysis run through
    the external core process, binary (de)serialization with progress
    reporting, and CSV export.
    """

    class ViewDialog(QDialog, uic.view_dataset.Ui_ViewDataset):
        """Qt dialog that shows the dataset as a subject/observation tree."""

        def __init__(self, ds : 'Budgetary') -> None:
            QDialog.__init__(self)
            self.setupUi(self)
            self.ds = ds
            # One column per alternative for prices and again for quantities,
            # plus the per-row expenditure computed by RowNode.
            self.model = util.tree_model.TreeModel(
                RootNode(ds.subjects),
                headers= \
                    ['Subject'] \
                    + [f'Price {a}' for a in ds.alternatives] \
                    + [f'Quantity {a}' for a in ds.alternatives] \
                    + ['Expenditure']
            )
            self.twRows.setModel(self.model)
            self.twRows.header().setSectionResizeMode(QHeaderView.ResizeToContents)
            self.twRows.header().setStretchLastSection(False)

    def __init__(self, name : str, alternatives : Sequence[str]) -> None:
        """Create an empty dataset with the given name and alternative labels."""
        Dataset.__init__(self, name, alternatives)
        self.subjects : List[Subject] = []
        # Total number of observation rows across all subjects.
        self.nr_observations = 0

    def label_size(self):
        """Human-readable size summary shown in the UI."""
        return f'{len(self.subjects)} subjects, {self.nr_observations} observations'

    def analysis_consistency(self, worker : Worker, _config : None) -> BudgetaryConsistency:
        """Run the budgetary consistency analysis subject-by-subject.

        Each subject is sent to the external core process; progress is
        reported through ``worker`` and the loop can be interrupted by
        shutting the core down.
        """
        with Core() as core:
            worker.interrupt = lambda: core.shutdown()  # interrupt hook
            rows = []
            worker.set_work_size(len(self.subjects))
            for i, subject in enumerate(self.subjects):
                response = core.call(
                    'budgetary-consistency',
                    SubjectC,
                    dataset.budgetary_consistency.SubjectC,
                    subject
                )
                rows.append(response)
                worker.set_progress(i+1)
            ds = BudgetaryConsistency(
                self.name + ' (consistency)',
                self.alternatives,
                rows,
            )
            return ds

    def get_analyses(self) -> Sequence[Analysis]:
        """List the analyses this dataset supports (consistency only)."""
        return (
            Analysis(
                name='Consistency analysis',
                config=None,
                run=self.analysis_consistency,
            ),
        )

    @staticmethod
    def get_codec_progress() -> CodecProgress:
        """Build the progress-aware binary codec for this dataset type."""
        DatasetHeaderC_encode, DatasetHeaderC_decode = DatasetHeaderC
        subjects_get_size, subjects_encode, subjects_decode = listCP(oneCP(SubjectC))
        def get_size(x : 'Budgetary') -> int:
            # Work size in codec units; drives the progress bar.
            return cast(int, subjects_get_size(x.subjects))
        def encode(worker : Worker, f : FileOut, x : 'Budgetary') -> None:
            # Header (name + alternatives) first, then the subject list.
            DatasetHeaderC_encode(f, (x.name, x.alternatives))
            subjects_encode(worker, f, x.subjects)
        def decode(worker : Worker, f : FileIn) -> 'Budgetary':
            # Mirror of encode(); restores the cached observation count.
            ds = Budgetary(*DatasetHeaderC_decode(f))
            ds.subjects = subjects_decode(worker, f)
            ds.update_nr_observations()
            return ds
        return CodecProgress(get_size, encode, decode)

    def update_nr_observations(self) -> None:
        """Recompute the cached total number of observation rows."""
        self.nr_observations = sum(len(s.prices) for s in self.subjects)

    def get_export_variants(self) -> Sequence[ExportVariant]:
        """Describe the available CSV export formats (empty dataset: none)."""
        if not self.subjects:
            return []
        _n_observ, n_alts = self.subjects[0].prices.shape
        return [
            ExportVariant(
                name='Detailed',
                column_names=\
                    ['subject'] \
                    + [f'price{i+1}' for i in range(n_alts)] \
                    + [f'quantity{i+1}' for i in range(n_alts)],
                get_rows=self.export_detailed,
                size=len(self.subjects),
            ),
        ]

    def export_detailed(self) -> Iterator[Optional[Tuple[Union[str, float], ...]]]:
        """Yield one export row per observation; ``None`` after each subject
        bumps the progress bar."""
        for subject in self.subjects:
            # subject.prices and subject.amounts are matrices
            for prices, amounts in zip(subject.prices, subject.amounts):
                row : List[Union[str, float]] = [subject.name]
                row += list(prices)
                row += list(amounts)
                yield tuple(row)
            yield None  # bump progress
class BudgetaryError(Exception):
    """Raised when a budgetary CSV file is empty or malformed."""
def load_from_csv(fname : str) -> Budgetary:
    """Parse a budgetary CSV file into a :class:`Budgetary` dataset.

    Expected layout: one header row, then one row per observation holding
    the subject name followed by N prices and N quantities.

    Raises:
        BudgetaryError: on an empty file, an odd number of numeric columns,
            or a row whose length does not match the header.
    """
    lines = dataset.load_raw_csv(fname)
    if not lines:
        raise BudgetaryError("the CSV file is empty")
    header, *body = lines
    n_numeric = len(header) - 1
    if n_numeric % 2 != 0:
        raise BudgetaryError("budgetary datasets should have an even number of numeric columns")
    n_alts = n_numeric // 2
    alternatives = [f'{i+1}' for i in range(n_alts)]
    # Insertion-ordered mapping: subject name -> (price rows, amount rows).
    per_subject : Dict[str, Tuple[List[np.ndarray], List[np.ndarray]]] = dict()
    for line_no, row in enumerate(body, start=2):
        if len(row) != len(header):
            raise BudgetaryError(f'{fname}, line {line_no}: incorrect number of columns')
        subj_name, *cols = row
        prices, amounts = per_subject.setdefault(subj_name, ([], []))
        prices.append(np.array([float(x) for x in cols[:n_alts]], dtype=np.float32))
        amounts.append(np.array([float(x) for x in cols[n_alts:]], dtype=np.float32))
    ds = Budgetary(os.path.basename(fname), alternatives)
    ds.subjects = [
        Subject(name=name, prices=np.vstack(ps), amounts=np.vstack(ams))
        for name, (ps, ams) in per_subject.items()
    ]
    ds.update_nr_observations()
    return ds
| [
"util.codec.numpyC",
"util.codec_progress.oneCP",
"dataset.load_raw_csv",
"dataset.Analysis",
"dataset.Dataset.__init__",
"PyQt5.QtWidgets.QDialog.__init__",
"numpy.dot",
"util.codec_progress.CodecProgress",
"os.path.basename",
"numpy.vstack",
"dataset.budgetary_consistency.BudgetaryConsistency"... | [((753, 771), 'util.codec.numpyC', 'numpyC', (['np.float32'], {}), '(np.float32)\n', (759, 771), False, 'from util.codec import FileOut, FileIn, namedtupleC, strC, numpyC, listC\n'), ((773, 791), 'util.codec.numpyC', 'numpyC', (['np.float32'], {}), '(np.float32)\n', (779, 791), False, 'from util.codec import FileOut, FileIn, namedtupleC, strC, numpyC, listC\n'), ((6104, 6131), 'dataset.load_raw_csv', 'dataset.load_raw_csv', (['fname'], {}), '(fname)\n', (6124, 6131), False, 'import dataset\n'), ((2797, 2839), 'dataset.Dataset.__init__', 'Dataset.__init__', (['self', 'name', 'alternatives'], {}), '(self, name, alternatives)\n', (2813, 2839), False, 'from dataset import Dataset, Analysis, ExportVariant, DatasetHeaderC\n'), ((3670, 3745), 'dataset.budgetary_consistency.BudgetaryConsistency', 'BudgetaryConsistency', (["(self.name + ' (consistency)')", 'self.alternatives', 'rows'], {}), "(self.name + ' (consistency)', self.alternatives, rows)\n", (3690, 3745), False, 'from dataset.budgetary_consistency import BudgetaryConsistency\n'), ((4816, 4855), 'util.codec_progress.CodecProgress', 'CodecProgress', (['get_size', 'encode', 'decode'], {}), '(get_size, encode, decode)\n', (4829, 4855), False, 'from util.codec_progress import CodecProgress, listCP, oneCP\n'), ((7063, 7086), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (7079, 7086), False, 'import os\n'), ((2107, 2129), 'PyQt5.QtWidgets.QDialog.__init__', 'QDialog.__init__', (['self'], {}), '(self)\n', (2123, 2129), False, 'from PyQt5.QtWidgets import QDialog, QTreeWidgetItem, QHeaderView\n'), ((3135, 3141), 'core.Core', 'Core', ([], {}), '()\n', (3139, 3141), False, 'from core import Core\n'), ((3891, 3977), 'dataset.Analysis', 'Analysis', ([], {'name': '"""Consistency analysis"""', 'config': 'None', 'run': 'self.analysis_consistency'}), "(name='Consistency analysis', config=None, run=self.\n analysis_consistency)\n", (3899, 3977), False, 
'from dataset import Dataset, Analysis, ExportVariant, DatasetHeaderC\n'), ((4252, 4267), 'util.codec_progress.oneCP', 'oneCP', (['SubjectC'], {}), '(SubjectC)\n', (4257, 4267), False, 'from util.codec_progress import CodecProgress, listCP, oneCP\n'), ((7153, 7166), 'numpy.vstack', 'np.vstack', (['ps'], {}), '(ps)\n', (7162, 7166), True, 'import numpy as np\n'), ((7176, 7190), 'numpy.vstack', 'np.vstack', (['ams'], {}), '(ams)\n', (7185, 7190), True, 'import numpy as np\n'), ((1153, 1176), 'numpy.dot', 'np.dot', (['prices', 'amounts'], {}), '(prices, amounts)\n', (1159, 1176), True, 'import numpy as np\n')] |
# Question: https://projecteuler.net/problem=301
import numpy as np
# Per https://en.wikipedia.org/wiki/Nim, the player to move loses exactly when
# the XOR of the three heap sizes is 0, and XOR(n, 2n, 3n) == 0 precisely when
# the binary expansion of n contains no two consecutive 1 bits.
# Sketch: write n = ..a11... / 2n = ..11b... / 3n = ..cde... and track the
# carry z produced by n + 2n at that position:
#   z = 0, a=0,b=0 -> cde = 001 -> XOR bit b1 is 1
#   z = 0, a=0,b=1 -> cde = 010 -> XOR bit b2 is 1
#   z = 0, a=1,b=0 -> cde = 101 -> XOR bit b1 is 1
#   z = 0, a=1,b=1 -> cde = 110 -> XOR bit b2 is 1
#   z = 1, b=0     -> d = 1     -> XOR bit b2 is 1
#   z = 1, b=1     -> e = 1     -> XOR bit b3 is 1
# So any adjacent pair of 1s forces a non-zero XOR, and counting the winners
# reduces to counting binary strings with no two consecutive 1s.
N = 30  # count numbers of at most 30 binary digits; 2^30 itself is handled below
# DP[i][j]: how many numbers of at most i binary digits, with no consecutive
# 1 bits, end in digit j.
DP = np.zeros((N + 1, 2), dtype=np.uint32)
DP[1] = (1, 1)
for width in range(2, N + 1):
    DP[width, 0] = DP[width - 1, 0] + DP[width - 1, 1]
    DP[width, 1] = DP[width - 1, 0]  # a 1 may only follow a 0
# +1 for 2^30 (a 1 followed by 30 zeros also qualifies), -1 to drop n = 0
print(DP[N][0] + DP[N][1] + 1 - 1)
| [
"numpy.zeros"
] | [((1260, 1297), 'numpy.zeros', 'np.zeros', (['(N + 1, 2)'], {'dtype': 'np.uint32'}), '((N + 1, 2), dtype=np.uint32)\n', (1268, 1297), True, 'import numpy as np\n')] |
from numpy import array

# Nominal octave-band centre frequencies in hertz, 31.5 Hz up to 16 kHz.
NOMINAL_OCTAVE_CENTER_FREQUENCIES = array(
    [31.5, 63.0, 125.0, 250.0, 500.0,
     1000.0, 2000.0, 4000.0, 8000.0, 16000.0]
)

# Nominal one-third-octave-band centre frequencies in hertz, 25 Hz up to 20 kHz.
NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES = array(
    [25.0, 31.5, 40.0, 50.0, 63.0, 80.0, 100.0, 125.0, 160.0, 200.0,
     250.0, 315.0, 400.0, 500.0, 630.0, 800.0, 1000.0, 1250.0, 1600.0,
     2000.0, 2500.0, 3150.0, 4000.0, 5000.0, 6300.0, 8000.0, 10000.0,
     12500.0, 16000.0, 20000.0]
)
| [
"numpy.array"
] | [((61, 147), 'numpy.array', 'array', (['[31.5, 63.0, 125.0, 250.0, 500.0, 1000.0, 2000.0, 4000.0, 8000.0, 16000.0]'], {}), '([31.5, 63.0, 125.0, 250.0, 500.0, 1000.0, 2000.0, 4000.0, 8000.0, \n 16000.0])\n', (66, 147), False, 'from numpy import array\n'), ((279, 523), 'numpy.array', 'array', (['[25.0, 31.5, 40.0, 50.0, 63.0, 80.0, 100.0, 125.0, 160.0, 200.0, 250.0, \n 315.0, 400.0, 500.0, 630.0, 800.0, 1000.0, 1250.0, 1600.0, 2000.0, \n 2500.0, 3150.0, 4000.0, 5000.0, 6300.0, 8000.0, 10000.0, 12500.0, \n 16000.0, 20000.0]'], {}), '([25.0, 31.5, 40.0, 50.0, 63.0, 80.0, 100.0, 125.0, 160.0, 200.0, \n 250.0, 315.0, 400.0, 500.0, 630.0, 800.0, 1000.0, 1250.0, 1600.0, \n 2000.0, 2500.0, 3150.0, 4000.0, 5000.0, 6300.0, 8000.0, 10000.0, \n 12500.0, 16000.0, 20000.0])\n', (284, 523), False, 'from numpy import array\n')] |
import unittest
import warnings
import numpy as np
from girth.synthetic import create_synthetic_irt_dichotomous, create_synthetic_irt_polytomous
from girth import (rasch_jml, onepl_jml, twopl_jml, grm_jml, pcm_jml,
rasch_mml, onepl_mml, twopl_mml, twopl_mml_eap, grm_mml_eap, pcm_mml,
grm_mml, rasch_conditional, standard_errors_bootstrap)
def _contains_keys(results, identifier):
"""Checks for standard keys in bootstrap result."""
for key in ['Standard Errors', '95th CI', 'Bias', 'Solution']:
if key not in results.keys():
raise AssertionError(f"Key: {key} not found in return argument."
f"Error in {identifier}")
for key in results['95th CI']:
if np.any(results['95th CI'][key][1] < results['95th CI'][key][0]):
raise AssertionError(f"Confidence Interval Error. {key} "
f"Error in {identifier}")
# Silence warnings emitted by the estimation routines during these smoke tests.
warnings.filterwarnings('ignore')
class TestBootstrapStandardErrors(unittest.TestCase):
    """Test Fixture for Bootstrap Standard Errors."""

    # Smoke Tests to make sure they give an output
    # Tests bootstrap errors

    def setUp(self):
        # Fixed seed so the synthetic item parameters are reproducible.
        rng = np.random.default_rng(48725309847520)

        self.discrimination = 0.25 + rng.rayleigh(.7, 5)
        self.difficulty = np.linspace(-1.5, 1.5, 5)
        # Polytomous difficulties: 5 items x 3 thresholds, sorted per item.
        self.difficulty_poly = np.sort(rng.standard_normal((5, 3)), axis=1)
        self.theta = rng.standard_normal(1000)
        # Deliberately tiny iteration/bootstrap budgets: these tests only
        # check that the functions run and return well-formed results.
        self.options = {'max_iteration': 2}
        self.boot_iter = 10

    def test_jml_methods_dichotomous(self):
        """Testing Bootstrap on JML Methods Dichotomous."""
        rng = np.random.default_rng(39485720394875)
        dataset = create_synthetic_irt_dichotomous(self.difficulty, self.discrimination,
                                                   self.theta, seed=rng)

        result = standard_errors_bootstrap(dataset, rasch_jml, n_processors=1,
                                           bootstrap_iterations=self.boot_iter,
                                           options=self.options)
        # NOTE(review): presumably the Rasch model holds discrimination fixed,
        # so its bootstrap standard error is expected to be exactly zero.
        self.assertEqual(result['Standard Errors']['Discrimination'][0], 0)
        _contains_keys(result, 'Rasch JML')

        result = standard_errors_bootstrap(dataset, onepl_jml, n_processors=2,
                                           bootstrap_iterations=self.boot_iter,
                                           options=self.options)
        _contains_keys(result, '1PL JML')

        result = standard_errors_bootstrap(dataset, twopl_jml, n_processors=2,
                                           bootstrap_iterations=self.boot_iter,
                                           options=self.options)
        _contains_keys(result, '2PL JML')

    @unittest.skip(reason="Github")
    def test_jml_methods_polytomous(self):
        """Testing Bootstrap on JML Methods Polytomous."""
        rng = np.random.default_rng(8672379287302651089)
        dataset = create_synthetic_irt_polytomous(self.difficulty_poly, self.discrimination,
                                                  self.theta, seed=rng)

        result = standard_errors_bootstrap(dataset, grm_jml, n_processors=2,
                                           bootstrap_iterations=self.boot_iter,
                                           options=self.options)
        self.assertTupleEqual(result['95th CI']['Difficulty'][0].shape,
                              self.difficulty_poly.shape)
        _contains_keys(result, 'GRM JML')

        # The same generator continues here, so the PCM data differ from the
        # GRM data above even though the item parameters are identical.
        dataset = create_synthetic_irt_polytomous(self.difficulty_poly, self.discrimination,
                                                  self.theta, seed=rng, model='pcm')

        result = standard_errors_bootstrap(dataset, pcm_jml, n_processors=2,
                                           bootstrap_iterations=self.boot_iter,
                                           options=self.options)
        self.assertTupleEqual(result['95th CI']['Difficulty'][0].shape,
                              self.difficulty_poly.shape)
        _contains_keys(result, 'PCM JML')

    @unittest.skip(reason="Github")
    def test_rasch_conditional(self):
        """Testing rasch conditional methods."""
        rng = np.random.default_rng(426376867989075563)
        dataset = create_synthetic_irt_dichotomous(self.difficulty, self.discrimination,
                                                   self.theta, seed=rng)

        result = standard_errors_bootstrap(dataset, rasch_conditional,
                                           n_processors=2,
                                           bootstrap_iterations=self.boot_iter,
                                           options=self.options)
        self.assertEqual(result['Standard Errors']['Discrimination'][0], 0)
        _contains_keys(result, 'Rasch MML')

    @unittest.skip(reason="Github")
    def test_mml_methods_dichotomous(self):
        """Testing Bootstrap on MML Methods Dichotomous."""
        rng = np.random.default_rng(8764328976187234)
        dataset = create_synthetic_irt_dichotomous(self.difficulty, self.discrimination,
                                                   self.theta, seed=rng)

        result = standard_errors_bootstrap(dataset, rasch_mml, n_processors=2,
                                           bootstrap_iterations=self.boot_iter,
                                           options=self.options)
        self.assertEqual(result['Standard Errors']['Discrimination'][0], 0)
        _contains_keys(result, 'Rasch MML')

        result = standard_errors_bootstrap(dataset, onepl_mml, n_processors=2,
                                           bootstrap_iterations=self.boot_iter,
                                           options=self.options)
        _contains_keys(result, '1PL MML')

        result = standard_errors_bootstrap(dataset, twopl_mml, n_processors=2,
                                           bootstrap_iterations=self.boot_iter,
                                           options=self.options)
        _contains_keys(result, '2PL MML')

    @unittest.skip(reason="Github")
    def test_mml_methods_polytomous(self):
        """Testing Bootstrap on MML Methods Polytomous."""
        rng = np.random.default_rng(4347621232345345696)
        dataset = create_synthetic_irt_polytomous(self.difficulty_poly, self.discrimination,
                                                  self.theta, seed=rng)

        result = standard_errors_bootstrap(dataset, grm_mml, n_processors=2,
                                           bootstrap_iterations=self.boot_iter,
                                           options=self.options)
        self.assertTupleEqual(result['95th CI']['Difficulty'][0].shape,
                              self.difficulty_poly.shape)
        _contains_keys(result, 'GRM MML')

        dataset = create_synthetic_irt_polytomous(self.difficulty_poly, self.discrimination,
                                                  self.theta, seed=rng, model='pcm')

        result = standard_errors_bootstrap(dataset, pcm_mml, n_processors=2,
                                           bootstrap_iterations=self.boot_iter,
                                           options=self.options)
        self.assertTupleEqual(result['95th CI']['Difficulty'][0].shape,
                              self.difficulty_poly.shape)
        _contains_keys(result, 'PCM MML')

    @unittest.skip(reason="Github")
    def test_eap_mml_methods(self):
        """Testing Bootstrap on eap methods."""
        rng = np.random.default_rng(66739234876520981)
        dataset = create_synthetic_irt_dichotomous(self.difficulty, self.discrimination,
                                                   self.theta, seed=rng)

        result = standard_errors_bootstrap(dataset, twopl_mml_eap, n_processors=2,
                                           bootstrap_iterations=self.boot_iter,
                                           options=self.options)
        _contains_keys(result, '2PL EAP-MML')

        dataset = create_synthetic_irt_polytomous(self.difficulty_poly, self.discrimination,
                                                  self.theta, seed=rng)

        result = standard_errors_bootstrap(dataset, grm_mml_eap, n_processors=2,
                                           bootstrap_iterations=self.boot_iter,
                                           options=self.options)
        self.assertTupleEqual(result['95th CI']['Difficulty'][0].shape,
                              self.difficulty_poly.shape)
        _contains_keys(result, 'GRM EAP-MML')
if __name__ == "__main__":
    # ``warnings`` is already imported at module scope, so the redundant local
    # ``import warnings`` was dropped.
    warnings.filterwarnings('ignore')
    unittest.main()
| [
"girth.standard_errors_bootstrap",
"numpy.random.default_rng",
"numpy.any",
"numpy.linspace",
"girth.synthetic.create_synthetic_irt_polytomous",
"unittest.main",
"unittest.skip",
"warnings.filterwarnings",
"girth.synthetic.create_synthetic_irt_dichotomous"
] | [((937, 970), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (960, 970), False, 'import warnings\n'), ((2760, 2790), 'unittest.skip', 'unittest.skip', ([], {'reason': '"""Github"""'}), "(reason='Github')\n", (2773, 2790), False, 'import unittest\n'), ((4105, 4135), 'unittest.skip', 'unittest.skip', ([], {'reason': '"""Github"""'}), "(reason='Github')\n", (4118, 4135), False, 'import unittest\n'), ((4846, 4876), 'unittest.skip', 'unittest.skip', ([], {'reason': '"""Github"""'}), "(reason='Github')\n", (4859, 4876), False, 'import unittest\n'), ((6086, 6116), 'unittest.skip', 'unittest.skip', ([], {'reason': '"""Github"""'}), "(reason='Github')\n", (6099, 6116), False, 'import unittest\n'), ((7431, 7461), 'unittest.skip', 'unittest.skip', ([], {'reason': '"""Github"""'}), "(reason='Github')\n", (7444, 7461), False, 'import unittest\n'), ((8683, 8716), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (8706, 8716), False, 'import warnings\n'), ((8725, 8740), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8738, 8740), False, 'import unittest\n'), ((740, 803), 'numpy.any', 'np.any', (["(results['95th CI'][key][1] < results['95th CI'][key][0])"], {}), "(results['95th CI'][key][1] < results['95th CI'][key][0])\n", (746, 803), True, 'import numpy as np\n'), ((1206, 1243), 'numpy.random.default_rng', 'np.random.default_rng', (['(48725309847520)'], {}), '(48725309847520)\n', (1227, 1243), True, 'import numpy as np\n'), ((1327, 1352), 'numpy.linspace', 'np.linspace', (['(-1.5)', '(1.5)', '(5)'], {}), '(-1.5, 1.5, 5)\n', (1338, 1352), True, 'import numpy as np\n'), ((1671, 1708), 'numpy.random.default_rng', 'np.random.default_rng', (['(39485720394875)'], {}), '(39485720394875)\n', (1692, 1708), True, 'import numpy as np\n'), ((1727, 1824), 'girth.synthetic.create_synthetic_irt_dichotomous', 'create_synthetic_irt_dichotomous', (['self.difficulty', 'self.discrimination', 
'self.theta'], {'seed': 'rng'}), '(self.difficulty, self.discrimination, self\n .theta, seed=rng)\n', (1759, 1824), False, 'from girth.synthetic import create_synthetic_irt_dichotomous, create_synthetic_irt_polytomous\n'), ((1890, 2014), 'girth.standard_errors_bootstrap', 'standard_errors_bootstrap', (['dataset', 'rasch_jml'], {'n_processors': '(1)', 'bootstrap_iterations': 'self.boot_iter', 'options': 'self.options'}), '(dataset, rasch_jml, n_processors=1,\n bootstrap_iterations=self.boot_iter, options=self.options)\n', (1915, 2014), False, 'from girth import rasch_jml, onepl_jml, twopl_jml, grm_jml, pcm_jml, rasch_mml, onepl_mml, twopl_mml, twopl_mml_eap, grm_mml_eap, pcm_mml, grm_mml, rasch_conditional, standard_errors_bootstrap\n'), ((2236, 2360), 'girth.standard_errors_bootstrap', 'standard_errors_bootstrap', (['dataset', 'onepl_jml'], {'n_processors': '(2)', 'bootstrap_iterations': 'self.boot_iter', 'options': 'self.options'}), '(dataset, onepl_jml, n_processors=2,\n bootstrap_iterations=self.boot_iter, options=self.options)\n', (2261, 2360), False, 'from girth import rasch_jml, onepl_jml, twopl_jml, grm_jml, pcm_jml, rasch_mml, onepl_mml, twopl_mml, twopl_mml_eap, grm_mml_eap, pcm_mml, grm_mml, rasch_conditional, standard_errors_bootstrap\n'), ((2504, 2628), 'girth.standard_errors_bootstrap', 'standard_errors_bootstrap', (['dataset', 'twopl_jml'], {'n_processors': '(2)', 'bootstrap_iterations': 'self.boot_iter', 'options': 'self.options'}), '(dataset, twopl_jml, n_processors=2,\n bootstrap_iterations=self.boot_iter, options=self.options)\n', (2529, 2628), False, 'from girth import rasch_jml, onepl_jml, twopl_jml, grm_jml, pcm_jml, rasch_mml, onepl_mml, twopl_mml, twopl_mml_eap, grm_mml_eap, pcm_mml, grm_mml, rasch_conditional, standard_errors_bootstrap\n'), ((2907, 2949), 'numpy.random.default_rng', 'np.random.default_rng', (['(8672379287302651089)'], {}), '(8672379287302651089)\n', (2928, 2949), True, 'import numpy as np\n'), ((2977, 3077), 
'girth.synthetic.create_synthetic_irt_polytomous', 'create_synthetic_irt_polytomous', (['self.difficulty_poly', 'self.discrimination', 'self.theta'], {'seed': 'rng'}), '(self.difficulty_poly, self.discrimination,\n self.theta, seed=rng)\n', (3008, 3077), False, 'from girth.synthetic import create_synthetic_irt_dichotomous, create_synthetic_irt_polytomous\n'), ((3143, 3265), 'girth.standard_errors_bootstrap', 'standard_errors_bootstrap', (['dataset', 'grm_jml'], {'n_processors': '(2)', 'bootstrap_iterations': 'self.boot_iter', 'options': 'self.options'}), '(dataset, grm_jml, n_processors=2,\n bootstrap_iterations=self.boot_iter, options=self.options)\n', (3168, 3265), False, 'from girth import rasch_jml, onepl_jml, twopl_jml, grm_jml, pcm_jml, rasch_mml, onepl_mml, twopl_mml, twopl_mml_eap, grm_mml_eap, pcm_mml, grm_mml, rasch_conditional, standard_errors_bootstrap\n'), ((3541, 3654), 'girth.synthetic.create_synthetic_irt_polytomous', 'create_synthetic_irt_polytomous', (['self.difficulty_poly', 'self.discrimination', 'self.theta'], {'seed': 'rng', 'model': '"""pcm"""'}), "(self.difficulty_poly, self.discrimination,\n self.theta, seed=rng, model='pcm')\n", (3572, 3654), False, 'from girth.synthetic import create_synthetic_irt_dichotomous, create_synthetic_irt_polytomous\n'), ((3720, 3842), 'girth.standard_errors_bootstrap', 'standard_errors_bootstrap', (['dataset', 'pcm_jml'], {'n_processors': '(2)', 'bootstrap_iterations': 'self.boot_iter', 'options': 'self.options'}), '(dataset, pcm_jml, n_processors=2,\n bootstrap_iterations=self.boot_iter, options=self.options)\n', (3745, 3842), False, 'from girth import rasch_jml, onepl_jml, twopl_jml, grm_jml, pcm_jml, rasch_mml, onepl_mml, twopl_mml, twopl_mml_eap, grm_mml_eap, pcm_mml, grm_mml, rasch_conditional, standard_errors_bootstrap\n'), ((4237, 4278), 'numpy.random.default_rng', 'np.random.default_rng', (['(426376867989075563)'], {}), '(426376867989075563)\n', (4258, 4278), True, 'import numpy as np\n'), ((4297, 4394), 
'girth.synthetic.create_synthetic_irt_dichotomous', 'create_synthetic_irt_dichotomous', (['self.difficulty', 'self.discrimination', 'self.theta'], {'seed': 'rng'}), '(self.difficulty, self.discrimination, self\n .theta, seed=rng)\n', (4329, 4394), False, 'from girth.synthetic import create_synthetic_irt_dichotomous, create_synthetic_irt_polytomous\n'), ((4460, 4592), 'girth.standard_errors_bootstrap', 'standard_errors_bootstrap', (['dataset', 'rasch_conditional'], {'n_processors': '(2)', 'bootstrap_iterations': 'self.boot_iter', 'options': 'self.options'}), '(dataset, rasch_conditional, n_processors=2,\n bootstrap_iterations=self.boot_iter, options=self.options)\n', (4485, 4592), False, 'from girth import rasch_jml, onepl_jml, twopl_jml, grm_jml, pcm_jml, rasch_mml, onepl_mml, twopl_mml, twopl_mml_eap, grm_mml_eap, pcm_mml, grm_mml, rasch_conditional, standard_errors_bootstrap\n'), ((4995, 5034), 'numpy.random.default_rng', 'np.random.default_rng', (['(8764328976187234)'], {}), '(8764328976187234)\n', (5016, 5034), True, 'import numpy as np\n'), ((5053, 5150), 'girth.synthetic.create_synthetic_irt_dichotomous', 'create_synthetic_irt_dichotomous', (['self.difficulty', 'self.discrimination', 'self.theta'], {'seed': 'rng'}), '(self.difficulty, self.discrimination, self\n .theta, seed=rng)\n', (5085, 5150), False, 'from girth.synthetic import create_synthetic_irt_dichotomous, create_synthetic_irt_polytomous\n'), ((5216, 5340), 'girth.standard_errors_bootstrap', 'standard_errors_bootstrap', (['dataset', 'rasch_mml'], {'n_processors': '(2)', 'bootstrap_iterations': 'self.boot_iter', 'options': 'self.options'}), '(dataset, rasch_mml, n_processors=2,\n bootstrap_iterations=self.boot_iter, options=self.options)\n', (5241, 5340), False, 'from girth import rasch_jml, onepl_jml, twopl_jml, grm_jml, pcm_jml, rasch_mml, onepl_mml, twopl_mml, twopl_mml_eap, grm_mml_eap, pcm_mml, grm_mml, rasch_conditional, standard_errors_bootstrap\n'), ((5562, 5686), 
'girth.standard_errors_bootstrap', 'standard_errors_bootstrap', (['dataset', 'onepl_mml'], {'n_processors': '(2)', 'bootstrap_iterations': 'self.boot_iter', 'options': 'self.options'}), '(dataset, onepl_mml, n_processors=2,\n bootstrap_iterations=self.boot_iter, options=self.options)\n', (5587, 5686), False, 'from girth import rasch_jml, onepl_jml, twopl_jml, grm_jml, pcm_jml, rasch_mml, onepl_mml, twopl_mml, twopl_mml_eap, grm_mml_eap, pcm_mml, grm_mml, rasch_conditional, standard_errors_bootstrap\n'), ((5830, 5954), 'girth.standard_errors_bootstrap', 'standard_errors_bootstrap', (['dataset', 'twopl_mml'], {'n_processors': '(2)', 'bootstrap_iterations': 'self.boot_iter', 'options': 'self.options'}), '(dataset, twopl_mml, n_processors=2,\n bootstrap_iterations=self.boot_iter, options=self.options)\n', (5855, 5954), False, 'from girth import rasch_jml, onepl_jml, twopl_jml, grm_jml, pcm_jml, rasch_mml, onepl_mml, twopl_mml, twopl_mml_eap, grm_mml_eap, pcm_mml, grm_mml, rasch_conditional, standard_errors_bootstrap\n'), ((6233, 6275), 'numpy.random.default_rng', 'np.random.default_rng', (['(4347621232345345696)'], {}), '(4347621232345345696)\n', (6254, 6275), True, 'import numpy as np\n'), ((6303, 6403), 'girth.synthetic.create_synthetic_irt_polytomous', 'create_synthetic_irt_polytomous', (['self.difficulty_poly', 'self.discrimination', 'self.theta'], {'seed': 'rng'}), '(self.difficulty_poly, self.discrimination,\n self.theta, seed=rng)\n', (6334, 6403), False, 'from girth.synthetic import create_synthetic_irt_dichotomous, create_synthetic_irt_polytomous\n'), ((6469, 6591), 'girth.standard_errors_bootstrap', 'standard_errors_bootstrap', (['dataset', 'grm_mml'], {'n_processors': '(2)', 'bootstrap_iterations': 'self.boot_iter', 'options': 'self.options'}), '(dataset, grm_mml, n_processors=2,\n bootstrap_iterations=self.boot_iter, options=self.options)\n', (6494, 6591), False, 'from girth import rasch_jml, onepl_jml, twopl_jml, grm_jml, pcm_jml, rasch_mml, onepl_mml, 
twopl_mml, twopl_mml_eap, grm_mml_eap, pcm_mml, grm_mml, rasch_conditional, standard_errors_bootstrap\n'), ((6867, 6980), 'girth.synthetic.create_synthetic_irt_polytomous', 'create_synthetic_irt_polytomous', (['self.difficulty_poly', 'self.discrimination', 'self.theta'], {'seed': 'rng', 'model': '"""pcm"""'}), "(self.difficulty_poly, self.discrimination,\n self.theta, seed=rng, model='pcm')\n", (6898, 6980), False, 'from girth.synthetic import create_synthetic_irt_dichotomous, create_synthetic_irt_polytomous\n'), ((7046, 7168), 'girth.standard_errors_bootstrap', 'standard_errors_bootstrap', (['dataset', 'pcm_mml'], {'n_processors': '(2)', 'bootstrap_iterations': 'self.boot_iter', 'options': 'self.options'}), '(dataset, pcm_mml, n_processors=2,\n bootstrap_iterations=self.boot_iter, options=self.options)\n', (7071, 7168), False, 'from girth import rasch_jml, onepl_jml, twopl_jml, grm_jml, pcm_jml, rasch_mml, onepl_mml, twopl_mml, twopl_mml_eap, grm_mml_eap, pcm_mml, grm_mml, rasch_conditional, standard_errors_bootstrap\n'), ((7560, 7600), 'numpy.random.default_rng', 'np.random.default_rng', (['(66739234876520981)'], {}), '(66739234876520981)\n', (7581, 7600), True, 'import numpy as np\n'), ((7619, 7716), 'girth.synthetic.create_synthetic_irt_dichotomous', 'create_synthetic_irt_dichotomous', (['self.difficulty', 'self.discrimination', 'self.theta'], {'seed': 'rng'}), '(self.difficulty, self.discrimination, self\n .theta, seed=rng)\n', (7651, 7716), False, 'from girth.synthetic import create_synthetic_irt_dichotomous, create_synthetic_irt_polytomous\n'), ((7782, 7910), 'girth.standard_errors_bootstrap', 'standard_errors_bootstrap', (['dataset', 'twopl_mml_eap'], {'n_processors': '(2)', 'bootstrap_iterations': 'self.boot_iter', 'options': 'self.options'}), '(dataset, twopl_mml_eap, n_processors=2,\n bootstrap_iterations=self.boot_iter, options=self.options)\n', (7807, 7910), False, 'from girth import rasch_jml, onepl_jml, twopl_jml, grm_jml, pcm_jml, rasch_mml, 
onepl_mml, twopl_mml, twopl_mml_eap, grm_mml_eap, pcm_mml, grm_mml, rasch_conditional, standard_errors_bootstrap\n'), ((8075, 8175), 'girth.synthetic.create_synthetic_irt_polytomous', 'create_synthetic_irt_polytomous', (['self.difficulty_poly', 'self.discrimination', 'self.theta'], {'seed': 'rng'}), '(self.difficulty_poly, self.discrimination,\n self.theta, seed=rng)\n', (8106, 8175), False, 'from girth.synthetic import create_synthetic_irt_dichotomous, create_synthetic_irt_polytomous\n'), ((8241, 8367), 'girth.standard_errors_bootstrap', 'standard_errors_bootstrap', (['dataset', 'grm_mml_eap'], {'n_processors': '(2)', 'bootstrap_iterations': 'self.boot_iter', 'options': 'self.options'}), '(dataset, grm_mml_eap, n_processors=2,\n bootstrap_iterations=self.boot_iter, options=self.options)\n', (8266, 8367), False, 'from girth import rasch_jml, onepl_jml, twopl_jml, grm_jml, pcm_jml, rasch_mml, onepl_mml, twopl_mml, twopl_mml_eap, grm_mml_eap, pcm_mml, grm_mml, rasch_conditional, standard_errors_bootstrap\n')] |
"""
The :mod:`sklearn.utils` module includes various utilities.
"""
import numbers
import platform
import struct
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric)
from .class_weight import compute_class_weight, compute_sample_weight
from ._joblib import cpu_count, Parallel, Memory, delayed, hash
from ._joblib import parallel_backend, register_parallel_backend
from ._joblib import effective_n_jobs
from ..exceptions import DataConversionWarning
from ..utils.fixes import _Sequence as Sequence
from .deprecation import deprecated
from .. import get_config
# Public names re-exported by ``sklearn.utils``.
__all__ = ["murmurhash3_32", "as_float_array",
           "assert_all_finite", "check_array",
           "check_random_state",
           "compute_class_weight", "compute_sample_weight",
           "column_or_1d", "safe_indexing",
           "check_consistent_length", "check_X_y", 'indexable',
           "check_symmetric", "indices_to_mask", "deprecated",
           "cpu_count", "Parallel", "Memory", "delayed", "parallel_backend",
           "register_parallel_backend", "hash", "effective_n_jobs"]
# True when running under the PyPy interpreter.
IS_PYPY = platform.python_implementation() == 'PyPy'
# True on 32-bit builds: a C pointer ("P") occupies 4 bytes, i.e. 32 bits.
_IS_32BIT = 8 * struct.calcsize("P") == 32
class Bunch(dict):
    """Container object for datasets

    Dictionary-like object that exposes its keys as attributes.

    >>> b = Bunch(a=1, b=2)
    >>> b['b']
    2
    >>> b.b
    2
    >>> b.a = 3
    >>> b['a']
    3
    >>> b.c = 6
    >>> b['c']
    6

    """

    def __init__(self, **kwargs):
        super(Bunch, self).__init__(kwargs)

    def __setattr__(self, name, value):
        # Attribute assignment is plain item assignment.
        self[name] = value

    def __dir__(self):
        return self.keys()

    def __getattr__(self, name):
        # Fall back to item lookup; report a missing key as a missing
        # attribute so getattr()/hasattr() behave as expected.
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setstate__(self, state):
        # Deliberate no-op: pickles produced by scikit-learn 0.16.* carry a
        # non-empty __dict__, and restoring it would make ``bunch.key`` reads
        # (via __dict__) diverge from ``bunch['key']`` writes (via
        # __setattr__).  Ignoring the pickled __dict__ avoids that split; see
        # https://github.com/scikit-learn/scikit-learn/issues/6196.
        pass
def safe_mask(X, mask):
    """Return a mask which is safe to use on X.

    Integer masks pass through unchanged; a boolean mask is converted to
    integer row indices when X is a sparse matrix.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        Data on which to apply mask.

    mask : array
        Mask to be used on X.

    Returns
    -------
    mask
    """
    mask = np.asarray(mask)
    if np.issubdtype(mask.dtype, np.signedinteger):
        # Already integer indices: nothing to translate.
        return mask
    if hasattr(X, "toarray"):
        # Sparse input: turn the boolean mask into explicit row indices.
        mask = np.arange(mask.shape[0])[mask]
    return mask
def axis0_safe_slice(X, mask, len_mask):
    """Return the rows of X selected by a boolean mask, safely.

    This is safer than safe_mask alone: slicing a sparse matrix with an
    all-False boolean mask raises an unhelpful error in older versions of
    SciPy (see https://github.com/scipy/scipy/issues/5361), so an explicitly
    empty array is returned instead when nothing is selected.

    The ``len_mask`` check could also be used to skip a dot product in
    ``_huber_loss_and_gradient``, but that is not the bottleneck there (the
    numbers of outliers and non-outliers are typically non-zero) and doing so
    would make that code harder to follow.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        Data on which to apply mask.

    mask : array
        Mask to be used on X.

    len_mask : int
        The length of the mask.

    Returns
    -------
    mask
    """
    if len_mask == 0:
        # Nothing selected: hand back an empty array with matching columns.
        return np.zeros(shape=(0, X.shape[1]))
    return X[safe_mask(X, mask), :]
def safe_indexing(X, indices):
    """Return items or rows from X using indices.

    Allows simple indexing of lists or arrays.

    Parameters
    ----------
    X : array-like, sparse-matrix, list, pandas.DataFrame, pandas.Series.
        Data from which to sample rows or items.
    indices : array-like of int
        Indices according to which X will be subsampled.

    Returns
    -------
    subset
        Subset of X on first axis

    Notes
    -----
    CSR, CSC, and LIL sparse matrices are supported. COO sparse matrices are
    not supported.
    """
    if hasattr(X, "iloc"):
        # Pandas object.  Cython typed memoryviews inside pandas cannot use
        # read-only index buffers, so copy the indices first if needed.
        if not indices.flags.writeable:
            indices = indices.copy()
        try:
            return X.iloc[indices]
        except ValueError:
            warnings.warn("Copying input dataframe for slicing.",
                          DataConversionWarning)
            return X.copy().iloc[indices]
    if hasattr(X, "shape"):
        if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
                                   indices.dtype.kind == 'i'):
            # take() is often substantially faster than fancy indexing.
            return X.take(indices, axis=0)
        return X[indices]
    # Plain Python sequence.
    return [X[idx] for idx in indices]
def resample(*arrays, **options):
    """Resample arrays or sparse matrices in a consistent way

    The default strategy implements one step of the bootstrapping
    procedure.

    Parameters
    ----------
    *arrays : sequence of indexable data-structures
        Indexable data-structures can be arrays, lists, dataframes or scipy
        sparse matrices with consistent first dimension.

    Other Parameters
    ----------------
    replace : boolean, True by default
        Implements resampling with replacement. If False, this will implement
        (sliced) random permutations.

    n_samples : int, None by default
        Number of samples to generate. If left to None this is
        automatically set to the first dimension of the arrays.
        If replace is False it should not be larger than the length of
        arrays.

    random_state : int, RandomState instance or None, optional (default=None)
        The seed of the pseudo random number generator to use when shuffling
        the data.  If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`.

    Returns
    -------
    resampled_arrays : sequence of indexable data-structures
        Sequence of resampled copies of the collections. The original arrays
        are not impacted.

    Examples
    --------
    It is possible to mix sparse and dense arrays in the same run::

      >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
      >>> y = np.array([0, 1, 2])

      >>> from scipy.sparse import coo_matrix
      >>> X_sparse = coo_matrix(X)

      >>> from sklearn.utils import resample
      >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
      >>> X
      array([[1., 0.],
             [2., 1.],
             [1., 0.]])

      >>> X_sparse                   # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
      <3x2 sparse matrix of type '<... 'numpy.float64'>'
          with 4 stored elements in Compressed Sparse Row format>

      >>> X_sparse.toarray()
      array([[1., 0.],
             [2., 1.],
             [1., 0.]])

      >>> y
      array([0, 1, 0])

      >>> resample(y, n_samples=2, random_state=0)
      array([0, 1])

    See also
    --------
    :func:`sklearn.utils.shuffle`
    """
    random_state = check_random_state(options.pop('random_state', None))
    replace = options.pop('replace', True)
    max_n_samples = options.pop('n_samples', None)
    if options:
        raise ValueError("Unexpected kw arguments: %r" % options.keys())

    if not arrays:
        return None

    first = arrays[0]
    n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)

    if max_n_samples is None:
        max_n_samples = n_samples
    elif (max_n_samples > n_samples) and (not replace):
        raise ValueError("Cannot sample %d out of arrays with dim %d "
                         "when replace is False" % (max_n_samples,
                                                    n_samples))

    check_consistent_length(*arrays)

    if replace:
        # Bootstrap: draw with replacement.
        indices = random_state.randint(0, n_samples, size=(max_n_samples,))
    else:
        # Sliced random permutation.
        permutation = np.arange(n_samples)
        random_state.shuffle(permutation)
        indices = permutation[:max_n_samples]

    # Row-based fancy indexing requires CSR; convert other sparse formats.
    arrays = [a.tocsr() if issparse(a) else a for a in arrays]

    resampled = [safe_indexing(a, indices) for a in arrays]
    if len(resampled) == 1:
        # syntactic sugar for the unit argument case
        return resampled[0]
    return resampled
def shuffle(*arrays, **options):
    """Shuffle arrays or sparse matrices in a consistent way

    This is a convenience alias to ``resample(*arrays, replace=False)`` to do
    random permutations of the collections.

    Parameters
    ----------
    *arrays : sequence of indexable data-structures
        Indexable data-structures can be arrays, lists, dataframes or scipy
        sparse matrices with consistent first dimension.

    Other Parameters
    ----------------
    random_state : int, RandomState instance or None, optional (default=None)
        The seed of the pseudo random number generator to use when shuffling
        the data.  If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`.

    n_samples : int, None by default
        Number of samples to generate. If left to None this is
        automatically set to the first dimension of the arrays.

    Returns
    -------
    shuffled_arrays : sequence of indexable data-structures
        Sequence of shuffled copies of the collections. The original arrays
        are not impacted.

    Examples
    --------
    It is possible to mix sparse and dense arrays in the same run::

      >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
      >>> y = np.array([0, 1, 2])

      >>> from scipy.sparse import coo_matrix
      >>> X_sparse = coo_matrix(X)

      >>> from sklearn.utils import shuffle
      >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
      >>> X
      array([[0., 0.],
             [2., 1.],
             [1., 0.]])

      >>> X_sparse                   # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
      <3x2 sparse matrix of type '<... 'numpy.float64'>'
          with 3 stored elements in Compressed Sparse Row format>

      >>> X_sparse.toarray()
      array([[0., 0.],
             [2., 1.],
             [1., 0.]])

      >>> y
      array([2, 1, 0])

      >>> shuffle(y, n_samples=2, random_state=0)
      array([0, 1])

    See also
    --------
    :func:`sklearn.utils.resample`
    """
    # Force sampling without replacement, overriding any caller-supplied
    # ``replace`` option, and delegate everything else to resample().
    return resample(*arrays, **dict(options, replace=False))
def safe_sqr(X, copy=True):
    """Element wise squaring of array-likes and sparse matrices.

    Parameters
    ----------
    X : array like, matrix, sparse matrix
        Input to be squared element-wise.
    copy : boolean, optional, default True
        When False the squaring is performed in place on the validated
        input; otherwise a squared copy is returned.

    Returns
    -------
    X ** 2 : element wise square
    """
    X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
    if issparse(X):
        if copy:
            X = X.copy()
        # square only the explicitly stored entries; zeros stay zero
        X.data **= 2
    elif copy:
        X = X ** 2
    else:
        X **= 2
    return X
def gen_batches(n, batch_size):
    """Generator to create slices containing batch_size elements, from 0 to n.

    The last slice may contain less than batch_size elements, when batch_size
    does not divide n.

    Parameters
    ----------
    n : int
        Total number of elements to cover.
    batch_size : int
        Number of element in each batch. Must be positive.

    Yields
    ------
    slice of batch_size elements

    Raises
    ------
    ValueError
        If ``batch_size`` is less than 1.

    Examples
    --------
    >>> from sklearn.utils import gen_batches
    >>> list(gen_batches(7, 3))
    [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
    >>> list(gen_batches(6, 3))
    [slice(0, 3, None), slice(3, 6, None)]
    >>> list(gen_batches(2, 3))
    [slice(0, 2, None)]
    """
    # validate explicitly: batch_size == 0 previously surfaced as an opaque
    # ZeroDivisionError and negative values silently produced wrong slices
    if batch_size < 1:
        raise ValueError(
            "gen_batches got batch_size=%s, must be positive" % batch_size)
    start = 0
    for _ in range(int(n // batch_size)):
        end = start + batch_size
        yield slice(start, end)
        start = end
    if start < n:
        # remainder batch with fewer than batch_size elements
        yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
    """Generator producing ``n_packs`` slices that together cover ``0..n``.

    Earlier packs receive one extra element when ``n_packs`` does not divide
    ``n`` evenly; empty packs are skipped.

    Parameters
    ----------
    n : int
    n_packs : int
        Number of slices to generate.
    n_samples : int or None (default = None)
        Number of samples. Pass n_samples when the slices are to be used for
        sparse matrix indexing; slicing off-the-end raises an exception, while
        it works for NumPy arrays.

    Yields
    ------
    slice

    Examples
    --------
    >>> from sklearn.utils import gen_even_slices
    >>> list(gen_even_slices(10, 1))
    [slice(0, 10, None)]
    >>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
    [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
    >>> list(gen_even_slices(10, 3))
    [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
    """
    if n_packs < 1:
        raise ValueError("gen_even_slices got n_packs=%s, must be >=1"
                         % n_packs)
    base, remainder = divmod(n, n_packs)
    start = 0
    for pack_num in range(n_packs):
        # the first ``remainder`` packs each carry one extra element
        length = base + (1 if pack_num < remainder else 0)
        if length > 0:
            end = start + length
            if n_samples is not None:
                end = min(n_samples, end)
            yield slice(start, end, None)
            start = end
def tosequence(x):
    """Cast iterable x to a Sequence, avoiding a copy if possible.

    ndarrays go through ``np.asarray``, objects that already implement the
    ``Sequence`` protocol are returned unchanged, and any other iterable is
    materialized into a list.

    Parameters
    ----------
    x : iterable
    """
    if isinstance(x, np.ndarray):
        return np.asarray(x)
    if isinstance(x, Sequence):
        return x
    return list(x)
def indices_to_mask(indices, mask_length):
    """Convert list of indices to boolean mask.

    Parameters
    ----------
    indices : list-like
        List of integers treated as indices.
    mask_length : int
        Length of boolean mask to be generated.
        This parameter must be greater than max(indices)

    Returns
    -------
    mask : 1d boolean nd-array
        Boolean array that is True where indices are present, else False.

    Raises
    ------
    ValueError
        If ``mask_length`` is not greater than ``max(indices)``.

    Examples
    --------
    >>> from sklearn.utils import indices_to_mask
    >>> indices = [1, 2 , 3, 4]
    >>> indices_to_mask(indices, 5)
    array([False,  True,  True,  True,  True])
    """
    if mask_length <= np.max(indices):
        raise ValueError("mask_length must be greater than max(indices)")
    # use the builtin ``bool`` dtype: the ``np.bool`` alias was deprecated in
    # NumPy 1.20 and removed in 1.24, so the original spelling now raises
    mask = np.zeros(mask_length, dtype=bool)
    mask[indices] = True
    return mask
def get_chunk_n_rows(row_bytes, max_n_rows=None,
                     working_memory=None):
    """Calculate how many rows fit inside the working-memory budget.

    Parameters
    ----------
    row_bytes : int
        The expected number of bytes of memory that will be consumed
        during the processing of each row.
    max_n_rows : int, optional
        The maximum return value.
    working_memory : int or float, optional
        Budget in MiB. When None (default), the value of
        ``sklearn.get_config()['working_memory']`` is used.

    Returns
    -------
    int or the value of n_samples

    Warns
    -----
    Issues a UserWarning if ``row_bytes`` exceeds ``working_memory`` MiB.
    """
    if working_memory is None:
        working_memory = get_config()['working_memory']

    n_rows = int(working_memory * (2 ** 20) // row_bytes)
    if max_n_rows is not None:
        n_rows = min(n_rows, max_n_rows)
    if n_rows >= 1:
        return n_rows
    # a single row does not even fit the budget: warn and process one at a time
    warnings.warn('Could not adhere to working_memory config. '
                  'Currently %.0fMiB, %.0fMiB required.' %
                  (working_memory, np.ceil(row_bytes * 2 ** -20)))
    return 1
def is_scalar_nan(x):
    """Test whether x is a scalar NaN.

    Unlike ``np.isnan`` this accepts arbitrary Python objects, returning
    False for non-numeric types and containers instead of raising.

    Parameters
    ----------
    x : any type

    Returns
    -------
    boolean

    Examples
    --------
    >>> is_scalar_nan(np.nan)
    True
    >>> is_scalar_nan(float("nan"))
    True
    >>> is_scalar_nan(None)
    False
    >>> is_scalar_nan("")
    False
    >>> is_scalar_nan([np.nan])
    False
    """
    # np.floating is listed alongside numbers.Real because np.float32 does
    # not match numbers.Real under Python 2; the bool() call converts the
    # np.bool_ returned by np.isnan to a plain Python bool.
    if not isinstance(x, (numbers.Real, np.floating)):
        return False
    return bool(np.isnan(x))
| [
"platform.python_implementation",
"struct.calcsize",
"numpy.ceil",
"numpy.asarray",
"scipy.sparse.issparse",
"numpy.max",
"numpy.issubdtype",
"numpy.zeros",
"numpy.isnan",
"warnings.warn",
"numpy.arange"
] | [((1403, 1435), 'platform.python_implementation', 'platform.python_implementation', ([], {}), '()\n', (1433, 1435), False, 'import platform\n'), ((2946, 2962), 'numpy.asarray', 'np.asarray', (['mask'], {}), '(mask)\n', (2956, 2962), True, 'import numpy as np\n'), ((2970, 3013), 'numpy.issubdtype', 'np.issubdtype', (['mask.dtype', 'np.signedinteger'], {}), '(mask.dtype, np.signedinteger)\n', (2983, 3013), True, 'import numpy as np\n'), ((4082, 4113), 'numpy.zeros', 'np.zeros', ([], {'shape': '(0, X.shape[1])'}), '(shape=(0, X.shape[1]))\n', (4090, 4113), True, 'import numpy as np\n'), ((12088, 12099), 'scipy.sparse.issparse', 'issparse', (['X'], {}), '(X)\n', (12096, 12099), False, 'from scipy.sparse import issparse\n'), ((15680, 15716), 'numpy.zeros', 'np.zeros', (['mask_length'], {'dtype': 'np.bool'}), '(mask_length, dtype=np.bool)\n', (15688, 15716), True, 'import numpy as np\n'), ((1462, 1482), 'struct.calcsize', 'struct.calcsize', (['"""P"""'], {}), "('P')\n", (1477, 1482), False, 'import struct\n'), ((3080, 3104), 'numpy.arange', 'np.arange', (['mask.shape[0]'], {}), '(mask.shape[0])\n', (3089, 3104), True, 'import numpy as np\n'), ((8883, 8903), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (8892, 8903), True, 'import numpy as np\n'), ((14807, 14820), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (14817, 14820), True, 'import numpy as np\n'), ((15577, 15592), 'numpy.max', 'np.max', (['indices'], {}), '(indices)\n', (15583, 15592), True, 'import numpy as np\n'), ((9072, 9083), 'scipy.sparse.issparse', 'issparse', (['a'], {}), '(a)\n', (9080, 9083), False, 'from scipy.sparse import issparse\n'), ((17848, 17859), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (17856, 17859), True, 'import numpy as np\n'), ((5089, 5165), 'warnings.warn', 'warnings.warn', (['"""Copying input dataframe for slicing."""', 'DataConversionWarning'], {}), "('Copying input dataframe for slicing.', DataConversionWarning)\n", (5102, 5165), False, 'import 
warnings\n'), ((16970, 16999), 'numpy.ceil', 'np.ceil', (['(row_bytes * 2 ** -20)'], {}), '(row_bytes * 2 ** -20)\n', (16977, 16999), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as pl
import torch
import torch.nn as nn
import torch.utils.data as data
from torch.autograd import Variable
import h5py
import shutil
from tqdm import tqdm
import matplotlib.animation as animation
from astropy.io import fits
import glob
import os
from skimage.feature import register_translation
import model
class optical_flow(object):
    """Run a trained optical-flow network on Ca K solar image sequences.

    Loads the newest checkpoint from ``trained/`` (or a named one) and
    provides a single-pair visual check (``test``) and a movie renderer
    (``movie``) that animates the estimated flow over a whole sequence.
    """
    def __init__(self, n_pixel=256, checkpoint=None):
        """Build the network and restore weights from a .pth checkpoint.

        Parameters
        ----------
        n_pixel : int
            Side length of the square crops fed to the network.
        checkpoint : str or None
            Checkpoint path without the ``.pth`` suffix; when None the most
            recently created file in ``trained/`` is used.
        """
        self.cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if self.cuda else "cpu")
        self.n_pixel = n_pixel
        self.model = model.network(n_pixel=n_pixel, device=self.device).to(self.device)
        
        if (checkpoint is None):
            # default to the newest checkpoint by file creation time
            files = glob.glob('trained/*.pth')
            self.checkpoint = max(files, key=os.path.getctime)
        else:
            self.checkpoint = '{0}.pth'.format(checkpoint)
        print("=> loading checkpoint '{}'".format(self.checkpoint))
        if (self.cuda):
            checkpoint = torch.load(self.checkpoint)
        else:
            # remap GPU-saved tensors onto the CPU
            checkpoint = torch.load(self.checkpoint, map_location=lambda storage, loc: storage)
        self.model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}'".format(self.checkpoint))

    def test(self):
        """Run the network on one hard-coded image pair and plot the result."""
        pl.close('all')
        self.model.eval()
        # sub-sampling used when drawing the quiver arrows
        steps = (slice(None,None,2),slice(None,None,2))
        f0 = fits.open('/net/nas/proyectos/fis/aasensio/deep_learning/deepvel_jess/CaK/destretched_02520.fits')
        f1 = fits.open('/net/nas/proyectos/fis/aasensio/deep_learning/deepvel_jess/CaK/destretched_02534.fits')

        ims = np.zeros((1,2,self.n_pixel,self.n_pixel))
        ims[0,0,:,:] = f0[0].data[512:512+self.n_pixel,512:512+self.n_pixel]
        ims[0,1,:,:] = f1[0].data[512:512+self.n_pixel,512:512+self.n_pixel]

        # normalize each slice independently to [0, 1]
        minim = np.min(ims, axis=(2,3))
        maxim = np.max(ims, axis=(2,3))

        ims = (ims - minim[:,:,None,None]) / (maxim[:,:,None,None] - minim[:,:,None,None])

        ims = torch.from_numpy(ims.astype('float32'))
        ims = ims.to(self.device)

        out_forward, out_backward, flow_forward, flow_backward = self.model(ims)

        output = out_forward.cpu().data.numpy()
        flow = flow_forward.cpu().data.numpy()

        flowx = flow[0,0,:,:]
        flowy = flow[0,1,:,:]

        ims = ims.cpu().data.numpy()

        f, ax = pl.subplots(nrows=2, ncols=4, figsize=(14,6))
        ax[0,0].imshow(ims[0,0,:,:])
        ax[0,1].imshow(ims[0,1,:,:])
        ax[0,2].imshow(flow[0,0,:,:])
        ax[0,3].imshow(flow[0,1,:,:])
        ax[1,0].imshow(output[0,0,:,:])
        ax[1,1].imshow(ims[0,0,:,:]-output[0,0,:,:])
        ax[1,2].imshow(ims[0,1,:,:]-output[0,0,:,:])

        ax[0,0].set_title('Input 1')
        ax[0,1].set_title('Input 2')
        ax[0,2].set_title('Flow x')
        ax[0,3].set_title('Flow y')
        ax[1,0].set_title('NN')
        # residual std gives a quick reconstruction-quality figure
        ax[1,1].set_title('NN-I1 {0}'.format(np.std(ims[0,0,:,:]-output[0,0,:,:])))
        ax[1,2].set_title('NN-I2 {0}'.format(np.std(ims[0,1,:,:]-output[0,0,:,:])))

        f, ax = pl.subplots()
        x = np.arange(self.n_pixel)
        y = np.arange(self.n_pixel)
        X, Y = np.meshgrid(x, y)
        ax.imshow(ims[0,0,:,:])
        Q = ax.quiver(X[steps], Y[steps], 0.5*self.n_pixel*flowx[steps], 0.5*self.n_pixel*flowy[steps], scale=10, units='inches', color='yellow')
        qk = ax.quiverkey(Q, 0.9, 0.9, 1, r'$2 \frac{m}{s}$', labelpos='E', coordinates='figure', color='k')
        pl.show()

        # NOTE(review): `stop` is not defined anywhere; this raises NameError
        # and looks like a deliberate breakpoint after plotting -- confirm.
        stop()

    def updatefig(self, *args):
        """Animation callback: process the next frame pair and redraw artists."""
        f0 = fits.open(self.files[self.loop])
        f1 = fits.open(self.files[self.loop+1])

        with torch.no_grad():
            ims = np.zeros((1,2,self.n_pixel,self.n_pixel))
            ims[0,0,:,:] = f0[0].data[self.origin:self.origin+self.n_pixel,self.origin:self.origin+self.n_pixel]
            ims[0,1,:,:] = f1[0].data[self.origin:self.origin+self.n_pixel,self.origin:self.origin+self.n_pixel]

            # per-slice normalization to [0, 1]
            minim = np.min(ims, axis=(2,3))
            maxim = np.max(ims, axis=(2,3))

            ims = (ims - minim[:,:,None,None]) / (maxim[:,:,None,None] - minim[:,:,None,None])

            # co-align both frames with the reference frame using integer
            # shifts from cross-correlation
            shift, error, diffphase = register_translation(self.reference, ims[0,1,:,:])
            shift = [int(f) for f in shift]
            ims[0,1,:,:] = np.roll(ims[0,1,:,:], shift, axis=(0,1))

            shift, error, diffphase = register_translation(self.reference, ims[0,0,:,:])
            shift = [int(f) for f in shift]
            ims[0,0,:,:] = np.roll(ims[0,0,:,:], shift, axis=(0,1))
        
            ims = torch.from_numpy(ims.astype('float32'))
            ims = ims.to(self.device)

            out_forward, flow_forward = self.model(ims, backward=False)

            output = out_forward.cpu().data.numpy()
            flow = flow_forward.cpu().data.numpy()

            ims = ims.cpu().data.numpy()

            flowx = flow[0,0,:,:]
            flowy = flow[0,1,:,:]

        f0.close()
        f1.close()

        flowx *= self.scale
        flowy *= self.scale

        self.im1.set_array(np.flip(ims[0,0,:,:], axis=0))
        self.im2.set_array(np.flip(ims[0,1,:,:], axis=0))
        self.flowx.set_array(np.flip(flow[0,0,:,:], axis=0))
        self.flowy.set_array(np.flip(flow[0,1,:,:], axis=0))
        self.Q.set_UVC(self.n_pixel*flowx[self.steps], self.n_pixel*flowy[self.steps])

        self.loop += 1
        self.pbar.update(1)

        return self.im1, self.im2, self.flowx, self.flowy

    def movie(self):
        """Render the flow over the full destretched sequence to ``CaK.mp4``."""
        self.origin = 512
        self.model.eval()
        # assumes ~0.18 units/pixel plate scale -- TODO confirm against data
        self.scale = 0.18

        self.files = glob.glob('/net/nas/proyectos/fis/aasensio/deep_learning/deepvel_jess/CaK/destretched_*.fits')
        self.files.sort()

        self.n_frames = len(self.files) - 2

        self.loop = 0

        f0 = fits.open(self.files[self.loop])
        f1 = fits.open(self.files[self.loop+1])

        # first frame is the alignment reference for the whole sequence
        self.reference = f0[0].data[self.origin:self.origin+self.n_pixel,self.origin:self.origin+self.n_pixel]

        with torch.no_grad():
            ims = np.zeros((1,2,self.n_pixel,self.n_pixel))
            ims[0,0,:,:] = f0[0].data[self.origin:self.origin+self.n_pixel,self.origin:self.origin+self.n_pixel]
            ims[0,1,:,:] = f1[0].data[self.origin:self.origin+self.n_pixel,self.origin:self.origin+self.n_pixel]

            minim = np.min(ims, axis=(2,3))
            maxim = np.max(ims, axis=(2,3))

            ims = (ims - minim[:,:,None,None]) / (maxim[:,:,None,None] - minim[:,:,None,None])

            shift, error, diffphase = register_translation(self.reference, ims[0,1,:,:])
            shift = [int(f) for f in shift]
            ims[0,1,:,:] = np.roll(ims[0,1,:,:], shift, axis=(0,1))

            shift, error, diffphase = register_translation(self.reference, ims[0,0,:,:])
            shift = [int(f) for f in shift]
            ims[0,0,:,:] = np.roll(ims[0,0,:,:], shift, axis=(0,1))
        
            ims = torch.from_numpy(ims.astype('float32'))
            ims = ims.to(self.device)

            out_forward, flow_forward = self.model(ims, backward=False)

            output = out_forward.cpu().data.numpy()
            flow = flow_forward.cpu().data.numpy()

            flowx = flow[0,0,:,:]
            flowy = flow[0,1,:,:]

            ims = ims.cpu().data.numpy()

        f0.close()
        f1.close()

        x = np.arange(self.n_pixel)
        y = np.arange(self.n_pixel)
        X, Y = np.meshgrid(x, y)
        X = X * self.scale
        Y = Y * self.scale

        flowx *= self.scale
        flowy *= self.scale

        # quiver sub-sampling: every second pixel in both directions
        self.steps = (slice(None,None,2),slice(None,None,2))
        f, ax = pl.subplots(nrows=2, ncols=2, figsize=(12,10))
        self.im1 = ax[0,0].imshow(np.flip(ims[0,0,:,:], axis=0), extent=[0,self.n_pixel*self.scale,0,self.n_pixel*self.scale])
        self.Q = ax[0,0].quiver(X[self.steps], Y[self.steps], self.n_pixel*flowx[self.steps], self.n_pixel*flowy[self.steps], scale=10, units='inches', headwidth=3, headlength=3, color='yellow')
        self.im2 = ax[0,1].imshow(np.flip(ims[0,1,:,:], axis=0), extent=[0,self.n_pixel*self.scale,0,self.n_pixel*self.scale])
        self.flowx = ax[1,0].imshow(np.flip(flowx, axis=0), extent=[0,self.n_pixel*self.scale,0,self.n_pixel*self.scale])
        self.flowy = ax[1,1].imshow(np.flip(flowy, axis=0), extent=[0,self.n_pixel*self.scale,0,self.n_pixel*self.scale])
        qk = ax[0,0].quiverkey(self.Q, 0.9, 0.9, 1, r'$2 \frac{m}{s}$', labelpos='E', coordinates='figure', color='k')
        ax[0,0].set_title('Input 1')
        ax[0,1].set_title('Input 2')
        ax[1,0].set_title('Flow x')
        ax[1,1].set_title('Flow y')

        self.pbar = tqdm(total=self.n_frames)
        self.loop += 1

        frames = self.n_frames
        #frames = 20
        
        ani = animation.FuncAnimation(f, self.updatefig, interval=100, blit=True, frames=frames-2)
        ani.save('CaK.mp4')
        self.pbar.close()
# Script entry point: build the 128x128 network (restoring the newest
# checkpoint) and render the optical-flow movie for the whole sequence.
optical_flow_network = optical_flow(n_pixel=128)
#optical_flow_network.test()
optical_flow_network.movie()
| [
"skimage.feature.register_translation",
"torch.cuda.is_available",
"astropy.io.fits.open",
"numpy.arange",
"numpy.flip",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.min",
"numpy.meshgrid",
"glob.glob",
"numpy.std",
"matplotlib.pyplot.show",
"torch.device",
"numpy.roll",
"matplotlib.an... | [((458, 483), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (481, 483), False, 'import torch\n'), ((506, 550), 'torch.device', 'torch.device', (["('cuda' if self.cuda else 'cpu')"], {}), "('cuda' if self.cuda else 'cpu')\n", (518, 550), False, 'import torch\n'), ((1354, 1369), 'matplotlib.pyplot.close', 'pl.close', (['"""all"""'], {}), "('all')\n", (1362, 1369), True, 'import matplotlib.pyplot as pl\n'), ((1468, 1576), 'astropy.io.fits.open', 'fits.open', (['"""/net/nas/proyectos/fis/aasensio/deep_learning/deepvel_jess/CaK/destretched_02520.fits"""'], {}), "(\n '/net/nas/proyectos/fis/aasensio/deep_learning/deepvel_jess/CaK/destretched_02520.fits'\n )\n", (1477, 1576), False, 'from astropy.io import fits\n'), ((1580, 1688), 'astropy.io.fits.open', 'fits.open', (['"""/net/nas/proyectos/fis/aasensio/deep_learning/deepvel_jess/CaK/destretched_02534.fits"""'], {}), "(\n '/net/nas/proyectos/fis/aasensio/deep_learning/deepvel_jess/CaK/destretched_02534.fits'\n )\n", (1589, 1688), False, 'from astropy.io import fits\n'), ((1694, 1738), 'numpy.zeros', 'np.zeros', (['(1, 2, self.n_pixel, self.n_pixel)'], {}), '((1, 2, self.n_pixel, self.n_pixel))\n', (1702, 1738), True, 'import numpy as np\n'), ((1907, 1931), 'numpy.min', 'np.min', (['ims'], {'axis': '(2, 3)'}), '(ims, axis=(2, 3))\n', (1913, 1931), True, 'import numpy as np\n'), ((1947, 1971), 'numpy.max', 'np.max', (['ims'], {'axis': '(2, 3)'}), '(ims, axis=(2, 3))\n', (1953, 1971), True, 'import numpy as np\n'), ((2477, 2523), 'matplotlib.pyplot.subplots', 'pl.subplots', ([], {'nrows': '(2)', 'ncols': '(4)', 'figsize': '(14, 6)'}), '(nrows=2, ncols=4, figsize=(14, 6))\n', (2488, 2523), True, 'import matplotlib.pyplot as pl\n'), ((3209, 3222), 'matplotlib.pyplot.subplots', 'pl.subplots', ([], {}), '()\n', (3220, 3222), True, 'import matplotlib.pyplot as pl\n'), ((3235, 3258), 'numpy.arange', 'np.arange', (['self.n_pixel'], {}), '(self.n_pixel)\n', (3244, 3258), True, 'import 
numpy as np\n'), ((3271, 3294), 'numpy.arange', 'np.arange', (['self.n_pixel'], {}), '(self.n_pixel)\n', (3280, 3294), True, 'import numpy as np\n'), ((3310, 3327), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (3321, 3327), True, 'import numpy as np\n'), ((3634, 3643), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (3641, 3643), True, 'import matplotlib.pyplot as pl\n'), ((3705, 3737), 'astropy.io.fits.open', 'fits.open', (['self.files[self.loop]'], {}), '(self.files[self.loop])\n', (3714, 3737), False, 'from astropy.io import fits\n'), ((3751, 3787), 'astropy.io.fits.open', 'fits.open', (['self.files[self.loop + 1]'], {}), '(self.files[self.loop + 1])\n', (3760, 3787), False, 'from astropy.io import fits\n'), ((5818, 5922), 'glob.glob', 'glob.glob', (['"""/net/nas/proyectos/fis/aasensio/deep_learning/deepvel_jess/CaK/destretched_*.fits"""'], {}), "(\n '/net/nas/proyectos/fis/aasensio/deep_learning/deepvel_jess/CaK/destretched_*.fits'\n )\n", (5827, 5922), False, 'import glob\n'), ((6021, 6053), 'astropy.io.fits.open', 'fits.open', (['self.files[self.loop]'], {}), '(self.files[self.loop])\n', (6030, 6053), False, 'from astropy.io import fits\n'), ((6067, 6103), 'astropy.io.fits.open', 'fits.open', (['self.files[self.loop + 1]'], {}), '(self.files[self.loop + 1])\n', (6076, 6103), False, 'from astropy.io import fits\n'), ((7633, 7656), 'numpy.arange', 'np.arange', (['self.n_pixel'], {}), '(self.n_pixel)\n', (7642, 7656), True, 'import numpy as np\n'), ((7669, 7692), 'numpy.arange', 'np.arange', (['self.n_pixel'], {}), '(self.n_pixel)\n', (7678, 7692), True, 'import numpy as np\n'), ((7708, 7725), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (7719, 7725), True, 'import numpy as np\n'), ((7916, 7963), 'matplotlib.pyplot.subplots', 'pl.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(12, 10)'}), '(nrows=2, ncols=2, figsize=(12, 10))\n', (7927, 7963), True, 'import matplotlib.pyplot as pl\n'), ((8952, 8977), 
'tqdm.tqdm', 'tqdm', ([], {'total': 'self.n_frames'}), '(total=self.n_frames)\n', (8956, 8977), False, 'from tqdm import tqdm\n'), ((9070, 9161), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['f', 'self.updatefig'], {'interval': '(100)', 'blit': '(True)', 'frames': '(frames - 2)'}), '(f, self.updatefig, interval=100, blit=True, frames=\n frames - 2)\n', (9093, 9161), True, 'import matplotlib.animation as animation\n'), ((733, 759), 'glob.glob', 'glob.glob', (['"""trained/*.pth"""'], {}), "('trained/*.pth')\n", (742, 759), False, 'import glob\n'), ((1051, 1078), 'torch.load', 'torch.load', (['self.checkpoint'], {}), '(self.checkpoint)\n', (1061, 1078), False, 'import torch\n'), ((1118, 1188), 'torch.load', 'torch.load', (['self.checkpoint'], {'map_location': '(lambda storage, loc: storage)'}), '(self.checkpoint, map_location=lambda storage, loc: storage)\n', (1128, 1188), False, 'import torch\n'), ((3800, 3815), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3813, 3815), False, 'import torch\n'), ((3836, 3880), 'numpy.zeros', 'np.zeros', (['(1, 2, self.n_pixel, self.n_pixel)'], {}), '((1, 2, self.n_pixel, self.n_pixel))\n', (3844, 3880), True, 'import numpy as np\n'), ((4125, 4149), 'numpy.min', 'np.min', (['ims'], {'axis': '(2, 3)'}), '(ims, axis=(2, 3))\n', (4131, 4149), True, 'import numpy as np\n'), ((4169, 4193), 'numpy.max', 'np.max', (['ims'], {'axis': '(2, 3)'}), '(ims, axis=(2, 3))\n', (4175, 4193), True, 'import numpy as np\n'), ((4328, 4381), 'skimage.feature.register_translation', 'register_translation', (['self.reference', 'ims[0, 1, :, :]'], {}), '(self.reference, ims[0, 1, :, :])\n', (4348, 4381), False, 'from skimage.feature import register_translation\n'), ((4478, 4522), 'numpy.roll', 'np.roll', (['ims[0, 1, :, :]', 'shift'], {'axis': '(0, 1)'}), '(ims[0, 1, :, :], shift, axis=(0, 1))\n', (4485, 4522), True, 'import numpy as np\n'), ((4558, 4611), 'skimage.feature.register_translation', 'register_translation', 
(['self.reference', 'ims[0, 0, :, :]'], {}), '(self.reference, ims[0, 0, :, :])\n', (4578, 4611), False, 'from skimage.feature import register_translation\n'), ((4708, 4752), 'numpy.roll', 'np.roll', (['ims[0, 0, :, :]', 'shift'], {'axis': '(0, 1)'}), '(ims[0, 0, :, :], shift, axis=(0, 1))\n', (4715, 4752), True, 'import numpy as np\n'), ((5285, 5317), 'numpy.flip', 'np.flip', (['ims[0, 0, :, :]'], {'axis': '(0)'}), '(ims[0, 0, :, :], axis=0)\n', (5292, 5317), True, 'import numpy as np\n'), ((5343, 5375), 'numpy.flip', 'np.flip', (['ims[0, 1, :, :]'], {'axis': '(0)'}), '(ims[0, 1, :, :], axis=0)\n', (5350, 5375), True, 'import numpy as np\n'), ((5403, 5436), 'numpy.flip', 'np.flip', (['flow[0, 0, :, :]'], {'axis': '(0)'}), '(flow[0, 0, :, :], axis=0)\n', (5410, 5436), True, 'import numpy as np\n'), ((5464, 5497), 'numpy.flip', 'np.flip', (['flow[0, 1, :, :]'], {'axis': '(0)'}), '(flow[0, 1, :, :], axis=0)\n', (5471, 5497), True, 'import numpy as np\n'), ((6228, 6243), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6241, 6243), False, 'import torch\n'), ((6264, 6308), 'numpy.zeros', 'np.zeros', (['(1, 2, self.n_pixel, self.n_pixel)'], {}), '((1, 2, self.n_pixel, self.n_pixel))\n', (6272, 6308), True, 'import numpy as np\n'), ((6553, 6577), 'numpy.min', 'np.min', (['ims'], {'axis': '(2, 3)'}), '(ims, axis=(2, 3))\n', (6559, 6577), True, 'import numpy as np\n'), ((6597, 6621), 'numpy.max', 'np.max', (['ims'], {'axis': '(2, 3)'}), '(ims, axis=(2, 3))\n', (6603, 6621), True, 'import numpy as np\n'), ((6756, 6809), 'skimage.feature.register_translation', 'register_translation', (['self.reference', 'ims[0, 1, :, :]'], {}), '(self.reference, ims[0, 1, :, :])\n', (6776, 6809), False, 'from skimage.feature import register_translation\n'), ((6906, 6950), 'numpy.roll', 'np.roll', (['ims[0, 1, :, :]', 'shift'], {'axis': '(0, 1)'}), '(ims[0, 1, :, :], shift, axis=(0, 1))\n', (6913, 6950), True, 'import numpy as np\n'), ((6986, 7039), 'skimage.feature.register_translation', 
'register_translation', (['self.reference', 'ims[0, 0, :, :]'], {}), '(self.reference, ims[0, 0, :, :])\n', (7006, 7039), False, 'from skimage.feature import register_translation\n'), ((7136, 7180), 'numpy.roll', 'np.roll', (['ims[0, 0, :, :]', 'shift'], {'axis': '(0, 1)'}), '(ims[0, 0, :, :], shift, axis=(0, 1))\n', (7143, 7180), True, 'import numpy as np\n'), ((7997, 8029), 'numpy.flip', 'np.flip', (['ims[0, 0, :, :]'], {'axis': '(0)'}), '(ims[0, 0, :, :], axis=0)\n', (8004, 8029), True, 'import numpy as np\n'), ((8319, 8351), 'numpy.flip', 'np.flip', (['ims[0, 1, :, :]'], {'axis': '(0)'}), '(ims[0, 1, :, :], axis=0)\n', (8326, 8351), True, 'import numpy as np\n'), ((8448, 8470), 'numpy.flip', 'np.flip', (['flowx'], {'axis': '(0)'}), '(flowx, axis=0)\n', (8455, 8470), True, 'import numpy as np\n'), ((8570, 8592), 'numpy.flip', 'np.flip', (['flowy'], {'axis': '(0)'}), '(flowy, axis=0)\n', (8577, 8592), True, 'import numpy as np\n'), ((612, 662), 'model.network', 'model.network', ([], {'n_pixel': 'n_pixel', 'device': 'self.device'}), '(n_pixel=n_pixel, device=self.device)\n', (625, 662), False, 'import model\n'), ((3069, 3113), 'numpy.std', 'np.std', (['(ims[0, 0, :, :] - output[0, 0, :, :])'], {}), '(ims[0, 0, :, :] - output[0, 0, :, :])\n', (3075, 3113), True, 'import numpy as np\n'), ((3153, 3197), 'numpy.std', 'np.std', (['(ims[0, 1, :, :] - output[0, 0, :, :])'], {}), '(ims[0, 1, :, :] - output[0, 0, :, :])\n', (3159, 3197), True, 'import numpy as np\n')] |
# <NAME>, Imaging Biomarkers and Computer-Aided Diagnosis Laboratory,
# National Institutes of Health Clinical Center, July 2019
"""Load and pre-process CT images in DeepLesion"""
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage.morphology import binary_fill_holes, binary_opening, binary_dilation
import nibabel as nib
from maskrcnn_benchmark.config import cfg
def load_prep_img(data_dir, imname, spacing, slice_intv, do_clip=False, num_slice=3, is_train=False):
    """load volume, windowing, interpolate multiple slices, clip black border, resize according to spacing

    Parameters
    ----------
    data_dir : str or np.ndarray
        Ignored: immediately overwritten with the hard-coded LiTS path below.
    imname : str or int
        Slice identifier forwarded to ``load_multislice_img_16bit_png``.
    spacing : float or None
        Pixel spacing; used to rescale toward ``cfg.INPUT.NORM_SPACING``.
    slice_intv : float
        Physical interval between consecutive slices.
    do_clip : bool
        When True, crop the black border using a rough body mask.
    num_slice : int
        Number of neighboring slices stacked as channels.
    is_train : bool
        Unused in this function.

    Returns
    -------
    tuple
        ``(im, im_scale, c)``: the windowed/rescaled image, the scale factor
        that was applied, and the crop window ``[up, down, left, right]``.
    """
    # HACK: the incoming data_dir is discarded in favor of a fixed local path
    data_dir = '/data/LITS/npy_images'
    #data_dir = '/gdrive/MyDrive/TCS/LiTS/npy_images'
    im, mask = load_multislice_img_16bit_png(data_dir, imname, slice_intv, do_clip, num_slice)
    # map raw intensities into 0~255 using the configured window
    im = windowing(im, cfg.INPUT.WINDOWING)
    if do_clip: # clip black border
        c = get_range(mask, margin=0)
        im = im[c[0]:c[1] + 1, c[2]:c[3] + 1, :]
    else:
        c = [0, im.shape[0]-1, 0, im.shape[1]-1]
    im_shape = im.shape[0:2]
    if spacing is not None and cfg.INPUT.NORM_SPACING > 0: # spacing adjust, will overwrite simple scaling
        im_scale = float(spacing) / cfg.INPUT.NORM_SPACING
    else:
        im_scale = float(cfg.INPUT.MAX_IM_SIZE) / float(np.max(im_shape)) # simple scaling
    # never let the rescaled image exceed cfg.INPUT.MAX_IM_SIZE
    max_shape = np.max(im_shape)*im_scale
    if max_shape > cfg.INPUT.MAX_IM_SIZE:
        im_scale1 = float(cfg.INPUT.MAX_IM_SIZE) / max_shape
        im_scale *= im_scale1
    if im_scale != 1:
        #print(im_scale,computed_scale)
        im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
        # mask = cv2.resize(mask, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
    return im, im_scale, c
def load_multislice_img_16bit_png(data_dir, imname, slice_intv, do_clip, num_slice):
    """Load ``num_slice`` neighboring slices centered on ``imname`` and merge
    them into the channels of one image, linearly interpolating between
    slices when ``cfg.INPUT.SLICE_INTV`` is not a multiple of ``slice_intv``.

    Two input modes, selected by argument types:
    * ``data_dir`` and ``imname`` both str: slices are .npy files on disk.
    * ``data_dir`` ndarray and ``imname`` int: slices are indexed from the
      in-memory volume.

    Returns ``(im, mask)`` where ``mask`` is a rough body mask, or None when
    ``do_clip`` is False.
    """
    # per-call cache so each slice file is read at most once
    data_cache = {}
    def _load_data_from_png(imname, delta=0):
        # resolve the slice name ``delta`` positions away (with fallback to
        # the nearest existing file) and memoize its contents
        imname1 = get_slice_name(data_dir, imname, delta)
        if imname1 not in data_cache.keys():
            #print(os.path.join(data_dir, imname1))
            data_cache[imname1] = np.load(os.path.join(data_dir, imname1))
            assert data_cache[imname1] is not None, 'file reading error: ' + imname1
            # if data_cache[imname1] is None:
            #     print('file reading error:', imname1)
        return data_cache[imname1]

    def _load_data_from_nifti(imname, delta=0):
        # in this case, data_dir is the numpy volume and imname is the slice index
        vol = data_dir
        # clamp the requested index to the volume's slice range
        idx = min(vol.shape[2]-1, max(int(imname+delta), 0))
        return vol[:,:,idx]

    if isinstance(data_dir, str) and isinstance(imname, str):
        _load_data = _load_data_from_png
    elif isinstance(data_dir, np.ndarray) and isinstance(imname, int):
        _load_data = _load_data_from_nifti

    im_cur = _load_data(imname)

    mask = get_mask(im_cur) if do_clip else None

    # the single-slice shortcut below was disabled (``if False``); the
    # multi-slice branch in ``else`` is always taken
    if False:
        print("require 3 slices")
    # if cfg.INPUT.SLICE_INTV == 0 or np.isnan(slice_intv) or slice_intv < 0:
    #     ims = [im_cur] * num_slice  # only use the central slice

    else:
        ims = [im_cur]
        # find neighboring slices of im_cure
        # rel_pos: how many actual slices correspond to one required interval
        rel_pos = float(cfg.INPUT.SLICE_INTV) / float(slice_intv)
        a = rel_pos - np.floor(rel_pos)
        b = np.ceil(rel_pos) - rel_pos
        if a == 0:  # required SLICE_INTV is a divisible to the actual slice_intv, don't need interpolation
            for p in range(int((num_slice-1)/2)):
                im_prev = _load_data(imname, - rel_pos * (p + 1))
                im_next = _load_data(imname, rel_pos * (p + 1))
                ims = [im_prev] + ims + [im_next]
        else:
            for p in range(int((num_slice-1)/2)):
                intv1 = rel_pos*(p+1)
                slice1 = _load_data(imname, - np.ceil(intv1))
                slice2 = _load_data(imname, - np.floor(intv1))
                im_prev = a * slice1 + b * slice2  # linear interpolation

                slice1 = _load_data(imname, np.ceil(intv1))
                slice2 = _load_data(imname, np.floor(intv1))
                im_next = a * slice1 + b * slice2
                ims = [im_prev] + ims + [im_next]

    ims = [im.astype(float) for im in ims]
    im = cv2.merge(ims)
    im = im.astype(np.float32,
                   copy=False)  # there is an offset in the 16-bit png files, intensity - 32768 = Hounsfield unit
    return im, mask
def get_slice_name(data_dir, imname, delta=0):
    """Return the .npy file name of the slice ``delta`` positions from ``imname``.

    When the requested neighbor does not exist on disk, steps back toward
    the original slice one index at a time and returns the first existing
    file (falling back to the original slice itself).
    """
    if delta == 0:
        return imname + '.npy'
    delta = int(delta)
    dirname, slicename = imname.split(os.sep)
    slice_idx = int(slicename)
    candidate = '%s%s' % (dirname, os.sep) + str(slice_idx + delta) + '.npy'
    # walk back toward the original slice until an existing file is found
    while not os.path.exists(os.path.join(data_dir, candidate)):
        delta -= np.sign(delta)
        candidate = '%s%s' % (dirname, os.sep) + str(slice_idx + delta) + '.npy'
        if delta == 0:
            break
    return candidate
def windowing(im, win):
    """Scale intensity from win[0]~win[1] to float numbers in 0~255.

    Parameters
    ----------
    im : np.ndarray
        Raw intensity image (e.g. Hounsfield units).
    win : sequence of two numbers
        Lower and upper bounds of the intensity window.

    Returns
    -------
    np.ndarray
        Float image with values in [0, 255]; the input is not modified.
    """
    # normalize into [0, 1] relative to the window, then saturate with a
    # single np.clip pass instead of two boolean-mask assignments
    im1 = (im.astype(float) - win[0]) / (win[1] - win[0])
    return np.clip(im1, 0, 1) * 255
def windowing_rev(im, win):
    """Inverse of ``windowing``: map values in 0~255 back into the window ``win``."""
    span = win[1] - win[0]
    return im.astype(float) / 255 * span + win[0]
def get_mask(im):
    """Roughly segment the body region with a simple intensity threshold."""
    th = 0  # an approximate background intensity value
    mask = binary_opening(im > th, structure=np.ones((7, 7)))  # roughly remove bed
    # mask = binary_dilation(mask)
    # mask = binary_fill_holes(mask, structure=np.ones((11,11))) # fill parts like lung
    if not mask.sum():
        # atypical intensities left nothing above the threshold:
        # fall back to a full-image mask
        mask = im * 0 + 1
    return mask.astype(dtype=np.int32)
def get_range(mask, margin=0):
    """Return [up, down, left, right] extreme coordinates of a binary mask,
    expanded by ``margin`` but clamped to the mask bounds."""
    rows, cols = np.nonzero(mask)
    up = max(0, rows.min() - margin)
    down = min(mask.shape[0] - 1, rows.max() + margin)
    left = max(0, cols.min() - margin)
    right = min(mask.shape[1] - 1, cols.max() + margin)
    return [up, down, left, right]
def map_box_back(boxes, cx=0, cy=0, im_scale=1.):
    """Undo the resize scaling and crop offset applied to boxes, in place."""
    boxes /= im_scale
    # columns are (x1, y1, x2, y2): x columns shift by cx, y columns by cy
    for col, offset in ((0, cx), (1, cy), (2, cx), (3, cy)):
        boxes[:, col] += offset
    return boxes
| [
"cv2.merge",
"numpy.ceil",
"numpy.ones",
"numpy.floor",
"os.path.join",
"numpy.max",
"numpy.sign",
"numpy.nonzero",
"cv2.resize"
] | [((4304, 4318), 'cv2.merge', 'cv2.merge', (['ims'], {}), '(ims)\n', (4313, 4318), False, 'import cv2\n'), ((6409, 6425), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (6419, 6425), True, 'import numpy as np\n'), ((1371, 1387), 'numpy.max', 'np.max', (['im_shape'], {}), '(im_shape)\n', (1377, 1387), True, 'import numpy as np\n'), ((1607, 1696), 'cv2.resize', 'cv2.resize', (['im', 'None', 'None'], {'fx': 'im_scale', 'fy': 'im_scale', 'interpolation': 'cv2.INTER_LINEAR'}), '(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.\n INTER_LINEAR)\n', (1617, 1696), False, 'import cv2\n'), ((5182, 5196), 'numpy.sign', 'np.sign', (['delta'], {}), '(delta)\n', (5189, 5196), True, 'import numpy as np\n'), ((3332, 3349), 'numpy.floor', 'np.floor', (['rel_pos'], {}), '(rel_pos)\n', (3340, 3349), True, 'import numpy as np\n'), ((3362, 3378), 'numpy.ceil', 'np.ceil', (['rel_pos'], {}), '(rel_pos)\n', (3369, 3378), True, 'import numpy as np\n'), ((5087, 5118), 'os.path.join', 'os.path.join', (['data_dir', 'imname1'], {}), '(data_dir, imname1)\n', (5099, 5118), False, 'import os\n'), ((6012, 6027), 'numpy.ones', 'np.ones', (['(7, 7)'], {}), '((7, 7))\n', (6019, 6027), True, 'import numpy as np\n'), ((1317, 1333), 'numpy.max', 'np.max', (['im_shape'], {}), '(im_shape)\n', (1323, 1333), True, 'import numpy as np\n'), ((2173, 2204), 'os.path.join', 'os.path.join', (['data_dir', 'imname1'], {}), '(data_dir, imname1)\n', (2185, 2204), False, 'import os\n'), ((4073, 4087), 'numpy.ceil', 'np.ceil', (['intv1'], {}), '(intv1)\n', (4080, 4087), True, 'import numpy as np\n'), ((4133, 4148), 'numpy.floor', 'np.floor', (['intv1'], {}), '(intv1)\n', (4141, 4148), True, 'import numpy as np\n'), ((3875, 3889), 'numpy.ceil', 'np.ceil', (['intv1'], {}), '(intv1)\n', (3882, 3889), True, 'import numpy as np\n'), ((3937, 3952), 'numpy.floor', 'np.floor', (['intv1'], {}), '(intv1)\n', (3945, 3952), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Chesapeake Bay High-Resolution Land Cover Project datasets."""
import abc
import os
import sys
from typing import Any, Callable, Dict, List, Optional, Sequence
import fiona
import numpy as np
import pyproj
import rasterio
import rasterio.mask
import shapely.geometry
import shapely.ops
import torch
from rasterio.crs import CRS
from .geo import GeoDataset, RasterDataset
from .utils import BoundingBox, download_url, extract_archive
class Chesapeake(RasterDataset, abc.ABC):
    """Abstract base class for all Chesapeake datasets.

    `Chesapeake Bay High-Resolution Land Cover Project
    <https://www.chesapeakeconservancy.org/conservation-innovation-center/high-resolution-data/land-cover-data-project/>`_
    dataset.

    This dataset was collected by the Chesapeake Conservancy's Conservation Innovation
    Center (CIC) in partnership with the University of Vermont and WorldView Solutions,
    Inc. It consists of one-meter resolution land cover information for the Chesapeake
    Bay watershed (~100,000 square miles of land).

    For more information, see:

    * `User Guide
      <https://chesapeakeconservancy.org/wp-content/uploads/2017/01/LandCover101Guide.pdf>`_
    * `Class Descriptions
      <https://chesapeakeconservancy.org/wp-content/uploads/2020/03/LC_Class_Descriptions.pdf>`_
    * `Accuracy Assessment
      <https://chesapeakeconservancy.org/wp-content/uploads/2017/01/Chesapeake_Conservancy_Accuracy_Assessment_Methodology.pdf>`_
    """

    # TODO: this shouldn't be needed, but .tif.ovr file is getting picked up
    filename_glob = "*.tif"
    is_image = False

    @property
    @abc.abstractmethod
    def base_folder(self) -> str:
        """Parent directory of dataset in URL."""

    @property
    @abc.abstractmethod
    def filename(self) -> str:
        """Filename to find/store dataset in."""

    @property
    @abc.abstractmethod
    def zipfile(self) -> str:
        """Name of zipfile in download URL."""

    @property
    @abc.abstractmethod
    def md5(self) -> str:
        """MD5 checksum to verify integrity of dataset."""

    @property
    def url(self) -> str:
        """URL to download dataset from."""
        url = "https://cicwebresources.blob.core.windows.net/chesapeakebaylandcover"
        url += f"/{self.base_folder}/{self.zipfile}"
        return url

    def __init__(
        self,
        root: str = "data",
        crs: Optional[CRS] = None,
        res: Optional[float] = None,
        transforms: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None,
        cache: bool = True,
        download: bool = False,
        checksum: bool = False,
    ) -> None:
        """Initialize a new Dataset instance.

        Args:
            root: root directory where dataset can be found
            crs: :term:`coordinate reference system (CRS)` to warp to
                (defaults to the CRS of the first file found)
            res: resolution of the dataset in units of CRS
                (defaults to the resolution of the first file found)
            transforms: a function/transform that takes an input sample
                and returns a transformed version
            cache: if True, cache file handle to speed up repeated sampling
            download: if True, download dataset and store it in the root directory
            checksum: if True, check the MD5 of the downloaded files (may be slow)

        Raises:
            FileNotFoundError: if no files are found in ``root``
            RuntimeError: if ``download=False`` but dataset is missing or checksum fails
        """
        self.root = root
        self.download = download
        self.checksum = checksum

        # Ensure the data exists on disk (downloading/extracting if allowed)
        # before RasterDataset's constructor indexes the files.
        self._verify()

        super().__init__(root, crs, res, transforms, cache)

    def _verify(self) -> None:
        """Verify the integrity of the dataset.

        Raises:
            RuntimeError: if ``download=False`` but dataset is missing or checksum fails
        """
        # Check if the extracted file already exists
        if os.path.exists(os.path.join(self.root, self.filename)):
            return

        # Check if the zip file has already been downloaded
        if os.path.exists(os.path.join(self.root, self.zipfile)):
            self._extract()
            return

        # Check if the user requested to download the dataset
        if not self.download:
            raise RuntimeError(
                f"Dataset not found in `root={self.root}` and `download=False`, "
                "either specify a different `root` directory or use `download=True` "
                "to automatically download the dataset."
            )

        # Download the dataset
        self._download()
        self._extract()

    def _download(self) -> None:
        """Download the dataset."""
        download_url(self.url, self.root, filename=self.zipfile, md5=self.md5)

    def _extract(self) -> None:
        """Extract the dataset."""
        extract_archive(os.path.join(self.root, self.zipfile))
class Chesapeake7(Chesapeake):
    """Complete 7-class dataset.
    This version of the dataset is composed of 7 classes:
    0. No Data: Background values
    1. Water: All areas of open water including ponds, rivers, and lakes
    2. Tree Canopy and Shrubs: All woody vegetation including trees and shrubs
    3. Low Vegetation: Plant material less than 2 meters in height including lawns
    4. Barren: Areas devoid of vegetation consisting of natural earthen material
    5. Impervious Surfaces: Human-constructed surfaces less than 2 meters in height
    6. Impervious Roads: Impervious surfaces that are used for transportation
    7. Aberdeen Proving Ground: U.S. Army facility with no labels
    """
    # Location metadata consumed by Chesapeake.url/_verify/_download.
    base_folder = "BAYWIDE"
    filename = "Baywide_7class_20132014.tif"
    zipfile = "Baywide_7Class_20132014.zip"
    md5 = "61a4e948fb2551840b6557ef195c2084"
    # RGBA color per class index; indices 8-15 are padded with transparent
    # black (presumably to fill the raster's full color table — TODO confirm).
    cmap = {
        0: (0, 0, 0, 0),
        1: (0, 197, 255, 255),
        2: (38, 115, 0, 255),
        3: (163, 255, 115, 255),
        4: (255, 170, 0, 255),
        5: (156, 156, 156, 255),
        6: (0, 0, 0, 255),
        7: (197, 0, 255, 255),
        8: (0, 0, 0, 0),
        9: (0, 0, 0, 0),
        10: (0, 0, 0, 0),
        11: (0, 0, 0, 0),
        12: (0, 0, 0, 0),
        13: (0, 0, 0, 0),
        14: (0, 0, 0, 0),
        15: (0, 0, 0, 0),
    }
class Chesapeake13(Chesapeake):
    """Complete 13-class dataset.
    This version of the dataset is composed of 13 classes:
    0. No Data: Background values
    1. Water: All areas of open water including ponds, rivers, and lakes
    2. Wetlands: Low vegetation areas located along marine or estuarine regions
    3. Tree Canopy: Deciduous and evergreen woody vegetation over 3-5 meters in height
    4. Shrubland: Heterogeneous woody vegetation including shrubs and young trees
    5. Low Vegetation: Plant material less than 2 meters in height including lawns
    6. Barren: Areas devoid of vegetation consisting of natural earthen material
    7. Structures: Human-constructed objects made of impervious materials
    8. Impervious Surfaces: Human-constructed surfaces less than 2 meters in height
    9. Impervious Roads: Impervious surfaces that are used for transportation
    10. Tree Canopy over Structures: Tree cover overlapping impervious structures
    11. Tree Canopy over Impervious Surfaces: Tree cover overlapping impervious surfaces
    12. Tree Canopy over Impervious Roads: Tree cover overlapping impervious roads
    13. Aberdeen Proving Ground: U.S. Army facility with no labels
    """
    # Location metadata consumed by Chesapeake.url/_verify/_download.
    base_folder = "BAYWIDE"
    filename = "Baywide_13Class_20132014.tif"
    zipfile = "Baywide_13Class_20132014.zip"
    md5 = "7e51118923c91e80e6e268156d25a4b9"
class ChesapeakeDC(Chesapeake):
    """This subset of the dataset contains data only for Washington, D.C."""
    base_folder = "DC"
    # DC is the only subset whose raster is nested in a subdirectory of the zip.
    filename = os.path.join("DC_11001", "DC_11001.img")
    zipfile = "DC_11001.zip"
    md5 = "ed06ba7570d2955e8857d7d846c53b06"
class ChesapeakeDE(Chesapeake):
    """This subset of the dataset contains data only for Delaware."""
    base_folder = "DE"
    filename = "DE_STATEWIDE.tif"
    zipfile = "_DE_STATEWIDE.zip"  # leading underscore matches the remote archive name
    md5 = "5e12eff3b6950c01092c7e480b38e544"
class ChesapeakeMD(Chesapeake):
    """This subset of the dataset contains data only for Maryland."""
    base_folder = "MD"
    filename = "MD_STATEWIDE.tif"
    zipfile = "_MD_STATEWIDE.zip"  # leading underscore matches the remote archive name
    md5 = "40c7cd697a887f2ffdb601b5c114e567"
class ChesapeakeNY(Chesapeake):
    """This subset of the dataset contains data only for New York."""
    base_folder = "NY"
    filename = "NY_STATEWIDE.tif"
    zipfile = "_NY_STATEWIDE.zip"  # leading underscore matches the remote archive name
    md5 = "1100078c526616454ef2e508affda915"
class ChesapeakePA(Chesapeake):
    """This subset of the dataset contains data only for Pennsylvania."""
    base_folder = "PA"
    filename = "PA_STATEWIDE.tif"
    zipfile = "_PA_STATEWIDE.zip"  # leading underscore matches the remote archive name
    md5 = "20a2a857c527a4dbadd6beed8b47e5ab"
class ChesapeakeVA(Chesapeake):
    """This subset of the dataset contains data only for Virginia."""
    base_folder = "VA"
    # Virginia's raster carries a "CIC2014" prefix, unlike the other states.
    filename = "CIC2014_VA_STATEWIDE.tif"
    zipfile = "_VA_STATEWIDE.zip"
    md5 = "6f2c97deaf73bb3e1ea9b21bd7a3fc8e"
class ChesapeakeWV(Chesapeake):
    """This subset of the dataset contains data only for West Virginia."""
    base_folder = "WV"
    filename = "WV_STATEWIDE.tif"
    zipfile = "_WV_STATEWIDE.zip"  # leading underscore matches the remote archive name
    md5 = "350621ea293651fbc557a1c3e3c64cc3"
class ChesapeakeCVPR(GeoDataset):
    """CVPR 2019 Chesapeake Land Cover dataset.

    The `CVPR 2019 Chesapeake Land Cover
    <https://lila.science/datasets/chesapeakelandcover>`_ dataset contains two layers of
    NAIP aerial imagery, Landsat 8 leaf-on and leaf-off imagery, Chesapeake Bay land
    cover labels, NLCD land cover labels, and Microsoft building footprint labels.
    This dataset was organized to accompany the 2019 CVPR paper, "Large Scale
    High-Resolution Land Cover Mapping with Multi-Resolution Data".

    The paper "Resolving label uncertainty with implicit generative models" added an
    additional layer of data to this dataset containing a prior over the Chesapeake Bay
    land cover classes generated from the NLCD land cover labels. For more information
    about this layer see `the dataset documentation
    <https://zenodo.org/record/5652512#.YcuAIZLMIQ8>`_.

    If you use this dataset in your research, please cite the following paper:

    * https://doi.org/10.1109/cvpr.2019.01301
    """

    subdatasets = ["base", "prior_extension"]
    urls = {
        "base": "https://lilablobssc.blob.core.windows.net/lcmcvpr2019/cvpr_chesapeake_landcover.zip",  # noqa: E501
        "prior_extension": "https://zenodo.org/record/5652512/files/cvpr_chesapeake_landcover_prior_extension.zip?download=1",  # noqa: E501
    }
    filenames = {
        "base": "cvpr_chesapeake_landcover.zip",
        "prior_extension": "cvpr_chesapeake_landcover_prior_extension.zip",
    }
    md5s = {
        "base": "1225ccbb9590e9396875f221e5031514",
        "prior_extension": "8f43ec30e155274dd652e157c48d2598",
    }

    crs = CRS.from_epsg(3857)
    res = 1

    valid_layers = [
        "naip-new",
        "naip-old",
        "landsat-leaf-on",
        "landsat-leaf-off",
        "nlcd",
        "lc",
        "buildings",
        "prior_from_cooccurrences_101_31_no_osm_no_buildings",
    ]
    states = ["de", "md", "va", "wv", "pa", "ny"]
    splits = (
        [f"{state}-train" for state in states]
        + [f"{state}-val" for state in states]
        + [f"{state}-test" for state in states]
    )

    # these are used to check the integrity of the dataset
    files = [
        "de_1m_2013_extended-debuffered-test_tiles",
        "de_1m_2013_extended-debuffered-train_tiles",
        "de_1m_2013_extended-debuffered-val_tiles",
        "md_1m_2013_extended-debuffered-test_tiles",
        "md_1m_2013_extended-debuffered-train_tiles",
        "md_1m_2013_extended-debuffered-val_tiles",
        "ny_1m_2013_extended-debuffered-test_tiles",
        "ny_1m_2013_extended-debuffered-train_tiles",
        "ny_1m_2013_extended-debuffered-val_tiles",
        "pa_1m_2013_extended-debuffered-test_tiles",
        "pa_1m_2013_extended-debuffered-train_tiles",
        "pa_1m_2013_extended-debuffered-val_tiles",
        "va_1m_2014_extended-debuffered-test_tiles",
        "va_1m_2014_extended-debuffered-train_tiles",
        "va_1m_2014_extended-debuffered-val_tiles",
        "wv_1m_2014_extended-debuffered-test_tiles",
        "wv_1m_2014_extended-debuffered-train_tiles",
        "wv_1m_2014_extended-debuffered-val_tiles",
        "wv_1m_2014_extended-debuffered-val_tiles/m_3708035_ne_17_1_buildings.tif",
        "wv_1m_2014_extended-debuffered-val_tiles/m_3708035_ne_17_1_landsat-leaf-off.tif",  # noqa: E501
        "wv_1m_2014_extended-debuffered-val_tiles/m_3708035_ne_17_1_landsat-leaf-on.tif",  # noqa: E501
        "wv_1m_2014_extended-debuffered-val_tiles/m_3708035_ne_17_1_lc.tif",
        "wv_1m_2014_extended-debuffered-val_tiles/m_3708035_ne_17_1_naip-new.tif",
        "wv_1m_2014_extended-debuffered-val_tiles/m_3708035_ne_17_1_naip-old.tif",
        "wv_1m_2014_extended-debuffered-val_tiles/m_3708035_ne_17_1_nlcd.tif",
        "wv_1m_2014_extended-debuffered-val_tiles/m_3708035_ne_17_1_prior_from_cooccurrences_101_31_no_osm_no_buildings.tif",  # noqa: E501
        "spatial_index.geojson",
    ]

    # Pre-built transforms that project the epsg:3857 query geometry into the
    # CRS of each source tile before masking.
    p_src_crs = pyproj.CRS("epsg:3857")
    p_transformers = {
        "epsg:26917": pyproj.Transformer.from_crs(
            p_src_crs, pyproj.CRS("epsg:26917"), always_xy=True
        ).transform,
        "epsg:26918": pyproj.Transformer.from_crs(
            p_src_crs, pyproj.CRS("epsg:26918"), always_xy=True
        ).transform,
    }

    def __init__(
        self,
        root: str = "data",
        splits: Sequence[str] = ["de-train"],
        layers: List[str] = ["naip-new", "lc"],
        transforms: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None,
        cache: bool = True,
        download: bool = False,
        checksum: bool = False,
    ) -> None:
        """Initialize a new Dataset instance.

        Args:
            root: root directory where dataset can be found
            splits: a list of strings in the format "{state}-{train,val,test}"
                indicating the subset of data to use, for example "ny-train"
            layers: a list containing a subset of "naip-new", "naip-old", "lc", "nlcd",
                "landsat-leaf-on", "landsat-leaf-off", "buildings", or
                "prior_from_cooccurrences_101_31_no_osm_no_buildings" indicating which
                layers to load
            transforms: a function/transform that takes an input sample
                and returns a transformed version
            cache: if True, cache file handle to speed up repeated sampling
            download: if True, download dataset and store it in the root directory
            checksum: if True, check the MD5 of the downloaded files (may be slow)

        Raises:
            FileNotFoundError: if no files are found in ``root``
            RuntimeError: if ``download=False`` but dataset is missing or checksum fails
        """
        for split in splits:
            assert split in self.splits
        assert all([layer in self.valid_layers for layer in layers])
        self.root = root
        self.layers = layers
        self.cache = cache
        self.download = download
        self.checksum = checksum

        self._verify()

        super().__init__(transforms)

        # Add all tiles into the index in epsg:3857 based on the included geojson
        mint: float = 0
        maxt: float = sys.maxsize
        with fiona.open(os.path.join(root, "spatial_index.geojson"), "r") as f:
            for i, row in enumerate(f):
                if row["properties"]["split"] in splits:
                    box = shapely.geometry.shape(row["geometry"])
                    minx, miny, maxx, maxy = box.bounds
                    coords = (minx, maxx, miny, maxy, mint, maxt)

                    # The geojson only lists the "lc" filename; the prior layer
                    # file lives next to it with a different suffix.
                    prior_fn = row["properties"]["lc"].replace(
                        "lc.tif",
                        "prior_from_cooccurrences_101_31_no_osm_no_buildings.tif",
                    )

                    self.index.insert(
                        i,
                        coords,
                        {
                            "naip-new": row["properties"]["naip-new"],
                            "naip-old": row["properties"]["naip-old"],
                            "landsat-leaf-on": row["properties"]["landsat-leaf-on"],
                            "landsat-leaf-off": row["properties"]["landsat-leaf-off"],
                            "lc": row["properties"]["lc"],
                            "nlcd": row["properties"]["nlcd"],
                            "buildings": row["properties"]["buildings"],
                            "prior_from_cooccurrences_101_31_no_osm_no_buildings": prior_fn,  # noqa: E501
                        },
                    )

    def __getitem__(self, query: BoundingBox) -> Dict[str, Any]:
        """Retrieve image/mask and metadata indexed by query.

        Args:
            query: (minx, maxx, miny, maxy, mint, maxt) coordinates to index

        Returns:
            sample of image/mask and metadata at that index

        Raises:
            IndexError: if query is not found in the index
        """
        hits = self.index.intersection(tuple(query), objects=True)
        filepaths = [hit.object for hit in hits]

        sample = {"image": [], "mask": [], "crs": self.crs, "bbox": query}

        if len(filepaths) == 0:
            raise IndexError(
                f"query: {query} not found in index with bounds: {self.bounds}"
            )
        elif len(filepaths) == 1:
            filenames = filepaths[0]
            query_geom_transformed = None  # is set by the first layer

            minx, maxx, miny, maxy, mint, maxt = query
            query_box = shapely.geometry.box(minx, miny, maxx, maxy)

            for layer in self.layers:
                fn = filenames[layer]

                with rasterio.open(os.path.join(self.root, fn)) as f:
                    dst_crs = f.crs.to_string().lower()

                    # All tiles indexed by one query share a CRS, so the
                    # reprojected query geometry is computed once and reused.
                    if query_geom_transformed is None:
                        query_box_transformed = shapely.ops.transform(
                            self.p_transformers[dst_crs], query_box
                        ).envelope
                        query_geom_transformed = shapely.geometry.mapping(
                            query_box_transformed
                        )

                    data, _ = rasterio.mask.mask(
                        f, [query_geom_transformed], crop=True, all_touched=True
                    )

                if layer in [
                    "naip-new",
                    "naip-old",
                    "landsat-leaf-on",
                    "landsat-leaf-off",
                ]:
                    sample["image"].append(data)
                elif layer in [
                    "lc",
                    "nlcd",
                    "buildings",
                    "prior_from_cooccurrences_101_31_no_osm_no_buildings",
                ]:
                    sample["mask"].append(data)
        else:
            raise IndexError(f"query: {query} spans multiple tiles which is not valid")

        sample["image"] = np.concatenate(  # type: ignore[no-untyped-call]
            sample["image"], axis=0
        )
        sample["mask"] = np.concatenate(  # type: ignore[no-untyped-call]
            sample["mask"], axis=0
        )

        sample["image"] = torch.from_numpy(  # type: ignore[attr-defined]
            sample["image"]
        )
        sample["mask"] = torch.from_numpy(sample["mask"])  # type: ignore[attr-defined]

        if self.transforms is not None:
            sample = self.transforms(sample)

        return sample

    def _verify(self) -> None:
        """Verify the integrity of the dataset.

        Raises:
            RuntimeError: if ``download=False`` but dataset is missing or checksum fails
        """
        # Check if the extracted files already exist
        def exists(filename: str) -> bool:
            return os.path.exists(os.path.join(self.root, filename))

        if all(map(exists, self.files)):
            return

        # Check if the zip files have already been downloaded
        if all(
            [
                os.path.exists(os.path.join(self.root, self.filenames[subdataset]))
                for subdataset in self.subdatasets
            ]
        ):
            self._extract()
            return

        # Check if the user requested to download the dataset
        if not self.download:
            raise RuntimeError(
                f"Dataset not found in `root={self.root}` and `download=False`, "
                "either specify a different `root` directory or use `download=True` "
                "to automatically download the dataset."
            )

        # Download the dataset
        self._download()
        self._extract()

    def _download(self) -> None:
        """Download the dataset."""
        for subdataset in self.subdatasets:
            download_url(
                self.urls[subdataset],
                self.root,
                filename=self.filenames[subdataset],
                md5=self.md5s[subdataset],
            )

    def _extract(self) -> None:
        """Extract the dataset."""
        for subdataset in self.subdatasets:
            extract_archive(os.path.join(self.root, self.filenames[subdataset]))
| [
"os.path.join",
"torch.from_numpy",
"pyproj.CRS",
"rasterio.crs.CRS.from_epsg",
"numpy.concatenate",
"rasterio.mask.mask"
] | [((7954, 7994), 'os.path.join', 'os.path.join', (['"""DC_11001"""', '"""DC_11001.img"""'], {}), "('DC_11001', 'DC_11001.img')\n", (7966, 7994), False, 'import os\n'), ((11184, 11203), 'rasterio.crs.CRS.from_epsg', 'CRS.from_epsg', (['(3857)'], {}), '(3857)\n', (11197, 11203), False, 'from rasterio.crs import CRS\n'), ((13505, 13528), 'pyproj.CRS', 'pyproj.CRS', (['"""epsg:3857"""'], {}), "('epsg:3857')\n", (13515, 13528), False, 'import pyproj\n'), ((19495, 19534), 'numpy.concatenate', 'np.concatenate', (["sample['image']"], {'axis': '(0)'}), "(sample['image'], axis=0)\n", (19509, 19534), True, 'import numpy as np\n'), ((19615, 19653), 'numpy.concatenate', 'np.concatenate', (["sample['mask']"], {'axis': '(0)'}), "(sample['mask'], axis=0)\n", (19629, 19653), True, 'import numpy as np\n'), ((19736, 19769), 'torch.from_numpy', 'torch.from_numpy', (["sample['image']"], {}), "(sample['image'])\n", (19752, 19769), False, 'import torch\n'), ((19847, 19879), 'torch.from_numpy', 'torch.from_numpy', (["sample['mask']"], {}), "(sample['mask'])\n", (19863, 19879), False, 'import torch\n'), ((4125, 4163), 'os.path.join', 'os.path.join', (['self.root', 'self.filename'], {}), '(self.root, self.filename)\n', (4137, 4163), False, 'import os\n'), ((4272, 4309), 'os.path.join', 'os.path.join', (['self.root', 'self.zipfile'], {}), '(self.root, self.zipfile)\n', (4284, 4309), False, 'import os\n'), ((5044, 5081), 'os.path.join', 'os.path.join', (['self.root', 'self.zipfile'], {}), '(self.root, self.zipfile)\n', (5056, 5081), False, 'import os\n'), ((13626, 13650), 'pyproj.CRS', 'pyproj.CRS', (['"""epsg:26917"""'], {}), "('epsg:26917')\n", (13636, 13650), False, 'import pyproj\n'), ((13762, 13786), 'pyproj.CRS', 'pyproj.CRS', (['"""epsg:26918"""'], {}), "('epsg:26918')\n", (13772, 13786), False, 'import pyproj\n'), ((15786, 15829), 'os.path.join', 'os.path.join', (['root', '"""spatial_index.geojson"""'], {}), "(root, 'spatial_index.geojson')\n", (15798, 15829), False, 'import 
os\n'), ((20347, 20380), 'os.path.join', 'os.path.join', (['self.root', 'filename'], {}), '(self.root, filename)\n', (20359, 20380), False, 'import os\n'), ((21643, 21694), 'os.path.join', 'os.path.join', (['self.root', 'self.filenames[subdataset]'], {}), '(self.root, self.filenames[subdataset])\n', (21655, 21694), False, 'import os\n'), ((20567, 20618), 'os.path.join', 'os.path.join', (['self.root', 'self.filenames[subdataset]'], {}), '(self.root, self.filenames[subdataset])\n', (20579, 20618), False, 'import os\n'), ((18740, 18816), 'rasterio.mask.mask', 'rasterio.mask.mask', (['f', '[query_geom_transformed]'], {'crop': '(True)', 'all_touched': '(True)'}), '(f, [query_geom_transformed], crop=True, all_touched=True)\n', (18758, 18816), False, 'import rasterio\n'), ((18237, 18264), 'os.path.join', 'os.path.join', (['self.root', 'fn'], {}), '(self.root, fn)\n', (18249, 18264), False, 'import os\n')] |
""" Unit test file. """
import unittest
import numpy as np
from ..CellVar import CellVar as c
# pylint: disable=protected-access
class TestModel(unittest.TestCase):
    """Unit tests for the CellVar cell class."""

    @staticmethod
    def _identity_transition():
        """Transition matrix with zero probability of changing state."""
        return np.array([[1.0, 0.0], [0.0, 1.0]])

    def test_cellVar(self):
        """Cell state assignment should match the constructor argument."""
        for state in (0, 1):
            cell = c(state=state, parent=None, gen=1)
            self.assertEqual(cell.state, state)

    def test_cell_divide(self):
        """Division links two next-generation children to their parent."""
        T = self._identity_transition()
        for parent_state in (1, 0):
            parent = c(state=parent_state, parent=None, gen=1)
            left, right = parent.divide(T)
            # With an identity transition matrix the children keep the state.
            self.assertEqual(left.state, parent_state)
            self.assertEqual(right.state, parent_state)
            self.assertIs(left.parent, parent)
            self.assertIs(right.parent, parent)
            self.assertIs(parent.left, left)
            self.assertIs(parent.right, right)
            self.assertFalse(parent.parent)
            self.assertEqual(parent.gen, 1)
            self.assertEqual(left.gen, 2)
            self.assertEqual(right.gen, 2)

    def test_isRootParent(self):
        """Only the founding cell reports itself as the root parent."""
        parent = c(state=1, parent=None, gen=1)
        left, right = parent.divide(self._identity_transition())
        self.assertTrue(parent.isRootParent())
        self.assertFalse(left.isRootParent())
        self.assertFalse(right.isRootParent())

    def test_isLeafBecauseTerminal(self):
        """Cells without children are leaves; dividing makes a cell internal."""
        parent = c(state=1, parent=None, gen=1)
        self.assertTrue(parent.isLeafBecauseTerminal())
        left, right = parent.divide(self._identity_transition())
        self.assertFalse(parent.isLeafBecauseTerminal())
        self.assertTrue(left.isLeafBecauseTerminal())
        self.assertTrue(right.isLeafBecauseTerminal())

    def test_get_sister(self):
        """Each daughter's sister is the other daughter."""
        parent = c(state=1, parent=None, gen=1)
        left, right = parent.divide(self._identity_transition())
        self.assertIs(left.get_sister(), right)
        self.assertIs(right.get_sister(), left)

    def test_get_root_cell(self):
        """Every cell in the lineage resolves to the founding cell."""
        parent = c(state=1, parent=None, gen=1)
        self.assertIs(parent.get_root_cell(), parent)
        left, right = parent.divide(self._identity_transition())
        self.assertIs(left.get_root_cell(), parent)
        self.assertIs(right.get_root_cell(), parent)
| [
"numpy.array"
] | [((704, 738), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (712, 738), True, 'import numpy as np\n'), ((2027, 2061), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (2035, 2061), True, 'import numpy as np\n'), ((2457, 2491), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (2465, 2491), True, 'import numpy as np\n'), ((2952, 2986), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (2960, 2986), True, 'import numpy as np\n'), ((3346, 3380), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (3354, 3380), True, 'import numpy as np\n')] |
# %%
# from ._preprocessors import CensorData, Arcsinh, ReduceLocal
# from ..utils.general import make_iterable, _check_is_fitted, is_fitted
import pandas as pd
import numpy as np
import warnings
from skimage.measure import regionprops, regionprops_table
# from sklearn.preprocessing import StandardScaler
# %%
def extract_centroids(so, spl, mask_key='cellmasks', inplace=True):
    """Extract cell centroids from segmentation masks.

    Args:
        so: SpatialOmics instance
        spl: sample for which to extract centroids
        mask_key: segmentation masks to use
        inplace: whether to modify ``so`` in place or return a modified copy

    Returns:
        None if ``inplace`` is True, otherwise the modified copy of ``so``.
    """
    so = so if inplace else so.copy()

    mask = so.get_mask(spl, mask_key)
    props = regionprops_table(mask, properties=['label', 'centroid'])
    centroids = pd.DataFrame.from_dict(props)
    # regionprops reports (row, col) == (y, x): axis 0 is y and axis 1 is x
    centroids.columns = ['cell_id', 'y', 'x']
    centroids = centroids.set_index('cell_id').sort_index(ascending=True)

    if spl in so.obs:
        existing = so.obs[spl]
        # Replace any previously extracted coordinates before merging.
        if 'x' in existing and 'y' in existing:
            existing = existing.drop(columns=['x', 'y'])
        so.obs[spl] = pd.concat((existing, centroids), axis=1)
    else:
        so.obs[spl] = centroids

    if not inplace:
        return so
def extract_image_properties(so, spl, inplace=True):
    """Extract basic image properties (height, width, area) for a sample.

    Args:
        so: SpatialOmics instance
        spl: sample for which to extract image properties
        inplace: whether to modify ``so`` in place or return a modified copy

    Returns:
        None if ``inplace`` is True, otherwise the modified copy of ``so``.
    """
    so = so if inplace else so.copy()

    img = so.get_image(spl)
    # NOTE(review): assumes channel-first layout, i.e. img.shape == (C, H, W)
    # -- TODO confirm against SpatialOmics.get_image.
    data = list(img.shape[1:])
    data.append(data[0] * data[1])  # area = height * width

    # Add only the columns that are actually missing. The previous version
    # concatenated all three columns whenever *any* was absent, which could
    # create duplicate columns in so.spl.
    cols = ['height', 'width', 'area']
    missing = [col for col in cols if col not in so.spl.columns]
    if missing:
        so.spl = pd.concat((so.spl, pd.DataFrame(columns=missing)), axis=1)

    so.spl.loc[spl, cols] = data

    if not inplace:
        return so
return so | [
"skimage.measure.regionprops_table",
"pandas.DataFrame.from_dict",
"pandas.DataFrame",
"numpy.all",
"pandas.concat"
] | [((791, 848), 'skimage.measure.regionprops_table', 'regionprops_table', (['mask'], {'properties': "['label', 'centroid']"}), "(mask, properties=['label', 'centroid'])\n", (808, 848), False, 'from skimage.measure import regionprops, regionprops_table\n'), ((861, 890), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['ndata'], {}), '(ndata)\n', (883, 890), True, 'import pandas as pd\n'), ((1236, 1275), 'pandas.concat', 'pd.concat', (['(so.obs[spl], ndata)'], {'axis': '(1)'}), '((so.obs[spl], ndata), axis=1)\n', (1245, 1275), True, 'import pandas as pd\n'), ((1781, 1849), 'numpy.all', 'np.all', (["[(i in so.spl.columns) for i in ['height', 'width', 'area']]"], {}), "([(i in so.spl.columns) for i in ['height', 'width', 'area']])\n", (1787, 1849), True, 'import numpy as np\n'), ((1885, 1934), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['height', 'width', 'area']"}), "(columns=['height', 'width', 'area'])\n", (1897, 1934), True, 'import pandas as pd\n')] |
import os,sys,time
import numpy as np
import copy
import math
import torch
import torch.nn.functional as F
from .utils import BayesianSGD
class Appr(object):
    def __init__(self,model,args,lr_min=1e-6,lr_factor=3,lr_patience=5,clipgrad=1000):
        """Set up the Bayesian continual-learning training approach.

        Args:
            model: Bayesian neural network to train (exposes per-layer
                weight_mu/weight_rho/bias_mu/bias_rho parameters).
            args: experiment configuration namespace (device, lr, sbatch,
                nepochs, arch, samples, output, checkpoint, experiment,
                num_tasks).
            lr_min: lower bound for the learning rate during annealing.
            lr_factor: divisor applied to the lr when validation stalls.
            lr_patience: epochs without improvement before the lr is reduced.
            clipgrad: gradient clipping threshold (stored; not used in the
                methods shown here -- presumably used elsewhere).
        """
        self.model=model
        self.device = args.device

        # Learning-rate annealing schedule parameters.
        self.lr_min=lr_min
        self.lr_factor=lr_factor
        self.lr_patience=lr_patience
        self.clipgrad=clipgrad

        self.init_lr=args.lr
        self.sbatch=args.sbatch
        self.nepochs=args.nepochs

        self.arch=args.arch
        self.samples=args.samples  # number of Monte Carlo samples for the ELBO
        self.lambda_=1.

        self.output=args.output
        self.checkpoint = args.checkpoint
        self.experiment=args.experiment
        self.num_tasks=args.num_tasks

        # Cached dotted names of parameterized submodules, with and without
        # the classifier heads.
        self.modules_names_with_cls = self.find_modules_names(with_classifier=True)
        self.modules_names_without_cls = self.find_modules_names(with_classifier=False)
    def train(self,t,xtrain,ytrain,xvalid,yvalid):
        """Train the model on task ``t`` with early stopping and lr annealing.

        Tracks the best validation loss; when it fails to improve for
        ``lr_patience`` epochs, the learning rate is divided by ``lr_factor``
        (down to ``lr_min``). The best state dict is restored at the end.
        """
        # Update the next learning rate for each parameter based on their uncertainty
        params_dict = self.update_lr(t)
        self.optimizer = BayesianSGD(params=params_dict)

        best_loss=np.inf
        # best_model=copy.deepcopy(self.model)
        best_model = copy.deepcopy(self.model.state_dict())
        lr = self.init_lr
        patience = self.lr_patience

        # Loop epochs
        try:
            for e in range(self.nepochs):
                # Train
                clock0=time.time()
                self.train_epoch(t,xtrain,ytrain)
                clock1=time.time()
                train_loss,train_acc=self.eval(t,xtrain,ytrain)
                clock2=time.time()
                print('| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train: loss={:.3f}, acc={:5.1f}% |'.format(e+1,
                    1000*self.sbatch*(clock1-clock0)/xtrain.size(0),1000*self.sbatch*(clock2-clock1)/xtrain.size(0),
                    train_loss,100*train_acc),end='')
                # Valid
                valid_loss,valid_acc=self.eval(t,xvalid,yvalid)
                print(' Valid: loss={:.3f}, acc={:5.1f}% |'.format(valid_loss, 100 * valid_acc), end='')

                # Abort the epoch loop if training diverged.
                if math.isnan(valid_loss) or math.isnan(train_loss):
                    print("saved best model and quit because loss became nan")
                    break

                # Adapt lr
                if valid_loss<best_loss:
                    best_loss=valid_loss
                    best_model=copy.deepcopy(self.model.state_dict())
                    patience=self.lr_patience
                    print(' *',end='')
                else:
                    patience-=1
                    if patience<=0:
                        lr/=self.lr_factor
                        print(' lr={:.1e}'.format(lr),end='')
                        if lr<self.lr_min:
                            print()
                            break
                        patience=self.lr_patience
                        # Rebuild the optimizer so the reduced lr applies to
                        # the rho (uncertainty) parameters.
                        params_dict = self.update_lr(t, adaptive_lr=True, lr=lr)
                        self.optimizer=BayesianSGD(params=params_dict)
                print()
        except KeyboardInterrupt:
            print()

        # Restore best
        self.model.load_state_dict(copy.deepcopy(best_model))
        # NOTE(review): save_model is not defined in this chunk -- presumably
        # implemented elsewhere in the class.
        self.save_model(t)
def update_lr(self,t, lr=None, adaptive_lr=False):
params_dict = []
if t==0:
params_dict.append({'params': self.model.parameters(), 'lr': self.init_lr})
else:
for name in self.modules_names_without_cls:
n = name.split('.')
if len(n) == 1:
m = self.model._modules[n[0]]
elif len(n) == 3:
m = self.model._modules[n[0]]._modules[n[1]]._modules[n[2]]
elif len(n) == 4:
m = self.model._modules[n[0]]._modules[n[1]]._modules[n[2]]._modules[n[3]]
else:
print (name)
if adaptive_lr is True:
params_dict.append({'params': m.weight_rho, 'lr': lr})
params_dict.append({'params': m.bias_rho, 'lr': lr})
else:
w_unc = torch.log1p(torch.exp(m.weight_rho.data))
b_unc = torch.log1p(torch.exp(m.bias_rho.data))
params_dict.append({'params': m.weight_mu, 'lr': torch.mul(w_unc,self.init_lr)})
params_dict.append({'params': m.bias_mu, 'lr': torch.mul(b_unc,self.init_lr)})
params_dict.append({'params': m.weight_rho, 'lr':self.init_lr})
params_dict.append({'params': m.bias_rho, 'lr':self.init_lr})
return params_dict
def find_modules_names(self, with_classifier=False):
modules_names = []
for name, p in self.model.named_parameters():
if with_classifier is False:
if not name.startswith('classifier'):
n = name.split('.')[:-1]
modules_names.append('.'.join(n))
else:
n = name.split('.')[:-1]
modules_names.append('.'.join(n))
modules_names = set(modules_names)
return modules_names
def logs(self,t):
lp, lvp = 0.0, 0.0
for name in self.modules_names_without_cls:
n = name.split('.')
if len(n) == 1:
m = self.model._modules[n[0]]
elif len(n) == 3:
m = self.model._modules[n[0]]._modules[n[1]]._modules[n[2]]
elif len(n) == 4:
m = self.model._modules[n[0]]._modules[n[1]]._modules[n[2]]._modules[n[3]]
lp += m.log_prior
lvp += m.log_variational_posterior
lp += self.model.classifier[t].log_prior
lvp += self.model.classifier[t].log_variational_posterior
return lp, lvp
    def train_epoch(self,t,x,y):
        """Run one training epoch on task ``t`` over shuffled mini-batches."""
        self.model.train()

        # Shuffle sample indices for this epoch.
        r=np.arange(x.size(0))
        np.random.shuffle(r)
        r=torch.LongTensor(r).to(self.device)

        # num_batches scales the KL term inside elbo_loss.
        num_batches = len(x)//self.sbatch
        j=0  # NOTE(review): unused
        # Loop batches
        for i in range(0,len(r),self.sbatch):
            if i+self.sbatch<=len(r): b=r[i:i+self.sbatch]
            else: b=r[i:]
            images, targets = x[b].to(self.device), y[b].to(self.device)

            # Forward
            loss=self.elbo_loss(images,targets,t,num_batches,sample=True).to(self.device)

            # Backward
            # NOTE(review): model.cuda() before and after backward looks
            # redundant and assumes a CUDA device is available -- confirm.
            self.model.cuda()
            self.optimizer.zero_grad()
            loss.backward(retain_graph=True)
            self.model.cuda()

            # Update parameters
            self.optimizer.step()
        return
def eval(self,t,x,y,debug=False):
total_loss=0
total_acc=0
total_num=0
self.model.eval()
r=np.arange(x.size(0))
r=torch.as_tensor(r, device=self.device, dtype=torch.int64)
with torch.no_grad():
num_batches = len(x)//self.sbatch
# Loop batches
for i in range(0,len(r),self.sbatch):
if i+self.sbatch<=len(r): b=r[i:i+self.sbatch]
else: b=r[i:]
images, targets = x[b].to(self.device), y[b].to(self.device)
# Forward
outputs=self.model(images,sample=False)
output=outputs[t]
loss = self.elbo_loss(images, targets, t, num_batches,sample=False,debug=debug)
_,pred=output.max(1, keepdim=True)
total_loss += loss.detach()*len(b)
total_acc += pred.eq(targets.view_as(pred)).sum().item()
total_num += len(b)
return total_loss/total_num, total_acc/total_num
def set_model_(model, state_dict):
model.model.load_state_dict(copy.deepcopy(state_dict))
def elbo_loss(self, input, target, t, num_batches, sample,debug=False):
if sample:
lps, lvps, predictions = [], [], []
for i in range(self.samples):
predictions.append(self.model(input,sample=sample)[t])
lp, lv = self.logs(t)
lps.append(lp)
lvps.append(lv)
# hack
w1 = 1.e-3
w2 = 1.e-3
w3 = 5.e-2
outputs = torch.stack(predictions,dim=0).to(self.device)
log_var = w1*torch.as_tensor(lvps, device=self.device).mean()
log_p = w2*torch.as_tensor(lps, device=self.device).mean()
nll = w3*torch.nn.functional.nll_loss(outputs.mean(0), target, reduction='sum').to(device=self.device)
return (log_var - log_p)/num_batches + nll
else:
predictions = []
for i in range(self.samples):
pred = self.model(input,sample=False)[t]
predictions.append(pred)
# hack
# w1 = 1.e-3
# w2 = 1.e-3
w3 = 5.e-6
outputs = torch.stack(predictions,dim=0).to(self.device)
nll = w3*torch.nn.functional.nll_loss(outputs.mean(0), target, reduction='sum').to(device=self.device)
return nll
# w1, w2, w3 = self.get_coefs(nll,log_var,log_p,num_batches)
# print ("New coefficients for task {} are w1={}, w2={}, w3={}".format(t,w1,w2,w3))
# if math.isnan(log_var) or math.isnan(log_p) or math.isnan(nll):
# nll = torch.nn.functional.nll_loss(outputs.mean(0), target, reduction='sum')
# # if log_var > 1e3 or log_p > 1e3 or nll>1e3:
# print ("BEFORE: ", (log_var/num_batches).item(), (log_p / num_batches).item(), nll.item())
# # while math.isnan(nll):
# # nll = 1e-5*torch.nn.functional.nll_loss(outputs.mean(0), target, reduction='sum')
def save_model(self,t):
torch.save({'model_state_dict': self.model.state_dict(),
}, os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(t)))
# def get_coefs(self,nll,log_var,log_p,num_batches):
# def take_n(num):
# return torch.log10(num).item()
#
# exponents = np.array([take_n(num) for num in [nll, log_p, log_var]])
# min_exp = exponents.min()
# min_exp_idx = np.argmin(exponents)
# if min_exp_idx == 0:
# w1 = (10**(3-(take_n(log_var)+min_exp)))*num_batches
# w2 = (10**-(3-(take_n(log_p)+min_exp)))*num_batches
# w3 = 10.**(3-min_exp_idx)
# if min_exp_idx == 1:
# w1 = (10**(3-(take_n(log_var)+min_exp)))*num_batches
# w3 = 10**(3-(take_n(nll)+min_exp))
# w2 = (10.**-(3-min_exp_idx))*num_batches
# if min_exp_idx == 2:
# w3 = 10**(3-(take_n(nll)+min_exp))
# w2 = (10**-(3-(take_n(log_p)+min_exp)))*num_batches
# w1 = (10.**(3-min_exp_idx))*num_batches
#
# return w1, w2, w3
| [
"torch.mul",
"torch.as_tensor",
"torch.LongTensor",
"torch.stack",
"torch.exp",
"math.isnan",
"copy.deepcopy",
"torch.no_grad",
"time.time",
"numpy.random.shuffle"
] | [((6015, 6035), 'numpy.random.shuffle', 'np.random.shuffle', (['r'], {}), '(r)\n', (6032, 6035), True, 'import numpy as np\n'), ((6897, 6954), 'torch.as_tensor', 'torch.as_tensor', (['r'], {'device': 'self.device', 'dtype': 'torch.int64'}), '(r, device=self.device, dtype=torch.int64)\n', (6912, 6954), False, 'import torch\n'), ((3267, 3292), 'copy.deepcopy', 'copy.deepcopy', (['best_model'], {}), '(best_model)\n', (3280, 3292), False, 'import copy\n'), ((6969, 6984), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6982, 6984), False, 'import torch\n'), ((7852, 7877), 'copy.deepcopy', 'copy.deepcopy', (['state_dict'], {}), '(state_dict)\n', (7865, 7877), False, 'import copy\n'), ((1504, 1515), 'time.time', 'time.time', ([], {}), '()\n', (1513, 1515), False, 'import os, sys, time\n'), ((1589, 1600), 'time.time', 'time.time', ([], {}), '()\n', (1598, 1600), False, 'import os, sys, time\n'), ((1688, 1699), 'time.time', 'time.time', ([], {}), '()\n', (1697, 1699), False, 'import os, sys, time\n'), ((6046, 6065), 'torch.LongTensor', 'torch.LongTensor', (['r'], {}), '(r)\n', (6062, 6065), False, 'import torch\n'), ((2198, 2220), 'math.isnan', 'math.isnan', (['valid_loss'], {}), '(valid_loss)\n', (2208, 2220), False, 'import math\n'), ((2224, 2246), 'math.isnan', 'math.isnan', (['train_loss'], {}), '(train_loss)\n', (2234, 2246), False, 'import math\n'), ((8350, 8381), 'torch.stack', 'torch.stack', (['predictions'], {'dim': '(0)'}), '(predictions, dim=0)\n', (8361, 8381), False, 'import torch\n'), ((9014, 9045), 'torch.stack', 'torch.stack', (['predictions'], {'dim': '(0)'}), '(predictions, dim=0)\n', (9025, 9045), False, 'import torch\n'), ((4248, 4276), 'torch.exp', 'torch.exp', (['m.weight_rho.data'], {}), '(m.weight_rho.data)\n', (4257, 4276), False, 'import torch\n'), ((4318, 4344), 'torch.exp', 'torch.exp', (['m.bias_rho.data'], {}), '(m.bias_rho.data)\n', (4327, 4344), False, 'import torch\n'), ((8422, 8463), 'torch.as_tensor', 'torch.as_tensor', (['lvps'], 
{'device': 'self.device'}), '(lvps, device=self.device)\n', (8437, 8463), False, 'import torch\n'), ((8494, 8534), 'torch.as_tensor', 'torch.as_tensor', (['lps'], {'device': 'self.device'}), '(lps, device=self.device)\n', (8509, 8534), False, 'import torch\n'), ((4416, 4446), 'torch.mul', 'torch.mul', (['w_unc', 'self.init_lr'], {}), '(w_unc, self.init_lr)\n', (4425, 4446), False, 'import torch\n'), ((4515, 4545), 'torch.mul', 'torch.mul', (['b_unc', 'self.init_lr'], {}), '(b_unc, self.init_lr)\n', (4524, 4545), False, 'import torch\n')] |
from cgol import CGOL
import numpy as np
import time
print("Welcome to <NAME>'s solution for the Python Challenge of JDERobot for GSoC-2019!")
t = int(input("Please enter the time step value in ms (int): "))
t = np.clip(t, 50, 1000)
max_iterations = int(input("Please enter the maximum number of iterations (int): "))
max_iterations = np.clip(max_iterations, 10, 1000)
g = CGOL()
d = {1: g.BLOCK, 2: g.BEEHIVE, 3: g.TUB, 4: g.BLINKER, 5: g.TOAD, 6: g.BEACON, 7: g.GLIDER, 8: g.LIGHTSPACESHIP}
for _ in range(20):
print("Please enter the number (1-8) of the pattern you would like to add to the grid or enter 9 for random. To start the game with the selected settings enter 0")
print("0 Start Game\n1 Block \n2 Beehive\n3 Tub\n4 Blinker\n5 Toad\n6 Beacon\n7 Glider\n8 Light Space Ship\n9 Random Grid")
selection = int(input("Your Choice: "))
if selection == 0:
break
elif selection == 9:
g.random_grid()
else:
i,j = map(int,input("Please enter the location for the pattern as two space-seperated integers: ").split())
g.add_object(d[selection], i,j)
print("Press Ctrl+C at any point to exit. The program shall automatically exit after the maximum iterations.")
for counter in range(max_iterations):
g.run_once()
time.sleep(t/1000)
g.move_cursor_down()
print("Thank you for playing!") | [
"numpy.clip",
"cgol.CGOL",
"time.sleep"
] | [((214, 234), 'numpy.clip', 'np.clip', (['t', '(50)', '(1000)'], {}), '(t, 50, 1000)\n', (221, 234), True, 'import numpy as np\n'), ((337, 370), 'numpy.clip', 'np.clip', (['max_iterations', '(10)', '(1000)'], {}), '(max_iterations, 10, 1000)\n', (344, 370), True, 'import numpy as np\n'), ((376, 382), 'cgol.CGOL', 'CGOL', ([], {}), '()\n', (380, 382), False, 'from cgol import CGOL\n'), ((1232, 1252), 'time.sleep', 'time.sleep', (['(t / 1000)'], {}), '(t / 1000)\n', (1242, 1252), False, 'import time\n')] |
from shapey.visualization.histogram import HistogramPlot
import argparse
import numpy as np
import pandas as pd
import os
PROJECT_DIR = os.path.join(os.path.dirname(__file__), '..')
DATA_DIR = os.path.join(PROJECT_DIR, 'data')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='passes data directory and output file path')
parser.add_argument('--input_dir', type=str, default=os.path.join(DATA_DIR, 'processed', 'your_feature.h5'))
parser.add_argument('--output_dir', type=str, default=os.path.join(PROJECT_DIR, 'figures', 'your_feature_figs'))
args = parser.parse_args()
print(args)
input_name = args.input_dir
output_dir = os.path.join(args.output_dir, 'histogram')
os.makedirs(output_dir, exist_ok=True)
output_name = os.path.join(output_dir, 'normalized_correlation_value_histogram_same_different_obj_separated.png')
hdfstore = pd.HDFStore(input_name, 'r')
df_normalized = hdfstore['/pairwise_correlation/histogram_normalized/']
xvals = np.array(df_normalized.index)
labels = list(df_normalized.columns)
hists = [df_normalized[l].values for l in labels]
print('making histogram plots...')
histplot = HistogramPlot(xvals, hists, labels)
f = histplot.make_figure()
f.write_image(output_name)
print('done!') | [
"shapey.visualization.histogram.HistogramPlot",
"os.makedirs",
"argparse.ArgumentParser",
"os.path.join",
"os.path.dirname",
"numpy.array",
"pandas.HDFStore"
] | [((194, 227), 'os.path.join', 'os.path.join', (['PROJECT_DIR', '"""data"""'], {}), "(PROJECT_DIR, 'data')\n", (206, 227), False, 'import os\n'), ((150, 175), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (165, 175), False, 'import os\n'), ((269, 355), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""passes data directory and output file path"""'}), "(description=\n 'passes data directory and output file path')\n", (292, 355), False, 'import argparse\n'), ((680, 722), 'os.path.join', 'os.path.join', (['args.output_dir', '"""histogram"""'], {}), "(args.output_dir, 'histogram')\n", (692, 722), False, 'import os\n'), ((727, 765), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (738, 765), False, 'import os\n'), ((784, 887), 'os.path.join', 'os.path.join', (['output_dir', '"""normalized_correlation_value_histogram_same_different_obj_separated.png"""'], {}), "(output_dir,\n 'normalized_correlation_value_histogram_same_different_obj_separated.png')\n", (796, 887), False, 'import os\n'), ((900, 928), 'pandas.HDFStore', 'pd.HDFStore', (['input_name', '"""r"""'], {}), "(input_name, 'r')\n", (911, 928), True, 'import pandas as pd\n'), ((1018, 1047), 'numpy.array', 'np.array', (['df_normalized.index'], {}), '(df_normalized.index)\n', (1026, 1047), True, 'import numpy as np\n'), ((1198, 1233), 'shapey.visualization.histogram.HistogramPlot', 'HistogramPlot', (['xvals', 'hists', 'labels'], {}), '(xvals, hists, labels)\n', (1211, 1233), False, 'from shapey.visualization.histogram import HistogramPlot\n'), ((408, 462), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""processed"""', '"""your_feature.h5"""'], {}), "(DATA_DIR, 'processed', 'your_feature.h5')\n", (420, 462), False, 'import os\n'), ((522, 579), 'os.path.join', 'os.path.join', (['PROJECT_DIR', '"""figures"""', '"""your_feature_figs"""'], {}), "(PROJECT_DIR, 'figures', 'your_feature_figs')\n", (534, 
579), False, 'import os\n')] |
# -*- coding: iso-8859-15 -*-
#
# profiler.py
#
# Copyright (C) 2016 <NAME>, Universidad de Granada
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
# For additional information, contact to:
# <NAME>
# Dpto. Geodinamica-Universidad de Granada
# 18071 Granada, Spain
# <EMAIL> // <EMAIL>
# Version: 3.0
# March 02, 2017
# Last modified 29 November, 2017
import numpy as np
PROFILE_DEFAULT = {'name': "", 'thetaref': 0.45, 'chi0': 0, 'reg_points': 4, 'srs': "", 'smooth': 0}
class TProfile:
"""
Properties:
============================
self.dem_res :: *float*
Dem resolution of the Digital elevation model used to retreive area-elevation data
self.rid :: *int*
Indentifier of the profile
self.thetaref :: *float*
Value of m/n used in area-slope calculations
self.reg_points :: *int*
Number of points used initially in slope and ksn regressions
self.n_points :: *int*
Number of vertexes of the profile
self._data :: *numpy.array*
11-column numpy.array with profile data
======= ==============================================
Column Description
======= ==============================================
c0 X Coordinates of river profile vertex
c1 Y Coordinates of river profile vertex
c2 Z Elevation of the river profile vertex
c3 L Distance to river head
c4 A Drainage area to the vertex
c5 Chi (Integral mode)
c6 Slope of river profile in each vertex
c7 ksn values for each vertex (calculated by linear regression)
c8 Quality slope, correlation coefficient (r^2) of slope regressions
c9 Quality ksn, correlation coefficient (r^2) of ksn regressions
c10 Raw Z Elevation of the river profile vertex (used to reset the profile)
======= ==============================================
"""
def __init__(self, pf_data, dem_res=0, rid=0, thetaref=0.45, chi0=0, slp_reg_points=4, ksn_reg_points=4, srs="", name="", mouthdist=0,
smooth=0):
"""
Class that defines a river profile with morphometry capabilities.
:param pf_data: *numpy array* - Array with input values
:param dem_res: *float* - Resolution of the DEM used to extract profile features
:param rid: *int* - Profile Identifier
:param thetaref: *float* - Thetaref (m/n) value used to calculate Chi and Ksn indexes
:param chi0: *float* - Value of chi index for first point (for tributaries)
:param name: *str* - Profile name. It will used as the profile label
:param reg_points: *int* - Number of points (at each side) to calculate initial slope and ksn for each vertex
:param srs: *str* - Spatial Reference system expresed as well knwon text (wkt)
:param mouthdist: *float* - Distance from profile to the river mouth (for tributaries)
:param smooth: *float* - Initial distance to smooth elevations (before to calculate slopes and ksn)
pf_data param: (numpy.array) with at least 5 columns:
======= ==============================================
Column Description
======= ==============================================
c0 X Coordinates of river profile vertex
c1 Y Coordinates of river profile vertex
c2 Z Elevation of the river profile vertex
c3 L Distance to head (or to the first vertex)
c4 A Drainage area of the vertex (in square meters!)
======= ==============================================
"""
# Set profile properties
self._srs = srs # WKT with the Spatial Reference
self._mouthdist = float(mouthdist)
self._chi0 = chi0
self._smooth_win = smooth
self.dem_res = float(dem_res)
self.rid = int(rid)
if name == "":
self.name = str(rid)
else:
self.name = str(name)
self.thetaref = abs(thetaref)
self.slope_reg_points = slp_reg_points
self.ksn_reg_points = ksn_reg_points
self.n_points = pf_data.shape[0]
# Get profile data from pf_data array
aux_values = np.empty((pf_data.shape[0], 6))
aux_values.fill(np.nan)
self._data = np.append(pf_data, aux_values, axis=1)
# Set raw elevations
self._data[:, 10] = np.copy(self._data[:, 2])
# Smooth profile elevations before to calculate ksn and chi
self.smooth(self._smooth_win)
# Create slopes, chi and ksn values
self.calculate_slope(self.slope_reg_points)
self.calculate_chi(chi0=chi0)
self.calculate_ksn(self.ksn_reg_points)
def get_projection(self):
"""
Returns a string with the projection as wkt
"""
return self._srs
def set_projection(self, projection):
"""
Set the projection of the profile
:param projection: str - String with the projection in wkt
:return: None
"""
self._srs = projection
def length(self):
"""
Returns the total length of the profile
"""
return self._data[-1, 3]
def get_x(self, head=True):
"""
Returns x coordinates for all vertices
:param head: boolean - Specifies if x coordinates are returned from head (True) or mouth (False)
:return: numpy.array wiht x values for all vertices
"""
if head:
return np.copy(self._data[:, 0])
else:
return np.copy(self._data[::-1, 0])
def get_y(self, head=True):
"""
Returns y coordinates for all vertices
:param head: boolean - Specifies if y coordinates are returned from head (True) or mouth (False)
:return: numpy.array wiht y values for all vertices
"""
if head:
return np.copy(self._data[:, 1])
else:
return np.copy(self._data[::-1, 1])
def get_z(self, head=True, relative=False):
"""
Returns elevation values for all vertices
:param head: boolean - Specifies if elevations are returned from head (True) or mouth (False)
:param relative: boolean - Specifies if elevations are relative (min elevation = 0) or not
:return: numpy.array wiht elevation values for all vertices
"""
z_values = np.copy(self._data[:, 2])
if relative:
z_min = z_values[-1]
z_values -= z_min
if head:
return z_values
else:
return z_values[::-1]
def get_raw_z(self, head=True, relative=False):
"""
Returns raw elevation values for all vertices
:param head: boolean - Specifies if raw elevations are returned from head (True) or mouth (False)
:param relative: boolean - Specifies if elevations are relative (min elevation = 0) or not
:return: numpy.array wiht elevation values for all vertices
"""
raw_z = np.copy(self._data[:, 10])
if relative:
z_min = raw_z[-1]
raw_z -= z_min
if head:
return raw_z
else:
return raw_z[::-1]
def get_l(self, head=True):
"""
Returns a numpy.array with distances for all profile vertices
:param head: boolean - Specifies if distances are returned from head (True) or mouth (False)
If measured from mouth, a initial distance (self._mouthdist) will be added (to account for tributaries)
:return: numpy.array with distances for all vertices (measured from head or mouth)
"""
river_length = float(self._data[-1, 3])
if head:
li = np.copy(self._data[:, 3])
else:
li = river_length - self._data[:, 3] + self._mouthdist
li = li[::-1]
return li
def get_area(self, head=True):
"""
Returns a numpy.array with drainage area values for all vertices
:param head: boolean - Specifies if areas are returned from head (True) or mouth (False)
:return: numpy.array wiht area values for all vertices
"""
areas = np.copy(self._data[:, 4])
if head:
return areas
else:
return areas[::-1]
def get_slope(self, threshold=0, head=True, lq=False):
"""
Returns slopes calculated by linear regression
:param threshold: *float* R^2 threshold. (Slopes with R^2 < threshold will be in lq_slopes array)
:param head: boolean - Specifies if slopes are returned from head (True) or mouth (False)
:param lq: boolean - Specifies lq_slopes will be returned or not
:return: array with slopes (lq_slopes=False) or tuple of arrays (slopes, lq_slopes) (lq_slopes=True)
slopes --> numpy.array of slopes with R^2 >= threshold (lq_slopes will receive a np.nan value)
lq_slopes --> numpy.array of slopes with R^2 < threshold (slopes will receive a np.nan value)
"""
slopes = []
lq_slopes = []
r2_values = self.get_slope_r2()
for n in range(len(self._data)):
if r2_values[n] >= threshold:
slopes.append(self._data[n, 6])
lq_slopes.append(np.nan)
else:
slopes.append(np.nan)
lq_slopes.append(self._data[n, 6])
slopes = np.array(slopes)
lq_slopes = np.array(lq_slopes)
if not head:
slopes = slopes[::-1]
lq_slopes = lq_slopes[::-1]
if lq:
return slopes, lq_slopes
else:
return slopes
def get_ksn(self, threshold=0, head=True, lq=False):
"""
Returns ksn values calculated by linear regression
:param threshold: *float* R^2 threshold. (ksn with R^2 < threshold will be in lq_ksn array)
:param head: boolean - Specifies if ksn are returned from head (True) or mouth (False)
:param lq: boolean - Specifies lq_ksn will be returned or not
:return: array with ksn (lq=False) or tuple of arrays (ksn, lq_ksn) (lq=True)
ksn --> numpy.array of ksn values with R^2 >= threshold (lq_ksn will receive a np.nan value)
lq_ksn --> numpy.array of ksn values with R^2 < threshold (ksn will receive a np.nan value)
"""
ksn = []
lq_ksn = []
ksn_r2 = self.get_ksn_r2()
for n in range(len(self._data)):
if ksn_r2[n] >= threshold:
ksn.append(self._data[n, 7])
lq_ksn.append(np.nan)
else:
ksn.append(np.nan)
lq_ksn.append(self._data[n, 7])
ksn = np.array(ksn)
lq_ksn = np.array(lq_ksn)
if not head:
ksn = ksn[::-1]
lq_ksn = lq_ksn[::-1]
if lq:
return ksn, lq_ksn
else:
return ksn
def get_slope_r2(self, head=True):
"""
Returns slope R2 values from linear regressions for all vertices
:param head: boolean - Specifies if R2 values are returned from head (True) or mouth (False)
:return: numpy.array wiht slope R2 values for all vertices
"""
if head:
return np.copy(self._data[:, 8])
else:
return np.copy(self._data[::-1, 8])
def get_ksn_r2(self, head=True):
"""
Returns ksn R2 values from linear regressions for all vertices
:param head: boolean - Specifies if R2 values are returned from head (True) or mouth (False)
:return: numpy.array wiht ksn R2 values for all vertices
"""
if head:
return np.copy(self._data[:, 9])
else:
return np.copy(self._data[::-1, 9])
def get_chi(self, head=True, relative=False):
"""
Returns chi values for all vertices in ascending order.
:param head: boolean - Specifies if chi values are returned from head (True) or mouth (False)
:param relative: boolean - Specifies if chi values are relative (min chi = 0) or not
:return: numpy.array wiht chi values for all vertices
"""
chi_values = np.copy(self._data[:, 5])
if relative:
chi0 = chi_values[-1]
chi_values -= chi0
if head:
return chi_values
else:
return chi_values[::-1]
def smooth(self, window=0):
"""
Smooths the elevations of the profile with a movil mean of window size. It also removes peaks and flat segments
from the profile (to avoid problems when calculating slopes)
:param window: Window size (in profile units) to smooth the elevations of the river profile
:return: None
"""
# Remove peaks and flat segments
for n in range(len(self._data) - 1):
if self._data[n + 1, 2] >= self._data[n, 2]:
self._data[n + 1, 2] = float(self._data[n, 2]) - 0.001
# Smooth elevations if window distance > 0
if window > 0:
n_cells = int(int((window / self.dem_res) + 0.5) / 2)
for ind in range(len(self._data)):
low = ind - n_cells
high = ind + n_cells + 1
if low < 0:
low = 0
elevations = self._data[low:high, 10]
self._data[ind, 2] = np.mean(elevations)
def reset_elevations(self, raw=False):
"""
Reset smooth elevations. When reset, smooth elevations will equal to raw elevations
"""
for n in range(len(self._data)):
self._data[n, 2] = np.copy(self._data[n, 10])
if not raw:
self.smooth(self._smooth_win)
def calculate_chi(self, a0=1, chi0=0.0):
"""
This function creates the chi data array. Chi data will be calculated for each vertex of the river profile and
stored in the 7th column of self._data numpy array
:param a0: *int* - Reference area to remove dimensionality of Chi index
:param chi0: *float* - Initial Chi value in profile mouth. Needed to calculate chi values for tributaries
"""
# Invert area array
ai = self._data[::-1, 4] ** self.thetaref
a0 = a0 ** self.thetaref
chi = [chi0]
for n in range(len(ai)):
if n > 0:
dx = self._data[n, 3] - self._data[n - 1, 3]
chi.append(chi[n - 1] + (a0 * dx / ai[n]))
self._data[:, 5] = chi[::-1]
def calculate_slope(self, reg_points=4, raw_z=False):
"""
This function calculates slopes for all vertexes by linear regression of distance-elevation data.
Slopes are stored in column c5 of self._data. Together with slopes, R^2 are calculated (column c6)
:param reg_points: Number of profile points before and after each vertex to calculate slope
:param raw_z: bool Specifies if raw_z values are taken from calculate slopes (True) or not (False)
:return: None
"""
self.slope_reg_points = reg_points
li = self.get_l()
if raw_z:
zi = self.get_raw_z()
else:
zi = self.get_z()
for n in range(self.n_points):
low = n - reg_points
high = n + reg_points
if low < 0:
low = 0
sample_l = li[low:high + 1]
sample_z = zi[low:high + 1]
a = np.array([sample_l, np.ones(len(sample_l))]).T
y = sample_z
model, resid = np.linalg.lstsq(a, y)[:2]
if (y.size * y.var()) == 0:
r2 = 0
else:
r2 = 1 - resid / (y.size * y.var())
gradient = model[0]
self._data[n, 8] = abs(r2)
if abs(gradient) < 0.001:
self._data[n, 6] = 0.001
else:
self._data[n, 6] = abs(gradient)
def calculate_ksn(self, reg_points=4, raw_z=False):
"""
This function calculates ksn for all vertexes by linear regression of chi-elevation data.
:param reg_points: *int* - Number of profile points before and after each vertex to calculate ksn
:param raw_z: bool Specifies if raw_z values are taken from calculate slopes (True) or not (False)
:return: numpy.array with ksn values for all vertexes. If full is true, it returns a tuple of arrays (ksn, r^2)
"""
self.ksn_reg_points = reg_points
ksn_values = []
ksn_r2_values = []
chi = self.get_chi(False)
if raw_z:
zi = self.get_raw_z(False)
else:
zi = self.get_z(False)
for n in range(self.n_points):
low = n - reg_points
high = n + reg_points
if low < 0:
low = 0
sample_chi = chi[low:high + 1]
sample_z = zi[low:high + 1]
poli, sce = np.polyfit(sample_chi, sample_z, deg=1, full=True)[:2]
gradient = poli[0]
if (sample_z.size * sample_z.var()) == 0:
r2 = 0
else:
r2 = 1 - sce / (sample_z.size * sample_z.var())
ksn_r2_values.append(float(abs(r2)))
if abs(gradient) < 0.0001:
ksn_values.append(0.0001)
else:
ksn_values.append(abs(gradient))
self._data[:, 7] = np.array(ksn_values)[::-1]
self._data[:, 9] = np.array(ksn_r2_values)[::-1]
def get_best_theta(self, a0=1, step=0.05):
"""
Description
===========
This function obtain the best m/n value for the profile following the approach
proposed in Perron and Royden, 2013. This best m/n value will be the one that
increases the linearity of the Chi-Elevation profile
Parameters:
==============
a0 :: *int (Default = 1)*
Reference area value. By default set as 1 square meter
step :: *float (Default = 0.05)*
Step to test the different theta values. Recommended 0.1 or 0.05
Returns:
==============
best_theta :: *float*
Best m/n value for the profile. The one that icreases the linearity
for the whole Chi-Zi profile
"""
best_r2 = 0
best_theta = 0
theta_values = np.arange(0, 1, step)
zi = self._data[::-1, 2]
for theta in theta_values:
ai = self._data[::-1, 4] ** theta
a0 = a0 ** theta
chi = [0]
for n in range(len(ai)):
if n > 0:
dx = self._data[n, 3] - self._data[n - 1, 3]
chi.append(chi[n - 1] + (a0 * dx / ai[n]))
# Regresion into chi-elevation space to get r^2
a1 = np.array([chi, np.ones(len(chi))]).T
y1 = zi
model, resid = np.linalg.lstsq(a1, y1)[:2]
r2 = 1 - resid / (y1.size * y1.var())
if r2 > best_r2 and best_theta != 0:
best_r2 = r2
best_theta = theta
return best_theta
def version():
return "Version: 26 October 2017"
| [
"numpy.copy",
"numpy.mean",
"numpy.polyfit",
"numpy.append",
"numpy.array",
"numpy.empty",
"numpy.linalg.lstsq",
"numpy.arange"
] | [((4915, 4946), 'numpy.empty', 'np.empty', (['(pf_data.shape[0], 6)'], {}), '((pf_data.shape[0], 6))\n', (4923, 4946), True, 'import numpy as np\n'), ((5000, 5038), 'numpy.append', 'np.append', (['pf_data', 'aux_values'], {'axis': '(1)'}), '(pf_data, aux_values, axis=1)\n', (5009, 5038), True, 'import numpy as np\n'), ((5097, 5122), 'numpy.copy', 'np.copy', (['self._data[:, 2]'], {}), '(self._data[:, 2])\n', (5104, 5122), True, 'import numpy as np\n'), ((7103, 7128), 'numpy.copy', 'np.copy', (['self._data[:, 2]'], {}), '(self._data[:, 2])\n', (7110, 7128), True, 'import numpy as np\n'), ((7728, 7754), 'numpy.copy', 'np.copy', (['self._data[:, 10]'], {}), '(self._data[:, 10])\n', (7735, 7754), True, 'import numpy as np\n'), ((8898, 8923), 'numpy.copy', 'np.copy', (['self._data[:, 4]'], {}), '(self._data[:, 4])\n', (8905, 8923), True, 'import numpy as np\n'), ((10125, 10141), 'numpy.array', 'np.array', (['slopes'], {}), '(slopes)\n', (10133, 10141), True, 'import numpy as np\n'), ((10162, 10181), 'numpy.array', 'np.array', (['lq_slopes'], {}), '(lq_slopes)\n', (10170, 10181), True, 'import numpy as np\n'), ((11418, 11431), 'numpy.array', 'np.array', (['ksn'], {}), '(ksn)\n', (11426, 11431), True, 'import numpy as np\n'), ((11449, 11465), 'numpy.array', 'np.array', (['lq_ksn'], {}), '(lq_ksn)\n', (11457, 11465), True, 'import numpy as np\n'), ((12906, 12931), 'numpy.copy', 'np.copy', (['self._data[:, 5]'], {}), '(self._data[:, 5])\n', (12913, 12931), True, 'import numpy as np\n'), ((19101, 19122), 'numpy.arange', 'np.arange', (['(0)', '(1)', 'step'], {}), '(0, 1, step)\n', (19110, 19122), True, 'import numpy as np\n'), ((6209, 6234), 'numpy.copy', 'np.copy', (['self._data[:, 0]'], {}), '(self._data[:, 0])\n', (6216, 6234), True, 'import numpy as np\n'), ((6268, 6296), 'numpy.copy', 'np.copy', (['self._data[::-1, 0]'], {}), '(self._data[::-1, 0])\n', (6275, 6296), True, 'import numpy as np\n'), ((6603, 6628), 'numpy.copy', 'np.copy', (['self._data[:, 1]'], {}), 
'(self._data[:, 1])\n', (6610, 6628), True, 'import numpy as np\n'), ((6662, 6690), 'numpy.copy', 'np.copy', (['self._data[::-1, 1]'], {}), '(self._data[::-1, 1])\n', (6669, 6690), True, 'import numpy as np\n'), ((8436, 8461), 'numpy.copy', 'np.copy', (['self._data[:, 3]'], {}), '(self._data[:, 3])\n', (8443, 8461), True, 'import numpy as np\n'), ((11976, 12001), 'numpy.copy', 'np.copy', (['self._data[:, 8]'], {}), '(self._data[:, 8])\n', (11983, 12001), True, 'import numpy as np\n'), ((12035, 12063), 'numpy.copy', 'np.copy', (['self._data[::-1, 8]'], {}), '(self._data[::-1, 8])\n', (12042, 12063), True, 'import numpy as np\n'), ((12400, 12425), 'numpy.copy', 'np.copy', (['self._data[:, 9]'], {}), '(self._data[:, 9])\n', (12407, 12425), True, 'import numpy as np\n'), ((12459, 12487), 'numpy.copy', 'np.copy', (['self._data[::-1, 9]'], {}), '(self._data[::-1, 9])\n', (12466, 12487), True, 'import numpy as np\n'), ((14364, 14390), 'numpy.copy', 'np.copy', (['self._data[n, 10]'], {}), '(self._data[n, 10])\n', (14371, 14390), True, 'import numpy as np\n'), ((18157, 18177), 'numpy.array', 'np.array', (['ksn_values'], {}), '(ksn_values)\n', (18165, 18177), True, 'import numpy as np\n'), ((18211, 18234), 'numpy.array', 'np.array', (['ksn_r2_values'], {}), '(ksn_r2_values)\n', (18219, 18234), True, 'import numpy as np\n'), ((14112, 14131), 'numpy.mean', 'np.mean', (['elevations'], {}), '(elevations)\n', (14119, 14131), True, 'import numpy as np\n'), ((16295, 16316), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['a', 'y'], {}), '(a, y)\n', (16310, 16316), True, 'import numpy as np\n'), ((17684, 17734), 'numpy.polyfit', 'np.polyfit', (['sample_chi', 'sample_z'], {'deg': '(1)', 'full': '(True)'}), '(sample_chi, sample_z, deg=1, full=True)\n', (17694, 17734), True, 'import numpy as np\n'), ((19642, 19665), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['a1', 'y1'], {}), '(a1, y1)\n', (19657, 19665), True, 'import numpy as np\n')] |
"""Helper file for computing various statistics over our data such as mention
frequency, mention text frequency in the data (even if not labeled as an
anchor), ...
etc.
"""
import argparse
import logging
import multiprocessing
import os
import time
from collections import Counter
import marisa_trie
import nltk
import numpy as np
import ujson as json
from tqdm import tqdm
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.utils import utils
from bootleg.utils.utils import get_lnrm
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir", type=str, default="data/", help="Data dir for training data"
)
parser.add_argument(
"--save_dir", type=str, default="data/", help="Data dir for saving stats"
)
parser.add_argument("--train_file", type=str, default="train.jsonl")
parser.add_argument(
"--entity_symbols_dir",
type=str,
default="entity_db/entity_mappings",
help="Path to entities inside data_dir",
)
parser.add_argument(
"--emb_dir",
type=str,
default="/dfs/scratch0/lorr1/projects/bootleg-data/embs",
help="Path to embeddings",
)
parser.add_argument("--max_types", type=int, default=3, help="Max types to load")
parser.add_argument(
"--no_types", action="store_true", help="Do not compute type statistics"
)
parser.add_argument("--lower", action="store_true", help="Lower aliases")
parser.add_argument("--strip", action="store_true", help="Strip punc aliases")
parser.add_argument(
"--num_workers", type=int, help="Number of workers to parallelize"
)
args = parser.parse_args()
return args
def compute_histograms(save_dir, entity_symbols):
al_counts = Counter()
for al in entity_symbols.get_all_aliases():
num_entities = len(entity_symbols.get_qid_cands(al))
al_counts.update([num_entities])
utils.dump_json_file(
filename=os.path.join(save_dir, "candidate_counts.json"), contents=al_counts
)
return
def get_all_aliases(alias2qidcands):
# Load alias2qids
alias2qids = {}
for al in tqdm(alias2qidcands):
alias2qids[al] = [c[0] for c in alias2qidcands[al]]
logging.info(f"Loaded entity save with {len(alias2qids)} aliases.")
all_aliases = marisa_trie.Trie(alias2qids.keys())
return all_aliases
def get_num_lines(input_src):
# get number of lines
num_lines = 0
with open(input_src, "r", encoding="utf-8") as in_file:
try:
for line in in_file:
num_lines += 1
except Exception as e:
logging.error("ERROR READING IN TRAINING DATA")
logging.error(e)
return []
return num_lines
def chunk_text_data(input_src, chunk_files, chunk_size, num_lines):
logging.info(f"Reading in {input_src}")
start = time.time()
# write out chunks as text data
chunk_id = 0
num_lines_in_chunk = 0
# keep track of what files are written
out_file = open(chunk_files[chunk_id], "w")
with open(input_src, "r", encoding="utf-8") as in_file:
for i, line in enumerate(in_file):
out_file.write(line)
num_lines_in_chunk += 1
# move on to new chunk when it hits chunk size
if num_lines_in_chunk == chunk_size:
chunk_id += 1
# reset number of lines in chunk and open new file if not at end
num_lines_in_chunk = 0
out_file.close()
if i < (num_lines - 1):
out_file = open(chunk_files[chunk_id], "w")
out_file.close()
logging.info(f"Wrote out data chunks in {round(time.time() - start, 2)}s")
def compute_occurrences_single(args, max_alias_len=6):
data_file, lower, strip = args
num_lines = sum(1 for _ in open(data_file))
global all_aliases
# entity histogram
ent_occurrences = Counter()
# alias histogram
alias_occurrences = Counter()
# alias text occurrances
alias_text_occurrences = Counter()
# number of aliases per sentence
alias_pair_occurrences = Counter()
# alias|entity histogram
alias_entity_pair = Counter()
with open(data_file, "r") as in_file:
for line in tqdm(in_file, total=num_lines):
line = json.loads(line.strip())
for n in range(max_alias_len + 1, 0, -1):
grams = nltk.ngrams(line["sentence"].split(), n)
for gram_words in grams:
gram_attempt = get_lnrm(" ".join(gram_words), lower, strip)
if gram_attempt in all_aliases:
alias_text_occurrences[gram_attempt] += 1
# Get aliases in wikipedia _before_ the swapping - these represent the true textual aliases
aliases = line["unswap_aliases"]
qids = line["qids"]
for qid, alias in zip(qids, aliases):
ent_occurrences[qid] += 1
alias_occurrences[alias] += 1
alias_entity_pair[alias + "|" + qid] += 1
alias_pair_occurrences[len(aliases)] += 1
results = {
"ent_occurrences": ent_occurrences,
"alias_occurrences": alias_occurrences,
"alias_text_occurrences": alias_text_occurrences,
"alias_pair_occurrences": alias_pair_occurrences,
"alias_entity_pair": alias_entity_pair,
}
return results
def compute_occurrences(save_dir, data_file, entity_dump, lower, strip, num_workers=8):
global all_aliases
all_aliases = get_all_aliases(entity_dump._alias2qids)
# divide up data into chunks
num_lines = get_num_lines(data_file)
num_processes = min(num_workers, int(multiprocessing.cpu_count()))
logging.info(f"Using {num_processes} workers...")
chunk_size = int(np.ceil(num_lines / (num_processes)))
chunk_file_path = os.path.join(save_dir, "tmp")
utils.ensure_dir(chunk_file_path)
chunk_infiles = [
os.path.join(f"{chunk_file_path}", f"data_chunk_{chunk_id}_in.jsonl")
for chunk_id in range(num_processes)
]
chunk_text_data(data_file, chunk_infiles, chunk_size, num_lines)
pool = multiprocessing.Pool(processes=num_processes)
subprocess_args = [[chunk_infiles[i], lower, strip] for i in range(num_processes)]
results = pool.map(compute_occurrences_single, subprocess_args)
pool.close()
pool.join()
logging.info("Finished collecting counts")
logging.info("Merging counts....")
# merge counters together
ent_occurrences = Counter()
# alias histogram
alias_occurrences = Counter()
# alias text occurrances
alias_text_occurrences = Counter()
# number of aliases per sentence
alias_pair_occurrences = Counter()
# alias|entity histogram
alias_entity_pair = Counter()
for result_set in tqdm(results, desc="Merging"):
ent_occurrences += result_set["ent_occurrences"]
alias_occurrences += result_set["alias_occurrences"]
alias_text_occurrences += result_set["alias_text_occurrences"]
alias_pair_occurrences += result_set["alias_pair_occurrences"]
alias_entity_pair += result_set["alias_entity_pair"]
# save counters
utils.dump_json_file(
filename=os.path.join(save_dir, "entity_count.json"), contents=ent_occurrences
)
utils.dump_json_file(
filename=os.path.join(save_dir, "alias_counts.json"), contents=alias_occurrences
)
utils.dump_json_file(
filename=os.path.join(save_dir, "alias_text_counts.json"),
contents=alias_text_occurrences,
)
utils.dump_json_file(
filename=os.path.join(save_dir, "alias_pair_occurrences.json"),
contents=alias_pair_occurrences,
)
utils.dump_json_file(
filename=os.path.join(save_dir, "alias_entity_counts.json"),
contents=alias_entity_pair,
)
def main():
args = parse_args()
logging.info(json.dumps(vars(args), indent=4))
entity_symbols = EntitySymbols.load_from_cache(
load_dir=os.path.join(args.data_dir, args.entity_symbols_dir)
)
train_file = os.path.join(args.data_dir, args.train_file)
save_dir = os.path.join(args.save_dir, "stats")
logging.info(f"Will save data to {save_dir}")
utils.ensure_dir(save_dir)
# compute_histograms(save_dir, entity_symbols)
compute_occurrences(
save_dir,
train_file,
entity_symbols,
args.lower,
args.strip,
num_workers=args.num_workers,
)
if __name__ == "__main__":
main()
| [
"logging.basicConfig",
"numpy.ceil",
"argparse.ArgumentParser",
"tqdm.tqdm",
"os.path.join",
"multiprocessing.cpu_count",
"collections.Counter",
"multiprocessing.Pool",
"bootleg.utils.utils.ensure_dir",
"time.time",
"logging.info",
"logging.error"
] | [((509, 582), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(message)s"""'}), "(level=logging.INFO, format='%(asctime)s %(message)s')\n", (528, 582), False, 'import logging\n'), ((616, 641), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (639, 641), False, 'import argparse\n'), ((1868, 1877), 'collections.Counter', 'Counter', ([], {}), '()\n', (1875, 1877), False, 'from collections import Counter\n'), ((2251, 2271), 'tqdm.tqdm', 'tqdm', (['alias2qidcands'], {}), '(alias2qidcands)\n', (2255, 2271), False, 'from tqdm import tqdm\n'), ((2932, 2971), 'logging.info', 'logging.info', (['f"""Reading in {input_src}"""'], {}), "(f'Reading in {input_src}')\n", (2944, 2971), False, 'import logging\n'), ((2984, 2995), 'time.time', 'time.time', ([], {}), '()\n', (2993, 2995), False, 'import time\n'), ((4042, 4051), 'collections.Counter', 'Counter', ([], {}), '()\n', (4049, 4051), False, 'from collections import Counter\n'), ((4098, 4107), 'collections.Counter', 'Counter', ([], {}), '()\n', (4105, 4107), False, 'from collections import Counter\n'), ((4166, 4175), 'collections.Counter', 'Counter', ([], {}), '()\n', (4173, 4175), False, 'from collections import Counter\n'), ((4242, 4251), 'collections.Counter', 'Counter', ([], {}), '()\n', (4249, 4251), False, 'from collections import Counter\n'), ((4305, 4314), 'collections.Counter', 'Counter', ([], {}), '()\n', (4312, 4314), False, 'from collections import Counter\n'), ((5861, 5910), 'logging.info', 'logging.info', (['f"""Using {num_processes} workers..."""'], {}), "(f'Using {num_processes} workers...')\n", (5873, 5910), False, 'import logging\n'), ((5992, 6021), 'os.path.join', 'os.path.join', (['save_dir', '"""tmp"""'], {}), "(save_dir, 'tmp')\n", (6004, 6021), False, 'import os\n'), ((6026, 6059), 'bootleg.utils.utils.ensure_dir', 'utils.ensure_dir', (['chunk_file_path'], {}), '(chunk_file_path)\n', (6042, 6059), False, 'from bootleg.utils 
import utils\n'), ((6292, 6337), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'num_processes'}), '(processes=num_processes)\n', (6312, 6337), False, 'import multiprocessing\n'), ((6530, 6572), 'logging.info', 'logging.info', (['"""Finished collecting counts"""'], {}), "('Finished collecting counts')\n", (6542, 6572), False, 'import logging\n'), ((6577, 6611), 'logging.info', 'logging.info', (['"""Merging counts...."""'], {}), "('Merging counts....')\n", (6589, 6611), False, 'import logging\n'), ((6664, 6673), 'collections.Counter', 'Counter', ([], {}), '()\n', (6671, 6673), False, 'from collections import Counter\n'), ((6720, 6729), 'collections.Counter', 'Counter', ([], {}), '()\n', (6727, 6729), False, 'from collections import Counter\n'), ((6788, 6797), 'collections.Counter', 'Counter', ([], {}), '()\n', (6795, 6797), False, 'from collections import Counter\n'), ((6864, 6873), 'collections.Counter', 'Counter', ([], {}), '()\n', (6871, 6873), False, 'from collections import Counter\n'), ((6927, 6936), 'collections.Counter', 'Counter', ([], {}), '()\n', (6934, 6936), False, 'from collections import Counter\n'), ((6959, 6988), 'tqdm.tqdm', 'tqdm', (['results'], {'desc': '"""Merging"""'}), "(results, desc='Merging')\n", (6963, 6988), False, 'from tqdm import tqdm\n'), ((8227, 8271), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.train_file'], {}), '(args.data_dir, args.train_file)\n', (8239, 8271), False, 'import os\n'), ((8287, 8323), 'os.path.join', 'os.path.join', (['args.save_dir', '"""stats"""'], {}), "(args.save_dir, 'stats')\n", (8299, 8323), False, 'import os\n'), ((8328, 8373), 'logging.info', 'logging.info', (['f"""Will save data to {save_dir}"""'], {}), "(f'Will save data to {save_dir}')\n", (8340, 8373), False, 'import logging\n'), ((8378, 8404), 'bootleg.utils.utils.ensure_dir', 'utils.ensure_dir', (['save_dir'], {}), '(save_dir)\n', (8394, 8404), False, 'from bootleg.utils import utils\n'), ((4377, 4407), 'tqdm.tqdm', 
'tqdm', (['in_file'], {'total': 'num_lines'}), '(in_file, total=num_lines)\n', (4381, 4407), False, 'from tqdm import tqdm\n'), ((5932, 5966), 'numpy.ceil', 'np.ceil', (['(num_lines / num_processes)'], {}), '(num_lines / num_processes)\n', (5939, 5966), True, 'import numpy as np\n'), ((6090, 6159), 'os.path.join', 'os.path.join', (['f"""{chunk_file_path}"""', 'f"""data_chunk_{chunk_id}_in.jsonl"""'], {}), "(f'{chunk_file_path}', f'data_chunk_{chunk_id}_in.jsonl')\n", (6102, 6159), False, 'import os\n'), ((2071, 2118), 'os.path.join', 'os.path.join', (['save_dir', '"""candidate_counts.json"""'], {}), "(save_dir, 'candidate_counts.json')\n", (2083, 2118), False, 'import os\n'), ((5827, 5854), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (5852, 5854), False, 'import multiprocessing\n'), ((7374, 7417), 'os.path.join', 'os.path.join', (['save_dir', '"""entity_count.json"""'], {}), "(save_dir, 'entity_count.json')\n", (7386, 7417), False, 'import os\n'), ((7493, 7536), 'os.path.join', 'os.path.join', (['save_dir', '"""alias_counts.json"""'], {}), "(save_dir, 'alias_counts.json')\n", (7505, 7536), False, 'import os\n'), ((7614, 7662), 'os.path.join', 'os.path.join', (['save_dir', '"""alias_text_counts.json"""'], {}), "(save_dir, 'alias_text_counts.json')\n", (7626, 7662), False, 'import os\n'), ((7754, 7807), 'os.path.join', 'os.path.join', (['save_dir', '"""alias_pair_occurrences.json"""'], {}), "(save_dir, 'alias_pair_occurrences.json')\n", (7766, 7807), False, 'import os\n'), ((7899, 7949), 'os.path.join', 'os.path.join', (['save_dir', '"""alias_entity_counts.json"""'], {}), "(save_dir, 'alias_entity_counts.json')\n", (7911, 7949), False, 'import os\n'), ((8151, 8203), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.entity_symbols_dir'], {}), '(args.data_dir, args.entity_symbols_dir)\n', (8163, 8203), False, 'import os\n'), ((2738, 2785), 'logging.error', 'logging.error', (['"""ERROR READING IN TRAINING DATA"""'], {}), "('ERROR 
READING IN TRAINING DATA')\n", (2751, 2785), False, 'import logging\n'), ((2798, 2814), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (2811, 2814), False, 'import logging\n'), ((3806, 3817), 'time.time', 'time.time', ([], {}), '()\n', (3815, 3817), False, 'import time\n')] |
import unittest
import numpy as np
from pax.datastructure import Event, Pulse
from pax import core
class TestZLE(unittest.TestCase):
def setUp(self):
self.pax = core.Processor(config_names='XENON100',
just_testing=True,
config_dict={
'pax': {
'plugin_group_names': ['test'],
'test': 'ZLE.SoftwareZLE'},
'ZLE.SoftwareZLE': {
'zle_threshold': 40,
'samples_to_store_before': 50,
'samples_to_store_after': 50,
'max_intervals': 32,
'special_thresholds': {}
}})
self.plugin = self.pax.get_plugin_by_name('SoftwareZLE')
def tearDown(self):
delattr(self, 'pax')
delattr(self, 'plugin')
def test_zle(self):
for w, pulse_bounds_should_be in (
([60, 60], [[0, 1]]),
([0, 60, 60, 0], [[0, 3]]),
([1] * 100 + [60] + [2] * 100, [[50, 149]]),
([1] * 100 + [30] + [2] * 100, []),
([1] * 100 + [60] + [2] * 200 + [60] + [3] * 100, [[50, 149], [252, 351]]),
([1] * 100 + [60] + [2] * 70 + [60] + [3] * 100, [[50, 100 + 1 + 70 + 1 + 50 - 1]]),
):
w = np.array(w).astype(np.int16)
# Convert from ADC above baseline (easier to specify) to raw ADC counts (what the plugin needs)
w = self.plugin.config['digitizer_reference_baseline'] - w
e = Event(n_channels=self.plugin.config['n_channels'],
start_time=0,
stop_time=int(1e6),
sample_duration=self.pax.config['DEFAULT']['sample_duration'],
pulses=[Pulse(left=0,
channel=1,
raw_data=w)])
e = self.plugin.transform_event(e)
pulse_bounds = [[pulse.left, pulse.right] for pulse in e.pulses]
# Check the pulse bounds
self.assertEqual(pulse_bounds, pulse_bounds_should_be)
# Check if the data was put in correctly
for i, (l, r) in enumerate(pulse_bounds):
self.assertEqual(e.pulses[i].raw_data.tolist(), w[l:r + 1].tolist())
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"numpy.array",
"pax.datastructure.Pulse",
"pax.core.Processor"
] | [((2725, 2740), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2738, 2740), False, 'import unittest\n'), ((178, 485), 'pax.core.Processor', 'core.Processor', ([], {'config_names': '"""XENON100"""', 'just_testing': '(True)', 'config_dict': "{'pax': {'plugin_group_names': ['test'], 'test': 'ZLE.SoftwareZLE'},\n 'ZLE.SoftwareZLE': {'zle_threshold': 40, 'samples_to_store_before': 50,\n 'samples_to_store_after': 50, 'max_intervals': 32, 'special_thresholds':\n {}}}"}), "(config_names='XENON100', just_testing=True, config_dict={\n 'pax': {'plugin_group_names': ['test'], 'test': 'ZLE.SoftwareZLE'},\n 'ZLE.SoftwareZLE': {'zle_threshold': 40, 'samples_to_store_before': 50,\n 'samples_to_store_after': 50, 'max_intervals': 32, 'special_thresholds':\n {}}})\n", (192, 485), False, 'from pax import core\n'), ((1691, 1702), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (1699, 1702), True, 'import numpy as np\n'), ((2159, 2195), 'pax.datastructure.Pulse', 'Pulse', ([], {'left': '(0)', 'channel': '(1)', 'raw_data': 'w'}), '(left=0, channel=1, raw_data=w)\n', (2164, 2195), False, 'from pax.datastructure import Event, Pulse\n')] |
"""
Variety of filters to detect areas inside the images.
Signal rich areas should have a large density, while the areas dominated by noise are
low in density.
"""
import cv2
import numpy as np
import scipy as sp
import scipy.ndimage
def keypoint_density(image,convolve_size,n_pix=10,hess=1600):
detector = cv2.xfeatures2d.SURF_create(hessianThreshold=hess)
(kps, features) = detector.detectAndCompute(image,None)
kps_pts = np.array([i.pt for i in kps])
#Convolve with the gaussian
kps_digx = np.digitize(kps_pts[:,0],range(image.shape[1]))
kps_digy = np.digitize(kps_pts[:,1],range(image.shape[0]))
pts_im = np.zeros([image.shape[0],image.shape[1]],dtype=np.float32)
pts_im[kps_digy,kps_digx] = 1 #Points shouldn't be in the same pixel, though technically possible.
#Change if this causes issues
blurred_pts = cv2.GaussianBlur(pts_im,(convolve_size,convolve_size),0)#,np.std(image),np.std(image))
blurred_pts_low = blurred_pts.copy()
blurred_pts_low[np.where(image==0)] = np.inf
blurred_pts_low[0,:] = np.inf
blurred_pts_low[-1,:] = np.inf
blurred_pts_low[:,0] = np.inf
blurred_pts_low[:,-1] = np.inf
blur_max = sp.ndimage.filters.maximum_filter(blurred_pts_low,size=convolve_size,mode='constant')
#blur_max[np.where(sp.ndimage.filters.minimum_filter(image,3,mode='constant')<np.mean(image[image>0]))] = np.inf
blurred_pts_low[np.where(blur_max==np.inf)] = np.inf
blurred_pts_high = blurred_pts.copy()
blurred_pts_high[np.where(image==0)] = -np.inf
blurred_pts_high[0,:] = -np.inf
blurred_pts_high[-1,:] = -np.inf
blurred_pts_high[:,0] = -np.inf
blurred_pts_high[:,-1] = -np.inf
blur_min = sp.ndimage.filters.minimum_filter(blurred_pts_high,size=convolve_size,mode='constant')
#blur_min[np.where(sp.ndimage.filters.maximum_filter(image,3,mode='constant')>np.mean(image[image>0]))] = -np.inf
blurred_pts_high[np.where(blur_min==-np.inf)] = -np.inf
#Need MIN points, but if pixels near each other, can make them nan or inf??
pix_low = []
for i in range(n_pix):
flat = blurred_pts_low.flatten()
shuffle = np.random.permutation(len(flat))
new_pix = np.array(np.unravel_index(shuffle[np.argmin(flat[shuffle])], blurred_pts_low.shape))
new_pix[0] = new_pix[0]
new_pix[1] = new_pix[1]
pix_low.append(new_pix)
blurred_pts_low[max(new_pix[0]-convolve_size,0):min(new_pix[0]+convolve_size,blurred_pts_low.shape[0]),max(new_pix[1]-convolve_size,0):min(new_pix[1]+convolve_size,blurred_pts_low.shape[1])] += np.inf
pix_high = []
for i in range(n_pix):
flat = blurred_pts_high.flatten()
shuffle = np.random.permutation(len(flat))
new_pix = np.array(np.unravel_index(shuffle[np.argmax(flat[shuffle])], blurred_pts_high.shape))
new_pix[0] = new_pix[0]
new_pix[1] = new_pix[1]
pix_high.append(new_pix)
blurred_pts_high[max(new_pix[0]-convolve_size,0):min(new_pix[0]+convolve_size,blurred_pts_high.shape[0]),max(new_pix[1]-convolve_size,0):min(new_pix[1]+convolve_size,blurred_pts_high.shape[1])] -= np.inf
return pix_low,pix_high,blurred_pts_low,blurred_pts_high,kps_pts,blurred_pts
def median_subtract(image,convolve_size):
#median = cv2.medianBlur(image,convolve_size)
median = cv2.blur(image,(convolve_size,convolve_size))
return image.astype(np.float32)-median.astype(np.float32)
| [
"numpy.where",
"cv2.xfeatures2d.SURF_create",
"numpy.argmax",
"scipy.ndimage.filters.minimum_filter",
"numpy.array",
"numpy.zeros",
"scipy.ndimage.filters.maximum_filter",
"numpy.argmin",
"cv2.GaussianBlur",
"cv2.blur"
] | [((314, 364), 'cv2.xfeatures2d.SURF_create', 'cv2.xfeatures2d.SURF_create', ([], {'hessianThreshold': 'hess'}), '(hessianThreshold=hess)\n', (341, 364), False, 'import cv2\n'), ((439, 468), 'numpy.array', 'np.array', (['[i.pt for i in kps]'], {}), '([i.pt for i in kps])\n', (447, 468), True, 'import numpy as np\n'), ((651, 711), 'numpy.zeros', 'np.zeros', (['[image.shape[0], image.shape[1]]'], {'dtype': 'np.float32'}), '([image.shape[0], image.shape[1]], dtype=np.float32)\n', (659, 711), True, 'import numpy as np\n'), ((866, 925), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['pts_im', '(convolve_size, convolve_size)', '(0)'], {}), '(pts_im, (convolve_size, convolve_size), 0)\n', (882, 925), False, 'import cv2\n'), ((1196, 1288), 'scipy.ndimage.filters.maximum_filter', 'sp.ndimage.filters.maximum_filter', (['blurred_pts_low'], {'size': 'convolve_size', 'mode': '"""constant"""'}), "(blurred_pts_low, size=convolve_size, mode\n ='constant')\n", (1229, 1288), True, 'import scipy as sp\n'), ((1710, 1802), 'scipy.ndimage.filters.minimum_filter', 'sp.ndimage.filters.minimum_filter', (['blurred_pts_high'], {'size': 'convolve_size', 'mode': '"""constant"""'}), "(blurred_pts_high, size=convolve_size,\n mode='constant')\n", (1743, 1802), True, 'import scipy as sp\n'), ((3340, 3387), 'cv2.blur', 'cv2.blur', (['image', '(convolve_size, convolve_size)'], {}), '(image, (convolve_size, convolve_size))\n', (3348, 3387), False, 'import cv2\n'), ((1014, 1034), 'numpy.where', 'np.where', (['(image == 0)'], {}), '(image == 0)\n', (1022, 1034), True, 'import numpy as np\n'), ((1419, 1447), 'numpy.where', 'np.where', (['(blur_max == np.inf)'], {}), '(blur_max == np.inf)\n', (1427, 1447), True, 'import numpy as np\n'), ((1519, 1539), 'numpy.where', 'np.where', (['(image == 0)'], {}), '(image == 0)\n', (1527, 1539), True, 'import numpy as np\n'), ((1936, 1965), 'numpy.where', 'np.where', (['(blur_min == -np.inf)'], {}), '(blur_min == -np.inf)\n', (1944, 1965), True, 'import numpy as 
np\n'), ((2244, 2268), 'numpy.argmin', 'np.argmin', (['flat[shuffle]'], {}), '(flat[shuffle])\n', (2253, 2268), True, 'import numpy as np\n'), ((2790, 2814), 'numpy.argmax', 'np.argmax', (['flat[shuffle]'], {}), '(flat[shuffle])\n', (2799, 2814), True, 'import numpy as np\n')] |
# Author: <NAME>
import json
import pandas as pd
from rnn.biLSTM_inference import biLSTM_inference
from torch import from_numpy
from numpy import load, copy
from flask import Flask, jsonify, request
from flask_cors import CORS
from rnn.parameter import FEATURES
app = Flask(__name__)
CORS(app)
time = '20200115-194901'
best_epoch = 46
best_accuracy = 0.96
filepath = './data/mimic/'
dataset = load(filepath + 'unique_data/uniqDataLabelsIds')
def jsonData2numpy():
return None
def numpyData2Json(input, featureIdx):
T = input.shape[0]
features = input[:, featureIdx]
featuresDf = pd.DataFrame(
features, index=['T' + str(t + 1) for t in range(T)])
featuresDf.columns = featureIdx
featuresDf['time'] = featuresDf.index
return json.loads(featuresDf.round(2).to_json(orient='records'))
@app.route("/", methods=["POST", "GET"])
def index():
return jsonify({'abc': 'ddd', 'bbd': 'ccc'})
@app.route("/counterfactual", methods=["POST"])
def counterfactual():
d = request.get_json()
instanceId = int(d['instanceId'])
arr = load(filepath + 'counterfactuals/all_counterfactuals20')
# get counterfactuals for selected data
jsonObjArr = []
for ctfct in arr[instanceId]:
jsonObjArr.append({
'idx': ctfct[0],
'similarity': 1 - ctfct[1]
})
return jsonify(jsonObjArr)
@app.route("/pdplot", methods=["POST"])
def pdplot():
d = request.get_json()
instanceId = int(d['instanceId'])
# with open(filepath + 'pdplot/pdp-negs-30-46-47_clusters_ci_0.999.json') as jsonfile:
with open(filepath + 'pdplot/pdp-negs-30-46-47_clusters_ci_new.json') as jsonfile:
jsonObjArr = json.load(jsonfile)
return jsonify(jsonObjArr)
@app.route("/predict", methods=["POST"])
def predict():
d = request.get_json()
instanceId = int(d['instanceId'])
featureIdx = d['featureIdx']
print('\n ---- predict instanceId', instanceId)
# print('\n ---', d['updatedData'])
# print(dataset['data'].shape) # (14165, 48, 37)
# print(dataset['labels'].shape) # (14165, 2)
# print(dataset['ids'].shape) # (14165,)
data = dataset['data']
labels = dataset['labels']
selectedDatum = data[instanceId]
selectedLabel = labels[instanceId]
# get updated input from frontend
# input = from_numpy(selectedData[instanceId])
input = from_numpy(selectedDatum)
# counterfactual = from_numpy(data[9009]) # 6712
# counterfactual = from_numpy(data[5875]) # 6712
# counterfactual = from_numpy(data[5947]) # 4895
counterfactual = from_numpy(data[12457]) # 4895
# print(numpyData2Json(input.numpy(), featureIdx))
model = biLSTM_inference(filepath, time, best_epoch, best_accuracy)
result = model.predict(input)
# updated input
updatedInput = copy(input)
# d['data2predict'] is empty [] at load and when inputing text id
# d['data2predict'] has values when updating barchart values
updatedDataDf = pd.DataFrame(d['data2predict'])
for column in updatedDataDf.head():
if (column != "time"):
updatedInput[:, int(column)] = updatedDataDf[column]
# prediction result for updated input
updatedResult = model.predict(from_numpy(updatedInput))
print('prediction result\n', result)
print('updated prediction result\n', updatedResult)
obj = {
'xName': 'time',
'yName': [str(i) for i in featureIdx],
'yLabel': [FEATURES[i] for i in featureIdx],
'instanceCnt': 7,
'result':
[{
'class': 'dead',
'original': round(result[0][0], 4),
'predict':round(updatedResult[0][0], 4)
},
{
'class': 'alive',
'original': round(result[0][1], 4),
'predict':round(updatedResult[0][1], 4)
}],
'original': numpyData2Json(input.numpy(), featureIdx),
'counterfactual': numpyData2Json(counterfactual.numpy(), featureIdx),
'updated': numpyData2Json(updatedInput, featureIdx)
}
return jsonify(obj)
@app.route("/tsne", methods=["POST"])
def tsne():
tsneFile = './data/mimic/tsne4vis.json'
with open(tsneFile) as json_file:
tsneData = json.load(json_file)
return jsonify(tsneData)
if __name__ == "__main__":
app.run(debug=True)
| [
"numpy.copy",
"rnn.biLSTM_inference.biLSTM_inference",
"flask_cors.CORS",
"flask.Flask",
"torch.from_numpy",
"json.load",
"flask.request.get_json",
"pandas.DataFrame",
"numpy.load",
"flask.jsonify"
] | [((268, 283), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (273, 283), False, 'from flask import Flask, jsonify, request\n'), ((284, 293), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (288, 293), False, 'from flask_cors import CORS\n'), ((396, 444), 'numpy.load', 'load', (["(filepath + 'unique_data/uniqDataLabelsIds')"], {}), "(filepath + 'unique_data/uniqDataLabelsIds')\n", (400, 444), False, 'from numpy import load, copy\n'), ((894, 931), 'flask.jsonify', 'jsonify', (["{'abc': 'ddd', 'bbd': 'ccc'}"], {}), "({'abc': 'ddd', 'bbd': 'ccc'})\n", (901, 931), False, 'from flask import Flask, jsonify, request\n'), ((1012, 1030), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1028, 1030), False, 'from flask import Flask, jsonify, request\n'), ((1080, 1136), 'numpy.load', 'load', (["(filepath + 'counterfactuals/all_counterfactuals20')"], {}), "(filepath + 'counterfactuals/all_counterfactuals20')\n", (1084, 1136), False, 'from numpy import load, copy\n'), ((1355, 1374), 'flask.jsonify', 'jsonify', (['jsonObjArr'], {}), '(jsonObjArr)\n', (1362, 1374), False, 'from flask import Flask, jsonify, request\n'), ((1439, 1457), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1455, 1457), False, 'from flask import Flask, jsonify, request\n'), ((1728, 1747), 'flask.jsonify', 'jsonify', (['jsonObjArr'], {}), '(jsonObjArr)\n', (1735, 1747), False, 'from flask import Flask, jsonify, request\n'), ((1814, 1832), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1830, 1832), False, 'from flask import Flask, jsonify, request\n'), ((2387, 2412), 'torch.from_numpy', 'from_numpy', (['selectedDatum'], {}), '(selectedDatum)\n', (2397, 2412), False, 'from torch import from_numpy\n'), ((2595, 2618), 'torch.from_numpy', 'from_numpy', (['data[12457]'], {}), '(data[12457])\n', (2605, 2618), False, 'from torch import from_numpy\n'), ((2695, 2754), 'rnn.biLSTM_inference.biLSTM_inference', 'biLSTM_inference', (['filepath', 
'time', 'best_epoch', 'best_accuracy'], {}), '(filepath, time, best_epoch, best_accuracy)\n', (2711, 2754), False, 'from rnn.biLSTM_inference import biLSTM_inference\n'), ((2829, 2840), 'numpy.copy', 'copy', (['input'], {}), '(input)\n', (2833, 2840), False, 'from numpy import load, copy\n'), ((2996, 3027), 'pandas.DataFrame', 'pd.DataFrame', (["d['data2predict']"], {}), "(d['data2predict'])\n", (3008, 3027), True, 'import pandas as pd\n'), ((4107, 4119), 'flask.jsonify', 'jsonify', (['obj'], {}), '(obj)\n', (4114, 4119), False, 'from flask import Flask, jsonify, request\n'), ((1696, 1715), 'json.load', 'json.load', (['jsonfile'], {}), '(jsonfile)\n', (1705, 1715), False, 'import json\n'), ((3240, 3264), 'torch.from_numpy', 'from_numpy', (['updatedInput'], {}), '(updatedInput)\n', (3250, 3264), False, 'from torch import from_numpy\n'), ((4273, 4293), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (4282, 4293), False, 'import json\n'), ((4309, 4326), 'flask.jsonify', 'jsonify', (['tsneData'], {}), '(tsneData)\n', (4316, 4326), False, 'from flask import Flask, jsonify, request\n')] |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import Optional
import numpy as np
from ..functional.nn import embedding as embedding_func
from ..tensor import Parameter
from . import init
from .module import Module
class Embedding(Module):
r"""
A simple lookup table that stores embeddings of a fixed dictionary and size.
This module is often used to store word embeddings and retrieve them using indices.
The input to the module is a list of indices, and the output is the corresponding word embeddings.
The indices should less than num_embeddings.
:param num_embeddings: size of embedding dictionary.
:param embedding_dim: size of each embedding vector.
:param padding_idx: should be set to None, not supportted now.
:param max_norm: should be set to None, not supportted now.
:param norm_type: should be set to None, not supportted now.
:param initial_weight: the learnable weights of the module of shape (num_embeddings, embedding_dim).
Examples:
.. testcode::
import numpy as np
import megengine as mge
import megengine.module as M
weight = mge.tensor(np.array([(1.2,2.3,3.4,4.5,5.6)], dtype=np.float32))
data = mge.tensor(np.array([(0,0)], dtype=np.int32))
embedding = M.Embedding(1, 5, initial_weight=weight)
output = embedding(data)
with np.printoptions(precision=6):
print(output.numpy())
Outputs:
.. testoutput::
[[[1.2 2.3 3.4 4.5 5.6]
[1.2 2.3 3.4 4.5 5.6]]]
"""
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: Optional[float] = None,
initial_weight: Parameter = None,
freeze: bool = False,
):
super().__init__()
if padding_idx is not None:
raise ValueError("Not support padding index now.")
if max_norm is not None or norm_type is not None:
raise ValueError("Not support weight normalize now.")
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
self.freeze = freeze
if initial_weight is None:
self.weight = Parameter(
np.random.uniform(
size=(self.num_embeddings, self.embedding_dim)
).astype(np.float32)
)
self.reset_parameters()
else:
if initial_weight.numpy().shape != (num_embeddings, embedding_dim):
raise ValueError(
"The weight shape should match num_embeddings and embedding_dim"
)
self.weight = Parameter(initial_weight.numpy())
def reset_parameters(self) -> None:
init.normal_(self.weight)
def forward(self, inputs):
if self.freeze:
weight = self.weight.detach()
else:
weight = self.weight
return embedding_func(inputs, weight)
@classmethod
def from_pretrained(
cls,
embeddings: Parameter,
freeze: Optional[bool] = True,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: Optional[float] = None,
):
r"""
Creates Embedding instance from given 2-dimensional FloatTensor.
:param embeddings: tensor contained weight for the embedding.
:param freeze: if ``True``, the weight does not get updated during the learning process. Default: True.
:param padding_idx: should be set to None, not support Now.
:param max_norm: should be set to None, not support Now.
:param norm_type: should be set to None, not support Now.
Examples:
.. testcode::
import numpy as np
import megengine as mge
import megengine.module as M
weight = mge.tensor(np.array([(1.2,2.3,3.4,4.5,5.6)], dtype=np.float32))
data = mge.tensor(np.array([(0,0)], dtype=np.int32))
embedding = M.Embedding.from_pretrained(weight, freeze=False)
output = embedding(data)
print(output.numpy())
Outputs:
.. testoutput::
[[[1.2 2.3 3.4 4.5 5.6]
[1.2 2.3 3.4 4.5 5.6]]]
"""
embeddings_shape = embeddings.shape
embeddings_dim = len(embeddings_shape)
if embeddings_dim != 2:
raise ValueError("Embeddings parameter is expected to be 2-dimensional")
rows = embeddings_shape[0]
cols = embeddings_shape[1]
embedding = cls(
num_embeddings=rows,
embedding_dim=cols,
initial_weight=embeddings,
padding_idx=padding_idx,
max_norm=max_norm,
norm_type=norm_type,
freeze=freeze,
)
return embedding
| [
"numpy.random.uniform"
] | [((2748, 2813), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(self.num_embeddings, self.embedding_dim)'}), '(size=(self.num_embeddings, self.embedding_dim))\n', (2765, 2813), True, 'import numpy as np\n')] |
# Copyright (c) 2020, NVIDIA CORPORATION.
"""
Tests for Streamz Dataframes (SDFs) built on top of cuDF DataFrames.
*** Borrowed from streamz.dataframe.tests | License at thirdparty/LICENSE ***
"""
from __future__ import division, print_function
import json
import operator
import numpy as np
import pandas as pd
import pytest
from streamz import Stream
from streamz.dask import DaskStream
from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series
from dask.dataframe.utils import assert_eq
from distributed import Client
cudf = pytest.importorskip("cudf")
@pytest.fixture(scope="module")
def client():
client = Client(processes=False, asynchronous=False)
try:
yield client
finally:
client.close()
@pytest.fixture(params=["core", "dask"])
def stream(request, client): # flake8: noqa
if request.param == "core":
return Stream()
else:
return DaskStream()
def test_identity(stream):
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
sdf = DataFrame(example=df, stream=stream)
L = sdf.stream.gather().sink_to_list()
sdf.emit(df)
assert L[0] is df
assert list(sdf.example.columns) == ["x", "y"]
x = sdf.x
assert isinstance(x, Series)
L2 = x.stream.gather().sink_to_list()
assert not L2
sdf.emit(df)
assert isinstance(L2[0], cudf.Series)
assert_eq(L2[0], df.x)
def test_dtype(stream):
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
sdf = DataFrame(example=df, stream=stream)
assert str(sdf.dtypes) == str(df.dtypes)
assert sdf.x.dtype == df.x.dtype
assert sdf.index.dtype == df.index.dtype
def test_attributes():
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
sdf = DataFrame(example=df)
assert getattr(sdf, "x", -1) != -1
assert getattr(sdf, "z", -1) == -1
sdf.x
with pytest.raises(AttributeError):
sdf.z
def test_exceptions(stream):
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
sdf = DataFrame(example=df, stream=stream)
with pytest.raises(TypeError):
sdf.emit(1)
with pytest.raises(IndexError):
sdf.emit(cudf.DataFrame())
@pytest.mark.parametrize(
"func", [lambda x: x.sum(), lambda x: x.mean(), lambda x: x.count()]
)
def test_reductions(stream, func):
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
for example in [df, df.iloc[:0]]:
sdf = DataFrame(example=example, stream=stream)
df_out = func(sdf).stream.gather().sink_to_list()
x = sdf.x
x_out = func(x).stream.gather().sink_to_list()
sdf.emit(df)
sdf.emit(df)
assert_eq(df_out[-1], func(cudf.concat([df, df])))
assert_eq(x_out[-1], func(cudf.concat([df, df]).x))
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.and_,
operator.eq,
operator.floordiv,
operator.ge,
operator.gt,
operator.le,
operator.lshift,
operator.lt,
operator.mod,
operator.mul,
operator.ne,
operator.or_,
operator.pow,
operator.rshift,
operator.sub,
operator.truediv,
operator.xor,
],
)
@pytest.mark.parametrize("getter", [lambda df: df, lambda df: df.x])
def test_binary_operators(op, getter, stream):
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
try:
left = op(getter(df), 2)
right = op(2, getter(df))
except Exception:
return
a = DataFrame(example=df, stream=stream)
li = op(getter(a), 2).stream.gather().sink_to_list()
r = op(2, getter(a)).stream.gather().sink_to_list()
a.emit(df)
assert_eq(li[0], left)
assert_eq(r[0], right)
@pytest.mark.parametrize(
"op",
[
operator.abs,
operator.inv,
operator.invert,
operator.neg,
lambda x: x.map(lambda x: x + 1),
lambda x: x.reset_index(),
lambda x: x.astype(float),
],
)
@pytest.mark.parametrize("getter", [lambda df: df, lambda df: df.x])
def test_unary_operators(op, getter):
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
try:
expected = op(getter(df))
except Exception:
return
a = DataFrame(example=df)
b = op(getter(a)).stream.sink_to_list()
a.emit(df)
assert_eq(b[0], expected)
@pytest.mark.parametrize(
"func",
[
lambda df: df.query("x > 1 and x < 4"),
pytest.param(
lambda df: df.x.value_counts().nlargest(2).astype(int),
marks=pytest.mark.xfail(reason="Index name lost in _getattr_"),
),
],
)
def test_dataframe_simple(func):
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
expected = func(df)
a = DataFrame(example=df)
L = func(a).stream.sink_to_list()
a.emit(df)
assert_eq(L[0], expected)
def test_set_index():
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
a = DataFrame(example=df)
b = a.set_index("x").stream.sink_to_list()
a.emit(df)
assert_eq(b[0], df.set_index("x"))
b = a.set_index(a.y + 1).stream.sink_to_list()
a.emit(df)
assert_eq(b[0], df.set_index(df.y + 1))
def test_binary_stream_operators(stream):
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
expected = df.x + df.y
a = DataFrame(example=df, stream=stream)
b = (a.x + a.y).stream.gather().sink_to_list()
a.emit(df)
assert_eq(b[0], expected)
def test_index(stream):
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
a = DataFrame(example=df, stream=stream)
b = a.index + 5
L = b.stream.gather().sink_to_list()
a.emit(df)
a.emit(df)
assert_eq(L[0], df.index + 5)
assert_eq(L[1], df.index + 5)
def test_pair_arithmetic(stream):
df = cudf.DataFrame({"x": list(range(10)), "y": [1] * 10})
a = DataFrame(example=df.iloc[:0], stream=stream)
L = ((a.x + a.y) * 2).stream.gather().sink_to_list()
a.emit(df.iloc[:5])
a.emit(df.iloc[5:])
assert len(L) == 2
assert_eq(cudf.concat(L), (df.x + df.y) * 2)
def test_getitem(stream):
df = cudf.DataFrame({"x": list(range(10)), "y": [1] * 10})
a = DataFrame(example=df.iloc[:0], stream=stream)
L = a[a.x > 4].stream.gather().sink_to_list()
a.emit(df.iloc[:5])
a.emit(df.iloc[5:])
assert len(L) == 2
assert_eq(cudf.concat(L), df[df.x > 4])
@pytest.mark.parametrize("agg", [lambda x: x.sum(), lambda x: x.mean()])
@pytest.mark.parametrize(
"grouper",
[lambda a: a.x % 3, lambda a: "x", lambda a: a.index % 2, lambda a: ["x"]],
)
@pytest.mark.parametrize(
"indexer", [lambda g: g, lambda g: g[["y"]], lambda g: g[["x", "y"]]]
)
def test_groupby_aggregate(agg, grouper, indexer, stream):
df = cudf.DataFrame(
{"x": (np.arange(10) // 2).astype(float), "y": [1.0, 2.0] * 5}
)
a = DataFrame(example=df.iloc[:0], stream=stream)
def f(x):
return agg(indexer(x.groupby(grouper(x))))
L = f(a).stream.gather().sink_to_list()
a.emit(df.iloc[:3])
a.emit(df.iloc[3:7])
a.emit(df.iloc[7:])
first = df.iloc[:3]
g = f(first)
h = f(df)
assert_eq(L[0], g)
assert_eq(L[-1], h)
def test_repr(stream):
df = cudf.DataFrame(
{"x": (np.arange(10) // 2).astype(float), "y": [1.0] * 10}
)
a = DataFrame(example=df, stream=stream)
text = repr(a)
assert type(a).__name__ in text
assert "x" in text
assert "y" in text
text = repr(a.x)
assert type(a.x).__name__ in text
assert "x" in text
text = repr(a.x.sum())
assert type(a.x.sum()).__name__ in text
def test_repr_html(stream):
df = cudf.DataFrame(
{"x": (np.arange(10) // 2).astype(float), "y": [1.0] * 10}
)
a = DataFrame(example=df, stream=stream)
for x in [a, a.y, a.y.mean()]:
html = x._repr_html_()
assert type(x).__name__ in html
assert "1" in html
def test_setitem(stream):
df = cudf.DataFrame({"x": list(range(10)), "y": [1] * 10})
sdf = DataFrame(example=df.iloc[:0], stream=stream)
stream = sdf.stream
sdf["z"] = sdf["x"] * 2
sdf["a"] = 10
sdf[["c", "d"]] = sdf[["x", "y"]]
L = sdf.mean().stream.gather().sink_to_list()
stream.emit(df.iloc[:3])
stream.emit(df.iloc[3:7])
stream.emit(df.iloc[7:])
df["z"] = df["x"] * 2
df["a"] = 10
df["c"] = df["x"]
df["d"] = df["y"]
assert_eq(L[-1], df.mean())
def test_setitem_overwrites(stream):
df = cudf.DataFrame({"x": list(range(10))})
sdf = DataFrame(example=df.iloc[:0], stream=stream)
stream = sdf.stream
sdf["x"] = sdf["x"] * 2
L = sdf.stream.gather().sink_to_list()
stream.emit(df.iloc[:3])
stream.emit(df.iloc[3:7])
stream.emit(df.iloc[7:])
assert_eq(L[-1], df.iloc[7:] * 2)
@pytest.mark.parametrize(
"kwargs,op",
[
({}, "sum"),
({}, "mean"),
pytest.param({}, "min"),
pytest.param(
{},
"median",
marks=pytest.mark.xfail(reason="Unavailable for rolling objects"),
),
pytest.param({}, "max"),
pytest.param(
{},
"var",
marks=pytest.mark.xfail(reason="Unavailable for rolling objects"),
),
pytest.param({}, "count"),
pytest.param(
{"ddof": 0},
"std",
marks=pytest.mark.xfail(reason="Unavailable for rolling objects"),
),
pytest.param(
{"quantile": 0.5},
"quantile",
marks=pytest.mark.xfail(reason="Unavailable for rolling objects"),
),
pytest.param(
{"arg": {"A": "sum", "B": "min"}},
"aggregate",
marks=pytest.mark.xfail(reason="Unavailable for rolling objects"),
),
],
)
@pytest.mark.parametrize(
"window",
[pytest.param(2), 7, pytest.param("3h"), pd.Timedelta("200 minutes")],
)
@pytest.mark.parametrize("m", [2, pytest.param(5)])
@pytest.mark.parametrize(
"pre_get,post_get",
[
(lambda df: df, lambda df: df),
(lambda df: df.x, lambda x: x),
(lambda df: df, lambda df: df.x),
],
)
def test_rolling_count_aggregations(
op, window, m, pre_get, post_get, kwargs, stream
):
index = pd.DatetimeIndex(
pd.date_range("2000-01-01", "2000-01-03", freq="1h")
)
df = cudf.DataFrame({"x": np.arange(len(index))}, index=index)
expected = getattr(post_get(pre_get(df).rolling(window)), op)(**kwargs)
sdf = DataFrame(example=df, stream=stream)
roll = getattr(post_get(pre_get(sdf).rolling(window)), op)(**kwargs)
L = roll.stream.gather().sink_to_list()
assert len(L) == 0
for i in range(0, len(df), m):
sdf.emit(df.iloc[i : i + m])
assert len(L) > 1
assert_eq(cudf.concat(L), expected)
def test_stream_to_dataframe(stream):
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
source = stream
L = source.to_dataframe(example=df).x.sum().stream.gather().sink_to_list()
source.emit(df)
source.emit(df)
source.emit(df)
assert L == [6, 12, 18]
def test_integration_from_stream(stream):
source = stream
sdf = (
source.partition(4)
.to_batch(example=['{"x": 0, "y": 0}'])
.map(json.loads)
.to_dataframe()
)
result = sdf.groupby(sdf.x).y.sum().mean()
L = result.stream.gather().sink_to_list()
for i in range(12):
source.emit(json.dumps({"x": i % 3, "y": i}))
assert L == [2, 28 / 3, 22.0]
def test_to_frame(stream):
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
sdf = DataFrame(example=df, stream=stream)
assert sdf.to_frame() is sdf
a = sdf.x.to_frame()
assert isinstance(a, DataFrame)
assert list(a.columns) == ["x"]
@pytest.mark.parametrize("op", ["cumsum", "cummax", "cumprod", "cummin"])
@pytest.mark.parametrize("getter", [lambda df: df, lambda df: df.x])
def test_cumulative_aggregations(op, getter, stream):
df = cudf.DataFrame({"x": list(range(10)), "y": [1] * 10})
expected = getattr(getter(df), op)()
sdf = DataFrame(example=df, stream=stream)
L = getattr(getter(sdf), op)().stream.gather().sink_to_list()
for i in range(0, 10, 3):
sdf.emit(df.iloc[i : i + 3])
sdf.emit(df.iloc[:0])
assert len(L) > 1
assert_eq(cudf.concat(L), expected)
def test_display(stream):
pytest.importorskip("ipywidgets")
pytest.importorskip("IPython")
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
sdf = DataFrame(example=df, stream=stream)
s = sdf.x.sum()
s._ipython_display_()
def test_tail(stream):
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
sdf = DataFrame(example=df, stream=stream)
L = sdf.tail(2).stream.gather().sink_to_list()
sdf.emit(df)
sdf.emit(df)
assert_eq(L[0], df.tail(2))
assert_eq(L[1], df.tail(2))
def test_example_type_error_message():
try:
DataFrame(example=[123])
except Exception as e:
assert "DataFrame" in str(e)
assert "[123]" in str(e)
def test_dataframes(stream):
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
sdf = DataFrames(example=df, stream=stream)
L = sdf.x.sum().stream.gather().sink_to_list()
sdf.emit(df)
sdf.emit(df)
assert L == [6, 6]
def test_groupby_aggregate_updating(stream):
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
sdf = DataFrame(example=df, stream=stream)
assert sdf.groupby("x").y.mean()._stream_type == "updating"
assert sdf.x.sum()._stream_type == "updating"
assert (sdf.x.sum() + 1)._stream_type == "updating"
def test_window_sum(stream):
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
sdf = DataFrame(example=df, stream=stream)
L = sdf.window(n=4).x.sum().stream.gather().sink_to_list()
sdf.emit(df)
assert L == [6]
sdf.emit(df)
assert L == [6, 9]
sdf.emit(df)
assert L == [6, 9, 9]
def test_window_sum_dataframe(stream):
df = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
sdf = DataFrame(example=df, stream=stream)
L = sdf.window(n=4).sum().stream.gather().sink_to_list()
sdf.emit(df)
assert_eq(L[0], cudf.Series([6, 15], index=["x", "y"]))
sdf.emit(df)
assert_eq(L[0], cudf.Series([6, 15], index=["x", "y"]))
assert_eq(L[1], cudf.Series([9, 21], index=["x", "y"]))
sdf.emit(df)
assert_eq(L[0], cudf.Series([6, 15], index=["x", "y"]))
assert_eq(L[1], cudf.Series([9, 21], index=["x", "y"]))
assert_eq(L[2], cudf.Series([9, 21], index=["x", "y"]))
@pytest.mark.parametrize(
"func",
[
lambda x: x.sum(),
lambda x: x.mean(),
lambda x: x.count(),
lambda x: x.var(ddof=1),
lambda x: x.std(ddof=1),
lambda x: x.var(ddof=0),
],
)
@pytest.mark.parametrize("n", [2, 4])
@pytest.mark.parametrize("getter", [lambda df: df.x])
def test_windowing_n(func, n, getter):
df = cudf.DataFrame({"x": list(range(10)), "y": [1, 2] * 5})
sdf = DataFrame(example=df)
L = func(getter(sdf).window(n=n)).stream.gather().sink_to_list()
for i in range(0, 10, 3):
sdf.emit(df.iloc[i : i + 3])
sdf.emit(df.iloc[:0])
assert len(L) == 5
assert_eq(L[0], func(getter(df).iloc[max(0, 3 - n) : 3]))
assert_eq(L[-1], func(getter(df).iloc[len(df) - n :]))
@pytest.mark.parametrize("func", [lambda x: x.sum(), lambda x: x.mean()])
@pytest.mark.parametrize("value", ["10h", "1d"])
@pytest.mark.parametrize("getter", [lambda df: df, lambda df: df.x])
@pytest.mark.parametrize(
"grouper", [lambda a: "y", lambda a: a.index, lambda a: ["y"]]
)
@pytest.mark.parametrize(
"indexer", [lambda g: g, lambda g: g[["x"]], lambda g: g[["x", "y"]]]
)
def test_groupby_windowing_value(func, value, getter, grouper, indexer):
index = pd.DatetimeIndex(
pd.date_range("2000-01-01", "2000-01-03", freq="1h")
)
df = cudf.DataFrame(
{
"x": np.arange(len(index), dtype=float),
"y": np.arange(len(index), dtype=float) % 2,
},
index=index,
)
value = pd.Timedelta(value)
sdf = DataFrame(example=df)
def f(x):
return func(indexer(x.groupby(grouper(x))))
L = f(sdf.window(value)).stream.gather().sink_to_list()
diff = 13
for i in range(0, len(index), diff):
sdf.emit(df.iloc[i : i + diff])
assert len(L) == 4
first = df.iloc[:diff]
lost = first.loc[first.index.min() + value :]
first = first.iloc[len(lost) :]
g = f(first)
assert_eq(L[0], g)
last = df.loc[index.max() - value + pd.Timedelta("1s") :]
h = f(last)
assert_eq(L[-1], h)
@pytest.mark.parametrize("func", [lambda x: x.sum(), lambda x: x.mean()])
@pytest.mark.parametrize("n", [1, 4])
@pytest.mark.parametrize("getter", [lambda df: df, lambda df: df.x])
@pytest.mark.parametrize(
"grouper",
[lambda a: a.x % 3, lambda a: "y", lambda a: a.index % 2, lambda a: ["y"]],
)
@pytest.mark.parametrize("indexer", [lambda g: g, lambda g: g[["x", "y"]]])
def test_groupby_windowing_n(func, n, getter, grouper, indexer):
df = cudf.DataFrame({"x": np.arange(10, dtype=float), "y": [1.0, 2.0] * 5})
sdf = DataFrame(example=df)
def f(x):
return func(indexer(x.groupby(grouper(x))))
L = f(sdf.window(n=n)).stream.gather().sink_to_list()
diff = 3
for i in range(0, 10, diff):
sdf.emit(df.iloc[i : i + diff])
sdf.emit(df.iloc[:0])
assert len(L) == 5
first = df.iloc[max(0, diff - n) : diff]
g = f(first)
assert_eq(L[0], g)
last = df.iloc[len(df) - n :]
h = f(last)
assert_eq(L[-1], h)
def test_window_full():
df = cudf.DataFrame({"x": np.arange(10, dtype=float), "y": [1.0, 2.0] * 5})
sdf = DataFrame(example=df)
L = sdf.window(n=4).apply(lambda x: x).stream.sink_to_list()
sdf.emit(df.iloc[:3])
sdf.emit(df.iloc[3:8])
sdf.emit(df.iloc[8:])
assert_eq(L[0], df.iloc[:3])
assert_eq(L[1], df.iloc[4:8])
assert_eq(L[2], df.iloc[-4:])
def test_custom_aggregation():
df = cudf.DataFrame({"x": np.arange(10, dtype=float), "y": [1.0, 2.0] * 5})
class Custom(Aggregation):
def initial(self, new):
return 0
def on_new(self, state, new):
return state + 1, state
def on_old(self, state, new):
return state - 100, state
sdf = DataFrame(example=df)
L = sdf.aggregate(Custom()).stream.sink_to_list()
sdf.emit(df)
sdf.emit(df)
sdf.emit(df)
assert L == [0, 1, 2]
sdf = DataFrame(example=df)
L = sdf.window(n=5).aggregate(Custom()).stream.sink_to_list()
sdf.emit(df)
sdf.emit(df)
sdf.emit(df)
assert L == [1, -198, -397]
def test_groupby_aggregate_with_start_state(stream):
example = cudf.DataFrame({"name": [], "amount": []})
sdf = DataFrame(stream, example=example).groupby(["name"])
output0 = sdf.amount.sum(start=None).stream.gather().sink_to_list()
output1 = (
sdf.amount.mean(with_state=True, start=None)
.stream.gather()
.sink_to_list()
)
output2 = sdf.amount.count(start=None).stream.gather().sink_to_list()
df = cudf.DataFrame({"name": ["Alice", "Tom"], "amount": [50, 100]})
stream.emit(df)
out_df0 = cudf.DataFrame({"name": ["Alice", "Tom"], "amount": [50, 100]})
out_df1 = cudf.DataFrame(
{"name": ["Alice", "Tom"], "amount": [50.0, 100.0]}
)
out_df2 = cudf.DataFrame({"name": ["Alice", "Tom"], "amount": [1, 1]})
assert assert_eq(output0[0].reset_index(), out_df0)
assert assert_eq(output1[0][1].reset_index(), out_df1)
assert assert_eq(output2[0].reset_index(), out_df2)
example = cudf.DataFrame({"name": [], "amount": []})
sdf = DataFrame(stream, example=example).groupby(["name"])
output3 = sdf.amount.sum(start=output0[0]).stream.gather().sink_to_list()
output4 = (
sdf.amount.mean(with_state=True, start=output1[0][0])
.stream.gather()
.sink_to_list()
)
output5 = sdf.amount.count(start=output2[0]).stream.gather().sink_to_list()
df = cudf.DataFrame(
{"name": ["Alice", "Tom", "Linda"], "amount": [50, 100, 200]}
)
stream.emit(df)
out_df2 = cudf.DataFrame(
{"name": ["Alice", "Linda", "Tom"], "amount": [100, 200, 200]}
)
out_df3 = cudf.DataFrame(
{"name": ["Alice", "Linda", "Tom"], "amount": [50.0, 200.0, 100.0]}
)
out_df4 = cudf.DataFrame(
{"name": ["Alice", "Linda", "Tom"], "amount": [2, 1, 2]}
)
assert assert_eq(output3[0].reset_index(), out_df2)
assert assert_eq(output4[0][1].reset_index(), out_df3)
assert assert_eq(output5[0].reset_index(), out_df4)
def test_reductions_with_start_state(stream):
example = cudf.DataFrame({"name": [], "amount": []})
sdf = DataFrame(stream, example=example)
output0 = sdf.amount.mean(start=(10, 2)).stream.gather().sink_to_list()
output1 = sdf.amount.count(start=3).stream.gather().sink_to_list()
output2 = sdf.amount.sum(start=10).stream.gather().sink_to_list()
df = cudf.DataFrame(
{"name": ["Alice", "Tom", "Linda"], "amount": [50, 100, 200]}
)
stream.emit(df)
assert output0[0] == 72.0
assert output1[0] == 6
assert output2[0] == 360
def test_rolling_aggs_with_start_state(stream):
example = cudf.DataFrame({"name": [], "amount": []})
sdf = DataFrame(stream, example=example)
output0 = (
sdf.rolling(2, with_state=True, start=())
.amount.sum()
.stream.gather()
.sink_to_list()
)
df = cudf.DataFrame(
{"name": ["Alice", "Tom", "Linda"], "amount": [50, 100, 200]}
)
stream.emit(df)
df = cudf.DataFrame({"name": ["Bob"], "amount": [250]})
stream.emit(df)
assert assert_eq(
output0[-1][0].reset_index(drop=True),
cudf.Series([200, 250], name="amount"),
)
assert assert_eq(
output0[-1][1].reset_index(drop=True),
cudf.Series([450], name="amount"),
)
stream = Stream()
example = cudf.DataFrame({"name": [], "amount": []})
sdf = DataFrame(stream, example=example)
output1 = (
sdf.rolling(2, with_state=True, start=output0[-1][0])
.amount.sum()
.stream.gather()
.sink_to_list()
)
df = cudf.DataFrame({"name": ["Alice"], "amount": [50]})
stream.emit(df)
assert assert_eq(
output1[-1][0].reset_index(drop=True),
cudf.Series([250, 50], name="amount"),
)
assert assert_eq(
output1[-1][1].reset_index(drop=True),
cudf.Series([300], name="amount"),
)
def test_window_aggs_with_start_state(stream):
example = cudf.DataFrame({"name": [], "amount": []})
sdf = DataFrame(stream, example=example)
output0 = (
sdf.window(2, with_state=True, start=None)
.amount.sum()
.stream.gather()
.sink_to_list()
)
df = cudf.DataFrame(
{"name": ["Alice", "Tom", "Linda"], "amount": [50, 100, 200]}
)
stream.emit(df)
df = cudf.DataFrame({"name": ["Bob"], "amount": [250]})
stream.emit(df)
assert output0[-1][1] == 450
stream = Stream()
example = cudf.DataFrame({"name": [], "amount": []})
sdf = DataFrame(stream, example=example)
output1 = (
sdf.window(2, with_state=True, start=output0[-1][0])
.amount.sum()
.stream.gather()
.sink_to_list()
)
df = cudf.DataFrame({"name": ["Alice"], "amount": [50]})
stream.emit(df)
assert output1[-1][1] == 300
def test_windowed_groupby_aggs_with_start_state(stream):
example = cudf.DataFrame({"name": [], "amount": []})
sdf = DataFrame(stream, example=example)
output0 = (
sdf.window(5, with_state=True, start=None)
.groupby(["name"])
.amount.sum()
.stream.gather()
.sink_to_list()
)
df = cudf.DataFrame(
{"name": ["Alice", "Tom", "Linda"], "amount": [50, 100, 200]}
)
stream.emit(df)
df = cudf.DataFrame(
{"name": ["Alice", "Linda", "Bob"], "amount": [250, 300, 350]}
)
stream.emit(df)
stream = Stream()
example = cudf.DataFrame({"name": [], "amount": []})
sdf = DataFrame(stream, example=example)
output1 = (
sdf.window(5, with_state=True, start=output0[-1][0])
.groupby(["name"])
.amount.sum()
.stream.gather()
.sink_to_list()
)
df = cudf.DataFrame(
{
"name": ["Alice", "Linda", "Tom", "Bob"],
"amount": [50, 100, 150, 200],
}
)
stream.emit(df)
out_df1 = cudf.DataFrame(
{
"name": ["Alice", "Bob", "Linda", "Tom"],
"amount": [50, 550, 100, 150],
}
)
assert_eq(output1[-1][1].reset_index(), out_df1)
| [
"numpy.arange",
"pytest.mark.xfail",
"dask.dataframe.utils.assert_eq",
"pandas.Timedelta",
"json.dumps",
"pytest.param",
"streamz.dataframe.DataFrames",
"pytest.mark.parametrize",
"distributed.Client",
"pytest.importorskip",
"pytest.raises",
"streamz.dataframe.DataFrame",
"pytest.fixture",
... | [((548, 575), 'pytest.importorskip', 'pytest.importorskip', (['"""cudf"""'], {}), "('cudf')\n", (567, 575), False, 'import pytest\n'), ((579, 609), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (593, 609), False, 'import pytest\n'), ((750, 789), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['core', 'dask']"}), "(params=['core', 'dask'])\n", (764, 789), False, 'import pytest\n'), ((2763, 3074), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""op"""', '[operator.add, operator.and_, operator.eq, operator.floordiv, operator.ge,\n operator.gt, operator.le, operator.lshift, operator.lt, operator.mod,\n operator.mul, operator.ne, operator.or_, operator.pow, operator.rshift,\n operator.sub, operator.truediv, operator.xor]'], {}), "('op', [operator.add, operator.and_, operator.eq,\n operator.floordiv, operator.ge, operator.gt, operator.le, operator.\n lshift, operator.lt, operator.mod, operator.mul, operator.ne, operator.\n or_, operator.pow, operator.rshift, operator.sub, operator.truediv,\n operator.xor])\n", (2786, 3074), False, 'import pytest\n'), ((3220, 3287), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""getter"""', '[lambda df: df, lambda df: df.x]'], {}), "('getter', [lambda df: df, lambda df: df.x])\n", (3243, 3287), False, 'import pytest\n'), ((3993, 4060), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""getter"""', '[lambda df: df, lambda df: df.x]'], {}), "('getter', [lambda df: df, lambda df: df.x])\n", (4016, 4060), False, 'import pytest\n'), ((6482, 6596), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""grouper"""', "[lambda a: a.x % 3, lambda a: 'x', lambda a: a.index % 2, lambda a: ['x']]"], {}), "('grouper', [lambda a: a.x % 3, lambda a: 'x', lambda\n a: a.index % 2, lambda a: ['x']])\n", (6505, 6596), False, 'import pytest\n'), ((6605, 6703), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""indexer"""', "[lambda g: g, lambda g: g[['y']], 
lambda g: g[['x', 'y']]]"], {}), "('indexer', [lambda g: g, lambda g: g[['y']], lambda\n g: g[['x', 'y']]])\n", (6628, 6703), False, 'import pytest\n'), ((10008, 10155), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pre_get,post_get"""', '[(lambda df: df, lambda df: df), (lambda df: df.x, lambda x: x), (lambda df:\n df, lambda df: df.x)]'], {}), "('pre_get,post_get', [(lambda df: df, lambda df: df),\n (lambda df: df.x, lambda x: x), (lambda df: df, lambda df: df.x)])\n", (10031, 10155), False, 'import pytest\n'), ((11823, 11895), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""op"""', "['cumsum', 'cummax', 'cumprod', 'cummin']"], {}), "('op', ['cumsum', 'cummax', 'cumprod', 'cummin'])\n", (11846, 11895), False, 'import pytest\n'), ((11897, 11964), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""getter"""', '[lambda df: df, lambda df: df.x]'], {}), "('getter', [lambda df: df, lambda df: df.x])\n", (11920, 11964), False, 'import pytest\n'), ((14861, 14897), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n"""', '[2, 4]'], {}), "('n', [2, 4])\n", (14884, 14897), False, 'import pytest\n'), ((14899, 14951), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""getter"""', '[lambda df: df.x]'], {}), "('getter', [lambda df: df.x])\n", (14922, 14951), False, 'import pytest\n'), ((15475, 15522), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value"""', "['10h', '1d']"], {}), "('value', ['10h', '1d'])\n", (15498, 15522), False, 'import pytest\n'), ((15524, 15591), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""getter"""', '[lambda df: df, lambda df: df.x]'], {}), "('getter', [lambda df: df, lambda df: df.x])\n", (15547, 15591), False, 'import pytest\n'), ((15593, 15684), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""grouper"""', "[lambda a: 'y', lambda a: a.index, lambda a: ['y']]"], {}), "('grouper', [lambda a: 'y', lambda a: a.index, lambda\n a: ['y']])\n", (15616, 15684), 
False, 'import pytest\n'), ((15688, 15786), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""indexer"""', "[lambda g: g, lambda g: g[['x']], lambda g: g[['x', 'y']]]"], {}), "('indexer', [lambda g: g, lambda g: g[['x']], lambda\n g: g[['x', 'y']]])\n", (15711, 15786), False, 'import pytest\n'), ((16791, 16827), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n"""', '[1, 4]'], {}), "('n', [1, 4])\n", (16814, 16827), False, 'import pytest\n'), ((16829, 16896), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""getter"""', '[lambda df: df, lambda df: df.x]'], {}), "('getter', [lambda df: df, lambda df: df.x])\n", (16852, 16896), False, 'import pytest\n'), ((16898, 17012), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""grouper"""', "[lambda a: a.x % 3, lambda a: 'y', lambda a: a.index % 2, lambda a: ['y']]"], {}), "('grouper', [lambda a: a.x % 3, lambda a: 'y', lambda\n a: a.index % 2, lambda a: ['y']])\n", (16921, 17012), False, 'import pytest\n'), ((17021, 17095), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""indexer"""', "[lambda g: g, lambda g: g[['x', 'y']]]"], {}), "('indexer', [lambda g: g, lambda g: g[['x', 'y']]])\n", (17044, 17095), False, 'import pytest\n'), ((637, 680), 'distributed.Client', 'Client', ([], {'processes': '(False)', 'asynchronous': '(False)'}), '(processes=False, asynchronous=False)\n', (643, 680), False, 'from distributed import Client\n'), ((1026, 1062), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df', 'stream': 'stream'}), '(example=df, stream=stream)\n', (1035, 1062), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((1370, 1392), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['L2[0]', 'df.x'], {}), '(L2[0], df.x)\n', (1379, 1392), False, 'from dask.dataframe.utils import assert_eq\n'), ((1487, 1523), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df', 'stream': 'stream'}), '(example=df, stream=stream)\n', 
(1496, 1523), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((1745, 1766), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df'}), '(example=df)\n', (1754, 1766), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((2010, 2046), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df', 'stream': 'stream'}), '(example=df, stream=stream)\n', (2019, 2046), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((3515, 3551), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df', 'stream': 'stream'}), '(example=df, stream=stream)\n', (3524, 3551), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((3686, 3708), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['li[0]', 'left'], {}), '(li[0], left)\n', (3695, 3708), False, 'from dask.dataframe.utils import assert_eq\n'), ((3713, 3735), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['r[0]', 'right'], {}), '(r[0], right)\n', (3722, 3735), False, 'from dask.dataframe.utils import assert_eq\n'), ((4246, 4267), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df'}), '(example=df)\n', (4255, 4267), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((4333, 4358), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['b[0]', 'expected'], {}), '(b[0], expected)\n', (4342, 4358), False, 'from dask.dataframe.utils import assert_eq\n'), ((4763, 4784), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df'}), '(example=df)\n', (4772, 4784), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((4844, 4869), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['L[0]', 'expected'], {}), '(L[0], expected)\n', (4853, 4869), False, 'from dask.dataframe.utils import assert_eq\n'), ((4961, 4982), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df'}), '(example=df)\n', 
(4970, 4982), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((5335, 5371), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df', 'stream': 'stream'}), '(example=df, stream=stream)\n', (5344, 5371), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((5444, 5469), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['b[0]', 'expected'], {}), '(b[0], expected)\n', (5453, 5469), False, 'from dask.dataframe.utils import assert_eq\n'), ((5562, 5598), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df', 'stream': 'stream'}), '(example=df, stream=stream)\n', (5571, 5598), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((5696, 5725), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['L[0]', '(df.index + 5)'], {}), '(L[0], df.index + 5)\n', (5705, 5725), False, 'from dask.dataframe.utils import assert_eq\n'), ((5730, 5759), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['L[1]', '(df.index + 5)'], {}), '(L[1], df.index + 5)\n', (5739, 5759), False, 'from dask.dataframe.utils import assert_eq\n'), ((5868, 5913), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df.iloc[:0]', 'stream': 'stream'}), '(example=df.iloc[:0], stream=stream)\n', (5877, 5913), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((6193, 6238), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df.iloc[:0]', 'stream': 'stream'}), '(example=df.iloc[:0], stream=stream)\n', (6202, 6238), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((6876, 6921), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df.iloc[:0]', 'stream': 'stream'}), '(example=df.iloc[:0], stream=stream)\n', (6885, 6921), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((7169, 7187), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['L[0]', 'g'], {}), '(L[0], 
g)\n', (7178, 7187), False, 'from dask.dataframe.utils import assert_eq\n'), ((7192, 7211), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['L[-1]', 'h'], {}), '(L[-1], h)\n', (7201, 7211), False, 'from dask.dataframe.utils import assert_eq\n'), ((7343, 7379), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df', 'stream': 'stream'}), '(example=df, stream=stream)\n', (7352, 7379), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((7773, 7809), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df', 'stream': 'stream'}), '(example=df, stream=stream)\n', (7782, 7809), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((8046, 8091), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df.iloc[:0]', 'stream': 'stream'}), '(example=df.iloc[:0], stream=stream)\n', (8055, 8091), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((8559, 8604), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df.iloc[:0]', 'stream': 'stream'}), '(example=df.iloc[:0], stream=stream)\n', (8568, 8604), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((8796, 8829), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['L[-1]', '(df.iloc[7:] * 2)'], {}), '(L[-1], df.iloc[7:] * 2)\n', (8805, 8829), False, 'from dask.dataframe.utils import assert_eq\n'), ((10539, 10575), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df', 'stream': 'stream'}), '(example=df, stream=stream)\n', (10548, 10575), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((11651, 11687), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df', 'stream': 'stream'}), '(example=df, stream=stream)\n', (11660, 11687), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((12134, 12170), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df', 
'stream': 'stream'}), '(example=df, stream=stream)\n', (12143, 12170), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((12428, 12461), 'pytest.importorskip', 'pytest.importorskip', (['"""ipywidgets"""'], {}), "('ipywidgets')\n", (12447, 12461), False, 'import pytest\n'), ((12466, 12496), 'pytest.importorskip', 'pytest.importorskip', (['"""IPython"""'], {}), "('IPython')\n", (12485, 12496), False, 'import pytest\n'), ((12566, 12602), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df', 'stream': 'stream'}), '(example=df, stream=stream)\n', (12575, 12602), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((12744, 12780), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df', 'stream': 'stream'}), '(example=df, stream=stream)\n', (12753, 12780), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((13212, 13249), 'streamz.dataframe.DataFrames', 'DataFrames', ([], {'example': 'df', 'stream': 'stream'}), '(example=df, stream=stream)\n', (13222, 13249), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((13475, 13511), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df', 'stream': 'stream'}), '(example=df, stream=stream)\n', (13484, 13511), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((13782, 13818), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df', 'stream': 'stream'}), '(example=df, stream=stream)\n', (13791, 13818), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((14112, 14148), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df', 'stream': 'stream'}), '(example=df, stream=stream)\n', (14121, 14148), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((15067, 15088), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df'}), 
'(example=df)\n', (15076, 15088), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((16155, 16174), 'pandas.Timedelta', 'pd.Timedelta', (['value'], {}), '(value)\n', (16167, 16174), True, 'import pandas as pd\n'), ((16186, 16207), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df'}), '(example=df)\n', (16195, 16207), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((16592, 16610), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['L[0]', 'g'], {}), '(L[0], g)\n', (16601, 16610), False, 'from dask.dataframe.utils import assert_eq\n'), ((16694, 16713), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['L[-1]', 'h'], {}), '(L[-1], h)\n', (16703, 16713), False, 'from dask.dataframe.utils import assert_eq\n'), ((17252, 17273), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df'}), '(example=df)\n', (17261, 17273), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((17605, 17623), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['L[0]', 'g'], {}), '(L[0], g)\n', (17614, 17623), False, 'from dask.dataframe.utils import assert_eq\n'), ((17679, 17698), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['L[-1]', 'h'], {}), '(L[-1], h)\n', (17688, 17698), False, 'from dask.dataframe.utils import assert_eq\n'), ((17816, 17837), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df'}), '(example=df)\n', (17825, 17837), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((17989, 18017), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['L[0]', 'df.iloc[:3]'], {}), '(L[0], df.iloc[:3])\n', (17998, 18017), False, 'from dask.dataframe.utils import assert_eq\n'), ((18022, 18051), 'dask.dataframe.utils.assert_eq', 'assert_eq', (['L[1]', 'df.iloc[4:8]'], {}), '(L[1], df.iloc[4:8])\n', (18031, 18051), False, 'from dask.dataframe.utils import assert_eq\n'), ((18056, 18085), 'dask.dataframe.utils.assert_eq', 
'assert_eq', (['L[2]', 'df.iloc[-4:]'], {}), '(L[2], df.iloc[-4:])\n', (18065, 18085), False, 'from dask.dataframe.utils import assert_eq\n'), ((18447, 18468), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df'}), '(example=df)\n', (18456, 18468), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((18613, 18634), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'df'}), '(example=df)\n', (18622, 18634), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((20886, 20920), 'streamz.dataframe.DataFrame', 'DataFrame', (['stream'], {'example': 'example'}), '(stream, example=example)\n', (20895, 20920), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((21464, 21498), 'streamz.dataframe.DataFrame', 'DataFrame', (['stream'], {'example': 'example'}), '(stream, example=example)\n', (21473, 21498), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((22099, 22107), 'streamz.Stream', 'Stream', ([], {}), '()\n', (22105, 22107), False, 'from streamz import Stream\n'), ((22175, 22209), 'streamz.dataframe.DataFrame', 'DataFrame', (['stream'], {'example': 'example'}), '(stream, example=example)\n', (22184, 22209), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((22802, 22836), 'streamz.dataframe.DataFrame', 'DataFrame', (['stream'], {'example': 'example'}), '(stream, example=example)\n', (22811, 22836), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((23230, 23238), 'streamz.Stream', 'Stream', ([], {}), '()\n', (23236, 23238), False, 'from streamz import Stream\n'), ((23306, 23340), 'streamz.dataframe.DataFrame', 'DataFrame', (['stream'], {'example': 'example'}), '(stream, example=example)\n', (23315, 23340), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((23735, 23769), 
'streamz.dataframe.DataFrame', 'DataFrame', (['stream'], {'example': 'example'}), '(stream, example=example)\n', (23744, 23769), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((24199, 24207), 'streamz.Stream', 'Stream', ([], {}), '()\n', (24205, 24207), False, 'from streamz import Stream\n'), ((24275, 24309), 'streamz.dataframe.DataFrame', 'DataFrame', (['stream'], {'example': 'example'}), '(stream, example=example)\n', (24284, 24309), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((882, 890), 'streamz.Stream', 'Stream', ([], {}), '()\n', (888, 890), False, 'from streamz import Stream\n'), ((916, 928), 'streamz.dask.DaskStream', 'DaskStream', ([], {}), '()\n', (926, 928), False, 'from streamz.dask import DaskStream\n'), ((1866, 1895), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (1879, 1895), False, 'import pytest\n'), ((2056, 2080), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2069, 2080), False, 'import pytest\n'), ((2112, 2137), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (2125, 2137), False, 'import pytest\n'), ((2422, 2463), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': 'example', 'stream': 'stream'}), '(example=example, stream=stream)\n', (2431, 2463), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((10325, 10377), 'pandas.date_range', 'pd.date_range', (['"""2000-01-01"""', '"""2000-01-03"""'], {'freq': '"""1h"""'}), "('2000-01-01', '2000-01-03', freq='1h')\n", (10338, 10377), True, 'import pandas as pd\n'), ((8932, 8955), 'pytest.param', 'pytest.param', (['{}', '"""min"""'], {}), "({}, 'min')\n", (8944, 8955), False, 'import pytest\n'), ((9115, 9138), 'pytest.param', 'pytest.param', (['{}', '"""max"""'], {}), "({}, 'max')\n", (9127, 9138), False, 'import pytest\n'), ((9295, 9320), 'pytest.param', 'pytest.param', (['{}', 
'"""count"""'], {}), "({}, 'count')\n", (9307, 9320), False, 'import pytest\n'), ((9883, 9898), 'pytest.param', 'pytest.param', (['(2)'], {}), '(2)\n', (9895, 9898), False, 'import pytest\n'), ((9903, 9921), 'pytest.param', 'pytest.param', (['"""3h"""'], {}), "('3h')\n", (9915, 9921), False, 'import pytest\n'), ((9923, 9950), 'pandas.Timedelta', 'pd.Timedelta', (['"""200 minutes"""'], {}), "('200 minutes')\n", (9935, 9950), True, 'import pandas as pd\n'), ((9989, 10004), 'pytest.param', 'pytest.param', (['(5)'], {}), '(5)\n', (10001, 10004), False, 'import pytest\n'), ((12991, 13015), 'streamz.dataframe.DataFrame', 'DataFrame', ([], {'example': '[123]'}), '(example=[123])\n', (13000, 13015), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((15900, 15952), 'pandas.date_range', 'pd.date_range', (['"""2000-01-01"""', '"""2000-01-03"""'], {'freq': '"""1h"""'}), "('2000-01-01', '2000-01-03', freq='1h')\n", (15913, 15952), True, 'import pandas as pd\n'), ((11485, 11517), 'json.dumps', 'json.dumps', (["{'x': i % 3, 'y': i}"], {}), "({'x': i % 3, 'y': i})\n", (11495, 11517), False, 'import json\n'), ((17191, 17217), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'float'}), '(10, dtype=float)\n', (17200, 17217), True, 'import numpy as np\n'), ((17755, 17781), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'float'}), '(10, dtype=float)\n', (17764, 17781), True, 'import numpy as np\n'), ((18149, 18175), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'float'}), '(10, dtype=float)\n', (18158, 18175), True, 'import numpy as np\n'), ((18908, 18942), 'streamz.dataframe.DataFrame', 'DataFrame', (['stream'], {'example': 'example'}), '(stream, example=example)\n', (18917, 18942), False, 'from streamz.dataframe import Aggregation, DataFrame, DataFrames, Series\n'), ((19814, 19848), 'streamz.dataframe.DataFrame', 'DataFrame', (['stream'], {'example': 'example'}), '(stream, example=example)\n', (19823, 19848), False, 'from streamz.dataframe 
import Aggregation, DataFrame, DataFrames, Series\n'), ((4561, 4617), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Index name lost in _getattr_"""'}), "(reason='Index name lost in _getattr_')\n", (4578, 4617), False, 'import pytest\n'), ((9035, 9094), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Unavailable for rolling objects"""'}), "(reason='Unavailable for rolling objects')\n", (9052, 9094), False, 'import pytest\n'), ((9215, 9274), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Unavailable for rolling objects"""'}), "(reason='Unavailable for rolling objects')\n", (9232, 9274), False, 'import pytest\n'), ((9406, 9465), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Unavailable for rolling objects"""'}), "(reason='Unavailable for rolling objects')\n", (9423, 9465), False, 'import pytest\n'), ((9573, 9632), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Unavailable for rolling objects"""'}), "(reason='Unavailable for rolling objects')\n", (9590, 9632), False, 'import pytest\n'), ((9757, 9816), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Unavailable for rolling objects"""'}), "(reason='Unavailable for rolling objects')\n", (9774, 9816), False, 'import pytest\n'), ((16652, 16670), 'pandas.Timedelta', 'pd.Timedelta', (['"""1s"""'], {}), "('1s')\n", (16664, 16670), True, 'import pandas as pd\n'), ((6805, 6818), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (6814, 6818), True, 'import numpy as np\n'), ((7277, 7290), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (7286, 7290), True, 'import numpy as np\n'), ((7707, 7720), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (7716, 7720), True, 'import numpy as np\n')] |
import os
import numpy as np
import cv2
import albumentations
from PIL import Image
from torch.utils.data import Dataset
class SegmentationBase(Dataset):
    """Image/segmentation-mask pair dataset driven by a list of relative paths.

    Each line of ``data_csv`` names an image relative to ``data_root``; the
    matching mask is expected under ``segmentation_root`` with the ``.jpg``
    suffix replaced by ``.png``.
    """
    def __init__(self,
                 data_csv, data_root, segmentation_root,
                 size=None, random_crop=False, interpolation="bicubic",
                 n_labels=182, shift_segmentation=False,
                 ):
        """
        Args:
            data_csv: text file listing one relative image path per line.
            data_root: directory the image paths are relative to.
            segmentation_root: directory holding the ``.png`` masks.
            size: if a positive int, rescale the shorter side to ``size`` and
                crop to ``size`` x ``size``; ``None`` or <= 0 disables both.
            random_crop: use a random crop instead of a center crop.
            interpolation: resampling mode for the image; masks always use
                nearest-neighbour so label ids are never blended.
            n_labels: number of segmentation classes (one-hot depth).
            shift_segmentation: add 1 to all label ids, e.g. to make room for
                masks that use 255 for "unlabeled".
        """
        self.n_labels = n_labels
        self.shift_segmentation = shift_segmentation
        self.data_csv = data_csv
        self.data_root = data_root
        self.segmentation_root = segmentation_root
        with open(self.data_csv, "r") as f:
            self.image_paths = f.read().splitlines()
        self._length = len(self.image_paths)
        self.labels = {
            "relative_file_path_": [l for l in self.image_paths],
            "file_path_": [os.path.join(self.data_root, l)
                           for l in self.image_paths],
            "segmentation_path_": [os.path.join(self.segmentation_root, l.replace(".jpg", ".png"))
                                   for l in self.image_paths]
        }

        # Treat non-positive sizes the same as "no resizing requested".
        size = None if size is not None and size <= 0 else size
        self.size = size
        if self.size is not None:
            # Resolve the interpolation name to its cv2 flag once; an unknown
            # name raises KeyError, as before.  (The original assigned the raw
            # string to self.interpolation first, only to overwrite it here.)
            self.interpolation = {
                "nearest": cv2.INTER_NEAREST,
                "bilinear": cv2.INTER_LINEAR,
                "bicubic": cv2.INTER_CUBIC,
                "area": cv2.INTER_AREA,
                "lanczos": cv2.INTER_LANCZOS4}[interpolation]
            self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
                                                                 interpolation=self.interpolation)
            # Masks must use nearest-neighbour so label ids stay exact.
            self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
                                                                        interpolation=cv2.INTER_NEAREST)
            self.center_crop = not random_crop
            if self.center_crop:
                self.cropper = albumentations.CenterCrop(height=self.size, width=self.size)
            else:
                self.cropper = albumentations.RandomCrop(height=self.size, width=self.size)
            self.preprocessor = self.cropper

    def __len__(self):
        return self._length

    def __getitem__(self, i):
        example = dict((k, self.labels[k][i]) for k in self.labels)
        image = Image.open(example["file_path_"])
        if not image.mode == "RGB":
            image = image.convert("RGB")
        image = np.array(image).astype(np.uint8)
        if self.size is not None:
            image = self.image_rescaler(image=image)["image"]
        segmentation = Image.open(example["segmentation_path_"])
        assert segmentation.mode == "L", segmentation.mode
        segmentation = np.array(segmentation).astype(np.uint8)
        if self.shift_segmentation:
            # used to support segmentations containing unlabeled==255 label
            segmentation = segmentation + 1
        if self.size is not None:
            segmentation = self.segmentation_rescaler(image=segmentation)["image"]
        if self.size is not None:
            # Crop image and mask together so they stay spatially aligned.
            processed = self.preprocessor(image=image,
                                          mask=segmentation
                                          )
        else:
            processed = {"image": image,
                         "mask": segmentation
                         }
        # Scale pixels from [0, 255] to [-1, 1].
        example["image"] = (processed["image"]/127.5 - 1.0).astype(np.float32)
        segmentation = processed["mask"]
        # One-hot encode the mask: (H, W) int labels -> (H, W, n_labels).
        onehot = np.eye(self.n_labels)[segmentation]
        example["segmentation"] = onehot
        return example
class Examples(SegmentationBase):
    """Bundled sFlckr example split: paths are fixed under ``data/``."""
    def __init__(self, size=None, random_crop=False, interpolation="bicubic"):
        fixed_paths = dict(
            data_csv="data/sflckr_examples.txt",
            data_root="data/sflckr_images",
            segmentation_root="data/sflckr_segmentations",
        )
        super().__init__(size=size,
                         random_crop=random_crop,
                         interpolation=interpolation,
                         **fixed_paths)
| [
"numpy.eye",
"PIL.Image.open",
"os.path.join",
"albumentations.RandomCrop",
"numpy.array",
"albumentations.CenterCrop",
"albumentations.SmallestMaxSize"
] | [((2429, 2462), 'PIL.Image.open', 'Image.open', (["example['file_path_']"], {}), "(example['file_path_'])\n", (2439, 2462), False, 'from PIL import Image\n'), ((2708, 2749), 'PIL.Image.open', 'Image.open', (["example['segmentation_path_']"], {}), "(example['segmentation_path_'])\n", (2718, 2749), False, 'from PIL import Image\n'), ((1588, 1677), 'albumentations.SmallestMaxSize', 'albumentations.SmallestMaxSize', ([], {'max_size': 'self.size', 'interpolation': 'self.interpolation'}), '(max_size=self.size, interpolation=self.\n interpolation)\n', (1618, 1677), False, 'import albumentations\n'), ((1779, 1867), 'albumentations.SmallestMaxSize', 'albumentations.SmallestMaxSize', ([], {'max_size': 'self.size', 'interpolation': 'cv2.INTER_NEAREST'}), '(max_size=self.size, interpolation=cv2.\n INTER_NEAREST)\n', (1809, 1867), False, 'import albumentations\n'), ((3601, 3622), 'numpy.eye', 'np.eye', (['self.n_labels'], {}), '(self.n_labels)\n', (3607, 3622), True, 'import numpy as np\n'), ((849, 880), 'os.path.join', 'os.path.join', (['self.data_root', 'l'], {}), '(self.data_root, l)\n', (861, 880), False, 'import os\n'), ((2046, 2106), 'albumentations.CenterCrop', 'albumentations.CenterCrop', ([], {'height': 'self.size', 'width': 'self.size'}), '(height=self.size, width=self.size)\n', (2071, 2106), False, 'import albumentations\n'), ((2156, 2216), 'albumentations.RandomCrop', 'albumentations.RandomCrop', ([], {'height': 'self.size', 'width': 'self.size'}), '(height=self.size, width=self.size)\n', (2181, 2216), False, 'import albumentations\n'), ((2556, 2571), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2564, 2571), True, 'import numpy as np\n'), ((2832, 2854), 'numpy.array', 'np.array', (['segmentation'], {}), '(segmentation)\n', (2840, 2854), True, 'import numpy as np\n')] |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Block kernel lsqr solver for multi-class classification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
import numpy as np
import scipy.linalg as linalg
from scipy.sparse.linalg import spsolve
from sklearn import metrics
class BlockKernelSolver(object):
  """Block-coordinate kernel ridge solver for multi-class classification.

  Forms the RBF kernel matrix K and solves (K + C * n * I) x = y one block
  of coordinates at a time, cycling over shuffled blocks for several epochs.
  Inspired by algorithm from https://arxiv.org/pdf/1602.05310.pdf.
  """
  # TODO: save preformed kernel matrix and reuse if possible
  # perhaps not possible if want to keep scikitlearn signature

  def __init__(self,
               random_state=1,
               C=0.1,
               block_size=4000,
               epochs=3,
               verbose=False,
               gamma=None):
    """
    Args:
      random_state: seed for the block shuffling.
      C: ridge regularization strength.
      block_size: number of coordinates solved for at a time.
      epochs: passes over all blocks (forced to 1 when there is one block).
      verbose: print progress information while fitting.
      gamma: RBF kernel width; defaults to 1 / n_features at fit time.
    """
    self.block_size = block_size
    self.epochs = epochs
    self.C = C
    self.kernel = 'rbf'
    self.coef_ = None
    self.verbose = verbose
    self.encode_map = None
    self.decode_map = None
    self.gamma = gamma
    self.X_train = None
    self.random_state = random_state

  def encode_y(self, y):
    """Map arbitrary class labels onto contiguous ids starting at 0."""
    # Handles classes that do not start counting from 0.
    if self.encode_map is None:
      self.classes_ = sorted(list(set(y)))
      self.encode_map = dict(zip(self.classes_, range(len(self.classes_))))
      self.decode_map = dict(zip(range(len(self.classes_)), self.classes_))
    # np.array(map(...)) yields a useless 0-d object array on Python 3,
    # so build an explicit list before converting.
    transformed_y = np.array([self.encode_map[label] for label in y])
    return transformed_y

  def decode_y(self, y):
    """Inverse of encode_y: map contiguous ids back to original labels."""
    transformed_y = np.array([self.decode_map[label] for label in y])
    return transformed_y

  def fit(self, X_train, y_train, sample_weight=None):
    """Form K and solve (K + lambda * I)x = y in a block-wise fashion."""
    np.random.seed(self.random_state)
    self.X_train = X_train
    n_features = X_train.shape[1]
    y = self.encode_y(y_train)
    if self.gamma is None:
      self.gamma = 1. / n_features
    K = metrics.pairwise.pairwise_kernels(
        X_train, metric=self.kernel, gamma=self.gamma)
    if self.verbose:
      print('Finished forming kernel matrix.')

    # compute some constants
    num_classes = len(list(set(y)))
    num_samples = K.shape[0]
    num_blocks = int(math.ceil(num_samples * 1.0 / self.block_size))
    x = np.zeros((K.shape[0], num_classes))
    y_hat = np.zeros((K.shape[0], num_classes))
    # One-hot encode by selecting rows of the identity matrix (the previous
    # np.array(map(onehot, y)) form breaks on Python 3).
    y_onehot = np.eye(num_classes)[y]
    idxes = np.diag_indices(num_samples)
    if sample_weight is not None:
      weights = np.sqrt(sample_weight)
      weights = weights[:, np.newaxis]
      y_onehot = weights * y_onehot
      K *= np.outer(weights, weights)
    if num_blocks == 1:
      epochs = 1
    else:
      epochs = self.epochs

    for e in range(epochs):
      shuffled_coords = np.random.choice(
          num_samples, num_samples, replace=False)
      for b in range(num_blocks):
        residuals = y_onehot - y_hat
        # Form a block of K with the ridge term on the diagonal.
        K[idxes] += (self.C * num_samples)
        block = shuffled_coords[b * self.block_size:
                                min((b + 1) * self.block_size, num_samples)]
        K_block = K[:, block]
        # Dim should be block size x block size
        KbTKb = K_block.T.dot(K_block)
        if self.verbose:
          print('solving block {0}'.format(b))
        # Try linalg solve then sparse solve for handling of sparse input.
        try:
          x_block = linalg.solve(KbTKb, K_block.T.dot(residuals))
        except Exception:
          try:
            x_block = spsolve(KbTKb, K_block.T.dot(residuals))
          except Exception:
            return None
        # update model
        x[block] = x[block] + x_block
        K[idxes] = K[idxes] - (self.C * num_samples)
        y_hat = K.dot(x)
        # Only pay for the accuracy computation when it will be printed.
        if self.verbose:
          y_pred = np.argmax(y_hat, axis=1)
          train_acc = metrics.accuracy_score(y, y_pred)
          print('Epoch: {0}, Block: {1}, Train Accuracy: {2}'
                .format(e, b, train_acc))
    self.coef_ = x

  def predict(self, X_val):
    """Return predicted class labels (in the original label space)."""
    val_K = metrics.pairwise.pairwise_kernels(
        X_val, self.X_train, metric=self.kernel, gamma=self.gamma)
    val_pred = np.argmax(val_K.dot(self.coef_), axis=1)
    return self.decode_y(val_pred)

  def score(self, X_val, val_y):
    """Return the accuracy of predict(X_val) against val_y."""
    val_pred = self.predict(X_val)
    val_acc = metrics.accuracy_score(val_y, val_pred)
    return val_acc

  def decision_function(self, X, type='predicted'):
    """Return raw class scores ('predicted') or the best score per row ('scores')."""
    # Return the predicted value of the best class
    # Margin_AL will see that a vector is returned and not a matrix and
    # simply select the points that have the lowest predicted value to label
    K = metrics.pairwise.pairwise_kernels(
        X, self.X_train, metric=self.kernel, gamma=self.gamma)
    predicted = K.dot(self.coef_)
    if type == 'scores':
      # Reuse the already-computed scores instead of redoing K.dot(coef_).
      val_best = np.max(predicted, axis=1)
      return val_best
    elif type == 'predicted':
      return predicted
    else:
      raise NotImplementedError('Invalid return type for decision function.')

  def get_params(self, deep=False):
    """Return the hyperparameters, optionally deep-copied (sklearn-style)."""
    params = {}
    params['C'] = self.C
    params['gamma'] = self.gamma
    if deep:
      return copy.deepcopy(params)
    return copy.copy(params)

  def set_params(self, **parameters):
    """Set attributes from keyword arguments; returns self (sklearn-style)."""
    for parameter, value in parameters.items():
      setattr(self, parameter, value)
    return self

  def softmax_over_predicted(self, X):
    """Return the row-wise softmax over the decision values for X."""
    val_K = metrics.pairwise.pairwise_kernels(
        X, self.X_train, metric=self.kernel, gamma=self.gamma)
    val_pred = val_K.dot(self.coef_)
    # Subtract the row *max* before exponentiation: softmax is shift
    # invariant, and the max-shift (unlike the previous min-shift) keeps
    # every exponent <= 0, preventing overflow.
    row_max = np.max(val_pred, axis=1)
    val_pred = val_pred - row_max[:, None]
    val_pred = np.exp(val_pred)
    sum_exp = np.sum(val_pred, axis=1)
    val_pred = val_pred / sum_exp[:, None]
    return val_pred
| [
"numpy.eye",
"math.ceil",
"numpy.sqrt",
"copy.deepcopy",
"numpy.random.choice",
"numpy.diag_indices",
"numpy.argmax",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"sklearn.metrics.pairwise.pairwise_kernels",
"numpy.outer",
"numpy.random.seed",
"numpy.min",
"copy.copy",
"sklearn.metrics.acc... | [((2421, 2454), 'numpy.random.seed', 'np.random.seed', (['self.random_state'], {}), '(self.random_state)\n', (2435, 2454), True, 'import numpy as np\n'), ((2643, 2728), 'sklearn.metrics.pairwise.pairwise_kernels', 'metrics.pairwise.pairwise_kernels', (['X_train'], {'metric': 'self.kernel', 'gamma': 'self.gamma'}), '(X_train, metric=self.kernel, gamma=self.gamma\n )\n', (2676, 2728), False, 'from sklearn import metrics\n'), ((2943, 2989), 'math.ceil', 'math.ceil', (['(num_samples * 1.0 / self.block_size)'], {}), '(num_samples * 1.0 / self.block_size)\n', (2952, 2989), False, 'import math\n'), ((3002, 3037), 'numpy.zeros', 'np.zeros', (['(K.shape[0], num_classes)'], {}), '((K.shape[0], num_classes))\n', (3010, 3037), True, 'import numpy as np\n'), ((3054, 3089), 'numpy.zeros', 'np.zeros', (['(K.shape[0], num_classes)'], {}), '((K.shape[0], num_classes))\n', (3062, 3089), True, 'import numpy as np\n'), ((3200, 3228), 'numpy.diag_indices', 'np.diag_indices', (['num_samples'], {}), '(num_samples)\n', (3215, 3228), True, 'import numpy as np\n'), ((5092, 5188), 'sklearn.metrics.pairwise.pairwise_kernels', 'metrics.pairwise.pairwise_kernels', (['X_val', 'self.X_train'], {'metric': 'self.kernel', 'gamma': 'self.gamma'}), '(X_val, self.X_train, metric=self.kernel,\n gamma=self.gamma)\n', (5125, 5188), False, 'from sklearn import metrics\n'), ((5390, 5429), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['val_y', 'val_pred'], {}), '(val_y, val_pred)\n', (5412, 5429), False, 'from sklearn import metrics\n'), ((5732, 5824), 'sklearn.metrics.pairwise.pairwise_kernels', 'metrics.pairwise.pairwise_kernels', (['X', 'self.X_train'], {'metric': 'self.kernel', 'gamma': 'self.gamma'}), '(X, self.X_train, metric=self.kernel,\n gamma=self.gamma)\n', (5765, 5824), False, 'from sklearn import metrics\n'), ((6345, 6362), 'copy.copy', 'copy.copy', (['params'], {}), '(params)\n', (6354, 6362), False, 'import copy\n'), ((6578, 6670), 
'sklearn.metrics.pairwise.pairwise_kernels', 'metrics.pairwise.pairwise_kernels', (['X', 'self.X_train'], {'metric': 'self.kernel', 'gamma': 'self.gamma'}), '(X, self.X_train, metric=self.kernel,\n gamma=self.gamma)\n', (6611, 6670), False, 'from sklearn import metrics\n'), ((6739, 6763), 'numpy.min', 'np.min', (['val_pred'], {'axis': '(1)'}), '(val_pred, axis=1)\n', (6745, 6763), True, 'import numpy as np\n'), ((6830, 6846), 'numpy.exp', 'np.exp', (['val_pred'], {}), '(val_pred)\n', (6836, 6846), True, 'import numpy as np\n'), ((6865, 6889), 'numpy.sum', 'np.sum', (['val_pred'], {'axis': '(1)'}), '(val_pred, axis=1)\n', (6871, 6889), True, 'import numpy as np\n'), ((3289, 3311), 'numpy.sqrt', 'np.sqrt', (['sample_weight'], {}), '(sample_weight)\n', (3296, 3311), True, 'import numpy as np\n'), ((3416, 3442), 'numpy.outer', 'np.outer', (['weights', 'weights'], {}), '(weights, weights)\n', (3424, 3442), True, 'import numpy as np\n'), ((3604, 3661), 'numpy.random.choice', 'np.random.choice', (['num_samples', 'num_samples'], {'replace': '(False)'}), '(num_samples, num_samples, replace=False)\n', (3620, 3661), True, 'import numpy as np\n'), ((6308, 6329), 'copy.deepcopy', 'copy.deepcopy', (['params'], {}), '(params)\n', (6321, 6329), False, 'import copy\n'), ((3117, 3136), 'numpy.eye', 'np.eye', (['num_classes'], {}), '(num_classes)\n', (3123, 3136), True, 'import numpy as np\n'), ((4778, 4802), 'numpy.argmax', 'np.argmax', (['y_hat'], {'axis': '(1)'}), '(y_hat, axis=1)\n', (4787, 4802), True, 'import numpy as np\n'), ((4831, 4864), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (4853, 4864), False, 'from sklearn import metrics\n')] |
###########################################
# Model for generating samples from model
#
###########################################
import torch
import torch.nn as nn
from torchtext.data import Iterator as BatchIter
import argparse
import numpy as np
import math
import time
from torch.autograd import Variable
import torch.nn.functional as F
import data_utils as du
import random
import DAVAE
from DAG import example_tree
from EncDec import Encoder, Decoder, Attention, fix_enc_hidden
from masked_cross_entropy import masked_cross_entropy
from data_utils import EOS_TOK, SOS_TOK, PAD_TOK, TUP_TOK, MAX_EVAL_SEQ_LEN, MIN_EVAL_SEQ_LEN
from decode_utils import transform, get_tups, get_pred_events
def generate(args):
    """
    Use the trained model for decoding
    Args
        args (argparse.ArgumentParser)
    """
    # Resolve the device once; fall back to CPU when CUDA was requested but
    # is not actually available.
    cuda_available = torch.cuda.is_available()
    if args.cuda and cuda_available:
        device = 0
        use_cuda = True
    else:
        if args.cuda and not cuda_available:
            print("You do not have CUDA, turning cuda off")
        device = -1
        use_cuda = False

    # Vocabulary and special-symbol ids.
    vocab = du.load_vocab(args.vocab)
    eos_id = vocab.stoi[EOS_TOK]
    pad_id = vocab.stoi[PAD_TOK]

    # Pick the dataset (and its batch sort key) for the requested task.
    if args.ranking:  # default is HARD one
        dataset = du.NarrativeClozeDataset(
            args.valid_data, vocab, src_seq_length=MAX_EVAL_SEQ_LEN,
            min_seq_length=MIN_EVAL_SEQ_LEN, LM=False)
        sort_key = lambda x: len(x.actual)
    else:
        dataset = du.SentenceDataset(
            args.valid_data, vocab, src_seq_length=MAX_EVAL_SEQ_LEN,
            min_seq_length=MIN_EVAL_SEQ_LEN, add_eos=False)  # put in filter pred later
        sort_key = lambda x: len(x.text)
    # Batch size during decoding is set to 1
    batches = BatchIter(dataset, 1, sort_key=sort_key, train=False, device=device)
    data_len = len(dataset)

    # Load the trained model onto the CPU and switch the decoder to eval mode.
    with open(args.load, 'rb') as fi:
        model = torch.load(fi, map_location=lambda storage, loc: storage)
    model.decoder.eval()
    model.set_use_cuda(False)

    # Dispatch to the requested evaluation/generation routine.
    if args.perplexity:
        loss = calc_perplexity(args, model, batches, vocab, data_len)
        print("Loss = {}".format(loss))
    elif args.schema:
        generate_from_seed(args, model, batches, vocab, data_len)
    elif args.ranking:
        do_ranking(args, model, batches, vocab, data_len, use_cuda)
    else:
        # sample_outputs(model, vocab)
        reconstruct(args, model, batches, vocab)
def do_ranking(args, model, batches, vocab, data_len, use_cuda):
    """Narrative-cloze ranking evaluation.

    Each batch supplies the true continuation (``actual``) plus five
    distractors, each as a (source, target) pair.  Every candidate is scored
    by the perplexity the model assigns to its target half; the example
    counts as correct when the true continuation (index 0) has the lowest
    perplexity.  Prints the accuracy as a percentage at the end.
    """
    print("RANKING")
    ranked_acc = 0.0
    tup_idx = vocab.stoi[TUP_TOK]
    for iteration, bl in enumerate(batches):
        if (iteration+1)%25 == 0:
            print("iteration {}".format(iteration+1))
        # Six (source, target) pairs: the true continuation then 5 distractors.
        all_texts = [bl.actual, bl.actual_tgt, bl.dist1, bl.dist1_tgt, bl.dist2, bl.dist2_tgt, bl.dist3, bl.dist3_tgt, bl.dist4, bl.dist4_tgt, bl.dist5, bl.dist5_tgt] # each is a tup
        assert len(all_texts) == 12, "12 = 6 * 2."
        all_texts_vars = []
        if use_cuda:
            for tup in all_texts:
                all_texts_vars.append((Variable(tup[0].cuda(), volatile=True), tup[1]))
        else:
            for tup in all_texts:
                all_texts_vars.append((Variable(tup[0], volatile=True), tup[1]))
        # will itetrate 2 at a time using iterator and next
        vars_iter = iter(all_texts_vars)
        # run the model for all 6 sentences
        pps = []
        # Locate the end of the first event tuple in the true sequence; that
        # prefix seeds the encoder before any candidate is scored.
        first_tup = -1
        for i in range(bl.actual[0].shape[1]):
            if bl.actual[0][0, i] == tup_idx:
                first_tup = i
                break
        if first_tup == -1:
            print("WARNING: First TUP is -1")
        src_tup = Variable(bl.actual[0][:, :first_tup+1].view(1, -1), volatile=True)
        src_lens = torch.LongTensor([src_tup.shape[1]])
        dhidden, latent_values = model(src_tup, src_lens, encode_only=True)
        # Latent and hidden have been initialized with the first tuple
        for tup in vars_iter:
            ## INIT FEED AND DECODE before every sentence.
            model.decoder.init_feed_(Variable(torch.zeros(1, model.decoder.attn_dim)))
            next_tup = next(vars_iter)
            _, _, _, dec_outputs = model.train(tup[0], 1, dhidden, latent_values, [])
            logits = model.logits_out(dec_outputs)
            logits = logits.transpose(0,1).contiguous() # convert to [batch, seq, vocab]
            nll = masked_cross_entropy(logits, next_tup[0], Variable(next_tup[1]))
            #nll = calc_perplexity(args, model, tup[0], vocab, next_tup[0], next_tup[1], hidden)
            pp = torch.exp(nll)
            #print("NEG-LOSS {} PPL {}".format(nll.data[0], pp.data[0]))
            pps.append(pp.data.numpy()[0])
        # low perplexity == top ranked sentence- correct answer is the first one of course
        assert len(pps) == 6, "6 targets."
        #print("\n")
        all_texts_str = [transform(text[0].data.numpy()[0], vocab.itos) for text in all_texts_vars]
        #print("ALL: {}".format(all_texts_str))
        min_index = np.argmin(pps)
        if min_index == 0:
            ranked_acc += 1
        #print("TARGET: {}".format(transform(all_texts_vars[1][0].data.numpy()[0], vocab.itos)))
        #print("CORRECT: {}".format(transform(all_texts_vars[1][0].data.numpy()[0], vocab.itos)))
        #else:
            # print the ones that are wrong
            #print("TARGET: {}".format(transform(all_texts_vars[1][0].data.numpy()[0], vocab.itos)))
            #print("WRONG: {}".format(transform(all_texts_vars[min_index+2][0].data.numpy()[0], vocab.itos)))
        if (iteration+1) == args.max_decode:
            print("Max decode reached. Exiting.")
            break
    # NOTE(review): "(iteration+1) * 1/100" relies on true division; under
    # Python 2 without __future__ division the 1/100 term would truncate.
    ranked_acc /= (iteration+1) * 1/100 # multiplying to get percent
    print("Average acc(%): {}".format(ranked_acc))
def calc_perplexity(args, model, batches, vocab, data_len):
total_loss = 0.0
iters = 0
for iteration, bl in enumerate(batches):
print(iteration)
batch, batch_lens = bl.text
target, target_lens = bl.target
if args.cuda:
batch = Variable(batch.cuda(), volatile=True)
else:
batch = Variable(batch, volatile=True)
_, _, _, dec_outputs = model(batch, batch_lens)
logits = model.logits_out(dec_outputs)
logits = logits.transpose(0,1).contiguous() # convert to [batch, seq, vocab]
ce_loss = masked_cross_entropy(logits, Variable(target), Variable(target_lens))
total_loss = total_loss + ce_loss.data[0]
iters += 1
print(iters)
print(data_len)
return total_loss / data_len
def sample_outputs(model, vocab):
model.latent_root.prune_()
for _ in range(100):
val1 = np.random.randint(313)
val2 = np.random.randint(32)
val3 = np.random.randint(38)
val4 = np.random.randint(12)
val5 = np.random.randint(6)
# values = [val1, val2, 15, val4, val5]
values = [247,12,15,val4,1]
outputs = model.decode(values)
print("Reconstruct: {}\n\n".format(transform(outputs, vocab.itos)))
def generate_from_seed(args, model, batches, vocab, data_len):
"""
Generate a script from a seed tuple
Args
args (argparse.ArgumentParser)
seeds (BatchIter) : BatchIter object for a file of seeds, the seed file should be in the
same format as normal validation data
"""
for iteration, bl in enumerate(batches):
batch, batch_lens = bl.text
target, target_lens = bl.target
if args.cuda:
batch = Variable(batch.cuda(), volatile=True)
else:
batch = Variable(batch, volatile=True)
src_lens= torch.LongTensor([batch.size(1)])
dhidden, latent_values = model(batch, src_lens, encode_only=True) #get latent encoding for seed
model.decoder.init_feed_(Variable(torch.zeros(1, model.decoder.attn_dim)))
_, _, dhidden, dec_outputs = model.train(batch, 1, dhidden, latent_values, [], return_hid=True) #decode seed
#print("seq len {}, decode after {} steps".format(seq_len, i+1))
# beam set current state to last word in the sequence
beam_inp = batch[:, -1]
# init beam initializesthe beam with the last sequence element
outputs = model.beam_decode(beam_inp, dhidden, latent_values, args.beam_size, args.max_len_decode, init_beam=True)
print("TRUE: {}".format(transform(batch.data.squeeze(), vocab.itos)))
print("Reconstruct: {}\n\n".format(transform(outputs, vocab.itos)))
def reconstruct(args, model, batches, vocab):
for iteration, bl in enumerate(batches):
batch, batch_lens = bl.text
target, target_lens = bl.target
if args.cuda:
batch = Variable(batch.cuda(), volatile=True)
else:
batch = Variable(batch, volatile=True)
outputs = model(batch, batch_lens, str_out=True, beam_size=args.beam_size, max_len_decode=args.max_len_decode)
print("TRUE: {}".format(transform(batch.data.squeeze(), vocab.itos)))
print("Reconstruct: {}\n\n".format(transform(outputs, vocab.itos)))
def schema_constraint(cands, prev_voc, curr_verbs, min_len_decode=0, step=0, eos_idx=EOS_TOK):
"""
Constraints to use during decoding,
Prevents the model from producing schemas that are obviously wrong (have repeated
predicates or the same arguments as subject and object
Args:
cands (Tensor [batch x vocab]) : the probabilities over the vocab for each batch/beam
prev_voc (Tensor [batch]) : the previous output for each batch/beam
curr_verbs (list of lists [batch x *]) : A list of lists whose kth element is a list of vocab ids of previously used
predicates in the kth beam
tup_idx (int) : the vocab id of the <TUP> symbol
"""
LOW = -1e20
K = cands.shape[0]
for i in range(K): #for each beam
#Replace previous vocabulary items with low probability
beam_prev_voc = prev_voc[i]
cands[i, beam_prev_voc] = LOW
#Replace verbs already used with low probability
for verb in curr_verbs[i]:
cands[i, verb] = LOW
if step < min_len_decode:
cands[i, eos_idx] = LOW
return cands
def update_verb_list(verb_list, b, tup_idx=4):
"""
Update currently used verbs for Beam b
verb_list is a beam_size sized list of list, with the ith list having a list of verb ids used in the ith beam
so far
"""
#First need to update based on prev ks
if len(b.prev_ks) > 1:
new_verb_list = [[]]*b.size
for i in range(b.size):
new_verb_list[i] = list(verb_list[b.prev_ks[-1][i]])
else:
new_verb_list =verb_list
#update the actual lists
if len(b.next_ys) == 2:
for i, li in enumerate(new_verb_list):
li.append(b.next_ys[-1][i])
elif len(b.next_ys) > 2:
for i, li in enumerate(new_verb_list):
if b.next_ys[-2][b.prev_ks[-1][i]] == tup_idx:
li.append(b.next_ys[-1][i])
return new_verb_list
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='DAVAE')
parser.add_argument('--impute_with', type=int, default=0)
parser.add_argument('--valid_data', type=str)
parser.add_argument('--min_vocab_freq', type=int, default=1)
parser.add_argument('--vocab', type=str)
parser.add_argument('--emb_size', type=int, default=200, help='size of word embeddings')
parser.add_argument('--hid_size', type=int, default=200,help='size of hidden')
parser.add_argument('--nlayers', type=int, default=2, help='number of layers')
parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
parser.add_argument('--log_after', type=int, default=200)
parser.add_argument('--save_after', type=int, default=500)
parser.add_argument('--validate_after', type=int, default=2500)
parser.add_argument('--optimizer', type=str, default='adagrad', help='adam, adagrad, sgd')
parser.add_argument('--clip', type=float, default=0.25, help='gradient clipping')
parser.add_argument('--epochs', type=int, default=40, help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=1, metavar='N', help='batch size')
parser.add_argument('--seed', type=int, default=11, help='random seed')
parser.add_argument('--cuda', action='store_true', help='use CUDA')
parser.add_argument('--load', type=str, default='model.pt',help='path to load the final model')
parser.add_argument('--bidir', type=bool, default=True, help='Use bidirectional encoder')
parser.add_argument('--latent', type=str, help='A str in form of python list')
parser.add_argument('--beam_size', type=int, default=-1, help='Beam size')
parser.add_argument('-perplexity', action='store_true')
parser.add_argument('-schema', action='store_true')
parser.add_argument('-max_len_decode', type=int, default=50, help='Maximum prediction length.')
parser.add_argument('--n_best', type=int, default=1, help="""outputs the n_best decoded sentences""")
parser.add_argument('--ranking', action='store_true', help="""N cloze ranking""")
parser.add_argument('--max_decode', type=int, default=2000, help="""max sentences to be evaluated/decoded.""")
args = parser.parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
generate(args)
| [
"torch.manual_seed",
"data_utils.SentenceDataset",
"argparse.ArgumentParser",
"torch.LongTensor",
"torch.load",
"random.seed",
"data_utils.NarrativeClozeDataset",
"torch.exp",
"data_utils.load_vocab",
"torch.cuda.is_available",
"numpy.random.randint",
"numpy.random.seed",
"numpy.argmin",
"... | [((1155, 1180), 'data_utils.load_vocab', 'du.load_vocab', (['args.vocab'], {}), '(args.vocab)\n', (1168, 1180), True, 'import data_utils as du\n'), ((11363, 11407), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""DAVAE"""'}), "(description='DAVAE')\n", (11386, 11407), False, 'import argparse\n'), ((13599, 13627), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (13616, 13627), False, 'import torch\n'), ((13632, 13657), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (13646, 13657), True, 'import numpy as np\n'), ((13662, 13684), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (13673, 13684), False, 'import random\n'), ((13693, 13718), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13716, 13718), False, 'import torch\n'), ((843, 868), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (866, 868), False, 'import torch\n'), ((1309, 1438), 'data_utils.NarrativeClozeDataset', 'du.NarrativeClozeDataset', (['args.valid_data', 'vocab'], {'src_seq_length': 'MAX_EVAL_SEQ_LEN', 'min_seq_length': 'MIN_EVAL_SEQ_LEN', 'LM': '(False)'}), '(args.valid_data, vocab, src_seq_length=\n MAX_EVAL_SEQ_LEN, min_seq_length=MIN_EVAL_SEQ_LEN, LM=False)\n', (1333, 1438), True, 'import data_utils as du\n'), ((1612, 1739), 'data_utils.SentenceDataset', 'du.SentenceDataset', (['args.valid_data', 'vocab'], {'src_seq_length': 'MAX_EVAL_SEQ_LEN', 'min_seq_length': 'MIN_EVAL_SEQ_LEN', 'add_eos': '(False)'}), '(args.valid_data, vocab, src_seq_length=MAX_EVAL_SEQ_LEN,\n min_seq_length=MIN_EVAL_SEQ_LEN, add_eos=False)\n', (1630, 1739), True, 'import data_utils as du\n'), ((2016, 2073), 'torch.load', 'torch.load', (['fi'], {'map_location': '(lambda storage, loc: storage)'}), '(fi, map_location=lambda storage, loc: storage)\n', (2026, 2073), False, 'import torch\n'), ((3907, 3943), 'torch.LongTensor', 'torch.LongTensor', 
(['[src_tup.shape[1]]'], {}), '([src_tup.shape[1]])\n', (3923, 3943), False, 'import torch\n'), ((5199, 5213), 'numpy.argmin', 'np.argmin', (['pps'], {}), '(pps)\n', (5208, 5213), True, 'import numpy as np\n'), ((6897, 6919), 'numpy.random.randint', 'np.random.randint', (['(313)'], {}), '(313)\n', (6914, 6919), True, 'import numpy as np\n'), ((6935, 6956), 'numpy.random.randint', 'np.random.randint', (['(32)'], {}), '(32)\n', (6952, 6956), True, 'import numpy as np\n'), ((6972, 6993), 'numpy.random.randint', 'np.random.randint', (['(38)'], {}), '(38)\n', (6989, 6993), True, 'import numpy as np\n'), ((7009, 7030), 'numpy.random.randint', 'np.random.randint', (['(12)'], {}), '(12)\n', (7026, 7030), True, 'import numpy as np\n'), ((7046, 7066), 'numpy.random.randint', 'np.random.randint', (['(6)'], {}), '(6)\n', (7063, 7066), True, 'import numpy as np\n'), ((4744, 4758), 'torch.exp', 'torch.exp', (['nll'], {}), '(nll)\n', (4753, 4758), False, 'import torch\n'), ((6335, 6365), 'torch.autograd.Variable', 'Variable', (['batch'], {'volatile': '(True)'}), '(batch, volatile=True)\n', (6343, 6365), False, 'from torch.autograd import Variable\n'), ((6606, 6622), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (6614, 6622), False, 'from torch.autograd import Variable\n'), ((6624, 6645), 'torch.autograd.Variable', 'Variable', (['target_lens'], {}), '(target_lens)\n', (6632, 6645), False, 'from torch.autograd import Variable\n'), ((7814, 7844), 'torch.autograd.Variable', 'Variable', (['batch'], {'volatile': '(True)'}), '(batch, volatile=True)\n', (7822, 7844), False, 'from torch.autograd import Variable\n'), ((9026, 9056), 'torch.autograd.Variable', 'Variable', (['batch'], {'volatile': '(True)'}), '(batch, volatile=True)\n', (9034, 9056), False, 'from torch.autograd import Variable\n'), ((13865, 13898), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (13887, 13898), False, 'import torch\n'), ((940, 965), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (963, 965), False, 'import torch\n'), ((4606, 4627), 'torch.autograd.Variable', 'Variable', (['next_tup[1]'], {}), '(next_tup[1])\n', (4614, 4627), False, 'from torch.autograd import Variable\n'), ((7233, 7263), 'decode_utils.transform', 'transform', (['outputs', 'vocab.itos'], {}), '(outputs, vocab.itos)\n', (7242, 7263), False, 'from decode_utils import transform, get_tups, get_pred_events\n'), ((8045, 8083), 'torch.zeros', 'torch.zeros', (['(1)', 'model.decoder.attn_dim'], {}), '(1, model.decoder.attn_dim)\n', (8056, 8083), False, 'import torch\n'), ((8709, 8739), 'decode_utils.transform', 'transform', (['outputs', 'vocab.itos'], {}), '(outputs, vocab.itos)\n', (8718, 8739), False, 'from decode_utils import transform, get_tups, get_pred_events\n'), ((9315, 9345), 'decode_utils.transform', 'transform', (['outputs', 'vocab.itos'], {}), '(outputs, vocab.itos)\n', (9324, 9345), False, 'from decode_utils import transform, get_tups, get_pred_events\n'), ((4237, 4275), 'torch.zeros', 'torch.zeros', (['(1)', 'model.decoder.attn_dim'], {}), '(1, model.decoder.attn_dim)\n', (4248, 4275), False, 'import torch\n'), ((3353, 3384), 'torch.autograd.Variable', 'Variable', (['tup[0]'], {'volatile': '(True)'}), '(tup[0], volatile=True)\n', (3361, 3384), False, 'from torch.autograd import Variable\n')] |
import os
import numpy
from chainer_chemistry.dataset.preprocessors import preprocess_method_dict
from chainer_chemistry import datasets as D
from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset
from rdkit import Chem
from tqdm import tqdm
import utils
class _CacheNamePolicy(object):
train_file_name = 'train.npz'
val_file_name = 'val.npz'
test_file_name = 'test.npz'
smiles_file_name = 'smiles.npz'
def _get_cache_directory_path(self, method, labels, prefix):
if labels:
return os.path.join(prefix, '{}_{}'.format(method, labels))
else:
return os.path.join(prefix, '{}_all'.format(method))
def __init__(self, method, labels, prefix='input'):
self.method = method
self.labels = labels
self.prefix = prefix
self.cache_dir = self._get_cache_directory_path(method, labels, prefix)
def get_train_file_path(self):
return os.path.join(self.cache_dir, self.train_file_name)
def get_val_file_path(self):
return os.path.join(self.cache_dir, self.val_file_name)
def get_test_file_path(self):
return os.path.join(self.cache_dir, self.test_file_name)
def get_smiles_path(self):
return os.path.join(self.cache_dir, self.smiles_file_name)
def create_cache_directory(self):
try:
os.makedirs(self.cache_dir)
except OSError:
if not os.path.isdir(self.cache_dir):
raise
PYRIDINE_SMILES = 'c1ccncc1'
def hassubst(mol, smart=PYRIDINE_SMILES):
return numpy.array(int(mol.HasSubstructMatch(Chem.MolFromSmarts(smart)))).astype('int32')
def load_dataset(method, labels, prefix='input'):
method = 'nfp' if 'nfp' in method else method # to deal with nfpdrop
method = 'ggnn' if 'ggnn' in method else method # to deal with ggnndrop
policy = _CacheNamePolicy(method, labels, prefix)
train_path = policy.get_train_file_path()
val_path = policy.get_val_file_path()
test_path = policy.get_test_file_path()
smiles_path = policy.get_smiles_path()
train, val, test = None, None, None
train_smiles, val_smiles, test_smiles = None, None, None
print()
if os.path.exists(policy.cache_dir):
print('load from cache {}'.format(policy.cache_dir))
train = NumpyTupleDataset.load(train_path)
val = NumpyTupleDataset.load(val_path)
test = NumpyTupleDataset.load(test_path)
train_smiles, val_smiles, test_smiles = utils.load_npz(smiles_path)
if train is None or val is None or test is None:
print('preprocessing dataset...')
preprocessor = preprocess_method_dict[method]()
if labels == 'pyridine':
train, val, test, train_smiles, val_smiles, test_smiles = D.get_tox21(
preprocessor, labels=None, return_smiles=True)
print('converting label into pyridine...')
# --- Pyridine = 1 ---
train_pyridine_label = [
hassubst(Chem.MolFromSmiles(smi), smart=PYRIDINE_SMILES) for smi in tqdm(train_smiles)]
val_pyridine_label = [
hassubst(Chem.MolFromSmiles(smi), smart=PYRIDINE_SMILES) for smi in tqdm(val_smiles)]
test_pyridine_label = [
hassubst(Chem.MolFromSmiles(smi), smart=PYRIDINE_SMILES) for smi in tqdm(test_smiles)]
train_pyridine_label = numpy.array(train_pyridine_label)[:, None]
val_pyridine_label = numpy.array(val_pyridine_label)[:, None]
test_pyridine_label = numpy.array(test_pyridine_label)[:, None]
print('train positive/negative', numpy.sum(train_pyridine_label == 1), numpy.sum(train_pyridine_label == 0))
train = NumpyTupleDataset(*train.features[:, :-1], train_pyridine_label)
val = NumpyTupleDataset(*val.features[:, :-1], val_pyridine_label)
test = NumpyTupleDataset(*test.features[:, :-1], test_pyridine_label)
else:
train, val, test, train_smiles, val_smiles, test_smiles = D.get_tox21(
preprocessor, labels=labels, return_smiles=True)
# Cache dataset
policy.create_cache_directory()
NumpyTupleDataset.save(train_path, train)
NumpyTupleDataset.save(val_path, val)
NumpyTupleDataset.save(test_path, test)
train_smiles = numpy.array(train_smiles)
val_smiles = numpy.array(val_smiles)
test_smiles = numpy.array(test_smiles)
utils.save_npz(smiles_path, (train_smiles, val_smiles, test_smiles))
return train, val, test, train_smiles, val_smiles, test_smiles
| [
"os.path.exists",
"utils.load_npz",
"os.makedirs",
"tqdm.tqdm",
"os.path.join",
"utils.save_npz",
"rdkit.Chem.MolFromSmiles",
"chainer_chemistry.datasets.get_tox21",
"numpy.array",
"numpy.sum",
"os.path.isdir",
"rdkit.Chem.MolFromSmarts",
"chainer_chemistry.datasets.numpy_tuple_dataset.Numpy... | [((2215, 2247), 'os.path.exists', 'os.path.exists', (['policy.cache_dir'], {}), '(policy.cache_dir)\n', (2229, 2247), False, 'import os\n'), ((957, 1007), 'os.path.join', 'os.path.join', (['self.cache_dir', 'self.train_file_name'], {}), '(self.cache_dir, self.train_file_name)\n', (969, 1007), False, 'import os\n'), ((1057, 1105), 'os.path.join', 'os.path.join', (['self.cache_dir', 'self.val_file_name'], {}), '(self.cache_dir, self.val_file_name)\n', (1069, 1105), False, 'import os\n'), ((1156, 1205), 'os.path.join', 'os.path.join', (['self.cache_dir', 'self.test_file_name'], {}), '(self.cache_dir, self.test_file_name)\n', (1168, 1205), False, 'import os\n'), ((1253, 1304), 'os.path.join', 'os.path.join', (['self.cache_dir', 'self.smiles_file_name'], {}), '(self.cache_dir, self.smiles_file_name)\n', (1265, 1304), False, 'import os\n'), ((2326, 2360), 'chainer_chemistry.datasets.numpy_tuple_dataset.NumpyTupleDataset.load', 'NumpyTupleDataset.load', (['train_path'], {}), '(train_path)\n', (2348, 2360), False, 'from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset\n'), ((2375, 2407), 'chainer_chemistry.datasets.numpy_tuple_dataset.NumpyTupleDataset.load', 'NumpyTupleDataset.load', (['val_path'], {}), '(val_path)\n', (2397, 2407), False, 'from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset\n'), ((2423, 2456), 'chainer_chemistry.datasets.numpy_tuple_dataset.NumpyTupleDataset.load', 'NumpyTupleDataset.load', (['test_path'], {}), '(test_path)\n', (2445, 2456), False, 'from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset\n'), ((2505, 2532), 'utils.load_npz', 'utils.load_npz', (['smiles_path'], {}), '(smiles_path)\n', (2519, 2532), False, 'import utils\n'), ((4201, 4242), 'chainer_chemistry.datasets.numpy_tuple_dataset.NumpyTupleDataset.save', 'NumpyTupleDataset.save', (['train_path', 'train'], {}), '(train_path, train)\n', (4223, 4242), False, 
'from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset\n'), ((4251, 4288), 'chainer_chemistry.datasets.numpy_tuple_dataset.NumpyTupleDataset.save', 'NumpyTupleDataset.save', (['val_path', 'val'], {}), '(val_path, val)\n', (4273, 4288), False, 'from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset\n'), ((4297, 4336), 'chainer_chemistry.datasets.numpy_tuple_dataset.NumpyTupleDataset.save', 'NumpyTupleDataset.save', (['test_path', 'test'], {}), '(test_path, test)\n', (4319, 4336), False, 'from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset\n'), ((4360, 4385), 'numpy.array', 'numpy.array', (['train_smiles'], {}), '(train_smiles)\n', (4371, 4385), False, 'import numpy\n'), ((4407, 4430), 'numpy.array', 'numpy.array', (['val_smiles'], {}), '(val_smiles)\n', (4418, 4430), False, 'import numpy\n'), ((4453, 4477), 'numpy.array', 'numpy.array', (['test_smiles'], {}), '(test_smiles)\n', (4464, 4477), False, 'import numpy\n'), ((4486, 4554), 'utils.save_npz', 'utils.save_npz', (['smiles_path', '(train_smiles, val_smiles, test_smiles)'], {}), '(smiles_path, (train_smiles, val_smiles, test_smiles))\n', (4500, 4554), False, 'import utils\n'), ((1369, 1396), 'os.makedirs', 'os.makedirs', (['self.cache_dir'], {}), '(self.cache_dir)\n', (1380, 1396), False, 'import os\n'), ((2787, 2845), 'chainer_chemistry.datasets.get_tox21', 'D.get_tox21', (['preprocessor'], {'labels': 'None', 'return_smiles': '(True)'}), '(preprocessor, labels=None, return_smiles=True)\n', (2798, 2845), True, 'from chainer_chemistry import datasets as D\n'), ((3740, 3804), 'chainer_chemistry.datasets.numpy_tuple_dataset.NumpyTupleDataset', 'NumpyTupleDataset', (['*train.features[:, :-1]', 'train_pyridine_label'], {}), '(*train.features[:, :-1], train_pyridine_label)\n', (3757, 3804), False, 'from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset\n'), ((3823, 3883), 
'chainer_chemistry.datasets.numpy_tuple_dataset.NumpyTupleDataset', 'NumpyTupleDataset', (['*val.features[:, :-1]', 'val_pyridine_label'], {}), '(*val.features[:, :-1], val_pyridine_label)\n', (3840, 3883), False, 'from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset\n'), ((3903, 3965), 'chainer_chemistry.datasets.numpy_tuple_dataset.NumpyTupleDataset', 'NumpyTupleDataset', (['*test.features[:, :-1]', 'test_pyridine_label'], {}), '(*test.features[:, :-1], test_pyridine_label)\n', (3920, 3965), False, 'from chainer_chemistry.datasets.numpy_tuple_dataset import NumpyTupleDataset\n'), ((4050, 4110), 'chainer_chemistry.datasets.get_tox21', 'D.get_tox21', (['preprocessor'], {'labels': 'labels', 'return_smiles': '(True)'}), '(preprocessor, labels=labels, return_smiles=True)\n', (4061, 4110), True, 'from chainer_chemistry import datasets as D\n'), ((3406, 3439), 'numpy.array', 'numpy.array', (['train_pyridine_label'], {}), '(train_pyridine_label)\n', (3417, 3439), False, 'import numpy\n'), ((3482, 3513), 'numpy.array', 'numpy.array', (['val_pyridine_label'], {}), '(val_pyridine_label)\n', (3493, 3513), False, 'import numpy\n'), ((3557, 3589), 'numpy.array', 'numpy.array', (['test_pyridine_label'], {}), '(test_pyridine_label)\n', (3568, 3589), False, 'import numpy\n'), ((3644, 3680), 'numpy.sum', 'numpy.sum', (['(train_pyridine_label == 1)'], {}), '(train_pyridine_label == 1)\n', (3653, 3680), False, 'import numpy\n'), ((3682, 3718), 'numpy.sum', 'numpy.sum', (['(train_pyridine_label == 0)'], {}), '(train_pyridine_label == 0)\n', (3691, 3718), False, 'import numpy\n'), ((1440, 1469), 'os.path.isdir', 'os.path.isdir', (['self.cache_dir'], {}), '(self.cache_dir)\n', (1453, 1469), False, 'import os\n'), ((3015, 3038), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (3033, 3038), False, 'from rdkit import Chem\n'), ((3074, 3092), 'tqdm.tqdm', 'tqdm', (['train_smiles'], {}), '(train_smiles)\n', (3078, 3092), False, 'from tqdm 
import tqdm\n'), ((3154, 3177), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (3172, 3177), False, 'from rdkit import Chem\n'), ((3213, 3229), 'tqdm.tqdm', 'tqdm', (['val_smiles'], {}), '(val_smiles)\n', (3217, 3229), False, 'from tqdm import tqdm\n'), ((3292, 3315), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (3310, 3315), False, 'from rdkit import Chem\n'), ((3351, 3368), 'tqdm.tqdm', 'tqdm', (['test_smiles'], {}), '(test_smiles)\n', (3355, 3368), False, 'from tqdm import tqdm\n'), ((1617, 1642), 'rdkit.Chem.MolFromSmarts', 'Chem.MolFromSmarts', (['smart'], {}), '(smart)\n', (1635, 1642), False, 'from rdkit import Chem\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for graph_only_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import graph_only_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class GraphOnlyOpsTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testGraphZerosLike(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
z_tf = graph_only_ops.graph_zeros_like(x)
with self.cached_session():
self.assertAllClose(np.zeros((2, 3)), self.evaluate(z_tf))
@test_util.run_deprecated_v1
def testGraphPlaceholder(self):
x_tf = graph_only_ops.graph_placeholder(dtypes.int32, shape=(1,))
y_tf = math_ops.square(x_tf)
with self.cached_session() as sess:
x = np.array([42])
y = sess.run(y_tf, feed_dict={x_tf: np.array([42])})
self.assertAllClose(np.square(x), y)
if __name__ == '__main__':
test.main()
| [
"tensorflow.python.eager.graph_only_ops.graph_zeros_like",
"numpy.square",
"numpy.array",
"numpy.zeros",
"tensorflow.python.eager.graph_only_ops.graph_placeholder",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.platform.test.main"
] | [((1774, 1785), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (1783, 1785), False, 'from tensorflow.python.platform import test\n'), ((1215, 1263), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {'dtype': 'np.int32'}), '([[1, 2, 3], [4, 5, 6]], dtype=np.int32)\n', (1223, 1263), True, 'import numpy as np\n'), ((1275, 1309), 'tensorflow.python.eager.graph_only_ops.graph_zeros_like', 'graph_only_ops.graph_zeros_like', (['x'], {}), '(x)\n', (1306, 1309), False, 'from tensorflow.python.eager import graph_only_ops\n'), ((1484, 1542), 'tensorflow.python.eager.graph_only_ops.graph_placeholder', 'graph_only_ops.graph_placeholder', (['dtypes.int32'], {'shape': '(1,)'}), '(dtypes.int32, shape=(1,))\n', (1516, 1542), False, 'from tensorflow.python.eager import graph_only_ops\n'), ((1554, 1575), 'tensorflow.python.ops.math_ops.square', 'math_ops.square', (['x_tf'], {}), '(x_tf)\n', (1569, 1575), False, 'from tensorflow.python.ops import math_ops\n'), ((1626, 1640), 'numpy.array', 'np.array', (['[42]'], {}), '([42])\n', (1634, 1640), True, 'import numpy as np\n'), ((1368, 1384), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (1376, 1384), True, 'import numpy as np\n'), ((1726, 1738), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (1735, 1738), True, 'import numpy as np\n'), ((1683, 1697), 'numpy.array', 'np.array', (['[42]'], {}), '([42])\n', (1691, 1697), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_ntuple
----------------------------------
Tests for TOPAS ntuple reading.
"""
# system imports
import unittest
import os.path
# third-party imports
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.lib.recfunctions import append_fields
# project imports
from topas2numpy import read_ntuple
data_dir = 'tests/data'
ascii_path = os.path.join(data_dir, 'ascii-phasespace.phsp')
binary_path = os.path.join(data_dir, 'binary-phasespace.phsp')
limited_path = os.path.join(data_dir, 'limited-phasespace.phsp')
column_names = (
'Position X (cm)',
'Position Y (cm)',
'Position Z (cm)',
'Direction Cosine X',
'Direction Cosine Y',
'Energy (MeV)',
'Weight',
'Particle Type (in PDG Format)',
'Flag to tell if Third Direction Cosine is Negative (1 means true)',
'Flag to tell if this is the First Scored Particle from this History (1 means true)',
)
column_names_limited = (
'Particle Type (sign from z direction)',
'Energy (MeV) (-ve if new history)',
'Position X (cm)',
'Position Y (cm)',
'Position Z (cm)',
'Direction Cosine X',
'Direction Cosine Y',
'Weight',
)
class CommonTests(object):
def test_column_names(self):
self.assertEqual(self.result.dtype.names, self.column_names)
def test_size(self):
self.assertEqual(self.result.size, 104)
class TestAsciiNtuple(unittest.TestCase, CommonTests):
def setUp(self):
self.result = read_ntuple(ascii_path)
self.column_names = column_names
class TestBinaryNtuple(unittest.TestCase, CommonTests):
def setUp(self):
self.result = read_ntuple(binary_path)
self.column_names = column_names
class TestLimitedNtuple(unittest.TestCase, CommonTests):
def setUp(self):
self.result = read_ntuple(limited_path)
self.column_names = column_names_limited
class TestCompare(unittest.TestCase):
def setUp(self):
self.ascii = read_ntuple(ascii_path)
self.binary = read_ntuple(binary_path)
self.limited = read_ntuple(limited_path)
def test_compare_ascii_to_binary(self):
for col in self.ascii.dtype.names:
assert_array_almost_equal(self.ascii[col], self.binary[col],
decimal=3)
def test_compare_ascii_to_limited(self):
# c = a * (b ? -1 : +1)
a_name = 'Energy (MeV)'
b_name = 'Flag to tell if this is the First Scored Particle from this History (1 means true)'
c_name = 'Energy (MeV) (-ve if new history)'
c = np.copy(self.ascii[a_name])
c[self.ascii[b_name].astype('bool')] *= -1
self.ascii = append_fields(self.ascii, c_name, c)
# too much hassle to convert particle types to IAEA format
excluded = ['Particle Type (sign from z direction)']
checked = [s for s in self.limited.dtype.names if s not in excluded]
for col in checked:
assert_array_almost_equal(self.ascii[col], self.limited[col],
decimal=3)
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
| [
"numpy.copy",
"numpy.testing.assert_array_almost_equal",
"topas2numpy.read_ntuple",
"numpy.lib.recfunctions.append_fields",
"unittest.main"
] | [((1526, 1549), 'topas2numpy.read_ntuple', 'read_ntuple', (['ascii_path'], {}), '(ascii_path)\n', (1537, 1549), False, 'from topas2numpy import read_ntuple\n'), ((1692, 1716), 'topas2numpy.read_ntuple', 'read_ntuple', (['binary_path'], {}), '(binary_path)\n', (1703, 1716), False, 'from topas2numpy import read_ntuple\n'), ((1860, 1885), 'topas2numpy.read_ntuple', 'read_ntuple', (['limited_path'], {}), '(limited_path)\n', (1871, 1885), False, 'from topas2numpy import read_ntuple\n'), ((2017, 2040), 'topas2numpy.read_ntuple', 'read_ntuple', (['ascii_path'], {}), '(ascii_path)\n', (2028, 2040), False, 'from topas2numpy import read_ntuple\n'), ((2063, 2087), 'topas2numpy.read_ntuple', 'read_ntuple', (['binary_path'], {}), '(binary_path)\n', (2074, 2087), False, 'from topas2numpy import read_ntuple\n'), ((2111, 2136), 'topas2numpy.read_ntuple', 'read_ntuple', (['limited_path'], {}), '(limited_path)\n', (2122, 2136), False, 'from topas2numpy import read_ntuple\n'), ((2625, 2652), 'numpy.copy', 'np.copy', (['self.ascii[a_name]'], {}), '(self.ascii[a_name])\n', (2632, 2652), True, 'import numpy as np\n'), ((2725, 2761), 'numpy.lib.recfunctions.append_fields', 'append_fields', (['self.ascii', 'c_name', 'c'], {}), '(self.ascii, c_name, c)\n', (2738, 2761), False, 'from numpy.lib.recfunctions import append_fields\n'), ((3177, 3192), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3190, 3192), False, 'import unittest\n'), ((2237, 2308), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['self.ascii[col]', 'self.binary[col]'], {'decimal': '(3)'}), '(self.ascii[col], self.binary[col], decimal=3)\n', (2262, 2308), False, 'from numpy.testing import assert_array_almost_equal\n'), ((3009, 3081), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['self.ascii[col]', 'self.limited[col]'], {'decimal': '(3)'}), '(self.ascii[col], self.limited[col], decimal=3)\n', (3034, 3081), False, 'from numpy.testing import 
assert_array_almost_equal\n')] |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import Cython.Compiler.Options
Cython.Compiler.Options.annotate = True
import numpy as np
ext_modules = [
Extension(
"create_graph",
["create_graph.pyx"],
extra_compile_args=['-fopenmp', '-O3'], #'-g'],
extra_link_args=['-fopenmp'],# '-g'],
include_dirs=[np.get_include()],
language="c++"
)
]
setup(
name='create_graph',
ext_modules=cythonize(ext_modules, annotate=True),
# gdb_debug=True
)
| [
"Cython.Build.cythonize",
"numpy.get_include"
] | [((509, 546), 'Cython.Build.cythonize', 'cythonize', (['ext_modules'], {'annotate': '(True)'}), '(ext_modules, annotate=True)\n', (518, 546), False, 'from Cython.Build import cythonize\n'), ((410, 426), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (424, 426), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Catalog utility functions / classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from astropy.coordinates import Angle, SkyCoord
__all__ = [
'coordinate_iau_format',
'ra_iau_format',
'dec_iau_format',
'skycoord_from_table',
'select_sky_box',
'select_sky_circle',
]
def coordinate_iau_format(coordinate, ra_digits, dec_digits=None,
prefix=''):
"""Coordinate format as an IAU source designation.
Reference: http://cdsweb.u-strasbg.fr/Dic/iau-spec.html
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord`
Source coordinate.
ra_digits : int (>=2)
Number of digits for the Right Ascension part.
dec_digits : int (>=2) or None
Number of digits for the declination part
Default is ``dec_digits`` = None, meaning ``dec_digits`` = ``ra_digits`` - 1.
prefix : str
Prefix to put before the coordinate string, e.g. "SDSS J".
Returns
-------
strrepr : str or list of strings
IAU format string representation of the coordinate.
If this input coordinate is an array, the output is a list of strings.
Examples
--------
>>> from astropy.coordinates import SkyCoord
>>> from gammapy.catalog import coordinate_iau_format
Example position from IAU specification
>>> coordinate = SkyCoord('00h51m09.38s -42d26m33.8s', frame='icrs')
>>> designation = 'QSO J' + coordinate_iau_format(coordinate, ra_digits=6)
>>> print(designation)
QSO J005109-4226.5
>>> coordinate = coordinate.transform_to('fk4')
>>> designation = 'QSO B' + coordinate_iau_format(coordinate, ra_digits=6)
>>> print(designation)
QSO B004848-4242.8
Crab pulsar position (positive declination)
>>> coordinate = SkyCoord('05h34m31.93830s +22d00m52.1758s', frame='icrs')
>>> designation = 'HESS J' + coordinate_iau_format(coordinate, ra_digits=4)
>>> print(designation)
HESS J0534+220
PKS 2155-304 AGN position (negative declination)
>>> coordinate = SkyCoord('21h58m52.06511s -30d13m32.1182s', frame='icrs')
>>> designation = '2FGL J' + coordinate_iau_format(coordinate, ra_digits=5)
>>> print(designation)
2FGL J2158.8-3013
Coordinate array inputs result in list of string output.
>>> coordinates = SkyCoord(ra=[10.68458, 83.82208],
... dec=[41.26917, -5.39111],
... unit=('deg', 'deg'), frame='icrs')
>>> designations = coordinate_iau_format(coordinates, ra_digits=5, prefix='HESS J')
>>> print(designations)
['HESS J0042.7+4116', 'HESS J0535.2-0523']
"""
if coordinate.frame.name == 'galactic':
coordinate = coordinate.transform_to('icrs')
if dec_digits is None:
dec_digits = max(2, ra_digits - 1)
ra_str = ra_iau_format(coordinate.ra, ra_digits)
dec_str = dec_iau_format(coordinate.dec, dec_digits)
if coordinate.isscalar:
out = prefix + ra_str + dec_str
else:
out = [prefix + r + d for (r, d) in zip(ra_str, dec_str)]
return out
def ra_iau_format(ra, digits):
"""Right Ascension part of an IAU source designation.
Reference: http://cdsweb.u-strasbg.fr/Dic/iau-spec.html
====== ========
digits format
====== ========
2 HH
3 HHh
4 HHMM
5 HHMM.m
6 HHMMSS
7 HHMMSS.s
====== ========
Parameters
----------
ra : `~astropy.coordinates.Longitude`
Right ascension.
digits : int (>=2)
Number of digits.
Returns
-------
strrepr : str
IAU format string representation of the angle.
"""
if not isinstance(digits, int) and (digits >= 2):
raise ValueError('Invalid digits: {}. Valid options: int >= 2'.format(digits))
if ra.isscalar:
out = _ra_iau_format_scalar(ra, digits)
else:
out = [_ra_iau_format_scalar(_, digits) for _ in ra]
return out
def _ra_iau_format_scalar(ra, digits):
"""Format a single Right Ascension."""
# Note that Python string formatting always rounds the last digit,
# but the IAU spec requires to truncate instead.
# That's why integers with the correct digits are computed and formatted
# instead of formatting floats directly
ra_h = int(ra.hms[0])
ra_m = int(ra.hms[1])
ra_s = ra.hms[2]
if digits == 2: # format: HH
ra_str = '{0:02d}'.format(ra_h)
elif digits == 3: # format: HHh
ra_str = '{0:03d}'.format(int(10 * ra.hour))
elif digits == 4: # format: HHMM
ra_str = '{0:02d}{1:02d}'.format(ra_h, ra_m)
elif digits == 5: # format : HHMM.m
ra_str = '{0:02d}{1:02d}.{2:01d}'.format(ra_h, ra_m, int(ra_s / 6))
elif digits == 6: # format: HHMMSS
ra_str = '{0:02d}{1:02d}{2:02d}'.format(ra_h, ra_m, int(ra_s))
else: # format: HHMMSS.s
SS = int(ra_s)
s_digits = digits - 6
s = int(10 ** s_digits * (ra_s - SS))
fmt = '{0:02d}{1:02d}{2:02d}.{3:0' + str(s_digits) + 'd}'
ra_str = fmt.format(ra_h, ra_m, SS, s)
return ra_str
def dec_iau_format(dec, digits):
"""Declination part of an IAU source designation.
Reference: http://cdsweb.u-strasbg.fr/Dic/iau-spec.html
====== =========
digits format
====== =========
2 +DD
3 +DDd
4 +DDMM
5 +DDMM.m
6 +DDMMSS
7 +DDMMSS.s
====== =========
Parameters
----------
dec : `~astropy.coordinates.Latitude`
Declination.
digits : int (>=2)
Number of digits.
Returns
-------
strrepr : str
IAU format string representation of the angle.
"""
if not isinstance(digits, int) and digits >= 2:
raise ValueError('Invalid digits: {}. Valid options: int >= 2'.format(digits))
if dec.isscalar:
out = _dec_iau_format_scalar(dec, digits)
else:
out = [_dec_iau_format_scalar(_, digits) for _ in dec]
return out
def _dec_iau_format_scalar(dec, digits):
"""Format a single declination."""
# Note that Python string formatting always rounds the last digit,
# but the IAU spec requires to truncate instead.
# That's why integers with the correct digits are computed and formatted
# instead of formatting floats directly
dec_sign = '+' if dec.deg >= 0 else '-'
dec_d = int(abs(dec.dms[0]))
dec_m = int(abs(dec.dms[1]))
dec_s = abs(dec.dms[2])
if digits == 2: # format: +DD
dec_str = '{}{:02d}'.format(dec_sign, dec_d)
elif digits == 3: # format: +DDd
dec_str = '{:+04d}'.format(int(10 * dec.deg))
elif digits == 4: # format : +DDMM
dec_str = '{}{:02d}{:02d}'.format(dec_sign, dec_d, dec_m)
elif digits == 5: # format: +DDMM.m
dec_str = '{}{:02d}{:02d}.{:01d}'.format(dec_sign, dec_d, dec_m, int(dec_s / 6))
elif digits == 6: # format: +DDMMSS
dec_str = '{}{:02d}{:02d}.{:02d}'.format(dec_sign, dec_d, dec_m, int(dec_s))
else: # format: +DDMMSS.s
SS = int(dec_s)
s_digits = digits - 6
s = int(10 ** s_digits * (dec_s - SS))
fmt = '{}{:02d}{:02d}{:02d}.{:0' + str(s_digits) + 'd}'
dec_str = fmt.format(dec_sign, dec_d, dec_m, SS, s)
return dec_str
def skycoord_from_table(table):
"""Make `~astropy.coordinates.SkyCoord` from lon, lat columns in `~astropy.table.Table`.
This is a convenience function similar to `~astropy.coordinates.SkyCoord.guess_from_table`,
but with the column names we usually use.
TODO: I'm not sure if it's a good idea to use this because it's not always clear
which positions are taken.
"""
try:
keys = table.colnames
except AttributeError:
keys = table.keys()
if set(['RAJ2000', 'DEJ2000']).issubset(keys):
lon, lat, frame = 'RAJ2000', 'DEJ2000', 'icrs'
elif set(['RA', 'DEC']).issubset(keys):
lon, lat, frame = 'RA', 'DEC', 'icrs'
elif set(['GLON', 'GLAT']).issubset(keys):
lon, lat, frame = 'GLON', 'GLAT', 'galactic'
elif set(['glon', 'glat']).issubset(keys):
lon, lat, frame = 'glon', 'glat', 'galactic'
else:
raise KeyError('No column GLON / GLAT or RA / DEC or RAJ2000 / DEJ2000 found.')
unit = table[lon].unit.to_string() if table[lon].unit else 'deg'
skycoord = SkyCoord(table[lon], table[lat], unit=unit, frame=frame)
return skycoord
def select_sky_box(table, lon_lim, lat_lim, frame='icrs', inverted=False):
"""Select sky positions in a box.
This function can be applied e.g. to event lists of source catalogs
or observation tables.
Note: if useful we can add a function that returns the mask
or indices instead of applying the selection directly
Parameters
----------
table : `~astropy.table.Table`
Table with sky coordinate columns.
lon_lim, lat_lim : `~astropy.coordinates.Angle`
Box limits (each should be a min, max tuple).
frame : str, optional
Frame in which to apply the box cut.
Built-in Astropy coordinate frames are supported, e.g.
'icrs', 'fk5' or 'galactic'.
inverted : bool, optional
Invert selection: keep all entries outside the selected region.
Returns
-------
table : `~astropy.table.Table`
Copy of input table with box cut applied.
Examples
--------
>>> selected_obs_table = select_sky_box(obs_table,
... lon_lim=Angle([150, 300], 'deg'),
... lat_lim=Angle([-50, 0], 'deg'),
... frame='icrs')
"""
skycoord = skycoord_from_table(table)
skycoord = skycoord.transform_to(frame)
lon = skycoord.data.lon
lat = skycoord.data.lat
# SkyCoord automatically wraps lon angles at 360 deg, so in case
# the lon range is wrapped at 180 deg, lon angles must be wrapped
# also at 180 deg for the comparison to work
if any(l < Angle(0., 'deg') for l in lon_lim):
lon = lon.wrap_at(Angle(180, 'deg'))
lon_mask = (lon_lim[0] <= lon) & (lon < lon_lim[1])
lat_mask = (lat_lim[0] <= lat) & (lat < lat_lim[1])
mask = lon_mask & lat_mask
if inverted:
mask = np.invert(mask)
return table[mask]
def select_sky_circle(table, lon_cen, lat_cen, radius, frame='icrs', inverted=False):
"""Select sky positions in a circle.
This function can be applied e.g. to event lists of source catalogs
or observation tables.
Note: if useful we can add a function that returns the mask
or indices instead of applying the selection directly
Parameters
----------
table : `~astropy.table.Table`
Table with sky coordinate columns.
lon_cen, lat_cen : `~astropy.coordinates.Angle`
Circle center.
radius : `~astropy.coordinates.Angle`
Circle radius.
frame : str, optional
Frame in which to apply the box cut.
Built-in Astropy coordinate frames are supported, e.g.
'icrs', 'fk5' or 'galactic'.
inverted : bool, optional
Invert selection: keep all entries outside the selected region.
Returns
-------
table : `~astropy.table.Table`
Copy of input table with circle cut applied.
Examples
--------
>>> selected_obs_table = select_sky_circle(obs_table,
... lon=Angle(0, 'deg'),
... lat=Angle(0, 'deg'),
... radius=Angle(5, 'deg'),
... frame='galactic')
"""
skycoord = skycoord_from_table(table)
skycoord = skycoord.transform_to(frame)
# no need to wrap lon angleshere, since the SkyCoord separation
# method takes care of it
center = SkyCoord(lon_cen, lat_cen, frame=frame)
ang_distance = skycoord.separation(center)
mask = ang_distance < radius
if inverted:
mask = np.invert(mask)
return table[mask]
| [
"astropy.coordinates.Angle",
"astropy.coordinates.SkyCoord",
"numpy.invert"
] | [((8488, 8544), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['table[lon]', 'table[lat]'], {'unit': 'unit', 'frame': 'frame'}), '(table[lon], table[lat], unit=unit, frame=frame)\n', (8496, 8544), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((11995, 12034), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['lon_cen', 'lat_cen'], {'frame': 'frame'}), '(lon_cen, lat_cen, frame=frame)\n', (12003, 12034), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((10406, 10421), 'numpy.invert', 'np.invert', (['mask'], {}), '(mask)\n', (10415, 10421), True, 'import numpy as np\n'), ((12148, 12163), 'numpy.invert', 'np.invert', (['mask'], {}), '(mask)\n', (12157, 12163), True, 'import numpy as np\n'), ((10211, 10228), 'astropy.coordinates.Angle', 'Angle', (['(180)', '"""deg"""'], {}), "(180, 'deg')\n", (10216, 10228), False, 'from astropy.coordinates import Angle, SkyCoord\n'), ((10149, 10166), 'astropy.coordinates.Angle', 'Angle', (['(0.0)', '"""deg"""'], {}), "(0.0, 'deg')\n", (10154, 10166), False, 'from astropy.coordinates import Angle, SkyCoord\n')] |
import matplotlib.pyplot as plt
from numpy import corrcoef, mean, zeros
from copy import deepcopy
from scipy import spatial
import Krippendorff
########################################################################################
class Rater:
def __init__(self, ID):
self.ID = ID
self.matrix = self.ReadFile()
self.orientation = self.matrix[0][1]
self.ip_address = self.matrix[0][2]
self.start_timestamp = int(self.matrix[0][3])
self.end_timestamp = int(self.matrix[-2][3])
self.time_taken = (self.end_timestamp - self.start_timestamp) / 60.0
self.timecourse = [(int(self.matrix[i][3]) - self.start_timestamp) / 60.0 for i in range(0, 151)]
self.completion_code = self.matrix[-1][0]
del self.matrix[0]
del self.matrix[-1]
self.ratings, self.test_ratings, self.practice_ratings = self.SeparateMatrix()
self.normalized_ratings = self.MakeNormalizedMatrix()
# Read in a ratings file
def ReadFile(self):
try:
f = open('../data/task_1/' + self.ID)
except IOError:
raise ValueError(self.ID + ' is not a valid rater')
content = f.read()
f.close()
return [line.split('\t') for line in content.split('\n')]
# Separate out the raw data into three matrices: actual ratings, practice ratings, and test ratings
def SeparateMatrix(self):
ratings = []
test_ratings = []
practice_ratings = []
for i in range(0, len(self.matrix)):
if self.matrix[i][2] == 'undefined':
continue # If rating is undefined, skip the row
int_row = [int(self.matrix[i][0]), int(self.matrix[i][1]), int(self.matrix[i][2]), int(self.matrix[i][3])]
if int_row[0] == int_row[1]:
test_ratings.append(int_row)
elif i < 6:
practice_ratings.append(int_row)
else:
ratings.append(int_row)
return ratings, test_ratings, practice_ratings
# Normalize actual ratings over the interval [0,1]
def MakeNormalizedMatrix(self):
normalized_ratings = deepcopy(self.ratings)
ratings = self.GetRatings('actual')
try:
minimum = min(ratings)
except ValueError:
print(self.ID, ratings)
difference = float(max(ratings) - minimum)
if difference == 0.0:
return False
for row in normalized_ratings:
row[2] = (row[2] - minimum) / difference
return normalized_ratings
# Extract ratings of a given kind
def GetRatings(self, kind):
if kind == 'actual': target_matrix = self.ratings
elif kind == 'practice': target_matrix = self.practice_ratings
elif kind == 'test': target_matrix = self.test_ratings
elif kind == 'normalized': target_matrix = self.normalized_ratings
else:
return False
ratings = []
for row in target_matrix:
try:
ratings.append(row[2])
except ValueError:
continue
return ratings
# Produce a histogram of the actual ratings (raw or normalized)
def Hist(self, normalize=False, savefig=False):
if normalize == True:
ratings = self.GetRatings('normalized')
plt.hist(ratings, bins=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
plt.xlim(0, 1)
else:
ratings = self.GetRatings('actual')
plt.hist(ratings, bins=[0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000])
plt.xlim(0, 1000)
plt.ylim(0, 100)
plt.xlabel("Rating")
plt.ylabel("Frequency")
plt.title('Rater ' + self.ID)
if savefig == True:
plt.savefig("hists/" + self.ID + ".pdf")
plt.close()
else:
plt.show()
# Measure rater agreement by correlating this rater's ratings with the mean ratings of all raters
def RaterAgreement(self, distances=False):
x = []
y = []
if type(distances) == bool and distances == False:
distances = all_distance_array
distance_matrix = spatial.distance.squareform(distances, 'tomatrix')
for row in self.normalized_ratings:
x.append(distance_matrix[row[0], row[1]])
y.append(row[2])
return corrcoef(x, y)[0,1]
def MeanTestRating(self):
return mean(self.GetRatings('test'))
########################################################################################
# Average together the normalized ratings of many raters
def AverageDistanceMatrix(raters, agreement_filter=None, test_filter=None, distances=None, krippendorff=False):
count_matrix = zeros([48, 48], dtype=int)
sum_distance_matrix = zeros([48, 48], dtype=float)
ka_data = []
rater_n = 0
for rater in raters:
normalized_matrix = rater.normalized_ratings
if normalized_matrix == False:
#print('Excluding rater %s because the ratings cannot be normalized' % rater.ID)
continue # If the normalized matrix doesn't exist, skip the rater. This can occur if
# the rater gives the same rating for every pair of triangles.
if agreement_filter != None:
rater_agreement = rater.RaterAgreement(distances)
if rater_agreement < agreement_filter:
#print('Excluding rater %s due to low rater agreement: %f' % (rater.ID, rater_agreement))
continue # If agreement filter is being applied and the rater is not good enough, skip the rater
if test_filter != None:
mean_test_rating = rater.MeanTestRating()
if mean_test_rating > test_filter:
#print('Excluding rater %s due to a high average test rating: %f' % (rater.ID, mean_test_rating))
continue # If test filter is being applied and the rater is not good enough, skip the rater
rater_n += 1
if krippendorff == True:
ka_matrix = [[None]*48 for i in range(48)]
for row in normalized_matrix:
sum_distance_matrix[row[0], row[1]] += float(row[2])
sum_distance_matrix[row[1], row[0]] += float(row[2])
count_matrix[row[0], row[1]] += 1
count_matrix[row[1], row[0]] += 1
if krippendorff == True:
ka_matrix[row[0]][row[1]] = float(row[2])
ka_matrix[row[1]][row[0]] = float(row[2])
if krippendorff == True:
ka_array = []
for i in range(0, 47):
for j in range(i+1, 48):
ka_array.append(ka_matrix[i][j])
ka_data.append(ka_array)
sum_distance_array = spatial.distance.squareform(sum_distance_matrix, 'tovector')
count_array = spatial.distance.squareform(count_matrix, 'tovector')
mean_distance_array = sum_distance_array / count_array
return mean_distance_array, count_array, rater_n, ka_data
def most_and_least_similar_pairs(ratings_array):
matrix = spatial.distance.squareform(ratings_array, 'tomatrix')
similar_score = 1
dissimilar_score = 0
n = matrix.shape[0]
for i in range(0, n):
for j in range(i+1, n):
score = matrix[i, j]
if score < similar_score:
similar_score = score
similar_indices = (i, j)
if score > dissimilar_score:
dissimilar_score = score
dissimilar_indices = (i, j)
return similar_score, similar_indices, dissimilar_score, dissimilar_indices
########################################################################################
rater_ids = ['1iuoiX', '8lBsLg', 'iomj8H', 'Uv1Cz5', '6pONEP', 'G4jATI', 'FEjjhj', 'WcOyEo', 'Olsg9E', 'ntzryw', 'KU4BU1', 'QNXer0', 'sgu4Zk', 'RhDU4c', 'mqrNYh', 'xS8ZdN', 'y2UU38', 'CFbWtL', 'anrjOY', 'J4i8dm', 'Wfw8of', 'AqTLsh', 'W2JL0h', 'CYSrZk', '277fiX', 'k2AuXE', 'E4SJqH', 'Hl5kUl', 'I2Gbyg', 'wbaSjO', 'a2abMj', 'MTbOAZ', 'aY17za', 'krvm0W', 'eetbYU', 'RMDCcy', 'qEBAaS', 'aBXXiT', 'JLN0dy', '6o8syk', 'aEOaWJ', 'IB4wVt', 'ufeoHf', 'HutG2f', 'vPKCt3', 'rCHzzR', 'K3rvMd', 'qUZtEJ', 'nJmFj7', 'YgyWJ1', 'huX4Jz', 'chua85', 'jvBO9o', 'zUINg8', '0TiUmt', '2yr15o', '0bPp49', 'mbgoLT', 'lKQ2km', 'YM1TCH', 'EBXkBU', 'oXMKVA', 'N0LMRQ', 'MrX3AS', 'kdNtdY', 'pd55KD', 'ArvwOB', '7ysBYc', 'OiBlzF', 'eLBxSN', 'DlS5ut', 'oyh9eG', 'tzcUm5', 'KidSYY', 'ezOZvk', 'w6VA0U', '40THLn', 'kBy8V2', 'tAVMfZ', 'Bfsv32', 'Tx8WDh', 'efcw0Y', 'm3YPGN', 'gmey91', 'CGaUDW', 'JwYg7R', '4tkpPZ', 'jKdogx', 'onef7t', 'MuSqoP', 'o5GLbD', 'wNvkTK', 'wiNvtD', 'GsLucf', 'TcgHzi', 'Be4LKs']
# Initialize a Rater object for each rater
raters = [Rater(ID) for ID in rater_ids]
# First Pass
# Average everyone's ratings together to form a (condensed) distance matrix, but
# ignore raters whose mean reliability rating is > 100
all_distance_array, all_count_array, all_rater_n, ka_data = AverageDistanceMatrix(raters, None, 100, None, True)
# Second Pass
# Average everyone's ratings together again, this time filtering out raters whose
# agreement with the average ratings of all raters in the first pass is < 0.4.
reliable_distance_array, reliable_count_array, reliable_rater_n, ka_data = AverageDistanceMatrix(raters, 0.4, 100, all_distance_array, True)
# Calculate Krippendorff's alpha - this is very slow
#print(Krippendorff.alpha(ka_data))
| [
"scipy.spatial.distance.squareform",
"matplotlib.pyplot.title",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"numpy.corrcoef",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"numpy.zeros",
"copy.deepcopy",
"matplotlib.pyplot.ylim",
"matplotlib.pypl... | [((4341, 4367), 'numpy.zeros', 'zeros', (['[48, 48]'], {'dtype': 'int'}), '([48, 48], dtype=int)\n', (4346, 4367), False, 'from numpy import corrcoef, mean, zeros\n'), ((4392, 4420), 'numpy.zeros', 'zeros', (['[48, 48]'], {'dtype': 'float'}), '([48, 48], dtype=float)\n', (4397, 4420), False, 'from numpy import corrcoef, mean, zeros\n'), ((6139, 6199), 'scipy.spatial.distance.squareform', 'spatial.distance.squareform', (['sum_distance_matrix', '"""tovector"""'], {}), "(sum_distance_matrix, 'tovector')\n", (6166, 6199), False, 'from scipy import spatial\n'), ((6216, 6269), 'scipy.spatial.distance.squareform', 'spatial.distance.squareform', (['count_matrix', '"""tovector"""'], {}), "(count_matrix, 'tovector')\n", (6243, 6269), False, 'from scipy import spatial\n'), ((6448, 6502), 'scipy.spatial.distance.squareform', 'spatial.distance.squareform', (['ratings_array', '"""tomatrix"""'], {}), "(ratings_array, 'tomatrix')\n", (6475, 6502), False, 'from scipy import spatial\n'), ((1983, 2005), 'copy.deepcopy', 'deepcopy', (['self.ratings'], {}), '(self.ratings)\n', (1991, 2005), False, 'from copy import deepcopy\n'), ((3299, 3315), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(100)'], {}), '(0, 100)\n', (3307, 3315), True, 'import matplotlib.pyplot as plt\n'), ((3320, 3340), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Rating"""'], {}), "('Rating')\n", (3330, 3340), True, 'import matplotlib.pyplot as plt\n'), ((3345, 3368), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (3355, 3368), True, 'import matplotlib.pyplot as plt\n'), ((3373, 3402), 'matplotlib.pyplot.title', 'plt.title', (["('Rater ' + self.ID)"], {}), "('Rater ' + self.ID)\n", (3382, 3402), True, 'import matplotlib.pyplot as plt\n'), ((3801, 3851), 'scipy.spatial.distance.squareform', 'spatial.distance.squareform', (['distances', '"""tomatrix"""'], {}), "(distances, 'tomatrix')\n", (3828, 3851), False, 'from scipy import spatial\n'), ((3033, 
3112), 'matplotlib.pyplot.hist', 'plt.hist', (['ratings'], {'bins': '[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]'}), '(ratings, bins=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])\n', (3041, 3112), True, 'import matplotlib.pyplot as plt\n'), ((3119, 3133), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (3127, 3133), True, 'import matplotlib.pyplot as plt\n'), ((3192, 3270), 'matplotlib.pyplot.hist', 'plt.hist', (['ratings'], {'bins': '[0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]'}), '(ratings, bins=[0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000])\n', (3200, 3270), True, 'import matplotlib.pyplot as plt\n'), ((3277, 3294), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1000)'], {}), '(0, 1000)\n', (3285, 3294), True, 'import matplotlib.pyplot as plt\n'), ((3433, 3473), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('hists/' + self.ID + '.pdf')"], {}), "('hists/' + self.ID + '.pdf')\n", (3444, 3473), True, 'import matplotlib.pyplot as plt\n'), ((3480, 3491), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3489, 3491), True, 'import matplotlib.pyplot as plt\n'), ((3508, 3518), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3516, 3518), True, 'import matplotlib.pyplot as plt\n'), ((3974, 3988), 'numpy.corrcoef', 'corrcoef', (['x', 'y'], {}), '(x, y)\n', (3982, 3988), False, 'from numpy import corrcoef, mean, zeros\n')] |
# -*- coding: utf-8 -*-
"""Preview
Code for 'Inf-Net: Automatic COVID-19 Lung Infection Segmentation from CT Scans'
submit to Transactions on Medical Imaging, 2020.
First Version: Created on 2020-05-13 (@author: <NAME>)
"""
import os
import numpy as np
from Code.utils.dataloader_MulClsLungInf_UNet import LungDataset
from torchvision import transforms
from torch.utils.data import DataLoader
from Code.model_lung_infection.InfNet_UNet import * # use U-Net for multi-class segmentation
from scipy import misc
from Code.utils.split_class import split_class
import shutil
def inference(num_classes, input_channels, snapshot_dir, save_path):
test_dataset = LungDataset(
imgs_path='./Dataset/TestingSet/MultiClassInfection-Test/Imgs/',
pseudo_path='./Results/Lung infection segmentation/Semi-Inf-Net/', # NOTES: generated from `Semi-Inf-Net`
label_path='./Dataset/TestingSet/MultiClassInfection-Test/GT/',
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])]),
is_test=True
)
test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
lung_model = Inf_Net_UNet(input_channels, num_classes).cuda()
print(lung_model)
lung_model.load_state_dict(torch.load(snapshot_dir))
lung_model.eval()
for index, (img, pseudo, img_mask, name) in enumerate(test_dataloader):
img = img.to(device)
pseudo = pseudo.to(device)
img_mask = img_mask.to(device)
output = lung_model(torch.cat((img, pseudo), dim=1))
output = torch.sigmoid(output) # output.shape is torch.Size([4, 2, 160, 160])
b, _, w, h = output.size()
_, _, w_gt, h_gt = img_mask.size()
# output b*n_class*h*w -- > b*h*w
pred = output.cpu().permute(0, 2, 3, 1).contiguous().view(-1, num_classes).max(1)[1].view(b, w, h).numpy().squeeze()
print('Class numbers of prediction in total:', np.unique(pred))
# pred = misc.imresize(pred, size=(w_gt, h_gt))
os.makedirs(save_path, exist_ok=True)
misc.imsave(save_path + name[0].replace('.jpg', '.png'), pred)
split_class(save_path, name[0].replace('.jpg', '.png'), w_gt, h_gt)
shutil.rmtree(save_path)
print('Test done!')
if __name__ == "__main__":
inference(num_classes=3,
input_channels=6,
snapshot_dir='./Snapshots/save_weights/Semi-Inf-Net_UNet/unet_model_200.pkl',
save_path='./Results/Multi-class lung infection segmentation/class_12/'
)
| [
"numpy.unique",
"os.makedirs",
"torch.utils.data.DataLoader",
"torchvision.transforms.Normalize",
"shutil.rmtree",
"torchvision.transforms.ToTensor"
] | [((1185, 1253), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(0)'}), '(test_dataset, batch_size=1, shuffle=False, num_workers=0)\n', (1195, 1253), False, 'from torch.utils.data import DataLoader\n'), ((2398, 2422), 'shutil.rmtree', 'shutil.rmtree', (['save_path'], {}), '(save_path)\n', (2411, 2422), False, 'import shutil\n'), ((2208, 2245), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (2219, 2245), False, 'import os\n'), ((2127, 2142), 'numpy.unique', 'np.unique', (['pred'], {}), '(pred)\n', (2136, 2142), True, 'import numpy as np\n'), ((988, 1009), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1007, 1009), False, 'from torchvision import transforms\n'), ((1023, 1098), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1043, 1098), False, 'from torchvision import transforms\n')] |
import wandb
import argparse
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import scipy.fft as fft
import seaborn as sns
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
sns.set()
from torch.utils.data import DataLoader, Dataset
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from tqdm.notebook import tqdm
import sys
sys.path.append("/home/evangelos/workspace/Channel_Charting/")
from tools import utils
from src.models import supervised_classifier, supervised_regressor
from src.utils.data_preparation import SupervisedDataset
def preprocessing(data, first_data, rx_positions, padding=True):
data = utils.drop_top_right(data, rx_positions)
data = utils.standarize(data)
data = utils.fillna(data)
if padding:
data = utils.zero_padding_as(data, first_data)
#data = utils.take_norm(data)
return data
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--clusters', type=int, default=4)
parser.add_argument('--dropout', type=float, default=0.4)
parser.add_argument('--used_pct', type=float, default=1)
args = parser.parse_args()
wandb.init(project='ChannelCharting', config=args)
epochs = args.epochs
batch_size = args.batch_size
used_pct = args.used_pct
cuda = torch.cuda.is_available()
device = torch.device("cuda" if cuda else "cpu")
# load datasets
(raw_taps, raw_phi, raw_theta, raw_rx_positions,
raw_tx_positions) = utils.load_datasets()
#fourier transform and undersample taps
raw_freq_taps = fft.fft(raw_taps, workers=-1)[:, :, ::2]
# Preprocess datasets
taps = preprocessing(raw_freq_taps, raw_freq_taps, raw_rx_positions)
taps = np.hstack([np.real(taps), np.imag(taps)])
phi = preprocessing(raw_phi, taps, raw_rx_positions)
theta = preprocessing(raw_theta, taps, raw_rx_positions)
X = np.hstack([taps, phi[:-10], theta[:-10]])
y = preprocessing(raw_rx_positions, taps, raw_rx_positions,
padding=False)[:, :2]
#assign labels to certain areas of the map using kmeans
from sklearn.cluster import KMeans
km = KMeans(n_clusters=args.clusters)
km = km.fit(y)
labels = km.predict(y)
#train test split
train_X, test_X, train_y, test_y, train_labels, test_labels = train_test_split(
X, y, labels)
train_DS = SupervisedDataset(train_X, train_labels)
test_DS = SupervisedDataset(test_X, test_labels)
model = supervised_classifier.Classifier(channels=train_DS.channels(),
nb_labels=args.clusters)
criterion = torch.nn.CrossEntropyLoss()
train_loader = DataLoader(train_DS, batch_size=32)
test_loader = DataLoader(test_DS)
optimizer = optim.Adam(model.parameters())
scheduler = StepLR(optimizer, step_size=1)
wandb.watch(model)
best_val_loss = 9999
count = 0
best_accuracy = 0
for e in (range(100)):
loss = supervised_classifier.train(model, train_loader, optimizer,
criterion, device)
val_loss, val_acc = supervised_classifier.test(model, test_loader,
criterion, device)
# print(f"Epoch {epoch+1}: Training Loss {loss}, Validation Loss {val_loss}, Validation Accuracy {val_acc}")
wandb.log({
"Training Loss": loss,
"Validation Loss": val_loss,
"Validation Accuracy": val_acc
})
if best_val_loss < val_loss:
count += 1
else:
best_val_loss = val_loss
if val_acc > best_accuracy:
wandb.run.summary["best_accuracy"] = val_acc
yhats = model(test_DS[:][0]).detach()
predictions = yhats.argmax(dim=1)
report = classification_report(predictions,
test_labels,
output_dict=True)
wandb.log(report)
heatmap = sns.heatmap(confusion_matrix(predictions, test_labels))
wandb.log({"Confusion Matrix": heatmap})
map = sns.scatterplot(test_y[:, 0], test_y[:, 1], hue=predictions)
wandb.log({"Map": map})
if count > 5:
break
| [
"wandb.log",
"torch.nn.CrossEntropyLoss",
"numpy.hstack",
"sklearn.metrics.classification_report",
"wandb.init",
"tools.utils.drop_top_right",
"torch.cuda.is_available",
"seaborn.scatterplot",
"src.models.supervised_classifier.test",
"src.utils.data_preparation.SupervisedDataset",
"scipy.fft.fft... | [((268, 277), 'seaborn.set', 'sns.set', ([], {}), '()\n', (275, 277), True, 'import seaborn as sns\n'), ((558, 620), 'sys.path.append', 'sys.path.append', (['"""/home/evangelos/workspace/Channel_Charting/"""'], {}), "('/home/evangelos/workspace/Channel_Charting/')\n", (573, 620), False, 'import sys\n'), ((1085, 1110), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1108, 1110), False, 'import argparse\n'), ((1422, 1472), 'wandb.init', 'wandb.init', ([], {'project': '"""ChannelCharting"""', 'config': 'args'}), "(project='ChannelCharting', config=args)\n", (1432, 1472), False, 'import wandb\n'), ((1557, 1582), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1580, 1582), False, 'import torch\n'), ((1592, 1631), 'torch.device', 'torch.device', (["('cuda' if cuda else 'cpu')"], {}), "('cuda' if cuda else 'cpu')\n", (1604, 1631), False, 'import torch\n'), ((1719, 1740), 'tools.utils.load_datasets', 'utils.load_datasets', ([], {}), '()\n', (1738, 1740), False, 'from tools import utils\n'), ((2093, 2134), 'numpy.hstack', 'np.hstack', (['[taps, phi[:-10], theta[:-10]]'], {}), '([taps, phi[:-10], theta[:-10]])\n', (2102, 2134), True, 'import numpy as np\n'), ((2332, 2364), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'args.clusters'}), '(n_clusters=args.clusters)\n', (2338, 2364), False, 'from sklearn.cluster import KMeans\n'), ((2484, 2514), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y', 'labels'], {}), '(X, y, labels)\n', (2500, 2514), False, 'from sklearn.model_selection import train_test_split\n'), ((2531, 2571), 'src.utils.data_preparation.SupervisedDataset', 'SupervisedDataset', (['train_X', 'train_labels'], {}), '(train_X, train_labels)\n', (2548, 2571), False, 'from src.utils.data_preparation import SupervisedDataset\n'), ((2582, 2620), 'src.utils.data_preparation.SupervisedDataset', 'SupervisedDataset', (['test_X', 'test_labels'], {}), '(test_X, 
test_labels)\n', (2599, 2620), False, 'from src.utils.data_preparation import SupervisedDataset\n'), ((2771, 2798), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (2796, 2798), False, 'import torch\n'), ((2815, 2850), 'torch.utils.data.DataLoader', 'DataLoader', (['train_DS'], {'batch_size': '(32)'}), '(train_DS, batch_size=32)\n', (2825, 2850), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((2865, 2884), 'torch.utils.data.DataLoader', 'DataLoader', (['test_DS'], {}), '(test_DS)\n', (2875, 2884), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((2941, 2971), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['optimizer'], {'step_size': '(1)'}), '(optimizer, step_size=1)\n', (2947, 2971), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((2973, 2991), 'wandb.watch', 'wandb.watch', (['model'], {}), '(model)\n', (2984, 2991), False, 'import wandb\n'), ((847, 887), 'tools.utils.drop_top_right', 'utils.drop_top_right', (['data', 'rx_positions'], {}), '(data, rx_positions)\n', (867, 887), False, 'from tools import utils\n'), ((899, 921), 'tools.utils.standarize', 'utils.standarize', (['data'], {}), '(data)\n', (915, 921), False, 'from tools import utils\n'), ((933, 951), 'tools.utils.fillna', 'utils.fillna', (['data'], {}), '(data)\n', (945, 951), False, 'from tools import utils\n'), ((1797, 1826), 'scipy.fft.fft', 'fft.fft', (['raw_taps'], {'workers': '(-1)'}), '(raw_taps, workers=-1)\n', (1804, 1826), True, 'import scipy.fft as fft\n'), ((3075, 3153), 'src.models.supervised_classifier.train', 'supervised_classifier.train', (['model', 'train_loader', 'optimizer', 'criterion', 'device'], {}), '(model, train_loader, optimizer, criterion, device)\n', (3102, 3153), False, 'from src.models import supervised_classifier, supervised_regressor\n'), ((3217, 3282), 'src.models.supervised_classifier.test', 'supervised_classifier.test', (['model', 'test_loader', 'criterion', 'device'], {}), '(model, test_loader, 
criterion, device)\n', (3243, 3282), False, 'from src.models import supervised_classifier, supervised_regressor\n'), ((3451, 3550), 'wandb.log', 'wandb.log', (["{'Training Loss': loss, 'Validation Loss': val_loss, 'Validation Accuracy':\n val_acc}"], {}), "({'Training Loss': loss, 'Validation Loss': val_loss,\n 'Validation Accuracy': val_acc})\n", (3460, 3550), False, 'import wandb\n'), ((983, 1022), 'tools.utils.zero_padding_as', 'utils.zero_padding_as', (['data', 'first_data'], {}), '(data, first_data)\n', (1004, 1022), False, 'from tools import utils\n'), ((1948, 1961), 'numpy.real', 'np.real', (['taps'], {}), '(taps)\n', (1955, 1961), True, 'import numpy as np\n'), ((1963, 1976), 'numpy.imag', 'np.imag', (['taps'], {}), '(taps)\n', (1970, 1976), True, 'import numpy as np\n'), ((3865, 3930), 'sklearn.metrics.classification_report', 'classification_report', (['predictions', 'test_labels'], {'output_dict': '(True)'}), '(predictions, test_labels, output_dict=True)\n', (3886, 3930), False, 'from sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n'), ((4018, 4035), 'wandb.log', 'wandb.log', (['report'], {}), '(report)\n', (4027, 4035), False, 'import wandb\n'), ((4119, 4159), 'wandb.log', 'wandb.log', (["{'Confusion Matrix': heatmap}"], {}), "({'Confusion Matrix': heatmap})\n", (4128, 4159), False, 'import wandb\n'), ((4175, 4235), 'seaborn.scatterplot', 'sns.scatterplot', (['test_y[:, 0]', 'test_y[:, 1]'], {'hue': 'predictions'}), '(test_y[:, 0], test_y[:, 1], hue=predictions)\n', (4190, 4235), True, 'import seaborn as sns\n'), ((4244, 4267), 'wandb.log', 'wandb.log', (["{'Map': map}"], {}), "({'Map': map})\n", (4253, 4267), False, 'import wandb\n'), ((4067, 4109), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['predictions', 'test_labels'], {}), '(predictions, test_labels)\n', (4083, 4109), False, 'from sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n')] |
# Malaya Natural Language Toolkit
#
# Copyright (C) 2019 Malaya Project
# Licensed under the MIT License
# Author: huseinzol05 <<EMAIL>>
# URL: <https://malaya.readthedocs.io/>
# For license information, see https://github.com/huseinzol05/Malaya/blob/master/LICENSE
import tensorflow.compat.v1 as tf
from malaya.function import get_device, generate_session
from malaya.transformers.alxlnet import xlnet as xlnet_lib
from malaya.text.bpe import (
xlnet_tokenization,
padding_sequence,
merge_sentencepiece_tokens,
SentencePieceTokenizer,
)
from malaya.path import PATH_ALXLNET, S3_PATH_ALXLNET
from malaya.function import check_file
from collections import defaultdict
import collections
import re
import os
import numpy as np
from herpetologist import check_type
from typing import List
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
    """Map checkpoint variable names to the matching graph variables.

    Args:
        tvars: list of tf.Variable objects from the current graph.
        init_checkpoint: path to a TensorFlow checkpoint.

    Returns:
        (assignment_map, initialized_variable_names) where assignment_map
        maps a checkpoint variable name to the corresponding graph variable,
        and initialized_variable_names flags every matched name both with
        and without the ':0' suffix.
    """
    initialized_variable_names = {}
    name_to_variable = collections.OrderedDict()
    for var in tvars:
        name = var.name
        # Strip the trailing output index (e.g. ':0') from the variable name.
        m = re.match('^(.*):\\d+$', name)
        if m is not None:
            name = m.group(1)
        name_to_variable[name] = var
    init_vars = tf.train.list_variables(init_checkpoint)
    # FIX: the original assigned assignment_map = {} first and immediately
    # shadowed it with this OrderedDict; the dead assignment is removed.
    assignment_map = collections.OrderedDict()
    for name, _ in init_vars:
        if name not in name_to_variable:
            continue
        assignment_map[name] = name_to_variable[name]
        initialized_variable_names[name] = 1
        initialized_variable_names[name + ':0'] = 1
    return (assignment_map, initialized_variable_names)
def _extract_attention_weights_import(tf_graph):
attentions = [
n.name
for n in tf_graph.as_graph_def().node
if 'rel_attn/Softmax' in n.name
]
return [tf_graph.get_tensor_by_name('%s:0' % (a)) for a in attentions]
class Model:
    """ALXLNET wrapper exposing sentence vectorization and attention maps.

    Builds the XLNet graph inside its own tf.Graph, prepares a Saver that
    restores weights from a checkpoint (restore is triggered by ``load``),
    and keeps handles to every 'rel_attn/Softmax' tensor so per-layer
    attention can be fetched later.
    """

    def __init__(
        self, xlnet_config, tokenizer, checkpoint, pool_mode='last', **kwargs
    ):
        # Run configuration used to build the graph; dropout and attention
        # dropout are both disabled (0.0).
        kwargs_config = dict(
            is_training=True,
            use_tpu=False,
            use_bfloat16=False,
            dropout=0.0,
            dropatt=0.0,
            init='normal',
            init_range=0.1,
            init_std=0.05,
            clamp_len=-1,
        )
        xlnet_parameters = xlnet_lib.RunConfig(**kwargs_config)
        self._tokenizer = tokenizer
        device = get_device(**kwargs)
        _graph = tf.Graph()
        with _graph.as_default():
            with tf.device(device):
                # Placeholders are (batch, seq); XLNet expects (seq, batch),
                # hence the transposes below.
                self.X = tf.placeholder(tf.int32, [None, None])
                self.segment_ids = tf.placeholder(tf.int32, [None, None])
                self.input_masks = tf.placeholder(tf.float32, [None, None])
                xlnet_model = xlnet_lib.XLNetModel(
                    xlnet_config=xlnet_config,
                    run_config=xlnet_parameters,
                    input_ids=tf.transpose(self.X, [1, 0]),
                    seg_ids=tf.transpose(self.segment_ids, [1, 0]),
                    input_mask=tf.transpose(self.input_masks, [1, 0]),
                )
                self.logits = xlnet_model.get_pooled_out(pool_mode, True)
            self._sess = generate_session(_graph, **kwargs)
            self._sess.run(tf.global_variables_initializer())
            tvars = tf.trainable_variables()
            # Only variables present in the checkpoint are restored.
            assignment_map, _ = get_assignment_map_from_checkpoint(
                tvars, checkpoint
            )
            self._saver = tf.train.Saver(var_list=assignment_map)
            # Collect every attention-softmax tensor for later inspection.
            attentions = [
                n.name
                for n in tf.get_default_graph().as_graph_def().node
                if 'rel_attn/Softmax' in n.name
            ]
            g = tf.get_default_graph()
            self.attention_nodes = [
                g.get_tensor_by_name('%s:0' % (a)) for a in attentions
            ]

    @check_type
    def vectorize(self, strings: List[str]):
        """
        Vectorize string inputs.

        Parameters
        ----------
        strings : List[str]

        Returns
        -------
        result: np.array
        """
        input_ids, input_masks, segment_ids, _ = xlnet_tokenization(
            self._tokenizer, strings
        )
        return self._sess.run(
            self.logits,
            feed_dict={
                self.X: input_ids,
                self.segment_ids: segment_ids,
                self.input_masks: input_masks,
            },
        )

    def _attention(self, strings):
        # Tokenize, pad every sentence to the longest one, then fetch all
        # attention tensors in a single session run.
        input_ids, input_masks, segment_ids, s_tokens = xlnet_tokenization(
            self._tokenizer, strings
        )
        maxlen = max([len(s) for s in s_tokens])
        s_tokens = padding_sequence(s_tokens, maxlen, pad_int='<cls>')
        attentions = self._sess.run(
            self.attention_nodes,
            feed_dict={
                self.X: input_ids,
                self.segment_ids: segment_ids,
                self.input_masks: input_masks,
            },
        )
        return attentions, s_tokens, input_masks

    @check_type
    def attention(self, strings: List[str], method: str = 'last', **kwargs):
        """
        Get attention string inputs.

        Parameters
        ----------
        strings : List[str]
        method : str, optional (default='last')
            Attention layer supported. Allowed values:

            * ``'last'`` - attention from last layer.
            * ``'first'`` - attention from first layer.
            * ``'mean'`` - average attentions from all layers.

        Returns
        -------
        result : List[List[Tuple[str, float]]]
        """
        method = method.lower()
        if method not in ['last', 'first', 'mean']:
            raise Exception(
                "method not supported, only support ['last', 'first', 'mean']"
            )
        attentions, s_tokens, _ = self._attention(strings)
        # Keep only the attention paid by the first (CLS) position, then
        # reorder axes so the batch dimension comes first.
        if method == 'first':
            cls_attn = np.transpose(attentions[0][:, 0], (1, 0, 2))
        if method == 'last':
            cls_attn = np.transpose(attentions[-1][:, 0], (1, 0, 2))
        if method == 'mean':
            cls_attn = np.transpose(
                np.mean(attentions, axis=0).mean(axis=1), (1, 0, 2)
            )
        # Average over heads and normalise so weights sum to 1 per sentence.
        cls_attn = np.mean(cls_attn, axis=1)
        total_weights = np.sum(cls_attn, axis=-1, keepdims=True)
        attn = cls_attn / total_weights
        output = []
        for i in range(attn.shape[0]):
            output.append(
                merge_sentencepiece_tokens(
                    list(zip(s_tokens[i], attn[i])), model='xlnet'
                )
            )
        return output

    @check_type
    def visualize_attention(self, string: str):
        """
        Visualize attention.

        Parameters
        ----------
        string : str
        """
        from malaya.function.html import _attention

        strings = [string]
        attentions, s_tokens, _ = self._attention(strings)
        attn_dict = defaultdict(list)
        # Gather the first-token attention of every layer for the HTML view.
        for layer, attn_data in enumerate(attentions):
            attn = attn_data[:, :, 0]
            attn_dict['all'].append(attn.tolist())
        results = {
            'all': {
                'attn': attn_dict['all'],
                'left_text': s_tokens[0],
                'right_text': s_tokens[0],
            }
        }
        _attention(results)
@check_type
def load(model: str = 'alxlnet', pool_mode: str = 'last', **kwargs):
    """
    Load alxlnet model.

    Parameters
    ----------
    model : str, optional (default='base')
        Model architecture supported. Allowed values:

        * ``'alxlnet'`` - XLNET architecture from google + Malaya.
    pool_mode : str, optional (default='last')
        Model logits architecture supported. Allowed values:

        * ``'last'`` - last of the sequence.
        * ``'first'`` - first of the sequence.
        * ``'mean'`` - mean of the sequence.
        * ``'attn'`` - attention of the sequence.

    Returns
    -------
    result : malaya.transformers.alxlnet.Model class
    """
    model = model.lower()
    pool_mode = pool_mode.lower()
    if pool_mode not in ['last', 'first', 'mean', 'attn']:
        raise Exception(
            "pool_mode not supported, only support ['last', 'first', 'mean', 'attn']"
        )
    # Downloads the model archive if it is not cached locally.
    path = check_file(PATH_ALXLNET[model]['model'], S3_PATH_ALXLNET[model], **kwargs)
    if not os.path.exists(os.path.join(PATH_ALXLNET[model]['directory'], 'model.ckpt')):
        import tarfile

        # NOTE(review): tarfile.extractall is vulnerable to path traversal
        # for crafted archives; the archive comes from the project's S3
        # bucket, but consider the `filter='data'` argument (Python 3.12+)
        # or member validation.
        with tarfile.open(path['model']) as tar:
            tar.extractall(path=PATH_ALXLNET[model]['path'])
    vocab_model = os.path.join(PATH_ALXLNET[model]['directory'], 'sp10m.cased.v9.model')
    vocab = os.path.join(PATH_ALXLNET[model]['directory'], 'sp10m.cased.v9.vocab')
    tokenizer = SentencePieceTokenizer(vocab_file=vocab, spm_model_file=vocab_model)
    xlnet_config = xlnet_lib.XLNetConfig(
        json_path=os.path.join(PATH_ALXLNET[model]['directory'], 'config.json')
    )
    xlnet_checkpoint = os.path.join(PATH_ALXLNET[model]['directory'], 'model.ckpt')
    # Build the graph, then restore the checkpoint weights into the session.
    model = Model(
        xlnet_config,
        tokenizer,
        xlnet_checkpoint,
        pool_mode=pool_mode,
        **kwargs
    )
    model._saver.restore(model._sess, xlnet_checkpoint)
    return model
| [
"tarfile.open",
"malaya.text.bpe.xlnet_tokenization",
"malaya.function.html._attention",
"tensorflow.compat.v1.device",
"tensorflow.compat.v1.get_default_graph",
"malaya.function.check_file",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.placeholder",
"numpy.mean",
"ma... | [((953, 978), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (976, 978), False, 'import collections\n'), ((1177, 1217), 'tensorflow.compat.v1.train.list_variables', 'tf.train.list_variables', (['init_checkpoint'], {}), '(init_checkpoint)\n', (1200, 1217), True, 'import tensorflow.compat.v1 as tf\n'), ((1240, 1265), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (1263, 1265), False, 'import collections\n'), ((8335, 8409), 'malaya.function.check_file', 'check_file', (["PATH_ALXLNET[model]['model']", 'S3_PATH_ALXLNET[model]'], {}), "(PATH_ALXLNET[model]['model'], S3_PATH_ALXLNET[model], **kwargs)\n", (8345, 8409), False, 'from malaya.function import check_file\n'), ((8653, 8723), 'os.path.join', 'os.path.join', (["PATH_ALXLNET[model]['directory']", '"""sp10m.cased.v9.model"""'], {}), "(PATH_ALXLNET[model]['directory'], 'sp10m.cased.v9.model')\n", (8665, 8723), False, 'import os\n'), ((8736, 8806), 'os.path.join', 'os.path.join', (["PATH_ALXLNET[model]['directory']", '"""sp10m.cased.v9.vocab"""'], {}), "(PATH_ALXLNET[model]['directory'], 'sp10m.cased.v9.vocab')\n", (8748, 8806), False, 'import os\n'), ((8823, 8891), 'malaya.text.bpe.SentencePieceTokenizer', 'SentencePieceTokenizer', ([], {'vocab_file': 'vocab', 'spm_model_file': 'vocab_model'}), '(vocab_file=vocab, spm_model_file=vocab_model)\n', (8845, 8891), False, 'from malaya.text.bpe import xlnet_tokenization, padding_sequence, merge_sentencepiece_tokens, SentencePieceTokenizer\n'), ((9043, 9103), 'os.path.join', 'os.path.join', (["PATH_ALXLNET[model]['directory']", '"""model.ckpt"""'], {}), "(PATH_ALXLNET[model]['directory'], 'model.ckpt')\n", (9055, 9103), False, 'import os\n'), ((1037, 1066), 're.match', 're.match', (['"""^(.*):\\\\d+$"""', 'name'], {}), "('^(.*):\\\\d+$', name)\n", (1045, 1066), False, 'import re\n'), ((2282, 2318), 'malaya.transformers.alxlnet.xlnet.RunConfig', 'xlnet_lib.RunConfig', ([], {}), '(**kwargs_config)\n', (2301, 2318), 
True, 'from malaya.transformers.alxlnet import xlnet as xlnet_lib\n'), ((2373, 2393), 'malaya.function.get_device', 'get_device', ([], {}), '(**kwargs)\n', (2383, 2393), False, 'from malaya.function import get_device, generate_session\n'), ((2411, 2421), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (2419, 2421), True, 'import tensorflow.compat.v1 as tf\n'), ((4195, 4239), 'malaya.text.bpe.xlnet_tokenization', 'xlnet_tokenization', (['self._tokenizer', 'strings'], {}), '(self._tokenizer, strings)\n', (4213, 4239), False, 'from malaya.text.bpe import xlnet_tokenization, padding_sequence, merge_sentencepiece_tokens, SentencePieceTokenizer\n'), ((4588, 4632), 'malaya.text.bpe.xlnet_tokenization', 'xlnet_tokenization', (['self._tokenizer', 'strings'], {}), '(self._tokenizer, strings)\n', (4606, 4632), False, 'from malaya.text.bpe import xlnet_tokenization, padding_sequence, merge_sentencepiece_tokens, SentencePieceTokenizer\n'), ((4723, 4774), 'malaya.text.bpe.padding_sequence', 'padding_sequence', (['s_tokens', 'maxlen'], {'pad_int': '"""<cls>"""'}), "(s_tokens, maxlen, pad_int='<cls>')\n", (4739, 4774), False, 'from malaya.text.bpe import xlnet_tokenization, padding_sequence, merge_sentencepiece_tokens, SentencePieceTokenizer\n'), ((6285, 6310), 'numpy.mean', 'np.mean', (['cls_attn'], {'axis': '(1)'}), '(cls_attn, axis=1)\n', (6292, 6310), True, 'import numpy as np\n'), ((6335, 6375), 'numpy.sum', 'np.sum', (['cls_attn'], {'axis': '(-1)', 'keepdims': '(True)'}), '(cls_attn, axis=-1, keepdims=True)\n', (6341, 6375), True, 'import numpy as np\n'), ((7005, 7022), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7016, 7022), False, 'from collections import defaultdict\n'), ((7368, 7387), 'malaya.function.html._attention', '_attention', (['results'], {}), '(results)\n', (7378, 7387), False, 'from malaya.function.html import _attention\n'), ((5972, 6016), 'numpy.transpose', 'np.transpose', (['attentions[0][:, 0]', '(1, 0, 2)'], {}), 
'(attentions[0][:, 0], (1, 0, 2))\n', (5984, 6016), True, 'import numpy as np\n'), ((6070, 6115), 'numpy.transpose', 'np.transpose', (['attentions[-1][:, 0]', '(1, 0, 2)'], {}), '(attentions[-1][:, 0], (1, 0, 2))\n', (6082, 6115), True, 'import numpy as np\n'), ((8437, 8497), 'os.path.join', 'os.path.join', (["PATH_ALXLNET[model]['directory']", '"""model.ckpt"""'], {}), "(PATH_ALXLNET[model]['directory'], 'model.ckpt')\n", (8449, 8497), False, 'import os\n'), ((8537, 8564), 'tarfile.open', 'tarfile.open', (["path['model']"], {}), "(path['model'])\n", (8549, 8564), False, 'import tarfile\n'), ((8952, 9013), 'os.path.join', 'os.path.join', (["PATH_ALXLNET[model]['directory']", '"""config.json"""'], {}), "(PATH_ALXLNET[model]['directory'], 'config.json')\n", (8964, 9013), False, 'import os\n'), ((2473, 2490), 'tensorflow.compat.v1.device', 'tf.device', (['device'], {}), '(device)\n', (2482, 2490), True, 'import tensorflow.compat.v1 as tf\n'), ((2517, 2555), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {}), '(tf.int32, [None, None])\n', (2531, 2555), True, 'import tensorflow.compat.v1 as tf\n'), ((2591, 2629), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {}), '(tf.int32, [None, None])\n', (2605, 2629), True, 'import tensorflow.compat.v1 as tf\n'), ((2665, 2705), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32', '[None, None]'], {}), '(tf.float32, [None, None])\n', (2679, 2705), True, 'import tensorflow.compat.v1 as tf\n'), ((3176, 3210), 'malaya.function.generate_session', 'generate_session', (['_graph'], {}), '(_graph, **kwargs)\n', (3192, 3210), False, 'from malaya.function import get_device, generate_session\n'), ((3301, 3325), 'tensorflow.compat.v1.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (3323, 3325), True, 'import tensorflow.compat.v1 as tf\n'), ((3484, 3523), 'tensorflow.compat.v1.train.Saver', 'tf.train.Saver', ([], {'var_list': 
'assignment_map'}), '(var_list=assignment_map)\n', (3498, 3523), True, 'import tensorflow.compat.v1 as tf\n'), ((3744, 3766), 'tensorflow.compat.v1.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3764, 3766), True, 'import tensorflow.compat.v1 as tf\n'), ((3242, 3275), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3273, 3275), True, 'import tensorflow.compat.v1 as tf\n'), ((2885, 2913), 'tensorflow.compat.v1.transpose', 'tf.transpose', (['self.X', '[1, 0]'], {}), '(self.X, [1, 0])\n', (2897, 2913), True, 'import tensorflow.compat.v1 as tf\n'), ((2943, 2981), 'tensorflow.compat.v1.transpose', 'tf.transpose', (['self.segment_ids', '[1, 0]'], {}), '(self.segment_ids, [1, 0])\n', (2955, 2981), True, 'import tensorflow.compat.v1 as tf\n'), ((3014, 3052), 'tensorflow.compat.v1.transpose', 'tf.transpose', (['self.input_masks', '[1, 0]'], {}), '(self.input_masks, [1, 0])\n', (3026, 3052), True, 'import tensorflow.compat.v1 as tf\n'), ((6199, 6226), 'numpy.mean', 'np.mean', (['attentions'], {'axis': '(0)'}), '(attentions, axis=0)\n', (6206, 6226), True, 'import numpy as np\n'), ((3611, 3633), 'tensorflow.compat.v1.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3631, 3633), True, 'import tensorflow.compat.v1 as tf\n')] |
#!/usr/bin/env python3
#####!/usr/local/bin/python3
import os
import pytz
import click
import random
import logging
import harness
import datetime
import pandas as pd
import numpy as np
import ml_metrics as metrics
from tqdm import tqdm
from uuid import uuid4
from dateutil import parser
from config import init_config
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql import functions as F
from report import CSVReport, ExcelReport
# Configure root logging once for the whole script.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
#logging = logging.getlogging(__name__)
#logging.setLevel(level=logging.DEBUG)
# Global configuration, loaded at import time and read by every command below.
cfg = init_config('config_testing.json')
logging.debug("Application was launched with config: %s" % str(cfg.init_dict))
def get_split_date(df, split_event, train_ratio=0.8):
    """Find the timestamp separating the train and test periods.

    The returned moment is chosen so that train_ratio of all split_event
    occurrences happen strictly before it.

    Args:
        df: Spark DataFrame with 'event' and 'Date' columns.
        split_event: name of the event whose timeline is split.
        train_ratio: fraction of events that belong to the train period.

    Returns:
        The Date of the first event past the train portion.
    """
    sorted_dates = (df
                    .filter("event = '%s'" % split_event)
                    .select("Date")
                    .sort("Date", ascending=True)
                    .rdd)
    total = sorted_dates.count()
    threshold = total * train_ratio
    first_test_row = (sorted_dates
                      .zipWithIndex()
                      .filter(lambda pair: pair[1] > threshold)
                      .first())
    return first_test_row[0][0]
def split_data(df):
    """Split the events DataFrame into train and test sets.

    The strategy is taken from cfg.splitting.type:

    * ``"random"`` - random row split with ratio cfg.splitting.train_ratio.
    * ``"date"`` - time-based split at the moment when train_ratio of the
      cfg.splitting.split_event occurrences have happened.

    Args:
        df: Spark DataFrame of events with a "Date" column.

    Returns:
        (train_df, test_df) tuple of Spark DataFrames.

    Raises:
        ValueError: if cfg.splitting.type is not a recognised strategy.
    """
    split_type = cfg.splitting.type
    if split_type == "random":
        return df.randomSplit(
            [cfg.splitting.train_ratio, 1 - cfg.splitting.train_ratio],
            seed=cfg.splitting.random_seed)
    if split_type == "date":
        split_date = get_split_date(df, cfg.splitting.split_event,
                                    cfg.splitting.train_ratio)
        return (df.filter(F.col("Date") < split_date),
                df.filter(F.col("Date") >= split_date))
    # Robustness fix: the original silently returned None for an unknown
    # type, which later surfaced as a confusing unpacking error.
    raise ValueError("Unknown splitting type: %r" % split_type)
def mk_intersection_matrix(by_rows, columns_for_matrix,
                           horizontal_suffix="", vertical_suffix=""):
    """Build a pandas DataFrame of event intersections from collected rows.

    Args:
        by_rows: Spark rows (objects with ``asDict()``) carrying the keys
            'event_left', 'event_right' and 'count'.
        columns_for_matrix: event names forming both axes of the matrix.
        horizontal_suffix: suffix appended to every column label.
        vertical_suffix: suffix appended to every row (index) label.

    Returns:
        DataFrame where cell [right + vertical_suffix,
        left + horizontal_suffix] holds the intersection count, with 0
        wherever no row was present.
    """
    matrix = pd.DataFrame(
        columns=[name + horizontal_suffix for name in columns_for_matrix])
    zero_row = [0] * len(columns_for_matrix)
    for name in columns_for_matrix:
        matrix.loc[name + vertical_suffix, :] = zero_row
    for row in by_rows:
        fields = row.asDict()
        col_label = fields['event_left'] + horizontal_suffix
        row_label = fields['event_right'] + vertical_suffix
        matrix.loc[row_label, col_label] = fields['count']
    return matrix
def _count_by_event(df, alias, distinct_col=None):
    """Return a pandas frame of per-event counts from a Spark DataFrame.

    When distinct_col is given, distinct (distinct_col, event) pairs are
    counted instead of raw events, i.e. unique users/items per event.
    """
    base = df
    if distinct_col is not None:
        base = df.select(F.col(distinct_col), F.col("event")).distinct()
    return (base
            .groupBy("event")
            .count()
            .select(F.col("event"), F.col("count").alias(alias))
            .toPandas())


def _report_intersection(reporter, left_df, right_df, key_col, key_alias,
                         columns_for_matrix, title,
                         horizontal_suffix="", vertical_suffix=""):
    """Report the per-event intersection matrix of two event datasets.

    Rows of left_df and right_df are joined on key_col (a user or item id)
    and the number of shared keys is counted for every pair of events.
    """
    by_rows = (
        left_df
        .select(F.col(key_col).alias(key_alias), F.col("event").alias("event_left"))
        .distinct()
        .join(right_df
              .select(F.col(key_col).alias(key_alias), F.col("event").alias("event_right"))
              .distinct(),
              on=key_alias, how="inner")
        .groupBy(["event_left", "event_right"])
        .count()
        .collect())
    matrix = mk_intersection_matrix(by_rows, columns_for_matrix,
                                   horizontal_suffix=horizontal_suffix,
                                   vertical_suffix=vertical_suffix)
    reporter.report(
        [''] + list(matrix.columns.values),
        [matrix.index.tolist()] + [column for _, column in matrix.items()],
        title=title)


@click.command()
@click.option('--intersections', is_flag=True)
@click.option('--csv_report', is_flag=True)
def split(intersections, csv_report):
    """Split the source event file into train/test sets and report stats.

    Writes both splits as JSON (cfg.splitting.train_file / test_file) and
    produces an 'Events stat' worksheet plus, optionally, user and item
    event-intersection matrices.
    """
    logging.info('Splitting started')
    # Choose the reporting backend.
    if csv_report:
        if cfg.reporting.use_uuid:
            uuid = uuid4()
            reporter = CSVReport(cfg.reporting.csv_dir, uuid)
        else:
            reporter = CSVReport(cfg.reporting.csv_dir, None)
    else:
        reporter = ExcelReport(cfg.reporting.file)

    logging.info('Spark initialization')
    sc = SparkContext(cfg.spark.master, 'map_test: split')
    sqlContext = SQLContext(sc)

    logging.info('Source file reading')
    df = sqlContext.read.json(cfg.splitting.source_file)
    df = df.withColumn("Date", F.from_utc_timestamp("eventTime", "UTC"))
    # Drop property-only events; they carry no user-item interaction.
    df = df[(df.event != '$set') & (df.event != '$unset')]

    users_with_event_count = df.groupBy(F.col("entityId").alias("user")).count()
    logging.info('Filter users with small number of events')
    min_events = 10
    users_with_few_events = (users_with_event_count
                             .filter("count < %d" % (min_events))
                             .select(F.col("user").alias("user_with_few_events")))
    ndf = df.join(users_with_few_events,
                  F.col("entityId") == F.col("user_with_few_events"),
                  how="left_outer")
    df1 = ndf.filter("user_with_few_events is NULL").drop("user_with_few_events")

    logging.info('Split data into train and test')
    # BUG FIX: the original called split_data(df), silently discarding the
    # few-events filtering computed above (df1 was never used).
    train_df, test_df = split_data(df1)
    train_df.coalesce(1).write.format('json').save(cfg.splitting.train_file)
    test_df.coalesce(1).write.format('json').save(cfg.splitting.test_file)
    train_df = train_df.select("entityId", "event", "targetEntityId").cache()
    test_df = test_df.select("entityId", "event", "targetEntityId").cache()

    logging.info('Calculation of different stat metrics of datasets')
    # Totals keep the 'event' column (it is the join key below); the other
    # frames are indexed by it. Note the totals are over the unfiltered df.
    events_by_type = _count_by_event(df, "count_total")
    events_by_type_train = _count_by_event(train_df, "count_train").set_index("event")
    events_by_type_test = _count_by_event(test_df, "count_test").set_index("event")
    unique_users_by_event = _count_by_event(df, "unique_users_total", "entityId").set_index("event")
    unique_users_by_event_train = _count_by_event(train_df, "unique_users_train", "entityId").set_index("event")
    unique_users_by_event_test = _count_by_event(test_df, "unique_users_test", "entityId").set_index("event")
    unique_items_by_event = _count_by_event(df, "unique_items_total", "targetEntityId").set_index("event")
    unique_items_by_event_train = _count_by_event(train_df, "unique_items_train", "targetEntityId").set_index("event")
    unique_items_by_event_test = _count_by_event(test_df, "unique_items_test", "targetEntityId").set_index("event")

    logging.info('Calculate total counts')
    events = df.count()
    events_train = train_df.count()
    events_test = test_df.count()
    unique_users = df.select("entityId").distinct().count()
    unique_users_train = train_df.select("entityId").distinct().count()
    unique_users_test = test_df.select("entityId").distinct().count()
    unique_items = df.select(F.col("targetEntityId")).distinct().count()
    unique_items_train = train_df.select(F.col("targetEntityId")).distinct().count()
    unique_items_test = test_df.select(F.col("targetEntityId")).distinct().count()

    info_df = events_by_type
    dfs = [unique_users_by_event, unique_items_by_event,
           events_by_type_train, events_by_type_test,
           unique_users_by_event_train, unique_users_by_event_test,
           unique_items_by_event_train, unique_items_by_event_test]
    for data_frame in dfs:
        info_df = info_df.join(data_frame, on="event")
    n_rows, n_cols = info_df.shape
    # Append a totals row covering every event type.
    info_df.loc[n_rows] = ['ANY EVENT', events, unique_users, unique_items,
                           events_train, events_test,
                           unique_users_train, unique_users_test,
                           unique_items_train, unique_items_test]
    # FIX: .ix was removed from pandas; .iloc keeps the same positional
    # semantics (col 1 = event count, col 2 = users, col 3 = items).
    info_df.insert(4, 'events per user', info_df.iloc[:, 1] / info_df.iloc[:, 2])
    info_df.insert(5, 'events per item', info_df.iloc[:, 1] / info_df.iloc[:, 3])
    info_df = info_df.fillna(0)

    logging.info('Create event stat worksheet')
    reporter.start_new_sheet('Events stat')
    reporter.report(
        ['event', 'event count', 'unique users', 'unique items',
         'events per user', 'events per item',
         'event count train', 'event count test',
         'unique users train', 'unique users test',
         'unique items train', 'unique items test'],
        [column.tolist() for _, column in info_df.items()],
        selected_rows=[next(info_df.items())[1].tolist().index(cfg.testing.primary_event)],
        cfg=cfg)
    reporter.finish_sheet()

    if intersections:
        logging.info('Start intersections calculation')
        reporter.start_new_sheet('Intersections')
        columns_for_matrix = cfg.testing.events

        logging.info('Process train / train user intersection')
        _report_intersection(reporter, train_df, train_df, "entityId", "user",
                             columns_for_matrix, 'Train / train user intersection')

        logging.info('Process train / test user intersection')
        _report_intersection(reporter, train_df, test_df, "entityId", "user",
                             columns_for_matrix, 'Train / test user intersection',
                             horizontal_suffix=" train", vertical_suffix=" test")

        logging.info('Process train / train item intersection')
        _report_intersection(reporter, train_df, train_df, "targetEntityId", "item",
                             columns_for_matrix, 'Train / train item intersection')

        logging.info('Process train / test item intersection')
        _report_intersection(reporter, train_df, test_df, "targetEntityId", "item",
                             columns_for_matrix, 'Train / test item intersection',
                             horizontal_suffix=" train", vertical_suffix=" test")

    reporter.report_config(cfg)
    reporter.finish_document()
    logging.info('Splitting finished successfully')
def run_map_test_dummy(data, items=None, probs=None, uniform=True, top=True,
        users=None, primaryEvent=cfg.testing.primary_event, K=10, no_progress=False):
    """Evaluate MAP@K for trivial baseline recommenders.

    Three baselines are supported: the K most popular items (top=True),
    uniform random sampling (uniform=True), and popularity-weighted random
    sampling (both flags False, using probs).

    Args:
        data: list of event rows.
        items: np.array or list of items sorted by descending popularity.
        probs: matching sampling probabilities (popularity-weighted case).
        uniform: sample uniformly when top is False.
        top: recommend the first K items to everyone.
        users: optional set of users to restrict evaluation to.
        primaryEvent: name of the ground-truth event.
        K: compute MAP@1 .. MAP@K.
        no_progress: suppress the tqdm progress bar.

    Returns:
        list [MAP@1, MAP@2, ..., MAP@K].
    """
    per_user_items = {}
    for rec in data:
        if rec.event == primaryEvent and (not users or rec.entityId in users):
            per_user_items.setdefault(rec.entityId, []).append(rec.targetEntityId)

    holdout = list(per_user_items.keys())
    iterator = holdout if no_progress else tqdm(holdout)

    predicted = []
    actual = []
    for user in iterator:
        if top:
            recs = items[0:K]
        elif uniform:
            recs = np.random.choice(items, size=(K,))
        else:
            recs = np.random.choice(items, size=(K,), p=probs)
        predicted.append(recs)
        actual.append(per_user_items.get(user, []))
    return [metrics.mapk(actual, predicted, k) for k in range(1, K + 1)]
def import_events(engine_client, events_data, seed=cfg.splitting.random_seed):
    """Send every event row to the Harness events endpoint.

    '$set' rows are imported as item property events; everything else is
    treated as a user-to-item interaction.

    Args:
        engine_client: harness.EventsClient instance.
        events_data: iterable of Spark rows with an asDict() method.
        seed: value used to seed the global random module.
    """
    random.seed(seed)
    logging.info('Importing data..')
    for row in events_data:
        event = row.asDict()
        when = parser.parse(event["eventTime"])
        if event["event"] == "$set":
            engine_client.create(
                event="$set",
                entity_type="item",
                entity_id=event['entityId'],
                event_time=when,
                properties=event["properties"].asDict(),
            )
        else:
            engine_client.create(
                event=event["event"],
                entity_type="user",
                entity_id=event["entityId"],
                target_entity_type="item",
                target_entity_id=event["targetEntityId"],
                event_time=when,
            )
        print("Event: " + str(event))
def run_map_test(data, eventNames, users = None,
        primaryEvent = cfg.testing.primary_event,
        consider_non_zero_scores = cfg.testing.consider_non_zero_scores_only,
        num = 200, K = cfg.testing.map_k,
        test = False, harness_url = "http://localhost:9090"):
    """Import events into Harness, query recommendations and compute MAP@K.

    Args:
        data: list of event rows (objects exposing event / entityId /
            targetEntityId attributes).
        eventNames: event names forwarded in every Harness query.
        users: optional set of users to restrict evaluation to.
        primaryEvent: name of the event used as ground truth.
        consider_non_zero_scores: when True, count only users whose top
            recommendation score is non-zero.
        num: number of recommendations requested per query.
        K: compute MAP@1 .. MAP@K.
        test: when True, evaluate only the first N_TEST holdout users.
        harness_url: base URL of the Harness server.

    Returns:
        Tuple of ([MAP@1..MAP@K], per-user {items, scores} dict, average
        number of ground-truth items per counted user).
    """
    N_TEST = 2000
    user_information = {}
    res_data = {}
    # Create harness engine for events...
    engine_client = harness.EventsClient(
        engine_id = cfg.engine_id,
        url = harness_url,
        threads = 5,
        qsize = 500)
    import_events(engine_client, data)
    logging.info(engine_client.host)
    engine_client.close()
    # Create query client in harness...
    logging.info("Queries for " + cfg.engine_id)
    query_client = harness.QueriesClient(
        engine_id = cfg.engine_id,
        url = harness_url,
        threads=5,
        qsize=500)
    # Collect per-user ground-truth item lists from the primary events.
    for rec in data:
        if rec.event == primaryEvent:
            user = rec.entityId
            item = rec.targetEntityId
            if not users or user in users:
                user_information.setdefault(user, []).append(item)
    if test:
        # NOTE(review): the slice starts at 1, so the first holdout user is
        # skipped — possibly an unintended off-by-one; confirm.
        holdoutUsers = [*user_information.keys()][1:N_TEST]
    else:
        holdoutUsers = [*user_information.keys()]
    prediction = []
    ground_truth = []
    user_items_cnt = 0.0
    users_cnt = 0
    for user in tqdm(holdoutUsers):
        query = {
            "user": user,
            "eventNames": eventNames,
            "num": num,
        }
        try:
            res = query_client.send_query(query)
            # Sort by score then by item name
            tuples = sorted([(r["score"], r["item"]) for r in res.json_body['result']], reverse=True)
            scores = [score for score, item in tuples]
            items = [item for score, item in tuples]
            res_data[user] = {
                "items": items,
                "scores": scores,
            }
            # Consider only non-zero scores
            if consider_non_zero_scores:
                if len(scores) > 0 and scores[0] != 0.0:
                    prediction.append(items)
                    ground_truth.append(user_information.get(user, []))
                    user_items_cnt += len(user_information.get(user, []))
                    users_cnt += 1
            else:
                prediction.append(items)
                ground_truth.append(user_information.get(user, []))
                user_items_cnt += len(user_information.get(user, []))
                users_cnt += 1
        except harness.NotFoundError:
            print("Error with user: %s" % user)
    # The epsilon keeps the average well-defined when no user was counted.
    return ([metrics.mapk(ground_truth, prediction, k) for k in range(1, K + 1)],
            res_data, user_items_cnt / (users_cnt + 0.00001))
def get_nonzero(r_data):
    """Return the users whose top recommendation has a non-zero score.

    `r_data` maps user id -> {"items": [...], "scores": [...]}; a user is
    kept when the first (highest) score is not exactly 0.0. Insertion order
    of the dict is preserved in the returned list.
    """
    nonzero_users = []
    for user, user_result in r_data.items():
        if user_result['scores'][0] != 0.0:
            nonzero_users.append(user)
    return nonzero_users
@click.command()
@click.option('--csv_report', is_flag = True)
@click.option('--all', default = True, is_flag=True)
@click.option('--dummy_test', is_flag = True)
@click.option('--separate_test', is_flag = True)
@click.option('--all_but_test', is_flag = True)
@click.option('--primary_pairs_test', is_flag = True)
@click.option('--custom_combos_test', is_flag = True)
@click.option('--non_zero_users_from_file', is_flag = True)
def test(csv_report,
         all,
         dummy_test,
         separate_test,
         all_but_test,
         primary_pairs_test,
         custom_combos_test,
         non_zero_users_from_file):
    """Run the MAP@k evaluation suite and write the results to a report.

    Flags select which sub-tests to run (``--all`` is on by default and runs
    everything); ``--csv_report`` writes CSV files instead of an Excel
    workbook; ``--non_zero_users_from_file`` reuses a previously saved list
    of users with non-zero scores instead of recomputing it via queries.

    NOTE(review): the ``all`` parameter shadows the builtin ``all()`` inside
    this function body.
    """
    logging.info('Testing started')
    # Pick the report backend: CSV directory (optionally namespaced by a
    # fresh UUID) or a single Excel workbook.
    if csv_report:
        if cfg.reporting.use_uuid:
            uuid = uuid4()
            reporter = CSVReport(cfg.reporting.csv_dir, uuid)
        else:
            reporter = CSVReport(cfg.reporting.csv_dir, None)
    else:
        reporter = ExcelReport(cfg.reporting.file)
    logging.info('Spark context initialization')
    sc = SparkContext(cfg.spark.master, 'map_test: train')
    sqlContext = SQLContext(sc)
    logging.info('Train data reading')
    # Ground truth: primary-event rows from the held-out test split.
    test_df = sqlContext.read.json(cfg.splitting.test_file).cache()
    test_data = test_df.filter("event = '%s'" % (cfg.testing.primary_event)).collect()
    #non_zero_users = set([r[0] for r in test_data][500:650]) # Because actually all our users have 0.0 scores -- too few data
    if all or dummy_test:
        logging.info('Train data reading')
        train_df = sqlContext.read.json(cfg.splitting.train_file).cache()
        # Per-item popularity counts of the primary event in the train split,
        # sorted most-popular first; used as sampling weights for baselines.
        counts = train_df.filter("event = '%s'" % (cfg.testing.primary_event)).groupBy("targetEntityId").count().collect()
        sorted_rating = sorted([(row.asDict()['count'], row.asDict()['targetEntityId']) for row in counts], reverse=True)
        elements = np.array([item for cnt, item in sorted_rating])
        probs = np.array([cnt for cnt, item in sorted_rating])
        probs = 1.0 * probs / probs.sum()
        logging.info('Process dummy test')
        # case 1. Random sampling from items (uniform)
        dummy_uniform_res = run_map_test_dummy(test_data, items=elements, probs=probs,
                                              uniform=True, top=False, K=cfg.testing.map_k)
        # case 2. Random sampling from items (according to their distribution in training data)
        dummy_res = run_map_test_dummy(test_data, items=elements, probs=probs,
                                      uniform=False, top=False, K=cfg.testing.map_k)
        # case 3. Top-N items from training data
        dummy_top_res = run_map_test_dummy(test_data, items=elements, probs=probs,
                                          uniform=True, top=True, K=cfg.testing.map_k)
        reporter.start_new_sheet('Dummy MAP benchmark')
        reporter.report(
            ['', 'Random uniform', 'Random sampled from train', 'Top - N'],
            [[('MAP @ %d' % i) for i in range(1, len(dummy_res)+1)]] + [dummy_uniform_res, dummy_res, dummy_top_res],
            cfg=cfg
        )
        reporter.finish_sheet()
        logging.info('Process top 20 dummy test')
        # MAP@1 of the "always recommend the i-th most popular item" baseline,
        # for ranks 1..20 (dropping the i most popular items each step).
        scores = []
        for i in range(20):
            scores.append(run_map_test_dummy(test_data, items=elements[i:], uniform=True,
                                             top=True, K=1, no_progress=True)[0])
        reporter.start_new_sheet('Top-20 perfomance')
        reporter.report(
            ['Rank', 'MAP@1'],
            [list(range(1, 21)), scores],
            bold_first_column=False,
            cfg=cfg
        )
        reporter.finish_sheet()
    if all or separate_test or all_but_test or primary_pairs_test or custom_combos_test:
        logging.info('Non zero users')
        # All remaining tests are restricted to users whose recommendations
        # have non-zero scores; load that set from disk or compute & save it.
        # NOTE(review): `input` here shadows the builtin input().
        if non_zero_users_from_file:
            with open(cfg.testing.non_zero_users_file) as input:
                non_zero_users = set(input.read().split(','))
        else:
            _, r_data, _ = run_map_test(test_data, [cfg.testing.primary_event], test=False)
            non_zero_users = get_nonzero(r_data)
            with open(cfg.testing.non_zero_users_file, 'w') as output:
                output.write(','.join(non_zero_users))
    if all or separate_test:
        # MAP@k of each event type used on its own.
        logging.info('Process "map separate events" test')
        columns = []
        for ev in cfg.testing.events:
            (r_scores, r_data, ipu) = run_map_test(test_data, [ev], users=non_zero_users, test=False)
            columns.append(r_scores + [len(non_zero_users)])
        first_column = [('MAP @ %d' % i) for i in range(1, len(columns[0]))] + ['non-zero users']
        reporter.start_new_sheet('MAP separate events')
        reporter.report(
            ['event'] + cfg.testing.events,
            [first_column] + columns,
            selected_columns=[cfg.testing.events.index(cfg.testing.primary_event) + 1],
            cfg=cfg
        )
        reporter.finish_sheet()
    if all or all_but_test:
        # MAP@k with one event type left out at a time, plus all events together.
        logging.info('Process "map all but..." test')
        events_scores = []
        for ev in cfg.testing.events:
            evs = list(cfg.testing.events)
            evs.remove(ev)
            (r_scores, r_data, ipu) = run_map_test(test_data, evs, users=non_zero_users, test=False)
            events_scores.append(r_scores + [len(non_zero_users)])
        evl = cfg.testing.events
        all_scores, r_data, ipu = run_map_test(test_data, evl, users=non_zero_users, test=False)
        all_scores.append(len(non_zero_users))
        first_column = [('MAP @ %d' % i) for i in range(1, len(all_scores))] + ['non-zero users']
        reporter.start_new_sheet('MAP all but...')
        reporter.report(
            ['event'] + cfg.testing.events + ['All'],
            [first_column] + events_scores + [all_scores],
            selected_columns=[cfg.testing.events.index(cfg.testing.primary_event) + 1],
            cfg=cfg
        )
        reporter.finish_sheet()
    if all or primary_pairs_test:
        # MAP@k of the primary event paired with each secondary event.
        logging.info('Process "map pairs with primary" test')
        columns = []
        events_without_primary = [event for event in cfg.testing.events if event != cfg.testing.primary_event]
        for event in events_without_primary:
            (r_scores, r_data, ipu) = run_map_test(test_data, [cfg.testing.primary_event, event],
                                                     users=non_zero_users, test=False)
            columns.append(r_scores + [len(non_zero_users)])
        first_column = [('MAP @ %d' % i) for i in range(1, len(columns[0]))] + ['non-zero users']
        reporter.start_new_sheet('MAP pairs with primary')
        reporter.report(
            ['event'] + events_without_primary,
            [first_column] + columns,
            cfg=cfg
        )
        reporter.finish_sheet()
    if all or custom_combos_test:
        # MAP@k for user-configured event combinations, skipping groups whose
        # report is already produced by one of the sheets above.
        logging.info('Process "custom combos" test')
        columns = []
        for event_group in cfg.testing.custom_combos.event_groups:
            if len(event_group) == 2 and cfg.testing.primary_event in event_group and primary_pairs_test:
                logging.warn("Report for group %s already generated in 'MAP pairs with primary'" % str(event_group))
                continue
            if len(event_group) == 1 and separate_test:
                logging.warn("Report for group %s already generated in 'MAP separate events'" % str(event_group))
                continue
            if len(event_group) >= len(cfg.testing.events) - 1 and all_but_test:
                logging.warn("Report for group %s already generated in 'All but...'" % str(event_group))
                continue
            if not (set(cfg.testing.events) & set(event_group)):
                logging.warn("Event group is not corect!")
                continue
            (r_scores, r_data, ipu) = run_map_test(test_data, event_group,
                                                     users = non_zero_users,
                                                     test=False)
            columns.append(r_scores + [len(non_zero_users)])
        if columns:
            first_column = [('MAP @ %d' % i) for i in range(1, len(columns[0]))] + ['non-zero users']
            reporter.start_new_sheet('Custom combos')
            reporter.report(
                ['event'] + [str([s.encode('utf-8') for s in group]) for group in cfg.testing.custom_combos.event_groups],
                [first_column] + columns,
                cfg=cfg
            )
            reporter.finish_sheet()
    reporter.finish_document()
    logging.info('Testing finished successfully')
# root group
@click.group()
def root():
    """Top-level CLI group; sub-commands are attached via add_command below."""
# Register the sub-commands so the CLI exposes `split` and `test`
# (both defined earlier in this module).
root.add_command(split)
root.add_command(test)
if __name__ == "__main__":
    # Hand control to click's command-line parser when run as a script.
    root()
| [
"ml_metrics.mapk",
"pyspark.sql.SQLContext",
"config.init_config",
"numpy.array",
"logging.info",
"logging.warn",
"click.option",
"click.group",
"pandas.DataFrame",
"pyspark.SparkContext",
"click.command",
"dateutil.parser.parse",
"numpy.random.choice",
"report.ExcelReport",
"uuid.uuid4"... | [((473, 565), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(levelname)s %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s %(levelname)s %(message)s')\n", (492, 565), False, 'import logging\n'), ((652, 686), 'config.init_config', 'init_config', (['"""config_testing.json"""'], {}), "('config_testing.json')\n", (663, 686), False, 'from config import init_config\n'), ((2579, 2594), 'click.command', 'click.command', ([], {}), '()\n', (2592, 2594), False, 'import click\n'), ((2596, 2641), 'click.option', 'click.option', (['"""--intersections"""'], {'is_flag': '(True)'}), "('--intersections', is_flag=True)\n", (2608, 2641), False, 'import click\n'), ((2643, 2685), 'click.option', 'click.option', (['"""--csv_report"""'], {'is_flag': '(True)'}), "('--csv_report', is_flag=True)\n", (2655, 2685), False, 'import click\n'), ((19222, 19237), 'click.command', 'click.command', ([], {}), '()\n', (19235, 19237), False, 'import click\n'), ((19239, 19281), 'click.option', 'click.option', (['"""--csv_report"""'], {'is_flag': '(True)'}), "('--csv_report', is_flag=True)\n", (19251, 19281), False, 'import click\n'), ((19285, 19334), 'click.option', 'click.option', (['"""--all"""'], {'default': '(True)', 'is_flag': '(True)'}), "('--all', default=True, is_flag=True)\n", (19297, 19334), False, 'import click\n'), ((19338, 19380), 'click.option', 'click.option', (['"""--dummy_test"""'], {'is_flag': '(True)'}), "('--dummy_test', is_flag=True)\n", (19350, 19380), False, 'import click\n'), ((19384, 19429), 'click.option', 'click.option', (['"""--separate_test"""'], {'is_flag': '(True)'}), "('--separate_test', is_flag=True)\n", (19396, 19429), False, 'import click\n'), ((19433, 19477), 'click.option', 'click.option', (['"""--all_but_test"""'], {'is_flag': '(True)'}), "('--all_but_test', is_flag=True)\n", (19445, 19477), False, 'import click\n'), ((19481, 19531), 'click.option', 'click.option', 
(['"""--primary_pairs_test"""'], {'is_flag': '(True)'}), "('--primary_pairs_test', is_flag=True)\n", (19493, 19531), False, 'import click\n'), ((19535, 19585), 'click.option', 'click.option', (['"""--custom_combos_test"""'], {'is_flag': '(True)'}), "('--custom_combos_test', is_flag=True)\n", (19547, 19585), False, 'import click\n'), ((19589, 19645), 'click.option', 'click.option', (['"""--non_zero_users_from_file"""'], {'is_flag': '(True)'}), "('--non_zero_users_from_file', is_flag=True)\n", (19601, 19645), False, 'import click\n'), ((27768, 27781), 'click.group', 'click.group', ([], {}), '()\n', (27779, 27781), False, 'import click\n'), ((2151, 2230), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': '[(col + horizontal_suffix) for col in columns_for_matrix]'}), '(columns=[(col + horizontal_suffix) for col in columns_for_matrix])\n', (2163, 2230), True, 'import pandas as pd\n'), ((2728, 2761), 'logging.info', 'logging.info', (['"""Splitting started"""'], {}), "('Splitting started')\n", (2740, 2761), False, 'import logging\n'), ((3048, 3084), 'logging.info', 'logging.info', (['"""Spark initialization"""'], {}), "('Spark initialization')\n", (3060, 3084), False, 'import logging\n'), ((3095, 3144), 'pyspark.SparkContext', 'SparkContext', (['cfg.spark.master', '"""map_test: split"""'], {}), "(cfg.spark.master, 'map_test: split')\n", (3107, 3144), False, 'from pyspark import SparkContext\n'), ((3162, 3176), 'pyspark.sql.SQLContext', 'SQLContext', (['sc'], {}), '(sc)\n', (3172, 3176), False, 'from pyspark.sql import SQLContext\n'), ((3182, 3217), 'logging.info', 'logging.info', (['"""Source file reading"""'], {}), "('Source file reading')\n", (3194, 3217), False, 'import logging\n'), ((3495, 3551), 'logging.info', 'logging.info', (['"""Filter users with small number of events"""'], {}), "('Filter users with small number of events')\n", (3507, 3551), False, 'import logging\n'), ((4007, 4053), 'logging.info', 'logging.info', (['"""Split data into train and test"""'], 
{}), "('Split data into train and test')\n", (4019, 4053), False, 'import logging\n'), ((4405, 4470), 'logging.info', 'logging.info', (['"""Calculation of different stat metrics of datasets"""'], {}), "('Calculation of different stat metrics of datasets')\n", (4417, 4470), False, 'import logging\n'), ((8001, 8039), 'logging.info', 'logging.info', (['"""Calculate total counts"""'], {}), "('Calculate total counts')\n", (8013, 8039), False, 'import logging\n'), ((9441, 9484), 'logging.info', 'logging.info', (['"""Create event stat worksheet"""'], {}), "('Create event stat worksheet')\n", (9453, 9484), False, 'import logging\n'), ((13559, 13606), 'logging.info', 'logging.info', (['"""Splitting finished successfully"""'], {}), "('Splitting finished successfully')\n", (13571, 13606), False, 'import logging\n'), ((15367, 15384), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (15378, 15384), False, 'import random\n'), ((15404, 15436), 'logging.info', 'logging.info', (['"""Importing data.."""'], {}), "('Importing data..')\n", (15416, 15436), False, 'import logging\n'), ((16755, 16843), 'harness.EventsClient', 'harness.EventsClient', ([], {'engine_id': 'cfg.engine_id', 'url': 'harness_url', 'threads': '(5)', 'qsize': '(500)'}), '(engine_id=cfg.engine_id, url=harness_url, threads=5,\n qsize=500)\n', (16775, 16843), False, 'import harness\n'), ((16925, 16957), 'logging.info', 'logging.info', (['engine_client.host'], {}), '(engine_client.host)\n', (16937, 16957), False, 'import logging\n'), ((17029, 17073), 'logging.info', 'logging.info', (["('Queries for ' + cfg.engine_id)"], {}), "('Queries for ' + cfg.engine_id)\n", (17041, 17073), False, 'import logging\n'), ((17094, 17183), 'harness.QueriesClient', 'harness.QueriesClient', ([], {'engine_id': 'cfg.engine_id', 'url': 'harness_url', 'threads': '(5)', 'qsize': '(500)'}), '(engine_id=cfg.engine_id, url=harness_url, threads=5,\n qsize=500)\n', (17115, 17183), False, 'import harness\n'), ((17694, 17712), 'tqdm.tqdm', 
'tqdm', (['holdoutUsers'], {}), '(holdoutUsers)\n', (17698, 17712), False, 'from tqdm import tqdm\n'), ((19850, 19881), 'logging.info', 'logging.info', (['"""Testing started"""'], {}), "('Testing started')\n", (19862, 19881), False, 'import logging\n'), ((20167, 20211), 'logging.info', 'logging.info', (['"""Spark context initialization"""'], {}), "('Spark context initialization')\n", (20179, 20211), False, 'import logging\n'), ((20222, 20271), 'pyspark.SparkContext', 'SparkContext', (['cfg.spark.master', '"""map_test: train"""'], {}), "(cfg.spark.master, 'map_test: train')\n", (20234, 20271), False, 'from pyspark import SparkContext\n'), ((20289, 20303), 'pyspark.sql.SQLContext', 'SQLContext', (['sc'], {}), '(sc)\n', (20299, 20303), False, 'from pyspark.sql import SQLContext\n'), ((20309, 20343), 'logging.info', 'logging.info', (['"""Train data reading"""'], {}), "('Train data reading')\n", (20321, 20343), False, 'import logging\n'), ((27707, 27752), 'logging.info', 'logging.info', (['"""Testing finished successfully"""'], {}), "('Testing finished successfully')\n", (27719, 27752), False, 'import logging\n'), ((3011, 3042), 'report.ExcelReport', 'ExcelReport', (['cfg.reporting.file'], {}), '(cfg.reporting.file)\n', (3022, 3042), False, 'from report import CSVReport, ExcelReport\n'), ((3307, 3347), 'pyspark.sql.functions.from_utc_timestamp', 'F.from_utc_timestamp', (['"""eventTime"""', '"""UTC"""'], {}), "('eventTime', 'UTC')\n", (3327, 3347), True, 'from pyspark.sql import functions as F\n'), ((10054, 10101), 'logging.info', 'logging.info', (['"""Start intersections calculation"""'], {}), "('Start intersections calculation')\n", (10066, 10101), False, 'import logging\n'), ((10211, 10266), 'logging.info', 'logging.info', (['"""Process train / train user intersection"""'], {}), "('Process train / train user intersection')\n", (10223, 10266), False, 'import logging\n'), ((10978, 11032), 'logging.info', 'logging.info', (['"""Process train / test user intersection"""'], 
{}), "('Process train / test user intersection')\n", (10990, 11032), False, 'import logging\n'), ((11832, 11887), 'logging.info', 'logging.info', (['"""Process train / train item intersection"""'], {}), "('Process train / train item intersection')\n", (11844, 11887), False, 'import logging\n'), ((12620, 12674), 'logging.info', 'logging.info', (['"""Process train / test item intersection"""'], {}), "('Process train / test item intersection')\n", (12632, 12674), False, 'import logging\n'), ((14826, 14844), 'tqdm.tqdm', 'tqdm', (['holdoutUsers'], {}), '(holdoutUsers)\n', (14830, 14844), False, 'from tqdm import tqdm\n'), ((15194, 15235), 'ml_metrics.mapk', 'metrics.mapk', (['ground_truth', 'prediction', 'k'], {}), '(ground_truth, prediction, k)\n', (15206, 15235), True, 'import ml_metrics as metrics\n'), ((15522, 15558), 'dateutil.parser.parse', 'parser.parse', (["dict_data['eventTime']"], {}), "(dict_data['eventTime'])\n", (15534, 15558), False, 'from dateutil import parser\n'), ((20131, 20162), 'report.ExcelReport', 'ExcelReport', (['cfg.reporting.file'], {}), '(cfg.reporting.file)\n', (20142, 20162), False, 'from report import CSVReport, ExcelReport\n'), ((20663, 20697), 'logging.info', 'logging.info', (['"""Train data reading"""'], {}), "('Train data reading')\n", (20675, 20697), False, 'import logging\n'), ((21037, 21084), 'numpy.array', 'np.array', (['[item for cnt, item in sorted_rating]'], {}), '([item for cnt, item in sorted_rating])\n', (21045, 21084), True, 'import numpy as np\n'), ((21101, 21147), 'numpy.array', 'np.array', (['[cnt for cnt, item in sorted_rating]'], {}), '([cnt for cnt, item in sorted_rating])\n', (21109, 21147), True, 'import numpy as np\n'), ((21199, 21233), 'logging.info', 'logging.info', (['"""Process dummy test"""'], {}), "('Process dummy test')\n", (21211, 21233), False, 'import logging\n'), ((22298, 22339), 'logging.info', 'logging.info', (['"""Process top 20 dummy test"""'], {}), "('Process top 20 dummy test')\n", (22310, 22339), 
False, 'import logging\n'), ((22912, 22942), 'logging.info', 'logging.info', (['"""Non zero users"""'], {}), "('Non zero users')\n", (22924, 22942), False, 'import logging\n'), ((23427, 23477), 'logging.info', 'logging.info', (['"""Process "map separate events" test"""'], {}), '(\'Process "map separate events" test\')\n', (23439, 23477), False, 'import logging\n'), ((24151, 24196), 'logging.info', 'logging.info', (['"""Process "map all but..." test"""'], {}), '(\'Process "map all but..." test\')\n', (24163, 24196), False, 'import logging\n'), ((25159, 25212), 'logging.info', 'logging.info', (['"""Process "map pairs with primary" test"""'], {}), '(\'Process "map pairs with primary" test\')\n', (25171, 25212), False, 'import logging\n'), ((26010, 26054), 'logging.info', 'logging.info', (['"""Process "custom combos" test"""'], {}), '(\'Process "custom combos" test\')\n', (26022, 26054), False, 'import logging\n'), ((2836, 2843), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2841, 2843), False, 'from uuid import uuid4\n'), ((2867, 2905), 'report.CSVReport', 'CSVReport', (['cfg.reporting.csv_dir', 'uuid'], {}), '(cfg.reporting.csv_dir, uuid)\n', (2876, 2905), False, 'from report import CSVReport, ExcelReport\n'), ((2943, 2981), 'report.CSVReport', 'CSVReport', (['cfg.reporting.csv_dir', 'None'], {}), '(cfg.reporting.csv_dir, None)\n', (2952, 2981), False, 'from report import CSVReport, ExcelReport\n'), ((3833, 3850), 'pyspark.sql.functions.col', 'F.col', (['"""entityId"""'], {}), "('entityId')\n", (3838, 3850), True, 'from pyspark.sql import functions as F\n'), ((3852, 3881), 'pyspark.sql.functions.col', 'F.col', (['"""user_with_few_events"""'], {}), "('user_with_few_events')\n", (3857, 3881), True, 'from pyspark.sql import functions as F\n'), ((18956, 18997), 'ml_metrics.mapk', 'metrics.mapk', (['ground_truth', 'prediction', 'k'], {}), '(ground_truth, prediction, k)\n', (18968, 18997), True, 'import ml_metrics as metrics\n'), ((19956, 19963), 'uuid.uuid4', 'uuid4', ([], 
{}), '()\n', (19961, 19963), False, 'from uuid import uuid4\n'), ((19987, 20025), 'report.CSVReport', 'CSVReport', (['cfg.reporting.csv_dir', 'uuid'], {}), '(cfg.reporting.csv_dir, uuid)\n', (19996, 20025), False, 'from report import CSVReport, ExcelReport\n'), ((20063, 20101), 'report.CSVReport', 'CSVReport', (['cfg.reporting.csv_dir', 'None'], {}), '(cfg.reporting.csv_dir, None)\n', (20072, 20101), False, 'from report import CSVReport, ExcelReport\n'), ((3728, 3741), 'pyspark.sql.functions.col', 'F.col', (['"""user"""'], {}), "('user')\n", (3733, 3741), True, 'from pyspark.sql import functions as F\n'), ((4598, 4612), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (4603, 4612), True, 'from pyspark.sql import functions as F\n'), ((14965, 14999), 'numpy.random.choice', 'np.random.choice', (['items'], {'size': '(K,)'}), '(items, size=(K,))\n', (14981, 14999), True, 'import numpy as np\n'), ((15039, 15082), 'numpy.random.choice', 'np.random.choice', (['items'], {'size': '(K,)', 'p': 'probs'}), '(items, size=(K,), p=probs)\n', (15055, 15082), True, 'import numpy as np\n'), ((26882, 26924), 'logging.warn', 'logging.warn', (['"""Event group is not corect!"""'], {}), "('Event group is not corect!')\n", (26894, 26924), False, 'import logging\n'), ((1865, 1878), 'pyspark.sql.functions.col', 'F.col', (['"""Date"""'], {}), "('Date')\n", (1870, 1878), True, 'from pyspark.sql import functions as F\n'), ((1904, 1917), 'pyspark.sql.functions.col', 'F.col', (['"""Date"""'], {}), "('Date')\n", (1909, 1917), True, 'from pyspark.sql import functions as F\n'), ((3449, 3466), 'pyspark.sql.functions.col', 'F.col', (['"""entityId"""'], {}), "('entityId')\n", (3454, 3466), True, 'from pyspark.sql import functions as F\n'), ((4614, 4628), 'pyspark.sql.functions.col', 'F.col', (['"""count"""'], {}), "('count')\n", (4619, 4628), True, 'from pyspark.sql import functions as F\n'), ((4837, 4851), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), 
"('event')\n", (4842, 4851), True, 'from pyspark.sql import functions as F\n'), ((5132, 5146), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (5137, 5146), True, 'from pyspark.sql import functions as F\n'), ((5541, 5555), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (5546, 5555), True, 'from pyspark.sql import functions as F\n'), ((6001, 6015), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (6006, 6015), True, 'from pyspark.sql import functions as F\n'), ((6466, 6480), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (6471, 6480), True, 'from pyspark.sql import functions as F\n'), ((6899, 6913), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (6904, 6913), True, 'from pyspark.sql import functions as F\n'), ((7365, 7379), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (7370, 7379), True, 'from pyspark.sql import functions as F\n'), ((7836, 7850), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (7841, 7850), True, 'from pyspark.sql import functions as F\n'), ((8368, 8391), 'pyspark.sql.functions.col', 'F.col', (['"""targetEntityId"""'], {}), "('targetEntityId')\n", (8373, 8391), True, 'from pyspark.sql import functions as F\n'), ((8453, 8476), 'pyspark.sql.functions.col', 'F.col', (['"""targetEntityId"""'], {}), "('targetEntityId')\n", (8458, 8476), True, 'from pyspark.sql import functions as F\n'), ((8536, 8559), 'pyspark.sql.functions.col', 'F.col', (['"""targetEntityId"""'], {}), "('targetEntityId')\n", (8541, 8559), True, 'from pyspark.sql import functions as F\n'), ((4853, 4867), 'pyspark.sql.functions.col', 'F.col', (['"""count"""'], {}), "('count')\n", (4858, 4867), True, 'from pyspark.sql import functions as F\n'), ((5148, 5162), 'pyspark.sql.functions.col', 'F.col', (['"""count"""'], {}), "('count')\n", (5153, 5162), True, 'from pyspark.sql import functions as 
F\n'), ((5557, 5571), 'pyspark.sql.functions.col', 'F.col', (['"""count"""'], {}), "('count')\n", (5562, 5571), True, 'from pyspark.sql import functions as F\n'), ((6017, 6031), 'pyspark.sql.functions.col', 'F.col', (['"""count"""'], {}), "('count')\n", (6022, 6031), True, 'from pyspark.sql import functions as F\n'), ((6482, 6496), 'pyspark.sql.functions.col', 'F.col', (['"""count"""'], {}), "('count')\n", (6487, 6496), True, 'from pyspark.sql import functions as F\n'), ((6915, 6929), 'pyspark.sql.functions.col', 'F.col', (['"""count"""'], {}), "('count')\n", (6920, 6929), True, 'from pyspark.sql import functions as F\n'), ((7381, 7395), 'pyspark.sql.functions.col', 'F.col', (['"""count"""'], {}), "('count')\n", (7386, 7395), True, 'from pyspark.sql import functions as F\n'), ((7852, 7866), 'pyspark.sql.functions.col', 'F.col', (['"""count"""'], {}), "('count')\n", (7857, 7866), True, 'from pyspark.sql import functions as F\n'), ((5343, 5360), 'pyspark.sql.functions.col', 'F.col', (['"""entityId"""'], {}), "('entityId')\n", (5348, 5360), True, 'from pyspark.sql import functions as F\n'), ((5362, 5376), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (5367, 5376), True, 'from pyspark.sql import functions as F\n'), ((5779, 5796), 'pyspark.sql.functions.col', 'F.col', (['"""entityId"""'], {}), "('entityId')\n", (5784, 5796), True, 'from pyspark.sql import functions as F\n'), ((5798, 5812), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (5803, 5812), True, 'from pyspark.sql import functions as F\n'), ((6248, 6265), 'pyspark.sql.functions.col', 'F.col', (['"""entityId"""'], {}), "('entityId')\n", (6253, 6265), True, 'from pyspark.sql import functions as F\n'), ((6267, 6281), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (6272, 6281), True, 'from pyspark.sql import functions as F\n'), ((6695, 6718), 'pyspark.sql.functions.col', 'F.col', (['"""targetEntityId"""'], {}), 
"('targetEntityId')\n", (6700, 6718), True, 'from pyspark.sql import functions as F\n'), ((6720, 6734), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (6725, 6734), True, 'from pyspark.sql import functions as F\n'), ((7137, 7160), 'pyspark.sql.functions.col', 'F.col', (['"""targetEntityId"""'], {}), "('targetEntityId')\n", (7142, 7160), True, 'from pyspark.sql import functions as F\n'), ((7162, 7176), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (7167, 7176), True, 'from pyspark.sql import functions as F\n'), ((7612, 7635), 'pyspark.sql.functions.col', 'F.col', (['"""targetEntityId"""'], {}), "('targetEntityId')\n", (7617, 7635), True, 'from pyspark.sql import functions as F\n'), ((7637, 7651), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (7642, 7651), True, 'from pyspark.sql import functions as F\n'), ((10466, 10483), 'pyspark.sql.functions.col', 'F.col', (['"""entityId"""'], {}), "('entityId')\n", (10471, 10483), True, 'from pyspark.sql import functions as F\n'), ((10499, 10513), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (10504, 10513), True, 'from pyspark.sql import functions as F\n'), ((11230, 11247), 'pyspark.sql.functions.col', 'F.col', (['"""entityId"""'], {}), "('entityId')\n", (11235, 11247), True, 'from pyspark.sql import functions as F\n'), ((11263, 11277), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (11268, 11277), True, 'from pyspark.sql import functions as F\n'), ((12093, 12116), 'pyspark.sql.functions.col', 'F.col', (['"""targetEntityId"""'], {}), "('targetEntityId')\n", (12098, 12116), True, 'from pyspark.sql import functions as F\n'), ((12132, 12146), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (12137, 12146), True, 'from pyspark.sql import functions as F\n'), ((12878, 12901), 'pyspark.sql.functions.col', 'F.col', (['"""targetEntityId"""'], {}), 
"('targetEntityId')\n", (12883, 12901), True, 'from pyspark.sql import functions as F\n'), ((12917, 12931), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (12922, 12931), True, 'from pyspark.sql import functions as F\n'), ((10339, 10356), 'pyspark.sql.functions.col', 'F.col', (['"""entityId"""'], {}), "('entityId')\n", (10344, 10356), True, 'from pyspark.sql import functions as F\n'), ((10372, 10386), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (10377, 10386), True, 'from pyspark.sql import functions as F\n'), ((11104, 11121), 'pyspark.sql.functions.col', 'F.col', (['"""entityId"""'], {}), "('entityId')\n", (11109, 11121), True, 'from pyspark.sql import functions as F\n'), ((11137, 11151), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (11142, 11151), True, 'from pyspark.sql import functions as F\n'), ((11960, 11983), 'pyspark.sql.functions.col', 'F.col', (['"""targetEntityId"""'], {}), "('targetEntityId')\n", (11965, 11983), True, 'from pyspark.sql import functions as F\n'), ((11999, 12013), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (12004, 12013), True, 'from pyspark.sql import functions as F\n'), ((12746, 12769), 'pyspark.sql.functions.col', 'F.col', (['"""targetEntityId"""'], {}), "('targetEntityId')\n", (12751, 12769), True, 'from pyspark.sql import functions as F\n'), ((12785, 12799), 'pyspark.sql.functions.col', 'F.col', (['"""event"""'], {}), "('event')\n", (12790, 12799), True, 'from pyspark.sql import functions as F\n')] |
# logistic_regression_kfold.py
# python 2.7.14
# logistic-regression with ROC Curve
# MNIST classify between 6 and 8
# <NAME>
from sklearn.model_selection import KFold
import numpy as np
import matplotlib.pyplot as plt
def logistic_regression(x, y, steps, lr):
    """Train logistic-regression weights with batch gradient descent.

    Parameters
    ----------
    x : feature matrix, one row per example
    y : 0/1 label vector, one entry per example
    steps : number of gradient-descent iterations
    lr : learning rate applied to each update

    Returns
    -------
    (weights, costs) : final weight vector and the per-step error history
    """
    weights = initialize_weights(x.shape[1])
    cost_history = []
    for _ in range(steps):
        grad, step_error = gradient_descent(x, y, weights)
        weights = weights + lr * grad
        cost_history.append(step_error)
    return weights, cost_history
def gradient_descent(x, y, w):
    """Evaluate one gradient-descent step for logistic regression.

    Parts of this function are adapted from
    https://github.com/michelucci/Logistic-Regression-Explained/blob/master/MNIST%20with%20Logistic%20Regression%20from%20scratch.ipynb

    Parameters
    ----------
    x : feature matrix, one row per training example
    y : 0/1 label vector, one entry per example
    w : current weight vector (one entry per feature)

    Returns
    -------
    (gradient, cost) : the averaged ascent direction (add `lr * gradient`
    to w to descend the cross-entropy) and the current mean error cost.
    """
    # array of sigmoid-squashed probabilities, one per training example
    predictions = predict(x, w)
    # Number of training cases. Bug fix: this was x.shape[1] (the number of
    # *features*, 784 for MNIST), which mis-scaled the averaged gradient;
    # the rows of x are the examples, so the batch size is x.shape[0].
    m = x.shape[0]
    # error surface: difference between predictions and ground truths
    error_surface = (predictions - y).transpose()
    # average of -x_i * (pred_i - y_i) over the batch
    gradient = -1.0 / m * np.dot(x.transpose(), error_surface)
    return gradient, cost(predictions, y)
def cost(z, y):
# based on the formula in slide 18 from Dr. Kangs class
positiveCases = -y * np.log(z)
negativeCases = -(1-y) * np.log(1-z)
errorCost = sum(positiveCases + negativeCases) / len(y)
return errorCost
def sigmoid(z):
    """Logistic squashing function mapping any real input into (0, 1).

    Works elementwise when z is a numpy array.
    """
    return 1.0 / (1.0 + np.exp(-z))
def predict(features, weights):
    """Return the positive-class probability for each example.

    Computes the linear scores features . weights and squashes them
    into (0, 1) with the sigmoid function.
    """
    logits = np.dot(features, weights)
    return sigmoid(logits)
def normalize(x):
    """Scale raw pixel intensities from [0, 255] down to [0.0, 1.0]."""
    max_pixel_value = 255.0
    return x / max_pixel_value
def rescale_data(y):
    """Map the raw digit labels {6, 8} onto binary class labels {0, 1}.

    6 -> (6-6)/2 = 0 and 8 -> (8-6)/2 = 1; works elementwise on numpy
    arrays. This single arithmetic expression replaces the original
    commented-out element-by-element loop (dead code removed).
    """
    return (y - 6) / 2
def initialize_weights(feature_shape):
    """Return an all-zero weight vector with one entry per feature."""
    return np.zeros(feature_shape)
def calculate_tpr_and_fpr(test_x, test_y, optimized_w, threshold=0.5):
    """Compute true/false positive rates of thresholded predictions.

    Bug fix: the third parameter was misspelled ``optmized_w`` and never
    used -- the body silently read the module-level globals ``optimized_w``
    and ``threshold`` instead. It now uses its own arguments; ``threshold``
    is exposed as a parameter whose default (0.5) matches the script's
    previous global value.

    Parameters
    ----------
    test_x : normalized feature matrix, one row per test example
    test_y : 0/1 ground-truth label vector
    optimized_w : trained weight vector
    threshold : probability cutoff for calling a prediction positive

    Returns
    -------
    (tpr, fpr) : true positive rate and false positive rate as floats
    """
    predictions = predict(test_x, optimized_w)
    assert (len(predictions) == len(test_y))
    tp = 0
    fp = 0
    tn = 0
    fn = 0
    for truth, prob in zip(test_y, predictions):
        if truth == 1:
            if prob > threshold:
                tp += 1
            else:
                fn += 1
        else:
            if prob <= threshold:
                tn += 1
            else:
                fp += 1
    # Same denominators as the original: count of positive / negative labels.
    num_positive = tp + fn
    num_negative = tn + fp
    tpr = float(tp) / num_positive
    fpr = float(fp) / num_negative
    return tpr, fpr
if __name__ == '__main__':
    # Driver: k-fold cross-validated logistic regression on MNIST 6s vs 8s,
    # then a convergence plot for fold 0 and an ROC curve over all folds.
    # NOTE(review): names bound here (e.g. `threshold`, `optimized_w`) are
    # module globals that calculate_tpr_and_fpr reads -- confirm before
    # renaming any of them.
    # hyper-parameters
    num_folds = 10
    learning_rate = 1e-2
    threshold = 0.5
    gradient_descent_steps = 1000
    # setting up data: first CSV column is the label, the rest are pixels
    mnist_data = np.genfromtxt('MNIST_CV.csv', delimiter=',', dtype=int, skip_header=1)
    kf = KFold(n_splits=num_folds)
    kf.get_n_splits(mnist_data)
    # initialize lists to hold fpr and tpr values to be plotted later
    falsePositiveRates = []
    truePositiveRates = []
    error_costs = []
    print("Logistic Regression on MNIST 6's and 8's. \nUsing K-Fold Cross-Validation with {} Folds \nLearning Rate: {} | Gradient Descent Steps: {}".format(num_folds, learning_rate, gradient_descent_steps))
    for train_index, test_index in kf.split(mnist_data):
        folded_mnist_training = mnist_data[train_index]
        folded_mnist_test = mnist_data[test_index]
        # get and rescale labels (column 0: digit 6 -> 0, digit 8 -> 1)
        y_train = rescale_data(np.array(folded_mnist_training[:, 0]))
        y_test = rescale_data(np.array(folded_mnist_test[:, 0]))
        # normalize pixel data (remaining columns) into [0, 1]
        x_train = normalize(np.array(folded_mnist_training[:,1:]))
        x_test = normalize(np.array(folded_mnist_test[:, 1:]))
        # train on this fold and evaluate on its held-out part
        optimized_w, costs = logistic_regression(x_train, y_train, gradient_descent_steps, learning_rate)
        error_costs.append(costs)
        tpr, fpr = calculate_tpr_and_fpr(x_test, y_test, optimized_w)
        truePositiveRates.append(tpr)
        falsePositiveRates.append(fpr)
    average_tpr = sum(truePositiveRates)/len(truePositiveRates)
    average_fpr = sum(falsePositiveRates)/len(falsePositiveRates)
    print("Average True Positive Rate: {} \nAverage False Positive Rate: {}".format(average_tpr, average_fpr))
    # anchor the ROC curve at (0, 0) and (1, 1) and sort for plotting
    falsePositiveRates += [0,1]
    falsePositiveRates.sort()
    truePositiveRates += [0,1]
    truePositiveRates.sort()
    # figure 1: per-step cost of the first fold (convergence check)
    plt.figure(1)
    plt.title("Gradient Descent Convergence, fold: 0")
    plt.plot(error_costs[0])
    # figure 2: ROC curve with the chance diagonal for reference
    plt.figure(2)
    plt.title('Receiver Operating Characteristic')
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    # plt.plot([0,falsePositiveRate,1], [0,truePositiveRate,1])
    plt.plot(falsePositiveRates, truePositiveRates)
    plt.plot([0, 1], [0, 1],'r--')
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.log",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.dot",
"matplotlib.pyplot.title",
"sklearn.model_selection.KFold",
"matplotlib.pyplot.ylim",
... | [((1869, 1892), 'numpy.zeros', 'np.zeros', (['feature_shape'], {}), '(feature_shape)\n', (1877, 1892), True, 'import numpy as np\n'), ((2827, 2897), 'numpy.genfromtxt', 'np.genfromtxt', (['"""MNIST_CV.csv"""'], {'delimiter': '""","""', 'dtype': 'int', 'skip_header': '(1)'}), "('MNIST_CV.csv', delimiter=',', dtype=int, skip_header=1)\n", (2840, 2897), True, 'import numpy as np\n'), ((2908, 2933), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'num_folds'}), '(n_splits=num_folds)\n', (2913, 2933), False, 'from sklearn.model_selection import KFold\n'), ((4475, 4488), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (4485, 4488), True, 'import matplotlib.pyplot as plt\n'), ((4493, 4543), 'matplotlib.pyplot.title', 'plt.title', (['"""Gradient Descent Convergence, fold: 0"""'], {}), "('Gradient Descent Convergence, fold: 0')\n", (4502, 4543), True, 'import matplotlib.pyplot as plt\n'), ((4548, 4572), 'matplotlib.pyplot.plot', 'plt.plot', (['error_costs[0]'], {}), '(error_costs[0])\n', (4556, 4572), True, 'import matplotlib.pyplot as plt\n'), ((4578, 4591), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (4588, 4591), True, 'import matplotlib.pyplot as plt\n'), ((4596, 4642), 'matplotlib.pyplot.title', 'plt.title', (['"""Receiver Operating Characteristic"""'], {}), "('Receiver Operating Characteristic')\n", (4605, 4642), True, 'import matplotlib.pyplot as plt\n'), ((4647, 4670), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (4655, 4670), True, 'import matplotlib.pyplot as plt\n'), ((4675, 4698), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (4683, 4698), True, 'import matplotlib.pyplot as plt\n'), ((4767, 4814), 'matplotlib.pyplot.plot', 'plt.plot', (['falsePositiveRates', 'truePositiveRates'], {}), '(falsePositiveRates, truePositiveRates)\n', (4775, 4814), True, 'import matplotlib.pyplot as plt\n'), ((4819, 4850), 'matplotlib.pyplot.plot', 
'plt.plot', (['[0, 1]', '[0, 1]', '"""r--"""'], {}), "([0, 1], [0, 1], 'r--')\n", (4827, 4850), True, 'import matplotlib.pyplot as plt\n'), ((4854, 4886), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (4864, 4886), True, 'import matplotlib.pyplot as plt\n'), ((4891, 4924), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (4901, 4924), True, 'import matplotlib.pyplot as plt\n'), ((4929, 4939), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4937, 4939), True, 'import matplotlib.pyplot as plt\n'), ((1141, 1150), 'numpy.log', 'np.log', (['z'], {}), '(z)\n', (1147, 1150), True, 'import numpy as np\n'), ((1180, 1193), 'numpy.log', 'np.log', (['(1 - z)'], {}), '(1 - z)\n', (1186, 1193), True, 'import numpy as np\n'), ((1444, 1469), 'numpy.dot', 'np.dot', (['features', 'weights'], {}), '(features, weights)\n', (1450, 1469), True, 'import numpy as np\n'), ((1313, 1323), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (1319, 1323), True, 'import numpy as np\n'), ((3557, 3594), 'numpy.array', 'np.array', (['folded_mnist_training[:, 0]'], {}), '(folded_mnist_training[:, 0])\n', (3565, 3594), True, 'import numpy as np\n'), ((3626, 3659), 'numpy.array', 'np.array', (['folded_mnist_test[:, 0]'], {}), '(folded_mnist_test[:, 0])\n', (3634, 3659), True, 'import numpy as np\n'), ((3715, 3753), 'numpy.array', 'np.array', (['folded_mnist_training[:, 1:]'], {}), '(folded_mnist_training[:, 1:])\n', (3723, 3753), True, 'import numpy as np\n'), ((3781, 3815), 'numpy.array', 'np.array', (['folded_mnist_test[:, 1:]'], {}), '(folded_mnist_test[:, 1:])\n', (3789, 3815), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import random
import unittest
import numpy as np
import torch
from ml.rl.test.constant_reward.env import Env
from ml.rl.thrift.core.ttypes import (
DiscreteActionModelParameters,
RainbowDQNParameters,
RLParameters,
TrainingParameters,
)
from ml.rl.training.dqn_trainer import DQNTrainer
logger = logging.getLogger(__name__)
class TestConstantReward(unittest.TestCase):
def setUp(self):
self.layers = [-1, 128, -1]
self.activations = ["relu", "linear"]
self.state_dims = 5
self.action_dims = 2
self.num_samples = 10000
self.minibatch_size = 128
self.epochs = 50
np.random.seed(0)
random.seed(0)
torch.manual_seed(0)
super(self.__class__, self).setUp()
def test_trainer_maxq(self):
env = Env(self.state_dims, self.action_dims)
env.seed(42)
maxq_parameters = DiscreteActionModelParameters(
actions=env.actions,
rl=RLParameters(
gamma=0.99,
target_update_rate=1.0,
reward_burnin=100,
maxq_learning=True,
),
rainbow=RainbowDQNParameters(
double_q_learning=True, dueling_architecture=False
),
training=TrainingParameters(
layers=self.layers,
activations=self.activations,
minibatch_size=self.minibatch_size,
learning_rate=1.0,
optimizer="ADAM",
),
)
maxq_trainer = DQNTrainer(maxq_parameters, env.normalization)
logger.info("Generating constant_reward MDPs..")
states, actions, rewards, next_states, next_actions, is_terminal, possible_next_actions = env.generate_samples_discrete(
self.num_samples
)
logger.info("Preprocessing constant_reward MDPs..")
for epoch in range(self.epochs):
tdps = env.preprocess_samples_discrete(
states,
actions,
rewards,
next_states,
next_actions,
is_terminal,
possible_next_actions,
self.minibatch_size,
)
logger.info("Training.. " + str(epoch))
for tdp in tdps:
maxq_trainer.train(tdp, None)
logger.info(
" ".join(
[
"Training epoch",
str(epoch),
"average q values",
str(torch.mean(maxq_trainer.all_action_scores)),
]
)
)
# Q value should converge to very close to 100
avg_q_value_after_training = torch.mean(maxq_trainer.all_action_scores)
self.assertLess(avg_q_value_after_training, 101)
self.assertGreater(avg_q_value_after_training, 99)
| [
"logging.getLogger",
"torch.manual_seed",
"ml.rl.test.constant_reward.env.Env",
"ml.rl.thrift.core.ttypes.TrainingParameters",
"torch.mean",
"random.seed",
"ml.rl.thrift.core.ttypes.RLParameters",
"numpy.random.seed",
"ml.rl.thrift.core.ttypes.RainbowDQNParameters",
"ml.rl.training.dqn_trainer.DQN... | [((426, 453), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (443, 453), False, 'import logging\n'), ((761, 778), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (775, 778), True, 'import numpy as np\n'), ((787, 801), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (798, 801), False, 'import random\n'), ((810, 830), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (827, 830), False, 'import torch\n'), ((923, 961), 'ml.rl.test.constant_reward.env.Env', 'Env', (['self.state_dims', 'self.action_dims'], {}), '(self.state_dims, self.action_dims)\n', (926, 961), False, 'from ml.rl.test.constant_reward.env import Env\n'), ((1672, 1718), 'ml.rl.training.dqn_trainer.DQNTrainer', 'DQNTrainer', (['maxq_parameters', 'env.normalization'], {}), '(maxq_parameters, env.normalization)\n', (1682, 1718), False, 'from ml.rl.training.dqn_trainer import DQNTrainer\n'), ((2895, 2937), 'torch.mean', 'torch.mean', (['maxq_trainer.all_action_scores'], {}), '(maxq_trainer.all_action_scores)\n', (2905, 2937), False, 'import torch\n'), ((1088, 1179), 'ml.rl.thrift.core.ttypes.RLParameters', 'RLParameters', ([], {'gamma': '(0.99)', 'target_update_rate': '(1.0)', 'reward_burnin': '(100)', 'maxq_learning': '(True)'}), '(gamma=0.99, target_update_rate=1.0, reward_burnin=100,\n maxq_learning=True)\n', (1100, 1179), False, 'from ml.rl.thrift.core.ttypes import DiscreteActionModelParameters, RainbowDQNParameters, RLParameters, TrainingParameters\n'), ((1276, 1348), 'ml.rl.thrift.core.ttypes.RainbowDQNParameters', 'RainbowDQNParameters', ([], {'double_q_learning': '(True)', 'dueling_architecture': '(False)'}), '(double_q_learning=True, dueling_architecture=False)\n', (1296, 1348), False, 'from ml.rl.thrift.core.ttypes import DiscreteActionModelParameters, RainbowDQNParameters, RLParameters, TrainingParameters\n'), ((1401, 1546), 'ml.rl.thrift.core.ttypes.TrainingParameters', 'TrainingParameters', 
([], {'layers': 'self.layers', 'activations': 'self.activations', 'minibatch_size': 'self.minibatch_size', 'learning_rate': '(1.0)', 'optimizer': '"""ADAM"""'}), "(layers=self.layers, activations=self.activations,\n minibatch_size=self.minibatch_size, learning_rate=1.0, optimizer='ADAM')\n", (1419, 1546), False, 'from ml.rl.thrift.core.ttypes import DiscreteActionModelParameters, RainbowDQNParameters, RLParameters, TrainingParameters\n'), ((2703, 2745), 'torch.mean', 'torch.mean', (['maxq_trainer.all_action_scores'], {}), '(maxq_trainer.all_action_scores)\n', (2713, 2745), False, 'import torch\n')] |
"""DyNA-PPO environment module."""
import editdistance
import numpy as np
from tf_agents.environments import py_environment
from tf_agents.specs import array_spec
from tf_agents.trajectories import time_step as ts
from tf_agents.utils import nest_utils
import flexs
from flexs.utils import sequence_utils as s_utils
class DynaPPOEnvironment(py_environment.PyEnvironment): # pylint: disable=W0223
"""DyNA-PPO environment based on TF-Agents."""
def __init__( # pylint: disable=W0231
self,
alphabet: str,
seq_length: int,
model: flexs.Model,
landscape: flexs.Landscape,
batch_size: int,
):
"""
Initialize DyNA-PPO agent environment.
Based on this tutorial:
https://www.mikulskibartosz.name/how-to-create-an-environment-for-a-tensorflow-agent
Args:
alphabet: Usually UCGA.
starting_seq: When initializing the environment,
the sequence which is initially mutated.
model: Landscape or model which evaluates
each sequence.
landscape: True fitness landscape.
batch_size: Number of epsisodes to batch together and run in parallel.
"""
self.alphabet = alphabet
self._batch_size = batch_size
self.seq_length = seq_length
self.partial_seq_len = 0
self.states = np.zeros(
(batch_size, seq_length, len(alphabet) + 1), dtype="float32"
)
self.states[:, np.arange(seq_length), -1] = 1
# model/model/measurements
self.model = model
self.landscape = landscape
self.fitness_model_is_gt = False
self.previous_fitness = -float("inf")
# sequence
self.all_seqs = {}
self.lam = 0.1
# tf_agents environment
self._action_spec = array_spec.BoundedArraySpec(
shape=(1,),
dtype=np.integer,
minimum=0,
maximum=len(self.alphabet) - 1,
name="action",
)
self._observation_spec = array_spec.BoundedArraySpec(
shape=(self.seq_length, len(self.alphabet) + 1),
dtype=np.float32,
minimum=0,
maximum=1,
name="observation",
)
self._time_step_spec = ts.time_step_spec(self._observation_spec)
def _reset(self):
self.partial_seq_len = 0
self.states[:, :, :] = 0
self.states[:, np.arange(self.seq_length), -1] = 1
return nest_utils.stack_nested_arrays(
[ts.restart(seq_state) for seq_state in self.states]
)
def batched(self):
"""Tf-agents function that says that this env returns batches of timesteps."""
return True
@property
def batch_size(self):
"""Tf-agents property that return env batch size."""
return self._batch_size
def time_step_spec(self):
"""Define time steps."""
return self._time_step_spec
def action_spec(self):
"""Define agent actions."""
return self._action_spec
def observation_spec(self):
"""Define environment observations."""
return self._observation_spec
def sequence_density(self, seq):
"""Get average distance to `seq` out of all observed sequences."""
dens = 0
dist_radius = 2
for s in self.all_seqs:
dist = int(editdistance.eval(s, seq))
if dist != 0 and dist <= dist_radius:
dens += self.all_seqs[s] / dist
return dens
def get_cached_fitness(self, seq):
"""Get cached sequence fitness computed in previous episodes."""
return self.all_seqs[seq]
def set_fitness_model_to_gt(self, fitness_model_is_gt):
"""
Set the fitness model to the ground truth landscape or to the model.
Call with `True` when doing an experiment-based training round
and call with `False` when doing a model-based training round.
"""
self.fitness_model_is_gt = fitness_model_is_gt
def _step(self, actions):
"""Progress the agent one step in the environment."""
actions = actions.flatten()
self.states[:, self.partial_seq_len, -1] = 0
self.states[np.arange(self._batch_size), self.partial_seq_len, actions] = 1
self.partial_seq_len += 1
# We have not generated the last residue in the sequence, so continue
if self.partial_seq_len < self.seq_length - 1:
return nest_utils.stack_nested_arrays(
[ts.transition(seq_state, 0) for seq_state in self.states]
)
# If sequence is of full length, score the sequence and end the episode
# We need to take off the column in the matrix (-1) representing the mask token
complete_sequences = [
s_utils.one_hot_to_string(seq_state[:, :-1], self.alphabet)
for seq_state in self.states
]
if self.fitness_model_is_gt:
fitnesses = self.landscape.get_fitness(complete_sequences)
else:
fitnesses = self.model.get_fitness(complete_sequences)
self.all_seqs.update(zip(complete_sequences, fitnesses))
# Reward = fitness - lambda * sequence density
rewards = np.array(
[
f - self.lam * self.sequence_density(seq)
for seq, f in zip(complete_sequences, fitnesses)
]
)
return nest_utils.stack_nested_arrays(
[ts.termination(seq_state, r) for seq_state, r in zip(self.states, rewards)]
)
class DynaPPOEnvironmentMutative(py_environment.PyEnvironment): # pylint: disable=W0223
"""
DyNA-PPO environment based on TF-Agents.
Note that unlike the other DynaPPO environment, this one is mutative rather than
constructive.
"""
def __init__( # pylint: disable=W0231
self,
alphabet: str,
starting_seq: str,
model: flexs.Model,
landscape: flexs.Landscape,
max_num_steps: int,
):
"""
Initialize DyNA-PPO agent environment.
Based on this tutorial:
https://www.mikulskibartosz.name/how-to-create-an-environment-for-a-tensorflow-agent
Args:
alphabet: Usually UCGA.
starting_seq: When initializing the environment,
the sequence which is initially mutated.
model: Landscape or model which evaluates
each sequence.
max_num_steps: Maximum number of steps before
episode is forced to terminate. Usually the
`model_queries_per_batch`.
"""
self.alphabet = alphabet
# model/model/measurements
self.model = model
self.landscape = landscape
self.fitness_model_is_gt = False
self.previous_fitness = -float("inf")
self.seq = starting_seq
self._state = {
"sequence": s_utils.string_to_one_hot(self.seq, self.alphabet).astype(
np.float32
),
"fitness": self.model.get_fitness([starting_seq]).astype(np.float32),
}
self.episode_seqs = set() # the sequences seen in the current episode
self.all_seqs = {}
self.measured_sequences = {}
self.lam = 0.1
# tf_agents environment
self._action_spec = array_spec.BoundedArraySpec(
shape=(1,),
dtype=np.integer,
minimum=0,
maximum=len(self.seq) * len(self.alphabet) - 1,
name="action",
)
self._observation_spec = {
"sequence": array_spec.BoundedArraySpec(
shape=(len(self.seq), len(self.alphabet)),
dtype=np.float32,
minimum=0,
maximum=1,
),
"fitness": array_spec.ArraySpec(shape=(1,), dtype=np.float32),
}
self.num_steps = 0
self.max_num_steps = max_num_steps
def _reset(self):
self.previous_fitness = -float("inf")
self._state = {
"sequence": s_utils.string_to_one_hot(self.seq, self.alphabet).astype(
np.float32
),
"fitness": self.model.get_fitness([self.seq]).astype(np.float32),
}
self.episode_seqs = set()
self.num_steps = 0
return ts.restart(self._state)
def action_spec(self):
"""Define agent actions."""
return self._action_spec
def observation_spec(self):
"""Define environment observations."""
return self._observation_spec
def get_state_string(self):
"""Get sequence representing current state."""
return s_utils.one_hot_to_string(self._state["sequence"], self.alphabet)
def sequence_density(self, seq):
"""Get average distance to `seq` out of all observed sequences."""
dens = 0
dist_radius = 2
for s in self.all_seqs:
dist = int(editdistance.eval(s, seq))
if dist != 0 and dist <= dist_radius:
dens += self.all_seqs[s] / dist
return dens
def set_fitness_model_to_gt(self, fitness_model_is_gt):
"""
Set the fitness model to the ground truth landscape or to the model.
Call with `True` when doing an experiment-based training round
and call with `False` when doing a model-based training round.
"""
self.fitness_model_is_gt = fitness_model_is_gt
def _step(self, action):
"""Progress the agent one step in the environment.
The agent moves until the reward is decreasing. The number of sequences that
can be evaluated at each episode is capped to `self.max_num_steps`.
"""
# if we've exceeded the maximum number of steps, terminate
if self.num_steps >= self.max_num_steps:
return ts.termination(self._state, 0)
# `action` is an integer representing which residue to mutate to 1
# along the flattened one-hot representation of the sequence
pos = action // len(self.alphabet)
res = action % len(self.alphabet)
self.num_steps += 1
# if we are trying to modify the sequence with a no-op, then stop
if self._state["sequence"][pos, res] == 1:
return ts.termination(self._state, 0)
self._state["sequence"][pos] = 0
self._state["sequence"][pos, res] = 1
state_string = s_utils.one_hot_to_string(self._state["sequence"], self.alphabet)
if self.fitness_model_is_gt:
self._state["fitness"] = self.landscape.get_fitness([state_string]).astype(
np.float32
)
else:
self._state["fitness"] = self.model.get_fitness([state_string]).astype(
np.float32
)
self.all_seqs[state_string] = self._state["fitness"].item()
reward = self._state["fitness"].item() - self.lam * self.sequence_density(
state_string
)
# if we have seen the sequence this episode,
# terminate episode and punish
# (to prevent going in loops)
if state_string in self.episode_seqs:
return ts.termination(self._state, -1)
self.episode_seqs.add(state_string)
# if the reward is not increasing, then terminate
if reward < self.previous_fitness:
return ts.termination(self._state, reward=reward)
self.previous_fitness = reward
return ts.transition(self._state, reward=reward)
| [
"tf_agents.trajectories.time_step.termination",
"tf_agents.trajectories.time_step.transition",
"tf_agents.trajectories.time_step.restart",
"flexs.utils.sequence_utils.one_hot_to_string",
"tf_agents.trajectories.time_step.time_step_spec",
"flexs.utils.sequence_utils.string_to_one_hot",
"tf_agents.specs.a... | [((2321, 2362), 'tf_agents.trajectories.time_step.time_step_spec', 'ts.time_step_spec', (['self._observation_spec'], {}), '(self._observation_spec)\n', (2338, 2362), True, 'from tf_agents.trajectories import time_step as ts\n'), ((8403, 8426), 'tf_agents.trajectories.time_step.restart', 'ts.restart', (['self._state'], {}), '(self._state)\n', (8413, 8426), True, 'from tf_agents.trajectories import time_step as ts\n'), ((8745, 8810), 'flexs.utils.sequence_utils.one_hot_to_string', 's_utils.one_hot_to_string', (["self._state['sequence']", 'self.alphabet'], {}), "(self._state['sequence'], self.alphabet)\n", (8770, 8810), True, 'from flexs.utils import sequence_utils as s_utils\n'), ((10499, 10564), 'flexs.utils.sequence_utils.one_hot_to_string', 's_utils.one_hot_to_string', (["self._state['sequence']", 'self.alphabet'], {}), "(self._state['sequence'], self.alphabet)\n", (10524, 10564), True, 'from flexs.utils import sequence_utils as s_utils\n'), ((11549, 11590), 'tf_agents.trajectories.time_step.transition', 'ts.transition', (['self._state'], {'reward': 'reward'}), '(self._state, reward=reward)\n', (11562, 11590), True, 'from tf_agents.trajectories import time_step as ts\n'), ((4860, 4919), 'flexs.utils.sequence_utils.one_hot_to_string', 's_utils.one_hot_to_string', (['seq_state[:, :-1]', 'self.alphabet'], {}), '(seq_state[:, :-1], self.alphabet)\n', (4885, 4919), True, 'from flexs.utils import sequence_utils as s_utils\n'), ((7888, 7938), 'tf_agents.specs.array_spec.ArraySpec', 'array_spec.ArraySpec', ([], {'shape': '(1,)', 'dtype': 'np.float32'}), '(shape=(1,), dtype=np.float32)\n', (7908, 7938), False, 'from tf_agents.specs import array_spec\n'), ((9923, 9953), 'tf_agents.trajectories.time_step.termination', 'ts.termination', (['self._state', '(0)'], {}), '(self._state, 0)\n', (9937, 9953), True, 'from tf_agents.trajectories import time_step as ts\n'), ((10357, 10387), 'tf_agents.trajectories.time_step.termination', 'ts.termination', 
(['self._state', '(0)'], {}), '(self._state, 0)\n', (10371, 10387), True, 'from tf_agents.trajectories import time_step as ts\n'), ((11254, 11285), 'tf_agents.trajectories.time_step.termination', 'ts.termination', (['self._state', '(-1)'], {}), '(self._state, -1)\n', (11268, 11285), True, 'from tf_agents.trajectories import time_step as ts\n'), ((11451, 11493), 'tf_agents.trajectories.time_step.termination', 'ts.termination', (['self._state'], {'reward': 'reward'}), '(self._state, reward=reward)\n', (11465, 11493), True, 'from tf_agents.trajectories import time_step as ts\n'), ((1515, 1536), 'numpy.arange', 'np.arange', (['seq_length'], {}), '(seq_length)\n', (1524, 1536), True, 'import numpy as np\n'), ((2475, 2501), 'numpy.arange', 'np.arange', (['self.seq_length'], {}), '(self.seq_length)\n', (2484, 2501), True, 'import numpy as np\n'), ((2571, 2592), 'tf_agents.trajectories.time_step.restart', 'ts.restart', (['seq_state'], {}), '(seq_state)\n', (2581, 2592), True, 'from tf_agents.trajectories import time_step as ts\n'), ((3422, 3447), 'editdistance.eval', 'editdistance.eval', (['s', 'seq'], {}), '(s, seq)\n', (3439, 3447), False, 'import editdistance\n'), ((4276, 4303), 'numpy.arange', 'np.arange', (['self._batch_size'], {}), '(self._batch_size)\n', (4285, 4303), True, 'import numpy as np\n'), ((5530, 5558), 'tf_agents.trajectories.time_step.termination', 'ts.termination', (['seq_state', 'r'], {}), '(seq_state, r)\n', (5544, 5558), True, 'from tf_agents.trajectories import time_step as ts\n'), ((9020, 9045), 'editdistance.eval', 'editdistance.eval', (['s', 'seq'], {}), '(s, seq)\n', (9037, 9045), False, 'import editdistance\n'), ((4576, 4603), 'tf_agents.trajectories.time_step.transition', 'ts.transition', (['seq_state', '(0)'], {}), '(seq_state, 0)\n', (4589, 4603), True, 'from tf_agents.trajectories import time_step as ts\n'), ((6991, 7041), 'flexs.utils.sequence_utils.string_to_one_hot', 's_utils.string_to_one_hot', (['self.seq', 'self.alphabet'], {}), 
'(self.seq, self.alphabet)\n', (7016, 7041), True, 'from flexs.utils import sequence_utils as s_utils\n'), ((8138, 8188), 'flexs.utils.sequence_utils.string_to_one_hot', 's_utils.string_to_one_hot', (['self.seq', 'self.alphabet'], {}), '(self.seq, self.alphabet)\n', (8163, 8188), True, 'from flexs.utils import sequence_utils as s_utils\n')] |
import numpy as np
import sys
import os
sys.path.append(os.path.expanduser('~/darts/cnn'))
#from train_class import Train
OPS = ['max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5'
]
NUM_VERTICES = 4
INPUT_1 = 'c_k-2'
INPUT_2 = 'c_k-1'
class Arch:
def __init__(self, arch):
self.arch = arch
def serialize(self):
return self.arch
def query(self, epochs=50):
trainer = Train()
val_losses, test_losses = trainer.main(self.arch, epochs=epochs)
val_loss = 100 - np.mean(val_losses)
test_loss = 100 - test_losses[-1]
return val_loss, test_loss
@classmethod
def random_arch(cls):
# output a uniformly random architecture spec
# from the DARTS repository
# https://github.com/quark0/darts
normal = []
reduction = []
for i in range(NUM_VERTICES):
ops = np.random.choice(range(len(OPS)), NUM_VERTICES)
#input nodes for conv
nodes_in_normal = np.random.choice(range(i+2), 2, replace=False)
#input nodes for reduce
nodes_in_reduce = np.random.choice(range(i+2), 2, replace=False)
normal.extend([(nodes_in_normal[0], ops[0]), (nodes_in_normal[1], ops[1])])
reduction.extend([(nodes_in_reduce[0], ops[2]), (nodes_in_reduce[1], ops[3])])
return (normal, reduction)
def mutate(self, edits):
""" mutate a single arch """
# first convert tuple to array so that it is mutable
mutation = []
for cell in self.arch:
mutation.append([])
for pair in cell:
mutation[-1].append([])
for num in pair:
mutation[-1][-1].append(num)
#make mutations
for _ in range(edits):
cell = np.random.choice(2)
pair = np.random.choice(len(OPS))
num = np.random.choice(2)
if num == 1:
mutation[cell][pair][num] = np.random.choice(len(OPS))
else:
inputs = pair // 2 + 2
choice = np.random.choice(inputs)
if pair % 2 == 0 and mutation[cell][pair+1][num] != choice:
mutation[cell][pair][num] = choice
elif pair % 2 != 0 and mutation[cell][pair-1][num] != choice:
mutation[cell][pair][num] = choice
return mutation
def get_paths(self):
""" return all paths from input to output """
path_builder = [[[], [], [], []], [[], [], [], [], ]]
paths = [[], []]
for i, cell in enumerate(self.arch):
for j in range(len(OPS)):
if cell[j][0] == 0:
path = [INPUT_1, OPS[cell[j][1]]]
path_builder[i][j//2].append(path)
paths[i].append(path)
elif cell[j][0] == 1:
path = [INPUT_2, OPS[cell[j][1]]]
path_builder[i][j//2].append(path)
paths[i].append(path)
else:
for path in path_builder[i][cell[j][0] - 2]:
path = [*path, OPS[cell[j][1]]]
path_builder[i][j//2].append(path)
paths[i].append(path)
# check if there are paths of length >=5
contains_long_path = [False, False]
if max([len(path) for path in paths[0]]) >= 5:
contains_long_path[0] = True
if max([len(path) for path in paths[1]]) >= 5:
contains_long_path[1] = True
return paths, contains_long_path
def get_path_indices(self, long_paths=True):
"""
compute the index of each path
There are 4 * (8^0 + ... + 8^4) paths total
If long_paths = False, we give a single boolean to all paths of
size 4, so there are only 4 * (1 + 8^0 + ... + 8^3) paths
"""
paths, contains_long_path = self.get_paths()
normal_paths, reduce_paths = paths
num_ops = len(OPS)
"""
Compute the max number of paths per input per cell.
Since there are two cells and two inputs per cell,
total paths = 4 * max_paths
"""
if not long_paths:
max_paths = 1 + sum([num_ops ** i for i in range(NUM_VERTICES)])
else:
max_paths = sum([num_ops ** i for i in range(NUM_VERTICES + 1)])
path_indices = []
# set the base index based on the cell and the input
for i, paths in enumerate((normal_paths, reduce_paths)):
for path in paths:
index = i * 2 * max_paths
if path[0] == INPUT_2:
index += max_paths
# recursively compute the index of the path
for j in range(NUM_VERTICES + 1):
if j == len(path) - 1:
path_indices.append(index)
break
elif j == (NUM_VERTICES - 1) and not long_paths:
path_indices.append(2 * (i + 1) * max_paths - 1)
break
else:
index += num_ops ** j * (OPS.index(path[j + 1]) + 1)
return (tuple(path_indices), contains_long_path)
def encode_paths(self, long_paths=True):
# output one-hot encoding of paths
path_indices, _ = self.get_path_indices(long_paths=long_paths)
num_ops = len(OPS)
if not long_paths:
max_paths = 1 + sum([num_ops ** i for i in range(NUM_VERTICES)])
else:
max_paths = sum([num_ops ** i for i in range(NUM_VERTICES + 1)])
path_encoding = np.zeros(4 * max_paths)
for index in path_indices:
path_encoding[index] = 1
return path_encoding
def path_distance(self, other):
# compute the distance between two architectures
# by comparing their path encodings
return np.sum(np.array(self.encode_paths() != np.array(other.encode_paths())))
| [
"numpy.random.choice",
"numpy.mean",
"numpy.zeros",
"os.path.expanduser"
] | [((57, 90), 'os.path.expanduser', 'os.path.expanduser', (['"""~/darts/cnn"""'], {}), "('~/darts/cnn')\n", (75, 90), False, 'import os\n'), ((5826, 5849), 'numpy.zeros', 'np.zeros', (['(4 * max_paths)'], {}), '(4 * max_paths)\n', (5834, 5849), True, 'import numpy as np\n'), ((631, 650), 'numpy.mean', 'np.mean', (['val_losses'], {}), '(val_losses)\n', (638, 650), True, 'import numpy as np\n'), ((1941, 1960), 'numpy.random.choice', 'np.random.choice', (['(2)'], {}), '(2)\n', (1957, 1960), True, 'import numpy as np\n'), ((2025, 2044), 'numpy.random.choice', 'np.random.choice', (['(2)'], {}), '(2)\n', (2041, 2044), True, 'import numpy as np\n'), ((2223, 2247), 'numpy.random.choice', 'np.random.choice', (['inputs'], {}), '(inputs)\n', (2239, 2247), True, 'import numpy as np\n')] |
"""
A unit test for function_caller in function_caller and for synthetic
functions in utils.euclidean_synthetic_functions.py
-- <EMAIL>
"""
# pylint: disable=invalid-name
# pylint: disable=no-member
import numpy as np
# Local imports
from ..utils import euclidean_synthetic_functions as esf
from ..utils.base_test_class import BaseTestClass, execute_tests
from ..utils.ancillary_utils import get_list_of_floats_as_str
_TOL = 1e-5
class EuclideanSyntheticFunctionTestCase(BaseTestClass):
  """ Unit tests for the Euclidean synthetic benchmark functions.

      For each benchmark the tests compare three evaluation paths -- the raw
      python function, a domain-normalised function caller, and an
      unnormalised function caller -- and check that they agree up to the
      configured observation noise.
  """
  def setUp(self):
    """ Prior set up. """
    # Each entry is (name, domain_dim, fidel_dim, noise_type, noise_scale).
    # A fidel_dim that is not None selects the multi-fidelity variant.
    self.test_function_data = [('hartmann', 3, None, 'no_noise', None),
                               ('hartmann3', None, None, 'no_noise', None),
                               ('hartmann6', None, None, 'no_noise', None),
                               ('branin', None, None, 'no_noise', None),
                               ('borehole', None, None, 'no_noise', None),
                               # With noise
                               ('hartmann6', None, None, 'gauss', 0.1),
                               ('branin', None, None, 'gauss', 0.1),
                               ('borehole', None, None, 'gauss', 10.0),
                               # Multi-fidelity with and without noise
                               ('hartmann3', None, 2, 'no_noise', None),
                               ('branin', None, 3, 'no_noise', None),
                               ('hartmann6', None, 4, 'gauss', 0.1),
                               ('borehole', None, 1, 'gauss', 10.0),
                              ]
    # Number of random points evaluated per benchmark.
    self.num_test_points = 100
  @classmethod
  def _get_test_vals_from_func_caller(cls, func_caller, test_points):
    """ Evaluates func_caller at each point; returns (values, qinfos, raw rets). """
    test_rets = [func_caller.eval_single(elem) for elem in test_points]
    test_vals = [ret[0] for ret in test_rets]
    test_qinfos = [ret[1] for ret in test_rets]
    return test_vals, test_qinfos, test_rets
  @classmethod
  def _get_test_vals_from_mf_func_caller(cls, func_caller, test_points, test_fidels):
    """ Multi-fidelity variant: evaluates each (fidel, point) pair. """
    test_rets = [func_caller.eval_at_fidel_single(fidel, point) for \
                 fidel, point in zip(test_fidels, test_points)]
    test_vals = [ret[0] for ret in test_rets]
    test_qinfos = [ret[1] for ret in test_rets]
    return test_vals, test_qinfos, test_rets
  def _test_for_max_val(self, func_caller):
    """ Tests that the optimum value is larger than the test points. """
    test_points = np.random.random((self.num_test_points, func_caller.domain.dim))
    fc_test_vals, _, _ = self._get_test_vals_from_func_caller(func_caller, test_points)
    # func_caller.maxval should upper-bound every sampled function value.
    assert np.all(np.array(fc_test_vals) <= func_caller.maxval)
  @classmethod
  def _test_for_test_err(cls, test_vals_1, test_vals_2, noise_scale):
    """ Asserts the RMS difference between the two value lists is within
        tolerance (plus an allowance of twice the noise scale). """
    test_err = np.linalg.norm(np.array(test_vals_1) - np.array(test_vals_2)) / \
               np.sqrt(len(test_vals_1))
    assert test_err <= _TOL + 2 * noise_scale
    return test_err
  def _test_for_func_vals(self, func_caller, unnorm_func_caller):
    """ Tests that the function values correspond to those given. The unit test
        test_all_synthetic_functions calls this for each of the test functions in
        a loop.
    """
    # In this unit test, fc_ denotes for quantities corresponding to the (normalised)
    # func_caller. ufc_ denotes quantities corresponding to the unnorm_func_caller.
    # Test with func_caller and func
    test_points = np.random.random((self.num_test_points, func_caller.domain.dim))
    raw_test_points = func_caller.get_raw_domain_coords(test_points)
    if not func_caller.is_mf():
      raw_test_vals = [func_caller.func(elem) for elem in raw_test_points]
    else:
      test_fidels = np.random.random((self.num_test_points, func_caller.fidel_space.dim))
      raw_test_fidels = func_caller.get_raw_fidel_coords(test_fidels)
      raw_test_vals = [func_caller.func(rfidel, rpoint) for
                       (rfidel, rpoint) in zip(raw_test_fidels, raw_test_points)]
    # First the normalised function caller
    fc_noise_scale = 0.0 if func_caller.noise_scale is None else func_caller.noise_scale
    if not func_caller.is_mf():
      fc_test_vals, _, _ = self._get_test_vals_from_func_caller(func_caller, test_points)
    else:
      fc_test_vals, _, _ = self._get_test_vals_from_mf_func_caller(func_caller,
                             test_points, test_fidels)
    fc_test_err = self._test_for_test_err(raw_test_vals, fc_test_vals, fc_noise_scale)
    # Test with unnormalised function caller
    # NOTE(review): this reads func_caller.noise_scale, not
    # unnorm_func_caller.noise_scale -- equivalent here since both callers are
    # built with the same noise settings, but looks like a copy-paste slip.
    ufc_noise_scale = 0.0 if func_caller.noise_scale is None else func_caller.noise_scale
    if not func_caller.is_mf():
      ufc_test_vals, _, _ = self._get_test_vals_from_func_caller(unnorm_func_caller,
                              raw_test_points)
    else:
      ufc_test_vals, _, _ = self._get_test_vals_from_mf_func_caller(unnorm_func_caller,
                              raw_test_points, raw_test_fidels)
    ufc_test_err = self._test_for_test_err(raw_test_vals, ufc_test_vals, ufc_noise_scale)
    # Test for normalised and unnormalised callers
    fc_ufc_noise_scale = fc_noise_scale + ufc_noise_scale
    fc_ufc_err = self._test_for_test_err(fc_test_vals, ufc_test_vals, fc_ufc_noise_scale)
    return fc_test_err, ufc_test_err, fc_ufc_err
  def _test_variation_along_fidel_dim(self, func_caller, unnorm_func_caller,
                                      fidel_dim, test_point=None):
    """ Evaluates a fixed domain point on a 1-D grid along one fidelity
        dimension (others held at 1) and prints the values for both callers. """
    _grid_size = 10
    # Fidelity grid: ones everywhere except a linspace in column `fidel_dim`.
    _rem_coords_pre = np.ones((_grid_size, fidel_dim))
    _fidel_dim_coords = np.reshape(np.linspace(0, 1, _grid_size), (_grid_size, 1))
    _rem_coords_post = np.ones((_grid_size, func_caller.fidel_space.dim - fidel_dim - 1))
    fidel_test_grid = np.hstack((_rem_coords_pre, _fidel_dim_coords, _rem_coords_post))
    raw_fidel_test_grid = func_caller.get_raw_fidel_coords(fidel_test_grid)
    test_point = test_point if test_point is not None else \
                 np.random.random((func_caller.domain.dim,))
    raw_test_point = func_caller.get_raw_domain_coords(test_point)
    test_vals_at_grid = [func_caller.eval_at_fidel_single(fidel, test_point)[0] for
                         fidel in fidel_test_grid]
    ufc_test_vals_at_grid = [
      unnorm_func_caller.eval_at_fidel_single(raw_fidel, raw_test_point)[0]
      for raw_fidel in raw_fidel_test_grid]
    test_vals_at_grid_str = get_list_of_floats_as_str(test_vals_at_grid)
    ufc_test_vals_at_grid_str = get_list_of_floats_as_str(ufc_test_vals_at_grid)
    test_point_str = get_list_of_floats_as_str(test_point)
    raw_test_point_str = get_list_of_floats_as_str(raw_test_point)
    self.report('fidel values (normalised func_caller) at x=%s: (fidel_dim=%d) %s'%(
        test_point_str, fidel_dim, test_vals_at_grid_str), 'test_result')
    self.report('fidel values (unnorm_func_caller) at x=%s: (fidel_dim=%d) %s'%(
        raw_test_point_str, fidel_dim, ufc_test_vals_at_grid_str), 'test_result')
  def test_all_synthetic_functions(self):
    """ Tests all synthetic functions in a loop. """
    for idx, (func_name, domain_dim, fidel_dim, noise_type, noise_scale) in \
      enumerate(self.test_function_data):
      sf_or_mf = 'sf' if fidel_dim is None else 'mf'
      self.report(('Testing %d/%d: %s(%s), domain_dim:%s, fidel_dim:%s, ' +
                   'noise(%s, %s).')%(idx+1, len(self.test_function_data),
                                      func_name, sf_or_mf, domain_dim, fidel_dim,
                                      noise_type, noise_scale))
      # get the function (normalised and unnormalised domain versions)
      func_caller = esf.get_syn_func_caller(func_name, domain_dim, fidel_dim,
                                              noise_type, noise_scale,
                                              to_normalise_domain=True)
      unnorm_func_caller = esf.get_syn_func_caller(func_name, domain_dim, fidel_dim,
                                                   noise_type, noise_scale,
                                                   to_normalise_domain=False)
      # Test for the maximum values (only meaningful without noise)
      if not func_caller.is_noisy():
        self._test_for_max_val(func_caller)
      # Test for the function values and print out result
      fc_err, ufc_err, fc_ufc_err = \
        self._test_for_func_vals(func_caller, unnorm_func_caller)
      self.report(('normalised_err: %0.5f, unnormalised_err: %0.5f., ' +
                   'norm-unnorm_err: %0.5f')%(fc_err, ufc_err, fc_ufc_err), 'test_result')
      # Print variation along a fidelity dimension
      if func_caller.is_mf() and not func_caller.is_noisy():
        self._test_variation_along_fidel_dim(func_caller, unnorm_func_caller, 0)
if __name__ == '__main__':
  # Run this module's test cases via the project's test harness.
  execute_tests()
| [
"numpy.ones",
"numpy.hstack",
"numpy.random.random",
"numpy.array",
"numpy.linspace"
] | [((2508, 2572), 'numpy.random.random', 'np.random.random', (['(self.num_test_points, func_caller.domain.dim)'], {}), '((self.num_test_points, func_caller.domain.dim))\n', (2524, 2572), True, 'import numpy as np\n'), ((3526, 3590), 'numpy.random.random', 'np.random.random', (['(self.num_test_points, func_caller.domain.dim)'], {}), '((self.num_test_points, func_caller.domain.dim))\n', (3542, 3590), True, 'import numpy as np\n'), ((5651, 5683), 'numpy.ones', 'np.ones', (['(_grid_size, fidel_dim)'], {}), '((_grid_size, fidel_dim))\n', (5658, 5683), True, 'import numpy as np\n'), ((5790, 5856), 'numpy.ones', 'np.ones', (['(_grid_size, func_caller.fidel_space.dim - fidel_dim - 1)'], {}), '((_grid_size, func_caller.fidel_space.dim - fidel_dim - 1))\n', (5797, 5856), True, 'import numpy as np\n'), ((5879, 5944), 'numpy.hstack', 'np.hstack', (['(_rem_coords_pre, _fidel_dim_coords, _rem_coords_post)'], {}), '((_rem_coords_pre, _fidel_dim_coords, _rem_coords_post))\n', (5888, 5944), True, 'import numpy as np\n'), ((3797, 3866), 'numpy.random.random', 'np.random.random', (['(self.num_test_points, func_caller.fidel_space.dim)'], {}), '((self.num_test_points, func_caller.fidel_space.dim))\n', (3813, 3866), True, 'import numpy as np\n'), ((5719, 5748), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '_grid_size'], {}), '(0, 1, _grid_size)\n', (5730, 5748), True, 'import numpy as np\n'), ((6099, 6142), 'numpy.random.random', 'np.random.random', (['(func_caller.domain.dim,)'], {}), '((func_caller.domain.dim,))\n', (6115, 6142), True, 'import numpy as np\n'), ((2679, 2701), 'numpy.array', 'np.array', (['fc_test_vals'], {}), '(fc_test_vals)\n', (2687, 2701), True, 'import numpy as np\n'), ((2890, 2911), 'numpy.array', 'np.array', (['test_vals_1'], {}), '(test_vals_1)\n', (2898, 2911), True, 'import numpy as np\n'), ((2914, 2935), 'numpy.array', 'np.array', (['test_vals_2'], {}), '(test_vals_2)\n', (2922, 2935), True, 'import numpy as np\n')] |
import os
import librosa
import numpy as np
from pathlib import Path
from tqdm import tqdm
from shutil import copyfile
# Seed NumPy's global RNG so the shuffle in split_data is reproducible.
np.random.seed(87)
def split_data(dir, name, type):
    """Shuffle the audio files under `dir` and split them 70/15/15.

    The file-path lists are saved to `<type>/filepath_<name>.npy` and the
    files themselves are copied into audio_splited/{train,validate,test}.
    """
    files = librosa.util.find_files(dir)
    output_dir = 'audio_splited/'
    os.makedirs(output_dir, exist_ok=True)
    np.random.shuffle(files)
    os.makedirs('./{}'.format(type), exist_ok=True)
    # Split boundaries (the train slice keeps one extra element, matching the
    # original `idx <= train_num` rule).
    n_train = int(len(files) * 0.7)
    n_validate = int(len(files) * 0.15)
    train = list(files[:n_train + 1])
    validate = list(files[n_train + 1:n_train + n_validate + 1])
    test = list(files[n_train + n_validate + 1:])
    data = {
        'train': train,
        'validate': validate,
        'test': test
    }
    np.save(os.path.join(type, 'filepath_{}.npy'.format(name)), data)
    # Copy every file of each subset into its own output folder.
    for subset_name, subset in (('train', train), ('validate', validate), ('test', test)):
        subset_dir = os.path.join(output_dir, subset_name)
        os.makedirs(subset_dir, exist_ok=True)
        for f in tqdm(subset):
            copyfile(f, os.path.join(subset_dir, '{}'.format(Path(f).name)))
if __name__ == '__main__':
    # Split the noise clips found under ./output into train/validate/test.
    split_data('output/', 'audioset', 'noise')
"os.makedirs",
"librosa.util.find_files",
"pathlib.Path",
"tqdm.tqdm",
"os.path.join",
"numpy.random.seed",
"numpy.random.shuffle"
] | [((127, 145), 'numpy.random.seed', 'np.random.seed', (['(87)'], {}), '(87)\n', (141, 145), True, 'import numpy as np\n'), ((197, 225), 'librosa.util.find_files', 'librosa.util.find_files', (['dir'], {}), '(dir)\n', (220, 225), False, 'import librosa\n'), ((266, 304), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (277, 304), False, 'import os\n'), ((310, 334), 'numpy.random.shuffle', 'np.random.shuffle', (['files'], {}), '(files)\n', (327, 334), True, 'import numpy as np\n'), ((1027, 1038), 'tqdm.tqdm', 'tqdm', (['train'], {}), '(train)\n', (1031, 1038), False, 'from tqdm import tqdm\n'), ((1209, 1223), 'tqdm.tqdm', 'tqdm', (['validate'], {}), '(validate)\n', (1213, 1223), False, 'from tqdm import tqdm\n'), ((1393, 1403), 'tqdm.tqdm', 'tqdm', (['test'], {}), '(test)\n', (1397, 1403), False, 'from tqdm import tqdm\n'), ((963, 996), 'os.path.join', 'os.path.join', (['output_dir', '"""train"""'], {}), "(output_dir, 'train')\n", (975, 996), False, 'import os\n'), ((1142, 1178), 'os.path.join', 'os.path.join', (['output_dir', '"""validate"""'], {}), "(output_dir, 'validate')\n", (1154, 1178), False, 'import os\n'), ((1330, 1362), 'os.path.join', 'os.path.join', (['output_dir', '"""test"""'], {}), "(output_dir, 'test')\n", (1342, 1362), False, 'import os\n'), ((1107, 1114), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (1111, 1114), False, 'from pathlib import Path\n'), ((1295, 1302), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (1299, 1302), False, 'from pathlib import Path\n'), ((1471, 1478), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (1475, 1478), False, 'from pathlib import Path\n')] |
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def cross_entropy_2d(predict, target):
    """Pixel-wise cross-entropy for dense prediction.

    Args:
        predict: logits of shape (n, c, h, w).
        target: integer labels of shape (n, h, w); pixels with a label
            outside [0, 200) are ignored.

    Returns:
        Scalar tensor: mean cross-entropy over the non-ignored pixels
        (a zero tensor of shape (1,) if every pixel is ignored).
    """
    assert not target.requires_grad
    assert predict.dim() == 4
    assert target.dim() == 3
    assert predict.size(0) == target.size(0), f"{predict.size(0)} vs {target.size(0)}"
    assert predict.size(2) == target.size(1), f"{predict.size(2)} vs {target.size(1)}"
    # BUG FIX: the failure message used target.size(3); target only has 3 dims,
    # so formatting the message itself raised IndexError and hid the real error.
    assert predict.size(3) == target.size(2), f"{predict.size(3)} vs {target.size(2)}"
    n, c, h, w = predict.size()
    # Mask of pixels whose label is a valid class id.
    target_mask = (target >= 0) * (target < 200)
    target = target[target_mask]
    # BUG FIX: the original guard `if not target.data.dim()` never fired because
    # boolean indexing always yields a 1-D tensor; an all-ignored batch then
    # produced NaN from an empty mean. Check for emptiness explicitly.
    if target.numel() == 0:
        return Variable(torch.zeros(1))
    # (n, c, h, w) -> (n, h, w, c) so the class dim is last, then keep only the
    # valid pixels and flatten to (num_kept, c).
    predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
    predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
    # reduction='mean' replaces the deprecated size_average=True (same result).
    loss = F.cross_entropy(predict, target, reduction='mean')
    return loss
def entropy_loss(v):
    """Normalised Shannon-entropy loss of per-pixel probability maps.

    input: batch_size x channels x h x w probability tensor
    output: a scalar tensor -- the summed entropy divided by
    (batch * h * w * log2(channels)), so a uniform prediction scores 1.
    """
    assert v.dim() == 4
    batch, channels, height, width = v.size()
    # 1e-30 guards against log2(0) on hard-zero probabilities.
    log_probs = torch.log2(v + 1e-30)
    total = torch.sum(torch.mul(v, log_probs))
    return -total / (batch * height * width * np.log2(channels))
| [
"torch.log2",
"numpy.log2",
"torch.zeros",
"torch.nn.functional.cross_entropy"
] | [((922, 973), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['predict', 'target'], {'size_average': '(True)'}), '(predict, target, size_average=True)\n', (937, 973), True, 'import torch.nn.functional as F\n'), ((745, 759), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (756, 759), False, 'import torch\n'), ((1295, 1305), 'numpy.log2', 'np.log2', (['c'], {}), '(c)\n', (1302, 1305), True, 'import numpy as np\n'), ((1256, 1277), 'torch.log2', 'torch.log2', (['(v + 1e-30)'], {}), '(v + 1e-30)\n', (1266, 1277), False, 'import torch\n')] |
from __future__ import division, print_function
from typing import List, Tuple, Callable
import numpy as np
import scipy
import matplotlib.pyplot as plt
class Perceptron:
def __init__(self, nb_features=2, max_iteration=10, margin=1e-4):
'''
Args :
nb_features : Number of features
max_iteration : maximum iterations. You algorithm should terminate after this
many iterations even if it is not converged
margin is the min value, we use this instead of comparing with 0 in the algorithm
'''
# number of classes
self.nb_features = 2
self.w = [0 for i in range(0,nb_features+1)]
self.margin = margin
self.max_iteration = max_iteration
def train(self, features: List[List[float]], labels: List[int]) -> bool:
'''
Args :
features : List of features. First element of each feature vector is 1
to account for bias
labels : label of each feature [-1,1]
Returns :
True/ False : return True if the algorithm converges else False.
'''
############################################################################
# This should take a list of features and labels [-1,1] and should update
# to correct weights w. Note that w[0] is the bias term. and first term is
# expected to be 1 --- accounting for the bias
############################################################################
# iterate max_iteration times
is_terminate = False
for i in range(0,self.max_iteration):
# iterate over all features
if is_terminate:
return True
is_terminate = True
for x,y in zip(features, labels):
x = np.array(x)
pred = np.array(self.w).transpose().dot(x)
# cast to -1,1
pred_sign = np.sign(pred)
if pred_sign != y:
# if predicted label is wrong
# update rule -> Wnew = W + YX/|X|
x_norm = np.linalg.norm(x)
self.w = np.array(self.w) + ((y * x) / (x_norm + self.margin))
is_terminate = False
return False
def reset(self):
self.w = [0 for i in range(0,self.nb_features+1)]
def predict(self, features: List[List[float]]) -> List[int]:
'''
Args :
features : List of features. First element of each feature vector is 1
to account for bias
Returns :
labels : List of integers of [-1,1]
'''
############################################################################
# This should take a list of features and use the learned
# weights to predict the label
############################################################################
return np.sign(np.inner(self.w.transpose(),features))
def get_weights(self) -> Tuple[List[float], float]:
return self.w
| [
"numpy.array",
"numpy.sign",
"numpy.linalg.norm"
] | [((1864, 1875), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1872, 1875), True, 'import numpy as np\n'), ((1994, 2007), 'numpy.sign', 'np.sign', (['pred'], {}), '(pred)\n', (2001, 2007), True, 'import numpy as np\n'), ((2177, 2194), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (2191, 2194), True, 'import numpy as np\n'), ((2224, 2240), 'numpy.array', 'np.array', (['self.w'], {}), '(self.w)\n', (2232, 2240), True, 'import numpy as np\n'), ((1899, 1915), 'numpy.array', 'np.array', (['self.w'], {}), '(self.w)\n', (1907, 1915), True, 'import numpy as np\n')] |
# -*- coding: UTF-8 -*-
"""
:Script: spaced.py
:Author: <EMAIL>
:Modified: 2017-04-11
:Purpose: tools for working with numpy arrays
:
:Original sources:
:----------------
:n_spaced : ...\arraytools\geom\n_spaced.py
: - n_spaced(L=0, B=0, R=10, T=10, min_space=1, num=10, verbose=True)
: Produce num points within the bounds specified by the extent (L,B,R,T)
: L(eft), B, R, T(op) - extent coordinates
: min_space - minimum spacing between points.
: num - number of points... this value may not be reached if the extent
: is too small and the spacing is large relative to it.
:
:arr_struct : ...\arcpytools.py
: - array_struct(a, fld_names=['X', 'Y'], dt=['<f8', '<f8']):
: Convert an array to a structured array
: a - an ndarray with shape at least (N,2)
: dt = dtype class
: names - names for the fields
:
:---------------------------------------------------------------------:
"""
# ---- imports, formats, constants ----
import sys
from textwrap import dedent
import numpy as np
# import arcpy
from arcpytools_pnt import array_fc, array_struct, tweet
# Custom numpy print formatting for this script: booleans render as ints,
# floats with 3 decimal places.
ft = {'bool': lambda x: repr(x.astype(np.int32)),
      'float_kind': '{: 0.3f}'.format}
np.set_printoptions(edgeitems=10, linewidth=80, precision=2, suppress=True,
                    threshold=100, formatter=ft)
np.ma.masked_print_option.set_display('-')  # masked values print as a single '-'
script = sys.argv[0]  # path of this script, should you need to locate it
# ---------------------------------------------------------------------------
# ---- from arraytools.geom ----
def n_spaced(L=0, B=0, R=10, T=10, min_space=1, num=10, verbose=True):
"""Produce num points within the bounds specified by the extent (L,B,R,T)
:Requires:
:--------
: L(eft), B, R, T(op) - extent coordinates
: min_space - minimum spacing between points.
: num - number of points... this value may not be reached if the extent
: is too small and the spacing is large relative to it.
"""
#
def _pnts(L, B, R, T, num):
"""Create the points"""
xs = (R-L) * np.random.random_sample(size=num) + L
ys = (T-B) * np.random.random_sample(size=num) + B
return np.array(list(zip(xs, ys)))
def _not_closer(a, min_space=1):
"""Find the points that are greater than min_space in the extent."""
b = a.reshape(np.prod(a.shape[:-1]), 1, a.shape[-1])
diff = b - a
dist = np.einsum('ijk,ijk->ij', diff, diff)
dist_arr = np.sqrt(dist).squeeze()
case = ~(np.triu(dist_arr <= min_space, 1)).any(0)
return a[case]
#
cnt = 1
n = num * 2 # check double the number required as a check
result = 0
frmt = "Examined: {} Found: {} Need: {}"
a0 = []
while (result < num) and (cnt < 6): # keep using random points
a = _pnts(L, B, R, T, num)
if cnt > 1:
a = np.vstack((a0, a))
a0 = _not_closer(a, min_space)
result = len(a0)
if verbose:
print(dedent(frmt).format(n, result, num))
cnt += 1
n += n
# perform the final sample and calculation
use = min(num, result)
a0 = a0[:use] # could use a0 = np.random.shuffle(a0)[:num]
a0 = a0[np.argsort(a0[:, 0])]
return a0
# ---- main section ---------------------------------------------------------
#
# ---- Tool parameters (passed on the command line, e.g. by ArcGIS) ----
aoi = sys.argv[1]  # '340000 5020000 344999.999999999 5025000 NaN NaN NaN NaN'
min_space = int(sys.argv[2])
num = int(sys.argv[3])
SR = sys.argv[4]
out_fc = sys.argv[5]
frmt = """\n
AOI extent for points...
{}
Minimum spacing.... {}
Number of points... {}
Spatial reference.. {}
Output featureclass.. {}\n
"""
args = [aoi, min_space, num, SR, out_fc]
msg = frmt.format(*args)
tweet(msg)  # echo the parameters back to the user/geoprocessing window
# ---- perform the point creation ----
aoi = aoi.split(" ")[:4]  # extent is returned as a string; keep L B R T only
ext = [round(float(i)) for i in aoi]
L, B, R, T = ext
a = n_spaced(L, B, R, T, min_space, num, verbose=False)
# Duplicate the XY columns so the output carries both field sets below.
all_flds = ['X', 'Y', 'x_coord', 'y_coord']
xy_flds = all_flds[:2]
xy_dt = ['<f8', '<f8', 'float', 'float']
a = np.c_[(a, a)]
z = array_struct(a, fld_names=all_flds, dt=xy_dt)
# z = np.zeros((len(a)), dtype=[('X', '<f8'), ('Y', '<f8')])
# fld_names = ('X', 'Y')
# z['X'] = a[:, 0]
# z['Y'] = a[:, 1]
# Write the structured array out as a featureclass.
out_fc = array_fc(z, out_fc, xy_flds, SR)
# ----------------------------------------------------------------------
# __main__ .... code section
if __name__ == "__main__":
    """Optionally...
    : - print the script source name.
    : - run the _demo
    """
    # All real work happens at import/run time above; nothing extra to do here.
    # print("Script... {}".format(script))
    pass
| [
"numpy.prod",
"textwrap.dedent",
"numpy.random.random_sample",
"numpy.sqrt",
"arcpytools_pnt.array_struct",
"numpy.set_printoptions",
"arcpytools_pnt.tweet",
"numpy.argsort",
"numpy.einsum",
"numpy.vstack",
"arcpytools_pnt.array_fc",
"numpy.triu",
"numpy.ma.masked_print_option.set_display"
] | [((1203, 1311), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'edgeitems': '(10)', 'linewidth': '(80)', 'precision': '(2)', 'suppress': '(True)', 'threshold': '(100)', 'formatter': 'ft'}), '(edgeitems=10, linewidth=80, precision=2, suppress=True,\n threshold=100, formatter=ft)\n', (1222, 1311), True, 'import numpy as np\n'), ((1330, 1372), 'numpy.ma.masked_print_option.set_display', 'np.ma.masked_print_option.set_display', (['"""-"""'], {}), "('-')\n", (1367, 1372), True, 'import numpy as np\n'), ((3818, 3828), 'arcpytools_pnt.tweet', 'tweet', (['msg'], {}), '(msg)\n', (3823, 3828), False, 'from arcpytools_pnt import array_fc, array_struct, tweet\n'), ((4188, 4233), 'arcpytools_pnt.array_struct', 'array_struct', (['a'], {'fld_names': 'all_flds', 'dt': 'xy_dt'}), '(a, fld_names=all_flds, dt=xy_dt)\n', (4200, 4233), False, 'from arcpytools_pnt import array_fc, array_struct, tweet\n'), ((4372, 4404), 'arcpytools_pnt.array_fc', 'array_fc', (['z', 'out_fc', 'xy_flds', 'SR'], {}), '(z, out_fc, xy_flds, SR)\n', (4380, 4404), False, 'from arcpytools_pnt import array_fc, array_struct, tweet\n'), ((2482, 2518), 'numpy.einsum', 'np.einsum', (['"""ijk,ijk->ij"""', 'diff', 'diff'], {}), "('ijk,ijk->ij', diff, diff)\n", (2491, 2518), True, 'import numpy as np\n'), ((3301, 3321), 'numpy.argsort', 'np.argsort', (['a0[:, 0]'], {}), '(a0[:, 0])\n', (3311, 3321), True, 'import numpy as np\n'), ((2405, 2426), 'numpy.prod', 'np.prod', (['a.shape[:-1]'], {}), '(a.shape[:-1])\n', (2412, 2426), True, 'import numpy as np\n'), ((2951, 2969), 'numpy.vstack', 'np.vstack', (['(a0, a)'], {}), '((a0, a))\n', (2960, 2969), True, 'import numpy as np\n'), ((2122, 2155), 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': 'num'}), '(size=num)\n', (2145, 2155), True, 'import numpy as np\n'), ((2182, 2215), 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': 'num'}), '(size=num)\n', (2205, 2215), True, 'import numpy as np\n'), ((2539, 2552), 
'numpy.sqrt', 'np.sqrt', (['dist'], {}), '(dist)\n', (2546, 2552), True, 'import numpy as np\n'), ((2581, 2614), 'numpy.triu', 'np.triu', (['(dist_arr <= min_space)', '(1)'], {}), '(dist_arr <= min_space, 1)\n', (2588, 2614), True, 'import numpy as np\n'), ((3076, 3088), 'textwrap.dedent', 'dedent', (['frmt'], {}), '(frmt)\n', (3082, 3088), False, 'from textwrap import dedent\n')] |
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy
import numpy as np
import os
# Result directories for the two runs being compared: the '_aux' run is the
# baseline, the other is ours.
dirs = ['/n/fs/visualai-scr/yutingy/boids_res_20_64_validate_switch_label/test',
        '/n/fs/visualai-scr/yutingy/boids_res_20_64_validate_switch_label_aux/test'
       ]
errs = {}
# Figure 1: per-step L2 error for both methods.
fig = plt.figure()
for dir in dirs:
    err = np.load(os.path.join(dir, 'all_seq_err.npy'))
    err = np.mean(err, 0)  # average over sequences; keep the per-step axis
    if 'aux' in dir:
        label = 'baseline'
    else:
        label = 'ours'
    errs[label] = err
    plt.plot(np.arange(err.shape[0]), err, label=label)
plt.legend()
plt.xlabel('# of simulation steps')
plt.ylabel('L2 error')
plt.savefig('result_figs/boids_seq_err_len_150.png')
plt.close(fig)
# Figure 2: ratio of our error to the baseline's at every step.
fig = plt.figure()
plt.plot(np.arange(errs['ours'].shape[0]), errs['ours'] / errs['baseline'])
plt.xlabel('# of simulation steps')
plt.ylabel('ours err percentage relative to baseline')
plt.title('ratio at length 100 is %f' % (errs['ours'][-1] / errs['baseline'][-1]))
plt.savefig('result_figs/boids_seq_err_ratio_len_150.png')
plt.close(fig)
"numpy.mean",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"os.path.join",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend"
] | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((305, 317), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (315, 317), True, 'from matplotlib import pyplot as plt\n'), ((600, 612), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (610, 612), True, 'from matplotlib import pyplot as plt\n'), ((613, 648), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""# of simulation steps"""'], {}), "('# of simulation steps')\n", (623, 648), True, 'from matplotlib import pyplot as plt\n'), ((649, 671), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""L2 error"""'], {}), "('L2 error')\n", (659, 671), True, 'from matplotlib import pyplot as plt\n'), ((673, 725), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""result_figs/boids_seq_err_len_150.png"""'], {}), "('result_figs/boids_seq_err_len_150.png')\n", (684, 725), True, 'from matplotlib import pyplot as plt\n'), ((726, 740), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (735, 740), True, 'from matplotlib import pyplot as plt\n'), ((749, 761), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (759, 761), True, 'from matplotlib import pyplot as plt\n'), ((838, 873), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""# of simulation steps"""'], {}), "('# of simulation steps')\n", (848, 873), True, 'from matplotlib import pyplot as plt\n'), ((874, 928), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ours err percentage relative to baseline"""'], {}), "('ours err percentage relative to baseline')\n", (884, 928), True, 'from matplotlib import pyplot as plt\n'), ((929, 1016), 'matplotlib.pyplot.title', 'plt.title', (["('ratio at length 100 is %f' % (errs['ours'][-1] / errs['baseline'][-1]))"], {}), "('ratio at length 100 is %f' % (errs['ours'][-1] / errs['baseline'\n ][-1]))\n", (938, 1016), True, 'from matplotlib import pyplot as plt\n'), ((1012, 1070), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""result_figs/boids_seq_err_ratio_len_150.png"""'], {}), "('result_figs/boids_seq_err_ratio_len_150.png')\n", (1023, 1070), True, 'from matplotlib import pyplot as plt\n'), ((1071, 1085), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (1080, 1085), True, 'from matplotlib import pyplot as plt\n'), ((401, 416), 'numpy.mean', 'np.mean', (['err', '(0)'], {}), '(err, 0)\n', (408, 416), True, 'import numpy as np\n'), ((771, 803), 'numpy.arange', 'np.arange', (["errs['ours'].shape[0]"], {}), "(errs['ours'].shape[0])\n", (780, 803), True, 'import numpy as np\n'), ((353, 389), 'os.path.join', 'os.path.join', (['dir', '"""all_seq_err.npy"""'], {}), "(dir, 'all_seq_err.npy')\n", (365, 389), False, 'import os\n'), ((552, 575), 'numpy.arange', 'np.arange', (['err.shape[0]'], {}), '(err.shape[0])\n', (561, 575), True, 'import numpy as np\n')] |
import numpy as np
from random import random
from numba import njit
import random as rand
import matplotlib.pyplot as plt
class RotSurCode():
    """ Rotated surface code of linear size `size`.

    Qubit errors live in `qubit_matrix` (size x size, uint8) with the
    encoding 0 = I, 1 = X, 2 = Y, 3 = Z.  Stabiliser measurement outcomes
    are cached in `plaquette_defects` ((size+1) x (size+1)).
    """
    nbr_eq_classes = 4  # logical equivalence classes: I, X, Y, Z
    def __init__(self, size):
        self.system_size = size
        self.qubit_matrix = np.zeros((self.system_size, self.system_size), dtype=np.uint8)
        self.plaquette_defects = np.zeros((size + 1, size + 1))
    def generate_random_error(self, p_x, p_y, p_z):
        """ Draw an independent Pauli error on every qubit with the given
        per-Pauli probabilities, then recompute the syndrome. """
        size = self.system_size
        for i in range(size):
            for j in range(size):
                q = 0
                r = rand.random()
                # The three ranges below are disjoint, so the plain `if`s act
                # like elif; r landing exactly on a boundary leaves q = 0
                # (probability zero for a continuous RNG).
                if r < p_z:
                    q = 3
                if p_z < r < (p_z + p_x):
                    q = 1
                if (p_z + p_x) < r < (p_z + p_x + p_y):
                    q = 2
                self.qubit_matrix[i, j] = q
        self.syndrome()
    def generate_zbiased_error(self, p_error, eta):  # Z-biased noise
        """ Draw Z-biased noise: total error rate p_error, with Z errors
        eta times as likely as X (and Y) errors; then recompute the syndrome. """
        eta = eta
        p = p_error
        p_z = p * eta / (eta + 1)
        p_x = p / (2 * (eta + 1))
        p_y = p_x
        size = self.system_size
        for i in range(size):
            for j in range(size):
                q = 0
                r = rand.random()
                if r < p_z:
                    q = 3
                elif p_z < r < (p_z + p_x):
                    q = 1
                elif (p_z + p_x) < r < (p_z + p_x + p_y):
                    q = 2
                self.qubit_matrix[i, j] = q
        self.syndrome()
    # def generate_random_error(self, p_error, eta):  # Y-biased noise
    #     eta = eta
    #     p = p_error
    #     p_y = p * eta / (eta + 1)
    #     p_x = p / (2 * (eta + 1))
    #     p_z = p_x
    #     size = self.system_size
    #     for i in range(size):
    #         for j in range(size):
    #             q = 0
    #             r = rand.random()
    #             if r < p_y:
    #                 q = 2
    #             elif p_y < r < (p_y + p_x):
    #                 q = 1
    #             elif (p_y + p_x) < r < (p_y + p_x + p_z):
    #                 q = 3
    #             self.qubit_matrix[i, j] = q
    def chain_lengths(self):
        """ Return the counts (nx, ny, nz) of X, Y and Z errors. """
        nx = np.count_nonzero(self.qubit_matrix[:, :] == 1)
        ny = np.count_nonzero(self.qubit_matrix[:, :] == 2)
        nz = np.count_nonzero(self.qubit_matrix[:, :] == 3)
        return nx, ny, nz
    def count_errors(self):
        """ Total number of non-identity qubits. """
        return _count_errors(self.qubit_matrix)
    def apply_logical(self, operator: int, X_pos=0, Z_pos=0):
        """ Apply logical operator 0-3 (I/X/Y/Z); see _apply_logical. """
        return _apply_logical(self.qubit_matrix, operator, X_pos, Z_pos)
    def apply_stabilizer(self, row: int, col: int, operator: int):
        """ Apply one stabiliser; delegates to the jitted helper. """
        return _apply_stabilizer(self.qubit_matrix, row, col, operator)
    def apply_random_logical(self):
        """ Apply a uniformly random logical operator. """
        return _apply_random_logical(self.qubit_matrix)
    def apply_random_stabilizer(self):
        """ Apply a uniformly random stabiliser. """
        return _apply_random_stabilizer(self.qubit_matrix)
    def apply_stabilizers_uniform(self, p=0.5):
        """ Apply each stabiliser independently with probability p. """
        return _apply_stabilizers_uniform(self.qubit_matrix, p)
    def define_equivalence_class(self):
        """ Return the logical equivalence class (0-3) of the current error. """
        return _define_equivalence_class(self.qubit_matrix)
    def to_class(self, eq):
        """ Return a qubit matrix in equivalence class `eq`, obtained by
        applying the logical operator that maps the current class to eq. """
        eq_class = self.define_equivalence_class()
        op = eq_class ^ eq
        return self.apply_logical(op)[0]
    def syndrome(self):
        """ Recompute plaquette_defects from qubit_matrix: interior (full)
        plaquettes first, then the boundary half-plaquettes on the four sides. """
        size = self.qubit_matrix.shape[1]
        qubit_matrix = self.qubit_matrix
        # Interior plaquettes (operator flag 1 = full 2x2 plaquette).
        for i in range(size-1):
            for j in range(size-1):
                self.plaquette_defects[i+1, j+1] = _find_syndrome(qubit_matrix, i, j, 1)
        # Boundary half-plaquettes (operator flag 3); j selects the side:
        # 0 = top, 1 = right, 2 = bottom, 3 = left.
        for i in range(int((size - 1)/2)):
            for j in range(4):
                row = 0
                col = 0
                if j == 0:
                    row = 0
                    col = 2 * i + 2
                elif j == 1:
                    row = 2 * i + 2
                    col = size
                elif j == 2:
                    row = size
                    col = 2 * i + 1
                elif j == 3:
                    row = 2 * i + 1
                    col = 0
                self.plaquette_defects[row, col] = _find_syndrome(qubit_matrix, i, j, 3)
    def plot(self, title):
        """ Render the lattice, the Pauli errors and the plaquette defects,
        and save the figure to plots/graph_<title>.png. """
        system_size = self.system_size
        xLine = np.linspace(0, system_size - 1, system_size)
        a = range(system_size)
        X, Y = np.meshgrid(a, a)
        XLine, YLine = np.meshgrid(a, xLine)
        plaquette_defect_coordinates = np.where(self.plaquette_defects)
        # Coordinates of each error type for the marker overlays below.
        x_error = np.where(self.qubit_matrix[:, :] == 1)
        y_error = np.where(self.qubit_matrix[:, :] == 2)
        z_error = np.where(self.qubit_matrix[:, :] == 3)
        def generate_semicircle(center_x, center_y, radius, stepsize=0.1):
            # Closed semicircle outline used for the boundary stabilisers.
            x = np.arange(center_x, center_x + radius + stepsize, stepsize)
            y = np.sqrt(radius ** 2 - x ** 2)
            x = np.concatenate([x, x[::-1]])
            y = np.concatenate([y, -y[::-1]])
            return x, y + center_y
        markersize_qubit = 15
        markersize_excitation = 7
        markersize_symbols = 7
        linewidth = 2
        # Plot grid lines
        ax = plt.subplot(111)
        x, y = generate_semicircle(0, 1, 0.5, 0.01)
        # Semicircular boundary plaquettes on all four sides.
        for i in range(int((system_size - 1) / 2)):
            ax.plot(y + 0.5 + i * 2, x + system_size - 1, color='black', linewidth=linewidth)
            ax.plot(-y + 1.5 + 2 * i, -x, color='black', linewidth=linewidth)
            ax.plot(x + system_size - 1, y - 0.5 + i * 2, color='black', linewidth=linewidth)
            ax.plot(-x, -y + 0.5 + system_size - 1 - 2 * i, color='black', linewidth=linewidth)
        ax.plot(XLine, YLine, 'black', linewidth=linewidth)
        ax.plot(YLine, XLine, 'black', linewidth=linewidth)
        # Qubits as white circles; errors as X/Y/Z letters on top.
        ax.plot(X, Y, 'o', color='black', markerfacecolor='white', markersize=markersize_qubit + 1)
        ax.plot(x_error[1], system_size - 1 - x_error[0], 'o', color='blue', markersize=markersize_symbols, marker=r'$X$')
        ax.plot(y_error[1], system_size - 1 - y_error[0], 'o', color='blue', markersize=markersize_symbols, marker=r'$Y$')
        ax.plot(z_error[1], system_size - 1 - z_error[0], 'o', color='blue', markersize=markersize_symbols, marker=r'$Z$')
        # Defects: boundary ones are nudged outwards so they sit on the
        # semicircles rather than on the (nonexistent) outer plaquette centre.
        for i in range(len(plaquette_defect_coordinates[1])):
            if plaquette_defect_coordinates[1][i] == 0:
                ax.plot(plaquette_defect_coordinates[1][i] - 0.5 + 0.25, system_size - plaquette_defect_coordinates[0][i] - 0.5, 'o', color='red', label="flux", markersize=markersize_excitation)
            elif plaquette_defect_coordinates[0][i] == 0:
                ax.plot(plaquette_defect_coordinates[1][i] - 0.5, system_size - plaquette_defect_coordinates[0][i] - 0.5 - 0.25, 'o', color='red', label="flux", markersize=markersize_excitation)
            elif plaquette_defect_coordinates[1][i] == system_size:
                ax.plot(plaquette_defect_coordinates[1][i] - 0.5 - 0.25, system_size - plaquette_defect_coordinates[0][i] - 0.5, 'o', color='red', label="flux", markersize=markersize_excitation)
            elif plaquette_defect_coordinates[0][i] == system_size:
                ax.plot(plaquette_defect_coordinates[1][i] - 0.5, system_size - plaquette_defect_coordinates[0][i] - 0.5 + 0.25, 'o', color='red', label="flux", markersize=markersize_excitation)
            else:
                ax.plot(plaquette_defect_coordinates[1][i] - 0.5, system_size - plaquette_defect_coordinates[0][i] - 0.5, 'o', color='red', label="flux", markersize=markersize_excitation)
        # ax.plot(plaquette_defect_coordinates[1] - 0.5, system_size - plaquette_defect_coordinates[0] - 0.5, 'o', color='red', label="flux", markersize=markersize_excitation)
        ax.axis('off')
        plt.axis('equal')
        #plt.show()
        plt.savefig('plots/graph_'+str(title)+'.png')
        # plt.close()
@njit('(uint8[:,:],)')
def _count_errors(qubit_matrix):
    """Total number of qubits carrying a non-identity Pauli (entry != 0)."""
    return (qubit_matrix != 0).sum()
@njit('(uint8[:,:], int64, int64, int64)')
def _find_syndrome(qubit_matrix, row: int, col: int, operator: int):
    """Return the parity (0 or 1) of the stabilizer at (row, col).

    operator == 1 checks a full 2x2 plaquette; operator == 3 checks one of
    the half (boundary) stabilizers, with ``col`` selecting the boundary.
    The syndrome is 1 when an odd number of the touched qubits carry an
    error that differs from the stabilizer's own Pauli type ``op``.

    Improvements over the original: the function is read-only, so the
    full-matrix ``np.copy`` was removed, and the nested ``flip`` helper was
    replaced by an XOR.
    """
    size = qubit_matrix.shape[1]
    defect = 0
    op = 0
    if operator == 1:  # full stabilizer: the four qubits of a 2x2 plaquette
        qarray = [[0 + row, 0 + col], [0 + row, 1 + col], [1 + row, 0 + col], [1 + row, 1 + col]]
        # X- and Z-type plaquettes alternate in a checkerboard pattern.
        if row % 2 == 0:
            if col % 2 == 0:
                op = 1
            else:
                op = 3
        else:
            if col % 2 == 0:
                op = 3
            else:
                op = 1
    elif operator == 3:  # half stabilizer on one of the four boundaries
        if col == 0:
            op = 1
            qarray = [[0, row*2 + 1], [0, row*2 + 2]]
        elif col == 1:
            op = 3
            qarray = [[row*2 + 1, size - 1], [row*2 + 2, size - 1]]
        elif col == 2:
            op = 1
            qarray = [[size - 1, row*2], [size - 1, row*2 + 1]]
        elif col == 3:
            op = 3
            qarray = [[row*2, 0], [row*2 + 1, 0]]
    for i in qarray:
        old_qubit = qubit_matrix[i[0], i[1]]
        # An error anti-commutes with the stabilizer unless it is the
        # identity (0) or the same Pauli as the stabilizer itself.
        if old_qubit != 0 and old_qubit != op:
            defect = defect ^ 1
    return defect
@njit('(uint8[:,:], int64, int64, int64)')  # Z-biased noise
def _apply_logical(qubit_matrix, operator: int, X_pos=0, Z_pos=0):
    """Apply a logical operator (1=X, 2=Y, 3=Z, 0=identity) to a copy of the matrix.

    Returns the new matrix and the change in error counts per Pauli type
    (delta X, delta Y, delta Z).
    """
    result_qubit_matrix = np.copy(qubit_matrix)
    # List to store how errors redistribute when the logical is applied;
    # index = Pauli code (0 unused in the returned tuple).
    n_eq = [0, 0, 0, 0]
    if operator == 0:
        return result_qubit_matrix, (0, 0, 0)
    size = qubit_matrix.shape[0]
    # Y (operator == 2) applies both an X string and a Z string.
    do_X = (operator == 1 or operator == 2)
    do_Z = (operator == 3 or operator == 2)
    if do_X:
        # X string down column X_pos; XOR with 1 composes the Paulis.
        for i in range(size):
            old_qubit = result_qubit_matrix[i, X_pos]
            new_qubit = 1 ^ old_qubit
            result_qubit_matrix[i, X_pos] = new_qubit
            n_eq[old_qubit] -= 1
            n_eq[new_qubit] += 1
    if do_Z:
        # Z string along row Z_pos; XOR with 3 composes the Paulis.
        for i in range(size):
            old_qubit = result_qubit_matrix[Z_pos, i]
            new_qubit = 3 ^ old_qubit
            result_qubit_matrix[Z_pos, i] = new_qubit
            n_eq[old_qubit] -= 1
            n_eq[new_qubit] += 1
    return result_qubit_matrix, (n_eq[1], n_eq[2], n_eq[3])
@njit('(uint8[:,:],)')
def _apply_random_logical(qubit_matrix):
    """Apply a uniformly random logical operator at a random position."""
    size = qubit_matrix.shape[0]
    # op in {0, 1, 2, 3}: identity, X, Y, Z.
    op = int(random() * 4)
    # A column is only drawn when an X component is applied (X or Y),
    # and a row only when a Z component is applied (Z or Y).
    X_pos = int(random() * size) if (op == 1 or op == 2) else 0
    Z_pos = int(random() * size) if (op == 3 or op == 2) else 0
    return _apply_logical(qubit_matrix, op, X_pos, Z_pos)
@njit('(uint8[:,:], int64, int64, int64)')
def _apply_stabilizer(qubit_matrix, row: int, col: int, operator: int):
    """Apply the stabilizer at (row, col) to a copy of the matrix.

    operator == 1 applies a full 2x2 plaquette, operator == 3 a half
    (boundary) stabilizer selected by ``col``. Returns the new matrix and
    the change in error counts per Pauli type (delta X, delta Y, delta Z).
    """
    size = qubit_matrix.shape[0]
    result_qubit_matrix = np.copy(qubit_matrix)
    # List to store how errors redistribute when the stabilizer is applied;
    # index = Pauli code (0 unused in the returned tuple).
    n_eq = [0, 0, 0, 0]
    op = 0
    if operator == 1:  # full
        qarray = [[0 + row, 0 + col], [0 + row, 1 + col], [1 + row, 0 + col], [1 + row, 1 + col]]
        # X- and Z-type plaquettes alternate in a checkerboard pattern.
        if row % 2 == 0:
            if col % 2 == 0:
                op = 1
            else:
                op = 3
        else:
            if col % 2 == 0:
                op = 3
            else:
                op = 1
    elif operator == 3:  # half
        # ``col`` selects which of the four boundaries the half stabilizer
        # sits on; each touches exactly two qubits.
        if col == 0:
            op = 1
            qarray = [[0, row*2 + 1], [0, row*2 + 2]]
        elif col == 1:
            op = 3
            qarray = [[row*2 + 1, size - 1], [row*2 + 2, size - 1]]
        elif col == 2:
            op = 1
            qarray = [[size - 1, row*2], [size - 1, row*2 + 1]]
        elif col == 3:
            op = 3
            qarray = [[row*2, 0], [row*2 + 1, 0]]
    for i in qarray:
        old_qubit = result_qubit_matrix[i[0], i[1]]
        # XOR with the stabilizer's Pauli code composes the operators.
        new_qubit = op ^ old_qubit
        result_qubit_matrix[i[0], i[1]] = new_qubit
        n_eq[old_qubit] -= 1
        n_eq[new_qubit] += 1
    return result_qubit_matrix, (n_eq[1], n_eq[2], n_eq[3])
@njit('(uint8[:,:],)')
def _apply_random_stabilizer(qubit_matrix):
    """Apply one stabilizer chosen at random (full or half plaquette)."""
    size = qubit_matrix.shape[0]
    # Candidate position for a full stabilizer.
    rows = int((size-1)*random())
    cols = int((size-1)*random())
    # Candidate position for a half (boundary) stabilizer.
    rows2 = int(((size - 1)/2) * random())
    cols2 = int(4 * random())
    # phalf weights the full/half choice by how many of each kind exist;
    # NOTE(review): looks like the fraction of half stabilizers — confirm.
    phalf = (size**2 - (size-1)**2 - 1)/(size**2-1)
    if rand.random() > phalf:
        # operator = 1 = full stabilizer
        return _apply_stabilizer(qubit_matrix, rows, cols, 1)
    else:
        # operator = 3 = half stabilizer
        return _apply_stabilizer(qubit_matrix, rows2, cols2, 3)
@njit('(uint8[:,:],)')
def _define_equivalence_class(qubit_matrix):
    """Return the logical equivalence class (0-3) of the error configuration.

    The class is defined by the parities of X-type errors (codes 1, 2) on
    the first row and Z-type errors (codes 3, 2) on the first column.
    """
    x_errors = np.count_nonzero(qubit_matrix[0, :] == 1)
    x_errors += np.count_nonzero(qubit_matrix[0, :] == 2)
    z_errors = np.count_nonzero(qubit_matrix[:, 0] == 3)
    z_errors += np.count_nonzero(qubit_matrix[:, 0] == 2)
    # (x even, z even) -> 0, (even, odd) -> 3, (odd, even) -> 1, (odd, odd) -> 2
    if x_errors % 2 == 0:
        return 0 if z_errors % 2 == 0 else 3
    return 1 if z_errors % 2 == 0 else 2
def _apply_stabilizers_uniform(qubit_matrix, p=0.5):
    """Apply every stabilizer independently with probability ``p``.

    Used to randomize a chain within its equivalence class; returns a new
    matrix and leaves the input untouched.
    """
    size = qubit_matrix.shape[0]
    result_qubit_matrix = np.copy(qubit_matrix)
    # Apply full stabilizers: one Bernoulli(p) draw per (size-1)x(size-1) site.
    random_stabilizers = np.random.rand(size-1, size-1)
    random_stabilizers = np.less(random_stabilizers, p)
    it = np.nditer(random_stabilizers, flags=['multi_index'])
    while not it.finished:
        if it[0]:
            row, col = it.multi_index
            result_qubit_matrix, _ = _apply_stabilizer(result_qubit_matrix, row, col, 1)
        it.iternext()
    # Apply half stabilizers: (size-1)/2 positions on each of the 4 boundaries.
    random_stabilizers = np.random.rand(int((size - 1)/2), 4)
    random_stabilizers = np.less(random_stabilizers, p)
    it = np.nditer(random_stabilizers, flags=['multi_index'])
    while not it.finished:
        if it[0]:
            row, col = it.multi_index
            result_qubit_matrix, _ = _apply_stabilizer(result_qubit_matrix, row, col, 3)
        it.iternext()
return result_qubit_matrix | [
"numpy.copy",
"numpy.less",
"numpy.sqrt",
"numpy.random.rand",
"numpy.where",
"numpy.nditer",
"numba.njit",
"matplotlib.pyplot.axis",
"numpy.count_nonzero",
"numpy.zeros",
"numpy.linspace",
"numpy.concatenate",
"numpy.meshgrid",
"random.random",
"matplotlib.pyplot.subplot",
"numpy.aran... | [((7787, 7808), 'numba.njit', 'njit', (['"""(uint8[:,:],)"""'], {}), "('(uint8[:,:],)')\n", (7791, 7808), False, 'from numba import njit\n'), ((7887, 7928), 'numba.njit', 'njit', (['"""(uint8[:,:], int64, int64, int64)"""'], {}), "('(uint8[:,:], int64, int64, int64)')\n", (7891, 7928), False, 'from numba import njit\n'), ((9171, 9212), 'numba.njit', 'njit', (['"""(uint8[:,:], int64, int64, int64)"""'], {}), "('(uint8[:,:], int64, int64, int64)')\n", (9175, 9212), False, 'from numba import njit\n'), ((10241, 10262), 'numba.njit', 'njit', (['"""(uint8[:,:],)"""'], {}), "('(uint8[:,:],)')\n", (10245, 10262), False, 'from numba import njit\n'), ((10612, 10653), 'numba.njit', 'njit', (['"""(uint8[:,:], int64, int64, int64)"""'], {}), "('(uint8[:,:], int64, int64, int64)')\n", (10616, 10653), False, 'from numba import njit\n'), ((12000, 12021), 'numba.njit', 'njit', (['"""(uint8[:,:],)"""'], {}), "('(uint8[:,:],)')\n", (12004, 12021), False, 'from numba import njit\n'), ((12543, 12564), 'numba.njit', 'njit', (['"""(uint8[:,:],)"""'], {}), "('(uint8[:,:],)')\n", (12547, 12564), False, 'from numba import njit\n'), ((7853, 7883), 'numpy.count_nonzero', 'np.count_nonzero', (['qubit_matrix'], {}), '(qubit_matrix)\n', (7869, 7883), True, 'import numpy as np\n'), ((8158, 8179), 'numpy.copy', 'np.copy', (['qubit_matrix'], {}), '(qubit_matrix)\n', (8165, 8179), True, 'import numpy as np\n'), ((9329, 9350), 'numpy.copy', 'np.copy', (['qubit_matrix'], {}), '(qubit_matrix)\n', (9336, 9350), True, 'import numpy as np\n'), ((10786, 10807), 'numpy.copy', 'np.copy', (['qubit_matrix'], {}), '(qubit_matrix)\n', (10793, 10807), True, 'import numpy as np\n'), ((12626, 12667), 'numpy.count_nonzero', 'np.count_nonzero', (['(qubit_matrix[0, :] == 1)'], {}), '(qubit_matrix[0, :] == 1)\n', (12642, 12667), True, 'import numpy as np\n'), ((12684, 12725), 'numpy.count_nonzero', 'np.count_nonzero', (['(qubit_matrix[0, :] == 2)'], {}), '(qubit_matrix[0, :] == 2)\n', (12700, 12725), 
True, 'import numpy as np\n'), ((12742, 12783), 'numpy.count_nonzero', 'np.count_nonzero', (['(qubit_matrix[:, 0] == 3)'], {}), '(qubit_matrix[:, 0] == 3)\n', (12758, 12783), True, 'import numpy as np\n'), ((12800, 12841), 'numpy.count_nonzero', 'np.count_nonzero', (['(qubit_matrix[:, 0] == 2)'], {}), '(qubit_matrix[:, 0] == 2)\n', (12816, 12841), True, 'import numpy as np\n'), ((13165, 13186), 'numpy.copy', 'np.copy', (['qubit_matrix'], {}), '(qubit_matrix)\n', (13172, 13186), True, 'import numpy as np\n'), ((13242, 13276), 'numpy.random.rand', 'np.random.rand', (['(size - 1)', '(size - 1)'], {}), '(size - 1, size - 1)\n', (13256, 13276), True, 'import numpy as np\n'), ((13298, 13328), 'numpy.less', 'np.less', (['random_stabilizers', 'p'], {}), '(random_stabilizers, p)\n', (13305, 13328), True, 'import numpy as np\n'), ((13339, 13391), 'numpy.nditer', 'np.nditer', (['random_stabilizers'], {'flags': "['multi_index']"}), "(random_stabilizers, flags=['multi_index'])\n", (13348, 13391), True, 'import numpy as np\n'), ((13703, 13733), 'numpy.less', 'np.less', (['random_stabilizers', 'p'], {}), '(random_stabilizers, p)\n', (13710, 13733), True, 'import numpy as np\n'), ((13743, 13795), 'numpy.nditer', 'np.nditer', (['random_stabilizers'], {'flags': "['multi_index']"}), "(random_stabilizers, flags=['multi_index'])\n", (13752, 13795), True, 'import numpy as np\n'), ((258, 320), 'numpy.zeros', 'np.zeros', (['(self.system_size, self.system_size)'], {'dtype': 'np.uint8'}), '((self.system_size, self.system_size), dtype=np.uint8)\n', (266, 320), True, 'import numpy as np\n'), ((354, 384), 'numpy.zeros', 'np.zeros', (['(size + 1, size + 1)'], {}), '((size + 1, size + 1))\n', (362, 384), True, 'import numpy as np\n'), ((2166, 2212), 'numpy.count_nonzero', 'np.count_nonzero', (['(self.qubit_matrix[:, :] == 1)'], {}), '(self.qubit_matrix[:, :] == 1)\n', (2182, 2212), True, 'import numpy as np\n'), ((2226, 2272), 'numpy.count_nonzero', 'np.count_nonzero', (['(self.qubit_matrix[:, 
:] == 2)'], {}), '(self.qubit_matrix[:, :] == 2)\n', (2242, 2272), True, 'import numpy as np\n'), ((2286, 2332), 'numpy.count_nonzero', 'np.count_nonzero', (['(self.qubit_matrix[:, :] == 3)'], {}), '(self.qubit_matrix[:, :] == 3)\n', (2302, 2332), True, 'import numpy as np\n'), ((4205, 4249), 'numpy.linspace', 'np.linspace', (['(0)', '(system_size - 1)', 'system_size'], {}), '(0, system_size - 1, system_size)\n', (4216, 4249), True, 'import numpy as np\n'), ((4296, 4313), 'numpy.meshgrid', 'np.meshgrid', (['a', 'a'], {}), '(a, a)\n', (4307, 4313), True, 'import numpy as np\n'), ((4337, 4358), 'numpy.meshgrid', 'np.meshgrid', (['a', 'xLine'], {}), '(a, xLine)\n', (4348, 4358), True, 'import numpy as np\n'), ((4398, 4430), 'numpy.where', 'np.where', (['self.plaquette_defects'], {}), '(self.plaquette_defects)\n', (4406, 4430), True, 'import numpy as np\n'), ((4450, 4488), 'numpy.where', 'np.where', (['(self.qubit_matrix[:, :] == 1)'], {}), '(self.qubit_matrix[:, :] == 1)\n', (4458, 4488), True, 'import numpy as np\n'), ((4507, 4545), 'numpy.where', 'np.where', (['(self.qubit_matrix[:, :] == 2)'], {}), '(self.qubit_matrix[:, :] == 2)\n', (4515, 4545), True, 'import numpy as np\n'), ((4564, 4602), 'numpy.where', 'np.where', (['(self.qubit_matrix[:, :] == 3)'], {}), '(self.qubit_matrix[:, :] == 3)\n', (4572, 4602), True, 'import numpy as np\n'), ((5085, 5101), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (5096, 5101), True, 'import matplotlib.pyplot as plt\n'), ((7670, 7687), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (7678, 7687), True, 'import matplotlib.pyplot as plt\n'), ((12299, 12312), 'random.random', 'rand.random', ([], {}), '()\n', (12310, 12312), True, 'import random as rand\n'), ((4695, 4754), 'numpy.arange', 'np.arange', (['center_x', '(center_x + radius + stepsize)', 'stepsize'], {}), '(center_x, center_x + radius + stepsize, stepsize)\n', (4704, 4754), True, 'import numpy as np\n'), ((4771, 4800), 
'numpy.sqrt', 'np.sqrt', (['(radius ** 2 - x ** 2)'], {}), '(radius ** 2 - x ** 2)\n', (4778, 4800), True, 'import numpy as np\n'), ((4817, 4845), 'numpy.concatenate', 'np.concatenate', (['[x, x[::-1]]'], {}), '([x, x[::-1]])\n', (4831, 4845), True, 'import numpy as np\n'), ((4862, 4891), 'numpy.concatenate', 'np.concatenate', (['[y, -y[::-1]]'], {}), '([y, -y[::-1]])\n', (4876, 4891), True, 'import numpy as np\n'), ((10351, 10359), 'random.random', 'random', ([], {}), '()\n', (10357, 10359), False, 'from random import random\n'), ((12123, 12131), 'random.random', 'random', ([], {}), '()\n', (12129, 12131), False, 'from random import random\n'), ((12157, 12165), 'random.random', 'random', ([], {}), '()\n', (12163, 12165), False, 'from random import random\n'), ((12200, 12208), 'random.random', 'random', ([], {}), '()\n', (12206, 12208), False, 'from random import random\n'), ((12230, 12238), 'random.random', 'random', ([], {}), '()\n', (12236, 12238), False, 'from random import random\n'), ((576, 589), 'random.random', 'rand.random', ([], {}), '()\n', (587, 589), True, 'import random as rand\n'), ((1199, 1212), 'random.random', 'rand.random', ([], {}), '()\n', (1210, 1212), True, 'import random as rand\n'), ((10413, 10421), 'random.random', 'random', ([], {}), '()\n', (10419, 10421), False, 'from random import random\n'), ((10505, 10513), 'random.random', 'random', ([], {}), '()\n', (10511, 10513), False, 'from random import random\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
import unittest
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
class TrtConvertBatchNormTest(TrtLayerAutoScanTest):
    """Auto-scan test for converting the Paddle batch_norm op to TensorRT."""
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        # Every generated configuration is considered valid for this op.
        return True
    def sample_program_configs(self):
        """Yield batch_norm program configs over dims/batch/epsilon/layout grids."""
        def generate_input1(attrs: List[Dict[str, Any]], batch):
            # Input tensor of ones; shape depends on rank and data layout.
            if self.dims == 4:
                if attrs[0]['data_layout'] == "NCHW":
                    return np.ones([batch, 3, 24, 24]).astype(np.float32)
                elif attrs[0]['data_layout'] == "NHWC":
                    return np.ones([batch, 24, 24, 3]).astype(np.float32)
            elif self.dims == 3:
                return np.ones([batch, 3, 24]).astype(np.float32)
            elif self.dims == 2:
                return np.ones([batch, 3]).astype(np.float32)
        def generate_bias(attrs: List[Dict[str, Any]], batch):
            # Per-channel bias (3 channels).
            return np.full((3), 0.9).astype("float32")
        def generate_mean(attrs: List[Dict[str, Any]], batch):
            # Per-channel running mean.
            return np.full((3), 0.9).astype("float32")
        def generate_scale(attrs: List[Dict[str, Any]], batch):
            # Per-channel scale (gamma).
            return np.full((3), 1.1).astype("float32")
        def generate_variance(attrs: List[Dict[str, Any]], batch):
            # Per-channel running variance.
            return np.full((3), 1.2).astype("float32")
        def generate_MomentumTensor(attrs: List[Dict[str, Any]], batch):
            # Optional momentum supplied as a tensor input.
            return np.full((3), 0.9).astype("float32")
        for dims in [2, 3, 4]:
            for num_input in [0, 1]:
                for batch in [1, 2, 4]:
                    for epsilon in [1e-6, 1e-5, 1e-4]:
                        for data_layout in ["NCHW"]:
                            for momentum in [0.9, 0.8]:
                                self.num_input = num_input
                                self.dims = dims
                                # Op attributes; the second dict is unused.
                                dics = [{
                                    "epsilon": epsilon,
                                    "data_layout": data_layout,
                                    "momentum": momentum,
                                    "is_test": True,
                                    "trainable_statistics": False
                                }, {}]
                                # Input maps: with and without MomentumTensor.
                                dics_intput = [{
                                    "X": ["batch_norm_input"],
                                    "Bias": ["Bias"],
                                    "Mean": ["Mean"],
                                    "Scale": ["Scale"],
                                    "Variance": ["Variance"],
                                    "MomentumTensor": ["MomentumTensor"]
                                }, {
                                    "X": ["batch_norm_input"],
                                    "Bias": ["Bias"],
                                    "Mean": ["Mean"],
                                    "Scale": ["Scale"],
                                    "Variance": ["Variance"]
                                }]
                                # Weight tensors matching each input map.
                                dics_intputs = [{
                                    "Bias": TensorConfig(data_gen=partial(
                                        generate_bias, dics, batch)),
                                    "Mean": TensorConfig(data_gen=partial(
                                        generate_mean, dics, batch)),
                                    "Scale": TensorConfig(data_gen=partial(
                                        generate_scale, dics, batch)),
                                    "Variance": TensorConfig(data_gen=partial(
                                        generate_variance, dics, batch)),
                                    "MomentumTensor":
                                    TensorConfig(data_gen=partial(
                                        generate_MomentumTensor, dics, batch)),
                                }, {
                                    "Bias": TensorConfig(data_gen=partial(
                                        generate_bias, dics, batch)),
                                    "Mean": TensorConfig(data_gen=partial(
                                        generate_mean, dics, batch)),
                                    "Scale": TensorConfig(data_gen=partial(
                                        generate_scale, dics, batch)),
                                    "Variance": TensorConfig(data_gen=partial(
                                        generate_variance, dics, batch))
                                }]
                                ops_config = [{
                                    "op_type": "batch_norm",
                                    "op_inputs": dics_intput[num_input],
                                    "op_outputs": {
                                        "Y": ["batch_norm_out"],
                                        "MeanOut": ["Mean"],
                                        "VarianceOut": ["Variance"],
                                        "SavedMean": ["SavedMean"],
                                        "SavedVariance": ["SavedVariance"]
                                    },
                                    "op_attrs": dics[0]
                                }]
                                ops = self.generate_op_config(ops_config)
                                program_config = ProgramConfig(
                                    ops=ops,
                                    weights=dics_intputs[num_input],
                                    inputs={
                                        "batch_norm_input": TensorConfig(
                                            data_gen=partial(generate_input1,
                                                             dics, batch))
                                    },
                                    outputs=["batch_norm_out"])
                                yield program_config
    def sample_predictor_configs(
            self, program_config) -> (paddle_infer.Config, List[int], float):
        """Yield (config, expected TRT node counts, tolerance) for FP32/FP16,
        static and dynamic shape."""
        def generate_dynamic_shape(attrs):
            # Min/max/opt TRT shape profiles matching the input rank/layout.
            if self.dims == 4:
                if attrs[0]['data_layout'] == "NCHW":
                    self.dynamic_shape.min_input_shape = {
                        "batch_norm_input": [1, 3, 24, 24]
                    }
                    self.dynamic_shape.max_input_shape = {
                        "batch_norm_input": [4, 3, 48, 48]
                    }
                    self.dynamic_shape.opt_input_shape = {
                        "batch_norm_input": [1, 3, 24, 48]
                    }
                elif attrs[0]['data_layout'] == "NHWC":
                    self.dynamic_shape.min_input_shape = {
                        "batch_norm_input": [1, 24, 24, 3]
                    }
                    self.dynamic_shape.max_input_shape = {
                        "batch_norm_input": [4, 48, 48, 3]
                    }
                    self.dynamic_shape.opt_input_shape = {
                        "batch_norm_input": [1, 24, 48, 3]
                    }
            elif self.dims == 3:
                self.dynamic_shape.min_input_shape = {
                    "batch_norm_input": [1, 3, 24]
                }
                self.dynamic_shape.max_input_shape = {
                    "batch_norm_input": [4, 3, 48]
                }
                self.dynamic_shape.opt_input_shape = {
                    "batch_norm_input": [1, 3, 48]
                }
            elif self.dims == 2:
                self.dynamic_shape.min_input_shape = {
                    "batch_norm_input": [1, 3]
                }
                self.dynamic_shape.max_input_shape = {
                    "batch_norm_input": [4, 3]
                }
                self.dynamic_shape.opt_input_shape = {
                    "batch_norm_input": [1, 3]
                }
        def clear_dynamic_shape():
            self.dynamic_shape.min_input_shape = {}
            self.dynamic_shape.max_input_shape = {}
            self.dynamic_shape.opt_input_shape = {}
        def generate_trt_nodes_num(attrs, dynamic_shape):
            # Expected (trt_engine_num, paddle_op_num) after conversion.
            return 1, 2
        attrs = [
            program_config.ops[i].attrs
            for i in range(len(program_config.ops))
        ]
        # for static_shape
        clear_dynamic_shape()
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(
            attrs, False), 1e-5
        # for dynamic_shape
        generate_dynamic_shape(attrs)
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), generate_trt_nodes_num(attrs,
                                                                    True), 1e-5
        self.trt_param.precision = paddle_infer.PrecisionType.Half
        yield self.create_inference_config(), generate_trt_nodes_num(attrs,
                                                                    True), 1e-5
    def add_skip_trt_case(self):
        """Register configurations that TRT is known not to support."""
        def teller1(program_config, predictor_config):
            # Five weights means MomentumTensor was supplied as an input.
            if len(program_config.weights) == 5:
                return True
            return False
        self.add_skip_case(teller1, SkipReasons.TRT_NOT_SUPPORT,
                           "INPUT MomentumTensor NOT SUPPORT")
        def teller2(program_config, predictor_config):
            # Rank-2 input under static shape produces a shape mismatch.
            if len(
                    program_config.inputs['batch_norm_input'].shape
            ) == 2 and not predictor_config.tensorrt_dynamic_shape_enabled():
                return True
            return False
        self.add_skip_case(
            teller2, SkipReasons.TRT_NOT_IMPLEMENTED,
            "The output shape has diff, but we can add shuffle layer to resolve it."
        )
    def test(self):
        # Entry point: register skips, then run the auto-scan.
        self.add_skip_trt_case()
        self.run_test()
# Run the auto-scan test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"numpy.full",
"functools.partial",
"numpy.ones"
] | [((10794, 10809), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10807, 10809), False, 'import unittest\n'), ((1721, 1736), 'numpy.full', 'np.full', (['(3)', '(0.9)'], {}), '(3, 0.9)\n', (1728, 1736), True, 'import numpy as np\n'), ((1840, 1855), 'numpy.full', 'np.full', (['(3)', '(0.9)'], {}), '(3, 0.9)\n', (1847, 1855), True, 'import numpy as np\n'), ((1960, 1975), 'numpy.full', 'np.full', (['(3)', '(1.1)'], {}), '(3, 1.1)\n', (1967, 1975), True, 'import numpy as np\n'), ((2083, 2098), 'numpy.full', 'np.full', (['(3)', '(1.2)'], {}), '(3, 1.2)\n', (2090, 2098), True, 'import numpy as np\n'), ((2212, 2227), 'numpy.full', 'np.full', (['(3)', '(0.9)'], {}), '(3, 0.9)\n', (2219, 2227), True, 'import numpy as np\n'), ((1267, 1294), 'numpy.ones', 'np.ones', (['[batch, 3, 24, 24]'], {}), '([batch, 3, 24, 24])\n', (1274, 1294), True, 'import numpy as np\n'), ((1500, 1523), 'numpy.ones', 'np.ones', (['[batch, 3, 24]'], {}), '([batch, 3, 24])\n', (1507, 1523), True, 'import numpy as np\n'), ((1397, 1424), 'numpy.ones', 'np.ones', (['[batch, 24, 24, 3]'], {}), '([batch, 24, 24, 3])\n', (1404, 1424), True, 'import numpy as np\n'), ((1599, 1618), 'numpy.ones', 'np.ones', (['[batch, 3]'], {}), '([batch, 3])\n', (1606, 1618), True, 'import numpy as np\n'), ((3894, 3929), 'functools.partial', 'partial', (['generate_bias', 'dics', 'batch'], {}), '(generate_bias, dics, batch)\n', (3901, 3929), False, 'from functools import partial\n'), ((4039, 4074), 'functools.partial', 'partial', (['generate_mean', 'dics', 'batch'], {}), '(generate_mean, dics, batch)\n', (4046, 4074), False, 'from functools import partial\n'), ((4185, 4221), 'functools.partial', 'partial', (['generate_scale', 'dics', 'batch'], {}), '(generate_scale, dics, batch)\n', (4192, 4221), False, 'from functools import partial\n'), ((4335, 4374), 'functools.partial', 'partial', (['generate_variance', 'dics', 'batch'], {}), '(generate_variance, dics, batch)\n', (4342, 4374), False, 'from functools import 
partial\n'), ((4530, 4575), 'functools.partial', 'partial', (['generate_MomentumTensor', 'dics', 'batch'], {}), '(generate_MomentumTensor, dics, batch)\n', (4537, 4575), False, 'from functools import partial\n'), ((4722, 4757), 'functools.partial', 'partial', (['generate_bias', 'dics', 'batch'], {}), '(generate_bias, dics, batch)\n', (4729, 4757), False, 'from functools import partial\n'), ((4867, 4902), 'functools.partial', 'partial', (['generate_mean', 'dics', 'batch'], {}), '(generate_mean, dics, batch)\n', (4874, 4902), False, 'from functools import partial\n'), ((5013, 5049), 'functools.partial', 'partial', (['generate_scale', 'dics', 'batch'], {}), '(generate_scale, dics, batch)\n', (5020, 5049), False, 'from functools import partial\n'), ((5163, 5202), 'functools.partial', 'partial', (['generate_variance', 'dics', 'batch'], {}), '(generate_variance, dics, batch)\n', (5170, 5202), False, 'from functools import partial\n'), ((6406, 6443), 'functools.partial', 'partial', (['generate_input1', 'dics', 'batch'], {}), '(generate_input1, dics, batch)\n', (6413, 6443), False, 'from functools import partial\n')] |
from datetime import datetime
import tempfile
import os
import json
import shutil
import numpy as np
import ray
from typing import Type
from ray.tune.logger import UnifiedLogger
from config.custom_config import Config
from ray.rllib.agents.trainer import Trainer
def select_policy(agent_id):
    """Map an agent id to its policy id; only "player1" keeps its own policy."""
    return "player1" if agent_id == "player1" else "player2"
def select_multiagent_policy(agent_id):
    """
    Map an agent id to a policy id; player2 is replaced by a past opponent
    sampled with the configured frequencies.
    """
    if agent_id == "player1":
        return "player1"
    # Draw a single opponent policy according to the configured probabilities.
    sampled = np.random.choice(Config.OPPONENT_POLICIES_NOT_TRAINABLE, 1,
                               p=Config.OPPONENT_POLICIES_PROB)
    return sampled[0]
def select_evaluation_policy(agent_id):
    """During evaluation, player1 uses its own policy; all others play minimax."""
    return "player1" if agent_id == "player1" else "minimax"
def custom_log_creator(custom_path, p1_trainer_name, p2_trainer_name, epochs):
    """Return a logger_creator callable for a ray Trainer.

    The returned callable lazily creates ``custom_path`` and a unique,
    timestamped run directory inside it for the UnifiedLogger.
    """
    timestr = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
    logdir_prefix = "{}_vs_{}_{}-Epochs_{}".format(
        p1_trainer_name, p2_trainer_name, epochs, timestr
    )
    def logger_creator(config):
        # Ensure the parent directory exists, then allocate a unique run dir.
        os.makedirs(custom_path, exist_ok=True)
        run_dir = tempfile.mkdtemp(prefix=logdir_prefix, dir=custom_path)
        return UnifiedLogger(config, run_dir, loggers=None)
    return logger_creator
def self_play(trainer: Type[Trainer]):
    """Copy player1's model weights onto player2 (naive self-play update)."""
    p1_model = trainer.get_policy("player1").model.base_model
    p2_model = trainer.get_policy("player2").model.base_model
    # Both players must share the same architecture for the transfer to work.
    assert p1_model.name == p2_model.name,\
        "Error: you should use the same model for every player"
    p2_model.set_weights(p1_model.get_weights())
    print("Weight succesfully updated")
    # Sanity check: every weight tensor must now match exactly.
    for w1, w2 in zip(p1_model.get_weights(), p2_model.get_weights()):
        assert (w1 == w2).all()
def multiagent_self_play(trainer: Type[Trainer]):
    """
    Rotate weights through the opponent policies: player1's current weights go
    to the first opponent, each opponent passes its old weights to the next.
    """
    new_weights = trainer.get_policy("player1").get_weights()
    for opp in Config.OPPONENT_POLICIES:
        # Save the opponent's current weights before overwriting them, so the
        # chain shifts every snapshot one slot down.
        prev_weights = trainer.get_policy(opp).get_weights()
        trainer.get_policy(opp).set_weights(new_weights)
        new_weights = prev_weights
    # Syncs weights of remote workers with the local worker
    # if there are no remote workers, it does nothing
    # https://github.com/ray-project/ray/blob/fe06642df0e4b88ac315028ba7de2855cd27a710/rllib/evaluation/worker_set.py#L27
    trainer.workers.sync_weights()
    # p2_weights = ray.put(trainer.get_policy("player2").get_weights())
    # trainer.workers.remote_workers()[0].get_policy("player2").set_weights(ray.get_p2)
    # push the changes to the workers
    # weights = ray.put(trainer.workers.local_worker().get_weights())
    # trainer.workers.foreach_worker(lambda w: w.set_weights(ray.get(weights)))
    """
    WARNING eager_tf_policy.py:587 -- Cannot restore an optimizer's state
    for tf eager! Keras is not able to save the v1.x optimizers
    (from tf.compat.v1.train) since they aren't compatible with checkpoints.
    """
    print("Weight succesfully updated")
def copy_weights(to_policy, from_policy, trainer):
    """Overwrite ``to_policy``'s weights with ``from_policy``'s, one-way,
    leaving ``from_policy`` untouched."""
    dst_items = trainer.get_policy(to_policy).get_weights(as_dict=True).items()
    src_items = trainer.get_policy(from_policy).get_weights(as_dict=True).items()
    # Keep the destination's keys but take every value from the source.
    temp_weights = {}
    for (dst_key, _), (_, src_val) in zip(dst_items, src_items):
        temp_weights[dst_key] = src_val
    trainer.set_weights(
        {
            to_policy: temp_weights,
        }
    )
    # Verify the copy: every tensor of the two policies must now be equal.
    for (k, v), (k2, v2) in zip(
        trainer.get_policy(to_policy).get_weights(as_dict=True).items(),
        trainer.get_policy(from_policy).get_weights(as_dict=True).items(),
    ):
        assert (v == v2).all()
    print("{} == {}".format(to_policy, from_policy))
def shift_policies(trainer, new, p2, p3, p4, p5):
    """Shift weights one slot down the opponent chain: new -> p2 -> p3 -> p4 -> p5."""
    # Copy oldest-first so each snapshot survives until it has been passed on.
    for dst, src in ((p5, p4), (p4, p3), (p3, p2), (p2, new)):
        copy_weights(dst, src, trainer)
def compute_best_policies(win_rate_matrix, num_of_policies_to_keep):
    """Return the indices of the policies with the highest average win rate.

    Given an NxN win-rate matrix (num_of_policies_to_keep < N), each policy
    is scored by its row average and the indices of the top
    ``num_of_policies_to_keep`` policies are returned, ordered from worst to
    best among the kept ones (ties keep their original order).
    """
    # Row average = mean win rate of policy i over all its opponents.
    averages = [sum(row) / len(row) for row in win_rate_matrix]
    # Stable ascending sort of indices by average score; keep the last k.
    ranked = sorted(range(len(averages)), key=lambda i: averages[i])
    return ranked[-num_of_policies_to_keep:]
def restore_training(trainer_obj,ckpt_dir,metrics_file=None):
"""
Restore the latest checkpoint and the latest metrics
trainer_obj: Trainable
trainer to resume
ckpt_dir: str
path to the directory with the checkpoints
metrics_file: str
path to the directory with the latest custom metrics observed
"""
best_ckpt = 0
ckpt_to_restore = None
# Restore the latest checkpoint if exist:
for ckpt in os.listdir(ckpt_dir):
if ckpt == ".gitkeep":
continue
ckpt_indx = int(ckpt.split("_")[1])
if ckpt_indx > best_ckpt:
best_ckpt = ckpt_indx
# if the checkpoint exists return the checkpoint and latest metrics
if best_ckpt > 0:
ckpt_to_restore = os.path.join(
ckpt_dir, "checkpoint_" + str(best_ckpt), "checkpoint-" + str(best_ckpt)
)
trainer_obj.restore(ckpt_to_restore)
print("Checkpoint number " + str(best_ckpt) + " restored")
# we also need to restore the custom metrics
with open(metrics_file) as json_file:
data = json.load(json_file)
trainer_obj.callbacks.load_values(data)
print("Values of the latest custom metrics have been restored")
else:
print("No checkpoint found, Training starting from scratch...")
return best_ckpt
def save_checkpoint(trainer_obj, ckpt_dir, metrics_file, custom_metrics, ckpt_to_keep=5):
    """
    Save a checkpoint in ckpt_dir and the current metrics in metrics_file,
    pruning all but the newest ``ckpt_to_keep`` checkpoints.
    trainer_obj: Trainable
        trainer to checkpoint (must expose .save)
    ckpt_dir: str
        path to the directory with the checkpoints
    metrics_file: str
        JSON file where the latest custom metrics are written
    custom_metrics: dict
        custom metrics to save
    ckpt_to_keep: int
        number of checkpoints to keep
    """
    trainer_obj.save(ckpt_dir)
    ckpts = os.listdir(ckpt_dir)
    # ".gitkeep" is repo bookkeeping, not a checkpoint; the original
    # unconditional remove() raised ValueError when the file was absent.
    if ".gitkeep" in ckpts:
        ckpts.remove(".gitkeep")
    with open(metrics_file, "w") as json_file:
        json.dump(custom_metrics, json_file)
    if len(ckpts) > ckpt_to_keep:
        # Sort newest-first by the numeric suffix of "checkpoint_<N>".
        ckpts.sort(key=lambda x: int(x.split("_")[1]), reverse=True)
        for i, elem in enumerate(ckpts):
            if i > ckpt_to_keep - 1:
                dir_to_remove = os.path.join(ckpt_dir, elem)
                shutil.rmtree(dir_to_remove)
if __name__ == "__main__":
    # Demo: build a random 5x5 win-rate matrix and make it consistent, i.e.
    # the lower triangle mirrors the upper one as w[i][j] = 1 - w[j][i].
    win_rate_matrix = np.random.rand(5,5)
    for n1,elem in enumerate(win_rate_matrix):
        for n2,p2 in enumerate(elem):
            if n2 < n1:
                val = 1 - win_rate_matrix[n2][n1]
                win_rate_matrix[n1][n2] = val
| [
"os.path.exists",
"os.listdir",
"numpy.random.rand",
"os.makedirs",
"ray.tune.logger.UnifiedLogger",
"numpy.random.choice",
"os.path.join",
"json.load",
"tempfile.mkdtemp",
"shutil.rmtree",
"datetime.datetime.today",
"json.dump"
] | [((5597, 5617), 'os.listdir', 'os.listdir', (['ckpt_dir'], {}), '(ckpt_dir)\n', (5607, 5617), False, 'import os\n'), ((7058, 7078), 'os.listdir', 'os.listdir', (['ckpt_dir'], {}), '(ckpt_dir)\n', (7068, 7078), False, 'import os\n'), ((7666, 7686), 'numpy.random.rand', 'np.random.rand', (['(5)', '(5)'], {}), '(5, 5)\n', (7680, 7686), True, 'import numpy as np\n'), ((1257, 1312), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': 'logdir_prefix', 'dir': 'custom_path'}), '(prefix=logdir_prefix, dir=custom_path)\n', (1273, 1312), False, 'import tempfile\n'), ((1328, 1371), 'ray.tune.logger.UnifiedLogger', 'UnifiedLogger', (['config', 'logdir'], {'loggers': 'None'}), '(config, logdir, loggers=None)\n', (1341, 1371), False, 'from ray.tune.logger import UnifiedLogger\n'), ((7234, 7270), 'json.dump', 'json.dump', (['custom_metrics', 'json_file'], {}), '(custom_metrics, json_file)\n', (7243, 7270), False, 'import json\n'), ((608, 705), 'numpy.random.choice', 'np.random.choice', (['Config.OPPONENT_POLICIES_NOT_TRAINABLE', '(1)'], {'p': 'Config.OPPONENT_POLICIES_PROB'}), '(Config.OPPONENT_POLICIES_NOT_TRAINABLE, 1, p=Config.\n OPPONENT_POLICIES_PROB)\n', (624, 705), True, 'import numpy as np\n'), ((962, 978), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (976, 978), False, 'from datetime import datetime\n'), ((1174, 1201), 'os.path.exists', 'os.path.exists', (['custom_path'], {}), '(custom_path)\n', (1188, 1201), False, 'import os\n'), ((1215, 1239), 'os.makedirs', 'os.makedirs', (['custom_path'], {}), '(custom_path)\n', (1226, 1239), False, 'import os\n'), ((6251, 6271), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (6260, 6271), False, 'import json\n'), ((7516, 7544), 'os.path.join', 'os.path.join', (['ckpt_dir', 'elem'], {}), '(ckpt_dir, elem)\n', (7528, 7544), False, 'import os\n'), ((7561, 7589), 'shutil.rmtree', 'shutil.rmtree', (['dir_to_remove'], {}), '(dir_to_remove)\n', (7574, 7589), False, 'import shutil\n')] |
#! usr/bin/env python
# coding:utf-8
#=====================================================
# Copyright (C) 2020 * Ltd. All rights reserved.
#
# Author : Chen_Sheng19
# Editor : VIM
# Create time : 2020-06-09
# File name :
# Description : product TFRecord data from image file
#
#=====================================================
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import os
from sklearn.utils import shuffle
from PIL import Image
def load_smaple(sample_dir, shuffle_flag=True):
    """Walk ``sample_dir`` and collect image file paths with integer labels.

    The name of the directory that directly contains each file is used as
    its class label; labels are mapped to integers in sorted-name order.
    Returns ``((filenames, labels), label_names)`` as numpy arrays, with
    filenames/labels shuffled together when ``shuffle_flag`` is True.
    """
    print("loading dataset...")
    lfilenames = []
    labelnames = []
    for dirpath, dirnames, filenames in os.walk(sample_dir):
        for filename in filenames:
            filepath = os.sep.join([dirpath, filename])
            lfilenames.append(filepath)
            # os.path.basename is portable; the original split on a
            # hard-coded "\\" only worked with Windows path separators.
            labelnames.append(os.path.basename(dirpath))
    lab = list(sorted(set(labelnames)))
    labdict = dict(zip(lab, list(range(len(lab)))))
    labels = [labdict[i] for i in labelnames]
    if shuffle_flag:
        return shuffle(np.asarray(lfilenames), np.asarray(labels)), np.asarray(lab)
    else:
        return (np.asarray(lfilenames), np.asarray(labels)), np.asarray(lab)
# Load the dataset from the "man_woman" folder without shuffling, so the
# produced TFRecord file is deterministic.
dir_path = "man_woman"
(filenames,labels),_ = load_smaple(dir_path,False)
def make_TFRec(filenames, labels, output_path="mydata.tfrecords", image_size=(256, 256)):
    """Serialize images and their integer labels into a TFRecord file.

    Each image is opened with PIL, resized to ``image_size`` and stored as raw
    bytes next to its label, one ``tf.train.Example`` per record.

    Args:
        filenames: sequence of image file paths.
        labels: sequence of integer labels, parallel to ``filenames``.
        output_path: destination TFRecord file.  The default keeps the
            historical hard-coded name, so existing callers are unaffected.
        image_size: (width, height) every image is resized to before writing.
    """
    # 1. create the record writer
    writer = tf.python_io.TFRecordWriter(output_path)
    try:
        for i in tqdm(range(len(labels))):
            image = Image.open(filenames[i])
            img = image.resize(image_size)
            img_raw = img.tobytes()
            # 2. wrap raw image bytes and label into a TFRecord Example
            example = tf.train.Example(
                features=tf.train.Features(
                    feature={
                        "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[labels[i]])),
                        "img_raw": tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
                    }))
            writer.write(example.SerializeToString())  # serialize and append
    finally:
        # close even if an image fails to open/resize, so the file handle
        # is not leaked and partial output is flushed
        writer.close()
# Serialize the whole dataset into mydata.tfrecords (runs at import time).
make_TFRec(filenames,labels)
def read_and_decode(filenames, flag="train", batch_size=3):
    """Build a TF1 queue-based input pipeline over TFRecord files.

    In training mode the filename queue repeats and shuffles, decoded images
    are scaled to [-0.5, 0.5) and assembled into mini-batches.  In any other
    mode a single ordered pass yields one raw uint8 image (and its label) at
    a time.

    Args:
        filenames: list of TFRecord file paths.
        flag: "train" for the shuffled/batched pipeline, anything else for a
            one-epoch sequential read.
        batch_size: mini-batch size used only in training mode.

    Returns:
        ``(image_batch, label_batch)`` tensors in training mode, otherwise a
        single ``(image, label)`` tensor pair.
    """
    is_train = flag == "train"
    # 1. queue of input files
    if is_train:
        filename_queue = tf.train.string_input_producer(filenames)
    else:
        filename_queue = tf.train.string_input_producer(filenames, num_epochs=1, shuffle=False)
    # 2. pull one serialized Example off the queue
    _, serialized_example = tf.TFRecordReader().read(filename_queue)
    # 3. parse the Example into fixed-length features
    feature_spec = {
        "label": tf.FixedLenFeature([], tf.int64),
        "img_raw": tf.FixedLenFeature([], tf.string),
    }
    features = tf.parse_single_example(serialized_example, features=feature_spec)
    # 4. decode the raw bytes back into an image tensor
    image = tf.reshape(tf.decode_raw(features['img_raw'], tf.uint8), [256, 256, 3])
    label = tf.cast(features['label'], tf.int32)
    if not is_train:
        return image, label
    # normalise to [-0.5, 0.5) and batch for training
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    img_batch, label_batch = tf.train.batch([image, label], batch_size=batch_size, capacity=20)
    return img_batch, label_batch
# Round-trip sanity check: read the records back once, in order, and dump the
# decoded images to disk grouped by label.
TFRecordfilnames = ["mydata.tfrecords"]
image,label = read_and_decode(TFRecordfilnames,flag='test')
# Windows-style output directory, recreated from scratch on every run.
save_image_path = "show\\"
if tf.gfile.Exists(save_image_path):
    tf.gfile.DeleteRecursively(save_image_path)
tf.gfile.MakeDirs(save_image_path)
with tf.Session() as sess:
    # num_epochs in the input pipeline is a *local* variable in TF1, so
    # local-variable initialization is required before reading.
    sess.run(tf.local_variables_initializer())
    # start the queue-runner threads that feed the filename/example queues
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    myset = set([])  # labels for which an output folder was already created
    try:
        i = 0
        while True:  # loop until the one-epoch pipeline raises OutOfRange
            example,example_label = sess.run([image,label])
            example_label = str(example_label)
            if example_label not in myset:
                myset.add(example_label)
                tf.gfile.MakeDirs(save_image_path+example_label)
            img = Image.fromarray(example,'RGB')
            img.save(save_image_path+example_label+"\\"+str(i)+'_Label_'+'.jpg')
            print(i)
            i += 1
    except tf.errors.OutOfRangeError:
        # raised by the queue once num_epochs=1 worth of data has been read
        print('Done Test -- epoch limit reached')
    finally:
        # always stop and join the queue-runner threads before exiting
        coord.request_stop()
        coord.join(threads)
        print("stop()")
| [
"tensorflow.local_variables_initializer",
"tensorflow.train.Int64List",
"os.sep.join",
"tensorflow.TFRecordReader",
"tensorflow.gfile.MakeDirs",
"tensorflow.cast",
"os.walk",
"tensorflow.gfile.Exists",
"tensorflow.train.Coordinator",
"tensorflow.Session",
"tensorflow.gfile.DeleteRecursively",
... | [((3223, 3255), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['save_image_path'], {}), '(save_image_path)\n', (3238, 3255), True, 'import tensorflow as tf\n'), ((3305, 3339), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['save_image_path'], {}), '(save_image_path)\n', (3322, 3339), True, 'import tensorflow as tf\n'), ((641, 660), 'os.walk', 'os.walk', (['sample_dir'], {}), '(sample_dir)\n', (648, 660), False, 'import os\n'), ((1347, 1394), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['"""mydata.tfrecords"""'], {}), "('mydata.tfrecords')\n", (1374, 1394), True, 'import tensorflow as tf\n'), ((2327, 2346), 'tensorflow.TFRecordReader', 'tf.TFRecordReader', ([], {}), '()\n', (2344, 2346), True, 'import tensorflow as tf\n'), ((2712, 2756), 'tensorflow.decode_raw', 'tf.decode_raw', (["features['img_raw']", 'tf.uint8'], {}), "(features['img_raw'], tf.uint8)\n", (2725, 2756), True, 'import tensorflow as tf\n'), ((2768, 2800), 'tensorflow.reshape', 'tf.reshape', (['image', '[256, 256, 3]'], {}), '(image, [256, 256, 3])\n', (2778, 2800), True, 'import tensorflow as tf\n'), ((2810, 2846), 'tensorflow.cast', 'tf.cast', (["features['label']", 'tf.int32'], {}), "(features['label'], tf.int32)\n", (2817, 2846), True, 'import tensorflow as tf\n'), ((3261, 3304), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['save_image_path'], {}), '(save_image_path)\n', (3287, 3304), True, 'import tensorflow as tf\n'), ((3346, 3358), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3356, 3358), True, 'import tensorflow as tf\n'), ((3428, 3450), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (3448, 3450), True, 'import tensorflow as tf\n'), ((3465, 3506), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord'}), '(coord=coord)\n', (3493, 3506), True, 'import tensorflow as tf\n'), ((1450, 1474), 'PIL.Image.open', 'Image.open', (['filenames[i]'], {}), 
'(filenames[i])\n', (1460, 1474), False, 'from PIL import Image\n'), ((2148, 2189), 'tensorflow.train.string_input_producer', 'tf.train.string_input_producer', (['filenames'], {}), '(filenames)\n', (2178, 2189), True, 'import tensorflow as tf\n'), ((2225, 2295), 'tensorflow.train.string_input_producer', 'tf.train.string_input_producer', (['filenames'], {'num_epochs': '(1)', 'shuffle': '(False)'}), '(filenames, num_epochs=1, shuffle=False)\n', (2255, 2295), True, 'import tensorflow as tf\n'), ((2966, 3032), 'tensorflow.train.batch', 'tf.train.batch', (['[image, label]'], {'batch_size': 'batch_size', 'capacity': '(20)'}), '([image, label], batch_size=batch_size, capacity=20)\n', (2980, 3032), True, 'import tensorflow as tf\n'), ((3381, 3413), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (3411, 3413), True, 'import tensorflow as tf\n'), ((720, 752), 'os.sep.join', 'os.sep.join', (['[dirpath, filename]'], {}), '([dirpath, filename])\n', (731, 752), False, 'import os\n'), ((1107, 1122), 'numpy.asarray', 'np.asarray', (['lab'], {}), '(lab)\n', (1117, 1122), True, 'import numpy as np\n'), ((1192, 1207), 'numpy.asarray', 'np.asarray', (['lab'], {}), '(lab)\n', (1202, 1207), True, 'import numpy as np\n'), ((3845, 3876), 'PIL.Image.fromarray', 'Image.fromarray', (['example', '"""RGB"""'], {}), "(example, 'RGB')\n", (3860, 3876), False, 'from PIL import Image\n'), ((1064, 1086), 'numpy.asarray', 'np.asarray', (['lfilenames'], {}), '(lfilenames)\n', (1074, 1086), True, 'import numpy as np\n'), ((1087, 1105), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (1097, 1105), True, 'import numpy as np\n'), ((1149, 1171), 'numpy.asarray', 'np.asarray', (['lfilenames'], {}), '(lfilenames)\n', (1159, 1171), True, 'import numpy as np\n'), ((1172, 1190), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (1182, 1190), True, 'import numpy as np\n'), ((2547, 2579), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', 
(['[]', 'tf.int64'], {}), '([], tf.int64)\n', (2565, 2579), True, 'import tensorflow as tf\n'), ((2641, 2674), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (2659, 2674), True, 'import tensorflow as tf\n'), ((2891, 2917), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (2898, 2917), True, 'import tensorflow as tf\n'), ((3778, 3828), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['(save_image_path + example_label)'], {}), '(save_image_path + example_label)\n', (3795, 3828), True, 'import tensorflow as tf\n'), ((1751, 1788), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[labels[i]]'}), '(value=[labels[i]])\n', (1769, 1788), True, 'import tensorflow as tf\n'), ((1867, 1902), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[img_raw]'}), '(value=[img_raw])\n', (1885, 1902), True, 'import tensorflow as tf\n')] |
import rls
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from algos.tf2algos.base.off_policy import make_off_policy_class
from rls.modules import DoubleQ
class TD3(make_off_policy_class(mode='share')):
    '''
    Twin Delayed Deep Deterministic Policy Gradient, https://arxiv.org/abs/1802.09477
    '''
    def __init__(self,
                 s_dim,
                 visual_sources,
                 visual_resolution,
                 a_dim,
                 is_continuous,
                 ployak=0.995,
                 delay_num=2,
                 noise_type='gaussian',
                 gaussian_noise_sigma=0.2,
                 gaussian_noise_bound=0.2,
                 actor_lr=5.0e-4,
                 critic_lr=1.0e-3,
                 discrete_tau=1.0,
                 hidden_units={
                     'actor_continuous': [32, 32],
                     'actor_discrete': [32, 32],
                     'q': [32, 32]
                 },
                 **kwargs):
        """Build the actor/critic pairs (online + target) for TD3.

        Args:
            s_dim: dimension of the vector observation.
            visual_sources: number of visual observation sources.
            visual_resolution: resolution of visual observations.
            a_dim: action dimension (continuous) / number of actions (discrete).
            is_continuous: True for continuous control, False for discrete.
            ployak: Polyak averaging coefficient for soft target updates.
            delay_num: critic updates per delayed actor update.
            noise_type: exploration noise, 'gaussian' (clipped) or 'ou'.
            gaussian_noise_sigma: std of the Gaussian exploration noise.
            gaussian_noise_bound: clipping bound for the Gaussian noise.
            actor_lr: actor learning rate.
            critic_lr: critic learning rate.
            discrete_tau: Gumbel-softmax temperature for the discrete case.
            hidden_units: layer sizes for actor and critic networks.
                NOTE(review): mutable default argument; harmless only while
                it is read-only, as it is here.
        """
        super().__init__(
            s_dim=s_dim,
            visual_sources=visual_sources,
            visual_resolution=visual_resolution,
            a_dim=a_dim,
            is_continuous=is_continuous,
            **kwargs)
        self.ployak = ployak
        self.delay_num = delay_num
        self.discrete_tau = discrete_tau
        self.gaussian_noise_sigma = gaussian_noise_sigma
        self.gaussian_noise_bound = gaussian_noise_bound
        # Continuous: deterministic policy + additive exploration noise.
        # Discrete: categorical policy trained via Gumbel-softmax relaxation.
        if self.is_continuous:
            def _actor_net(): return rls.actor_dpg(self.feat_dim, self.a_dim, hidden_units['actor_continuous'])
            if noise_type == 'gaussian':
                self.action_noise = rls.ClippedNormalActionNoise(mu=np.zeros(self.a_dim), sigma=self.gaussian_noise_sigma * np.ones(self.a_dim), bound=self.gaussian_noise_bound)
            elif noise_type == 'ou':
                self.action_noise = rls.OrnsteinUhlenbeckActionNoise(mu=np.zeros(self.a_dim), sigma=0.2 * np.ones(self.a_dim))
        else:
            def _actor_net(): return rls.actor_discrete(self.feat_dim, self.a_dim, hidden_units['actor_discrete'])
            self.gumbel_dist = tfp.distributions.Gumbel(0, 1)
        self.actor_net = _actor_net()
        self.actor_target_net = _actor_net()
        self.actor_tv = self.actor_net.trainable_variables
        # twin Q networks (clipped double-Q), plus target copies
        def _q_net(): return rls.critic_q_one(self.feat_dim, self.a_dim, hidden_units['q'])
        self.critic_net = DoubleQ(_q_net)
        self.critic_target_net = DoubleQ(_q_net)
        self.critic_tv = self.critic_net.trainable_variables + self.other_tv
        # hard-copy online weights into the target networks once at start
        self.update_target_net_weights(
            self.actor_target_net.weights + self.critic_target_net.weights,
            self.actor_net.weights + self.critic_net.weights
        )
        self.actor_lr, self.critic_lr = map(self.init_lr, [actor_lr, critic_lr])
        self.optimizer_actor, self.optimizer_critic = map(self.init_optimizer, [self.actor_lr, self.critic_lr])
        self.model_recorder(dict(
            actor=self.actor_net,
            critic_net=self.critic_net,
            optimizer_actor=self.optimizer_actor,
            optimizer_critic=self.optimizer_critic
            ))
    def show_logo(self):
        """Log the algorithm's ASCII banner."""
        self.recorder.logger.info('''
　　　　ｘｘｘｘｘｘｘｘｘ　　　　　　　　ｘｘｘｘｘｘｘ　　　　　　　　　ｘｘｘｘｘ　　　　
　　　　ｘｘ　　ｘ　　ｘｘ　　　　　　　　　ｘ　　ｘｘ　　　　　　　　　ｘｘ　ｘｘ　　　　
　　　　ｘｘ　　ｘ　　ｘｘ　　　　　　　　　ｘ　　　ｘｘ　　　　　　　　ｘｘ　　ｘｘ　　　
　　　　　　　　ｘ　　　　　　　　　　　　　ｘ　　　ｘｘ　　　　　　　　　　　　ｘｘｘ　　
　　　　　　　　ｘ　　　　　　　　　　　　　ｘ　　　ｘｘｘ　　　　　　　　　　ｘｘｘｘ　　
　　　　　　　　ｘ　　　　　　　　　　　　　ｘ　　　ｘｘ　　　　　　　　　　　　　ｘｘｘ　
　　　　　　　　ｘ　　　　　　　　　　　　　ｘ　　　ｘｘ　　　　　　　　ｘｘ　　　　ｘｘ　
　　　　　　　　ｘ　　　　　　　　　　　　　ｘ　　ｘｘ　　　　　　　　　ｘｘｘ　　ｘｘｘ　
　　　　　　　ｘｘｘｘｘ　　　　　　　　ｘｘｘｘｘｘｘ　　　　　　　　　　ｘｘｘｘｘ　　　
        ''')
    def choose_action(self, s, visual_s, evaluation=False):
        """Return actions for a batch of observations.

        Greedy action ``mu`` during evaluation, exploration action ``pi``
        otherwise; also advances the recurrent cell state.
        """
        mu, pi, self.cell_state = self._get_action(s, visual_s, self.cell_state)
        a = mu.numpy() if evaluation else pi.numpy()
        return a
    @tf.function
    def _get_action(self, s, visual_s, cell_state):
        """Compute greedy (mu) and exploratory (pi) actions from the policy."""
        with tf.device(self.device):
            feat, cell_state = self.get_feature(s, visual_s, cell_state=cell_state, record_cs=True)
            if self.is_continuous:
                mu = self.actor_net(feat)
                # exploration = deterministic action + clipped noise
                pi = tf.clip_by_value(mu + self.action_noise(), -1, 1)
            else:
                logits = self.actor_net(feat)
                mu = tf.argmax(logits, axis=1)
                cate_dist = tfp.distributions.Categorical(logits)
                pi = cate_dist.sample()
        return mu, pi, cell_state
    def learn(self, **kwargs):
        """Run ``train_times_per_step`` updates through the base-class driver.

        Target networks are soft-updated with Polyak factor ``ployak`` after
        each update; current learning rates are pushed to the summary.
        """
        self.train_step = kwargs.get('train_step')
        for i in range(self.train_times_per_step):
            self._learn(function_dict={
                'train_function': self.train,
                'update_function': lambda: self.update_target_net_weights(
                    self.actor_target_net.weights + self.critic_target_net.weights,
                    self.actor_net.weights + self.critic_net.weights,
                    self.ployak),
                'summary_dict': dict([
                    ['LEARNING_RATE/actor_lr', self.actor_lr(self.train_step)],
                    ['LEARNING_RATE/critic_lr', self.critic_lr(self.train_step)]
                ])
            })
    @tf.function(experimental_relax_shapes=True)
    def train(self, memories, isw, crsty_loss, cell_state):
        """One TD3 update: ``delay_num`` critic steps, then one delayed actor step.

        Returns the mean TD error (usable for prioritized replay weights) and
        a summary dict of losses/Q statistics.
        """
        ss, vvss, a, r, done = memories
        batch_size = tf.shape(a)[0]
        with tf.device(self.device):
            for _ in range(self.delay_num):
                with tf.GradientTape() as tape:
                    feat, feat_ = self.get_feature(ss, vvss, cell_state=cell_state, s_and_s_=True)
                    if self.is_continuous:
                        # target policy smoothing: noisy, clipped target action
                        target_mu = self.actor_target_net(feat_)
                        action_target = tf.clip_by_value(target_mu + self.action_noise(), -1, 1)
                    else:
                        # discrete: straight-through Gumbel-softmax target action
                        target_logits = self.actor_target_net(feat_)
                        logp_all = tf.nn.log_softmax(target_logits)
                        gumbel_noise = tf.cast(self.gumbel_dist.sample([batch_size, self.a_dim]), dtype=tf.float32)
                        _pi = tf.nn.softmax((logp_all + gumbel_noise) / self.discrete_tau)
                        _pi_true_one_hot = tf.one_hot(tf.argmax(_pi, axis=-1), self.a_dim)
                        _pi_diff = tf.stop_gradient(_pi_true_one_hot - _pi)
                        action_target = _pi_diff + _pi
                    q1, q2 = self.critic_net(feat, a)
                    # clipped double-Q target: min of the two target critics
                    q_target = self.critic_target_net.get_min(feat_, action_target)
                    dc_r = tf.stop_gradient(r + self.gamma * q_target * (1 - done))
                    td_error1 = q1 - dc_r
                    td_error2 = q2 - dc_r
                    q1_loss = tf.reduce_mean(tf.square(td_error1) * isw)
                    q2_loss = tf.reduce_mean(tf.square(td_error2) * isw)
                    critic_loss = 0.5 * (q1_loss + q2_loss) + crsty_loss
                critic_grads = tape.gradient(critic_loss, self.critic_tv)
                self.optimizer_critic.apply_gradients(
                    zip(critic_grads, self.critic_tv)
                )
            # delayed actor update: maximize Q1 of the on-policy action
            with tf.GradientTape() as tape:
                if self.is_continuous:
                    mu = self.actor_net(feat)
                else:
                    logits = self.actor_net(feat)
                    _pi = tf.nn.softmax(logits)
                    _pi_true_one_hot = tf.one_hot(tf.argmax(logits, axis=-1), self.a_dim, dtype=tf.float32)
                    _pi_diff = tf.stop_gradient(_pi_true_one_hot - _pi)
                    mu = _pi_diff + _pi
                q1_actor = self.critic_net.Q1(feat, mu)
                actor_loss = -tf.reduce_mean(q1_actor)
            actor_grads = tape.gradient(actor_loss, self.actor_tv)
            self.optimizer_actor.apply_gradients(
                zip(actor_grads, self.actor_tv)
            )
            self.global_step.assign_add(1)
            return (td_error1 + td_error2) / 2, dict([
                ['LOSS/actor_loss', actor_loss],
                ['LOSS/critic_loss', critic_loss],
                ['Statistics/q_min', tf.reduce_min(tf.minimum(q1, q2))],
                ['Statistics/q_mean', tf.reduce_mean(tf.minimum(q1, q2))],
                ['Statistics/q_max', tf.reduce_max(tf.maximum(q1, q2))],
            ])
    @tf.function(experimental_relax_shapes=True)
    def train_persistent(self, memories, isw, crsty_loss, cell_state):
        """Variant of ``train`` using a single persistent tape per iteration.

        Unlike ``train``, the actor is updated in *every* iteration (no actor
        delay).  NOTE(review): the loop count is hard-coded to 2 instead of
        ``self.delay_num`` — confirm whether that is intentional.
        """
        ss, vvss, a, r, done = memories
        batch_size = tf.shape(a)[0]
        with tf.device(self.device):
            for _ in range(2):
                # persistent tape: reused for both critic and actor gradients
                with tf.GradientTape(persistent=True) as tape:
                    feat, feat_ = self.get_feature(ss, vvss, cell_state=cell_state, s_and_s_=True)
                    if self.is_continuous:
                        target_mu = self.actor_target_net(feat_)
                        action_target = tf.clip_by_value(target_mu + self.action_noise(), -1, 1)
                        mu = self.actor_net(feat)
                    else:
                        target_logits = self.actor_target_net(feat_)
                        logp_all = tf.nn.log_softmax(target_logits)
                        gumbel_noise = tf.cast(self.gumbel_dist.sample([batch_size, self.a_dim]), dtype=tf.float32)
                        _pi = tf.nn.softmax((logp_all + gumbel_noise) / self.discrete_tau)
                        _pi_true_one_hot = tf.one_hot(tf.argmax(_pi, axis=-1), self.a_dim)
                        _pi_diff = tf.stop_gradient(_pi_true_one_hot - _pi)
                        action_target = _pi_diff + _pi
                        logits = self.actor_net(feat)
                        _pi = tf.nn.softmax(logits)
                        _pi_true_one_hot = tf.one_hot(tf.argmax(logits, axis=-1), self.a_dim, dtype=tf.float32)
                        _pi_diff = tf.stop_gradient(_pi_true_one_hot - _pi)
                        mu = _pi_diff + _pi
                    q1, q2 = self.critic_net(feat, a)
                    q_target = self.critic_target_net.get_min(feat_, action_target)
                    q1_actor = self.critic_net.Q1(feat, mu)
                    dc_r = tf.stop_gradient(r + self.gamma * q_target * (1 - done))
                    td_error1 = q1 - dc_r
                    td_error2 = q2 - dc_r
                    q1_loss = tf.reduce_mean(tf.square(td_error1) * isw)
                    q2_loss = tf.reduce_mean(tf.square(td_error2) * isw)
                    critic_loss = 0.5 * (q1_loss + q2_loss) + crsty_loss
                    actor_loss = -tf.reduce_mean(q1_actor)
                critic_grads = tape.gradient(critic_loss, self.critic_tv)
                self.optimizer_critic.apply_gradients(
                    zip(critic_grads, self.critic_tv)
                )
                actor_grads = tape.gradient(actor_loss, self.actor_tv)
                self.optimizer_actor.apply_gradients(
                    zip(actor_grads, self.actor_tv)
                )
            self.global_step.assign_add(1)
            return (td_error1 + td_error2) / 2, dict([
                ['LOSS/actor_loss', actor_loss],
                ['LOSS/critic_loss', critic_loss],
                ['Statistics/q_min', tf.reduce_min(tf.minimum(q1, q2))],
                ['Statistics/q_mean', tf.reduce_mean(tf.minimum(q1, q2))],
                ['Statistics/q_max', tf.reduce_max(tf.maximum(q1, q2))]
            ])
| [
"rls.modules.DoubleQ",
"tensorflow.shape",
"tensorflow.GradientTape",
"tensorflow.nn.softmax",
"rls.actor_discrete",
"tensorflow.reduce_mean",
"tensorflow_probability.distributions.Categorical",
"rls.actor_dpg",
"rls.critic_q_one",
"tensorflow.square",
"tensorflow.maximum",
"tensorflow.device"... | [((200, 235), 'algos.tf2algos.base.off_policy.make_off_policy_class', 'make_off_policy_class', ([], {'mode': '"""share"""'}), "(mode='share')\n", (221, 235), False, 'from algos.tf2algos.base.off_policy import make_off_policy_class\n'), ((5226, 5269), 'tensorflow.function', 'tf.function', ([], {'experimental_relax_shapes': '(True)'}), '(experimental_relax_shapes=True)\n', (5237, 5269), True, 'import tensorflow as tf\n'), ((8356, 8399), 'tensorflow.function', 'tf.function', ([], {'experimental_relax_shapes': '(True)'}), '(experimental_relax_shapes=True)\n', (8367, 8399), True, 'import tensorflow as tf\n'), ((2450, 2465), 'rls.modules.DoubleQ', 'DoubleQ', (['_q_net'], {}), '(_q_net)\n', (2457, 2465), False, 'from rls.modules import DoubleQ\n'), ((2499, 2514), 'rls.modules.DoubleQ', 'DoubleQ', (['_q_net'], {}), '(_q_net)\n', (2506, 2514), False, 'from rls.modules import DoubleQ\n'), ((2157, 2187), 'tensorflow_probability.distributions.Gumbel', 'tfp.distributions.Gumbel', (['(0)', '(1)'], {}), '(0, 1)\n', (2181, 2187), True, 'import tensorflow_probability as tfp\n'), ((2361, 2423), 'rls.critic_q_one', 'rls.critic_q_one', (['self.feat_dim', 'self.a_dim', "hidden_units['q']"], {}), "(self.feat_dim, self.a_dim, hidden_units['q'])\n", (2377, 2423), False, 'import rls\n'), ((3976, 3998), 'tensorflow.device', 'tf.device', (['self.device'], {}), '(self.device)\n', (3985, 3998), True, 'import tensorflow as tf\n'), ((5391, 5402), 'tensorflow.shape', 'tf.shape', (['a'], {}), '(a)\n', (5399, 5402), True, 'import tensorflow as tf\n'), ((5419, 5441), 'tensorflow.device', 'tf.device', (['self.device'], {}), '(self.device)\n', (5428, 5441), True, 'import tensorflow as tf\n'), ((8532, 8543), 'tensorflow.shape', 'tf.shape', (['a'], {}), '(a)\n', (8540, 8543), True, 'import tensorflow as tf\n'), ((8560, 8582), 'tensorflow.device', 'tf.device', (['self.device'], {}), '(self.device)\n', (8569, 8582), True, 'import tensorflow as tf\n'), ((1539, 1613), 
'rls.actor_dpg', 'rls.actor_dpg', (['self.feat_dim', 'self.a_dim', "hidden_units['actor_continuous']"], {}), "(self.feat_dim, self.a_dim, hidden_units['actor_continuous'])\n", (1552, 1613), False, 'import rls\n'), ((2048, 2125), 'rls.actor_discrete', 'rls.actor_discrete', (['self.feat_dim', 'self.a_dim', "hidden_units['actor_discrete']"], {}), "(self.feat_dim, self.a_dim, hidden_units['actor_discrete'])\n", (2066, 2125), False, 'import rls\n'), ((4333, 4358), 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(1)'}), '(logits, axis=1)\n', (4342, 4358), True, 'import tensorflow as tf\n'), ((4387, 4424), 'tensorflow_probability.distributions.Categorical', 'tfp.distributions.Categorical', (['logits'], {}), '(logits)\n', (4416, 4424), True, 'import tensorflow_probability as tfp\n'), ((7174, 7191), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (7189, 7191), True, 'import tensorflow as tf\n'), ((5508, 5525), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5523, 5525), True, 'import tensorflow as tf\n'), ((6596, 6652), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(r + self.gamma * q_target * (1 - done))'], {}), '(r + self.gamma * q_target * (1 - done))\n', (6612, 6652), True, 'import tensorflow as tf\n'), ((7384, 7405), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (7397, 7405), True, 'import tensorflow as tf\n'), ((7545, 7585), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(_pi_true_one_hot - _pi)'], {}), '(_pi_true_one_hot - _pi)\n', (7561, 7585), True, 'import tensorflow as tf\n'), ((7712, 7736), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['q1_actor'], {}), '(q1_actor)\n', (7726, 7736), True, 'import tensorflow as tf\n'), ((8636, 8668), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (8651, 8668), True, 'import tensorflow as tf\n'), ((10187, 10243), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(r + self.gamma * q_target * (1 - 
done))'], {}), '(r + self.gamma * q_target * (1 - done))\n', (10203, 10243), True, 'import tensorflow as tf\n'), ((1723, 1743), 'numpy.zeros', 'np.zeros', (['self.a_dim'], {}), '(self.a_dim)\n', (1731, 1743), True, 'import numpy as np\n'), ((5969, 6001), 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['target_logits'], {}), '(target_logits)\n', (5986, 6001), True, 'import tensorflow as tf\n'), ((6148, 6208), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['((logp_all + gumbel_noise) / self.discrete_tau)'], {}), '((logp_all + gumbel_noise) / self.discrete_tau)\n', (6161, 6208), True, 'import tensorflow as tf\n'), ((6335, 6375), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(_pi_true_one_hot - _pi)'], {}), '(_pi_true_one_hot - _pi)\n', (6351, 6375), True, 'import tensorflow as tf\n'), ((7456, 7482), 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (7465, 7482), True, 'import tensorflow as tf\n'), ((9162, 9194), 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['target_logits'], {}), '(target_logits)\n', (9179, 9194), True, 'import tensorflow as tf\n'), ((9341, 9401), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['((logp_all + gumbel_noise) / self.discrete_tau)'], {}), '((logp_all + gumbel_noise) / self.discrete_tau)\n', (9354, 9401), True, 'import tensorflow as tf\n'), ((9528, 9568), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(_pi_true_one_hot - _pi)'], {}), '(_pi_true_one_hot - _pi)\n', (9544, 9568), True, 'import tensorflow as tf\n'), ((9708, 9729), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (9721, 9729), True, 'import tensorflow as tf\n'), ((9877, 9917), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(_pi_true_one_hot - _pi)'], {}), '(_pi_true_one_hot - _pi)\n', (9893, 9917), True, 'import tensorflow as tf\n'), ((10581, 10605), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['q1_actor'], {}), '(q1_actor)\n', (10595, 10605), True, 'import tensorflow as tf\n'), ((1779, 1798), 
'numpy.ones', 'np.ones', (['self.a_dim'], {}), '(self.a_dim)\n', (1786, 1798), True, 'import numpy as np\n'), ((1942, 1962), 'numpy.zeros', 'np.zeros', (['self.a_dim'], {}), '(self.a_dim)\n', (1950, 1962), True, 'import numpy as np\n'), ((6263, 6286), 'tensorflow.argmax', 'tf.argmax', (['_pi'], {'axis': '(-1)'}), '(_pi, axis=-1)\n', (6272, 6286), True, 'import tensorflow as tf\n'), ((6782, 6802), 'tensorflow.square', 'tf.square', (['td_error1'], {}), '(td_error1)\n', (6791, 6802), True, 'import tensorflow as tf\n'), ((6855, 6875), 'tensorflow.square', 'tf.square', (['td_error2'], {}), '(td_error2)\n', (6864, 6875), True, 'import tensorflow as tf\n'), ((9456, 9479), 'tensorflow.argmax', 'tf.argmax', (['_pi'], {'axis': '(-1)'}), '(_pi, axis=-1)\n', (9465, 9479), True, 'import tensorflow as tf\n'), ((9784, 9810), 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (9793, 9810), True, 'import tensorflow as tf\n'), ((10373, 10393), 'tensorflow.square', 'tf.square', (['td_error1'], {}), '(td_error1)\n', (10382, 10393), True, 'import tensorflow as tf\n'), ((10446, 10466), 'tensorflow.square', 'tf.square', (['td_error2'], {}), '(td_error2)\n', (10455, 10466), True, 'import tensorflow as tf\n'), ((1976, 1995), 'numpy.ones', 'np.ones', (['self.a_dim'], {}), '(self.a_dim)\n', (1983, 1995), True, 'import numpy as np\n'), ((8165, 8183), 'tensorflow.minimum', 'tf.minimum', (['q1', 'q2'], {}), '(q1, q2)\n', (8175, 8183), True, 'import tensorflow as tf\n'), ((8240, 8258), 'tensorflow.minimum', 'tf.minimum', (['q1', 'q2'], {}), '(q1, q2)\n', (8250, 8258), True, 'import tensorflow as tf\n'), ((8313, 8331), 'tensorflow.maximum', 'tf.maximum', (['q1', 'q2'], {}), '(q1, q2)\n', (8323, 8331), True, 'import tensorflow as tf\n'), ((11235, 11253), 'tensorflow.minimum', 'tf.minimum', (['q1', 'q2'], {}), '(q1, q2)\n', (11245, 11253), True, 'import tensorflow as tf\n'), ((11310, 11328), 'tensorflow.minimum', 'tf.minimum', (['q1', 'q2'], {}), '(q1, q2)\n', 
(11320, 11328), True, 'import tensorflow as tf\n'), ((11383, 11401), 'tensorflow.maximum', 'tf.maximum', (['q1', 'q2'], {}), '(q1, q2)\n', (11393, 11401), True, 'import tensorflow as tf\n')] |
import numpy as np
from preprocess.hierarchical import TreeNodes
from preprocess import utils
from evaluation.metrics import compute_level_loss
from algorithms.MinT import recon_base_forecast
from algorithms.ERM import unbiased_recon
from algorithms import LSTNet, Optim
import torch
import torch.nn as nn
import math
import time
from itertools import chain
import pdb
def evaluate(data, X, Y, model, h, evaluateL2, evaluateL1, batch_size, part, nodes, method, alg, cuda):
    """Score ``model`` on a validation or test split.

    For ``part == 'test'``: base forecasts are reconciled across the node
    hierarchy (ERM / bottom-up / MinT, selected by ``method``) and the
    per-level losses, averaged over samples, are returned as a vector.
    For any other ``part``: the whole split is scored in mini-batches and
    ``(rse, rae, correlation)`` is returned.

    Args:
        data: dataset utility exposing ``get_batches``, ``scale``, ``m``,
            ``rse``, ``rae`` (and ``P``/``h`` for MinT reconciliation).
        X, Y: inputs and targets of the split.
        model: trained torch model.
        h: forecast horizon passed to the level-loss computation.
        evaluateL2, evaluateL1: sum-reduced L2/L1 losses.
        batch_size: mini-batch size for the non-test path.
        part: 'test' for reconciled per-level evaluation, else aggregate.
        nodes: hierarchy description (children counts per level).
        method: reconciliation method name ('erm', 'BU', '*mint*', ...).
        alg: reconciliation algorithm variant forwarded to MinT.
        cuda: move inputs to GPU for the test path when True.
    """
    model.eval()
    total_loss = 0
    total_loss_l1 = 0
    n_samples = 0
    predict = None
    test = None
    if part == 'test':
        output = model(X.cuda()) if cuda else model(X)
        # one row of per-level losses per test sample
        result = np.zeros((output.shape[0], len(nodes) + 1))
        if method == 'erm':
            # ERM reconciles all base forecasts jointly, then scores each
            i = 0
            recon_pred = unbiased_recon(nodes, Y.numpy(), output.cpu().detach().numpy())
            for pred in recon_pred:
                result[i, :] = compute_level_loss(pred.keys(), nodes, pred, Y[i, :].numpy(), len(nodes) + 1, True, h)
                i += 1
        else:
            for i in range(output.shape[0]):
                test_pred = output[i, :].cpu().detach().numpy()
                full_test = Y[i, :].cpu().detach().numpy()
                if method == 'BU':
                    # bottom-up: lift bottom-level values through the summing matrix
                    S = TreeNodes(nodes).get_s_matrix()
                    full_test = np.dot(S, full_test)
                    test_pred = np.dot(S, test_pred)
                pred_dict = dict(zip(TreeNodes(nodes).col_order(), test_pred))
                if 'mint' in method:
                    # MinT-style trace-minimisation reconciliation per sample
                    pred_dict = recon_base_forecast(pred_dict.keys(), nodes, pred_dict, model,
                                                     data, data.P + data.h - 1, method, alg)
                result[i, :] = compute_level_loss(pred_dict.keys(), nodes, pred_dict, full_test, len(nodes) + 1, True, h)
        result = result.mean(axis=0)
        return result
    # validation path: accumulate sum-reduced losses in original data units
    for X, Y in data.get_batches(X, Y, batch_size, False):
        output = model(X)
        if predict is None:
            predict = output
            test = Y
        else:
            predict = torch.cat((predict, output))
            test = torch.cat((test, Y))
        # undo per-column normalisation before measuring the loss
        scale = data.scale.expand(output.size(0), data.m)
        total_loss += evaluateL2(output * scale, Y * scale).data.cpu().numpy()
        total_loss_l1 += evaluateL1(output * scale, Y * scale).data.cpu().numpy()
        n_samples += (output.size(0) * data.m)
    # relative errors, normalised by precomputed denominators on ``data``
    # (presumably the RSE/RAE scale terms -- see the Data utility)
    rse = math.sqrt(total_loss / n_samples) / data.rse
    rae = (total_loss_l1 / n_samples) / data.rae
    predict = predict.data.cpu().numpy()
    Ytest = test.data.cpu().numpy()
    sigma_p = (predict).std(axis=0)
    sigma_g = (Ytest).std(axis=0)
    mean_p = predict.mean(axis=0)
    mean_g = Ytest.mean(axis=0)
    # mean per-column Pearson correlation over columns with nonzero variance
    index = (sigma_g != 0)
    correlation = ((predict - mean_p) * (Ytest - mean_g)).mean(axis=0) / (sigma_p * sigma_g)
    correlation = (correlation[index]).mean()
    return rse, rae, correlation
def train(data, X, Y, model, criterion, optim, batch_size):
    """Run one training epoch and return the average per-element loss.

    Iterates shuffled mini-batches from ``data.get_batches``, rescales model
    outputs and targets by ``data.scale`` before applying ``criterion``, and
    steps the optimizer after every batch.

    Args:
        data: dataset utility exposing ``get_batches``, ``scale`` and ``m``.
        X, Y: full training inputs and targets.
        model: torch module being optimized.
        criterion: sum-reduced loss (the total is divided by the number of
            predicted scalar values below).
        optim: optimizer wrapper whose ``step()`` applies the update.
        batch_size: mini-batch size.

    Returns:
        Total accumulated loss divided by the number of predicted values.
    """
    model.train()
    total_loss = 0
    n_samples = 0
    for X, Y in data.get_batches(X, Y, batch_size, True):
        model.zero_grad()
        output = model(X)
        # undo per-column normalisation so the loss is in data units
        scale = data.scale.expand(output.size(0), data.m)
        loss = criterion(output * scale, Y * scale)
        loss.backward()
        optim.step()  # return value (gradient norm) intentionally ignored
        total_loss += loss.data.cpu().numpy()
        n_samples += (output.size(0) * data.m)
    return total_loss / n_samples
def fit_and_pred(Data, model, h, num_epoch, batch_size, params, optim, TRAINING_METHOD, criterion, evaluateL2, evaluateL1,
                 nodes, verbose, cuda):
    """Train ``model`` for ``num_epoch`` epochs, checkpointing on best validation loss.

    After every epoch the model is scored on the validation split; whenever
    the validation RSE improves, the full model is pickled to
    ./save/LSTNet.pt.

    Args:
        Data: dataset utility holding ``train``/``valid`` splits and scaling.
        model: torch model to fit.
        h: forecast horizon used by ``evaluate``.
        num_epoch: number of passes over the training data.
        batch_size: mini-batch size for both training and validation.
        params: dict with reconciliation settings (uses ``params['alg']``).
        optim: optimizer wrapper.
        TRAINING_METHOD: hierarchy-reconciliation method name.
        criterion, evaluateL2, evaluateL1: loss functions.
        nodes: hierarchy description.
        verbose: when True, print per-epoch statistics.
        cuda: forwarded to ``evaluate`` for GPU placement.
    """
    best_val = 10000000
    for epoch in range(1, num_epoch + 1):
        epoch_start_time = time.time()
        # Fix: honour the ``batch_size`` argument (it was hard-coded to 64
        # here, while validation below already used ``batch_size``).
        train_loss = train(Data, Data.train[0], Data.train[1], model, criterion, optim, batch_size)
        val_loss, val_rae, val_corr = evaluate(Data, Data.valid[0], Data.valid[1], model, h, evaluateL2,
                                               evaluateL1, batch_size, 'valid', nodes,
                                               TRAINING_METHOD, params['alg'], cuda)
        if verbose:
            print(
                '| end of epoch {:3d} | time: {:5.2f}s | train_loss {:5.4f} | valid rse {:5.4f} | valid rae '
                '{:5.4f} | valid corr {:5.4f}'.format(
                    epoch, (time.time() - epoch_start_time), train_loss, val_loss, val_rae, val_corr))
        # persist the model whenever validation loss improves
        if val_loss < best_val:
            with open('./save/LSTNet.pt', 'wb') as f:
                torch.save(model, f)
            best_val = val_loss
def train_lstn(TRAINING_METHOD, nodes, data, cuda, h, num_epoch, batch_size, params, verbose):
    """End-to-end LSTNet pipeline: prepare data, fit, and score on the test split.

    Builds a ``Data_utility`` (60/20 train/valid split, window 24*7, horizon
    ``h``), trains an ``LSTNet`` model with Adam via ``fit_and_pred``, reloads
    the best checkpoint from ./save/LSTNet.pt, and returns the reconciled
    per-level test losses from ``evaluate``.

    Args:
        TRAINING_METHOD: reconciliation method; 'BU' restricts the input data
            to the bottom-level series only.
        nodes: hierarchy description (children counts per level).
        data: full dataframe of all series, columns named by integer index.
        cuda: run on GPU when True.
        h: forecast horizon.
        num_epoch: training epochs.
        batch_size: mini-batch size.
        params: dict with reconciliation settings (uses ``params['alg']``).
        verbose: print per-epoch statistics when True.

    Returns:
        Vector of per-level test losses (one entry per hierarchy level).
    """
    if TRAINING_METHOD == 'BU':
        # bottom-up trains only on the leaves: columns [start, end) are the
        # bottom level, where aggregate levels occupy the first columns
        start = sum(list(chain(*nodes[:-1]))) + 1
        end = sum(list(chain(*nodes))) + 1
        feat_list = [str(i) for i in range(start, end)]
        data = data[feat_list]
    Data = utils.Data_utility(0.6, 0.2, cuda, h, 24 * 7, data, TRAINING_METHOD, normalize=2)
    model = LSTNet.Model(Data)
    criterion = nn.MSELoss(size_average=False)
    evaluateL2 = nn.MSELoss(size_average=False)
    evaluateL1 = nn.L1Loss(size_average=False)
    if cuda:
        model.cuda()
        criterion = criterion.cuda()
        evaluateL1 = evaluateL1.cuda()
        evaluateL2 = evaluateL2.cuda()
    optim = Optim.Optim(model.parameters(), 'adam', 1e-3, 10.)
    # optim = torch.optim.Adam(model.parameters(), lr=1e-3)
    fit_and_pred(Data, model, h, num_epoch, batch_size, params, optim, TRAINING_METHOD, criterion, evaluateL2,
                 evaluateL1, nodes, verbose, cuda)
    # reload the best-on-validation checkpoint written by fit_and_pred
    with open('./save/LSTNet.pt', 'rb') as f:
        model = torch.load(f)
    multilevel_loss = evaluate(Data, Data.test[0], Data.test[1], model, h, evaluateL2, evaluateL1, batch_size,
                               'test', nodes, TRAINING_METHOD, params['alg'], cuda)
    return multilevel_loss
| [
"itertools.chain",
"algorithms.LSTNet.Model",
"torch.nn.L1Loss",
"torch.load",
"math.sqrt",
"preprocess.hierarchical.TreeNodes",
"torch.nn.MSELoss",
"numpy.dot",
"torch.save",
"time.time",
"preprocess.utils.Data_utility",
"torch.cat"
] | [((4901, 4986), 'preprocess.utils.Data_utility', 'utils.Data_utility', (['(0.6)', '(0.2)', 'cuda', 'h', '(24 * 7)', 'data', 'TRAINING_METHOD'], {'normalize': '(2)'}), '(0.6, 0.2, cuda, h, 24 * 7, data, TRAINING_METHOD,\n normalize=2)\n', (4919, 4986), False, 'from preprocess import utils\n'), ((4995, 5013), 'algorithms.LSTNet.Model', 'LSTNet.Model', (['Data'], {}), '(Data)\n', (5007, 5013), False, 'from algorithms import LSTNet, Optim\n'), ((5030, 5060), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'size_average': '(False)'}), '(size_average=False)\n', (5040, 5060), True, 'import torch.nn as nn\n'), ((5078, 5108), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'size_average': '(False)'}), '(size_average=False)\n', (5088, 5108), True, 'import torch.nn as nn\n'), ((5126, 5155), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {'size_average': '(False)'}), '(size_average=False)\n', (5135, 5155), True, 'import torch.nn as nn\n'), ((2448, 2481), 'math.sqrt', 'math.sqrt', (['(total_loss / n_samples)'], {}), '(total_loss / n_samples)\n', (2457, 2481), False, 'import math\n'), ((3735, 3746), 'time.time', 'time.time', ([], {}), '()\n', (3744, 3746), False, 'import time\n'), ((5654, 5667), 'torch.load', 'torch.load', (['f'], {}), '(f)\n', (5664, 5667), False, 'import torch\n'), ((2102, 2130), 'torch.cat', 'torch.cat', (['(predict, output)'], {}), '((predict, output))\n', (2111, 2130), False, 'import torch\n'), ((2150, 2170), 'torch.cat', 'torch.cat', (['(test, Y)'], {}), '((test, Y))\n', (2159, 2170), False, 'import torch\n'), ((4526, 4546), 'torch.save', 'torch.save', (['model', 'f'], {}), '(model, f)\n', (4536, 4546), False, 'import torch\n'), ((1343, 1363), 'numpy.dot', 'np.dot', (['S', 'full_test'], {}), '(S, full_test)\n', (1349, 1363), True, 'import numpy as np\n'), ((1396, 1416), 'numpy.dot', 'np.dot', (['S', 'test_pred'], {}), '(S, test_pred)\n', (1402, 1416), True, 'import numpy as np\n'), ((4734, 4752), 'itertools.chain', 'chain', (['*nodes[:-1]'], {}), '(*nodes[:-1])\n', (4739, 
4752), False, 'from itertools import chain\n'), ((4782, 4795), 'itertools.chain', 'chain', (['*nodes'], {}), '(*nodes)\n', (4787, 4795), False, 'from itertools import chain\n'), ((4349, 4360), 'time.time', 'time.time', ([], {}), '()\n', (4358, 4360), False, 'import time\n'), ((1279, 1295), 'preprocess.hierarchical.TreeNodes', 'TreeNodes', (['nodes'], {}), '(nodes)\n', (1288, 1295), False, 'from preprocess.hierarchical import TreeNodes\n'), ((1454, 1470), 'preprocess.hierarchical.TreeNodes', 'TreeNodes', (['nodes'], {}), '(nodes)\n', (1463, 1470), False, 'from preprocess.hierarchical import TreeNodes\n')] |
import cPickle as pickle
import numpy as np
import theano
# Global normalisation constants of the training corpus: generated audio is
# de-normalised via sample * _std + _mean before int16 quantisation.
_mean = 0.0035805809921434142
_std = 542.48824133746177
def gen_phone(mdl, phones, noise_level):
terr_monitor = mdl.monitor.channels['test_objective']
terr = min(terr_monitor.val_record)
X = theano.tensor.dmatrix('X')
P = theano.tensor.dmatrix('P')
y = mdl.fprop([X,P])
predict = theano.function([X, P], y)
# Let's start with a all zero vector, then use the prediction to populate the next sample
duration = 3
fs = 16000
frame_length = mdl.input_space.components[0].dim
x0 = np.asmatrix(np.zeros((1,duration*fs)))
# phones = np.load('test_phones.npy')
phone_code = np.asmatrix(np.zeros((duration*fs,3*62)))
for pi, p in enumerate(phones):
phone_code[pi,[p, p+62, p+2*62]] = 1 # code for 'aw'
for k in np.arange(frame_length,duration*fs):
frame = x0[:,k-frame_length:k]
x0[0,k] = np.random.normal(predict(frame + np.random.normal(0, noise_level[0]*np.sqrt(terr), frame.shape), phone_code[k]), noise_level[1]*np.sqrt(terr))
x0 = x0.T
x0_scaled = x0*_std + _mean
x0a = np.asarray(x0_scaled, dtype=np.int16)
return x0a
if __name__ == "__main__":
from sys import argv
from scipy.io import wavfile
mdl = pickle.load(open(argv[1]))
x0a = gen_phone(mdl)
wavfile.write(argv[2], 16000, x0a)
| [
"numpy.sqrt",
"theano.function",
"numpy.asarray",
"numpy.zeros",
"scipy.io.wavfile.write",
"theano.tensor.dmatrix",
"numpy.arange"
] | [((264, 290), 'theano.tensor.dmatrix', 'theano.tensor.dmatrix', (['"""X"""'], {}), "('X')\n", (285, 290), False, 'import theano\n'), ((299, 325), 'theano.tensor.dmatrix', 'theano.tensor.dmatrix', (['"""P"""'], {}), "('P')\n", (320, 325), False, 'import theano\n'), ((365, 391), 'theano.function', 'theano.function', (['[X, P]', 'y'], {}), '([X, P], y)\n', (380, 391), False, 'import theano\n'), ((834, 872), 'numpy.arange', 'np.arange', (['frame_length', '(duration * fs)'], {}), '(frame_length, duration * fs)\n', (843, 872), True, 'import numpy as np\n'), ((1129, 1166), 'numpy.asarray', 'np.asarray', (['x0_scaled'], {'dtype': 'np.int16'}), '(x0_scaled, dtype=np.int16)\n', (1139, 1166), True, 'import numpy as np\n'), ((1347, 1381), 'scipy.io.wavfile.write', 'wavfile.write', (['argv[2]', '(16000)', 'x0a'], {}), '(argv[2], 16000, x0a)\n', (1360, 1381), False, 'from scipy.io import wavfile\n'), ((594, 622), 'numpy.zeros', 'np.zeros', (['(1, duration * fs)'], {}), '((1, duration * fs))\n', (602, 622), True, 'import numpy as np\n'), ((692, 725), 'numpy.zeros', 'np.zeros', (['(duration * fs, 3 * 62)'], {}), '((duration * fs, 3 * 62))\n', (700, 725), True, 'import numpy as np\n'), ((1056, 1069), 'numpy.sqrt', 'np.sqrt', (['terr'], {}), '(terr)\n', (1063, 1069), True, 'import numpy as np\n'), ((996, 1009), 'numpy.sqrt', 'np.sqrt', (['terr'], {}), '(terr)\n', (1003, 1009), True, 'import numpy as np\n')] |
'''
https://github.com/christiancosgrove/pytorch-spectral-normalization-gan
chainer: https://github.com/pfnet-research/sngan_projection
'''
# ResNet generator and discriminator
import torch
from torch import nn
import torch.nn.functional as F
# from spectral_normalization import SpectralNorm
import numpy as np
from torch.nn.utils import spectral_norm
class ConditionalBatchNorm2d(nn.Module):
def __init__(self, num_features, dim_embed):
super().__init__()
self.num_features = num_features
self.bn = nn.BatchNorm2d(num_features, affine=False)
self.embed_gamma = nn.Linear(dim_embed, num_features, bias=False)
self.embed_beta = nn.Linear(dim_embed, num_features, bias=False)
def forward(self, x, y):
out = self.bn(x)
gamma = self.embed_gamma(y).view(-1, self.num_features, 1, 1)
beta = self.embed_beta(y).view(-1, self.num_features, 1, 1)
out = out + out*gamma + beta
return out
class ResBlockGenerator(nn.Module):
def __init__(self, in_channels, out_channels, dim_embed, bias=True):
super(ResBlockGenerator, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1, bias=bias)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1, bias=bias)
nn.init.xavier_uniform_(self.conv1.weight.data, np.sqrt(2))
nn.init.xavier_uniform_(self.conv2.weight.data, np.sqrt(2))
self.condbn1 = ConditionalBatchNorm2d(in_channels, dim_embed)
self.condbn2 = ConditionalBatchNorm2d(out_channels, dim_embed)
self.relu = nn.ReLU()
self.upsample = nn.Upsample(scale_factor=2)
# unconditional case
self.model = nn.Sequential(
nn.BatchNorm2d(in_channels),
nn.ReLU(),
nn.Upsample(scale_factor=2),
self.conv1,
nn.BatchNorm2d(out_channels),
nn.ReLU(),
self.conv2
)
self.bypass_conv = nn.Conv2d(in_channels,out_channels, 1, 1, padding=0, bias=bias) #h=h
nn.init.xavier_uniform_(self.bypass_conv.weight.data, 1.0)
self.bypass = nn.Sequential(
nn.Upsample(scale_factor=2),
self.bypass_conv,
)
def forward(self, x, y):
if y is not None:
out = self.condbn1(x, y)
out = self.relu(out)
out = self.upsample(out)
out = self.conv1(out)
out = self.condbn2(out, y)
out = self.relu(out)
out = self.conv2(out)
out = out + self.bypass(x)
else:
out = self.model(x) + self.bypass(x)
return out
class ResBlockDiscriminator(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(ResBlockDiscriminator, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1, bias=True)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1, bias=True)
nn.init.xavier_uniform_(self.conv1.weight.data, np.sqrt(2))
nn.init.xavier_uniform_(self.conv2.weight.data, np.sqrt(2))
if stride == 1:
self.model = nn.Sequential(
nn.ReLU(),
spectral_norm(self.conv1),
nn.ReLU(),
spectral_norm(self.conv2)
)
else:
self.model = nn.Sequential(
nn.ReLU(),
spectral_norm(self.conv1),
nn.ReLU(),
spectral_norm(self.conv2),
nn.AvgPool2d(2, stride=stride, padding=0)
)
self.bypass_conv = nn.Conv2d(in_channels,out_channels, 1, 1, padding=0, bias=True)
nn.init.xavier_uniform_(self.bypass_conv.weight.data, 1.0)
if stride != 1:
self.bypass = nn.Sequential(
spectral_norm(self.bypass_conv),
nn.AvgPool2d(2, stride=stride, padding=0)
)
else:
self.bypass = nn.Sequential(
spectral_norm(self.bypass_conv),
)
def forward(self, x):
return self.model(x) + self.bypass(x)
# special ResBlock just for the first layer of the discriminator
class FirstResBlockDiscriminator(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(FirstResBlockDiscriminator, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1, bias=True)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1, bias=True)
self.bypass_conv = nn.Conv2d(in_channels, out_channels, 1, 1, padding=0, bias=True)
nn.init.xavier_uniform_(self.conv1.weight.data, np.sqrt(2))
nn.init.xavier_uniform_(self.conv2.weight.data, np.sqrt(2))
nn.init.xavier_uniform_(self.bypass_conv.weight.data, 1.0)
# we don't want to apply ReLU activation to raw image before convolution transformation.
self.model = nn.Sequential(
spectral_norm(self.conv1),
nn.ReLU(),
spectral_norm(self.conv2),
nn.AvgPool2d(2)
)
self.bypass = nn.Sequential(
nn.AvgPool2d(2),
spectral_norm(self.bypass_conv),
)
def forward(self, x):
return self.model(x) + self.bypass(x)
class CcGAN_Generator(nn.Module):
def __init__(self, z_dim=128, nc=3, gene_ch=128, dim_embed=128):
super(CcGAN_Generator, self).__init__()
self.z_dim = z_dim
self.dim_embed = dim_embed
self.gene_ch = gene_ch
self.dense = nn.Linear(self.z_dim, 4 * 4 * gene_ch*16, bias=True)
self.final = nn.Conv2d(gene_ch, nc, 3, stride=1, padding=1, bias=True)
nn.init.xavier_uniform_(self.dense.weight.data, 1.)
nn.init.xavier_uniform_(self.final.weight.data, 1.)
self.genblock0 = ResBlockGenerator(gene_ch*16, gene_ch*8, dim_embed=dim_embed) #4--->8
self.genblock1 = ResBlockGenerator(gene_ch*8, gene_ch*4, dim_embed=dim_embed) #8--->16
self.genblock2 = ResBlockGenerator(gene_ch*4, gene_ch*2, dim_embed=dim_embed) #16--->32
self.genblock3 = ResBlockGenerator(gene_ch*2, gene_ch, dim_embed=dim_embed) #32--->64
self.final = nn.Sequential(
nn.BatchNorm2d(gene_ch),
nn.ReLU(),
self.final,
nn.Tanh()
)
def forward(self, z, y): #y is embedded in the feature space
z = z.view(z.size(0), z.size(1))
out = self.dense(z)
out = out.view(-1, self.gene_ch*16, 4, 4)
out = self.genblock0(out, y)
out = self.genblock1(out, y)
out = self.genblock2(out, y)
out = self.genblock3(out, y)
out = self.final(out)
return out
class CcGAN_Discriminator(nn.Module):
def __init__(self, nc=3, disc_ch=128, dim_embed=128):
super(CcGAN_Discriminator, self).__init__()
self.dim_embed = dim_embed
self.disc_ch = disc_ch
self.discblock1 = nn.Sequential(
FirstResBlockDiscriminator(nc, disc_ch, stride=2), #64--->32
ResBlockDiscriminator(disc_ch, disc_ch*2, stride=2), #32--->16
ResBlockDiscriminator(disc_ch*2, disc_ch*4, stride=2), #16--->8
)
self.discblock2 = ResBlockDiscriminator(disc_ch*4, disc_ch*8, stride=2) #8--->4
self.discblock3 = nn.Sequential(
ResBlockDiscriminator(disc_ch*8, disc_ch*16, stride=1), #4--->4;
nn.ReLU(),
)
self.linear1 = nn.Linear(disc_ch*16*4*4, 1, bias=True)
nn.init.xavier_uniform_(self.linear1.weight.data, 1.)
self.linear1 = spectral_norm(self.linear1)
self.linear2 = nn.Linear(self.dim_embed, disc_ch*16*4*4, bias=False)
nn.init.xavier_uniform_(self.linear2.weight.data, 1.)
self.linear2 = spectral_norm(self.linear2)
def forward(self, x, y):
output = self.discblock1(x)
output = self.discblock2(output)
output = self.discblock3(output)
output = output.view(-1, self.disc_ch*16*4*4)
output_y = torch.sum(output*self.linear2(y), 1, keepdim=True)
output = self.linear1(output) + output_y
return output.view(-1, 1)
if __name__ == "__main__":
netG = CcGAN_Generator(z_dim=256, gene_ch=128, dim_embed=128).cuda()
netD = CcGAN_Discriminator(disc_ch=128, dim_embed=128).cuda()
# netG = nn.DataParallel(netG)
# netD = nn.DataParallel(netD)
N=4
z = torch.randn(N, 256).cuda()
y = torch.randn(N, 128).cuda()
x = netG(z,y)
o = netD(x,y)
print(x.size())
print(o.size())
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"numpy.sqrt",
"torch.nn.Tanh",
"torch.nn.init.xavier_uniform_",
"torch.nn.Conv2d",
"torch.nn.utils.spectral_norm",
"torch.nn.Upsample",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.randn"
] | [((535, 577), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['num_features'], {'affine': '(False)'}), '(num_features, affine=False)\n', (549, 577), False, 'from torch import nn\n'), ((606, 652), 'torch.nn.Linear', 'nn.Linear', (['dim_embed', 'num_features'], {'bias': '(False)'}), '(dim_embed, num_features, bias=False)\n', (615, 652), False, 'from torch import nn\n'), ((679, 725), 'torch.nn.Linear', 'nn.Linear', (['dim_embed', 'num_features'], {'bias': '(False)'}), '(dim_embed, num_features, bias=False)\n', (688, 725), False, 'from torch import nn\n'), ((1161, 1225), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '(3)', '(1)'], {'padding': '(1)', 'bias': 'bias'}), '(in_channels, out_channels, 3, 1, padding=1, bias=bias)\n', (1170, 1225), False, 'from torch import nn\n'), ((1247, 1312), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_channels', 'out_channels', '(3)', '(1)'], {'padding': '(1)', 'bias': 'bias'}), '(out_channels, out_channels, 3, 1, padding=1, bias=bias)\n', (1256, 1312), False, 'from torch import nn\n'), ((1611, 1620), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1618, 1620), False, 'from torch import nn\n'), ((1645, 1672), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)'}), '(scale_factor=2)\n', (1656, 1672), False, 'from torch import nn\n'), ((1998, 2062), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '(1)', '(1)'], {'padding': '(0)', 'bias': 'bias'}), '(in_channels, out_channels, 1, 1, padding=0, bias=bias)\n', (2007, 2062), False, 'from torch import nn\n'), ((2075, 2133), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.bypass_conv.weight.data', '(1.0)'], {}), '(self.bypass_conv.weight.data, 1.0)\n', (2098, 2133), False, 'from torch import nn\n'), ((2856, 2920), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '(3)', '(1)'], {'padding': '(1)', 'bias': '(True)'}), '(in_channels, out_channels, 3, 1, padding=1, bias=True)\n', (2865, 2920), False, 'from torch import 
nn\n'), ((2942, 3007), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_channels', 'out_channels', '(3)', '(1)'], {'padding': '(1)', 'bias': '(True)'}), '(out_channels, out_channels, 3, 1, padding=1, bias=True)\n', (2951, 3007), False, 'from torch import nn\n'), ((3664, 3728), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '(1)', '(1)'], {'padding': '(0)', 'bias': '(True)'}), '(in_channels, out_channels, 1, 1, padding=0, bias=True)\n', (3673, 3728), False, 'from torch import nn\n'), ((3736, 3794), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.bypass_conv.weight.data', '(1.0)'], {}), '(self.bypass_conv.weight.data, 1.0)\n', (3759, 3794), False, 'from torch import nn\n'), ((4426, 4490), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '(3)', '(1)'], {'padding': '(1)', 'bias': '(True)'}), '(in_channels, out_channels, 3, 1, padding=1, bias=True)\n', (4435, 4490), False, 'from torch import nn\n'), ((4512, 4577), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_channels', 'out_channels', '(3)', '(1)'], {'padding': '(1)', 'bias': '(True)'}), '(out_channels, out_channels, 3, 1, padding=1, bias=True)\n', (4521, 4577), False, 'from torch import nn\n'), ((4605, 4669), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '(1)', '(1)'], {'padding': '(0)', 'bias': '(True)'}), '(in_channels, out_channels, 1, 1, padding=0, bias=True)\n', (4614, 4669), False, 'from torch import nn\n'), ((4814, 4872), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.bypass_conv.weight.data', '(1.0)'], {}), '(self.bypass_conv.weight.data, 1.0)\n', (4837, 4872), False, 'from torch import nn\n'), ((5613, 5667), 'torch.nn.Linear', 'nn.Linear', (['self.z_dim', '(4 * 4 * gene_ch * 16)'], {'bias': '(True)'}), '(self.z_dim, 4 * 4 * gene_ch * 16, bias=True)\n', (5622, 5667), False, 'from torch import nn\n'), ((5687, 5744), 'torch.nn.Conv2d', 'nn.Conv2d', (['gene_ch', 'nc', '(3)'], {'stride': '(1)', 'padding': '(1)', 'bias': '(True)'}), 
'(gene_ch, nc, 3, stride=1, padding=1, bias=True)\n', (5696, 5744), False, 'from torch import nn\n'), ((5753, 5805), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.dense.weight.data', '(1.0)'], {}), '(self.dense.weight.data, 1.0)\n', (5776, 5805), False, 'from torch import nn\n'), ((5813, 5865), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.final.weight.data', '(1.0)'], {}), '(self.final.weight.data, 1.0)\n', (5836, 5865), False, 'from torch import nn\n'), ((7538, 7583), 'torch.nn.Linear', 'nn.Linear', (['(disc_ch * 16 * 4 * 4)', '(1)'], {'bias': '(True)'}), '(disc_ch * 16 * 4 * 4, 1, bias=True)\n', (7547, 7583), False, 'from torch import nn\n'), ((7586, 7640), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.linear1.weight.data', '(1.0)'], {}), '(self.linear1.weight.data, 1.0)\n', (7609, 7640), False, 'from torch import nn\n'), ((7663, 7690), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.linear1'], {}), '(self.linear1)\n', (7676, 7690), False, 'from torch.nn.utils import spectral_norm\n'), ((7714, 7773), 'torch.nn.Linear', 'nn.Linear', (['self.dim_embed', '(disc_ch * 16 * 4 * 4)'], {'bias': '(False)'}), '(self.dim_embed, disc_ch * 16 * 4 * 4, bias=False)\n', (7723, 7773), False, 'from torch import nn\n'), ((7776, 7830), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.linear2.weight.data', '(1.0)'], {}), '(self.linear2.weight.data, 1.0)\n', (7799, 7830), False, 'from torch import nn\n'), ((7853, 7880), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.linear2'], {}), '(self.linear2)\n', (7866, 7880), False, 'from torch.nn.utils import spectral_norm\n'), ((1369, 1379), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1376, 1379), True, 'import numpy as np\n'), ((1437, 1447), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1444, 1447), True, 'import numpy as np\n'), ((1751, 1778), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['in_channels'], {}), '(in_channels)\n', 
(1765, 1778), False, 'from torch import nn\n'), ((1792, 1801), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1799, 1801), False, 'from torch import nn\n'), ((1815, 1842), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)'}), '(scale_factor=2)\n', (1826, 1842), False, 'from torch import nn\n'), ((1880, 1908), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (1894, 1908), False, 'from torch import nn\n'), ((1922, 1931), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1929, 1931), False, 'from torch import nn\n'), ((2183, 2210), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)'}), '(scale_factor=2)\n', (2194, 2210), False, 'from torch import nn\n'), ((3064, 3074), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3071, 3074), True, 'import numpy as np\n'), ((3132, 3142), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3139, 3142), True, 'import numpy as np\n'), ((4726, 4736), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4733, 4736), True, 'import numpy as np\n'), ((4794, 4804), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4801, 4804), True, 'import numpy as np\n'), ((5019, 5044), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.conv1'], {}), '(self.conv1)\n', (5032, 5044), False, 'from torch.nn.utils import spectral_norm\n'), ((5058, 5067), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5065, 5067), False, 'from torch import nn\n'), ((5081, 5106), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.conv2'], {}), '(self.conv2)\n', (5094, 5106), False, 'from torch.nn.utils import spectral_norm\n'), ((5120, 5135), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(2)'], {}), '(2)\n', (5132, 5135), False, 'from torch import nn\n'), ((5199, 5214), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(2)'], {}), '(2)\n', (5211, 5214), False, 'from torch import nn\n'), ((5228, 5259), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.bypass_conv'], {}), '(self.bypass_conv)\n', (5241, 5259), False, 
'from torch.nn.utils import spectral_norm\n'), ((6295, 6318), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['gene_ch'], {}), '(gene_ch)\n', (6309, 6318), False, 'from torch import nn\n'), ((6332, 6341), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6339, 6341), False, 'from torch import nn\n'), ((6379, 6388), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (6386, 6388), False, 'from torch import nn\n'), ((7493, 7502), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7500, 7502), False, 'from torch import nn\n'), ((8495, 8514), 'torch.randn', 'torch.randn', (['N', '(256)'], {}), '(N, 256)\n', (8506, 8514), False, 'import torch\n'), ((8530, 8549), 'torch.randn', 'torch.randn', (['N', '(128)'], {}), '(N, 128)\n', (8541, 8549), False, 'import torch\n'), ((3225, 3234), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3232, 3234), False, 'from torch import nn\n'), ((3252, 3277), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.conv1'], {}), '(self.conv1)\n', (3265, 3277), False, 'from torch.nn.utils import spectral_norm\n'), ((3295, 3304), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3302, 3304), False, 'from torch import nn\n'), ((3322, 3347), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.conv2'], {}), '(self.conv2)\n', (3335, 3347), False, 'from torch.nn.utils import spectral_norm\n'), ((3436, 3445), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3443, 3445), False, 'from torch import nn\n'), ((3463, 3488), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.conv1'], {}), '(self.conv1)\n', (3476, 3488), False, 'from torch.nn.utils import spectral_norm\n'), ((3506, 3515), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3513, 3515), False, 'from torch import nn\n'), ((3533, 3558), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.conv2'], {}), '(self.conv2)\n', (3546, 3558), False, 'from torch.nn.utils import spectral_norm\n'), ((3576, 3617), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(2)'], {'stride': 'stride', 'padding': '(0)'}), '(2, 
stride=stride, padding=0)\n', (3588, 3617), False, 'from torch import nn\n'), ((3876, 3907), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.bypass_conv'], {}), '(self.bypass_conv)\n', (3889, 3907), False, 'from torch.nn.utils import spectral_norm\n'), ((3925, 3966), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(2)'], {'stride': 'stride', 'padding': '(0)'}), '(2, stride=stride, padding=0)\n', (3937, 3966), False, 'from torch import nn\n'), ((4052, 4083), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['self.bypass_conv'], {}), '(self.bypass_conv)\n', (4065, 4083), False, 'from torch.nn.utils import spectral_norm\n')] |
import logging, numpy as np, rpxdock as rp
log = logging.getLogger(__name__)
def filter_redundancy(xforms, body, scores=None, categories=None, every_nth=10, **kw):
kw = rp.Bunch(kw)
if scores is None:
scores = np.repeat(0, len(xforms))
if len(scores) == 0: return []
if categories is None:
categories = np.repeat(0, len(scores))
ibest = np.argsort(-scores)
if kw.max_bb_redundancy <= 0:
return ibest
nclust = kw.max_cluster if kw.max_cluster else int(kw.beam_size) // every_nth
if xforms.ndim == 3:
crd = xforms[ibest[:nclust], None] @ body.cen[::every_nth, :, None]
else:
crd0 = xforms[ibest[:nclust], 0, None] @ body[0].cen[::every_nth, :, None]
crd1 = xforms[ibest[:nclust], 1, None] @ body[1].cen[::every_nth, :, None]
crd = np.concatenate([crd0, crd1], axis=1)
ncen = crd.shape[1]
crd = crd.reshape(-1, 4 * ncen)
# sneaky way to do categories
crd += (categories[ibest[:nclust]] * 1_000_000)[:, None]
keep = rp.cluster.cookie_cutter(crd, kw.max_bb_redundancy * np.sqrt(ncen))
assert len(np.unique(keep)) == len(keep)
log.info(f'filter_redundancy {kw.max_bb_redundancy}A Nmax {nclust} ' +
f'Ntotal {len(ibest)} Nkeep {len(keep)}')
return ibest[keep]
| [
"logging.getLogger",
"numpy.sqrt",
"numpy.unique",
"numpy.argsort",
"numpy.concatenate",
"rpxdock.Bunch"
] | [((50, 77), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (67, 77), False, 'import logging, numpy as np, rpxdock as rp\n'), ((174, 186), 'rpxdock.Bunch', 'rp.Bunch', (['kw'], {}), '(kw)\n', (182, 186), True, 'import logging, numpy as np, rpxdock as rp\n'), ((368, 387), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (378, 387), True, 'import logging, numpy as np, rpxdock as rp\n'), ((804, 840), 'numpy.concatenate', 'np.concatenate', (['[crd0, crd1]'], {'axis': '(1)'}), '([crd0, crd1], axis=1)\n', (818, 840), True, 'import logging, numpy as np, rpxdock as rp\n'), ((1058, 1071), 'numpy.sqrt', 'np.sqrt', (['ncen'], {}), '(ncen)\n', (1065, 1071), True, 'import logging, numpy as np, rpxdock as rp\n'), ((1087, 1102), 'numpy.unique', 'np.unique', (['keep'], {}), '(keep)\n', (1096, 1102), True, 'import logging, numpy as np, rpxdock as rp\n')] |
import scipy
import scipy.ndimage
import os
import numpy as np
import random
import sys
import tensorflow as tf
from PIL import Image
from make_gather_conv import make_gather_conv
BATCH_SIZE = 16
PIXEL_GEOM_P = 0.2
MAX_OFFSET = 40
IMAGE_SIZE = 256
GATHER_SIZE = 7
ADAM_learning_rate = 0.001
INPUT_CHANNELS = 6
OUT_COMPARE_SIZE = 16
def read_image(fname):
data = scipy.ndimage.imread(fname)
data = data.astype(np.float32)/255.0
print(data.shape)
return data
def save_rgb_image(img_data,filename):
img_data = (img_data*255.0).astype(np.uint8)
img = Image.fromarray(img_data,mode="RGB")
img.save(filename)
def get_paths(folder):
filenames = os.listdir(folder)
jpg_filenames = [fname for fname in filenames if ".jpg" in fname]
paths = [os.path.join(folder,fname) for fname in jpg_filenames]
return paths
def get_images():
folder = "example_images/"
paths = get_paths(folder)
images = [read_image(path) for path in paths]
return images
def get_offsets():
offsets = np.ones((BATCH_SIZE,2),dtype=np.int32) * 100000
while np.any(offsets >= MAX_OFFSET):
offsets = np.random.geometric(p=PIXEL_GEOM_P,size=(BATCH_SIZE,2))
return offsets
def rand_image(all_images):
return all_images[random.randrange(len(all_images))]
def filter_images(all_images):
return [img for img in all_images
if img.shape[0] > MAX_OFFSET + IMAGE_SIZE and
img.shape[1] > MAX_OFFSET + IMAGE_SIZE]
def randomly_crop_image(image, x_offset, y_offset):
crop_height = image.shape[0] - y_offset - IMAGE_SIZE
crop_width = image.shape[1] - x_offset - IMAGE_SIZE
crop_pos_y = random.randrange(0,crop_height)
crop_pos_x = random.randrange(0,crop_width)
cropped_image = image[crop_pos_y:-(crop_height-crop_pos_y),
crop_pos_x:-(crop_width-crop_pos_x)]
base_image = cropped_image[:-y_offset,:-x_offset]
offset_image = cropped_image[y_offset:,x_offset:]
return base_image,offset_image
def generate_offset_image_pairs_batch(filtered_images):
batch_offsets = get_offsets()
batch_cropped_images = []
for i in range(BATCH_SIZE):
x_off,y_off = batch_offsets[i]
base_img,off_img = randomly_crop_image(rand_image(filtered_images), x_off, y_off)
comb_img = np.concatenate([base_img,off_img],axis=2)
batch_cropped_images.append(comb_img)
return np.stack(batch_cropped_images), batch_offsets
def offset_cmp_vec(offsets):
OFFSET_LAY1_size = 64
#OFFSET_LAY2_size = 64
one_hot_offsets = tf.one_hot(
indices=offsets,
depth=MAX_OFFSET,
axis=-1,
dtype=tf.float32
)
input_mat = tf.reshape(one_hot_offsets,(BATCH_SIZE*2,MAX_OFFSET*2))
out1 = tf.layers.dense(input_mat,
units=OFFSET_LAY1_size,
activation=tf.nn.relu)
#out2 = tf.layers.dense(out1,
# units=OFFSET_LAY2_size,
# activation=tf.nn.relu)
out3 = tf.layers.dense(out1,
units=OUT_COMPARE_SIZE,
activation=None)
return out3
def image_cmps(images):
OUT_LAY1_SIZE = 128
STRIDE_LEN = 4
gather_fn = make_gather_conv(GATHER_SIZE,INPUT_CHANNELS)
gather_fn_const = tf.constant(gather_fn)
gathered_data = tf.nn.conv2d(images,
filter=gather_fn_const,
strides=(1,STRIDE_LEN,STRIDE_LEN,1),
padding="VALID",
)
out2 = tf.layers.dense(gathered_data,
units=OUT_LAY1_SIZE,
activation=tf.nn.relu)
out3 = tf.layers.dense(out2,
units=OUT_LAY1_SIZE,
activation=tf.nn.relu)
out_cmp_vecs = tf.layers.dense(out2,
units=OUT_COMPARE_SIZE,
activation=None)
return out_cmp_vecs
def get_loss(offset_cmps, img_cmps):
offset_cmps = tf.reshape(offset_cmps,(2*BATCH_SIZE,1,OUT_COMPARE_SIZE))
match_offset_vecs = offset_cmps[:BATCH_SIZE]
mismatch_offset_vecs = offset_cmps[BATCH_SIZE:]
img_shape = img_cmps.get_shape().as_list()
vecs_per_img = img_shape[1] * img_shape[2]
print(vecs_per_img)
img_cmps = tf.reshape(img_cmps,(BATCH_SIZE,vecs_per_img,OUT_COMPARE_SIZE))
match_guesses = tf.reduce_mean(match_offset_vecs * img_cmps,axis=2)
mismatch_guesses = tf.reduce_mean(mismatch_offset_vecs * img_cmps,axis=2)
all_guesses = tf.concat([match_guesses,mismatch_guesses],axis=0)
result_val = tf.concat([tf.ones((BATCH_SIZE,vecs_per_img),dtype=tf.float32),
tf.zeros((BATCH_SIZE,vecs_per_img),dtype=tf.float32)],axis=0)
all_losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=result_val,logits=all_guesses)
return tf.reduce_mean(all_losses)
def train_offset_pairs():
filtered_imgs = filter_images(get_images())
in_imgs = tf.placeholder(tf.float32, (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, INPUT_CHANNELS))
in_offsets = tf.placeholder(tf.int32, (2*BATCH_SIZE, 2))
offset_cmps = offset_cmp_vec(in_offsets)
img_cmps = image_cmps(in_imgs)
loss = get_loss(offset_cmps, img_cmps)
info_optimizer = tf.train.AdamOptimizer(learning_rate=ADAM_learning_rate)
opt = info_optimizer.minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for _ in range(1000):
loss_tot = 0
for _ in range(100):
imgs, match_offsets = generate_offset_image_pairs_batch(filtered_imgs)
mismatch_offsets = get_offsets()
all_offsets = np.concatenate([match_offsets,mismatch_offsets],axis=0)
opt_val, loss_val = sess.run([opt,loss],feed_dict={
in_imgs:imgs,
in_offsets:all_offsets,
})
loss_tot += loss_val
print(loss_tot/100)
sys.stdout.flush()
train_offset_pairs()
#print(generate_offset_image_pairs_batch(filtered_imgs)[0].shape)
#get_offsets()
| [
"make_gather_conv.make_gather_conv",
"numpy.random.geometric",
"scipy.ndimage.imread",
"tensorflow.reduce_mean",
"os.listdir",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.concat",
"numpy.stack",
"numpy.concatenate",
"tensorflow.train.AdamOptimizer",
"sys.stdout.flush",
"tenso... | [((373, 400), 'scipy.ndimage.imread', 'scipy.ndimage.imread', (['fname'], {}), '(fname)\n', (393, 400), False, 'import scipy\n'), ((580, 617), 'PIL.Image.fromarray', 'Image.fromarray', (['img_data'], {'mode': '"""RGB"""'}), "(img_data, mode='RGB')\n", (595, 617), False, 'from PIL import Image\n'), ((681, 699), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (691, 699), False, 'import os\n'), ((1096, 1125), 'numpy.any', 'np.any', (['(offsets >= MAX_OFFSET)'], {}), '(offsets >= MAX_OFFSET)\n', (1102, 1125), True, 'import numpy as np\n'), ((1682, 1714), 'random.randrange', 'random.randrange', (['(0)', 'crop_height'], {}), '(0, crop_height)\n', (1698, 1714), False, 'import random\n'), ((1731, 1762), 'random.randrange', 'random.randrange', (['(0)', 'crop_width'], {}), '(0, crop_width)\n', (1747, 1762), False, 'import random\n'), ((2586, 2658), 'tensorflow.one_hot', 'tf.one_hot', ([], {'indices': 'offsets', 'depth': 'MAX_OFFSET', 'axis': '(-1)', 'dtype': 'tf.float32'}), '(indices=offsets, depth=MAX_OFFSET, axis=-1, dtype=tf.float32)\n', (2596, 2658), True, 'import tensorflow as tf\n'), ((2713, 2774), 'tensorflow.reshape', 'tf.reshape', (['one_hot_offsets', '(BATCH_SIZE * 2, MAX_OFFSET * 2)'], {}), '(one_hot_offsets, (BATCH_SIZE * 2, MAX_OFFSET * 2))\n', (2723, 2774), True, 'import tensorflow as tf\n'), ((2780, 2853), 'tensorflow.layers.dense', 'tf.layers.dense', (['input_mat'], {'units': 'OFFSET_LAY1_size', 'activation': 'tf.nn.relu'}), '(input_mat, units=OFFSET_LAY1_size, activation=tf.nn.relu)\n', (2795, 2853), True, 'import tensorflow as tf\n'), ((3012, 3074), 'tensorflow.layers.dense', 'tf.layers.dense', (['out1'], {'units': 'OUT_COMPARE_SIZE', 'activation': 'None'}), '(out1, units=OUT_COMPARE_SIZE, activation=None)\n', (3027, 3074), True, 'import tensorflow as tf\n'), ((3207, 3252), 'make_gather_conv.make_gather_conv', 'make_gather_conv', (['GATHER_SIZE', 'INPUT_CHANNELS'], {}), '(GATHER_SIZE, INPUT_CHANNELS)\n', (3223, 3252), False, 'from 
make_gather_conv import make_gather_conv\n'), ((3274, 3296), 'tensorflow.constant', 'tf.constant', (['gather_fn'], {}), '(gather_fn)\n', (3285, 3296), True, 'import tensorflow as tf\n'), ((3318, 3423), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['images'], {'filter': 'gather_fn_const', 'strides': '(1, STRIDE_LEN, STRIDE_LEN, 1)', 'padding': '"""VALID"""'}), "(images, filter=gather_fn_const, strides=(1, STRIDE_LEN,\n STRIDE_LEN, 1), padding='VALID')\n", (3330, 3423), True, 'import tensorflow as tf\n'), ((3463, 3537), 'tensorflow.layers.dense', 'tf.layers.dense', (['gathered_data'], {'units': 'OUT_LAY1_SIZE', 'activation': 'tf.nn.relu'}), '(gathered_data, units=OUT_LAY1_SIZE, activation=tf.nn.relu)\n', (3478, 3537), True, 'import tensorflow as tf\n'), ((3566, 3631), 'tensorflow.layers.dense', 'tf.layers.dense', (['out2'], {'units': 'OUT_LAY1_SIZE', 'activation': 'tf.nn.relu'}), '(out2, units=OUT_LAY1_SIZE, activation=tf.nn.relu)\n', (3581, 3631), True, 'import tensorflow as tf\n'), ((3668, 3730), 'tensorflow.layers.dense', 'tf.layers.dense', (['out2'], {'units': 'OUT_COMPARE_SIZE', 'activation': 'None'}), '(out2, units=OUT_COMPARE_SIZE, activation=None)\n', (3683, 3730), True, 'import tensorflow as tf\n'), ((3828, 3890), 'tensorflow.reshape', 'tf.reshape', (['offset_cmps', '(2 * BATCH_SIZE, 1, OUT_COMPARE_SIZE)'], {}), '(offset_cmps, (2 * BATCH_SIZE, 1, OUT_COMPARE_SIZE))\n', (3838, 3890), True, 'import tensorflow as tf\n'), ((4121, 4187), 'tensorflow.reshape', 'tf.reshape', (['img_cmps', '(BATCH_SIZE, vecs_per_img, OUT_COMPARE_SIZE)'], {}), '(img_cmps, (BATCH_SIZE, vecs_per_img, OUT_COMPARE_SIZE))\n', (4131, 4187), True, 'import tensorflow as tf\n'), ((4206, 4258), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(match_offset_vecs * img_cmps)'], {'axis': '(2)'}), '(match_offset_vecs * img_cmps, axis=2)\n', (4220, 4258), True, 'import tensorflow as tf\n'), ((4281, 4336), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(mismatch_offset_vecs * img_cmps)'], {'axis': 
'(2)'}), '(mismatch_offset_vecs * img_cmps, axis=2)\n', (4295, 4336), True, 'import tensorflow as tf\n'), ((4354, 4406), 'tensorflow.concat', 'tf.concat', (['[match_guesses, mismatch_guesses]'], {'axis': '(0)'}), '([match_guesses, mismatch_guesses], axis=0)\n', (4363, 4406), True, 'import tensorflow as tf\n'), ((4595, 4673), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'result_val', 'logits': 'all_guesses'}), '(labels=result_val, logits=all_guesses)\n', (4634, 4673), True, 'import tensorflow as tf\n'), ((4685, 4711), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['all_losses'], {}), '(all_losses)\n', (4699, 4711), True, 'import tensorflow as tf\n'), ((4803, 4888), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, INPUT_CHANNELS)'], {}), '(tf.float32, (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, INPUT_CHANNELS)\n )\n', (4817, 4888), True, 'import tensorflow as tf\n'), ((4901, 4946), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '(2 * BATCH_SIZE, 2)'], {}), '(tf.int32, (2 * BATCH_SIZE, 2))\n', (4915, 4946), True, 'import tensorflow as tf\n'), ((5092, 5148), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'ADAM_learning_rate'}), '(learning_rate=ADAM_learning_rate)\n', (5114, 5148), True, 'import tensorflow as tf\n'), ((783, 810), 'os.path.join', 'os.path.join', (['folder', 'fname'], {}), '(folder, fname)\n', (795, 810), False, 'import os\n'), ((1038, 1078), 'numpy.ones', 'np.ones', (['(BATCH_SIZE, 2)'], {'dtype': 'np.int32'}), '((BATCH_SIZE, 2), dtype=np.int32)\n', (1045, 1078), True, 'import numpy as np\n'), ((1145, 1202), 'numpy.random.geometric', 'np.random.geometric', ([], {'p': 'PIXEL_GEOM_P', 'size': '(BATCH_SIZE, 2)'}), '(p=PIXEL_GEOM_P, size=(BATCH_SIZE, 2))\n', (1164, 1202), True, 'import numpy as np\n'), ((2336, 2379), 'numpy.concatenate', 'np.concatenate', (['[base_img, off_img]'], {'axis': '(2)'}), 
'([base_img, off_img], axis=2)\n', (2350, 2379), True, 'import numpy as np\n'), ((2435, 2465), 'numpy.stack', 'np.stack', (['batch_cropped_images'], {}), '(batch_cropped_images)\n', (2443, 2465), True, 'import numpy as np\n'), ((5200, 5212), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5210, 5212), True, 'import tensorflow as tf\n'), ((4434, 4487), 'tensorflow.ones', 'tf.ones', (['(BATCH_SIZE, vecs_per_img)'], {'dtype': 'tf.float32'}), '((BATCH_SIZE, vecs_per_img), dtype=tf.float32)\n', (4441, 4487), True, 'import tensorflow as tf\n'), ((4515, 4569), 'tensorflow.zeros', 'tf.zeros', (['(BATCH_SIZE, vecs_per_img)'], {'dtype': 'tf.float32'}), '((BATCH_SIZE, vecs_per_img), dtype=tf.float32)\n', (4523, 4569), True, 'import tensorflow as tf\n'), ((5239, 5272), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5270, 5272), True, 'import tensorflow as tf\n'), ((5830, 5848), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5846, 5848), False, 'import sys\n'), ((5528, 5585), 'numpy.concatenate', 'np.concatenate', (['[match_offsets, mismatch_offsets]'], {'axis': '(0)'}), '([match_offsets, mismatch_offsets], axis=0)\n', (5542, 5585), True, 'import numpy as np\n')] |
#%%
from datetime import datetime
import pandas as pd
import numpy as np
from pandas.core import frame
import geopandas as gpd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import statsmodels.api as sm
# %%
def process_index_pivot(vi_link, v_index):
    """Load a vegetation-index CSV and pivot it into one row per year.

    The input file must have a ``month`` column formatted ``YYYY-MM...`` and a
    value column named after *v_index* (e.g. ``EVI``).  Monthly means are
    computed and spread into columns named ``<v_index>_<MM>``, with the year
    kept in ``<v_index>_year``.

    Parameters
    ----------
    vi_link : str or file-like
        Path (or buffer) passed straight to ``pd.read_csv``.
    v_index : str
        Name of the vegetation-index column to aggregate.

    Returns
    -------
    pandas.DataFrame
        One row per year, integer row index, prefixed column names.
    """
    v_index = str(v_index)
    table = pd.read_csv(vi_link)
    # Split the YYYY-MM... stamp once; 'year' must be derived before 'month'
    # is overwritten with the two-digit month.
    stamp = table.month.astype(str)
    table['year'] = stamp.str[0:4]
    table['month'] = stamp.str[5:7]
    pivot = (
        table.groupby(['year', 'month'])[v_index]
        .mean()
        .unstack()
        .reset_index()
    )
    # Prefix every column (including 'year') so tables for different
    # indices can be concatenated side by side without name clashes.
    pivot.columns = v_index + "_" + pivot.columns
    pivot.index = pivot.index.astype(int)
    return pivot
# %%
# Build the modelling table: one row per year, joining the pivoted monthly
# vegetation-index columns with the recorded yields.
sa_evi = process_index_pivot('sa_all_polygons_evi.csv', 'EVI').set_index('EVI_year')
sa_ndsi = process_index_pivot('sa_all_polygons_ndsi.csv', 'NDSI').set_index('NDSI_year')
sa_nbr = process_index_pivot('sa_all_polygons_nbrt.csv', 'NBRT').set_index('NBRT_year')
sa_yield = pd.read_csv('sa_yield.csv', index_col = 'year')
sa_yield.index = sa_yield.index.astype(str)  # match the string year index of the pivots
sa_rain = pd.read_csv('sa_crop_rain.csv', index_col = 'year')
sa_rain.index = sa_rain.index.astype(str)  # NOTE(review): sa_rain is loaded but never joined below — confirm intent
sa_rand_forrest_table = pd.concat([sa_evi, sa_ndsi, sa_nbr, sa_yield], axis = 1)
# Drop the 2021 season and back-fill any remaining gaps.
sa_rand_forrest_table = sa_rand_forrest_table[sa_rand_forrest_table.index != '2021'].fillna(method='bfill')
# %%
# Labels are the values we want to predict
labels = np.array(sa_rand_forrest_table['Yield'])
# Remove the label and yield-derived columns from the features
# axis 1 refers to the columns
features = sa_rand_forrest_table.drop(['Yield', 'production', 'hectares'], axis = 1)
# Saving feature names for later use
feature_list = list(features.columns)
# Convert to numpy array
features = np.array(features)
# %%
# Using Scikit-learn to split data into training and testing sets
from sklearn.model_selection import train_test_split
# Split the data into training and testing sets (fixed seed for reproducibility)
train_features, test_features, train_labels, test_labels = \
    train_test_split(features,
                     labels,
                     test_size = 0.25,
                     random_state = 42)
print('Training Features Shape:', train_features.shape)
print('Training Labels Shape:', train_labels.shape)
print('Testing Features Shape:', test_features.shape)
print('Testing Labels Shape:', test_labels.shape)
# %%
# Import the model we are using
from sklearn.ensemble import RandomForestRegressor
# Instantiate model with 1000 decision trees (fixed seed for reproducibility)
rf = RandomForestRegressor(n_estimators = 1000, random_state = 42)
# Train the model on training data
rf.fit(train_features, train_labels)
# %%
# Use the forest's predict method on the test data
predictions = rf.predict(test_features)
# Calculate the absolute errors
errors = abs(predictions - test_labels)
# Print out the mean absolute error (mae)
print('Mean Absolute Error:', round(np.mean(errors), 2), 'tonnes per hectare.')
# %%
# Calculate mean absolute percentage error (MAPE)
mape = 100 * (errors / test_labels)
# Calculate and display accuracy as 100% minus the mean percentage error
accuracy = 100 - np.mean(mape)
print('Accuracy:', round(accuracy, 2), '%.')
# %%
# Visualize one decision tree from the trained forest and then chart the
# feature importances.  (A verbatim duplicate of the import/tree-selection
# lines that used to follow here has been removed.)
from sklearn.tree import export_graphviz
import pydot
# Pull out one tree from the forest
tree = rf.estimators_[5]
# Export the image to a dot file
export_graphviz(tree, out_file = 'tree.dot',
                feature_names = feature_list,
                rounded = True,
                precision = 1)
# Use dot file to create a graph
(graph, ) = pydot.graph_from_dot_file('tree.dot')
# Write graph to a png file
# graph.write_png('tree.png')
# %%
# Get numerical feature importances
importances = list(rf.feature_importances_)
# List of tuples with variable and importance
feature_importances = [(feature, round(importance, 2)) for feature, importance in zip(feature_list, importances)]
# Sort the feature importances by most important first
feature_importances = sorted(feature_importances, key = lambda x: x[1], reverse = True)
# Print out the feature and importances (plain loop, not a side-effect comprehension)
for pair in feature_importances:
    print('Variable: {:20} Importance: {}'.format(*pair))
# %%
# Import matplotlib for plotting and use magic command for Jupyter Notebooks
import matplotlib.pyplot as plt
# Set the style
plt.style.use('fivethirtyeight')
# list of x locations for plotting
x_values = list(range(len(importances)))
# Make a bar chart
plt.bar(x_values, importances, orientation = 'vertical')
# Tick labels for x axis
plt.xticks(x_values, feature_list, rotation='vertical')
# Axis labels and title
plt.ylabel('Importance'); plt.xlabel('Variable'); plt.title('Variable Importances')
| [
"numpy.mean",
"sklearn.ensemble.RandomForestRegressor",
"pandas.read_csv",
"matplotlib.pyplot.xticks",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"pydot.graph_from_dot_file",
"matplotlib.pyplot.style.use",
"numpy.array",
"sklearn.tree.expo... | [((1042, 1087), 'pandas.read_csv', 'pd.read_csv', (['"""sa_yield.csv"""'], {'index_col': '"""year"""'}), "('sa_yield.csv', index_col='year')\n", (1053, 1087), True, 'import pandas as pd\n'), ((1145, 1194), 'pandas.read_csv', 'pd.read_csv', (['"""sa_crop_rain.csv"""'], {'index_col': '"""year"""'}), "('sa_crop_rain.csv', index_col='year')\n", (1156, 1194), True, 'import pandas as pd\n'), ((1264, 1318), 'pandas.concat', 'pd.concat', (['[sa_evi, sa_ndsi, sa_nbr, sa_yield]'], {'axis': '(1)'}), '([sa_evi, sa_ndsi, sa_nbr, sa_yield], axis=1)\n', (1273, 1318), True, 'import pandas as pd\n'), ((1489, 1529), 'numpy.array', 'np.array', (["sa_rand_forrest_table['Yield']"], {}), "(sa_rand_forrest_table['Yield'])\n", (1497, 1529), True, 'import numpy as np\n'), ((1798, 1816), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (1806, 1816), True, 'import numpy as np\n'), ((2059, 2126), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'labels'], {'test_size': '(0.25)', 'random_state': '(42)'}), '(features, labels, test_size=0.25, random_state=42)\n', (2075, 2126), False, 'from sklearn.model_selection import train_test_split\n'), ((2552, 2609), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(1000)', 'random_state': '(42)'}), '(n_estimators=1000, random_state=42)\n', (2573, 2609), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((3540, 3641), 'sklearn.tree.export_graphviz', 'export_graphviz', (['tree'], {'out_file': '"""tree.dot"""', 'feature_names': 'feature_list', 'rounded': '(True)', 'precision': '(1)'}), "(tree, out_file='tree.dot', feature_names=feature_list,\n rounded=True, precision=1)\n", (3555, 3641), False, 'from sklearn.tree import export_graphviz\n'), ((3743, 3780), 'pydot.graph_from_dot_file', 'pydot.graph_from_dot_file', (['"""tree.dot"""'], {}), "('tree.dot')\n", (3768, 3780), False, 'import pydot\n'), ((4493, 4525), 
'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (4506, 4525), True, 'import matplotlib.pyplot as plt\n'), ((4623, 4677), 'matplotlib.pyplot.bar', 'plt.bar', (['x_values', 'importances'], {'orientation': '"""vertical"""'}), "(x_values, importances, orientation='vertical')\n", (4630, 4677), True, 'import matplotlib.pyplot as plt\n'), ((4706, 4761), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x_values', 'feature_list'], {'rotation': '"""vertical"""'}), "(x_values, feature_list, rotation='vertical')\n", (4716, 4761), True, 'import matplotlib.pyplot as plt\n'), ((4787, 4811), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Importance"""'], {}), "('Importance')\n", (4797, 4811), True, 'import matplotlib.pyplot as plt\n'), ((4813, 4835), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Variable"""'], {}), "('Variable')\n", (4823, 4835), True, 'import matplotlib.pyplot as plt\n'), ((4837, 4870), 'matplotlib.pyplot.title', 'plt.title', (['"""Variable Importances"""'], {}), "('Variable Importances')\n", (4846, 4870), True, 'import matplotlib.pyplot as plt\n'), ((330, 350), 'pandas.read_csv', 'pd.read_csv', (['vi_link'], {}), '(vi_link)\n', (341, 350), True, 'import pandas as pd\n'), ((3126, 3139), 'numpy.mean', 'np.mean', (['mape'], {}), '(mape)\n', (3133, 3139), True, 'import numpy as np\n'), ((2937, 2952), 'numpy.mean', 'np.mean', (['errors'], {}), '(errors)\n', (2944, 2952), True, 'import numpy as np\n')] |
import gmpy
import numpy as np
from tqdm import tqdm
from mdpgen.mdp import MDP, AbstractMDP, UniformAbstractMDP
from mdpgen.vi import vi
from mdpgen.markov import generate_markov_mdp_pair, generate_non_markov_mdp_pair, is_markov
from mdpgen.value_fn import compare_value_fns, partial_ordering, sorted_order, sort_value_fns, graph_value_fns
#%%
# Try MDP pair with non-Markov belief
# Ground transition matrix over 6 states.  The same matrix is used for both
# actions below, so the two actions of mdp1 are identical.
T = np.array([
    [0, .5, .5, 0, 0, 0],
    [0, 0, 0, .5, .5, 0],
    [0, 0, 0, 0, .5, .5],
    [1, 0, 0, 0, 0, 0],
    [1, 0, 0, 0, 0, 0],
    [1, 0, 0, 0, 0, 0],
])
# Transition rewards, scaled into [0, 1] by the /4.
R = np.array([
    [0, 1, 1, 0, 0, 0],
    [0, 0, 0, 2, 2, 0],
    [0, 0, 0, 0, 2, 2],
    [2, 0, 0, 0, 0, 0],
    [3, 0, 0, 0, 0, 0],
    [4, 0, 0, 0, 0, 0]
])/4
mdp1 = MDP([T, T], [R, R], gamma=0.9)
# State-aggregation map phi: ground states 3, 4 and 5 all collapse into the
# last abstract state, giving a 4-state abstract MDP.
phi = np.array([
    [1, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1],
    [0, 0, 0, 1],
    [0, 0, 0, 1],
])
mdp2 = AbstractMDP(mdp1, phi)
# Solve the ground MDP with value iteration.
v_star, q_star, pi_star = vi(mdp1)
v_star, pi_star  # notebook-style inspection; no effect when run as a script
# Enumerate each abstract policy alongside its piecewise-constant lift to the
# ground MDP, then evaluate every policy in its own MDP.
pi_g_list = mdp2.piecewise_constant_policies()
pi_a_list = mdp2.abstract_policies()
v_g_list = [vi(mdp1, pi)[0] for pi in pi_g_list]
v_a_list = [vi(mdp2, pi)[0] for pi in pi_a_list]
np.allclose(v_g_list, v_g_list[0])  # notebook-style check; result not captured
# The orderings of the abstract and ground value functions must agree.
order_v_g = sorted_order(v_g_list)
order_v_a = sorted_order(v_a_list)
assert np.allclose(order_v_a, order_v_g)
graph_value_fns(v_a_list)
graph_value_fns(v_g_list)
| [
"numpy.allclose",
"mdpgen.mdp.AbstractMDP",
"numpy.array",
"mdpgen.vi.vi",
"mdpgen.value_fn.graph_value_fns",
"mdpgen.mdp.MDP",
"mdpgen.value_fn.sorted_order"
] | [((391, 537), 'numpy.array', 'np.array', (['[[0, 0.5, 0.5, 0, 0, 0], [0, 0, 0, 0.5, 0.5, 0], [0, 0, 0, 0, 0.5, 0.5], [1,\n 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0]]'], {}), '([[0, 0.5, 0.5, 0, 0, 0], [0, 0, 0, 0.5, 0.5, 0], [0, 0, 0, 0, 0.5,\n 0.5], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0]])\n', (399, 537), True, 'import numpy as np\n'), ((726, 756), 'mdpgen.mdp.MDP', 'MDP', (['[T, T]', '[R, R]'], {'gamma': '(0.9)'}), '([T, T], [R, R], gamma=0.9)\n', (729, 756), False, 'from mdpgen.mdp import MDP, AbstractMDP, UniformAbstractMDP\n'), ((763, 861), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 1], [0, \n 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0,\n 1], [0, 0, 0, 1]])\n', (771, 861), True, 'import numpy as np\n'), ((892, 914), 'mdpgen.mdp.AbstractMDP', 'AbstractMDP', (['mdp1', 'phi'], {}), '(mdp1, phi)\n', (903, 914), False, 'from mdpgen.mdp import MDP, AbstractMDP, UniformAbstractMDP\n'), ((942, 950), 'mdpgen.vi.vi', 'vi', (['mdp1'], {}), '(mdp1)\n', (944, 950), False, 'from mdpgen.vi import vi\n'), ((1151, 1185), 'numpy.allclose', 'np.allclose', (['v_g_list', 'v_g_list[0]'], {}), '(v_g_list, v_g_list[0])\n', (1162, 1185), True, 'import numpy as np\n'), ((1199, 1221), 'mdpgen.value_fn.sorted_order', 'sorted_order', (['v_g_list'], {}), '(v_g_list)\n', (1211, 1221), False, 'from mdpgen.value_fn import compare_value_fns, partial_ordering, sorted_order, sort_value_fns, graph_value_fns\n'), ((1234, 1256), 'mdpgen.value_fn.sorted_order', 'sorted_order', (['v_a_list'], {}), '(v_a_list)\n', (1246, 1256), False, 'from mdpgen.value_fn import compare_value_fns, partial_ordering, sorted_order, sort_value_fns, graph_value_fns\n'), ((1264, 1297), 'numpy.allclose', 'np.allclose', (['order_v_a', 'order_v_g'], {}), '(order_v_a, order_v_g)\n', (1275, 1297), True, 'import numpy as np\n'), ((1299, 1324), 'mdpgen.value_fn.graph_value_fns', 'graph_value_fns', 
(['v_a_list'], {}), '(v_a_list)\n', (1314, 1324), False, 'from mdpgen.value_fn import compare_value_fns, partial_ordering, sorted_order, sort_value_fns, graph_value_fns\n'), ((1325, 1350), 'mdpgen.value_fn.graph_value_fns', 'graph_value_fns', (['v_g_list'], {}), '(v_g_list)\n', (1340, 1350), False, 'from mdpgen.value_fn import compare_value_fns, partial_ordering, sorted_order, sort_value_fns, graph_value_fns\n'), ((559, 693), 'numpy.array', 'np.array', (['[[0, 1, 1, 0, 0, 0], [0, 0, 0, 2, 2, 0], [0, 0, 0, 0, 2, 2], [2, 0, 0, 0, 0,\n 0], [3, 0, 0, 0, 0, 0], [4, 0, 0, 0, 0, 0]]'], {}), '([[0, 1, 1, 0, 0, 0], [0, 0, 0, 2, 2, 0], [0, 0, 0, 0, 2, 2], [2, 0,\n 0, 0, 0, 0], [3, 0, 0, 0, 0, 0], [4, 0, 0, 0, 0, 0]])\n', (567, 693), True, 'import numpy as np\n'), ((1064, 1076), 'mdpgen.vi.vi', 'vi', (['mdp1', 'pi'], {}), '(mdp1, pi)\n', (1066, 1076), False, 'from mdpgen.vi import vi\n'), ((1113, 1125), 'mdpgen.vi.vi', 'vi', (['mdp2', 'pi'], {}), '(mdp2, pi)\n', (1115, 1125), False, 'from mdpgen.vi import vi\n')] |
import numpy as np

# Names of the meronymy (part-whole) relation types, indexed by the
# M_* codes defined below.
MERONYMY = [
    "component",
    "member",
    "portion",
    "stuff",
    "feature",
    "place",
    "in",
    "is-a",
    "attribute",
    "attached",
    "belongs-to",
]

# Relation codes; M_UNKNOWN marks an unclassified relation.
M_UNKNOWN = -1
(M_COMPONENT, M_MEMBER, M_PORTION, M_STUFF, M_FEATURE, M_PLACE,
 M_IN, M_IS_A, M_ATTRIBUTE, M_ATTACHED, M_BELONGS_TO) = range(11)

# Meronymy compatibility matrix: meronymy_matrix[a, b] is True when
# relation code a is compatible with relation code b.
meronymy_matrix = np.full((11, 11), False)
# Every relation type is compatible with itself.
np.fill_diagonal(meronymy_matrix, True)
# "is-a" and "component" are additionally compatible with each other.
meronymy_matrix[M_IS_A, M_COMPONENT] = True
meronymy_matrix[M_COMPONENT, M_IS_A] = True
| [
"numpy.full"
] | [((412, 436), 'numpy.full', 'np.full', (['(11, 11)', '(False)'], {}), '((11, 11), False)\n', (419, 436), True, 'import numpy as np\n')] |
#!/usr/bin/python
import sys, os
import numpy as np
currentpath = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..'))
sys.path.append(currentpath)
from TBotTools import pid, geometry, pgt
from time import time
import pygame
import pygame.gfxdraw
import pygame.locals as pgl
from collections import deque
from datetime import datetime
clock = pygame.time.Clock()
dirpath = currentpath+'/Simulators/Images'
framerate = 30 # set to 30 for Rasoberry pi
dt = 1.0/framerate
#-----------------------------------------------------------------------
# PID Tuning
#-----------------------------------------------------------------------
#------------------------- Tuning for g = g *0.1 -----------------------
#sf = 0.1
#s_kpo, s_kio, s_kdo = 0.050, 0.147, 0.041
#a_kpo, a_kio, a_kdo = 1.898, 0.006, 0.067
#----------------------- Tuning for the Moon ---------------------------
#sf = 0.165
#s_kpo, s_kio, s_kdo = 0.075, 0.94, 0.022
#a_kpo, a_kio, a_kdo = 3.03, 0.0096, 0.067
#------------------------ Tuning for Earth -----------------------------
sf = 1
s_kpo, s_kio, s_kdo = 0.090, 0.256, 0.00
a_kpo, a_kio, a_kdo = 12.651, 0.00, 0.26
#-----------------------------------------------------------------------
sf_original = sf
s_kp, s_ki, s_kd = s_kpo, s_kio, s_kdo
a_kp, a_ki, a_kd = a_kpo, a_kio, a_kdo
speed_pid = pid.pid(s_kp, s_ki, s_kd,[-10,10],[-5,5],dt)
angle_pid = pid.pid(a_kp, a_ki, a_kd,[6, 6],[-1,1],dt)
BLACK = pygame.Color('black')
WHITE = pygame.Color('white')
GRAY = pygame.Color('gray')
RED = pygame.Color('red')
save = 0
show_arrows = 0
draw_stick_man = 1
#-----------------------------------------------------------------------
# Physical constants
#-----------------------------------------------------------------------
acc_g = 9.81
l = 0.045 # distance between the centre of gravity of the T-Bot and the axil
R = 0.024 # Radius of wheels
C = 0.99 # Friction
h=l+R # Maximum distance between the centre of gravity and the ground
#h = 828 # Tallest building
#h = 5
auto_toggle = 0
auto = 1
#height_of_man = 1.8923 # Me
height_of_man = 0.1524 # 1:12 Scale (approx. 5-6") Action Figure
#height_of_man = 0.0508 # 1:48 Scale (approx. 2") Action Figure
tyre = 4
t = 0
alpha = 0
gamma = 0
acc = 0
omega = 0
velocity = 0
distance = 0
theta = 0.001
targetvelocity = 0
geom = geometry.geometry()
starttime = time()
lasttime = 0
timeflag = 1
draw_stick_man = 1
#-----------------------------------------------------------------------
# Drawing Geometry
#-----------------------------------------------------------------------
geom = geometry.geometry()
origin = [500,320]
tbot_drawing_offset = [-78,-10]
Tbot_scalefactor = 216
height_of_TBot_body = 120E-3
Man_scalefactor = (height_of_man/h/2)*Tbot_scalefactor
wheel_radius = int(R/l*Tbot_scalefactor/2.2)
tbot = np.loadtxt('T-BotSideView.dat')
tbot = np.vstack((tbot,tbot[0,:]))+tbot_drawing_offset # closes the shape and adds an offset
tbot = tbot/(tbot[:,1].max()-tbot[:,1].min())*Tbot_scalefactor
spokes = np.array([[0,1],[0,0],[ 0.8660254, -0.5],[0,0], [-0.8660254, -0.5 ],[0,0]])*(wheel_radius-tyre)
trackmarksArray = np.array([[0,origin[1]+wheel_radius],[1000,origin[1]+wheel_radius]])
track_marks_tup = tuple(map(tuple, tuple((trackmarksArray).astype(int))))
stick_man_data = np.loadtxt('Man.dat')
stick_man = np.vstack((stick_man_data,stick_man_data[0,:]))+tbot_drawing_offset # closes the shape and adds an offset
stick_man = stick_man/(stick_man[:,1].max()-stick_man[:,1].min())*Man_scalefactor
scaled_stick_man = stick_man
stick_man=stick_man-[stick_man[:,0].min(),stick_man[:,1].min()]
stick_man_h_centre = (stick_man[:,0].min()+stick_man[:,0].max())/2
stick_man = tuple(map(tuple, tuple((stick_man+[750-stick_man_h_centre,origin[1]+wheel_radius-stick_man[:,1].max()]).astype(int))))
speedfactor = 0.6
speedlimit = 65
turnspeedlimit = 70
oldvals = [0,0,0,0]
pygame.init()
# Set the width and height of the screen (width, height).
screen = pygame.display.set_mode((1000, 700))
pygame.display.set_caption("T-Bot Simulator")
# Used to manage how fast the screen updates.
clock = pygame.time.Clock()
# Use convert for the large images. This is the fastest format for blitting
# Background images
bg = pygame.image.load(dirpath+'/Gray.jpg').convert()
# Do not use convert for the following images
# Button images
joystick_image = pygame.image.load(dirpath+'/joystick_only.png')
track_image = pygame.image.load(dirpath+'/line.png')
dpad = pygame.image.load(dirpath+'/dpad.png')
dpadU = pygame.image.load(dirpath+'/dpadU.png')
dpadD = pygame.image.load(dirpath+'/dpadD.png')
dpadL = pygame.image.load(dirpath+'/dpadL.png')
dpadR = pygame.image.load(dirpath+'/dpadR.png')
dpadUR = pygame.image.load(dirpath+'/dpadUR.png')
dpadDR = pygame.image.load(dirpath+'/dpadDR.png')
dpadUL = pygame.image.load(dirpath+'/dpadUL.png')
dpadDL = pygame.image.load(dirpath+'/dpadDL.png')
bpad = pygame.image.load(dirpath+'/bpad.png')
bpadU = pygame.image.load(dirpath+'/bpadU.png')
bpadD = pygame.image.load(dirpath+'/bpadD.png')
bpadL = pygame.image.load(dirpath+'/bpadL.png')
bpadR = pygame.image.load(dirpath+'/bpadR.png')
bpadUR = pygame.image.load(dirpath+'/bpadUR.png')
bpadDR = pygame.image.load(dirpath+'/bpadDR.png')
bpadUL = pygame.image.load(dirpath+'/bpadUL.png')
bpadDL = pygame.image.load(dirpath+'/bpadDL.png')
stick = pygame.image.load(dirpath+'/stick.png')
L1 = pygame.image.load(dirpath+'/L1.png')
L2 = pygame.image.load(dirpath+'/L2.png')
L1L2 = pygame.image.load(dirpath+'/L1L2.png')
R1 = pygame.image.load(dirpath+'/R1.png')
R2 = pygame.image.load(dirpath+'/R2.png')
R1R2 = pygame.image.load(dirpath+'/R1R2.png')
hoffset = 244
voffset = 388
posdpad = (102+hoffset, 75+voffset)
posbpad = (327+hoffset, 75+voffset)
posL = (106+hoffset,15+voffset)
posR = (338+hoffset,15+voffset)
arrow = np.array([[2,0],[2,150],[7,150],[0,165],[-7,150],[-2,150],[-2,0],[2,0]])
pos_joystick = (298,420)
posstickL = (164+hoffset, 130+voffset)
posstickR = (287+hoffset, 130+voffset)
# Get ready to print.
textPrint = pgt.TextPrint(pygame.Color('white'))
# Initialize the joystick.
pygame.joystick.init()
joystick = pygame.joystick.Joystick(0) # 0 for first joystick, 1 for the next etc.
joystick.init()
name = joystick.get_name()
axes = joystick.get_numaxes()
hats = joystick.get_numhats()
readdataevent = pygame.USEREVENT+1
pygame.time.set_timer(readdataevent, 60)
framecount = 1
done = False
xdatarange = [760,950]
y_origin = 500
yscale = 100
pts = deque(maxlen=xdatarange[1]-xdatarange[0])
pts2 = deque(maxlen=xdatarange[1]-xdatarange[0])
for ii in range(xdatarange[0],xdatarange[1]):
pts.appendleft((ii,0))
pts2.appendleft((ii,0))
iii = 200
aa = np.zeros((len(pts),2))
aa[:,1]=np.array(pts)[:,1]
aa[:,0]=np.array(range(xdatarange[0],xdatarange[1]))
cc = np.zeros((len(pts),2))
cc[:,1]=np.array(pts2)[:,1]
cc[:,0]=np.array(range(xdatarange[0],xdatarange[1]))
bb=np.copy(aa)
dd=np.copy(cc)
# -------- Main Program Loop -----------
while not done:
g = acc_g * sf
#screen.fill((0, 0, 0))
screen.blit(bg,(0,0))
screen.blit(joystick_image, pos_joystick)
screen.blit(track_image, (0,origin[1]+wheel_radius-8))
#-------------------------------------------------------------------
# The Physics
#-------------------------------------------------------------------
if theta >= -np.pi/2.2 and theta <= np.pi/2.2:
alpha = np.sin(theta)*g/h
h_acc = (alpha * R)+acc # Accounts for horizontal acceleration
# produced from the rotation of the
# wheels as the T-Bot falls. The gearbox
# prevents free rotation of the wheels.
gamma = np.cos(theta)*h_acc/l
a_acc = alpha-gamma
# integrate angular acceleration to get angular velocity
omega += a_acc*dt
omega = omega*C
# integrate angular velocity to get angle
theta += omega*dt
# integrate dt to get time
t += dt
velocity += acc*dt
distance += (velocity*dt)
'''
theta_c = np.arctan2(l*np.sin(theta),l*np.cos(theta)+R)
h_c = np.sqrt((l*np.sin(theta))**2+(l*np.cos(theta)+R)**2)
alpha = np.sin(theta_c)*g/h_c
h_acc = (alpha * R)+acc # Accounts for horizontal acceleration
# produced from the rotation of the
# wheels as the T-Bot falls. The gearbox
# prevents free rotation of the wheels.
gamma = np.cos(theta)*h_acc/l
a_acc = alpha-gamma
# integrate angular acceleration to get angular velocity
omega += a_acc*dt
omega = omega*C
# integrate angular velocity to get angle
theta_c += omega*dt
theta = np.arcsin(((R*np.cos(theta_c)+np.sqrt(l**2-(R*np.sin(theta_c))**2)) *np.sin(theta_c))/l) # only good for -90 > theta_c < 90
# integrate dt to get time
t += dt
velocity += acc*dt
distance += (velocity*dt)
'''
#---------------------------------------------------------------
mm2px = Tbot_scalefactor/height_of_TBot_body
origin[0] = 500+int(distance*mm2px)+int(((theta)*np.pi)*wheel_radius/4)
origin[0] = np.mod(origin[0],1000)
tbot_rot = np.array(geom.rotxy(theta+np.pi,tbot))
tbot_tup = tuple(map(tuple, tuple((tbot_rot+origin).astype(int))))
noise = np.random.rand(1)*np.pi/180
spokes_rot = np.array(geom.rotxy((distance*mm2px/wheel_radius)+theta,spokes))
spokes_tup = tuple(map(tuple, tuple((spokes_rot+origin).astype(int))))
#---------------------------------------------------------------
# The PID Controller
#---------------------------------------------------------------
if auto:
#settheta = -speed_pid.output(targetvelocity,-velocity,dt)
# The T-Bot does not have motor encoders so the velocity is is calculated as a function of angle
settheta = -speed_pid.output(geom.v2ang(h,g,targetvelocity),-geom.v2ang(h,g,velocity),dt)
acc = -angle_pid.output(settheta,(theta+noise[0]),dt)
#acc = -angle_pid.output(np.pi-geom.v2ang(h,g,targetvelocity),(theta+noise[0]),dt)
#---------------------------------------------------------------
if show_arrows:
arrow_rot1 = np.array(geom.rotxy(theta+np.pi,arrow))
arrow1_tup = tuple(map(tuple, tuple((arrow_rot1+origin).astype(int))))
arrow_rot2 = np.array(geom.rotxy(np.pi+settheta,arrow))
arrow2_tup = tuple(map(tuple, tuple((arrow_rot2+origin).astype(int))))
arrow_rot3 = np.array(geom.rotxy(np.pi+geom.v2ang(h,g,targetvelocity),arrow))
arrow3_tup = tuple(map(tuple, tuple((arrow_rot3+origin).astype(int))))
else:
textPrint.abspos(screen, "Press the start button to reset.",(430,180))
if timeflag:
lasttime = time()-starttime
timeflag = 0
if draw_stick_man:
pygame.gfxdraw.filled_polygon(screen, (stick_man), (255, 255, 255, 10))
#pygame.gfxdraw.aapolygon(screen, (stick_man), (255, 255, 255, 255))
pygame.gfxdraw.filled_polygon(screen, (tbot_tup), (0, 249, 249, 100))
pygame.gfxdraw.aapolygon(screen, (tbot_tup), WHITE)
pygame.gfxdraw.aapolygon(screen, (spokes_tup), WHITE)
pygame.gfxdraw.aacircle(screen, origin[0], origin[1], wheel_radius-tyre, WHITE)
pygame.gfxdraw.aacircle(screen, origin[0], origin[1], wheel_radius, WHITE)
pygame.draw.lines(screen, WHITE, False, (track_marks_tup),1)
pts.appendleft((iii,theta-np.pi))
pts2.appendleft((iii,velocity))
iii+=1
pygame.draw.lines(screen, (0,255,255), False, ((xdatarange[0],y_origin+0.5*yscale),(xdatarange[1],y_origin+0.5*yscale)),1)
pygame.draw.lines(screen, (0,255,255), False, ((xdatarange[0],y_origin),(xdatarange[0],y_origin+yscale)),1)
pygame.draw.lines(screen, (0,255,255), False, ((xdatarange[-1],y_origin),(xdatarange[-1],y_origin+yscale)),1)
if show_arrows:
pygame.gfxdraw.filled_polygon(screen, (arrow1_tup), (0,255,255,155))
pygame.gfxdraw.aapolygon(screen, (arrow1_tup), (0,255,255,200))
pygame.gfxdraw.filled_polygon(screen, (arrow2_tup), (255,255,255,155))
pygame.gfxdraw.aapolygon(screen, (arrow2_tup), (255,255,255,200))
pygame.gfxdraw.filled_polygon(screen, (arrow3_tup), (255,0,0,155))
pygame.gfxdraw.aapolygon(screen, (arrow3_tup), (255,0,0,200))
if iii > xdatarange[1]:
iii = xdatarange[0]
aa[:,1]=np.array(pts)[:,1]
cc[:,1]=np.array(pts2)[:,1]
try:
bb[:,1] = (yscale/((aa[:,1]-aa[:,1].max()).min())*(aa[:,1]-aa[:,1].max()))+y_origin
dd[:,1] = (yscale/((cc[:,1]-cc[:,1].max()).min())*(cc[:,1]-cc[:,1].max()))+y_origin
gdata = tuple(map(tuple, tuple(bb)))
vdata = tuple(map(tuple, tuple(dd)))
pygame.draw.lines(screen, WHITE, False, (gdata),1)
pygame.draw.lines(screen, RED, False, (vdata),1)
except:
b=1
textPrint.abspos(screen, "{:+.2f}".format(aa[:,1].max()),[xdatarange[0],y_origin-20])
textPrint.abspos(screen, "{:+.2f}".format(aa[:,1].min()),[xdatarange[0],y_origin+yscale+5])
textPrint.tprint(screen,'Angle')
textPrint.setColour(RED)
textPrint.abspos(screen, "{:+.2f}".format(cc[:,1].max()),[xdatarange[-1],y_origin-20])
textPrint.abspos(screen, "{:+.2f}".format(cc[:,1].min()),[xdatarange[-1],y_origin+yscale+5])
textPrint.tprint(screen,'Velocity')
textPrint.setColour(WHITE)
if pygame.event.get(readdataevent):
oldvals = [0,0,0,0]
for event in pygame.event.get():
if event.type == pygame.QUIT: # If user clicked close.
done = True # Flag that we are done so we exit this loop.
keys = pygame.key.get_pressed()
if keys[pgl.K_g]:
sf += 0.01
elif keys[pgl.K_f]:
sf -= 0.01
if keys[pgl.K_q]:
done = True
for i in range(hats):
hat = joystick.get_hat(i)
axis0 = joystick.get_axis(0)
axis1 = joystick.get_axis(1)
axis2 = joystick.get_axis(2)
axis3 = joystick.get_axis(3)
if keys[pgl.K_UP]:
show_arrows = 1
elif keys[pgl.K_DOWN]:
show_arrows = 0
if keys[pgl.K_a]:
auto = 1
elif keys[pgl.K_m]:
auto = 0
#
if auto:
targetvelocity = -axis0 * 0.2
else:
#acc = axis0
acc = axis0*2 # swing up
# ------------------ Highlight buttons ----------------#
screen.blit(dpad,posdpad)
screen.blit(bpad,posbpad)
screen.blit(stick,(posstickL[0]+axis0*5,posstickL[1]+axis1*5))
screen.blit(stick,(posstickR[0]+axis2*5,posstickR[1]+axis3*5))
if hat[0] == 1:
screen.blit(dpadR,posdpad)
s_ki += 0.001
speed_pid.set_PID(s_kp,s_ki,s_kd)
elif hat[0] == -1:
screen.blit(dpadL,posdpad)
s_ki -= 0.001
if s_ki < 0:
s_ki = 0
speed_pid.set_PID(s_kp,s_ki,s_kd)
elif hat[1] == 1:
screen.blit(dpadU,posdpad)
s_kp += 0.001
speed_pid.set_PID(s_kp,s_ki,s_kd)
elif hat[1] == -1:
screen.blit(dpadD,posdpad)
s_kp -= 0.001
if s_kp < 0:
s_kp = 0
speed_pid.set_PID(s_kp,s_ki,s_kd)
else:
screen.blit(dpad,posdpad)
if (hat[0] == -1) & (hat[1] == 1):
screen.blit(dpadUL,posdpad)
elif (hat[0] == 1) & (hat[1] == -1):
screen.blit(dpadDR,posdpad)
elif (hat[0] == 1 & hat[1] == 1):
screen.blit(dpadUR,posdpad)
elif hat[0] == -1 & hat[1] == -1:
screen.blit(dpadDL,posdpad)
if joystick.get_button(0):
screen.blit(bpadU,posbpad)
a_kp += 0.001
angle_pid.set_PID(a_kp,a_ki,a_kd)
elif joystick.get_button(1):
screen.blit(bpadR,posbpad)
a_ki += 0.001
angle_pid.set_PID(a_kp,a_ki,a_kd)
elif joystick.get_button(2):
screen.blit(bpadD,posbpad)
a_kp -= 0.001
if a_kp < 0:
a_kp = 0
angle_pid.set_PID(a_kp,a_ki,a_kd)
elif joystick.get_button(3):
screen.blit(bpadL,posbpad)
a_ki -= 0.001
if a_ki < 0:
a_ki = 0
angle_pid.set_PID(a_kp,a_ki,a_kd)
else:
screen.blit(bpad,posbpad)
if joystick.get_button(8):
speed_pid.set_PID(s_kpo,s_kio,s_kdo)
angle_pid.set_PID(a_kpo,a_kio,a_kdo)
sf = sf_original
elif joystick.get_button(9):
alpha = 0
gamma = 0
acc = 0
omega = 0
velocity = 0
distance = 0
theta = 0.001
origin[0] = 500
speed_pid.clear()
angle_pid.clear()
starttime = time()
timeflag = 1
if joystick.get_button(0) & joystick.get_button(1):
screen.blit(bpadUR,posbpad)
elif joystick.get_button(1) & joystick.get_button(2):
screen.blit(bpadDR,posbpad)
elif joystick.get_button(2) & joystick.get_button(3):
screen.blit(bpadDL,posbpad)
elif joystick.get_button(0) & joystick.get_button(3):
screen.blit(bpadUL,posbpad)
if joystick.get_button(4):
screen.blit(L1,posL)
s_kd += 0.001
speed_pid.set_PID(s_kp,s_ki,s_kd)
elif joystick.get_button(6):
screen.blit(L2,posL)
s_kd -= 0.001
if s_kd < 0:
s_kd = 0
speed_pid.set_PID(s_kp,s_ki,s_kd)
elif joystick.get_button(5):
screen.blit(R1,posR)
a_kd += 0.001
angle_pid.set_PID(a_kp,a_ki,a_kd)
elif joystick.get_button(7):
screen.blit(R2,posR)
a_kd -= 0.001
if a_kd < 0:
a_kd = 0
angle_pid.set_PID(a_kp,a_ki,a_kd)
else:
screen.blit(bpad,posbpad)
if joystick.get_button(4) & joystick.get_button(6):
screen.blit(L1L2,posL)
elif joystick.get_button(5) & joystick.get_button(7):
screen.blit(R1R2,posR)
elif joystick.get_button(4) & joystick.get_button(5):
screen.blit(L1,posL)
screen.blit(R1,posR)
elif joystick.get_button(4) & joystick.get_button(7):
screen.blit(L1,posL)
screen.blit(R2,posR)
elif joystick.get_button(6) & joystick.get_button(5):
screen.blit(L2,posL)
screen.blit(R1,posR)
elif joystick.get_button(6) & joystick.get_button(7):
screen.blit(L2,posL)
screen.blit(R2,posR)
if joystick.get_button(4) & joystick.get_button(6) & joystick.get_button(5):
screen.blit(L1L2,posL)
screen.blit(R1,posR)
elif joystick.get_button(4) & joystick.get_button(6) & joystick.get_button(7):
screen.blit(L1L2,posL)
screen.blit(R2,posR)
elif joystick.get_button(4) & joystick.get_button(5) & joystick.get_button(7):
screen.blit(L1,posL)
screen.blit(R1R2,posR)
elif joystick.get_button(5) & joystick.get_button(6) & joystick.get_button(7):
screen.blit(L2,posL)
screen.blit(R1R2,posR)
if joystick.get_button(4) & joystick.get_button(5) & joystick.get_button(6) & joystick.get_button(7):
screen.blit(L1L2,posL)
screen.blit(R1R2,posR)
s_kp = speed_pid.get_PID()[0]
s_ki = speed_pid.get_PID()[1]
s_kd = speed_pid.get_PID()[2]
a_kp = angle_pid.get_PID()[0]
a_ki = angle_pid.get_PID()[1]
a_kd = angle_pid.get_PID()[2]
textPrint.setfontsize(22)
textPrint.setColour(pygame.Color(0,255,255,255))
textPrint.abspos(screen, "T-Bot Simulator",(10,10))
textPrint.setColour(WHITE)
textPrint.setfontsize(16)
textPrint.tprint(screen, "www.klikrobotics.com")
textPrint.tprint(screen, " ")
textPrint.tprint(screen, "T: {:.3f}".format(time()-starttime))
textPrint.tprint(screen, "Last T: {:.3f}".format(lasttime))
textPrint.abspos(screen, "Tuning Parameters",(10,400))
textPrint.tprint(screen, " ")
textPrint.tprint(screen, "s_kp: {:.3f}".format(s_kp))
textPrint.tprint(screen, "s_ki: {:.3f}".format(s_ki))
textPrint.tprint(screen, "s_kd: {:.3f}".format(s_kd))
textPrint.tprint(screen, " ")
textPrint.tprint(screen, "a_kp: {:.3f}".format(a_kp))
textPrint.tprint(screen, "a_ki: {:.3f}".format(a_ki))
textPrint.tprint(screen, "a_kd: {:.3f}".format(a_kd))
textPrint.tprint(screen, " ")
if auto:
textPrint.tprint(screen, "Auto - Press m for manual control")
else:
textPrint.tprint(screen, "Manual - Press a for automatic control")
textPrint.tprint(screen,'Press i for information')
textPrint.abspos(screen, "g: {:.2f}".format((g)),(890,10))
textPrint.tprint(screen, "theta: {:.2f}".format((theta)*180/np.pi))
textPrint.tprint(screen, "Alpha: {:.2f}".format(alpha))
textPrint.tprint(screen, "Gamma: {:.2f}".format(gamma))
textPrint.tprint(screen, "Acceleration: {:.2f}".format(acc))
textPrint.tprint(screen, "Velocity: {:.2f}".format(velocity))
textPrint.tprint(screen, "Distance: {:.2f}".format(distance))
textPrint.tprint(screen, "{} FPS".format(str(int(clock.get_fps()))))
pygame.display.flip()
# Limit to 60 frames per second. Set to 30 for Raspberry Pi. It can't run at 60 fps
clock.tick(framerate)
if keys[pgl.K_p]:
waiting = 1
while waiting:
for event in pygame.event.get():
keys = pygame.key.get_pressed()
if keys[pgl.K_s]:
save = 1
if save:
pygame.image.save(screen, datetime.now().strftime("TutorialImages/%m%d%Y_%H%M%S.png"))
save = 0
if keys[pgl.K_o]:
waiting = 0
if keys[pgl.K_q]:
done = True
waiting = 0
if keys[pgl.K_i]:
waiting = 1
while waiting:
for event in pygame.event.get():
keys = pygame.key.get_pressed()
screen.blit(bg,(0,0))
textPrint.setfontsize(22)
textPrint.setColour(pygame.Color(0,255,255,255))
textPrint.abspos(screen, "T-Bot Simulator",(10,10))
textPrint.setColour(WHITE)
textPrint.setfontsize(16)
textPrint.tprint(screen, "www.klikrobotics.com")
textPrint.setfontsize(20)
textPrint.abspos(screen, "Keyboard",(200,80))
textPrint.tprint(screen, "")
textPrint.setfontsize(16)
textPrint.tprint(screen, "p -> Pause")
textPrint.tprint(screen, "o -> Resume")
textPrint.tprint(screen, "s -> Save paused frame")
textPrint.tprint(screen, "i -> For information")
textPrint.tprint(screen, "m -> For manual control")
textPrint.tprint(screen, "a -> For automatic PID control")
textPrint.tprint(screen, "g -> increase g")
textPrint.tprint(screen, "f -> decrease g")
textPrint.tprint(screen, "Up arrow to show arrows")
textPrint.tprint(screen, "Down arrow to hide arrows")
textPrint.setfontsize(20)
textPrint.abspos(screen, "Joystick",(600,80))
textPrint.tprint(screen, "")
textPrint.setfontsize(16)
textPrint.tprint(screen, "Left side of the controller")
textPrint.tprint(screen, "")
textPrint.tprint(screen, "Up -> Increase speed proportional gain")
textPrint.tprint(screen, "Down -> Decrease speed proportional gain")
textPrint.tprint(screen, "Left -> Increase speed integral gain")
textPrint.tprint(screen, "Right -> Decrease speed integral gain")
textPrint.tprint(screen, "L1 -> Increase speed derivitive gain")
textPrint.tprint(screen, "L2 -> Decrease speed derivitive gain")
textPrint.tprint(screen, "")
textPrint.tprint(screen, "Right side of the controller")
textPrint.tprint(screen, "")
textPrint.tprint(screen, "Triangle -> Increase angle proportional gain")
textPrint.tprint(screen, "X -> Decrease angle proportional gain")
textPrint.tprint(screen, "Square -> Increase angle integral gain")
textPrint.tprint(screen, "Circle -> Decrease angle integral gain")
textPrint.tprint(screen, "R1 -> Increase angle derivitive gain")
textPrint.tprint(screen, "R2 -> Decrease angle derivitive gain")
textPrint.setfontsize(40)
textPrint.abspos(screen, "Press o to return to simulator",(290,500))
pygame.display.flip()
if keys[pgl.K_o]:
waiting = 0
if keys[pgl.K_q]:
done = True
waiting = 0
pygame.display.quit() # tear down the display window before quitting
pygame.quit() # release all initialised pygame modules
print('Simulation Closed')
| [
"pygame.init",
"pygame.quit",
"numpy.random.rand",
"pygame.gfxdraw.filled_polygon",
"numpy.array",
"pygame.display.quit",
"numpy.sin",
"pygame.time.set_timer",
"sys.path.append",
"numpy.mod",
"collections.deque",
"pygame.display.set_mode",
"pygame.display.flip",
"pygame.joystick.init",
"... | [((131, 159), 'sys.path.append', 'sys.path.append', (['currentpath'], {}), '(currentpath)\n', (146, 159), False, 'import sys, os\n'), ((355, 374), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (372, 374), False, 'import pygame\n'), ((1358, 1407), 'TBotTools.pid.pid', 'pid.pid', (['s_kp', 's_ki', 's_kd', '[-10, 10]', '[-5, 5]', 'dt'], {}), '(s_kp, s_ki, s_kd, [-10, 10], [-5, 5], dt)\n', (1365, 1407), False, 'from TBotTools import pid, geometry, pgt\n'), ((1415, 1461), 'TBotTools.pid.pid', 'pid.pid', (['a_kp', 'a_ki', 'a_kd', '[6, 6]', '[-1, 1]', 'dt'], {}), '(a_kp, a_ki, a_kd, [6, 6], [-1, 1], dt)\n', (1422, 1461), False, 'from TBotTools import pid, geometry, pgt\n'), ((1469, 1490), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (1481, 1490), False, 'import pygame\n'), ((1499, 1520), 'pygame.Color', 'pygame.Color', (['"""white"""'], {}), "('white')\n", (1511, 1520), False, 'import pygame\n'), ((1528, 1548), 'pygame.Color', 'pygame.Color', (['"""gray"""'], {}), "('gray')\n", (1540, 1548), False, 'import pygame\n'), ((1555, 1574), 'pygame.Color', 'pygame.Color', (['"""red"""'], {}), "('red')\n", (1567, 1574), False, 'import pygame\n'), ((2366, 2385), 'TBotTools.geometry.geometry', 'geometry.geometry', ([], {}), '()\n', (2383, 2385), False, 'from TBotTools import pid, geometry, pgt\n'), ((2398, 2404), 'time.time', 'time', ([], {}), '()\n', (2402, 2404), False, 'from time import time\n'), ((2650, 2669), 'TBotTools.geometry.geometry', 'geometry.geometry', ([], {}), '()\n', (2667, 2669), False, 'from TBotTools import pid, geometry, pgt\n'), ((2887, 2918), 'numpy.loadtxt', 'np.loadtxt', (['"""T-BotSideView.dat"""'], {}), "('T-BotSideView.dat')\n", (2897, 2918), True, 'import numpy as np\n'), ((3200, 3275), 'numpy.array', 'np.array', (['[[0, origin[1] + wheel_radius], [1000, origin[1] + wheel_radius]]'], {}), '([[0, origin[1] + wheel_radius], [1000, origin[1] + wheel_radius]])\n', (3208, 3275), True, 'import numpy as np\n'), 
((3361, 3382), 'numpy.loadtxt', 'np.loadtxt', (['"""Man.dat"""'], {}), "('Man.dat')\n", (3371, 3382), True, 'import numpy as np\n'), ((3951, 3964), 'pygame.init', 'pygame.init', ([], {}), '()\n', (3962, 3964), False, 'import pygame\n'), ((4033, 4069), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(1000, 700)'], {}), '((1000, 700))\n', (4056, 4069), False, 'import pygame\n'), ((4070, 4115), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""T-Bot Simulator"""'], {}), "('T-Bot Simulator')\n", (4096, 4115), False, 'import pygame\n'), ((4170, 4189), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (4187, 4189), False, 'import pygame\n'), ((4424, 4473), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/joystick_only.png')"], {}), "(dirpath + '/joystick_only.png')\n", (4441, 4473), False, 'import pygame\n'), ((4486, 4526), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/line.png')"], {}), "(dirpath + '/line.png')\n", (4503, 4526), False, 'import pygame\n'), ((4532, 4572), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/dpad.png')"], {}), "(dirpath + '/dpad.png')\n", (4549, 4572), False, 'import pygame\n'), ((4579, 4620), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/dpadU.png')"], {}), "(dirpath + '/dpadU.png')\n", (4596, 4620), False, 'import pygame\n'), ((4627, 4668), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/dpadD.png')"], {}), "(dirpath + '/dpadD.png')\n", (4644, 4668), False, 'import pygame\n'), ((4675, 4716), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/dpadL.png')"], {}), "(dirpath + '/dpadL.png')\n", (4692, 4716), False, 'import pygame\n'), ((4723, 4764), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/dpadR.png')"], {}), "(dirpath + '/dpadR.png')\n", (4740, 4764), False, 'import pygame\n'), ((4772, 4814), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/dpadUR.png')"], {}), "(dirpath + '/dpadUR.png')\n", (4789, 4814), False, 'import 
pygame\n'), ((4822, 4864), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/dpadDR.png')"], {}), "(dirpath + '/dpadDR.png')\n", (4839, 4864), False, 'import pygame\n'), ((4872, 4914), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/dpadUL.png')"], {}), "(dirpath + '/dpadUL.png')\n", (4889, 4914), False, 'import pygame\n'), ((4922, 4964), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/dpadDL.png')"], {}), "(dirpath + '/dpadDL.png')\n", (4939, 4964), False, 'import pygame\n'), ((4971, 5011), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/bpad.png')"], {}), "(dirpath + '/bpad.png')\n", (4988, 5011), False, 'import pygame\n'), ((5018, 5059), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/bpadU.png')"], {}), "(dirpath + '/bpadU.png')\n", (5035, 5059), False, 'import pygame\n'), ((5066, 5107), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/bpadD.png')"], {}), "(dirpath + '/bpadD.png')\n", (5083, 5107), False, 'import pygame\n'), ((5114, 5155), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/bpadL.png')"], {}), "(dirpath + '/bpadL.png')\n", (5131, 5155), False, 'import pygame\n'), ((5162, 5203), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/bpadR.png')"], {}), "(dirpath + '/bpadR.png')\n", (5179, 5203), False, 'import pygame\n'), ((5211, 5253), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/bpadUR.png')"], {}), "(dirpath + '/bpadUR.png')\n", (5228, 5253), False, 'import pygame\n'), ((5261, 5303), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/bpadDR.png')"], {}), "(dirpath + '/bpadDR.png')\n", (5278, 5303), False, 'import pygame\n'), ((5311, 5353), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/bpadUL.png')"], {}), "(dirpath + '/bpadUL.png')\n", (5328, 5353), False, 'import pygame\n'), ((5361, 5403), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/bpadDL.png')"], {}), "(dirpath + '/bpadDL.png')\n", (5378, 5403), False, 'import pygame\n'), ((5411, 
5452), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/stick.png')"], {}), "(dirpath + '/stick.png')\n", (5428, 5452), False, 'import pygame\n'), ((5457, 5495), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/L1.png')"], {}), "(dirpath + '/L1.png')\n", (5474, 5495), False, 'import pygame\n'), ((5499, 5537), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/L2.png')"], {}), "(dirpath + '/L2.png')\n", (5516, 5537), False, 'import pygame\n'), ((5543, 5583), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/L1L2.png')"], {}), "(dirpath + '/L1L2.png')\n", (5560, 5583), False, 'import pygame\n'), ((5587, 5625), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/R1.png')"], {}), "(dirpath + '/R1.png')\n", (5604, 5625), False, 'import pygame\n'), ((5629, 5667), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/R2.png')"], {}), "(dirpath + '/R2.png')\n", (5646, 5667), False, 'import pygame\n'), ((5673, 5713), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/R1R2.png')"], {}), "(dirpath + '/R1R2.png')\n", (5690, 5713), False, 'import pygame\n'), ((5888, 5980), 'numpy.array', 'np.array', (['[[2, 0], [2, 150], [7, 150], [0, 165], [-7, 150], [-2, 150], [-2, 0], [2, 0]]'], {}), '([[2, 0], [2, 150], [7, 150], [0, 165], [-7, 150], [-2, 150], [-2, \n 0], [2, 0]])\n', (5896, 5980), True, 'import numpy as np\n'), ((6165, 6187), 'pygame.joystick.init', 'pygame.joystick.init', ([], {}), '()\n', (6185, 6187), False, 'import pygame\n'), ((6199, 6226), 'pygame.joystick.Joystick', 'pygame.joystick.Joystick', (['(0)'], {}), '(0)\n', (6223, 6226), False, 'import pygame\n'), ((6410, 6450), 'pygame.time.set_timer', 'pygame.time.set_timer', (['readdataevent', '(60)'], {}), '(readdataevent, 60)\n', (6431, 6450), False, 'import pygame\n'), ((6538, 6581), 'collections.deque', 'deque', ([], {'maxlen': '(xdatarange[1] - xdatarange[0])'}), '(maxlen=xdatarange[1] - xdatarange[0])\n', (6543, 6581), False, 'from collections import deque\n'), 
((6587, 6630), 'collections.deque', 'deque', ([], {'maxlen': '(xdatarange[1] - xdatarange[0])'}), '(maxlen=xdatarange[1] - xdatarange[0])\n', (6592, 6630), False, 'from collections import deque\n'), ((6961, 6972), 'numpy.copy', 'np.copy', (['aa'], {}), '(aa)\n', (6968, 6972), True, 'import numpy as np\n'), ((6976, 6987), 'numpy.copy', 'np.copy', (['cc'], {}), '(cc)\n', (6983, 6987), True, 'import numpy as np\n'), ((25333, 25354), 'pygame.display.quit', 'pygame.display.quit', ([], {}), '()\n', (25352, 25354), False, 'import pygame\n'), ((25355, 25368), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (25366, 25368), False, 'import pygame\n'), ((2926, 2955), 'numpy.vstack', 'np.vstack', (['(tbot, tbot[0, :])'], {}), '((tbot, tbot[0, :]))\n', (2935, 2955), True, 'import numpy as np\n'), ((3085, 3170), 'numpy.array', 'np.array', (['[[0, 1], [0, 0], [0.8660254, -0.5], [0, 0], [-0.8660254, -0.5], [0, 0]]'], {}), '([[0, 1], [0, 0], [0.8660254, -0.5], [0, 0], [-0.8660254, -0.5], [0,\n 0]])\n', (3093, 3170), True, 'import numpy as np\n'), ((3395, 3444), 'numpy.vstack', 'np.vstack', (['(stick_man_data, stick_man_data[0, :])'], {}), '((stick_man_data, stick_man_data[0, :]))\n', (3404, 3444), True, 'import numpy as np\n'), ((6113, 6134), 'pygame.Color', 'pygame.Color', (['"""white"""'], {}), "('white')\n", (6125, 6134), False, 'import pygame\n'), ((6776, 6789), 'numpy.array', 'np.array', (['pts'], {}), '(pts)\n', (6784, 6789), True, 'import numpy as np\n'), ((6885, 6899), 'numpy.array', 'np.array', (['pts2'], {}), '(pts2)\n', (6893, 6899), True, 'import numpy as np\n'), ((11472, 11539), 'pygame.gfxdraw.filled_polygon', 'pygame.gfxdraw.filled_polygon', (['screen', 'tbot_tup', '(0, 249, 249, 100)'], {}), '(screen, tbot_tup, (0, 249, 249, 100))\n', (11501, 11539), False, 'import pygame\n'), ((11555, 11604), 'pygame.gfxdraw.aapolygon', 'pygame.gfxdraw.aapolygon', (['screen', 'tbot_tup', 'WHITE'], {}), '(screen, tbot_tup, WHITE)\n', (11579, 11604), False, 'import pygame\n'), 
((11611, 11662), 'pygame.gfxdraw.aapolygon', 'pygame.gfxdraw.aapolygon', (['screen', 'spokes_tup', 'WHITE'], {}), '(screen, spokes_tup, WHITE)\n', (11635, 11662), False, 'import pygame\n'), ((11669, 11754), 'pygame.gfxdraw.aacircle', 'pygame.gfxdraw.aacircle', (['screen', 'origin[0]', 'origin[1]', '(wheel_radius - tyre)', 'WHITE'], {}), '(screen, origin[0], origin[1], wheel_radius - tyre,\n WHITE)\n', (11692, 11754), False, 'import pygame\n'), ((11753, 11827), 'pygame.gfxdraw.aacircle', 'pygame.gfxdraw.aacircle', (['screen', 'origin[0]', 'origin[1]', 'wheel_radius', 'WHITE'], {}), '(screen, origin[0], origin[1], wheel_radius, WHITE)\n', (11776, 11827), False, 'import pygame\n'), ((11832, 11891), 'pygame.draw.lines', 'pygame.draw.lines', (['screen', 'WHITE', '(False)', 'track_marks_tup', '(1)'], {}), '(screen, WHITE, False, track_marks_tup, 1)\n', (11849, 11891), False, 'import pygame\n'), ((11987, 12127), 'pygame.draw.lines', 'pygame.draw.lines', (['screen', '(0, 255, 255)', '(False)', '((xdatarange[0], y_origin + 0.5 * yscale), (xdatarange[1], y_origin + 0.5 *\n yscale))', '(1)'], {}), '(screen, (0, 255, 255), False, ((xdatarange[0], y_origin +\n 0.5 * yscale), (xdatarange[1], y_origin + 0.5 * yscale)), 1)\n', (12004, 12127), False, 'import pygame\n'), ((12114, 12233), 'pygame.draw.lines', 'pygame.draw.lines', (['screen', '(0, 255, 255)', '(False)', '((xdatarange[0], y_origin), (xdatarange[0], y_origin + yscale))', '(1)'], {}), '(screen, (0, 255, 255), False, ((xdatarange[0], y_origin),\n (xdatarange[0], y_origin + yscale)), 1)\n', (12131, 12233), False, 'import pygame\n'), ((12226, 12347), 'pygame.draw.lines', 'pygame.draw.lines', (['screen', '(0, 255, 255)', '(False)', '((xdatarange[-1], y_origin), (xdatarange[-1], y_origin + yscale))', '(1)'], {}), '(screen, (0, 255, 255), False, ((xdatarange[-1], y_origin),\n (xdatarange[-1], y_origin + yscale)), 1)\n', (12243, 12347), False, 'import pygame\n'), ((13917, 13948), 'pygame.event.get', 'pygame.event.get', 
(['readdataevent'], {}), '(readdataevent)\n', (13933, 13948), False, 'import pygame\n'), ((14004, 14022), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (14020, 14022), False, 'import pygame\n'), ((14170, 14194), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (14192, 14194), False, 'import pygame\n'), ((21468, 21489), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (21487, 21489), False, 'import pygame\n'), ((95, 120), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (110, 120), False, 'import sys, os\n'), ((4294, 4334), 'pygame.image.load', 'pygame.image.load', (["(dirpath + '/Gray.jpg')"], {}), "(dirpath + '/Gray.jpg')\n", (4311, 4334), False, 'import pygame\n'), ((9441, 9464), 'numpy.mod', 'np.mod', (['origin[0]', '(1000)'], {}), '(origin[0], 1000)\n', (9447, 9464), True, 'import numpy as np\n'), ((11309, 11378), 'pygame.gfxdraw.filled_polygon', 'pygame.gfxdraw.filled_polygon', (['screen', 'stick_man', '(255, 255, 255, 10)'], {}), '(screen, stick_man, (255, 255, 255, 10))\n', (11338, 11378), False, 'import pygame\n'), ((12369, 12438), 'pygame.gfxdraw.filled_polygon', 'pygame.gfxdraw.filled_polygon', (['screen', 'arrow1_tup', '(0, 255, 255, 155)'], {}), '(screen, arrow1_tup, (0, 255, 255, 155))\n', (12398, 12438), False, 'import pygame\n'), ((12447, 12511), 'pygame.gfxdraw.aapolygon', 'pygame.gfxdraw.aapolygon', (['screen', 'arrow1_tup', '(0, 255, 255, 200)'], {}), '(screen, arrow1_tup, (0, 255, 255, 200))\n', (12471, 12511), False, 'import pygame\n'), ((12527, 12598), 'pygame.gfxdraw.filled_polygon', 'pygame.gfxdraw.filled_polygon', (['screen', 'arrow2_tup', '(255, 255, 255, 155)'], {}), '(screen, arrow2_tup, (255, 255, 255, 155))\n', (12556, 12598), False, 'import pygame\n'), ((12607, 12673), 'pygame.gfxdraw.aapolygon', 'pygame.gfxdraw.aapolygon', (['screen', 'arrow2_tup', '(255, 255, 255, 200)'], {}), '(screen, arrow2_tup, (255, 255, 255, 200))\n', (12631, 12673), False, 'import 
pygame\n'), ((12681, 12748), 'pygame.gfxdraw.filled_polygon', 'pygame.gfxdraw.filled_polygon', (['screen', 'arrow3_tup', '(255, 0, 0, 155)'], {}), '(screen, arrow3_tup, (255, 0, 0, 155))\n', (12710, 12748), False, 'import pygame\n'), ((12756, 12818), 'pygame.gfxdraw.aapolygon', 'pygame.gfxdraw.aapolygon', (['screen', 'arrow3_tup', '(255, 0, 0, 200)'], {}), '(screen, arrow3_tup, (255, 0, 0, 200))\n', (12780, 12818), False, 'import pygame\n'), ((12892, 12905), 'numpy.array', 'np.array', (['pts'], {}), '(pts)\n', (12900, 12905), True, 'import numpy as np\n'), ((12923, 12937), 'numpy.array', 'np.array', (['pts2'], {}), '(pts2)\n', (12931, 12937), True, 'import numpy as np\n'), ((13237, 13286), 'pygame.draw.lines', 'pygame.draw.lines', (['screen', 'WHITE', '(False)', 'gdata', '(1)'], {}), '(screen, WHITE, False, gdata, 1)\n', (13254, 13286), False, 'import pygame\n'), ((13296, 13343), 'pygame.draw.lines', 'pygame.draw.lines', (['screen', 'RED', '(False)', 'vdata', '(1)'], {}), '(screen, RED, False, vdata, 1)\n', (13313, 13343), False, 'import pygame\n'), ((19820, 19850), 'pygame.Color', 'pygame.Color', (['(0)', '(255)', '(255)', '(255)'], {}), '(0, 255, 255, 255)\n', (19832, 19850), False, 'import pygame\n'), ((17117, 17123), 'time.time', 'time', ([], {}), '()\n', (17121, 17123), False, 'from time import time\n'), ((21696, 21714), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (21712, 21714), False, 'import pygame\n'), ((22255, 22273), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (22271, 22273), False, 'import pygame\n'), ((7504, 7517), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (7510, 7517), True, 'import numpy as np\n'), ((7826, 7839), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (7832, 7839), True, 'import numpy as np\n'), ((9614, 9631), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (9628, 9631), True, 'import numpy as np\n'), ((11235, 11241), 'time.time', 'time', ([], {}), '()\n', (11239, 11241), False, 
'from time import time\n'), ((20102, 20108), 'time.time', 'time', ([], {}), '()\n', (20106, 20108), False, 'from time import time\n'), ((21739, 21763), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (21761, 21763), False, 'import pygame\n'), ((22298, 22322), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (22320, 22322), False, 'import pygame\n'), ((25146, 25167), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (25165, 25167), False, 'import pygame\n'), ((22440, 22470), 'pygame.Color', 'pygame.Color', (['(0)', '(255)', '(255)', '(255)'], {}), '(0, 255, 255, 255)\n', (22452, 22470), False, 'import pygame\n'), ((21906, 21920), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (21918, 21920), False, 'from datetime import datetime\n')] |
from __future__ import print_function
import numpy as np
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, Circle, VBox, HBox, Button, TapTool
from bokeh.document import Document
from bokeh.session import Session
from bokeh.browserlib import view
# Open a session against the (legacy) Bokeh server and register this
# script's document under a fixed name.
document = Document()
session = Session()
session.use_doc('linked_tap_server')
session.load_document(document)
# Shared data: N points on the parabola y = x**2.
N = 9
x = np.linspace(-2, 2, N)
y = x**2
# Plot 1: a per-point 'size' column drives the glyph size; TapTool enables
# point selection by clicking.
source1 = ColumnDataSource(dict(x = x, y = y, size = [20]*N))
xdr1 = DataRange1d(sources=[source1.columns("x")])
ydr1 = DataRange1d(sources=[source1.columns("y")])
plot1 = Plot(title="Plot1", x_range=xdr1, y_range=ydr1, plot_width=400, plot_height=400)
plot1.tools.append(TapTool(plot=plot1))
plot1.add_glyph(source1, Circle(x="x", y="y", size="size", fill_color="red"))
# Plot 2: a per-point 'color' column drives the fill colour.
source2 = ColumnDataSource(dict(x = x, y = y, color = ["blue"]*N))
xdr2 = DataRange1d(sources=[source2.columns("x")])
ydr2 = DataRange1d(sources=[source2.columns("y")])
plot2 = Plot(title="Plot2", x_range=xdr2, y_range=ydr2, plot_width=400, plot_height=400)
plot2.tools.append(TapTool(plot=plot2))
plot2.add_glyph(source2, Circle(x="x", y="y", size=20, fill_color="color"))
def on_selection_change1(obj, attr, _, inds):
    """Mirror a tap on plot1 by recolouring the matching point on plot2.

    Rebuilds the colour column as all blue, paints the single selected
    index red (if any), and pushes the change to the server session.
    """
    palette = N * ["blue"]
    if inds:
        (picked,) = inds  # exactly one selected index expected
        palette[picked] = "red"
    source2.data["color"] = palette
    session.store_objects(source2)
source1.on_change('selected', on_selection_change1)
def on_selection_change2(obj, attr, _, inds):
    """Mirror a tap on plot2 by resizing the matching point on plot1.

    Shrinks every point to 10 and enlarges the selected one to 40; with
    no selection, all points return to size 20.
    """
    if not inds:
        sizes = N * [20]
    else:
        (picked,) = inds  # exactly one selected index expected
        sizes = N * [10]
        sizes[picked] = 40
    source1.data["size"] = sizes
    session.store_objects(source1)
source2.on_change('selected', on_selection_change2)
reset = Button(label="Reset")
def on_reset_click():
    """Clear the selection on both plots and push the change to the client."""
    for src in (source1, source2):
        src.selected = []
    session.store_objects(source1, source2)
reset.on_click(on_reset_click)
# Lay out the reset button beside the two plots and persist the document.
vbox = VBox(children=[reset], width=150)
hbox = HBox(children=[vbox, plot1, plot2])
document.add(hbox)
session.store_document(document)
if __name__ == "__main__":
    link = session.object_link(document.context)
    print("Please visit %s to see the plots" % link)
    # Open the session link in a browser, then block to serve callbacks.
    view(link)
    print("\npress ctrl-C to exit")
    session.poll_document(document)
| [
"bokeh.models.Circle",
"bokeh.browserlib.view",
"bokeh.session.Session",
"bokeh.models.TapTool",
"bokeh.models.VBox",
"numpy.linspace",
"bokeh.models.Button",
"bokeh.models.Plot",
"bokeh.document.Document",
"bokeh.models.HBox"
] | [((291, 301), 'bokeh.document.Document', 'Document', ([], {}), '()\n', (299, 301), False, 'from bokeh.document import Document\n'), ((312, 321), 'bokeh.session.Session', 'Session', ([], {}), '()\n', (319, 321), False, 'from bokeh.session import Session\n'), ((403, 424), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', 'N'], {}), '(-2, 2, N)\n', (414, 424), True, 'import numpy as np\n'), ((607, 692), 'bokeh.models.Plot', 'Plot', ([], {'title': '"""Plot1"""', 'x_range': 'xdr1', 'y_range': 'ydr1', 'plot_width': '(400)', 'plot_height': '(400)'}), "(title='Plot1', x_range=xdr1, y_range=ydr1, plot_width=400, plot_height=400\n )\n", (611, 692), False, 'from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, Circle, VBox, HBox, Button, TapTool\n'), ((984, 1069), 'bokeh.models.Plot', 'Plot', ([], {'title': '"""Plot2"""', 'x_range': 'xdr2', 'y_range': 'ydr2', 'plot_width': '(400)', 'plot_height': '(400)'}), "(title='Plot2', x_range=xdr2, y_range=ydr2, plot_width=400, plot_height=400\n )\n", (988, 1069), False, 'from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, Circle, VBox, HBox, Button, TapTool\n'), ((1729, 1750), 'bokeh.models.Button', 'Button', ([], {'label': '"""Reset"""'}), "(label='Reset')\n", (1735, 1750), False, 'from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, Circle, VBox, HBox, Button, TapTool\n'), ((1910, 1943), 'bokeh.models.VBox', 'VBox', ([], {'children': '[reset]', 'width': '(150)'}), '(children=[reset], width=150)\n', (1914, 1943), False, 'from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, Circle, VBox, HBox, Button, TapTool\n'), ((1951, 1986), 'bokeh.models.HBox', 'HBox', ([], {'children': '[vbox, plot1, plot2]'}), '(children=[vbox, plot1, plot2])\n', (1955, 1986), False, 'from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, Circle, VBox, HBox, Button, TapTool\n'), ((707, 726), 'bokeh.models.TapTool', 'TapTool', 
([], {'plot': 'plot1'}), '(plot=plot1)\n', (714, 726), False, 'from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, Circle, VBox, HBox, Button, TapTool\n'), ((753, 804), 'bokeh.models.Circle', 'Circle', ([], {'x': '"""x"""', 'y': '"""y"""', 'size': '"""size"""', 'fill_color': '"""red"""'}), "(x='x', y='y', size='size', fill_color='red')\n", (759, 804), False, 'from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, Circle, VBox, HBox, Button, TapTool\n'), ((1084, 1103), 'bokeh.models.TapTool', 'TapTool', ([], {'plot': 'plot2'}), '(plot=plot2)\n', (1091, 1103), False, 'from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, Circle, VBox, HBox, Button, TapTool\n'), ((1130, 1179), 'bokeh.models.Circle', 'Circle', ([], {'x': '"""x"""', 'y': '"""y"""', 'size': '(20)', 'fill_color': '"""color"""'}), "(x='x', y='y', size=20, fill_color='color')\n", (1136, 1179), False, 'from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid, Circle, VBox, HBox, Button, TapTool\n'), ((2174, 2184), 'bokeh.browserlib.view', 'view', (['link'], {}), '(link)\n', (2178, 2184), False, 'from bokeh.browserlib import view\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 8 20:03:11 2020
@author: charl
"""
import pandas as pd
import names
import numpy as np
# Reformat dataframe output from pd.read_csv() on an Metrica open tracking data CSV file
# into a more user/database friendly format
def Reformat(data):
    """Convert a Metrica open tracking CSV frame into long (tidy) format.

    Input - a dataframe as produced by pd.read_csv on a Metrica tracking
    CSV, where each player's x column is named and the paired y column is
    an 'Unnamed' column immediately after it.
    Output - a dataframe with one row per (frame, player) and columns
    period, frame, time, player, x_loc, y_loc.

    Note: column renaming happens in place, so the caller's dataframe is
    modified as a side effect.
    """
    # Snapshot the raw header: a named column holds x, the following
    # 'Unnamed' column holds the same player's y.
    raw_cols = list(data)
    for pos, col in enumerate(raw_cols):
        if pos <= 2:
            continue  # Period / Frame / Time [s] stay as they are
        suffixed = raw_cols[pos - 1] + "_y" if "Unnamed" in col else col + "_x"
        data.rename(columns={col: suffixed}, inplace=True)
    # Long format: one row per (frame, player-coordinate) pair.
    long_form = pd.melt(data, id_vars=['Period', 'Frame', 'Time [s]'], var_name='player')
    long_form = long_form.sort_values(['Frame', 'player'])
    # After sorting, '<player>_x' and '<player>_y' rows alternate, so even
    # rows carry x values and the odd rows carry the matching y values.
    coords = pd.DataFrame({'x_loc': long_form['value'].iloc[::2].values,
                           'y_loc': long_form['value'].iloc[1::2].values})
    long_form = long_form.iloc[::2].reset_index(drop=True)
    coords = coords.reset_index(drop=True)
    long_form = pd.concat([long_form, coords], axis=1).drop(['value'], axis=1)
    # Strip the '_x' suffix to recover the plain player name.
    long_form['player'] = [name[:-2] for name in long_form['player']]
    return long_form.rename(columns={'Period': 'period', 'Frame': 'frame',
                                     'Time [s]': 'time'})
# function to assign random names to "Player X" values in Metrica open data - can set either male or female names
def GiveNames(team_data, gender='female'):
    """Replace generic "Player X" labels with random full names.

    Input - a Reformatted (see above) team dataframe; gender - passed to
    names.get_full_name, 'female' (default) or 'male'. The default keeps
    the original behaviour; previously the docstring promised a gender
    choice but 'female' was hard-coded.
    Output - the same dataframe with every non-'Ball' player renamed
    (modified in place and returned).
    """
    for player in set(team_data.loc[:, "player"]):
        if player != 'Ball':  # the ball keeps its label
            team_data.loc[:, "player"] = team_data.loc[:, "player"].replace(
                player, names.get_full_name(gender=gender))
    return team_data
# function to calculate veloicty and acceleration values for each player at each time step and return dictionary of player data
def VelocityCalc(team_data, pitch_xdim, pitch_ydim, sampled_timestep):
    """Compute velocity and acceleration columns for every player.

    Input - a Reformatted (see above) team dataframe; pitch x dimension in
    metres; pitch y dimension in metres; timestep of the data in seconds.
    Output - a dict mapping player name -> that player's dataframe with
    x_diff/y_diff (metres), x_vel/y_vel, vel (speed magnitude) and acc
    columns appended.
    """
    team_dict = {}  # one entry per player
    for player in set(team_data.loc[:, "player"]):
        # .copy() so the .loc writes below operate on an independent frame
        # rather than a view of team_data (avoids SettingWithCopyWarning
        # and is safe under pandas copy-on-write).
        player_df = team_data[team_data["player"] == player].copy()
        # NOTE(review): diff(-1) followed by shift(1) equals -diff(1), so
        # x_vel/y_vel carry the *negated* frame-to-frame displacement; the
        # derived speed magnitude 'vel' is unaffected. Kept as-is.
        player_df.loc[:, "x_diff"] = player_df.loc[:, "x_loc"].diff(-1) * pitch_xdim  # scale to metres
        player_df.loc[:, "x_diff"] = player_df.loc[:, "x_diff"].shift(1)
        player_df.loc[:, "x_vel"] = player_df.loc[:, "x_diff"] / sampled_timestep  # dx/dt
        player_df.loc[:, "y_diff"] = player_df.loc[:, "y_loc"].diff(-1) * pitch_ydim
        player_df.loc[:, "y_diff"] = player_df.loc[:, "y_diff"].shift(1)
        player_df.loc[:, "y_vel"] = player_df.loc[:, "y_diff"] / sampled_timestep  # dy/dt
        player_df.loc[:, "vel"] = np.sqrt(player_df.loc[:, "x_vel"]**2 + player_df.loc[:, "y_vel"]**2)  # speed magnitude
        player_df.loc[:, "acc"] = player_df.loc[:, "vel"].diff(1) / sampled_timestep  # dv/dt
        team_dict[player] = player_df
    return team_dict
# function to remove "dead" time when the ball isn't in play or can't be measured - note you might not always want to remove this data
def RemoveInactive(team_dict):
    """Strip frames where the ball is not in play.

    Input - a dict of per-player dataframes (must contain a 'Ball' entry).
    Output - the same dict with every row removed at frames where the ball
    position is missing. Rows are matched positionally, so all entries are
    assumed to cover the same frames in the same order.
    """
    # Frames with no ball reading mark dead time for every player.
    dead_frames = team_dict['Ball'].loc[:, 'x_loc'].isnull()
    for player in team_dict:
        frame_df = team_dict[player]
        # Blank x_loc on dead frames, then drop every blanked row.
        frame_df.loc[:, 'x_loc'] = np.where(dead_frames, float('NaN'), frame_df.loc[:, 'x_loc'])
        team_dict[player] = frame_df.drop(frame_df[frame_df['x_loc'].isnull()].index)
    return team_dict
# function to remove data points where implied velocity is > 12m/s (limit of elite human sprinting)
def RemoveImplausible(team_dict):
    """Drop samples whose speed exceeds 12 m/s (beyond elite sprint pace).

    Input - a dict of per-player dataframes with a 'vel' column.
    Output - the same dict with implausibly fast rows filtered out.
    """
    sprint_cap = 12  # m/s; upper bound of elite human sprinting
    for player in team_dict:
        plausible = team_dict[player]['vel'] <= sprint_cap
        team_dict[player] = team_dict[player][plausible]
    return team_dict
# function to generate start and stop frames for posssessions for each team
def GetPossessionWindows(event_data, home_dict, away_dict):
    """Mark every tracking frame with possession flags for both teams.

    Input - event_data: event dataframe with a 'team' column ('Home'/'Away')
    and a 'start_frame' column; home_dict/away_dict: dicts of per-player
    dataframes (each with a 'frame' column).
    Output - (home_dict, away_dict) with integer 'in_pos' and 'opp_in_pos'
    columns added to every player dataframe (1 inside a possession window,
    0 otherwise). event_data gains a 'team_int' column as a side effect.
    """
    event_data['team_int'] = event_data['team'].map({'Home': 1, 'Away': 0}) # binarize so we can do a diff to detect change of possession
    # diff == -1 marks a Home -> Away change; diff == 1 marks Away -> Home.
    pos_change_1 = event_data['team_int'].diff()[event_data['team_int'].diff() ==-1].index.values
    pos_change_2 = event_data['team_int'].diff()[event_data['team_int'].diff() ==1].index.values
    data = {'start': pos_change_2, 'end': pos_change_1}
    nonko_team_pos = pd.DataFrame(data = data) # data for team who receives ball on first possession change
    nonko_team_pos['end'] = nonko_team_pos['end']-1
    pos_change_1 = np.insert(pos_change_1,0,0) # insert 0 at start of array for team who starts with ball
    pos_change_2 = np.append(pos_change_2, len(event_data)-1) # add last index to the end of the array as this will be the pos end at final whistle
    data = {'start': pos_change_1, 'end': pos_change_2}
    ko_team_pos = pd.DataFrame(data = data) # data for team who kicks off
    ko_team_pos['end'] = ko_team_pos['end']-1
    #if team in pos at half end kicks off 2nd half then any time on the clock accumulated over half time will count as a possession time
    #maybe strip it after - but shouldn't matter as time when ball is inactive is stripped out
    ko_team_pos_name = event_data['team'].iloc[0] # TODO: if using metrica data with actual team names will need a one liner to convert actual team names to Home and Away
    # lookup start and end frames
    # Translate event indices (itertuples row[0]=start, row[1]=end) into
    # tracking frame numbers via the events' start_frame column.
    teams = [ko_team_pos,nonko_team_pos]
    for team in teams:
        start_frames = []
        end_frames = []
        for row in team.itertuples(index=False):
            start_frame = event_data['start_frame'].iloc[row[0]]
            end_frame = event_data['start_frame'].iloc[row[1]]
            start_frames.append(start_frame)
            end_frames.append(end_frame)
        team['start_frame'] = start_frames
        team['end_frame'] = end_frames
    dicts = {'Home':home_dict, 'Away':away_dict}
    for team_dict in dicts.items():
        if team_dict[0] == 'Home': # get key of active dict
            active_team = 'Home'
        else:
            active_team = 'Away'
        for player in team_dict[1]:
            team_dict[1][player]['in_pos'] = 0
            team_dict[1][player]['opp_in_pos'] = 0
            # row[2]/row[3] are the start_frame/end_frame columns added above;
            # frames inside a window get the appropriate flag set to 1.
            if ko_team_pos_name == active_team:
                for row in ko_team_pos.itertuples(index=False):
                    team_dict[1][player].loc[(team_dict[1][player].frame >= row[2]) &(team_dict[1][player].frame <= row[3]), 'in_pos'] = 1
                for row in nonko_team_pos.itertuples(index=False):
                    team_dict[1][player].loc[(team_dict[1][player].frame >= row[2]) &(team_dict[1][player].frame <= row[3]), 'opp_in_pos'] = 1
            else:
                for row in nonko_team_pos.itertuples(index=False):
                    team_dict[1][player].loc[(team_dict[1][player].frame >= row[2]) &(team_dict[1][player].frame <= row[3]), 'in_pos'] = 1
                for row in ko_team_pos.itertuples(index=False):
                    team_dict[1][player].loc[(team_dict[1][player].frame >= row[2]) &(team_dict[1][player].frame <= row[3]), 'opp_in_pos'] = 1
    home_dict = dicts['Home']
    away_dict = dicts['Away']
    return home_dict, away_dict
| [
"numpy.insert",
"numpy.sqrt",
"pandas.concat",
"pandas.DataFrame",
"pandas.melt",
"names.get_full_name"
] | [((768, 841), 'pandas.melt', 'pd.melt', (['data'], {'id_vars': "['Period', 'Frame', 'Time [s]']", 'var_name': '"""player"""'}), "(data, id_vars=['Period', 'Frame', 'Time [s]'], var_name='player')\n", (775, 841), True, 'import pandas as pd\n'), ((918, 1024), 'pandas.DataFrame', 'pd.DataFrame', (["{'x_loc': melted['value'].iloc[::2].values, 'y_loc': melted['value'].iloc[1\n ::2].values}"], {}), "({'x_loc': melted['value'].iloc[::2].values, 'y_loc': melted[\n 'value'].iloc[1::2].values})\n", (930, 1024), True, 'import pandas as pd\n'), ((1234, 1274), 'pandas.concat', 'pd.concat', (['[melted, melted_locs]'], {'axis': '(1)'}), '([melted, melted_locs], axis=1)\n', (1243, 1274), True, 'import pandas as pd\n'), ((5444, 5467), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data'}), '(data=data)\n', (5456, 5467), True, 'import pandas as pd\n'), ((5607, 5636), 'numpy.insert', 'np.insert', (['pos_change_1', '(0)', '(0)'], {}), '(pos_change_1, 0, 0)\n', (5616, 5636), True, 'import numpy as np\n'), ((5916, 5939), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data'}), '(data=data)\n', (5928, 5939), True, 'import pandas as pd\n'), ((3480, 3552), 'numpy.sqrt', 'np.sqrt', (["(player_df.loc[:, 'x_vel'] ** 2 + player_df.loc[:, 'y_vel'] ** 2)"], {}), "(player_df.loc[:, 'x_vel'] ** 2 + player_df.loc[:, 'y_vel'] ** 2)\n", (3487, 3552), True, 'import numpy as np\n'), ((1992, 2028), 'names.get_full_name', 'names.get_full_name', ([], {'gender': '"""female"""'}), "(gender='female')\n", (2011, 2028), False, 'import names\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 2 08:30:48 2019
@author: <NAME>
"""
# =============================================================================
# 1. SET PARAMETERS
# =============================================================================
p_s = 600 # source patch edge length in px (square tiles cut from the stem image)
###MODEL PATCH SIZE (tiles are resized to this edge length before inference)
m_p_s = 350
###MODEL PATH (Type 2 model for Gleason grading)
model_dir = '' #to be defined
model_name = '' #to be defined
###DIRECTORY WITH IMAGES
base_dir = '' #to be defined
###OUTPUT DIRECTORY FOR RESULTS FILE
result_dir = '' #to be defined
###NAME OF THE OUTPUT FILE
result_name = "GS_model_name_btstrp"
###Bootstrapping parameters:
###Number of bootstrapping rounds
bstr_rounds = 20
# =============================================================================
# 2. IMPORT LIBRARIES
# =============================================================================
from keras.models import load_model
import os
from keras.preprocessing import image
import numpy as np
from PIL import Image
import staintools
import random
# =============================================================================
# 3. LOAD MODEL
# =============================================================================
path_model = os.path.join(model_dir, model_name)
model = load_model(path_model)
# print a summary of the loaded network architecture
model.summary()
# =============================================================================
# 4. INITIALIZE STAIN NORMALIZER
# =============================================================================
# Reference image that defines the target H&E staining
st = staintools.read_image('standard_he_stain_small.jpg')
# Initiate Brightness Standardizer
standardizer = staintools.BrightnessStandardizer()
# Initiate Stain Normalizer using "macenko" principle
stain_norm = staintools.StainNormalizer(method='macenko')
# Read the H&E staining schema from the reference image
stain_norm.fit(st)
# =============================================================================
# 5. FUNCTIONS
# =============================================================================
#FUNCTION: Center-crop a stem image (large tumor image) so that both sides are
#whole multiples of the patch size, and return it with its size in patches.
#Necessary as some image sizes do not perfectly fit into a grid of square patches.
def image_crop (im, patch_px=600):
    """Center-crop *im* to whole patch multiples.

    Parameters
    ----------
    im : PIL.Image.Image (any object exposing ``.size`` and ``.crop``)
        The stem image to crop.
    patch_px : int, optional
        Edge length of one square patch in pixels.  Defaults to 600, the
        historical hard-coded tile size, so existing callers are unaffected.

    Returns
    -------
    (cropped_image, width_in_patches, height_in_patches)
    """
    w, h = im.size
    # size of the image expressed in whole patches
    w_p = int(w / patch_px)
    h_p = int(h / patch_px)
    # centroid of the image
    w_center = int(w / 2)
    h_center = int(h / 2)
    # coordinates of the upper-left corner of the centered crop
    coord_w_1 = int(w_center - (w_p / 2 * patch_px))
    coord_h_1 = int(h_center - (h_p / 2 * patch_px))
    # full crop box derived from the upper-left corner
    area = (coord_w_1, coord_h_1, coord_w_1 + w_p * patch_px, coord_h_1 + h_p * patch_px)
    im_crop = im.crop (area)
    return (im_crop, w_p, h_p)
#FUNCTION: Randomly generate one unique patch coordinate pair for bootstrapping
def coord_loop (coords, w_p, h_p, e, i):
    """Draw a random, not-yet-used (width, height) patch coordinate pair for
    slot *e* of bootstrapping round *i* and store it in *coords*.

    Pairs are redrawn until they differ from every pair already placed in
    the same round, so each round samples patches without replacement.
    """
    while True:
        wb = random.randint(1, w_p)
        hb = random.randint(1, h_p)
        if not no_pass_fun (coords, e, i, wb, hb):
            break
        print("generating new coordinates")
    # persist the unique pair in the flat (w, h, w, h, ...) layout
    coords[i, e * 2] = wb
    coords[i, e * 2 + 1] = hb
#Check whether a candidate patch coordinate pair was already drawn for this
#bootstrapping round; if so, the caller must generate and re-test a new pair.
def no_pass_fun (coords, e, i, temp_1, temp_2):
    """Return True when the pair (temp_1, temp_2) already occupies one of the
    first *e* coordinate slots of bootstrapping round *i* in *coords*."""
    for slot in range (e):
        if coords[i, slot * 2] == temp_1 and coords[i, slot * 2 + 1] == temp_2:
            return True
    return False
#Build the coords array holding the randomly drawn patch coordinates for every
#bootstrapping round.  Each row is one round; its width depends on the current
#ROI size (1, 2, 3, n ... patches) = bstr_size.
def bstr_coord_gen (bstr_rounds, bstr_size, w_p, h_p):
    """Return a (bstr_rounds, 2 * bstr_size) int16 array filled with unique
    random 1-based patch coordinates, laid out flat as (w1, h1, w2, h2, ...)."""
    coords = np.zeros((bstr_rounds, 2 * bstr_size), dtype=np.int16)
    for rnd in range(bstr_rounds):
        for slot in range (bstr_size):
            coord_loop(coords, w_p, h_p, slot, rnd)
    return coords
#FUNCTION: prediction for a single patch
def pred (model, patch):
    """Run the model on one patch and return the rounded percentages of
    Gleason patterns 3, 4 and 5 as a (gp3, gp4, gp5) tuple."""
    # preprocessing: float batch of one, scaled to [0, 1]
    batch = np.expand_dims(np.float32(patch), axis = 0)
    batch /= 255.
    # single forward pass through the network
    probs = model.predict(batch)
    # convert class probabilities to percentages, one decimal place
    gp3 = round(probs [0, 0] * 100, 1)
    gp4 = round(probs [0, 1] * 100, 1)
    gp5 = round(probs [0, 2] * 100, 1)
    return (gp3, gp4, gp5)
#FUNCTION: get pixel coordinates of the single patches of one bootstrapping
#round.  "Patch"-level coordinates were already randomly generated; here they
#are converted to pixel crop boxes for extraction and model testing.
def get_px_coords (bstr_coords_l):
    """Convert one bootstrapping round's patch coordinates to pixel boxes.

    Parameters
    ----------
    bstr_coords_l : 1-D array, flat (w1, h1, w2, h2, ...) of 1-based patch
        coordinates (one row of the bstr_coords array).

    Returns
    -------
    numpy.ndarray of shape (n_patches, 4), dtype int32, holding
    (left, upper, right, lower) crop boxes, each spanning one 600 px patch.

    Notes
    -----
    * The patch count is derived from the argument itself rather than from
      the global ``bstr_size``, so the function is self-contained.
    * Coordinates are computed in plain Python ints and stored as int32: the
      previous int16 arithmetic silently overflowed for stem images wider or
      taller than ~54 patches (coordinates beyond 32767 px).
    """
    n_patches = len(bstr_coords_l) // 2
    area = np.zeros((n_patches, 4), dtype = np.int32)
    for i in range(n_patches):
        wb = int(bstr_coords_l [i * 2])
        hb = int(bstr_coords_l [i * 2 + 1])
        c1 = (wb - 1) * 600
        c2 = (hb - 1) * 600
        area [i] = (c1, c2, c1 + 600, c2 + 600)
    return area
#FUNCTION: Gleason scoring from the percentages of the single patterns;
#returns primary/secondary Gleason pattern and the WHO/ISUP grade group.
def gscoring (score):
    """Derive (primary pattern, secondary pattern, ISUP grade group) from the
    (gp3, gp4, gp5) percentage tuple.

    A pattern needs > 5% presence to enter the score; once the primary and
    secondary patterns are fixed, the WHO/ISUP grade group follows directly
    from the pattern pair via a lookup table.
    """
    gp3, gp4, gp5 = score
    # WHO/ISUP grade group implied by each (primary, secondary) pattern pair
    isup_table = {(3, 3): 1, (3, 4): 2, (4, 3): 3,
                  (3, 5): 4, (5, 3): 4, (4, 4): 4,
                  (4, 5): 5, (5, 4): 5, (5, 5): 5}
    if gp5 > 5:  # enough GP5 present to appear in the score
        if gp4 > 5:  # enough GP4 to be primary or secondary pattern
            if gp4 > gp5:  # X+5; GS 3+5 is still possible
                prim, second = (4, 5) if gp4 >= gp3 else (3, 5)
            else:  # 5+X (gp4 > 5, gp5 dominant over gp4)
                prim, second = (5, 4) if gp4 >= gp3 else (5, 3)
        elif gp3 > 5:  # no usable GP4; GP3 competes with GP5
            prim, second = (3, 5) if gp3 > gp5 else (5, 3)
        else:  # both GP3 and GP4 are low (<5%)
            prim, second = 5, 5
    else:  # GP5 below threshold, score built from GP3/GP4 only
        if gp3 > (100 - gp5) / 2:  # GP3 is the dominant pattern
            prim, second = 3, (4 if gp4 > 5 else 3)
        else:  # GP4 is the dominant pattern
            prim, second = 4, (3 if gp3 > 5 else 4)
    # return (primary Gleason pattern, secondary Gleason pattern, ISUP grade group)
    return (prim, second, isup_table[(prim, second)])
# =============================================================================
# 6. MAIN SCRIPT
# =============================================================================
#Retrieve file names of large tumor images
fnames = sorted(os.listdir(base_dir))
#Here 20 (19+1) is maximal size of subsampled ROI in patches (every patch 600x600px)
#consider that some images could have size less than defined maximal number of
#patches. This should be controlled, otherwise you have an indefinite loop
#by generation of unique coordinates
#With this range the test will include bootstrapping starting
#from minimal subsampled ROI size of 1 patch, then 2, 3, 4, ... and maximal
#size of 20 patches
for i in range(19):
    bstr_size = i + 1
    #Create the output file paths for this ROI size
    path_result_full = result_dir + result_name + "_" + str(bstr_size) + "_full.txt"
    path_result_short = result_dir + result_name + "_" + str(bstr_size) + "_short.txt"
    #Main loop over all tumor images
    for fname in fnames:
        filename = os.path.join(base_dir, fname)
        im = image.load_img(filename)
        print(fname, "loaded")
        #Crop large images to a size of whole patch number
        im, w_p, h_p = image_crop (im)
        print ("Cropped to target size")
        #Feedback
        print("Size in px:", im.size)
        print("Size:", w_p, "x", h_p, "patches")
        print("Overall:", w_p * h_p, "patches")
        #Control if tumor image has overall size less than 16 patches,
        #if YES then skip it.
        #16 patches was a minimal size of images in our dataset
        if w_p * h_p < 16:
            continue
        print("Starting bootstrapping with size:", bstr_size, "patch(es);", "Bootstrapping rounds n =", bstr_rounds)
        #Get coordinates of random patches
        bstr_coords = bstr_coord_gen(bstr_rounds, bstr_size, w_p, h_p)
        #every line in bstr_coords is one bootstrapping round
        #in every single line coordinates of n patches, where n - size of ROI in patches
        #prepare containers for short (only WHO/ISUP grade group) and full output
        output_0 = fname + "\t" + str(w_p * h_p) + "\t"
        output_full = ""
        output_short = ""
        for l in range(len(bstr_coords)): # length here is a number of rounds during bootstrapping
            #get pixel coordinates of the patches of this round
            px_coords = get_px_coords (bstr_coords [l]) #px_coords is a numpy array
            #every line = coordinates of single patch = area (w1,h1,w2,h2)
            #Counter (percentage of Gleason patterns) is a global for every ROI within the Round of bootstrapping
            #(=single line in bstr_coords)
            counter_3 = 0
            counter_4 = 0
            counter_5 = 0
            #Analyse single patches from one ROI of bootstrapping round and update global counter for ROI
            for j in range(len(px_coords)):
                #generate patch as image using pixel coordinates
                patch = im.crop (px_coords[j])
                #normalization, preprocessing
                patch = patch.resize((m_p_s,m_p_s), Image.ANTIALIAS)
                patch = np.array(patch)
                patch = standardizer.transform(patch)
                patch = stain_norm.transform(patch)
                #run model and get probabilities of patterns
                score = pred (model, patch)
                #Update GS counter
                counter_3 = counter_3 + score [0]
                counter_4 = counter_4 + score [1]
                counter_5 = counter_5 + score [2]
            #Calculate percentage of patterns from counters
            gp3 = counter_3 / (counter_3 + counter_4 + counter_5) * 100
            gp4 = counter_4 / (counter_3 + counter_4 + counter_5) * 100
            gp5 = counter_5 / (counter_3 + counter_4 + counter_5) * 100
            gp3 = round (gp3, 1)
            gp4 = round (gp4, 1)
            gp5 = round (gp5, 1)
            score = (gp3, gp4, gp5)
            #transform score into Gleason Score (primary and secondary pattern)
            #and WHO/ISUP grade group
            gs_1, gs_2, isup = gscoring(score)
            #Create output, short (only ISUP) and full (Percentage patterns, GS, ISUP) versions
            output_full = output_full + str(gp3) + "\t" + str(gp4) + "\t" + str(gp5) + "\t" + str(gs_1) + "\t" + str(gs_2) + "\t" + str(isup) + "\t"
            output_short = output_short + str(isup) + "\t"
        #Write out output into the file (append mode, one line per image)
        output_end_full = output_0 + output_full + "\n"
        results = open (path_result_full, "a+")
        results.write(output_end_full)
        results.close()
        output_end_short = output_0 + output_short + "\n"
        results = open (path_result_short, "a+")
        results.write(output_end_short)
results.close() | [
"staintools.BrightnessStandardizer",
"os.listdir",
"keras.models.load_model",
"numpy.float32",
"os.path.join",
"staintools.read_image",
"numpy.array",
"numpy.zeros",
"numpy.expand_dims",
"staintools.StainNormalizer",
"random.randint",
"keras.preprocessing.image.load_img"
] | [((1240, 1275), 'os.path.join', 'os.path.join', (['model_dir', 'model_name'], {}), '(model_dir, model_name)\n', (1252, 1275), False, 'import os\n'), ((1284, 1306), 'keras.models.load_model', 'load_model', (['path_model'], {}), '(path_model)\n', (1294, 1306), False, 'from keras.models import load_model\n'), ((1576, 1628), 'staintools.read_image', 'staintools.read_image', (['"""standard_he_stain_small.jpg"""'], {}), "('standard_he_stain_small.jpg')\n", (1597, 1628), False, 'import staintools\n'), ((1679, 1714), 'staintools.BrightnessStandardizer', 'staintools.BrightnessStandardizer', ([], {}), '()\n', (1712, 1714), False, 'import staintools\n'), ((1782, 1826), 'staintools.StainNormalizer', 'staintools.StainNormalizer', ([], {'method': '"""macenko"""'}), "(method='macenko')\n", (1808, 1826), False, 'import staintools\n'), ((2942, 2964), 'random.randint', 'random.randint', (['(1)', 'w_p'], {}), '(1, w_p)\n', (2956, 2964), False, 'import random\n'), ((2978, 3000), 'random.randint', 'random.randint', (['(1)', 'h_p'], {}), '(1, h_p)\n', (2992, 3000), False, 'import random\n'), ((4089, 4143), 'numpy.zeros', 'np.zeros', (['(bstr_rounds, 2 * bstr_size)'], {'dtype': 'np.int16'}), '((bstr_rounds, 2 * bstr_size), dtype=np.int16)\n', (4097, 4143), True, 'import numpy as np\n'), ((4380, 4397), 'numpy.float32', 'np.float32', (['patch'], {}), '(patch)\n', (4390, 4397), True, 'import numpy as np\n'), ((4410, 4439), 'numpy.expand_dims', 'np.expand_dims', (['patch'], {'axis': '(0)'}), '(patch, axis=0)\n', (4424, 4439), True, 'import numpy as np\n'), ((5016, 5056), 'numpy.zeros', 'np.zeros', (['(bstr_size, 4)'], {'dtype': 'np.int16'}), '((bstr_size, 4), dtype=np.int16)\n', (5024, 5056), True, 'import numpy as np\n'), ((7692, 7712), 'os.listdir', 'os.listdir', (['base_dir'], {}), '(base_dir)\n', (7702, 7712), False, 'import os\n'), ((3203, 3225), 'random.randint', 'random.randint', (['(1)', 'w_p'], {}), '(1, w_p)\n', (3217, 3225), False, 'import random\n'), ((3243, 3265), 
'random.randint', 'random.randint', (['(1)', 'h_p'], {}), '(1, h_p)\n', (3257, 3265), False, 'import random\n'), ((8472, 8501), 'os.path.join', 'os.path.join', (['base_dir', 'fname'], {}), '(base_dir, fname)\n', (8484, 8501), False, 'import os\n'), ((8515, 8539), 'keras.preprocessing.image.load_img', 'image.load_img', (['filename'], {}), '(filename)\n', (8529, 8539), False, 'from keras.preprocessing import image\n'), ((10692, 10707), 'numpy.array', 'np.array', (['patch'], {}), '(patch)\n', (10700, 10707), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: utf8 -*-
"""
:Name:
takahe
:Authors:
<NAME> (<EMAIL>)
:Version:
0.4
:Date:
Mar. 2013
:Description:
takahe is a multi-sentence compression module. Given a set of redundant
sentences, a word-graph is constructed by iteratively adding sentences to
it. The best compression is obtained by finding the shortest path in the
word graph. The original algorithm was published and described in
[filippova:2010:COLING]_. A keyphrase-based reranking method, described in
[boudin-morin:2013:NAACL]_ can be applied to generate more informative
compressions.
.. [filippova:2010:COLING] <NAME>, Multi-Sentence Compression:
Finding Shortest Paths in Word Graphs, *Proceedings of the 23rd
International Conference on Computational Linguistics (Coling 2010)*,
pages 322-330, 2010.
.. [boudin-morin:2013:NAACL] <NAME> and <NAME>, Keyphrase
Extraction for N-best Reranking in Multi-Sentence Compression,
*Proceedings of the 2013 Conference of the North American Chapter of the
Association for Computational Linguistics: Human Language Technologies
(NAACL-HLT 2013)*, 2013.
:History:
Development history of the takahe module:
- 0.4 (Mar. 2013) adding the keyphrase-based nbest reranking algorithm
- 0.33 (Feb. 2013), bug fixes and better code documentation
- 0.32 (Jun. 2012), Punctuation marks are now considered within the
graph, compressions are then punctuated
- 0.31 (Nov. 2011), modified context function (uses the left and right
contexts), improved docstring documentation, bug fixes
- 0.3 (Oct. 2011), improved K-shortest paths algorithm including
verb/size constraints and ordered lists for performance
- 0.2 (Dec. 2010), removed dependencies from nltk (i.e. POS-tagging,
tokenization and stopwords removal)
- 0.1 (Nov. 2010), first version
:Dependencies:
The following Python modules are required:
- `networkx <http://networkx.github.com/>`_ for the graph construction
(v1.2+)
:Usage:
A typical usage of this module is::
import takahe
# A list of tokenized and POS-tagged sentences
        sentences = ['Hillary/NNP Clinton/NNP wanted/VBD to/TO visit/VB ...']
# Create a word graph from the set of sentences with parameters :
# - minimal number of words in the compression : 6
# - language of the input sentences : en (english)
# - POS tag for punctuation marks : PUNCT
compresser = takahe.word_graph( sentences,
nb_words = 6,
lang = 'en',
punct_tag = "PUNCT" )
# Get the 50 best paths
candidates = compresser.get_compression(50)
# 1. Rerank compressions by path length (Filippova's method)
for cummulative_score, path in candidates:
# Normalize path score by path length
normalized_score = cummulative_score / len(path)
# Print normalized score and compression
print round(normalized_score, 3), ' '.join([u[0] for u in path])
# Write the word graph in the dot format
compresser.write_dot('test.dot')
# 2. Rerank compressions by keyphrases (Boudin and Morin's method)
reranker = takahe.keyphrase_reranker( sentences,
candidates,
lang = 'en' )
reranked_candidates = reranker.rerank_nbest_compressions()
# Loop over the best reranked candidates
for score, path in reranked_candidates:
# Print the best reranked candidates
print round(score, 3), ' '.join([u[0] for u in path])
:Misc:
The Takahe is a flightless bird indigenous to New Zealand. It was thought to
be extinct after the last four known specimens were taken in 1898. However,
    after a carefully planned search effort the bird was rediscovered on
    November 20, 1948. (Wikipedia, http://en.wikipedia.org/wiki/takahe)
"""
import bisect
import networkx as nx
import numpy as np
from utils import *
# import matplotlib.pyplot as plt
class WordGraph:
    def __init__(self, sentence_list, stopwords, nb_words=8, lang="en", punct_tag="PUNCT", pos_separator='/'):
        """Initialise the compression word graph.

        sentence_list : tokenised, POS-tagged sentences ('word/POS ...')
        stopwords     : stopword collection used during word mapping
        nb_words      : minimal number of words in a compression
        lang          : sentence language; 'vi' swaps in the Vietnamese verb tag
        punct_tag     : POS tag used for punctuation marks
        pos_separator : string separating a word from its POS tag
        """
        self.sentence = list(sentence_list)
        """ A list of sentences provided by the user. """
        self.length = len(sentence_list)
        """ The number of sentences given for fusion. """
        self.nb_words = nb_words
        """ The minimal number of words in the compression. """
        self.stopwords = stopwords
        """ The set of stopwords loaded from stopwords."""
        self.punct_tag = punct_tag
        """ The stopword tag used in the graph. """
        self.pos_separator = pos_separator
        """ The character (or string) used to separate a word and its Part of Speech tag """
        self.graph = nx.DiGraph()
        """ The directed graph used for fusion. """
        self.start = '-start-'
        """ The start token in the graph. """
        self.stop = '-end-'
        """ The end token in the graph. """
        self.sep = '/-/'
        """ The separator used between a word and its POS in the graph. """
        self.term_freq = {}
        """ The frequency of a given term. """
        self.verbs = {'VB', 'VBD', 'VBP', 'VBZ', 'VH', 'VHD', 'VHP', 'VBZ', 'VV', 'VVD', 'VVP', 'VVZ'}
        """
        The list of verb POS tags required in the compression. At least *one*
        verb must occur in the candidate compressions.
        """
        # Replacing default values for Vietnamese
        if lang == "vi":
            self.verbs = {'V'}
        # 1. Pre-process the sentences
        self.pre_process_sentences()
        # 2. Compute term statistics
        self.compute_statistics()
        # 3. Build the word graph
        self.build_graph()
def pre_process_sentences(self):
"""
Pre-process the list of sentences given as input. Split sentences using
whitespaces and convert each sentence to a list of (word, POS) tuples.
"""
for i in range(self.length):
# Normalise extra white spaces
self.sentence[i] = regex.sub(' +', ' ', self.sentence[i])
self.sentence[i] = self.sentence[i].strip()
# Tokenize the current sentence in word/POS
sentence = self.sentence[i].split(' ')
# Creating an empty container for the cleaned up sentence
container = [(self.start, self.start)]
# Looping over the words
for w in sentence:
# Splitting word, POS
pos_separator_re = regex.escape(self.pos_separator)
m = regex.match("^(.+)" + pos_separator_re + "(.+)$", w)
# Extract the word information
token, POS = m.group(1), m.group(2)
# Add the token/POS to the sentence container
container.append((token.lower(), POS))
# Add the stop token at the end of the container
container.append((self.stop, self.stop))
# Recopy the container into the current sentence
self.sentence[i] = container
    def build_graph(self):
        """
        Constructs a directed word graph from the list of input sentences. Each
        sentence is iteratively added to the directed graph according to the
        following algorithm:
        - Word mapping/creation is done in four steps:
            1. non-stopwords for which no candidate exists in the graph or for
               which an unambiguous mapping is possible or which occur more than
               once in the sentence
            2. non-stopwords for which there are either several possible
               candidates in the graph
            3. stopwords
            4. punctuation marks
        For the last three groups of words where mapping is ambiguous we check
        the immediate context (the preceding and following words in the sentence
        and the neighboring nodes in the graph) and select the candidate which
        has larger overlap in the context, or the one with a greater frequency
        (i.e. the one which has more words mapped onto it). Stopwords are mapped
        only if there is some overlap in non-stopwords neighbors, otherwise a
        new node is created. Punctuation marks are mapped only if the preceding
        and following words in the sentence and the neighboring nodes are the
        same.
        - Edges are then computed and added between mapped words.
        Each node in the graph is represented as a tuple ('word/POS', id) and
        possesses an info list containing (sentence_id, position_in_sentence)
        tuples.
        """
        # Iteratively add each sentence in the graph ---------------------------
        for i in range(self.length):
            # Compute the sentence length
            sentence_len = len(self.sentence[i])
            # Create the mapping container
            mapping = [0] * sentence_len
            # -------------------------------------------------------------------
            # 1. non-stopwords for which no candidate exists in the graph or for
            #    which an unambiguous mapping is possible or which occur more
            #    than once in the sentence.
            # -------------------------------------------------------------------
            for j in range(sentence_len):
                # Get the word and tag
                token, POS = self.sentence[i][j]
                # If stopword or punctuation mark, continues
                if token in self.stopwords or regex.search('(?u)^\W$', token):
                    continue
                # Create the node identifier
                node = token.lower() + self.sep + POS
                # Find the number of ambiguous nodes in the graph
                k = self.ambiguous_nodes(node)
                # If there is no node in the graph, create one with id = 0
                if k == 0:
                    # Add the node in the graph
                    self.graph.add_node((node, 0), info=[(i, j)],
                                        label=token.lower())
                    # Mark the word as mapped to k
                    mapping[j] = (node, 0)
                # If there is only one matching node in the graph (id is 0)
                elif k == 1:
                    # Get the sentences id of this node
                    ids = []
                    for sid, pos_s in self.graph.node[(node, 0)]['info']:
                        ids.append(sid)
                    # Update the node in the graph if not same sentence
                    if i not in ids:
                        self.graph.node[(node, 0)]['info'].append((i, j))
                        mapping[j] = (node, 0)
                    # Else Create new node for redundant word
                    else:
                        self.graph.add_node((node, 1), info=[(i, j)],
                                            label=token.lower())
                        mapping[j] = (node, 1)
            # -------------------------------------------------------------------
            # 2. non-stopwords for which there are either several possible
            #    candidates in the graph.
            # -------------------------------------------------------------------
            for j in range(sentence_len):
                # Get the word and tag
                token, POS = self.sentence[i][j]
                # If stopword or punctuation mark, continues
                if token in self.stopwords or regex.search('(?u)^\W$', token):
                    continue
                # If word is not already mapped to a node
                if mapping[j] == 0:
                    # Create the node identifier
                    node = token.lower() + self.sep + POS
                    # Create the neighboring nodes identifiers
                    prev_token, prev_POS = self.sentence[i][j - 1]
                    next_token, next_POS = self.sentence[i][j + 1]
                    prev_node = prev_token.lower() + self.sep + prev_POS
                    next_node = next_token.lower() + self.sep + next_POS
                    # Find the number of ambiguous nodes in the graph
                    k = self.ambiguous_nodes(node)
                    # Search for the ambiguous node with the larger overlap in
                    # context or the greater frequency.
                    ambinode_overlap = []
                    ambinode_frequency = []
                    # For each ambiguous node
                    for l in range(k):
                        # Get the immediate context words of the nodes
                        l_context = self.get_directed_context(node, l, 'left')
                        r_context = self.get_directed_context(node, l, 'right')
                        # Compute the (directed) context sum
                        val = l_context.count(prev_node)
                        val += r_context.count(next_node)
                        # Add the count of the overlapping words
                        ambinode_overlap.append(val)
                        # Add the frequency of the ambiguous node
                        ambinode_frequency.append(
                            len(self.graph.node[(node, l)]['info'])
                        )
                    # Search for the best candidate while avoiding a loop
                    found = False
                    selected = 0
                    while not found:
                        # Select the ambiguous node
                        selected = self.max_index(ambinode_overlap)
                        if ambinode_overlap[selected] == 0:
                            selected = self.max_index(ambinode_frequency)
                        # Get the sentences id of this node
                        ids = []
                        for sid, p in self.graph.node[(node, selected)]['info']:
                            ids.append(sid)
                        # Test if there is no loop
                        if i not in ids:
                            found = True
                            break
                        # Remove the candidate from the lists
                        else:
                            del ambinode_overlap[selected]
                            del ambinode_frequency[selected]
                            # Avoid endless loops
                            if len(ambinode_overlap) == 0:
                                break
                    # Update the node in the graph if not same sentence
                    if found:
                        self.graph.node[(node, selected)]['info'].append((i, j))
                        mapping[j] = (node, selected)
                    # Else create new node for redundant word
                    else:
                        self.graph.add_node((node, k), info=[(i, j)],
                                            label=token.lower())
                        mapping[j] = (node, k)
            # -------------------------------------------------------------------
            # 3. map the stopwords to the nodes
            # -------------------------------------------------------------------
            for j in range(sentence_len):
                # Get the word and tag
                token, POS = self.sentence[i][j]
                # If *NOT* stopword, continues
                if token not in self.stopwords:
                    continue
                # Create the node identifier
                node = token.lower() + self.sep + POS
                # Find the number of ambiguous nodes in the graph
                k = self.ambiguous_nodes(node)
                # If there is no node in the graph, create one with id = 0
                if k == 0:
                    # Add the node in the graph
                    self.graph.add_node((node, 0), info=[(i, j)],
                                        label=token.lower())
                    # Mark the word as mapped to k
                    mapping[j] = (node, 0)
                # Else find the node with overlap in context or create one
                else:
                    # Create the neighboring nodes identifiers
                    prev_token, prev_POS = self.sentence[i][j - 1]
                    next_token, next_POS = self.sentence[i][j + 1]
                    prev_node = prev_token.lower() + self.sep + prev_POS
                    next_node = next_token.lower() + self.sep + next_POS
                    ambinode_overlap = []
                    # For each ambiguous node
                    for l in range(k):
                        # Get the immediate context words of the nodes, the
                        # boolean indicates to consider only non stopwords
                        l_context = self.get_directed_context(node, l, 'left', True)
                        r_context = self.get_directed_context(node, l, 'right', True)
                        # Compute the (directed) context sum
                        val = l_context.count(prev_node)
                        val += r_context.count(next_node)
                        # Add the count of the overlapping words
                        ambinode_overlap.append(val)
                    # Get best overlap candidate
                    selected = self.max_index(ambinode_overlap)
                    # Get the sentences id of the best candidate node
                    ids = []
                    for sid, pos_s in self.graph.node[(node, selected)]['info']:
                        ids.append(sid)
                    # Update the node in the graph if not same sentence and
                    # there is at least one overlap in context
                    if i not in ids and ambinode_overlap[selected] > 0:
                        # if i not in ids and \
                        # (ambinode_overlap[selected] > 1 and POS==self.punct_tag) or\
                        # (ambinode_overlap[selected] > 0 and POS!=self.punct_tag) :
                        # Update the node in the graph
                        self.graph.node[(node, selected)]['info'].append((i, j))
                        # Mark the word as mapped to k
                        mapping[j] = (node, selected)
                    # Else create a new node
                    else:
                        # Add the node in the graph
                        self.graph.add_node((node, k), info=[(i, j)],
                                            label=token.lower())
                        # Mark the word as mapped to k
                        mapping[j] = (node, k)
            # -------------------------------------------------------------------
            # 4. lastly map the punctuation marks to the nodes
            # -------------------------------------------------------------------
            for j in range(sentence_len):
                # Get the word and tag
                token, POS = self.sentence[i][j]
                # If *NOT* punctuation mark, continues
                if not regex.search('(?u)^\W$', token):
                    continue
                # Create the node identifier
                node = token.lower() + self.sep + POS
                # Find the number of ambiguous nodes in the graph
                k = self.ambiguous_nodes(node)
                # If there is no node in the graph, create one with id = 0
                if k == 0:
                    # Add the node in the graph
                    self.graph.add_node((node, 0), info=[(i, j)],
                                        label=token.lower())
                    # Mark the word as mapped to k
                    mapping[j] = (node, 0)
                # Else find the node with overlap in context or create one
                else:
                    # Create the neighboring nodes identifiers
                    prev_token, prev_POS = self.sentence[i][j - 1]
                    next_token, next_POS = self.sentence[i][j + 1]
                    prev_node = prev_token.lower() + self.sep + prev_POS
                    next_node = next_token.lower() + self.sep + next_POS
                    ambinode_overlap = []
                    # For each ambiguous node
                    for l in range(k):
                        # Get the immediate context words of the nodes
                        l_context = self.get_directed_context(node, l, 'left')
                        r_context = self.get_directed_context(node, l, 'right')
                        # Compute the (directed) context sum
                        val = l_context.count(prev_node)
                        val += r_context.count(next_node)
                        # Add the count of the overlapping words
                        ambinode_overlap.append(val)
                    # Get best overlap candidate
                    selected = self.max_index(ambinode_overlap)
                    # Get the sentences id of the best candidate node
                    ids = []
                    for sid, pos_s in self.graph.node[(node, selected)]['info']:
                        ids.append(sid)
                    # Update the node in the graph if not same sentence and
                    # there is at least one overlap in context
                    if i not in ids and ambinode_overlap[selected] > 1:
                        # Update the node in the graph
                        self.graph.node[(node, selected)]['info'].append((i, j))
                        # Mark the word as mapped to k
                        mapping[j] = (node, selected)
                    # Else create a new node
                    else:
                        # Add the node in the graph
                        self.graph.add_node((node, k), info=[(i, j)],
                                            label=token.lower())
                        # Mark the word as mapped to k
                        mapping[j] = (node, k)
            # -------------------------------------------------------------------
            # 5. Connects the mapped words with directed edges
            # -------------------------------------------------------------------
            for j in range(1, len(mapping)):
                self.graph.add_edge(mapping[j - 1], mapping[j])
        # Assigns a weight to each edge in the graph ---------------------------
        for node1, node2 in self.graph.edges_iter():
            edge_weight = self.get_edge_weight(node1, node2)
            self.graph.add_edge(node1, node2, weight=edge_weight)
def ambiguous_nodes(self, node):
"""
Takes a node in parameter and returns the number of possible candidate
(ambiguous) nodes in the graph.
"""
k = 0
while self.graph.has_node((node, k)):
k += 1
return k
def get_directed_context(self, node, k, _dir='all', non_pos=False):
"""
Returns the directed context of a given node, i.e. a list of word/POS of
the left or right neighboring nodes in the graph. The function takes
four parameters :
- node is the word/POS tuple
- k is the node identifier used when multiple nodes refer to the same
word/POS (e.g. k=0 for (the/DET, 0), k=1 for (the/DET, 1), etc.)
- dir is the parameter that controls the directed context calculation,
it can be set to left, right or all (default)
- non_pos is a boolean allowing to remove stopwords from the context
(default is false)
"""
# Define the context containers
l_context = []
r_context = []
# For all the sentence/position tuples
for sid, off in self.graph.node[(node, k)]['info']:
_prev = self.sentence[sid][off - 1][0].lower() + self.sep + self.sentence[sid][off - 1][1]
_next = self.sentence[sid][off + 1][0].lower() + self.sep + self.sentence[sid][off + 1][1]
if non_pos:
if self.sentence[sid][off - 1][0] not in self.stopwords:
l_context.append(_prev)
if self.sentence[sid][off + 1][0] not in self.stopwords:
r_context.append(_next)
else:
l_context.append(_prev)
r_context.append(_next)
# Returns the left (previous) context
if _dir == 'left':
return l_context
# Returns the right (next) context
elif _dir == 'right':
return r_context
# Returns the whole context
else:
l_context.extend(r_context)
return l_context
    def get_edge_weight(self, node1, node2):
        """
        Compute the weight of an edge *e* between nodes *node1* and *node2*. It
        is computed as e_ij = (A / B) / C with:
        - A = freq(i) + freq(j),
        - B = Sum (s in S) 1 / diff(s, i, j)
        - C = freq(i) * freq(j)
        A node is a tuple of ('word/POS', unique_id).

        NOTE(review): if node1 never appears before node2 in any sentence,
        sum(diff) is 0 and this raises ZeroDivisionError -- presumably only
        called on edges created from adjacent words, so node1 precedes node2
        in at least one sentence; confirm at call sites.
        """
        # Get the list of (sentence_id, pos_in_sentence) for node1
        info1 = self.graph.node[node1]['info']
        # Get the list of (sentence_id, pos_in_sentence) for node2
        info2 = self.graph.node[node2]['info']
        # Get the frequency of node1 in the graph
        # freq1 = self.graph.degree(node1)
        freq1 = len(info1)
        # Get the frequency of node2 in cluster
        # freq2 = self.graph.degree(node2)
        freq2 = len(info2)
        # Initializing the diff function list container
        diff = []
        # For each sentence of the cluster (for s in S)
        for s in range(self.length):
            # Compute diff(s, i, j) which is calculated as
            # pos(s, i) - pos(s, j) if pos(s, i) < pos(s, j)
            # O otherwise
            # Get the positions of i and j in s, named pos(s, i) and pos(s, j)
            # As a word can appear at multiple positions in a sentence, a list
            # of positions is used
            pos_i_in_s = []
            pos_j_in_s = []
            # For each (sentence_id, pos_in_sentence) of node1
            for sentence_id, pos_in_sentence in info1:
                # If the sentence_id is s
                if sentence_id == s:
                    # Add the position in s
                    pos_i_in_s.append(pos_in_sentence)
            # For each (sentence_id, pos_in_sentence) of node2
            for sentence_id, pos_in_sentence in info2:
                # If the sentence_id is s
                if sentence_id == s:
                    # Add the position in s
                    pos_j_in_s.append(pos_in_sentence)
            # Container for all the diff(s, i, j) for i and j
            all_diff_pos_i_j = []
            # Loop over all the i, j couples
            for x in range(len(pos_i_in_s)):
                for y in range(len(pos_j_in_s)):
                    diff_i_j = pos_i_in_s[x] - pos_j_in_s[y]
                    # Test if word i appears *BEFORE* word j in s
                    if diff_i_j < 0:
                        all_diff_pos_i_j.append(-1.0 * diff_i_j)
            # Add the mininum distance to diff (i.e. in case of multiple
            # occurrencies of i or/and j in sentence s), 0 otherwise.
            if len(all_diff_pos_i_j) > 0:
                diff.append(1.0 / min(all_diff_pos_i_j))
            else:
                diff.append(0.0)
        # weight1/weight2 duplicate the frequencies; kept for clarity of the
        # (A / B) / C formula above.
        weight1 = freq1
        weight2 = freq2
        return ((freq1 + freq2) / sum(diff)) / (weight1 * weight2)
    def k_shortest_paths(self, start, end, k=10):
        """
        Simple implementation of a k-shortest paths algorithms. Takes three
        parameters: the starting node, the ending node and the number of
        shortest paths desired. Returns a list of k tuples (path, weight).

        The candidate frontier (orderedX) is kept sorted by cumulative
        weight via bisect.insort, so popping index 0 always expands the
        cheapest partial path first. Paths are stored reversed (end first)
        and flipped just before being appended to the result list.
        """
        # Initialize the list of shortest paths
        kshortestpaths = []
        # Initializing the label container
        orderedX = []
        orderedX.append((0, start, 0))
        # Initializing the path container
        paths = {}
        paths[(0, start, 0)] = [start]
        # Initialize the visited container
        visited = {}
        visited[start] = 0
        # Initialize the sentence container that will be used to remove
        # duplicate sentences passing throught different nodes
        sentence_container = {}
        # While the number of shortest paths isn't reached or all paths explored
        while len(kshortestpaths) < k and len(orderedX) > 0:
            # Searching for the shortest distance in orderedX
            shortest = orderedX.pop(0)
            shortestpath = paths[shortest]
            # Removing the shortest node from X and paths
            del paths[shortest]
            # Iterating over the accessible nodes
            for node in self.graph.neighbors(shortest[1]):
                # To avoid loops
                if node in shortestpath:
                    continue
                # Compute the weight to node
                w = shortest[0] + self.graph[shortest[1]][node]['weight']
                # If found the end, adds to k-shortest paths
                if node == end:
                    # -T-------------------------------------------------------T-
                    # --- Constraints on the shortest paths
                    # 1. Check if path contains at least one verb
                    # 2. Check the length of the shortest path, without
                    #    considering punctuation marks and starting node (-1 in
                    #    the range loop, because nodes are reversed)
                    # 3. Check the paired parentheses and quotation marks
                    # 4. Check if sentence is not redundant
                    nb_verbs = 0
                    length = 0
                    paired_parentheses = 0
                    quotation_mark_number = 0
                    raw_sentence = ''
                    for i in range(len(shortestpath) - 1):
                        word, tag = shortestpath[i][0].split(self.sep)
                        # 1.
                        if tag in self.verbs:
                            nb_verbs += 1
                        # 2.
                        # '(?u)^\W$' matches a single non-word character, i.e.
                        # a punctuation token, which is excluded from length.
                        if not regex.search('(?u)^\W$', word):
                            length += 1
                        # 3.
                        else:
                            if word == '(':
                                paired_parentheses -= 1
                            elif word == ')':
                                paired_parentheses += 1
                            elif word == '"':
                                quotation_mark_number += 1
                        # 4.
                        raw_sentence += word + ' '
                    # Remove extra space from sentence
                    raw_sentence = raw_sentence.strip()
                    if nb_verbs > 0 and \
                            length >= self.nb_words and \
                            paired_parentheses == 0 and \
                            (quotation_mark_number % 2) == 0 \
                            and raw_sentence not in sentence_container:
                        path = [node]
                        path.extend(shortestpath)
                        path.reverse()
                        weight = float(w) # / float(length)
                        kshortestpaths.append((path, weight))
                        sentence_container[raw_sentence] = 1
                else:
                    # test if node has already been visited
                    if node in visited:
                        visited[node] += 1
                    else:
                        visited[node] = 0
                    _id = visited[node]
                    # Add the node to orderedX
                    bisect.insort(orderedX, (w, node, _id))
                    # Add the node to paths
                    paths[(w, node, _id)] = [node]
                    paths[(w, node, _id)].extend(shortestpath)
        # Returns the list of shortest paths
        return kshortestpaths
def get_compression(self, nb_candidates=50):
"""
Searches all possible paths from **start** to **end** in the word graph,
removes paths containing no verb or shorter than *n* words. Returns an
ordered list (smaller first) of nb (default value is 50) (cummulative
score, path) tuples. The score is not normalized with the sentence
length.
"""
# Search for the k-shortest paths in the graph
self.paths = self.k_shortest_paths((self.start + self.sep + self.start, 0),
(self.stop + self.sep + self.stop, 0),
nb_candidates)
# Initialize the fusion container
fusions = []
# Test if there are some paths
if len(self.paths) > 0:
# For nb candidates
for i in range(min(nb_candidates, len(self.paths))):
nodes = self.paths[i][0]
sentence = []
for j in range(1, len(nodes) - 1):
word, tag = nodes[j][0].split(self.sep)
sentence.append((word, tag))
bisect.insort(fusions, (self.paths[i][1], sentence))
return fusions
@staticmethod
def max_index(l):
""" Returns the index of the maximum value of a given list. """
try:
return np.argmax(l)
except ValueError:
pass
return None
def compute_statistics(self):
"""
This function iterates over the cluster's sentences and computes the
following statistics about each word:
- term frequency (self.term_freq)
"""
# Structure for containing the list of sentences in which a term occurs
terms = {}
# Loop over the sentences
for i in range(self.length):
# For each tuple (token, POS) of sentence i
for token, POS in self.sentence[i]:
# generate the word/POS token
node = token.lower() + self.sep + POS
# Add the token to the terms list
if node not in terms:
terms[node] = [i]
else:
terms[node].append(i)
# Loop over the terms
for w in terms:
# Compute the term frequency
self.term_freq[w] = len(terms[w])
    def write_dot(self, dot_file):
        """ Outputs the word graph in dot format in the specified file. """
        # NOTE(review): the nx_agraph backend requires pygraphviz to be
        # installed -- confirm availability in the deployment environment.
        nx.drawing.nx_agraph.write_dot(self.graph, dot_file)
| [
"networkx.drawing.nx_agraph.write_dot",
"networkx.DiGraph",
"numpy.argmax",
"bisect.insort"
] | [((5135, 5147), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (5145, 5147), True, 'import networkx as nx\n'), ((35025, 35077), 'networkx.drawing.nx_agraph.write_dot', 'nx.drawing.nx_agraph.write_dot', (['self.graph', 'dot_file'], {}), '(self.graph, dot_file)\n', (35055, 35077), True, 'import networkx as nx\n'), ((33884, 33896), 'numpy.argmax', 'np.argmax', (['l'], {}), '(l)\n', (33893, 33896), True, 'import numpy as np\n'), ((33662, 33714), 'bisect.insort', 'bisect.insort', (['fusions', '(self.paths[i][1], sentence)'], {}), '(fusions, (self.paths[i][1], sentence))\n', (33675, 33714), False, 'import bisect\n'), ((32218, 32257), 'bisect.insort', 'bisect.insort', (['orderedX', '(w, node, _id)'], {}), '(orderedX, (w, node, _id))\n', (32231, 32257), False, 'import bisect\n')] |
r"""
Use this script to visualize the output of a trained speech-model.
Usage: python visualize.py /path/to/audio /path/to/training/json.json \
/path/to/model
"""
from __future__ import absolute_import, division, print_function
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from data_generator import DataGenerator
from model import compile_output_fn
from utils import argmax_decode, load_model
def softmax(x):
    """Return the softmax of *x* along axis 0.

    Computed in a numerically stable way: the per-column maximum is
    subtracted before exponentiation so large activations do not overflow
    to inf/NaN. Mathematically identical to exp(x) / sum(exp(x), axis=0),
    and exp() is now evaluated only once.
    """
    shifted = np.exp(x - np.max(x, axis=0))
    return shifted / np.sum(shifted, axis=0)
def visualize(model, test_file, train_desc_file):
    """ Get the prediction using the model, and visualize softmax outputs
    Params:
        model (keras.models.Model): Trained speech model
        test_file (str): Path to an audio clip
        train_desc_file(str): Path to the training file used to train this
            model
    """
    datagen = DataGenerator()
    datagen.load_train_data(train_desc_file)
    # Fit the feature normalization statistics on a sample of the data.
    datagen.fit_train(100)
    print ("Compiling test function...")
    test_fn = compile_output_fn(model)
    inputs = [datagen.featurize(test_file)]
    prediction = np.squeeze(test_fn([inputs, True]))
    # FIX(review): the original code used "softmax.npy".format(test_file),
    # a no-op since the string has no "{}" placeholder -- the argument was
    # silently ignored. The constant file names are kept so the produced
    # output paths are unchanged.
    softmax_file = "softmax.npy"
    softmax_img_file = "softmax.png"
    print ("Prediction: {}"
           .format(argmax_decode(prediction)))
    print ("Saving network output to: {}".format(softmax_file))
    print ("As image: {}".format(softmax_img_file))
    np.save(softmax_file, prediction)
    # Softmax over characters (prediction.T: characters x time).
    sm = softmax(prediction.T)
    # Reorder rows to line up with the reversed y-axis labels below --
    # presumably row 1 is a dropped symbol; layout inherited from the
    # original plotting code, confirm against the label set.
    sm = np.vstack((sm[0], sm[2], sm[3:][::-1]))
    fig, ax = plt.subplots()
    ax.pcolor(sm, cmap=plt.cm.Greys_r)
    column_labels = [chr(i) for i in range(97, 97 + 26)] + ['space', 'blank']
    ax.set_yticks(np.arange(sm.shape[0]) + 0.5, minor=False)
    ax.set_yticklabels(column_labels[::-1], minor=False)
    plt.savefig(softmax_img_file)
def main():
    """Parse the command line, load the trained model and visualize it."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('test_file', type=str,
                            help='Path to an audio file')
    arg_parser.add_argument('train_desc_file', type=str,
                            help='Path to the training JSON-line file. This will '
                                 'be used to extract feature means/variance')
    arg_parser.add_argument('load_dir', type=str,
                            help='Directory where a trained model is stored.')
    arg_parser.add_argument('--weights_file', type=str, default=None,
                            help='Path to a model weights file')
    cli_args = arg_parser.parse_args()

    print ("Loading model")
    model = load_model(cli_args.load_dir, cli_args.weights_file)
    visualize(model, cli_args.test_file, cli_args.train_desc_file)


if __name__ == '__main__':
    main()
| [
"utils.load_model",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"numpy.arange",
"matplotlib.use",
"numpy.exp",
"utils.argmax_decode",
"model.compile_output_fn",
"numpy.vstack",
"data_generator.DataGenerator",
"matplotlib.pyplot.subplots",
"numpy.save"
] | [((275, 296), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (289, 296), False, 'import matplotlib\n'), ((912, 927), 'data_generator.DataGenerator', 'DataGenerator', ([], {}), '()\n', (925, 927), False, 'from data_generator import DataGenerator\n'), ((1056, 1080), 'model.compile_output_fn', 'compile_output_fn', (['model'], {}), '(model)\n', (1073, 1080), False, 'from model import compile_output_fn\n'), ((1481, 1514), 'numpy.save', 'np.save', (['softmax_file', 'prediction'], {}), '(softmax_file, prediction)\n', (1488, 1514), True, 'import numpy as np\n'), ((1555, 1594), 'numpy.vstack', 'np.vstack', (['(sm[0], sm[2], sm[3:][::-1])'], {}), '((sm[0], sm[2], sm[3:][::-1]))\n', (1564, 1594), True, 'import numpy as np\n'), ((1609, 1623), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1621, 1623), True, 'import matplotlib.pyplot as plt\n'), ((1863, 1892), 'matplotlib.pyplot.savefig', 'plt.savefig', (['softmax_img_file'], {}), '(softmax_img_file)\n', (1874, 1892), True, 'import matplotlib.pyplot as plt\n'), ((1920, 1945), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1943, 1945), False, 'import argparse\n'), ((2573, 2617), 'utils.load_model', 'load_model', (['args.load_dir', 'args.weights_file'], {}), '(args.load_dir, args.weights_file)\n', (2583, 2617), False, 'from utils import argmax_decode, load_model\n'), ((499, 508), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (505, 508), True, 'import numpy as np\n'), ((518, 527), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (524, 527), True, 'import numpy as np\n'), ((1333, 1358), 'utils.argmax_decode', 'argmax_decode', (['prediction'], {}), '(prediction)\n', (1346, 1358), False, 'from utils import argmax_decode, load_model\n'), ((1759, 1781), 'numpy.arange', 'np.arange', (['sm.shape[0]'], {}), '(sm.shape[0])\n', (1768, 1781), True, 'import numpy as np\n')] |
# code-checked
# server-checked
import os
import torch
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.autograd import Variable
from model_mcdropout import DepthCompletionNet
from datasets import DatasetVirtualKITTIVal
from criterion import MaskedL2Gauss, RMSE
import numpy as np
import cv2
import random
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# Identifier of the trained model family and the ensemble member indices.
model_id = "mcdropout_virtual"
model_is = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
print (len(model_is))
# Output directory for all evaluation artifacts (plots and pickles).
snapshot_dir = "/workspace/evaluating_bdl/depthCompletion/training_logs/%s_eval_ause_virtual" % model_id
# Root directory of the Virtual KITTI dataset.
virtualkitti_path = "/root/data/virtualkitti"
batch_size = 4
if not os.path.exists(snapshot_dir):
    os.makedirs(snapshot_dir)
# Plot line color for each ensemble size M.
colors = {
    1: "k",
    2: "b",
    4: "g",
    8: "r",
    16: "c",
    32: "m",
    64: "y",
}

# Distinct colors for individual (model, run) curves.
run_colors = {
    0: "k",
    1: "#E53935",
    2: "#8E24AA",
    3: "#3949AB",
    4: "#1E88E5",
    5: "#00ACC1",
    6: "#00897B",
    7: "#7CB342",
    8: "#FDD835",
    9: "#FB8C00",
    10: "#D81B60",
    11: "#5E35B1",
    12: "#039BE5",
    13: "#43A047",
    14: "#C0CA33",
    15: "#FFB300",
    16: "#F4511E",
    17: "#6D4C41",
    18: "#546E7A",
    19: "#827717",
}
num_model_is = len(model_is)
print (num_model_is)

M_values = [1, 2, 4, 8, 16, 32]
print (M_values)

# # # # # # # # # # # # # # # # # # debug START:
# M_values = [1, 2, 4]
# model_is = [0, 1]
# # # # # # # # # # # # # # # # # # debug END:

num_runs_per_M = 1

# Nested containers indexed as [model_i][M][run] for the per-run curves.
sparsification_error_values = {m: {M: {} for M in M_values} for m in model_is}
error_rmse_values = {m: {M: {} for M in M_values} for m in model_is}
sigma_rmse_values = {m: {M: {} for M in M_values} for m in model_is}

# Per-M lists of scalar metrics (one entry per model/run combination).
auc_sparsification_error_values = {M: [] for M in M_values}
loss_values = {M: [] for M in M_values}
rmse_values = {M: [] for M in M_values}
# Validation split of Virtual KITTI and its batched, deterministic loader.
eval_dataset = DatasetVirtualKITTIVal(virtualkitti_path=virtualkitti_path)
eval_loader = torch.utils.data.DataLoader(dataset=eval_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
# Masked Gaussian NLL loss and RMSE metric, both on GPU.
criterion = MaskedL2Gauss().cuda()
rmse_criterion = RMSE().cuda()
# ---------------------------------------------------------------------------
# Evaluation loop: for every trained ensemble member (model_i), every number
# of forward passes M and every run, compute the validation loss/RMSE and the
# sparsification-error (AUSE) statistics on the Virtual KITTI validation set.
# ---------------------------------------------------------------------------
for model_i in model_is:
    print ("model_i: %d" % model_i)
    restore_from = "/workspace/evaluating_bdl/depthCompletion/trained_models/%s_%d/checkpoint_40000.pth" % (model_id, model_i)
    model = DepthCompletionNet().cuda()
    model = torch.nn.DataParallel(model)
    model.load_state_dict(torch.load(restore_from))
    # NOTE(review): MC-dropout normally needs dropout active at test time;
    # presumably DepthCompletionNet keeps its dropout layers stochastic
    # despite eval() -- confirm in model_mcdropout.
    model.eval()
    for M in M_values:
        M_float = float(M)
        print ("M: %d" % M)
        for run in range(num_runs_per_M):
            print ("run: %d" % run)
            batch_losses = []
            batch_rmses = []
            # Flat per-pixel accumulators over the whole validation set.
            sigma_alea_values = np.array([])
            sigma_epi_values = np.array([])
            sigma_pred_values = np.array([])
            squared_error_values = np.array([])
            for i_iter, batch in enumerate(eval_loader):
                with torch.no_grad(): # (corresponds to setting volatile=True in all variables, this is done during inference to reduce memory consumption)
                    imgs, sparses, targets, file_ids = batch
                    imgs = Variable(imgs.cuda()) # (shape: (batch_size, h, w))
                    sparses = Variable(sparses.cuda()) # (shape: (batch_size, h, w))
                    targets = Variable(targets.cuda()) # (shape: (batch_size, h, w))
                    # M stochastic forward passes.
                    means = []
                    sigma_2_aleas = []
                    for i in range(M):
                        mean, log_var = model(imgs, sparses) # (both of shape: (batch_size, 1, h, w))
                        sigma_2_alea = torch.exp(log_var) # (sigma_alea^2) # (shape: (batch_size, 1, h, w))
                        means.append(mean)
                        sigma_2_aleas.append(sigma_2_alea)
                    # Ensemble mean of the M predicted means.
                    mean = torch.zeros(means[0].size()).cuda() # (shape: (batch_size, 1, h, w))
                    for value in means:
                        mean = mean + value/M_float
                    # Aleatoric variance: average of the M predicted variances.
                    sigma_2_alea = torch.zeros(means[0].size()).cuda() # (shape: (batch_size, 1, h, w)) (sigma_alea^2)
                    for value in sigma_2_aleas:
                        sigma_2_alea = sigma_2_alea + value/M_float
                    # Epistemic variance: spread of the sample means around the
                    # ensemble mean.
                    sigma_2_epi = torch.zeros(means[0].size()).cuda() # (shape: (batch_size, 1, h, w)) (sigma_epi^2)
                    for value in means:
                        sigma_2_epi = sigma_2_epi + torch.pow(mean - value, 2)/M_float
                    sigma_2_pred = sigma_2_alea + sigma_2_epi # (sigma_pred^2)
                    loss = criterion(mean, torch.log(sigma_2_pred), targets)
                    rmse = rmse_criterion(mean, targets)
                    print('iter = {}/{} completed, loss = {}, rmse = {}'.format(i_iter, len(eval_dataset)/batch_size, loss.data.cpu().numpy(), rmse.data.cpu().numpy()))
                    batch_losses.append(loss.data.cpu().numpy())
                    batch_rmses.append(rmse.data.cpu().numpy())
                    sigma_alea = torch.sqrt(sigma_2_alea) # (shape: (batch_size, 1, h, w))
                    sigma_epi = torch.sqrt(sigma_2_epi) # (shape: (batch_size, 1, h, w))
                    sigma_pred = torch.sqrt(sigma_2_pred) # (shape: (batch_size, 1, h, w))
                    target = torch.unsqueeze(targets, 1) # (shape: (batch_size, 1, h, w))
                    # Only pixels with ground-truth depth (> 0) are evaluated.
                    valid_mask = (target > 0).detach() # (shape: (batch_size, 1, h, w))
                    mean = mean[valid_mask] # (shape: (num_valids, ))
                    sigma_alea = sigma_alea[valid_mask] # (shape: (num_valids, ))
                    sigma_epi = sigma_epi[valid_mask] # (shape: (num_valids, ))
                    sigma_pred = sigma_pred[valid_mask] # (shape: (num_valids, ))
                    target = target[valid_mask] # (shape: (num_valids, ))
                    squared_error = torch.pow(target - mean, 2) # (shape: (num_valids, ))
                    sigma_alea_values = np.concatenate((sigma_alea_values, sigma_alea.data.cpu().numpy()))
                    sigma_epi_values = np.concatenate((sigma_epi_values, sigma_epi.data.cpu().numpy()))
                    sigma_pred_values = np.concatenate((sigma_pred_values, sigma_pred.data.cpu().numpy()))
                    squared_error_values = np.concatenate((squared_error_values, squared_error.data.cpu().numpy()))
                    # # # # # # # # # # # # # # # # # # debug START:
                    # if i_iter > 0:
                    #     break
                    # # # # # # # # # # # # # # # # # # debug END:
            val_loss = np.mean(batch_losses)
            print ("val loss: %g" % val_loss)
            val_rmse = np.mean(batch_rmses)
            print ("val rmse: %g" % val_rmse)
            loss_values[M].append(val_loss)
            rmse_values[M].append(val_rmse)
            # (sigma_alea/epi/pred_values has shape: (num_predictions_with_GT, ))
            # (squared_error_values has shape: (num_predictions_with_GT, ))
            print (sigma_alea_values.shape)
            print (sigma_epi_values.shape)
            print (sigma_pred_values.shape)
            print (squared_error_values.shape)
            num_predictions_with_GT = squared_error_values.shape[0]
            rmse = np.sqrt(np.mean(squared_error_values))
            print (rmse)
            # Sparsification: repeatedly remove the fraction of pixels with
            # the highest predicted uncertainty (resp. highest true error for
            # the oracle) and recompute the RMSE on the remaining pixels.
            #sorted_inds_sigma_alea = np.argsort(sigma_alea_values) # (sigma_values[sorted_inds_sigma[0]]: SMALLEST element of sigma_values)
            #sorted_inds_sigma_epi = np.argsort(sigma_epi_values) # (sigma_values[sorted_inds_sigma[0]]: SMALLEST element of sigma_values)
            sorted_inds_sigma_pred = np.argsort(sigma_pred_values) # (sigma_values[sorted_inds_sigma[0]]: SMALLEST element of sigma_values)
            sorted_inds_error = np.argsort(squared_error_values)
            sigma_alea_rmses = []
            sigma_epi_rmses = []
            sigma_pred_rmses = []
            error_rmses = []
            fractions = list(np.arange(start=0.0, stop=1.0, step=0.01)) # ([0.0, 0.01, ..., 0.99], 100 elements)
            for step, fraction in enumerate(fractions):
                #print ("fraction: %d/%d" % (step+1, len(fractions)))
                #sigma_alea_rmse = np.sqrt(np.mean( squared_error_values[sorted_inds_sigma_alea[0:int((1.0-fraction)*num_predictions_with_GT)]] ))
                #sigma_alea_rmses.append(sigma_alea_rmse)
                #sigma_epi_rmse = np.sqrt(np.mean( squared_error_values[sorted_inds_sigma_epi[0:int((1.0-fraction)*num_predictions_with_GT)]] ))
                #sigma_epi_rmses.append(sigma_epi_rmse)
                sigma_pred_rmse = np.sqrt(np.mean( squared_error_values[sorted_inds_sigma_pred[0:int((1.0-fraction)*num_predictions_with_GT)]] ))
                sigma_pred_rmses.append(sigma_pred_rmse)
                error_rmse = np.sqrt(np.mean( squared_error_values[sorted_inds_error[0:int((1.0-fraction)*num_predictions_with_GT)]] ))
                error_rmses.append(error_rmse)
            # NOTE: error_rmses / sigma_pred_rmses are Python lists; dividing
            # by the numpy scalar at index 0 broadcasts and yields an ndarray.
            error_rmses_normalized = error_rmses/error_rmses[0]
            #sigma_alea_rmses_normalized = sigma_alea_rmses/sigma_alea_rmses[0]
            #sigma_epi_rmses_normalized = sigma_epi_rmses/sigma_epi_rmses[0]
            sigma_pred_rmses_normalized = sigma_pred_rmses/sigma_pred_rmses[0]
            #sparsification_errors_alea = sigma_alea_rmses_normalized - error_rmses_normalized
            #sparsification_errors_epi = sigma_epi_rmses_normalized - error_rmses_normalized
            sparsification_errors_pred = sigma_pred_rmses_normalized - error_rmses_normalized
            #ause_alea = np.trapz(y=sparsification_errors_alea, x=fractions)
            #print ("Area Under the Sparsification Error curve (AUSE) - Alea: %g" % ause_alea)
            #ause_epi = np.trapz(y=sparsification_errors_epi, x=fractions)
            #print ("Area Under the Sparsification Error curve (AUSE) - Epi: %g" % ause_epi)
            ause_pred = np.trapz(y=sparsification_errors_pred, x=fractions)
            print ("Area Under the Sparsification Error curve (AUSE) - Pred: %g" % ause_pred)
            sparsification_error_values[model_i][M][run] = np.array(sparsification_errors_pred)
            error_rmse_values[model_i][M][run] = np.array(error_rmses_normalized)
            sigma_rmse_values[model_i][M][run] = np.array(sigma_pred_rmses_normalized)
            auc_sparsification_error_values[M].append(ause_pred)
            print ("#######################")
        print ("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
    print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
auc_sparsification_error_means = {}
auc_sparsification_error_stds = {}
loss_means = {}
loss_stds = {}
rmse_means = {}
rmse_stds = {}
for M in M_values:
auc_sparsification_error_values_mean = 0.0
for value in auc_sparsification_error_values[M]:
auc_sparsification_error_values_mean += value/float(num_runs_per_M*num_model_is)
auc_sparsification_error_values_var = 0.0
for value in auc_sparsification_error_values[M]:
auc_sparsification_error_values_var += ((value - auc_sparsification_error_values_mean)**2)/float(num_runs_per_M*num_model_is)
auc_sparsification_error_values_std = np.sqrt(auc_sparsification_error_values_var)
auc_sparsification_error_means[M] = auc_sparsification_error_values_mean
auc_sparsification_error_stds[M] = auc_sparsification_error_values_std
###
loss_values_mean = 0.0
for value in loss_values[M]:
loss_values_mean += value/float(num_runs_per_M*num_model_is)
loss_values_var = 0.0
for value in loss_values[M]:
loss_values_var += ((value - loss_values_mean)**2)/float(num_runs_per_M*num_model_is)
loss_values_std = np.sqrt(loss_values_var)
loss_means[M] = loss_values_mean
loss_stds[M] = loss_values_std
###
rmse_values_mean = 0.0
for value in rmse_values[M]:
rmse_values_mean += value/float(num_runs_per_M*num_model_is)
rmse_values_var = 0.0
for value in rmse_values[M]:
rmse_values_var += ((value - rmse_values_mean)**2)/float(num_runs_per_M*num_model_is)
rmse_values_std = np.sqrt(rmse_values_var)
rmse_means[M] = rmse_values_mean
rmse_stds[M] = rmse_values_std
for M in M_values:
print ("M = %d, Sparsification error (AUC) - mean: %g, std: %g" % (M, auc_sparsification_error_means[M], auc_sparsification_error_stds[M]))
print ("M = %d, Loss - mean: %g, std: %g" % (M, loss_means[M], loss_stds[M]))
print ("M = %d, RMSE - mean: %g, std: %g" % (M, rmse_means[M], rmse_stds[M]))
print ("#####")
# Overview plot: sparsification-error curves for every (model, M, run),
# color-coded by M. `fractions` still holds the 100 removal fractions
# computed in the evaluation loop above.
plt.figure(1)
for M in M_values:
    for model_i_step, model_i in enumerate(model_is):
        for run in range(num_runs_per_M):
            # Attach the legend label only to the first curve of each M.
            if (model_i_step == 0) and (run == 0):
                plt.plot(fractions, sparsification_error_values[model_i][M][run], color=colors[M], alpha=0.5, label="M = %d" % M)
            else:
                plt.plot(fractions, sparsification_error_values[model_i][M][run], color=colors[M], alpha=0.5)
plt.legend()
plt.ylabel("Sparsification error")
plt.xlabel("Fraction of removed pixels")
# Capture the auto-scaled y-limits so the per-M plots below share them.
sparsification_error_ylim = plt.ylim()
plt.title("Sparsification error curve")
plt.savefig("%s/sparsification_error_curve.png" % snapshot_dir)
plt.close(1)
# Per-M plots: one sparsification-error figure (shared y-limits) and one
# sparsification plot (oracle = dotted, model = solid) per ensemble size M.
for M in M_values:
    plt.figure(1)
    for model_i_step, model_i in enumerate(model_is):
        for run in range(num_runs_per_M):
            # Attach the legend label only to the first curve.
            if (model_i_step == 0) and (run == 0):
                plt.plot(fractions, sparsification_error_values[model_i][M][run], color=colors[M], alpha=0.5, label="M = %d" % M)
            else:
                plt.plot(fractions, sparsification_error_values[model_i][M][run], color=colors[M], alpha=0.5)
    plt.legend()
    plt.ylabel("Sparsification error")
    plt.xlabel("Fraction of removed pixels")
    plt.ylim(sparsification_error_ylim)
    plt.title("Sparsification error curve")
    plt.savefig("%s/sparsification_error_curve_M%d.png" % (snapshot_dir, M))
    plt.close(1)
    plt.figure(1)
    for model_i_step, model_i in enumerate(model_is):
        for run in range(num_runs_per_M):
            # Dotted curve: oracle (sorted by true error); solid: model
            # (sorted by predicted uncertainty). Same color per (model, run).
            plt.plot(fractions, error_rmse_values[model_i][M][run], color=run_colors[model_i_step*num_runs_per_M + run], linestyle="dotted")
            plt.plot(fractions,sigma_rmse_values[model_i][M][run], color=run_colors[model_i_step*num_runs_per_M + run])
    plt.ylabel("RMSE (normalized)")
    plt.xlabel("Fraction of removed pixels")
    plt.ylim((-0.05, 1.05))
    plt.title("Sparsification plot - M=%d" % M)
    plt.savefig("%s/sparsification_plot_M%d.png" % (snapshot_dir, M))
    plt.close(1)
# Individual sparsification plots: one figure per (M, model, run), with a
# legend distinguishing the oracle curve from the model curve.
for M in M_values:
    for model_i_step, model_i in enumerate(model_is):
        for run in range(num_runs_per_M):
            plt.figure(1)
            plt.plot(fractions, error_rmse_values[model_i][M][run], color=run_colors[model_i_step*num_runs_per_M + run], linestyle="dotted", label="Oracle")
            plt.plot(fractions, sigma_rmse_values[model_i][M][run], color=run_colors[model_i_step*num_runs_per_M + run], label="Model")
            plt.legend()
            plt.ylabel("RMSE (normalized)")
            plt.xlabel("Fraction of removed pixels")
            plt.ylim((-0.05, 1.05))
            plt.title("Sparsification plot - M=%d, model_i=%d, %d" % (M, model_i, run))
            plt.savefig("%s/sparsification_plot_M%d_model_i%d_%d.png" % (snapshot_dir, M, model_i, run))
            plt.close(1)
# Persist every result container (same file names and order as before) so
# plots and metrics can be regenerated without re-running the evaluation.
for _name, _container in (
    ("auc_sparsification_error_values", auc_sparsification_error_values),
    ("loss_values", loss_values),
    ("rmse_values", rmse_values),
    ("sparsification_error_values", sparsification_error_values),
    ("error_rmse_values", error_rmse_values),
    ("sigma_rmse_values", sigma_rmse_values),
):
    with open("%s/%s.pkl" % (snapshot_dir, _name), "wb") as file:
        pickle.dump(_container, file)
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"torch.sqrt",
"torch.pow",
"torch.exp",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"os.path.exists",
"numpy.mean",
"torch.unsqueeze",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.ylim",
... | [((396, 417), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (410, 417), False, 'import matplotlib\n'), ((2407, 2466), 'datasets.DatasetVirtualKITTIVal', 'DatasetVirtualKITTIVal', ([], {'virtualkitti_path': 'virtualkitti_path'}), '(virtualkitti_path=virtualkitti_path)\n', (2429, 2466), False, 'from datasets import DatasetVirtualKITTIVal\n'), ((2482, 2588), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'eval_dataset', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(4)'}), '(dataset=eval_dataset, batch_size=batch_size,\n shuffle=False, num_workers=4)\n', (2509, 2588), False, 'import torch\n'), ((13350, 13363), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (13360, 13363), True, 'import matplotlib.pyplot as plt\n'), ((13796, 13808), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13806, 13808), True, 'import matplotlib.pyplot as plt\n'), ((13810, 13844), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sparsification error"""'], {}), "('Sparsification error')\n", (13820, 13844), True, 'import matplotlib.pyplot as plt\n'), ((13846, 13886), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fraction of removed pixels"""'], {}), "('Fraction of removed pixels')\n", (13856, 13886), True, 'import matplotlib.pyplot as plt\n'), ((13916, 13926), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (13924, 13926), True, 'import matplotlib.pyplot as plt\n'), ((13928, 13967), 'matplotlib.pyplot.title', 'plt.title', (['"""Sparsification error curve"""'], {}), "('Sparsification error curve')\n", (13937, 13967), True, 'import matplotlib.pyplot as plt\n'), ((13969, 14032), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/sparsification_error_curve.png' % snapshot_dir)"], {}), "('%s/sparsification_error_curve.png' % snapshot_dir)\n", (13980, 14032), True, 'import matplotlib.pyplot as plt\n'), ((14034, 14046), 'matplotlib.pyplot.close', 'plt.close', (['(1)'], {}), '(1)\n', 
(14043, 14046), True, 'import matplotlib.pyplot as plt\n'), ((762, 790), 'os.path.exists', 'os.path.exists', (['snapshot_dir'], {}), '(snapshot_dir)\n', (776, 790), False, 'import os\n'), ((797, 822), 'os.makedirs', 'os.makedirs', (['snapshot_dir'], {}), '(snapshot_dir)\n', (808, 822), False, 'import os\n'), ((2904, 2932), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (2925, 2932), False, 'import torch\n'), ((11934, 11978), 'numpy.sqrt', 'np.sqrt', (['auc_sparsification_error_values_var'], {}), '(auc_sparsification_error_values_var)\n', (11941, 11978), True, 'import numpy as np\n'), ((12463, 12487), 'numpy.sqrt', 'np.sqrt', (['loss_values_var'], {}), '(loss_values_var)\n', (12470, 12487), True, 'import numpy as np\n'), ((12892, 12916), 'numpy.sqrt', 'np.sqrt', (['rmse_values_var'], {}), '(rmse_values_var)\n', (12899, 12916), True, 'import numpy as np\n'), ((14074, 14087), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (14084, 14087), True, 'import matplotlib.pyplot as plt\n'), ((14504, 14516), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14514, 14516), True, 'import matplotlib.pyplot as plt\n'), ((14522, 14556), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sparsification error"""'], {}), "('Sparsification error')\n", (14532, 14556), True, 'import matplotlib.pyplot as plt\n'), ((14562, 14602), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fraction of removed pixels"""'], {}), "('Fraction of removed pixels')\n", (14572, 14602), True, 'import matplotlib.pyplot as plt\n'), ((14608, 14643), 'matplotlib.pyplot.ylim', 'plt.ylim', (['sparsification_error_ylim'], {}), '(sparsification_error_ylim)\n', (14616, 14643), True, 'import matplotlib.pyplot as plt\n'), ((14649, 14688), 'matplotlib.pyplot.title', 'plt.title', (['"""Sparsification error curve"""'], {}), "('Sparsification error curve')\n", (14658, 14688), True, 'import matplotlib.pyplot as plt\n'), ((14694, 14766), 'matplotlib.pyplot.savefig', 
'plt.savefig', (["('%s/sparsification_error_curve_M%d.png' % (snapshot_dir, M))"], {}), "('%s/sparsification_error_curve_M%d.png' % (snapshot_dir, M))\n", (14705, 14766), True, 'import matplotlib.pyplot as plt\n'), ((14772, 14784), 'matplotlib.pyplot.close', 'plt.close', (['(1)'], {}), '(1)\n', (14781, 14784), True, 'import matplotlib.pyplot as plt\n'), ((14792, 14805), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (14802, 14805), True, 'import matplotlib.pyplot as plt\n'), ((15172, 15203), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMSE (normalized)"""'], {}), "('RMSE (normalized)')\n", (15182, 15203), True, 'import matplotlib.pyplot as plt\n'), ((15209, 15249), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fraction of removed pixels"""'], {}), "('Fraction of removed pixels')\n", (15219, 15249), True, 'import matplotlib.pyplot as plt\n'), ((15255, 15278), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.05, 1.05)'], {}), '((-0.05, 1.05))\n', (15263, 15278), True, 'import matplotlib.pyplot as plt\n'), ((15284, 15327), 'matplotlib.pyplot.title', 'plt.title', (["('Sparsification plot - M=%d' % M)"], {}), "('Sparsification plot - M=%d' % M)\n", (15293, 15327), True, 'import matplotlib.pyplot as plt\n'), ((15333, 15398), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/sparsification_plot_M%d.png' % (snapshot_dir, M))"], {}), "('%s/sparsification_plot_M%d.png' % (snapshot_dir, M))\n", (15344, 15398), True, 'import matplotlib.pyplot as plt\n'), ((15404, 15416), 'matplotlib.pyplot.close', 'plt.close', (['(1)'], {}), '(1)\n', (15413, 15416), True, 'import matplotlib.pyplot as plt\n'), ((16332, 16382), 'pickle.dump', 'pickle.dump', (['auc_sparsification_error_values', 'file'], {}), '(auc_sparsification_error_values, file)\n', (16343, 16382), False, 'import pickle\n'), ((16453, 16483), 'pickle.dump', 'pickle.dump', (['loss_values', 'file'], {}), '(loss_values, file)\n', (16464, 16483), False, 'import pickle\n'), ((16554, 16584), 'pickle.dump', 
'pickle.dump', (['rmse_values', 'file'], {}), '(rmse_values, file)\n', (16565, 16584), False, 'import pickle\n'), ((16671, 16717), 'pickle.dump', 'pickle.dump', (['sparsification_error_values', 'file'], {}), '(sparsification_error_values, file)\n', (16682, 16717), False, 'import pickle\n'), ((16794, 16830), 'pickle.dump', 'pickle.dump', (['error_rmse_values', 'file'], {}), '(error_rmse_values, file)\n', (16805, 16830), False, 'import pickle\n'), ((16907, 16943), 'pickle.dump', 'pickle.dump', (['sigma_rmse_values', 'file'], {}), '(sigma_rmse_values, file)\n', (16918, 16943), False, 'import pickle\n'), ((2600, 2615), 'criterion.MaskedL2Gauss', 'MaskedL2Gauss', ([], {}), '()\n', (2613, 2615), False, 'from criterion import MaskedL2Gauss, RMSE\n'), ((2641, 2647), 'criterion.RMSE', 'RMSE', ([], {}), '()\n', (2645, 2647), False, 'from criterion import MaskedL2Gauss, RMSE\n'), ((2960, 2984), 'torch.load', 'torch.load', (['restore_from'], {}), '(restore_from)\n', (2970, 2984), False, 'import torch\n'), ((2863, 2883), 'model_mcdropout.DepthCompletionNet', 'DepthCompletionNet', ([], {}), '()\n', (2881, 2883), False, 'from model_mcdropout import DepthCompletionNet\n'), ((3265, 3277), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3273, 3277), True, 'import numpy as np\n'), ((3310, 3322), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3318, 3322), True, 'import numpy as np\n'), ((3356, 3368), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3364, 3368), True, 'import numpy as np\n'), ((3405, 3417), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3413, 3417), True, 'import numpy as np\n'), ((7213, 7234), 'numpy.mean', 'np.mean', (['batch_losses'], {}), '(batch_losses)\n', (7220, 7234), True, 'import numpy as np\n'), ((7306, 7326), 'numpy.mean', 'np.mean', (['batch_rmses'], {}), '(batch_rmses)\n', (7313, 7326), True, 'import numpy as np\n'), ((8290, 8319), 'numpy.argsort', 'np.argsort', (['sigma_pred_values'], {}), '(sigma_pred_values)\n', (8300, 8319), True, 
'import numpy as np\n'), ((8426, 8458), 'numpy.argsort', 'np.argsort', (['squared_error_values'], {}), '(squared_error_values)\n', (8436, 8458), True, 'import numpy as np\n'), ((10613, 10664), 'numpy.trapz', 'np.trapz', ([], {'y': 'sparsification_errors_pred', 'x': 'fractions'}), '(y=sparsification_errors_pred, x=fractions)\n', (10621, 10664), True, 'import numpy as np\n'), ((10822, 10858), 'numpy.array', 'np.array', (['sparsification_errors_pred'], {}), '(sparsification_errors_pred)\n', (10830, 10858), True, 'import numpy as np\n'), ((10909, 10941), 'numpy.array', 'np.array', (['error_rmses_normalized'], {}), '(error_rmses_normalized)\n', (10917, 10941), True, 'import numpy as np\n'), ((10992, 11029), 'numpy.array', 'np.array', (['sigma_pred_rmses_normalized'], {}), '(sigma_pred_rmses_normalized)\n', (11000, 11029), True, 'import numpy as np\n'), ((14917, 15052), 'matplotlib.pyplot.plot', 'plt.plot', (['fractions', 'error_rmse_values[model_i][M][run]'], {'color': 'run_colors[model_i_step * num_runs_per_M + run]', 'linestyle': '"""dotted"""'}), "(fractions, error_rmse_values[model_i][M][run], color=run_colors[\n model_i_step * num_runs_per_M + run], linestyle='dotted')\n", (14925, 15052), True, 'import matplotlib.pyplot as plt\n'), ((15059, 15174), 'matplotlib.pyplot.plot', 'plt.plot', (['fractions', 'sigma_rmse_values[model_i][M][run]'], {'color': 'run_colors[model_i_step * num_runs_per_M + run]'}), '(fractions, sigma_rmse_values[model_i][M][run], color=run_colors[\n model_i_step * num_runs_per_M + run])\n', (15067, 15174), True, 'import matplotlib.pyplot as plt\n'), ((15550, 15563), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (15560, 15563), True, 'import matplotlib.pyplot as plt\n'), ((15577, 15728), 'matplotlib.pyplot.plot', 'plt.plot', (['fractions', 'error_rmse_values[model_i][M][run]'], {'color': 'run_colors[model_i_step * num_runs_per_M + run]', 'linestyle': '"""dotted"""', 'label': '"""Oracle"""'}), "(fractions, 
error_rmse_values[model_i][M][run], color=run_colors[\n model_i_step * num_runs_per_M + run], linestyle='dotted', label='Oracle')\n", (15585, 15728), True, 'import matplotlib.pyplot as plt\n'), ((15735, 15865), 'matplotlib.pyplot.plot', 'plt.plot', (['fractions', 'sigma_rmse_values[model_i][M][run]'], {'color': 'run_colors[model_i_step * num_runs_per_M + run]', 'label': '"""Model"""'}), "(fractions, sigma_rmse_values[model_i][M][run], color=run_colors[\n model_i_step * num_runs_per_M + run], label='Model')\n", (15743, 15865), True, 'import matplotlib.pyplot as plt\n'), ((15872, 15884), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15882, 15884), True, 'import matplotlib.pyplot as plt\n'), ((15898, 15929), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMSE (normalized)"""'], {}), "('RMSE (normalized)')\n", (15908, 15929), True, 'import matplotlib.pyplot as plt\n'), ((15943, 15983), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fraction of removed pixels"""'], {}), "('Fraction of removed pixels')\n", (15953, 15983), True, 'import matplotlib.pyplot as plt\n'), ((15997, 16020), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.05, 1.05)'], {}), '((-0.05, 1.05))\n', (16005, 16020), True, 'import matplotlib.pyplot as plt\n'), ((16034, 16109), 'matplotlib.pyplot.title', 'plt.title', (["('Sparsification plot - M=%d, model_i=%d, %d' % (M, model_i, run))"], {}), "('Sparsification plot - M=%d, model_i=%d, %d' % (M, model_i, run))\n", (16043, 16109), True, 'import matplotlib.pyplot as plt\n'), ((16123, 16219), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/sparsification_plot_M%d_model_i%d_%d.png' % (snapshot_dir, M, model_i,\n run))"], {}), "('%s/sparsification_plot_M%d_model_i%d_%d.png' % (snapshot_dir,\n M, model_i, run))\n", (16134, 16219), True, 'import matplotlib.pyplot as plt\n'), ((16229, 16241), 'matplotlib.pyplot.close', 'plt.close', (['(1)'], {}), '(1)\n', (16238, 16241), True, 'import matplotlib.pyplot as plt\n'), ((7911, 7940), 'numpy.mean', 
'np.mean', (['squared_error_values'], {}), '(squared_error_values)\n', (7918, 7940), True, 'import numpy as np\n'), ((8625, 8666), 'numpy.arange', 'np.arange', ([], {'start': '(0.0)', 'stop': '(1.0)', 'step': '(0.01)'}), '(start=0.0, stop=1.0, step=0.01)\n', (8634, 8666), True, 'import numpy as np\n'), ((13551, 13669), 'matplotlib.pyplot.plot', 'plt.plot', (['fractions', 'sparsification_error_values[model_i][M][run]'], {'color': 'colors[M]', 'alpha': '(0.5)', 'label': "('M = %d' % M)"}), "(fractions, sparsification_error_values[model_i][M][run], color=\n colors[M], alpha=0.5, label='M = %d' % M)\n", (13559, 13669), True, 'import matplotlib.pyplot as plt\n'), ((13701, 13799), 'matplotlib.pyplot.plot', 'plt.plot', (['fractions', 'sparsification_error_values[model_i][M][run]'], {'color': 'colors[M]', 'alpha': '(0.5)'}), '(fractions, sparsification_error_values[model_i][M][run], color=\n colors[M], alpha=0.5)\n', (13709, 13799), True, 'import matplotlib.pyplot as plt\n'), ((14255, 14373), 'matplotlib.pyplot.plot', 'plt.plot', (['fractions', 'sparsification_error_values[model_i][M][run]'], {'color': 'colors[M]', 'alpha': '(0.5)', 'label': "('M = %d' % M)"}), "(fractions, sparsification_error_values[model_i][M][run], color=\n colors[M], alpha=0.5, label='M = %d' % M)\n", (14263, 14373), True, 'import matplotlib.pyplot as plt\n'), ((14405, 14503), 'matplotlib.pyplot.plot', 'plt.plot', (['fractions', 'sparsification_error_values[model_i][M][run]'], {'color': 'colors[M]', 'alpha': '(0.5)'}), '(fractions, sparsification_error_values[model_i][M][run], color=\n colors[M], alpha=0.5)\n', (14413, 14503), True, 'import matplotlib.pyplot as plt\n'), ((3498, 3513), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3511, 3513), False, 'import torch\n'), ((5624, 5648), 'torch.sqrt', 'torch.sqrt', (['sigma_2_alea'], {}), '(sigma_2_alea)\n', (5634, 5648), False, 'import torch\n'), ((5715, 5738), 'torch.sqrt', 'torch.sqrt', (['sigma_2_epi'], {}), '(sigma_2_epi)\n', (5725, 5738), 
False, 'import torch\n'), ((5806, 5830), 'torch.sqrt', 'torch.sqrt', (['sigma_2_pred'], {}), '(sigma_2_pred)\n', (5816, 5830), False, 'import torch\n'), ((5896, 5923), 'torch.unsqueeze', 'torch.unsqueeze', (['targets', '(1)'], {}), '(targets, 1)\n', (5911, 5923), False, 'import torch\n'), ((6482, 6509), 'torch.pow', 'torch.pow', (['(target - mean)', '(2)'], {}), '(target - mean, 2)\n', (6491, 6509), False, 'import torch\n'), ((4206, 4224), 'torch.exp', 'torch.exp', (['log_var'], {}), '(log_var)\n', (4215, 4224), False, 'import torch\n'), ((5191, 5214), 'torch.log', 'torch.log', (['sigma_2_pred'], {}), '(sigma_2_pred)\n', (5200, 5214), False, 'import torch\n'), ((5028, 5054), 'torch.pow', 'torch.pow', (['(mean - value)', '(2)'], {}), '(mean - value, 2)\n', (5037, 5054), False, 'import torch\n')] |
from OpenGL.GLUT import *
from OpenGL.GL import *
import numpy as np
MAX_STEP = 4
# Offsets (in units of `width`) of the eight child squares around the current
# centre, paired with the sign applied to the random curvature: the top row
# curves in the opposite direction to the middle and bottom rows.
_CHILD_OFFSETS = (
    ((-1.0, 1.0), -1.0), ((0.0, 1.0), -1.0), ((1.0, 1.0), -1.0),
    ((-1.0, 0.0), 1.0), ((1.0, 0.0), 1.0),
    ((-1.0, -1.0), 1.0), ((0.0, -1.0), 1.0), ((1.0, -1.0), 1.0),
)


def Recursion(step, width, center):
    """Recursively subdivide the square at `center` into eight surrounding
    child squares, drawing each child once `step` reaches MAX_STEP.

    step: current recursion depth (starts at 1; recursion stops past MAX_STEP).
    width: distance from the current centre to its children; children recurse
        with width/3.
    center: numpy array [x, y] — centre of the current square.
    """
    if step == 1:
        # The root square is never drawn; just displace the centre by a random
        # curvature and descend.
        curvature = NewCurvature()
        Recursion(step + 1, width / 3.0, center + [0, width / 2 * curvature])
    elif step <= MAX_STEP:
        # The original repeated this body eight times, once per child; the
        # first copy additionally shifted the child by the curvature *before*
        # drawing it, unlike the other seven — that looked like a copy/paste
        # slip and has been unified here.
        for (dx, dy), sign in _CHILD_OFFSETS:
            curvature = NewCurvature() * sign
            new_center = center + [dx * width, dy * width]
            IfMaxStepDraw(width, new_center, curvature, step)
            new_center += [0, width / 2 * curvature]
            Recursion(step + 1, width / 3.0, new_center)
def IfMaxStepDraw(width, new_center, curvature, step):
    """Draw the square only when the recursion has reached its deepest level."""
    if step != MAX_STEP:
        return
    DrawColorPolygon(width, new_center, curvature)
def DrawColorPolygon(width, center, curvature):
    """Draw one quad centred on `center`.

    The two right-hand vertices are shifted vertically by curvature*width,
    which gives the fractal its sheared look.  Each vertex is emitted with its
    own random colour via GetColor().
    """
    cx, cy = center[0], center[1]
    half = 0.5 * width
    shear = curvature * width
    corners = (
        (cx - half, cy + half),          # top-left
        (cx + half, cy + half + shear),  # top-right (sheared)
        (cx + half, cy - half + shear),  # bottom-right (sheared)
        (cx - half, cy - half),          # bottom-left
    )
    glBegin(GL_POLYGON)
    for vx, vy in corners:
        GetColor()
        glVertex2f(vx, vy)
    glEnd()
def NewCurvature():
    """Return a uniformly random curvature in the half-open range [0, 0.1)."""
    sample = np.random.random()
    return sample / 10
def RenderScene():
    # GLUT display callback: clear the frame, draw the fractal rooted at the
    # origin with an initial width of 200 world units, then flush GL commands.
    glClear(GL_COLOR_BUFFER_BIT)
    Recursion(step=1, width=200.0, center=np.array([0.0,0.0]))
    glFlush()
def GetColor():
    """Set a uniformly random RGB colour as the current GL colour."""
    red, green, blue = np.random.random(size=3)
    return glColor3f(red, green, blue)
def MyInit():
    # One-time GL state setup: light-grey, fully opaque clear colour.
    glClearColor(0.8, 0.8, 0.8, 1.0)
def ChangeSize(horizontal, vertical):
    # GLUT reshape callback: keeps the fractal's aspect ratio correct by
    # choosing an orthographic projection that letterboxes along the longer
    # window axis.
    if vertical<=100:
        # Clamp very small heights so the 100-pixel viewport margin below
        # never produces a non-positive viewport size.
        vertical = 101
    #glClearColor( 1.0, 1.0, 1.0, 1.0 )
    # 50-pixel margin on every side of the drawable area.
    glViewport(50, 50, horizontal-100, vertical-100)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    AspectRatio = horizontal/vertical
    # The fractal is drawn in a 200x200 world-unit square centred on origin.
    width=200.0
    if(horizontal <= vertical):
        # Taller than wide: stretch the vertical extent by 1/AspectRatio.
        glOrtho(-width/2,width/2,-width/2/AspectRatio,width/2/AspectRatio,1.0,-1.0)
    else:
        # Wider than tall: stretch the horizontal extent by AspectRatio.
        glOrtho(-width/2*AspectRatio,width/2*AspectRatio,-width/2,width/2,1.0,-1.0)
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity()
def main():
    """Initialise GLUT, register the callbacks and enter the event loop."""
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA)
    glutCreateWindow("Fraktal")
    glutDisplayFunc(RenderScene)   # draw callback
    glutReshapeFunc(ChangeSize)    # resize callback
    MyInit()
    glutMainLoop()                 # blocks until the window is closed


if __name__ == "__main__":
    # Guarded so importing this module no longer opens a window immediately.
    main()
| [
"numpy.random.random",
"numpy.array"
] | [((2983, 3001), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2999, 3001), True, 'import numpy as np\n'), ((3169, 3193), 'numpy.random.random', 'np.random.random', ([], {'size': '(3)'}), '(size=3)\n', (3185, 3193), True, 'import numpy as np\n'), ((3100, 3120), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (3108, 3120), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Demo script: build a 1000-day random-walk time series with pandas and plot it.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Earlier per-second resampling experiment, kept for reference:
# rng = pd.date_range('1/1/2012', periods=100, freq='S')
# ts = pd.Series(np.random.randint(0,500,len(rng)), index=rng)
# print ts
# print ts.resample('5Min')
# Daily series of i.i.d. standard-normal draws starting 2000-01-01.
ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000))
# print ts
# print '************************'
# The cumulative sum turns the white noise into a random walk.
ts = ts.cumsum()
# print ts
ts.plot()
plt.show()
| [
"numpy.random.randn",
"pandas.date_range",
"matplotlib.pyplot.show"
] | [((425, 435), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (433, 435), True, 'import matplotlib.pyplot as plt\n'), ((271, 292), 'numpy.random.randn', 'np.random.randn', (['(1000)'], {}), '(1000)\n', (286, 292), True, 'import numpy as np\n'), ((300, 339), 'pandas.date_range', 'pd.date_range', (['"""1/1/2000"""'], {'periods': '(1000)'}), "('1/1/2000', periods=1000)\n", (313, 339), True, 'import pandas as pd\n')] |
import os
import tensorflow as tf
from PIL import Image
import numpy as np
# 验证码存放路径
IMAGE_PATH = "./pictest/"
# 验证码图片宽度
IMAGE_WIDTH = 60
# 验证码图片高度
IMAGE_HEIGHT = 24
# 验证集,用于模型验证的验证码图片的文件名
VALIDATION_IMAGE_NAME = []
# 存放训练好的模型的路径
MODEL_SAVE_PATH = './models/'
CHAR_SET_LEN = 10
CAPTCHA_LEN = 4
def get_image_file_name(imgPath=None):
    """List the captcha image file names in a directory.

    imgPath: directory to scan; defaults to the module-level IMAGE_PATH
        (resolved lazily so the constant can be changed after import).
    Returns (fileName, total): the list of bare file names and its length.

    The original version printed every file name (debug noise) and re-derived
    each name with a no-op path split; os.listdir already yields bare names.
    """
    if imgPath is None:
        imgPath = IMAGE_PATH
    fileName = list(os.listdir(imgPath))
    return fileName, len(fileName)
# 将验证码转换为训练时用的标签向量,维数是 40
# 例如,如果验证码是 ‘0296’ ,则对应的标签是
# [1 0 0 0 0 0 0 0 0 0
# 0 0 1 0 0 0 0 0 0 0
# 0 0 0 0 0 0 0 0 0 1
# 0 0 0 0 0 0 1 0 0 0]
def name2label(name):
    """Encode a digit string as a flat one-hot label vector.

    Each character owns a CHAR_SET_LEN-wide slot in the output; e.g. the
    captcha '0296' maps to a 40-element vector with ones at the positions of
    0, 2, 9 and 6 within their respective slots.
    """
    label = np.zeros(CAPTCHA_LEN * CHAR_SET_LEN)
    for position, char in enumerate(name):
        label[position * CHAR_SET_LEN + ord(char) - ord('0')] = 1
    return label
# 取得验证码图片的数据以及它的标签
def get_data_and_label(fileName, filePath=IMAGE_PATH):
    """Load one captcha image as flattened grey-scale data plus its label.

    The label is derived from the first CAPTCHA_LEN characters of the file
    name (the captcha text).  Returns (image_data, image_label), where
    image_data is the pixel array scaled to [0, 1].
    """
    full_path = os.path.join(filePath, fileName)
    grey = Image.open(full_path).convert("L")  # convert to grey-scale
    image_data = np.array(grey).flatten() / 255
    image_label = name2label(fileName[0:CAPTCHA_LEN])
    return image_data, image_label
# 生成一个训练batch
def get_next_batch(batchSize=32, step=0):
    """Assemble one validation batch of image data and one-hot labels.

    Files come from the module-level VALIDATION_IMAGE_NAME list, starting at
    step*batchSize and wrapping around the end of the list.
    """
    batch_data = np.zeros([batchSize, IMAGE_WIDTH * IMAGE_HEIGHT])
    batch_label = np.zeros([batchSize, CAPTCHA_LEN * CHAR_SET_LEN])
    fileNameList = VALIDATION_IMAGE_NAME
    totalNumber = len(fileNameList)
    offset = step * batchSize
    for row in range(batchSize):
        name = fileNameList[(row + offset) % totalNumber]
        img_data, img_label = get_data_and_label(name)
        batch_data[row, :] = img_data
        batch_label[row, :] = img_label
    return batch_data, batch_label
# 构建卷积神经网络并训练
def validate_data_with_CNN():
    # Build the CNN graph (3 conv/pool layers + 2 fully-connected layers),
    # restore the trained weights from MODEL_SAVE_PATH, then report validation
    # accuracy and a per-image predicted-vs-true comparison on one batch.
    # Weight initialiser: truncated normal, stddev 0.1.
    def weight_variable(shape, name='weight'):
        init = tf.truncated_normal(shape, stddev=0.1)
        var = tf.Variable(initial_value=init, name=name)
        return var
    # Bias initialiser: small positive constant.
    def bias_variable(shape, name='bias'):
        init = tf.constant(0.1, shape=shape)
        var = tf.Variable(init, name=name)
        return var
    # 2-D convolution, stride 1, SAME padding (output keeps spatial size).
    def conv2d(x, W, name='conv2d'):
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME', name=name)
    # 2x2 max pooling, stride 2 (halves width and height).
    def max_pool_2X2(x, name='maxpool'):
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
    # Input layer.
    # NB: the `name` of X matters — it is looked up by name when testing the model.
    X = tf.placeholder(tf.float32, [None, IMAGE_WIDTH * IMAGE_HEIGHT], name='data-input')
    Y = tf.placeholder(tf.float32, [None, CAPTCHA_LEN * CHAR_SET_LEN], name='label-input')
    x_input = tf.reshape(X, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1], name='x-input')
    # First convolutional layer.
    W_conv1 = weight_variable([5, 5, 1, 32], 'W_conv1')
    B_conv1 = bias_variable([32], 'B_conv1')
    conv1 = tf.nn.relu(conv2d(x_input, W_conv1, 'conv1') + B_conv1)
    conv1 = max_pool_2X2(conv1, 'conv1-pool')
    # Second convolutional layer.
    W_conv2 = weight_variable([5, 5, 32, 64], 'W_conv2')
    B_conv2 = bias_variable([64], 'B_conv2')
    conv2 = tf.nn.relu(conv2d(conv1, W_conv2, 'conv2') + B_conv2)
    conv2 = max_pool_2X2(conv2, 'conv2-pool')
    # Third convolutional layer.
    W_conv3 = weight_variable([5, 5, 64, 64], 'W_conv3')
    B_conv3 = bias_variable([64], 'B_conv3')
    conv3 = tf.nn.relu(conv2d(conv2, W_conv3, 'conv3') + B_conv3)
    conv3 = max_pool_2X2(conv3, 'conv3-pool')
    # Fully-connected layer.
    # Each pooling halves width and height, so three poolings shrink both by 8x
    # (60x24 -> 8x3 with SAME-padding rounding).
    W_fc1 = weight_variable([8 * 3 * 64, 1024], 'W_fc1')
    B_fc1 = bias_variable([1024], 'B_fc1')
    fc1 = tf.reshape(conv3, [-1, W_fc1.get_shape().as_list()[0]])
    fc1 = tf.nn.relu(tf.add(tf.matmul(fc1, W_fc1), B_fc1))
    # Output layer: one logit per (character position, digit) pair.
    W_fc2 = weight_variable([1024, CAPTCHA_LEN * CHAR_SET_LEN], 'W_fc2')
    B_fc2 = bias_variable([CAPTCHA_LEN * CHAR_SET_LEN], 'B_fc2')
    output = tf.add(tf.matmul(fc1, W_fc2), B_fc2, 'output')
    predict = tf.reshape(output, [-1, CAPTCHA_LEN, CHAR_SET_LEN], name='predict')
    labels = tf.reshape(Y, [-1, CAPTCHA_LEN, CHAR_SET_LEN], name='labels')
    # Prediction: argmax over the digit axis per character position.
    # NB: the `name` of predict_max_idx matters — it is looked up when testing the model.
    predict_max_idx = tf.argmax(predict, axis=2, name='predict_max_idx')
    labels_max_idx = tf.argmax(labels, axis=2, name='labels_max_idx')
    predict_correct_vec = tf.equal(predict_max_idx, labels_max_idx)
    accuracy = tf.reduce_mean(tf.cast(predict_correct_vec, tf.float32))
    saver = tf.train.Saver()
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=True)
    config.gpu_options.per_process_gpu_memory_fraction = 0.6
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)  # fetch the checkpoints object
        if ckpt and ckpt.model_checkpoint_path:  # only restore when a checkpoint exists; otherwise weights stay at their random initialisation
            print("正在恢复参数.....")
            saver.restore(sess, ckpt.model_checkpoint_path)  # restore the saved network weights (resume from checkpoint)
            print("参数恢复完成.")
        steps = 0
        # Evaluate a single batch of 20 validation images.
        test_data, test_label = get_next_batch(20, steps)
        acc = sess.run(accuracy, feed_dict={X: test_data, Y: test_label})
        predict_test = sess.run(predict_max_idx, feed_dict={X:test_data, Y: test_label})
        # Re-assemble the flat per-character predictions into 4-digit strings.
        data = predict_test.flatten().tolist()
        prelab = []
        pre = ""
        for i in range(len(data)):
            if i % 4 == 0 and i != 0:
                prelab.append(pre)
                pre = ""
            pre = pre + str(data[i])
        prelab.append(pre)
        # print(prelab)
        # Decode the one-hot ground-truth labels into 4-digit strings too.
        testlab = []
        data = test_label.reshape((-1, 4, 10))
        for item1 in data:
            lab = ""
            for item2 in item1:
                ind = np.argmax(item2)
                lab = lab + str(ind)
            testlab.append(lab)
        # print(testlab)
        # Side-by-side comparison of predictions and ground truth.
        for item1, item2 in zip(prelab, testlab):
            print("{}<->{}:{}".format(item1, item2, item1 == item2))
        print("accuracy:{}".format(acc))
if __name__ == '__main__':
    # Collect every validation image name, expose the list via the
    # module-level name read by get_next_batch, then run the evaluation.
    image_filename_list, total = get_image_file_name(IMAGE_PATH)
    VALIDATION_IMAGE_NAME = image_filename_list
    validate_data_with_CNN()
| [
"tensorflow.equal",
"numpy.array",
"tensorflow.cast",
"os.listdir",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.matmul",
"tensorflow.ConfigProto",
"tensorflow.nn.conv2d",
"tensorflow.Variable",
"numpy.argmax",
"tensorflow.train.get_checkpoint_state",
"tensorflow.reshape",
"... | [((396, 415), 'os.listdir', 'os.listdir', (['imgPath'], {}), '(imgPath)\n', (406, 415), False, 'import os\n'), ((835, 871), 'numpy.zeros', 'np.zeros', (['(CAPTCHA_LEN * CHAR_SET_LEN)'], {}), '(CAPTCHA_LEN * CHAR_SET_LEN)\n', (843, 871), True, 'import numpy as np\n'), ((1087, 1119), 'os.path.join', 'os.path.join', (['filePath', 'fileName'], {}), '(filePath, fileName)\n', (1099, 1119), False, 'import os\n'), ((1130, 1150), 'PIL.Image.open', 'Image.open', (['pathName'], {}), '(pathName)\n', (1140, 1150), False, 'from PIL import Image\n'), ((1208, 1221), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1216, 1221), True, 'import numpy as np\n'), ((1431, 1480), 'numpy.zeros', 'np.zeros', (['[batchSize, IMAGE_WIDTH * IMAGE_HEIGHT]'], {}), '([batchSize, IMAGE_WIDTH * IMAGE_HEIGHT])\n', (1439, 1480), True, 'import numpy as np\n'), ((1499, 1548), 'numpy.zeros', 'np.zeros', (['[batchSize, CAPTCHA_LEN * CHAR_SET_LEN]'], {}), '([batchSize, CAPTCHA_LEN * CHAR_SET_LEN])\n', (1507, 1548), True, 'import numpy as np\n'), ((2677, 2763), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, IMAGE_WIDTH * IMAGE_HEIGHT]'], {'name': '"""data-input"""'}), "(tf.float32, [None, IMAGE_WIDTH * IMAGE_HEIGHT], name=\n 'data-input')\n", (2691, 2763), True, 'import tensorflow as tf\n'), ((2767, 2854), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, CAPTCHA_LEN * CHAR_SET_LEN]'], {'name': '"""label-input"""'}), "(tf.float32, [None, CAPTCHA_LEN * CHAR_SET_LEN], name=\n 'label-input')\n", (2781, 2854), True, 'import tensorflow as tf\n'), ((2864, 2929), 'tensorflow.reshape', 'tf.reshape', (['X', '[-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1]'], {'name': '"""x-input"""'}), "(X, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1], name='x-input')\n", (2874, 2929), True, 'import tensorflow as tf\n'), ((4119, 4186), 'tensorflow.reshape', 'tf.reshape', (['output', '[-1, CAPTCHA_LEN, CHAR_SET_LEN]'], {'name': '"""predict"""'}), "(output, [-1, CAPTCHA_LEN, CHAR_SET_LEN], 
name='predict')\n", (4129, 4186), True, 'import tensorflow as tf\n'), ((4200, 4261), 'tensorflow.reshape', 'tf.reshape', (['Y', '[-1, CAPTCHA_LEN, CHAR_SET_LEN]'], {'name': '"""labels"""'}), "(Y, [-1, CAPTCHA_LEN, CHAR_SET_LEN], name='labels')\n", (4210, 4261), True, 'import tensorflow as tf\n'), ((4343, 4393), 'tensorflow.argmax', 'tf.argmax', (['predict'], {'axis': '(2)', 'name': '"""predict_max_idx"""'}), "(predict, axis=2, name='predict_max_idx')\n", (4352, 4393), True, 'import tensorflow as tf\n'), ((4415, 4463), 'tensorflow.argmax', 'tf.argmax', (['labels'], {'axis': '(2)', 'name': '"""labels_max_idx"""'}), "(labels, axis=2, name='labels_max_idx')\n", (4424, 4463), True, 'import tensorflow as tf\n'), ((4490, 4531), 'tensorflow.equal', 'tf.equal', (['predict_max_idx', 'labels_max_idx'], {}), '(predict_max_idx, labels_max_idx)\n', (4498, 4531), True, 'import tensorflow as tf\n'), ((4617, 4633), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4631, 4633), True, 'import tensorflow as tf\n'), ((4647, 4715), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(True)'}), '(allow_soft_placement=True, log_device_placement=True)\n', (4661, 4715), True, 'import tensorflow as tf\n'), ((2059, 2097), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (2078, 2097), True, 'import tensorflow as tf\n'), ((2112, 2154), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'init', 'name': 'name'}), '(initial_value=init, name=name)\n', (2123, 2154), True, 'import tensorflow as tf\n'), ((2245, 2274), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (2256, 2274), True, 'import tensorflow as tf\n'), ((2289, 2317), 'tensorflow.Variable', 'tf.Variable', (['init'], {'name': 'name'}), '(init, name=name)\n', (2300, 2317), True, 'import tensorflow as tf\n'), ((2399, 2466), 'tensorflow.nn.conv2d', 
'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""', 'name': 'name'}), "(x, W, strides=[1, 1, 1, 1], padding='SAME', name=name)\n", (2411, 2466), True, 'import tensorflow as tf\n'), ((2533, 2623), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""', 'name': 'name'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME',\n name=name)\n", (2547, 2623), True, 'import tensorflow as tf\n'), ((4064, 4085), 'tensorflow.matmul', 'tf.matmul', (['fc1', 'W_fc2'], {}), '(fc1, W_fc2)\n', (4073, 4085), True, 'import tensorflow as tf\n'), ((4562, 4602), 'tensorflow.cast', 'tf.cast', (['predict_correct_vec', 'tf.float32'], {}), '(predict_correct_vec, tf.float32)\n', (4569, 4602), True, 'import tensorflow as tf\n'), ((4814, 4839), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (4824, 4839), True, 'import tensorflow as tf\n'), ((4916, 4962), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['MODEL_SAVE_PATH'], {}), '(MODEL_SAVE_PATH)\n', (4945, 4962), True, 'import tensorflow as tf\n'), ((3865, 3886), 'tensorflow.matmul', 'tf.matmul', (['fc1', 'W_fc1'], {}), '(fc1, W_fc1)\n', (3874, 3886), True, 'import tensorflow as tf\n'), ((4866, 4899), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4897, 4899), True, 'import tensorflow as tf\n'), ((5927, 5943), 'numpy.argmax', 'np.argmax', (['item2'], {}), '(item2)\n', (5936, 5943), True, 'import numpy as np\n')] |
import numpy as np
class CachedSplineDistance:
    """Grid-based cache of (arc-length s, distance) queries against a spline.

    spline: an instance of VectorCubicSpline; must expose get_s_distance(point)
        returning (s, distance), and get_velocity(s).
    configs: [{min: , max: , resolution: }, ...] — one dict per dimension of
        the 2-D space the spline lives in.
    """

    def __init__(self, spline, configs):
        self.spline = spline
        self.configs = configs
        # Lazily built lookup tables (see compute_cache).
        self.distance_table = None  # distance from each grid point to the spline
        self.s_table = None         # arc-length parameter of the closest point

    def compute_cache(self):
        """Fill both lookup tables by querying the spline at every grid point."""
        x_config = self.configs[0]
        y_config = self.configs[1]
        x_num_states = self._get_num_states(x_config)
        y_num_states = self._get_num_states(y_config)
        self.distance_table = np.zeros((x_num_states, y_num_states))
        self.s_table = np.zeros((x_num_states, y_num_states))
        for x_idx in range(x_num_states):
            for y_idx in range(y_num_states):
                x = x_config['min'] + x_idx * x_config['resolution']
                y = y_config['min'] + y_idx * y_config['resolution']
                s, dist = self.spline.get_s_distance(np.array([x, y]))
                self.distance_table[x_idx, y_idx] = dist
                self.s_table[x_idx, y_idx] = s

    def get_s_distance(self, point):
        """Return (s, distance) for `point`.

        Uses the precomputed grid (built on first use) when the point lies
        inside the configured bounds; falls back to an exact spline query
        otherwise.
        """
        if self.distance_table is None or self.s_table is None:
            self.compute_cache()
        x = point[0]
        y = point[1]
        x_config = self.configs[0]
        y_config = self.configs[1]
        if x < x_config['min'] or x > x_config['max'] or y < y_config['min'] or y > y_config['max']:
            # BUG FIX: the original line was `s, dist, _ = self.spline.get_s_distance`,
            # which unpacked the *bound method object* instead of calling it and
            # raised a TypeError for any out-of-range point.  Call it with the
            # point, mirroring compute_cache().
            s, dist = self.spline.get_s_distance(np.array([x, y]))
            return s, dist
        x_idx = int((x - x_config['min']) / x_config['resolution'])
        y_idx = int((y - y_config['min']) / y_config['resolution'])
        return self.s_table[x_idx, y_idx], self.distance_table[x_idx, y_idx]

    def get_velocity(self, s):
        """Spline velocity at arc-length s (delegated to the spline, not cached)."""
        return self.spline.get_velocity(s)

    def _get_num_states(self, config):
        """Number of grid samples covering [min, max] at the given resolution."""
        num_states = int((config['max'] - config['min']) / config['resolution']) + 1
        return num_states
| [
"numpy.array",
"numpy.zeros"
] | [((606, 644), 'numpy.zeros', 'np.zeros', (['(x_num_states, y_num_states)'], {}), '((x_num_states, y_num_states))\n', (614, 644), True, 'import numpy as np\n'), ((668, 706), 'numpy.zeros', 'np.zeros', (['(x_num_states, y_num_states)'], {}), '((x_num_states, y_num_states))\n', (676, 706), True, 'import numpy as np\n'), ((987, 1003), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (995, 1003), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 8 14:27:24 2018
@author: Meagatron
"""
#Matrix Profile Version 1.4.0
#A Python implementation of the matrix profile algorithm described in <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2016): 'Matrix Profile I: All Pairs Similarity Joins for Time Series: A Unifying View that Includes Motifs, Discords and Shapelets', available at http://www.cs.ucr.edu/~eamonn/MatrixProfile.html
#Currently, this implementation supports parallel processing and early termination. A planned update will support the updating of the matrix profile when either time series in the comparison is updated. A GPU implementation is also planned.
#The latest version of this code can be found at https://github.com/javidlakha/matrix-profile
import pandas as pd
import numpy as np
import itertools
import time
import random
import os
import multiprocessing as mp
from scipy.fftpack import fft, ifft
def sliding_dot_product(time_series, query):
    """Dot products of `query` with every length-m window of `time_series`.

    Implements the FFT trick used by MASS: reversing the query, zero-padding
    it to the length of the series and circularly convolving the two yields,
    at positions m-1 .. n-1 of the result, the dot product of the query with
    the window starting at 0, 1, ..., n-m.  Overall cost is O(n log n),
    independent of the query length — short patterns are no cheaper to match
    than long ones.

    Based on the pseudocode in Keogh et al (2016):
    http://www.cs.ucr.edu/~eamonn/PID4481997_extend_Matrix%20Profile_I.pdf
    """
    series_length = time_series.shape[0]
    window_length = query.shape[0]
    # Reverse the query and zero-pad so both transforms have matching size.
    padded_query = np.append(query[::-1], np.zeros(series_length - window_length))
    # Convolution theorem: pointwise-multiply the spectra, then invert.
    spectrum_product = np.multiply(fft(padded_query), fft(time_series))
    return np.real(ifft(spectrum_product))
def MASS(time_series, query):
    """Normalised Euclidean distances between `query` and every length-m
    subsequence of `time_series` (Mueen's MASS algorithm).

    Apart from the O(n log n) sliding dot product, the computation is O(n).
    Returns a complex-valued array (the argument of sqrt can be a tiny
    negative float due to rounding); callers take the real part or magnitude.

    Based on the Matlab code in Mueen at al (2015):
    http://www.cs.unm.edu/~mueen/FastestSimilaritySearch.html
    """
    n = time_series.shape[0]
    m = query.shape[0]
    query_mean = np.mean(query)  # query mean (scalar)
    query_std = np.std(query)   # query standard deviation (scalar)
    # FIX: pd.rolling_mean / pd.rolling_std were deprecated in pandas 0.18 and
    # removed in 0.23; use the Series.rolling API, which produces identical
    # right-aligned windows (NaN for the first m-1 positions).
    series = pd.Series(time_series)
    time_series_mean = series.rolling(m).mean().values
    time_series_std = series.rolling(m).std(ddof=0).values  # no dof correction
    dot_product = sliding_dot_product(time_series, query)
    distances = 2 * (m - (dot_product[m-1:n] - m * query_mean * time_series_mean[m-1:n]) / (query_std * time_series_std[m-1:n]))
    distances = np.sqrt(distances + 0j)  # normalised Euclidean distance
    return distances
def STAMP_single(target_series, query_series=None, subsequence_length=10, max_time=600, self_join=False, verbose=False):
    """Single-threaded STAMP matrix-profile computation.

    THIS IS THE SINGLE-THREADED VERSION OF THE ALGORITHM; prefer
    'STAMP_parallel'. Based on the pseudocode - Keogh et al (2016):
    http://www.cs.ucr.edu/~eamonn/PID4481997_extend_Matrix%20Profile_I.pdf

    Parameters
    ----------
    target_series : 1-D array searched for matches.
    query_series : 1-D array whose subsequences instigate the search; must
        not be longer than `target_series`. Defaults to `target_series`
        (a self-join).
    subsequence_length : length of the subsequences being compared.
    max_time : wall-clock budget in seconds (STAMP is an anytime algorithm).
    self_join : when True, trivial matches of a subsequence against itself
        are excluded via an exclusion zone of +/- subsequence_length/2.
    verbose : print a progress report roughly once per second.

    Returns
    -------
    DataFrame with columns 'Matrix_Profile_Index' (start position of the
    best-matching subsequence) and 'Matrix_Profile' (its distance).
    """
    #Fix: the documented default (query_series=None) previously crashed with
    #AttributeError on `None.shape`. A missing query now means a self-join on
    #target_series, matching the behaviour of the STAMP() wrapper.
    if query_series is None:
        query_series = target_series
        self_join = True
    n_original = query_series.shape[0]
    m = target_series.shape[0]
    if n_original > m:
        raise ValueError('Query series should not be larger than target series.')
    if m > n_original:
        #Zero-pad the query to the target's length so MASS sees equal sizes.
        query_series = np.concatenate([query_series, np.zeros(m - n_original)])
    n = query_series.shape[0]
    #Initialise the matrix profile distances to be very high so any real
    #distance replaces them on the first comparison.
    matrix_profile = 999999999 * np.ones(n - subsequence_length + 1)
    matrix_profile_index = np.zeros(n - subsequence_length + 1)
    #STAMP is an anytime algorithm: if it is stopped early, the partial result
    #is most useful when the 'instigating' subsequences processed so far are
    #spread evenly through the series rather than clustered at the start, so
    #the starting positions are shuffled before iterating.
    indices = [i for i in range(0, n_original - subsequence_length + 1)]
    random.shuffle(indices)
    start_time = time.time()
    update_time = time.time()
    max_time = time.time() + max_time  #absolute deadline
    iteration = 0
    for index in indices:
        #Stop updating the matrix profile once time is up
        if time.time() > max_time:
            break
        #Progress report, throttled to at most once per second
        if verbose == True:
            if time.time() - update_time > 1:
                os.system('cls')
                print('{}% complete'.format(round(iteration/len(indices)*100,3)))
                print('Elapsed time: {} seconds'.format(round(time.time() - start_time,1)))
                update_time = time.time()
        iteration += 1
        #Distances between the subsequence starting at `index` and every other
        #equal-length subsequence in the target series.
        distances = MASS(target_series, query_series[index : index + subsequence_length])
        #For self-joins, mask the trivial near-self matches (within
        #subsequence_length/2 of the current position) with a large distance.
        if self_join == True:
            exclusion_range = (int(max(0, index - subsequence_length/2)), int(min(index + subsequence_length/2 + 1, n)))
            distances[exclusion_range[0]:exclusion_range[1]] = 99999
        #Keep the element-wise minimum distance and remember where it came from.
        matrix_profile_index = np.where(matrix_profile <= distances, matrix_profile_index, index)
        matrix_profile = np.minimum(matrix_profile,distances)
    output = pd.DataFrame([np.real(matrix_profile_index), np.real(matrix_profile)]).T
    output.columns = ['Matrix_Profile_Index','Matrix_Profile']
    return output
def STAMP_parallel(target_series, query_series, subsequence_length=10, max_time=600, self_join=False, verbose=False):
    """Multi-process STAMP: compute the matrix profile of `query_series`
    against `target_series`, splitting the 'instigating' subsequences
    evenly across all CPUs and merging the per-CPU sub-profiles with an
    element-wise minimum.

    Returns a DataFrame with columns 'Matrix_Profile_Index' (start position
    of the best-matching subsequence) and 'Matrix_Profile' (its distance).

    NOTE(review): pool.join() is never called after pool.close();
    presumably the blocking p.get() calls make it unnecessary — confirm.
    """
    #Based on the pseudocode - Keogh et al (2016): http://www.cs.ucr.edu/~eamonn/PID4481997_extend_Matrix%20Profile_I.pdf
    n_original = query_series.shape[0]
    m = target_series.shape[0]
    if n_original > m:
        raise ValueError('Query series should not be larger than target series.')
    if m > n_original:
        #Zero-pad the query to the target's length so MASS sees equal sizes.
        query_series = np.concatenate([query_series,np.zeros(m - n_original)])
    n = query_series.shape[0]
    processes = mp.cpu_count()
    matrix_profile = {}        #per-CPU sub-profiles, keyed by CPU index
    matrix_profile_index = {}  #per-CPU sub-profile indices, keyed by CPU index
    #Matrix profile is an anytime algorithm: its accuracy improves (at a diminishing rate) the longer it runs, but its
    #output is useful even if it is terminated early. However, if the algorithm is terminated early, it is desirable to
    #have compared (to every other 'target' subsequence in the time series) 'instigating' subsequences starting at
    #random points which are evenly distributed throughout the time series rather than the first M 'instigating'
    #subsequences in the time series. Hence, the indices (the position in the time series from which 'instigating'
    #subsequences begin) are shuffled.
    indices = [i for i in range(0, n_original - subsequence_length + 1)]
    random.shuffle(indices)
    #The indices are then divided by the number of CPUs. The algorithm is easy to parallelise because each element of
    #the matrix profile is minimum distance between the 'instigating' subsequence (of user-specified length) which
    #starts at that particular position in the time series and every other 'target' subsequence in the time series.
    #Hence, if the 'instigating' time series are divided between CPUs and sub-matrix profiles computed, the overall
    #matrix profile will be the element-wise minimum of the sub-profiles.
    indices = np.array_split(np.array(indices), processes)
    pool = mp.Pool(processes=processes)
    results = [pool.apply_async(update_matrix_profile, args=(target_series, query_series, self_join, subsequence_length, indices[s], s, n, max_time, verbose)) for s in range(0,processes)]
    output = [p.get() for p in results]  #blocks until every worker finishes
    pool.close()
    #The overall matrix profile is the element-wise minimum of each sub-profile, and each element of the overall
    #matrix profile index is the time series position of the corresponding sub-profile.
    s = 0
    for subindices in indices:
        #Each worker returns (sub_profile, sub_profile_index) — see
        #update_matrix_profile's return value.
        matrix_profile[s] = output[s][0]
        matrix_profile_index[s] = output[s][1]
        if s != 0:
            #Fold this sub-profile into the running minimum from CPUs 0..s-1.
            matrix_profile_index[s] = np.where(matrix_profile[s-1] <= matrix_profile[s], matrix_profile_index[s-1], matrix_profile_index[s])
            matrix_profile[s] = np.minimum(matrix_profile[s-1],matrix_profile[s])
        s += 1
    #After the loop, slot s-1 holds the fully merged profile and index.
    output = pd.DataFrame([np.real(matrix_profile_index[s-1]), np.real(matrix_profile[s-1])]).T
    output.columns = ['Matrix_Profile_Index','Matrix_Profile']
    return output
def update_matrix_profile(target_series, query_series, self_join, subsequence_length, subindices, s, n, max_time, verbose=False):
    """Worker routine for STAMP_parallel: compute a sub-matrix-profile over
    the subset of 'instigating' start positions `subindices` assigned to
    CPU `s`, against every subsequence of `target_series`.

    Returns the tuple (matrix_profile_, matrix_profile_index_); the parent
    process merges these per-CPU results with an element-wise minimum.
    Progress is only printed from the worker with s == 0.
    """
    #Based on the pseudocode - Keogh et al (2016): http://www.cs.ucr.edu/~eamonn/PID4481997_extend_Matrix%20Profile_I.pdf
    #Initialise the matrix profile distances to be very high
    matrix_profile_ = 999999999 * np.ones(n - subsequence_length + 1)
    #Initialise the matrix profile index. The matrix profile corresponding to a particular point in the time series
    #is the smallest distance of between the subsequence of user-specified length that starts at this point to every
    #other subsequence of the same length in the time series. The matrix profile index gives the starting position of
    #the subsequence which is the closest match.
    matrix_profile_index_ = np.zeros(n - subsequence_length + 1)
    #Matrix profile is an anytime algorithm: its accuracy improves (at a diminishing rate) the longer it runs, but its
    #output is useful even if it is terminated early. Consequently, considerations of computational time and expense
    #mean that for large time series it may be desirable to terminate the algorithm after it has run for a user-
    #specified time.
    start_time = time.time()
    update_time = time.time()
    max_time = time.time() + max_time  #absolute deadline for this worker
    iteration = 0
    for index in subindices:
        #Stop updating the matrix profile once time is up
        if time.time() > max_time:
            break
        #Compute progress based on the first CPU and update it at most once per second
        if verbose == True:
            if s == 0:
                if time.time() - update_time > 1:
                    os.system('cls')
                    print('{}% complete'.format(round(iteration/len(subindices)*100,3)))
                    print('Number of CPUs: {}'.format(mp.cpu_count()))
                    print('Elapsed time: {} seconds'.format(round(time.time() - start_time,1)))
                    update_time = time.time()
        iteration += 1
        #Compute the distances between the subsequence starting at a particular point in the time series and every
        #other sub-sequence of equal length in the time series.
        distances = MASS(target_series, query_series[index : index + subsequence_length])
        #Exclude trivial cases where the matrix profile will be very low because the sequence is being matched to
        #itself. These occur when the subsequence being compared is within a distance of (subsequence_length / 2)
        #of the position in the time series.
        if self_join == True:
            exclusion_range = (int(max(0, index - subsequence_length/2)), int(min(index + subsequence_length/2 + 1, n)))
            distances[exclusion_range[0]:exclusion_range[1]] = 99999
        #Update the matrix profile and the matrix profile index if a subsequence which is a closer match is discovered
        matrix_profile_index_ = np.where(matrix_profile_ <= distances, matrix_profile_index_, index)
        matrix_profile_ = np.minimum(matrix_profile_,distances)
    return matrix_profile_, matrix_profile_index_
def STAMP(target_series, query_series=None, subsequence_length=10, max_time=60, verbose=True, parallel=True):
    """Top-level entry point: compute the matrix profile of `query_series`
    against `target_series`.

    When `query_series` is None, the computation is a self-join on
    `target_series` with trivial self-matches excluded. When the target is
    shorter than the query, the target is zero-padded up to the query length
    and the padded tail is dropped from the result, so the returned profile
    always has the target's original length basis. Dispatches to
    STAMP_parallel or STAMP_single according to `parallel`.

    Returns a DataFrame with columns 'Matrix_Profile_Index' and
    'Matrix_Profile'.
    """
    self_join = False
    #NOTE(review): `query_series is None` would be the idiomatic test here.
    if type(query_series) == type(None):
        query_series = target_series
        self_join = True
    q_shape = query_series.shape[0]
    t_shape = target_series.shape[0]
    if t_shape >= q_shape:
        if parallel == True:
            matrix_profile = STAMP_parallel(target_series=target_series, query_series=query_series, subsequence_length=subsequence_length, max_time=max_time, self_join=self_join, verbose=verbose)
        else:
            matrix_profile = STAMP_single(target_series=target_series, query_series=query_series, subsequence_length=subsequence_length, max_time=max_time, self_join=self_join, verbose=verbose)
    elif t_shape < q_shape:
        #Pad the target series with q_shape - t_shape 0s at the end
        new_target_series = np.concatenate([target_series, np.zeros(q_shape - t_shape)])
        if parallel == True:
            matrix_profile = STAMP_parallel(target_series=new_target_series, query_series=query_series, subsequence_length=subsequence_length, max_time=max_time, self_join=self_join, verbose=verbose)
        else:
            matrix_profile = STAMP_single(target_series=new_target_series, query_series=query_series, subsequence_length=subsequence_length, max_time=max_time, self_join=self_join, verbose=verbose)
        #Delete the q_shape - t_shape entries at the end, whose Matrix Profile values will be 0
        matrix_profile = matrix_profile[0 : t_shape]
    return matrix_profile | [
"numpy.sqrt",
"multiprocessing.cpu_count",
"numpy.array",
"scipy.fftpack.fft",
"numpy.mean",
"numpy.multiply",
"numpy.where",
"scipy.fftpack.ifft",
"numpy.real",
"pandas.rolling_std",
"random.shuffle",
"numpy.ones",
"numpy.std",
"time.time",
"numpy.minimum",
"pandas.rolling_mean",
"n... | [((2394, 2404), 'scipy.fftpack.fft', 'fft', (['query'], {}), '(query)\n', (2397, 2404), False, 'from scipy.fftpack import fft, ifft\n'), ((2509, 2525), 'scipy.fftpack.fft', 'fft', (['time_series'], {}), '(time_series)\n', (2512, 2525), False, 'from scipy.fftpack import fft, ifft\n'), ((2606, 2637), 'numpy.multiply', 'np.multiply', (['query', 'time_series'], {}), '(query, time_series)\n', (2617, 2637), True, 'import numpy as np\n'), ((3348, 3362), 'numpy.mean', 'np.mean', (['query'], {}), '(query)\n', (3355, 3362), True, 'import numpy as np\n'), ((3435, 3448), 'numpy.std', 'np.std', (['query'], {}), '(query)\n', (3441, 3448), True, 'import numpy as np\n'), ((3544, 3575), 'pandas.rolling_mean', 'pd.rolling_mean', (['time_series', 'm'], {}), '(time_series, m)\n', (3559, 3575), True, 'import pandas as pd\n'), ((3671, 3709), 'pandas.rolling_std', 'pd.rolling_std', (['time_series', 'm'], {'ddof': '(0)'}), '(time_series, m, ddof=0)\n', (3685, 3709), True, 'import pandas as pd\n'), ((4031, 4056), 'numpy.sqrt', 'np.sqrt', (['(distances + 0.0j)'], {}), '(distances + 0.0j)\n', (4038, 4056), True, 'import numpy as np\n'), ((5089, 5125), 'numpy.zeros', 'np.zeros', (['(n - subsequence_length + 1)'], {}), '(n - subsequence_length + 1)\n', (5097, 5125), True, 'import numpy as np\n'), ((5834, 5857), 'random.shuffle', 'random.shuffle', (['indices'], {}), '(indices)\n', (5848, 5857), False, 'import random\n'), ((6116, 6127), 'time.time', 'time.time', ([], {}), '()\n', (6125, 6127), False, 'import time\n'), ((6147, 6158), 'time.time', 'time.time', ([], {}), '()\n', (6156, 6158), False, 'import time\n'), ((8699, 8713), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (8711, 8713), True, 'import multiprocessing as mp\n'), ((9482, 9505), 'random.shuffle', 'random.shuffle', (['indices'], {}), '(indices)\n', (9496, 9505), False, 'import random\n'), ((10132, 10160), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'processes'}), '(processes=processes)\n', (10139, 
10160), True, 'import multiprocessing as mp\n'), ((12056, 12092), 'numpy.zeros', 'np.zeros', (['(n - subsequence_length + 1)'], {}), '(n - subsequence_length + 1)\n', (12064, 12092), True, 'import numpy as np\n'), ((12491, 12502), 'time.time', 'time.time', ([], {}), '()\n', (12500, 12502), False, 'import time\n'), ((12522, 12533), 'time.time', 'time.time', ([], {}), '()\n', (12531, 12533), False, 'import time\n'), ((2304, 2319), 'numpy.zeros', 'np.zeros', (['(n - m)'], {}), '(n - m)\n', (2312, 2319), True, 'import numpy as np\n'), ((2753, 2761), 'scipy.fftpack.ifft', 'ifft', (['QT'], {}), '(QT)\n', (2757, 2761), False, 'from scipy.fftpack import fft, ifft\n'), ((5025, 5060), 'numpy.ones', 'np.ones', (['(n - subsequence_length + 1)'], {}), '(n - subsequence_length + 1)\n', (5032, 5060), True, 'import numpy as np\n'), ((6175, 6186), 'time.time', 'time.time', ([], {}), '()\n', (6184, 6186), False, 'import time\n'), ((7766, 7832), 'numpy.where', 'np.where', (['(matrix_profile <= distances)', 'matrix_profile_index', 'index'], {}), '(matrix_profile <= distances, matrix_profile_index, index)\n', (7774, 7832), True, 'import numpy as np\n'), ((7859, 7896), 'numpy.minimum', 'np.minimum', (['matrix_profile', 'distances'], {}), '(matrix_profile, distances)\n', (7869, 7896), True, 'import numpy as np\n'), ((10086, 10103), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (10094, 10103), True, 'import numpy as np\n'), ((11581, 11616), 'numpy.ones', 'np.ones', (['(n - subsequence_length + 1)'], {}), '(n - subsequence_length + 1)\n', (11588, 11616), True, 'import numpy as np\n'), ((12550, 12561), 'time.time', 'time.time', ([], {}), '()\n', (12559, 12561), False, 'import time\n'), ((14279, 14347), 'numpy.where', 'np.where', (['(matrix_profile_ <= distances)', 'matrix_profile_index_', 'index'], {}), '(matrix_profile_ <= distances, matrix_profile_index_, index)\n', (14287, 14347), True, 'import numpy as np\n'), ((14375, 14413), 'numpy.minimum', 'np.minimum', 
(['matrix_profile_', 'distances'], {}), '(matrix_profile_, distances)\n', (14385, 14413), True, 'import numpy as np\n'), ((6336, 6347), 'time.time', 'time.time', ([], {}), '()\n', (6345, 6347), False, 'import time\n'), ((10814, 10924), 'numpy.where', 'np.where', (['(matrix_profile[s - 1] <= matrix_profile[s])', 'matrix_profile_index[s - 1]', 'matrix_profile_index[s]'], {}), '(matrix_profile[s - 1] <= matrix_profile[s], matrix_profile_index[s -\n 1], matrix_profile_index[s])\n', (10822, 10924), True, 'import numpy as np\n'), ((10950, 11002), 'numpy.minimum', 'np.minimum', (['matrix_profile[s - 1]', 'matrix_profile[s]'], {}), '(matrix_profile[s - 1], matrix_profile[s])\n', (10960, 11002), True, 'import numpy as np\n'), ((12708, 12719), 'time.time', 'time.time', ([], {}), '()\n', (12717, 12719), False, 'import time\n'), ((4859, 4883), 'numpy.zeros', 'np.zeros', (['(m - n_original)'], {}), '(m - n_original)\n', (4867, 4883), True, 'import numpy as np\n'), ((6547, 6563), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (6556, 6563), False, 'import os\n'), ((6771, 6782), 'time.time', 'time.time', ([], {}), '()\n', (6780, 6782), False, 'import time\n'), ((7930, 7959), 'numpy.real', 'np.real', (['matrix_profile_index'], {}), '(matrix_profile_index)\n', (7937, 7959), True, 'import numpy as np\n'), ((7961, 7984), 'numpy.real', 'np.real', (['matrix_profile'], {}), '(matrix_profile)\n', (7968, 7984), True, 'import numpy as np\n'), ((8612, 8636), 'numpy.zeros', 'np.zeros', (['(m - n_original)'], {}), '(m - n_original)\n', (8620, 8636), True, 'import numpy as np\n'), ((11050, 11086), 'numpy.real', 'np.real', (['matrix_profile_index[s - 1]'], {}), '(matrix_profile_index[s - 1])\n', (11057, 11086), True, 'import numpy as np\n'), ((11086, 11116), 'numpy.real', 'np.real', (['matrix_profile[s - 1]'], {}), '(matrix_profile[s - 1])\n', (11093, 11116), True, 'import numpy as np\n'), ((6499, 6510), 'time.time', 'time.time', ([], {}), '()\n', (6508, 6510), False, 'import 
time\n'), ((12978, 12994), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (12987, 12994), False, 'import os\n'), ((13289, 13300), 'time.time', 'time.time', ([], {}), '()\n', (13298, 13300), False, 'import time\n'), ((15463, 15490), 'numpy.zeros', 'np.zeros', (['(q_shape - t_shape)'], {}), '(q_shape - t_shape)\n', (15471, 15490), True, 'import numpy as np\n'), ((12926, 12937), 'time.time', 'time.time', ([], {}), '()\n', (12935, 12937), False, 'import time\n'), ((13140, 13154), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (13152, 13154), True, 'import multiprocessing as mp\n'), ((6710, 6721), 'time.time', 'time.time', ([], {}), '()\n', (6719, 6721), False, 'import time\n'), ((13224, 13235), 'time.time', 'time.time', ([], {}), '()\n', (13233, 13235), False, 'import time\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.