code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Colour Temperature & Correlated Colour Temperature Plotting
===========================================================
Defines the colour temperature and correlated colour temperature plotting
objects:
- :func:`colour.plotting.\
plot_planckian_locus_in_chromaticity_diagram_CIE1931`
- :func:`colour.plotting.\
plot_planckian_locus_in_chromaticity_diagram_CIE1960UCS`
"""
from __future__ import annotations
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import MSDS_CMFS, CCS_ILLUMINANTS
from colour.hints import (
Any,
ArrayLike,
Callable,
Dict,
List,
Literal,
NDArray,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from colour.models import (
UCS_uv_to_xy,
XYZ_to_UCS,
UCS_to_uv,
xy_to_XYZ,
)
from colour.temperature import CCT_to_uv
from colour.plotting import (
CONSTANTS_COLOUR_STYLE,
CONSTANTS_ARROW_STYLE,
artist,
plot_chromaticity_diagram_CIE1931,
plot_chromaticity_diagram_CIE1960UCS,
filter_passthrough,
override_style,
render,
update_settings_collection,
)
from colour.plotting.diagrams import plot_chromaticity_diagram
from colour.utilities import optional, tstack, validate_method, zeros
__author__ = "Colour Developers"
__copyright__ = "Copyright (C) 2013-2022 - Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "<EMAIL>"
__status__ = "Production"
__all__ = [
"plot_planckian_locus",
"plot_planckian_locus_CIE1931",
"plot_planckian_locus_CIE1960UCS",
"plot_planckian_locus_in_chromaticity_diagram",
"plot_planckian_locus_in_chromaticity_diagram_CIE1931",
"plot_planckian_locus_in_chromaticity_diagram_CIE1960UCS",
]
@override_style()
def plot_planckian_locus(
    planckian_locus_colours: Optional[Union[ArrayLike, str]] = None,
    method: Union[Literal["CIE 1931", "CIE 1960 UCS"], str] = "CIE 1931",
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plots the *Planckian Locus* according to given method.
    Parameters
    ----------
    planckian_locus_colours
        *Planckian Locus* colours.
    method
        *Chromaticity Diagram* method.
    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`, :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.
    Returns
    -------
    :class:`tuple`
        Current figure and axes.
    Examples
    --------
    >>> plot_planckian_locus() # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)
    .. image:: ../_static/Plotting_Plot_Planckian_Locus.png
        :align: center
        :alt: plot_planckian_locus
    """
    # "validate_method" rejects anything other than the two supported
    # methods and returns the name normalised to lower case, which is why
    # the comparisons below are lower-case.
    method = validate_method(method, ["CIE 1931", "CIE 1960 UCS"])
    # Fall back to the style's dark colour when no colour was given.
    planckian_locus_colours = cast(
        Union[ArrayLike, str],
        optional(planckian_locus_colours, CONSTANTS_COLOUR_STYLE.colour.dark),
    )
    settings: Dict[str, Any] = {"uniform": True}
    settings.update(kwargs)
    _figure, axes = artist(**settings)
    # "uv_to_ij" converts *CIE UCS uv* coordinates to the coordinates of
    # the target diagram; "D_uv" is the half-length of the iso-temperature
    # tick marks drawn across the locus (same value for both diagrams).
    # NOTE: there is no "else" branch, but "validate_method" above
    # guarantees one of the two branches runs, so "uv_to_ij" / "D_uv" are
    # always bound.
    if method == "cie 1931":
        def uv_to_ij(uv: NDArray) -> NDArray:
            """
            Converts given *uv* chromaticity coordinates to *ij* chromaticity
            coordinates.
            """
            return UCS_uv_to_xy(uv)
        D_uv = 0.025
    elif method == "cie 1960 ucs":
        def uv_to_ij(uv: NDArray) -> NDArray:
            """
            Converts given *uv* chromaticity coordinates to *ij* chromaticity
            coordinates.
            """
            return uv
        D_uv = 0.025
    # Sample the locus every 250K from 1667K up to (and beyond) 100000K,
    # at zero Delta_uv, and draw it as a single polyline.
    start, end = 1667, 100000
    CCT = np.arange(start, end + 250, 250)
    CCT_D_uv = tstack([CCT, zeros(CCT.shape)])
    ij = uv_to_ij(CCT_to_uv(CCT_D_uv, "Robertson 1968"))
    axes.plot(ij[..., 0], ij[..., 1], color=planckian_locus_colours)
    # Draw a labelled iso-temperature tick (from -D_uv to +D_uv) at a few
    # canonical correlated colour temperatures.
    for i in (1667, 2000, 2500, 3000, 4000, 6000, 10000):
        i0, j0 = uv_to_ij(CCT_to_uv(np.array([i, -D_uv]), "Robertson 1968"))
        i1, j1 = uv_to_ij(CCT_to_uv(np.array([i, D_uv]), "Robertson 1968"))
        axes.plot((i0, i1), (j0, j1), color=planckian_locus_colours)
        axes.annotate(
            f"{i}K",
            xy=(i0, j0),
            xytext=(0, -10),
            textcoords="offset points",
            size="x-small",
        )
    settings = {"axes": axes}
    settings.update(kwargs)
    return render(**settings)
@override_style()
def plot_planckian_locus_CIE1931(
    planckian_locus_colours: Optional[Union[ArrayLike, str]] = None,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot the *Planckian Locus* using the *CIE 1931* method.

    Thin convenience wrapper around :func:`plot_planckian_locus` with the
    ``method`` keyword forced to ``"CIE 1931"``.

    Parameters
    ----------
    planckian_locus_colours
        *Planckian Locus* colours.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`, :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> plot_planckian_locus_CIE1931() # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)
    .. image:: ../_static/Plotting_Plot_Planckian_Locus_CIE1931.png
        :align: center
        :alt: plot_planckian_locus_CIE1931
    """
    # Merge the caller's keyword arguments with the forced method; the
    # literal on the right wins on a key collision, matching the previous
    # dict-copy-then-update behaviour.
    return plot_planckian_locus(
        planckian_locus_colours, **{**kwargs, "method": "CIE 1931"}
    )
@override_style()
def plot_planckian_locus_CIE1960UCS(
    planckian_locus_colours: Optional[Union[ArrayLike, str]] = None,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot the *Planckian Locus* using the *CIE 1960 UCS* method.

    Thin convenience wrapper around :func:`plot_planckian_locus` with the
    ``method`` keyword forced to ``"CIE 1960 UCS"``.

    Parameters
    ----------
    planckian_locus_colours
        *Planckian Locus* colours.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`, :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> plot_planckian_locus_CIE1960UCS() # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)
    .. image:: ../_static/Plotting_Plot_Planckian_Locus_CIE1960UCS.png
        :align: center
        :alt: plot_planckian_locus_CIE1960UCS
    """
    # The forced method overrides any "method" already present in kwargs,
    # exactly as the former copy-then-update implementation did.
    return plot_planckian_locus(
        planckian_locus_colours, **{**kwargs, "method": "CIE 1960 UCS"}
    )
@override_style()
def plot_planckian_locus_in_chromaticity_diagram(
    illuminants: Union[str, Sequence[str]],
    chromaticity_diagram_callable: Callable = (
        plot_chromaticity_diagram  # type: ignore[has-type]
    ),
    planckian_locus_callable: Callable = plot_planckian_locus,
    method: Union[Literal["CIE 1931", "CIE 1960 UCS"], str] = "CIE 1931",
    annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None,
    plot_kwargs: Optional[Union[Dict, List[Dict]]] = None,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plots the *Planckian Locus* and given illuminants in the
    *Chromaticity Diagram* according to given method.
    Parameters
    ----------
    illuminants
        Illuminants to plot. ``illuminants`` elements can be of any
        type or form supported by the
        :func:`colour.plotting.filter_passthrough` definition.
    chromaticity_diagram_callable
        Callable responsible for drawing the *Chromaticity Diagram*.
    planckian_locus_callable
        Callable responsible for drawing the *Planckian Locus*.
    method
        *Chromaticity Diagram* method.
    annotate_kwargs
        Keyword arguments for the :func:`matplotlib.pyplot.annotate`
        definition, used to annotate the resulting chromaticity coordinates
        with their respective spectral distribution names. ``annotate_kwargs``
        can be either a single dictionary applied to all the arrows with same
        settings or a sequence of dictionaries with different settings for each
        spectral distribution. The following special keyword arguments can also
        be used:
        -   ``annotate`` : Whether to annotate the spectral distributions.
    plot_kwargs
        Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,
        used to control the style of the plotted illuminants. ``plot_kwargs``
        can be either a single dictionary applied to all the plotted
        illuminants with the same settings or a sequence of dictionaries with
        different settings for each plotted illuminant.
    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
        :func:`colour.plotting.temperature.plot_planckian_locus`,
        :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.
    Returns
    -------
    :class:`tuple`
        Current figure and axes.
    Examples
    --------
    >>> annotate_kwargs = [
    ...     {'xytext': (-25, 15), 'arrowprops':{'arrowstyle':'-'}},
    ...     {'arrowprops':{'arrowstyle':'-['}},
    ...     {},
    ... ]
    >>> plot_kwargs = [
    ...     {
    ...         'markersize' : 15,
    ...     },
    ...     {   'color': 'r'},
    ...     {},
    ... ]
    >>> plot_planckian_locus_in_chromaticity_diagram(
    ...     ['A', 'B', 'C'],
    ...     annotate_kwargs=annotate_kwargs,
    ...     plot_kwargs=plot_kwargs
    ... )  # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)
    .. image:: ../_static/Plotting_\
Plot_Planckian_Locus_In_Chromaticity_Diagram.png
        :align: center
        :alt: plot_planckian_locus_in_chromaticity_diagram
    """
    # Resolve the illuminant names against the CIE 1931 2 degree observer's
    # chromaticity-coordinate table.
    cmfs = MSDS_CMFS["CIE 1931 2 Degree Standard Observer"]
    illuminants_filtered = filter_passthrough(
        CCS_ILLUMINANTS.get(cmfs.name), illuminants  # type: ignore[arg-type]
    )
    settings: Dict[str, Any] = {"uniform": True}
    settings.update(kwargs)
    _figure, axes = artist(**settings)
    # NOTE(review): this normalises with ".upper()" while
    # "plot_planckian_locus" uses "validate_method" (lower-case); an
    # unknown method is only rejected further below by the explicit
    # "else: raise ValueError". Confirm whether "validate_method" was
    # intended here for consistency.
    method = method.upper()
    # Draw the background diagram and the locus on the shared axes,
    # deferring the final rendering ("standalone" False).
    settings = {"axes": axes, "method": method}
    settings.update(kwargs)
    settings["standalone"] = False
    chromaticity_diagram_callable(**settings)
    planckian_locus_callable(**settings)
    # "xy_to_ij" maps *CIE xy* coordinates into the target diagram's
    # coordinates; "bounding_box" frames the final plot.
    if method == "CIE 1931":
        def xy_to_ij(xy: NDArray) -> NDArray:
            """
            Converts given *CIE xy* chromaticity coordinates to *ij*
            chromaticity coordinates.
            """
            return xy
        bounding_box = (-0.1, 0.9, -0.1, 0.9)
    elif method == "CIE 1960 UCS":
        def xy_to_ij(xy: NDArray) -> NDArray:
            """
            Converts given *CIE xy* chromaticity coordinates to *ij*
            chromaticity coordinates.
            """
            return UCS_to_uv(XYZ_to_UCS(xy_to_XYZ(xy)))
        bounding_box = (-0.1, 0.7, -0.2, 0.6)
    else:
        raise ValueError(
            f'Invalid method: "{method}", must be one of '
            f'["CIE 1931", "CIE 1960 UCS"]'
        )
    # Per-illuminant annotation settings, optionally overridden by the
    # caller's "annotate_kwargs" (single dict or one dict per illuminant).
    annotate_settings_collection = [
        {
            "annotate": True,
            "xytext": (-50, 30),
            "textcoords": "offset points",
            "arrowprops": CONSTANTS_ARROW_STYLE,
        }
        for _ in range(len(illuminants_filtered))
    ]
    if annotate_kwargs is not None:
        update_settings_collection(
            annotate_settings_collection,
            annotate_kwargs,
            len(illuminants_filtered),
        )
    # Per-illuminant marker style, optionally overridden by "plot_kwargs".
    plot_settings_collection = [
        {
            "color": CONSTANTS_COLOUR_STYLE.colour.brightest,
            "label": f"{illuminant}",
            "marker": "o",
            "markeredgecolor": CONSTANTS_COLOUR_STYLE.colour.dark,
            "markeredgewidth": CONSTANTS_COLOUR_STYLE.geometry.short * 0.75,
            "markersize": (
                CONSTANTS_COLOUR_STYLE.geometry.short * 6
                + CONSTANTS_COLOUR_STYLE.geometry.short * 0.75
            ),
        }
        for illuminant in illuminants_filtered
    ]
    if plot_kwargs is not None:
        update_settings_collection(
            plot_settings_collection, plot_kwargs, len(illuminants_filtered)
        )
    for i, (illuminant, xy) in enumerate(illuminants_filtered.items()):
        plot_settings = plot_settings_collection[i]
        ij = xy_to_ij(xy)
        axes.plot(ij[0], ij[1], **plot_settings)
        if annotate_settings_collection[i]["annotate"]:
            annotate_settings = annotate_settings_collection[i]
            # "annotate" is a control flag, not a matplotlib keyword, so
            # it is removed (destructively) before forwarding.
            annotate_settings.pop("annotate")
            axes.annotate(illuminant, xy=ij, **annotate_settings)
    # Title lists the plotted illuminants when any matched the filter.
    title = (
        (
            "{} Illuminants - Planckian Locus\n"
            "{} Chromaticity Diagram - "
            "CIE 1931 2 Degree Standard Observer"
        ).format(", ".join(illuminants_filtered), method.upper())
        if illuminants_filtered
        else (
            f"Planckian Locus\n{method.upper()} Chromaticity Diagram - "
            f"CIE 1931 2 Degree Standard Observer"
        )
    )
    settings.update(
        {
            "axes": axes,
            "standalone": True,
            "bounding_box": bounding_box,
            "title": title,
        }
    )
    settings.update(kwargs)
    return render(**settings)
@override_style()
def plot_planckian_locus_in_chromaticity_diagram_CIE1931(
    illuminants: Union[str, Sequence[str]],
    chromaticity_diagram_callable_CIE1931: Callable = (
        plot_chromaticity_diagram_CIE1931  # type: ignore[has-type]
    ),
    planckian_locus_callable_CIE1931: Callable = plot_planckian_locus_CIE1931,
    annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None,
    plot_kwargs: Optional[Union[Dict, List[Dict]]] = None,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot the *Planckian Locus* and given illuminants in the
    *CIE 1931 Chromaticity Diagram*.

    Thin wrapper around
    :func:`plot_planckian_locus_in_chromaticity_diagram` with ``method``
    forced to ``"CIE 1931"``.

    Parameters
    ----------
    illuminants
        Illuminants to plot. ``illuminants`` elements can be of any
        type or form supported by the
        :func:`colour.plotting.filter_passthrough` definition.
    chromaticity_diagram_callable_CIE1931
        Callable responsible for drawing the *CIE 1931 Chromaticity Diagram*.
    planckian_locus_callable_CIE1931
        Callable responsible for drawing the *Planckian Locus* according to
        the *CIE 1931* method.
    annotate_kwargs
        Keyword arguments for the :func:`matplotlib.pyplot.annotate`
        definition, annotating the chromaticity coordinates with their
        spectral distribution names. Either a single dictionary applied to
        every arrow or a sequence of per-distribution dictionaries. The
        special key ``annotate`` toggles annotation.
    plot_kwargs
        Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,
        styling the plotted illuminants. Either a single dictionary applied
        to every plotted illuminant or a sequence of per-illuminant
        dictionaries.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
        :func:`colour.plotting.temperature.plot_planckian_locus`,
        :func:`colour.plotting.temperature.\
plot_planckian_locus_in_chromaticity_diagram`,
        :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> plot_planckian_locus_in_chromaticity_diagram_CIE1931(['A', 'B', 'C'])
    ... # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)
    .. image:: ../_static/Plotting_\
Plot_Planckian_Locus_In_Chromaticity_Diagram_CIE1931.png
        :align: center
        :alt: plot_planckian_locus_in_chromaticity_diagram_CIE1931
    """
    # Force the method, letting it override any "method" key in kwargs,
    # then delegate everything else untouched.
    forwarded = {**kwargs, "method": "CIE 1931"}
    return plot_planckian_locus_in_chromaticity_diagram(
        illuminants,
        chromaticity_diagram_callable_CIE1931,
        planckian_locus_callable_CIE1931,
        annotate_kwargs=annotate_kwargs,
        plot_kwargs=plot_kwargs,
        **forwarded,
    )
@override_style()
def plot_planckian_locus_in_chromaticity_diagram_CIE1960UCS(
    illuminants: Union[str, Sequence[str]],
    chromaticity_diagram_callable_CIE1960UCS: Callable = (
        plot_chromaticity_diagram_CIE1960UCS  # type: ignore[has-type]
    ),
    planckian_locus_callable_CIE1960UCS: Callable = (
        plot_planckian_locus_CIE1960UCS
    ),
    annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None,
    plot_kwargs: Optional[Union[Dict, List[Dict]]] = None,
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot the *Planckian Locus* and given illuminants in the
    *CIE 1960 UCS Chromaticity Diagram*.

    Thin wrapper around
    :func:`plot_planckian_locus_in_chromaticity_diagram` with ``method``
    forced to ``"CIE 1960 UCS"``.

    Parameters
    ----------
    illuminants
        Illuminants to plot. ``illuminants`` elements can be of any
        type or form supported by the
        :func:`colour.plotting.filter_passthrough` definition.
    chromaticity_diagram_callable_CIE1960UCS
        Callable responsible for drawing the
        *CIE 1960 UCS Chromaticity Diagram*.
    planckian_locus_callable_CIE1960UCS
        Callable responsible for drawing the *Planckian Locus* according to
        the *CIE 1960 UCS* method.
    annotate_kwargs
        Keyword arguments for the :func:`matplotlib.pyplot.annotate`
        definition, annotating the chromaticity coordinates with their
        spectral distribution names. Either a single dictionary applied to
        every arrow or a sequence of per-distribution dictionaries. The
        special key ``annotate`` toggles annotation.
    plot_kwargs
        Keyword arguments for the :func:`matplotlib.pyplot.plot` definition,
        styling the plotted illuminants. Either a single dictionary applied
        to every plotted illuminant or a sequence of per-illuminant
        dictionaries.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
        :func:`colour.plotting.temperature.plot_planckian_locus`,
        :func:`colour.plotting.temperature.\
plot_planckian_locus_in_chromaticity_diagram`,
        :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> plot_planckian_locus_in_chromaticity_diagram_CIE1960UCS(
    ...     ['A', 'C', 'E'])  # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)
    .. image:: ../_static/Plotting_\
Plot_Planckian_Locus_In_Chromaticity_Diagram_CIE1960UCS.png
        :align: center
        :alt: plot_planckian_locus_in_chromaticity_diagram_CIE1960UCS
    """
    # Force the method, letting it override any "method" key in kwargs,
    # then delegate everything else untouched.
    forwarded = {**kwargs, "method": "CIE 1960 UCS"}
    return plot_planckian_locus_in_chromaticity_diagram(
        illuminants,
        chromaticity_diagram_callable_CIE1960UCS,
        planckian_locus_callable_CIE1960UCS,
        annotate_kwargs=annotate_kwargs,
        plot_kwargs=plot_kwargs,
        **forwarded,
    )
| [
"colour.utilities.optional",
"colour.plotting.artist",
"colour.colorimetry.CCS_ILLUMINANTS.get",
"colour.utilities.zeros",
"colour.temperature.CCT_to_uv",
"colour.plotting.override_style",
"numpy.arange",
"numpy.array",
"colour.models.UCS_uv_to_xy",
"colour.models.xy_to_XYZ",
"colour.plotting.re... | [((1794, 1810), 'colour.plotting.override_style', 'override_style', ([], {}), '()\n', (1808, 1810), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, artist, plot_chromaticity_diagram_CIE1931, plot_chromaticity_diagram_CIE1960UCS, filter_passthrough, override_style, render, update_settings_collection\n'), ((4452, 4468), 'colour.plotting.override_style', 'override_style', ([], {}), '()\n', (4466, 4468), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, artist, plot_chromaticity_diagram_CIE1931, plot_chromaticity_diagram_CIE1960UCS, filter_passthrough, override_style, render, update_settings_collection\n'), ((5498, 5514), 'colour.plotting.override_style', 'override_style', ([], {}), '()\n', (5512, 5514), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, artist, plot_chromaticity_diagram_CIE1931, plot_chromaticity_diagram_CIE1960UCS, filter_passthrough, override_style, render, update_settings_collection\n'), ((6564, 6580), 'colour.plotting.override_style', 'override_style', ([], {}), '()\n', (6578, 6580), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, artist, plot_chromaticity_diagram_CIE1931, plot_chromaticity_diagram_CIE1960UCS, filter_passthrough, override_style, render, update_settings_collection\n'), ((13375, 13391), 'colour.plotting.override_style', 'override_style', ([], {}), '()\n', (13389, 13391), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, artist, plot_chromaticity_diagram_CIE1931, plot_chromaticity_diagram_CIE1960UCS, filter_passthrough, override_style, render, update_settings_collection\n'), ((16640, 16656), 'colour.plotting.override_style', 'override_style', ([], {}), '()\n', (16654, 16656), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, artist, plot_chromaticity_diagram_CIE1931, plot_chromaticity_diagram_CIE1960UCS, 
filter_passthrough, override_style, render, update_settings_collection\n'), ((2795, 2848), 'colour.utilities.validate_method', 'validate_method', (['method', "['CIE 1931', 'CIE 1960 UCS']"], {}), "(method, ['CIE 1931', 'CIE 1960 UCS'])\n", (2810, 2848), False, 'from colour.utilities import optional, tstack, validate_method, zeros\n'), ((3101, 3119), 'colour.plotting.artist', 'artist', ([], {}), '(**settings)\n', (3107, 3119), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, artist, plot_chromaticity_diagram_CIE1931, plot_chromaticity_diagram_CIE1960UCS, filter_passthrough, override_style, render, update_settings_collection\n'), ((3695, 3727), 'numpy.arange', 'np.arange', (['start', '(end + 250)', '(250)'], {}), '(start, end + 250, 250)\n', (3704, 3727), True, 'import numpy as np\n'), ((4430, 4448), 'colour.plotting.render', 'render', ([], {}), '(**settings)\n', (4436, 4448), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, artist, plot_chromaticity_diagram_CIE1931, plot_chromaticity_diagram_CIE1960UCS, filter_passthrough, override_style, render, update_settings_collection\n'), ((10114, 10132), 'colour.plotting.artist', 'artist', ([], {}), '(**settings)\n', (10120, 10132), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, artist, plot_chromaticity_diagram_CIE1931, plot_chromaticity_diagram_CIE1960UCS, filter_passthrough, override_style, render, update_settings_collection\n'), ((13353, 13371), 'colour.plotting.render', 'render', ([], {}), '(**settings)\n', (13359, 13371), False, 'from colour.plotting import CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, artist, plot_chromaticity_diagram_CIE1931, plot_chromaticity_diagram_CIE1960UCS, filter_passthrough, override_style, render, update_settings_collection\n'), ((2925, 2994), 'colour.utilities.optional', 'optional', (['planckian_locus_colours', 'CONSTANTS_COLOUR_STYLE.colour.dark'], {}), '(planckian_locus_colours, 
CONSTANTS_COLOUR_STYLE.colour.dark)\n', (2933, 2994), False, 'from colour.utilities import optional, tstack, validate_method, zeros\n'), ((3793, 3830), 'colour.temperature.CCT_to_uv', 'CCT_to_uv', (['CCT_D_uv', '"""Robertson 1968"""'], {}), "(CCT_D_uv, 'Robertson 1968')\n", (3802, 3830), False, 'from colour.temperature import CCT_to_uv\n'), ((9939, 9969), 'colour.colorimetry.CCS_ILLUMINANTS.get', 'CCS_ILLUMINANTS.get', (['cmfs.name'], {}), '(cmfs.name)\n', (9958, 9969), False, 'from colour.colorimetry import MSDS_CMFS, CCS_ILLUMINANTS\n'), ((3352, 3368), 'colour.models.UCS_uv_to_xy', 'UCS_uv_to_xy', (['uv'], {}), '(uv)\n', (3364, 3368), False, 'from colour.models import UCS_uv_to_xy, XYZ_to_UCS, UCS_to_uv, xy_to_XYZ\n'), ((3756, 3772), 'colour.utilities.zeros', 'zeros', (['CCT.shape'], {}), '(CCT.shape)\n', (3761, 3772), False, 'from colour.utilities import optional, tstack, validate_method, zeros\n'), ((3997, 4017), 'numpy.array', 'np.array', (['[i, -D_uv]'], {}), '([i, -D_uv])\n', (4005, 4017), True, 'import numpy as np\n'), ((4074, 4093), 'numpy.array', 'np.array', (['[i, D_uv]'], {}), '([i, D_uv])\n', (4082, 4093), True, 'import numpy as np\n'), ((10911, 10924), 'colour.models.xy_to_XYZ', 'xy_to_XYZ', (['xy'], {}), '(xy)\n', (10920, 10924), False, 'from colour.models import UCS_uv_to_xy, XYZ_to_UCS, UCS_to_uv, xy_to_XYZ\n')] |
import numpy as np
import RL_agent_env
import RL_Bridge
from snake_env import my_snake
import pickle
# Agent parameters: network architecture, Adam-style optimizer settings
# and replay-buffer/soft-update hyper-parameters forwarded to the agent.
agent_info = {
    'agent_type': RL_agent_env.Agent,
    'agent_parameters': {
        'network_config': {
            'state_dim': 147,
            'num_hidden_units': 512,
            'num_actions': 3
        },
        'optimizer_config': {
            'step_size': 1e-3,
            'beta_m': 0.9,
            'beta_v': 0.999,
            'epsilon': 1e-8
        },
        'replay_buffer_size': 5000,
        'minibatch_sz': 16,
        'num_replay_updates_per_step': 4,
        'gamma': 0.99,
        'tau': 0.001#0.6
    }
}
# Env parameters
environment_info = {'env_type': my_snake,}
# initialize experiment
RL_shell = RL_Bridge.RL_shell_env_agent(environment_info, agent_info)
RL_shell.initialize_agent()
# Episode indices at which final states are recorded; each TH entry
# starts with a single all-zeros placeholder row.
th_inds = [200,500,800,1000,1500,2000,2497,2498,2499,2500]
TH = {i: np.zeros((1,RL_shell.s_size)) for i in th_inds}
# NOTE(review): the state dimension is hard-coded as 147 here instead of
# reading agent_info[...]['state_dim'] like the dumps below; also the
# file objects from open() are never closed. Confirm before changing.
W = pickle.load( open( "weights_{}.p".format(147), "rb" ))
RL_shell.agent.network.set_weights(W)
for i in np.arange(2501):
    RL_shell.initialize_env()
    RL_shell.start()
    print('Iterration...{}'.format(i))
    j = 0
    # Roll one episode, capped at 1000 steps.
    while not RL_shell.is_terminal():
        j += 1
        state = RL_shell.step()
        if j==1000: # protect from infinite loop
            break
    if i in [500,1000, 2000,2500]:
        RL_shell.env.print_state()
    # NOTE(review): "state" is only bound inside the while loop above; if
    # an episode is terminal before its first step this line would raise
    # NameError (or append the previous episode's state). Verify episodes
    # always take at least one step.
    if i in th_inds:
        TH[i] = np.append(TH[i],state.reshape(1,-1),axis=0)
    print(RL_shell.agent.sum_rewards)
    print(RL_shell.env.life)
# Persist the recorded states and the final network weights, keyed by
# the configured state dimension.
W = RL_shell.agent.network.get_weights()
pickle.dump(TH,open( "TH_{}.p".format(agent_info['agent_parameters']['network_config']['state_dim']), "wb" ))
pickle.dump(W,open( "weights_{}.p".format(agent_info['agent_parameters']['network_config']['state_dim']), "wb" ))
| [
"numpy.zeros",
"numpy.arange",
"RL_Bridge.RL_shell_env_agent"
] | [((685, 743), 'RL_Bridge.RL_shell_env_agent', 'RL_Bridge.RL_shell_env_agent', (['environment_info', 'agent_info'], {}), '(environment_info, agent_info)\n', (713, 743), False, 'import RL_Bridge\n'), ((996, 1011), 'numpy.arange', 'np.arange', (['(2501)'], {}), '(2501)\n', (1005, 1011), True, 'import numpy as np\n'), ((841, 871), 'numpy.zeros', 'np.zeros', (['(1, RL_shell.s_size)'], {}), '((1, RL_shell.s_size))\n', (849, 871), True, 'import numpy as np\n')] |
from __future__ import print_function, absolute_import, division
from sklearn.metrics import confusion_matrix, f1_score, cohen_kappa_score
import glob
from collections import Counter
import numpy as np
# Fix the NumPy RNG before any other import may consume random numbers.
np.random.seed(1)
from tensorflow import set_random_seed
# Fix the TensorFlow graph-level seed (TF 1.x API) for reproducibility.
set_random_seed(1)
from datetime import datetime
import argparse
import os
from keras.utils import to_categorical, plot_model
from keras.layers import Flatten, Dense
from keras.models import Model
from keras.optimizers import Adamax as opt
from keras.callbacks import ModelCheckpoint, TensorBoard, CSVLogger
import pandas as pd
# Star imports below provide, among others: eegnet, patientSplitter,
# compute_weight, initializers, max_norm, K and AudioDataGenerator.
from modules import *
from utils import *
from AudioDataGenerator import *
if __name__ == '__main__':
    # ------------------------------------------------------------------
    # Command-line interface. Despite the retraining-oriented options,
    # the body below only *evaluates* saved per-fold checkpoints.
    # ------------------------------------------------------------------
    parser = argparse.ArgumentParser(description='')
    parser.add_argument("--fold_idx", type=int,
                        help="data csvfile to use")
    parser.add_argument("--seed", type=int,
                        help="Random seed")
    parser.add_argument("--loadmodel",
                        help="load previous model checkpoint for retraining (Enter absolute path)")
    parser.add_argument("--epochs", type=int,
                        help="Number of epochs for training")
    parser.add_argument("--batch_size", type=int,
                        help="number of minibatches to take during each backwardpass preferably multiple of 2")
    parser.add_argument("--verbose", type=int, choices=[1, 2],
                        help="Verbosity mode. 1 = progress bar, 2 = one line per epoch (default 2)")
    parser.add_argument("--classweights", type=bool,
                        help="if True, class weights are added")
    parser.add_argument("--comment",
                        help="Add comments to the log files")
    args = parser.parse_args()
    print("%s selected" % (args.fold_idx))
    # NOTE(review): this value is shadowed by the "for fold_idx in
    # range(20)" loop below, so the --fold_idx argument has no effect on
    # which folds are evaluated. Confirm whether single-fold evaluation
    # was intended.
    fold_idx = args.fold_idx
    if args.seed: # if random seed is specified
        print("Random seed specified as %d" % (args.seed))
        random_seed = args.seed
    else:
        random_seed = 1
    num_class = 5
    if args.loadmodel: # If a previously trained model is loaded for retraining
        load_path = args.loadmodel #### path to model to be loaded
        # Recover the starting epoch from the 4 digits following
        # "weights" in the checkpoint file name.
        idx = load_path.find("weights")
        initial_epoch = int(load_path[idx + 8:idx + 8 + 4])
        print("%s model loaded\nInitial epoch is %d" % (args.loadmodel, initial_epoch))
    else:
        print("no model specified, using initializer to initialize weights")
        initial_epoch = 0
        load_path = False
    if args.epochs: # if number of training epochs is specified
        print("Training for %d epochs" % (args.epochs))
        epochs = args.epochs
    else:
        epochs = 200
        print("Training for %d epochs" % (epochs))
    if args.batch_size: # if batch_size is specified
        print("Training with %d samples per minibatch" % (args.batch_size))
        batch_size = args.batch_size
    else:
        batch_size = 1024
        print("Training with %d minibatches" % (batch_size))
    if args.verbose:
        verbose = args.verbose
        print("Verbosity level %d" % (verbose))
    else:
        verbose = 2
    if args.comment:
        comment = args.comment
    else:
        comment = None
    # Directory layout: models/, data/ and logs/ live next to the cwd.
    model_dir = os.path.join(os.getcwd(), '..', 'models')
    fold_dir = os.path.join(os.getcwd(), '..', 'data')
    log_dir = os.path.join(os.getcwd(), '..', 'logs')
    # Hyper-parameters forwarded to the eegnet(...) model builder.
    params = {
        'num_classes': num_class,
        'batch_size': batch_size,
        'epochs': epochs,
        'aafoldname': fold_idx,
        'random_seed': random_seed,
        'load_path': load_path,
        'shuffle': False,
        'initial_epoch': initial_epoch,
        'eeg_length': 3000,
        'kernel_size': 16,
        'bias': True,
        'maxnorm': 400000000000., ## No maxnorm constraint
        'dropout_rate': 0.45, # .5
        'dropout_rate_dense': 0.,
        'padding': 'valid',
        'activation_function': 'relu',
        'subsam': 2,
        'trainable': False,
        'lr': .001, # .0001
        'lr_decay': 0.0 # 1e-5, #1e-5
    }
    # NOTE(review): unused in the remainder of this script.
    current_learning_rate = params['lr']
    # Concatenate every SC*.csv in the data directory into one frame.
    inf = glob.glob(os.path.join(fold_dir, 'SC*.csv'))
    df = pd.DataFrame()
    for f in inf:
        df = pd.concat([df, pd.read_csv(f, header=None)])
    total_valY = []
    total_ypred = []
    # Evaluate each of the 20 patient-wise folds with its own checkpoint.
    for fold_idx in range(20):
        log_name = 'fold_' + str(fold_idx)
        checkpoint_name = os.path.join(model_dir, log_name, 'weights.hdf5')
        trainX, valX, trainY, valY, pat_train, pat_val = patientSplitter(df, fold_idx)
        print(trainX.shape, valX.shape, trainY.shape, valY.shape)
        print("Data loaded")
        # NOTE(review): class_weight is computed but never used below
        # (no fitting happens in this script) — presumably a leftover
        # from the training variant.
        if args.classweights:
            params['class_weight'] = compute_weight(trainY.astype(int), np.unique(trainY.astype(int)))
        else:
            params['class_weight'] = dict(zip(np.r_[0:params['num_classes']], np.ones(params['num_classes'])))
        print('Classwise data in train', Counter(trainY))
        valY = to_categorical(valY)
        valX = np.expand_dims(valX, axis=-1)
        # Rebuild the graph from scratch for every fold.
        K.clear_session()
        top_model = eegnet(**params)
        x = Flatten()(top_model.output)
        x = Dense(params['num_classes'], activation='softmax', kernel_initializer=initializers.he_normal(seed=random_seed),
                  kernel_constraint=max_norm(params['maxnorm']), use_bias=True)(x)
        model = Model(top_model.input, x)
        model.summary()
        if load_path:
            model.load_weights(filepath=load_path, by_name=False)
        # Persist the architecture next to the fold's checkpoint.
        model_json = model.to_json()
        with open(os.path.join(model_dir, log_name, 'model.json'), "w") as json_file:
            json_file.write(model_json)
        model.compile(optimizer=opt(lr=params['lr'], epsilon=None, decay=params['lr_decay']),
                      loss='categorical_crossentropy', metrics=['accuracy'])
        # Deterministic per-sample standardisation of the validation set.
        valgen = AudioDataGenerator(
            samplewise_center=True,
            samplewise_std_normalization=True,
        )
        print(checkpoint_name)
        model.load_weights(checkpoint_name)
        y_pred = model.predict_generator(valgen.flow(valX, None, batch_size=64, seed=1, shuffle=False))
        # Per-fold metrics on the argmax class labels.
        y_pred = np.argmax(y_pred, axis=1)
        valY = np.argmax(valY, axis=1)
        acc = np.mean(valY == y_pred)
        print('acc', acc)
        mf1 = f1_score(valY, y_pred, average="macro")
        print('mf1', mf1)
        ck = cohen_kappa_score(valY, y_pred)
        print('ck', ck)
        cm = confusion_matrix(valY, y_pred)
        print('cm', cm)
        total_valY.extend(valY)
        total_ypred.extend(y_pred)
    # Pooled metrics over all folds' validation predictions.
    print("Overall prediction performance\n")
    y_true = np.asarray(total_valY)
    y_pred = np.asarray(total_ypred)
    n_examples = len(y_true)
    cm = confusion_matrix(y_true, y_pred)
    acc = np.mean(y_true == y_pred)
    mf1 = f1_score(y_true, y_pred, average="macro")
    ck = cohen_kappa_score(y_true, y_pred)
    print(
        "n={}, acc={:.3f}, f1={:.3f}, ck={:.3f}".format(
            n_examples, acc, mf1, ck
        )
    )
    print(cm)
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.argmax",
"pandas.read_csv",
"numpy.ones",
"keras.models.Model",
"sklearn.metrics.f1_score",
"numpy.mean",
"os.path.join",
"pandas.DataFrame",
"keras.optimizers.Adamax",
"keras.layers.Flatten",
"tensorflow.set_random_seed",
"collections... | [((202, 219), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (216, 219), True, 'import numpy as np\n'), ((260, 278), 'tensorflow.set_random_seed', 'set_random_seed', (['(1)'], {}), '(1)\n', (275, 278), False, 'from tensorflow import set_random_seed\n'), ((706, 745), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (729, 745), False, 'import argparse\n'), ((4162, 4176), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4174, 4176), True, 'import pandas as pd\n'), ((6631, 6653), 'numpy.asarray', 'np.asarray', (['total_valY'], {}), '(total_valY)\n', (6641, 6653), True, 'import numpy as np\n'), ((6667, 6690), 'numpy.asarray', 'np.asarray', (['total_ypred'], {}), '(total_ypred)\n', (6677, 6690), True, 'import numpy as np\n'), ((6729, 6761), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (6745, 6761), False, 'from sklearn.metrics import confusion_matrix, f1_score, cohen_kappa_score\n'), ((6772, 6797), 'numpy.mean', 'np.mean', (['(y_true == y_pred)'], {}), '(y_true == y_pred)\n', (6779, 6797), True, 'import numpy as np\n'), ((6808, 6849), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'y_pred'], {'average': '"""macro"""'}), "(y_true, y_pred, average='macro')\n", (6816, 6849), False, 'from sklearn.metrics import confusion_matrix, f1_score, cohen_kappa_score\n'), ((6859, 6892), 'sklearn.metrics.cohen_kappa_score', 'cohen_kappa_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (6876, 6892), False, 'from sklearn.metrics import confusion_matrix, f1_score, cohen_kappa_score\n'), ((3244, 3255), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3253, 3255), False, 'import os\n'), ((3301, 3312), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3310, 3312), False, 'import os\n'), ((3355, 3366), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3364, 3366), False, 'import os\n'), ((4118, 4151), 'os.path.join', 'os.path.join', 
(['fold_dir', '"""SC*.csv"""'], {}), "(fold_dir, 'SC*.csv')\n", (4130, 4151), False, 'import os\n'), ((4395, 4444), 'os.path.join', 'os.path.join', (['model_dir', 'log_name', '"""weights.hdf5"""'], {}), "(model_dir, log_name, 'weights.hdf5')\n", (4407, 4444), False, 'import os\n'), ((4962, 4982), 'keras.utils.to_categorical', 'to_categorical', (['valY'], {}), '(valY)\n', (4976, 4982), False, 'from keras.utils import to_categorical, plot_model\n'), ((4998, 5027), 'numpy.expand_dims', 'np.expand_dims', (['valX'], {'axis': '(-1)'}), '(valX, axis=-1)\n', (5012, 5027), True, 'import numpy as np\n'), ((5356, 5381), 'keras.models.Model', 'Model', (['top_model.input', 'x'], {}), '(top_model.input, x)\n', (5361, 5381), False, 'from keras.models import Model\n'), ((6157, 6182), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (6166, 6182), True, 'import numpy as np\n'), ((6198, 6221), 'numpy.argmax', 'np.argmax', (['valY'], {'axis': '(1)'}), '(valY, axis=1)\n', (6207, 6221), True, 'import numpy as np\n'), ((6236, 6259), 'numpy.mean', 'np.mean', (['(valY == y_pred)'], {}), '(valY == y_pred)\n', (6243, 6259), True, 'import numpy as np\n'), ((6300, 6339), 'sklearn.metrics.f1_score', 'f1_score', (['valY', 'y_pred'], {'average': '"""macro"""'}), "(valY, y_pred, average='macro')\n", (6308, 6339), False, 'from sklearn.metrics import confusion_matrix, f1_score, cohen_kappa_score\n'), ((6379, 6410), 'sklearn.metrics.cohen_kappa_score', 'cohen_kappa_score', (['valY', 'y_pred'], {}), '(valY, y_pred)\n', (6396, 6410), False, 'from sklearn.metrics import confusion_matrix, f1_score, cohen_kappa_score\n'), ((6448, 6478), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['valY', 'y_pred'], {}), '(valY, y_pred)\n', (6464, 6478), False, 'from sklearn.metrics import confusion_matrix, f1_score, cohen_kappa_score\n'), ((4929, 4944), 'collections.Counter', 'Counter', (['trainY'], {}), '(trainY)\n', (4936, 4944), False, 'from collections import Counter\n'), 
((5104, 5113), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5111, 5113), False, 'from keras.layers import Flatten, Dense\n'), ((4223, 4250), 'pandas.read_csv', 'pd.read_csv', (['f'], {'header': 'None'}), '(f, header=None)\n', (4234, 4250), True, 'import pandas as pd\n'), ((5549, 5596), 'os.path.join', 'os.path.join', (['model_dir', 'log_name', '"""model.json"""'], {}), "(model_dir, log_name, 'model.json')\n", (5561, 5596), False, 'import os\n'), ((5690, 5750), 'keras.optimizers.Adamax', 'opt', ([], {'lr': "params['lr']", 'epsilon': 'None', 'decay': "params['lr_decay']"}), "(lr=params['lr'], epsilon=None, decay=params['lr_decay'])\n", (5693, 5750), True, 'from keras.optimizers import Adamax as opt\n'), ((4854, 4884), 'numpy.ones', 'np.ones', (["params['num_classes']"], {}), "(params['num_classes'])\n", (4861, 4884), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Probemaker
"""
import os
import random
from shutil import rmtree
import numpy as np
# Caption files: one caption per line, indexed 0..99 by the ids below.
data_ambig = os.path.join("experiment_pics","dry_run_probe_ambig")
data_nonambig = os.path.join("experiment_pics","dry_run_probe_nonambig")
f_ambig = open(data_ambig,"r")
cap_a = f_ambig.read().split("\n")
# Shuffle the 100 ambiguous stimulus ids in place, then repeat the
# shuffled order three times (one occurrence per probe condition).
ambig_sent_id = np.arange(start=0, stop=100)
np.random.shuffle(ambig_sent_id)
ambig_sent = np.concatenate((ambig_sent_id,ambig_sent_id,ambig_sent_id), axis=None) #accounting for all permutations of the ambiguous stimulus
#ambig_captions = []
#for ind in ambig_sent:
#    ambig_captions.append(cap_a[ind])
# Same preparation for the non-ambiguous stimuli.
f_nonambig = open(data_nonambig,"r")
cap_na = f_nonambig.read().split("\n")
nonambig_sent_id = np.arange(start=0, stop=100)
np.random.shuffle(nonambig_sent_id)
nonambig_sent = np.concatenate((nonambig_sent_id,nonambig_sent_id,nonambig_sent_id), axis=None)
#nonambig_captions = []
#for ind in nonambig_sent:
#    nonambig_captions.append(cap_na[ind])
# Start from a fresh "probes" output directory (recreate if it exists).
try:
    os.mkdir("probes")
except FileExistsError:
    rmtree(os.path.join(os.getcwd(),"probes"))
    os.mkdir("probes")
def amb_rand(n):
r = random.randint(0,100)
if r==n:
amb_rand(n)
return r
def namb_rand(n):
r = random.randint(0,100)
if r==n:
namb_rand(n)
return r
def construct_probe():
probe_name = 0
s_no = 0
for probe in range(0,300,15):
probe_name += 1
p_name = "probes/probe"+str(probe_name)
print(probe_name)
f = open(p_name,"a")
for sent in range(5):
for case in range(3):
if case==0:
img_amb = ambig_sent[s_no]
img_namb = nonambig_sent[s_no]
elif case==1:
img_amb = amb_rand(s_no)
img_namb = namb_rand(s_no)
elif case==2:
img_amb ='control'
img_namb = 'control'
tup_amb = 'amb'+ '\t' + str(case) + '\t' + str(ambig_sent[s_no]) + '\t' + cap_a[ambig_sent[s_no]] + '\t' + str(img_amb)+".jpg"+"\n"
tup_namb = 'namb'+ '\t' + str(case) + '\t' + str(nonambig_sent[s_no]) + '\t' + cap_na[nonambig_sent[s_no]] + '\t' + str(img_namb)+".jpg"+"\n"
f.write(tup_amb)
f.write(tup_namb)
s_no += 1
amb_contrastive = "A person in a blue ski suit is racing two girls on skis."
namb_contrastive = "A person in her blue ski suit is racing two girls on skis."
tup_amb = 'amb'+ '\t' + '3' + '\t' + 'contrastive_amb' + '\t' + amb_contrastive + '\t' + "contrastive.jpg"+"\n"
tup_namb = 'namb'+ '\t' + '3' + '\t' + 'contrastive_namb' + '\t' + namb_contrastive + '\t' + "contrastive.jpg"+"\n"
f.write(tup_amb)
f.write(tup_namb)
f.close()
construct_probe()
| [
"os.mkdir",
"numpy.random.shuffle",
"random.randint",
"os.getcwd",
"numpy.arange",
"os.path.join",
"numpy.concatenate"
] | [((125, 179), 'os.path.join', 'os.path.join', (['"""experiment_pics"""', '"""dry_run_probe_ambig"""'], {}), "('experiment_pics', 'dry_run_probe_ambig')\n", (137, 179), False, 'import os\n'), ((195, 252), 'os.path.join', 'os.path.join', (['"""experiment_pics"""', '"""dry_run_probe_nonambig"""'], {}), "('experiment_pics', 'dry_run_probe_nonambig')\n", (207, 252), False, 'import os\n'), ((336, 364), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '(100)'}), '(start=0, stop=100)\n', (345, 364), True, 'import numpy as np\n'), ((366, 398), 'numpy.random.shuffle', 'np.random.shuffle', (['ambig_sent_id'], {}), '(ambig_sent_id)\n', (383, 398), True, 'import numpy as np\n'), ((412, 484), 'numpy.concatenate', 'np.concatenate', (['(ambig_sent_id, ambig_sent_id, ambig_sent_id)'], {'axis': 'None'}), '((ambig_sent_id, ambig_sent_id, ambig_sent_id), axis=None)\n', (426, 484), True, 'import numpy as np\n'), ((724, 752), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '(100)'}), '(start=0, stop=100)\n', (733, 752), True, 'import numpy as np\n'), ((754, 789), 'numpy.random.shuffle', 'np.random.shuffle', (['nonambig_sent_id'], {}), '(nonambig_sent_id)\n', (771, 789), True, 'import numpy as np\n'), ((806, 892), 'numpy.concatenate', 'np.concatenate', (['(nonambig_sent_id, nonambig_sent_id, nonambig_sent_id)'], {'axis': 'None'}), '((nonambig_sent_id, nonambig_sent_id, nonambig_sent_id), axis\n =None)\n', (820, 892), True, 'import numpy as np\n'), ((997, 1015), 'os.mkdir', 'os.mkdir', (['"""probes"""'], {}), "('probes')\n", (1005, 1015), False, 'import os\n'), ((1140, 1162), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (1154, 1162), False, 'import random\n'), ((1236, 1258), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (1250, 1258), False, 'import random\n'), ((1091, 1109), 'os.mkdir', 'os.mkdir', (['"""probes"""'], {}), "('probes')\n", (1099, 1109), False, 'import os\n'), ((1064, 1075), 'os.getcwd', 
'os.getcwd', ([], {}), '()\n', (1073, 1075), False, 'import os\n')] |
from pythermo import pythermo as pt
import numpy as np
from matplotlib import pyplot as plt
CH4CO2H2S = pt.Model()
CH4CO2H2S.ChooseAModel(2)
nc = 3
CH4CO2H2S.NoPureComp(nc)
CH4CO2H2S.CritProps(1, 190.60, 45.99, 0.008)
CH4CO2H2S.CritProps(2, 304.20, 73.75, 0.225)
CH4CO2H2S.CritProps(3, 373.20, 89.40, 0.100)
CH4CO2H2S.NoSpecKij(3)
CH4CO2H2S.SpecKij(1, 1, 2, 0.12)
CH4CO2H2S.SpecKij(2, 1, 3, 0.08)
CH4CO2H2S.SpecKij(3, 2, 3, 0.12)
CH4CO2H2S.Setup_Thermo()
T = 170.0
P = 19.5
np_3p, p1xyz, p2xyz, p3xyz, np_tl, id_tl, tl_p1, tl_p2 = CH4CO2H2S.TernaryXYDiagram(T, P)
# plot three-phase region
for i in range(np_3p):
t1 = np.array((p1xyz[i,0], p2xyz[i,0], p3xyz[i,0]))
t2 = np.array((p1xyz[i,1], p2xyz[i,1], p3xyz[i,1]))
tpr = plt.fill(t1, t2, 'g')
# plot phase boundaries
for i in range(np_tl):
idx = np.arange(id_tl[i], id_tl[i+1], 1)
# it is interesting that "plt.plot(tl_p1[idx][0], tl_p1[idx][1], 'b')" is not working... X.L. 2018-10-17
# It is really unbelievable. Why [idx][0] is not working... X.L. 2018-10-24
phb = plt.plot(tl_p1[idx,0], tl_p1[idx,1], 'b')
plt.plot(tl_p2[idx,0], tl_p2[idx,1], 'b')
# plot tie-lines
for i in range(id_tl[np_tl]):
x = np.array((tl_p1[i,0],tl_p2[i,0]))
y = np.array((tl_p1[i,1],tl_p2[i,1]))
til = plt.plot(x, y, 'r')
x = [0, 1]
y = [1, 0]
plt.plot(x, y, 'k')
plt.xlabel('Mole fraction of Comp 1')
plt.ylabel('Mole fraction of Comp 2')
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.legend([tpr[0],phb[0],til[0]],['Three phase region','Phase boundary','Tie-lines'])
plt.show()
CH4CO2H2S.Finishup_Thermo()
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.fill",
"matplotlib.pyplot.legend",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"pythermo.pythermo.Model"
] | [((106, 116), 'pythermo.pythermo.Model', 'pt.Model', ([], {}), '()\n', (114, 116), True, 'from pythermo import pythermo as pt\n'), ((1319, 1338), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""k"""'], {}), "(x, y, 'k')\n", (1327, 1338), True, 'from matplotlib import pyplot as plt\n'), ((1339, 1376), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mole fraction of Comp 1"""'], {}), "('Mole fraction of Comp 1')\n", (1349, 1376), True, 'from matplotlib import pyplot as plt\n'), ((1377, 1414), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mole fraction of Comp 2"""'], {}), "('Mole fraction of Comp 2')\n", (1387, 1414), True, 'from matplotlib import pyplot as plt\n'), ((1415, 1433), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1423, 1433), True, 'from matplotlib import pyplot as plt\n'), ((1434, 1452), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1442, 1452), True, 'from matplotlib import pyplot as plt\n'), ((1453, 1548), 'matplotlib.pyplot.legend', 'plt.legend', (['[tpr[0], phb[0], til[0]]', "['Three phase region', 'Phase boundary', 'Tie-lines']"], {}), "([tpr[0], phb[0], til[0]], ['Three phase region',\n 'Phase boundary', 'Tie-lines'])\n", (1463, 1548), True, 'from matplotlib import pyplot as plt\n'), ((1541, 1551), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1549, 1551), True, 'from matplotlib import pyplot as plt\n'), ((629, 678), 'numpy.array', 'np.array', (['(p1xyz[i, 0], p2xyz[i, 0], p3xyz[i, 0])'], {}), '((p1xyz[i, 0], p2xyz[i, 0], p3xyz[i, 0]))\n', (637, 678), True, 'import numpy as np\n'), ((684, 733), 'numpy.array', 'np.array', (['(p1xyz[i, 1], p2xyz[i, 1], p3xyz[i, 1])'], {}), '((p1xyz[i, 1], p2xyz[i, 1], p3xyz[i, 1]))\n', (692, 733), True, 'import numpy as np\n'), ((740, 761), 'matplotlib.pyplot.fill', 'plt.fill', (['t1', 't2', '"""g"""'], {}), "(t1, t2, 'g')\n", (748, 761), True, 'from matplotlib import pyplot as plt\n'), ((819, 855), 'numpy.arange', 
'np.arange', (['id_tl[i]', 'id_tl[i + 1]', '(1)'], {}), '(id_tl[i], id_tl[i + 1], 1)\n', (828, 855), True, 'import numpy as np\n'), ((1050, 1093), 'matplotlib.pyplot.plot', 'plt.plot', (['tl_p1[idx, 0]', 'tl_p1[idx, 1]', '"""b"""'], {}), "(tl_p1[idx, 0], tl_p1[idx, 1], 'b')\n", (1058, 1093), True, 'from matplotlib import pyplot as plt\n'), ((1095, 1138), 'matplotlib.pyplot.plot', 'plt.plot', (['tl_p2[idx, 0]', 'tl_p2[idx, 1]', '"""b"""'], {}), "(tl_p2[idx, 0], tl_p2[idx, 1], 'b')\n", (1103, 1138), True, 'from matplotlib import pyplot as plt\n'), ((1192, 1228), 'numpy.array', 'np.array', (['(tl_p1[i, 0], tl_p2[i, 0])'], {}), '((tl_p1[i, 0], tl_p2[i, 0]))\n', (1200, 1228), True, 'import numpy as np\n'), ((1233, 1269), 'numpy.array', 'np.array', (['(tl_p1[i, 1], tl_p2[i, 1])'], {}), '((tl_p1[i, 1], tl_p2[i, 1]))\n', (1241, 1269), True, 'import numpy as np\n'), ((1276, 1295), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""r"""'], {}), "(x, y, 'r')\n", (1284, 1295), True, 'from matplotlib import pyplot as plt\n')] |
import operator
import os
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
import cv2
import numpy as np
import pandas as pd
import torch
from torch.utils import data
from tqdm import tqdm
from sdcdup.utils import generate_tag_pair_lookup
from sdcdup.utils import get_hamming_distance
from sdcdup.utils import fuzzy_join
from sdcdup.utils import get_tile
from sdcdup.utils import get_img
from sdcdup.utils import to_hls
from sdcdup.utils import to_bgr
from sdcdup.utils import idx_chan_map
from sdcdup.utils import hls_shift
from sdcdup.utils import load_duplicate_truth
from sdcdup.utils import train_image_dir
from sdcdup.utils import train_tile_dir
def write_256_tile(img_id):
img = None
filebase, fileext = img_id.split('.')
for idx in range(9):
outfile = os.path.join(train_tile_dir, f'{filebase}_{idx}.{fileext}')
if os.path.exists(outfile):
continue
if img is None:
img = cv2.imread(os.path.join(train_image_dir, img_id))
tile = get_tile(img, idx)
cv2.imwrite(outfile, tile)
def create_256_tiles(train_image_dir, train_tile_dir):
os.makedirs(train_tile_dir, exist_ok=True)
img_ids = os.listdir(train_image_dir)
with ThreadPoolExecutor(max_workers=16) as executor:
for _ in tqdm(executor.map(write_256_tile, img_ids), total=len(img_ids)):
pass
def create_dataset_from_tiles(sdcic):
"""
is_dup issolid action
i==j i j skip?
----------------------
1 1 1 1
1 1 0 1 Does not exist?
1 0 1 1 Does not exist?
1 0 0 0
----------------------
0 1 1 1
0 1 0 0 Could present problems if other tile is "near" solid.
0 0 1 0 Could present problems if other tile is "near" solid.
0 0 0 0
:param sdcic:
:return:
"""
img_overlap_pairs_dup_keys = []
img_overlap_pairs_non_dup_all = []
KeyScore = namedtuple('keyscore', 'key score')
for img_id, tile_md5hash_grid in tqdm(sdcic.img_metrics['md5'].items()):
for idx1, tile1_md5hash in enumerate(tile_md5hash_grid):
for idx2, tile2_md5hash in enumerate(tile_md5hash_grid):
if idx1 > idx2:
continue
tile1_issolid = np.all(sdcic.img_metrics['sol'][img_id][idx1] >= 0)
tile2_issolid = np.all(sdcic.img_metrics['sol'][img_id][idx2] >= 0)
if idx1 == idx2:
if tile1_issolid:
continue
if tile2_issolid:
continue
img_overlap_pairs_dup_keys.append((img_id, img_id, idx1, idx2, 1))
continue
# if idx1 != idx2:
if tile1_md5hash == tile2_md5hash:
continue
if tile1_issolid and tile2_issolid:
continue
bmh1 = sdcic.img_metrics['bmh96'][img_id][idx1]
bmh2 = sdcic.img_metrics['bmh96'][img_id][idx2]
score = get_hamming_distance(bmh1, bmh2, normalize=True, as_score=True)
if score == 1:
tile1 = sdcic.get_tile(sdcic.get_img(img_id), idx1)
tile2 = sdcic.get_tile(sdcic.get_img(img_id), idx2)
tile3 = fuzzy_join(tile1, tile2)
pix3, cts3 = np.unique(tile3.flatten(), return_counts=True)
if np.max(cts3 / (256 * 256 * 3)) > 0.97:
# skip all the near solid (i.e. blue edge) tiles.
continue
img_overlap_pairs_non_dup_all.append(KeyScore((img_id, img_id, idx1, idx2, 0), score))
img_overlap_pairs_non_dup_keys_sorted = []
for candidate in tqdm(sorted(img_overlap_pairs_non_dup_all, key=operator.attrgetter('score'), reverse=True)):
img_overlap_pairs_non_dup_keys_sorted.append(candidate.key)
img_overlap_pairs_non_dup_keys = img_overlap_pairs_non_dup_keys_sorted[:len(img_overlap_pairs_dup_keys)]
img_overlap_pairs = img_overlap_pairs_non_dup_keys + img_overlap_pairs_dup_keys
# non_dup_scores = []
# img_overlap_pairs_non_dup_all_sorted = []
# for candidate in tqdm(sorted(img_overlap_pairs_non_dup_all, key=operator.attrgetter('score'), reverse=True)):
# non_dup_scores.append(candidate.score)
# img_overlap_pairs_non_dup_all_sorted.append(candidate)
# assert min(non_dup_scores) == non_dup_scores[0], (min(non_dup_scores), non_dup_scores[0])
# assert max(non_dup_scores) == non_dup_scores[-1], (max(non_dup_scores), non_dup_scores[-1])
# non_dup_scores = non_dup_scores[:len(img_overlap_pairs_dup_keys)]
# assert max(non_dup_scores) == non_dup_scores[-1], (max(non_dup_scores), non_dup_scores[-1])
# np.random.shuffle(non_dup_scores)
# img_overlap_pairs_dup = []
# for key, score in zip(img_overlap_pairs_dup_keys, non_dup_scores):
# img_overlap_pairs_dup.append(KeyScore(key, score))
# img_overlap_pairs_non_dup_sorted = img_overlap_pairs_non_dup_all_sorted[:len(img_overlap_pairs_dup_keys)]
# img_overlap_pairs = img_overlap_pairs_non_dup_sorted + img_overlap_pairs_dup
return img_overlap_pairs
def create_dataset_from_truth(sdcic):
tpl = generate_tag_pair_lookup()
dup_truth = load_duplicate_truth()
dup_pairs = set()
img_overlap_pairs = []
# First collect all image pairs flagged as duplicates.
for (img1_id, img2_id, img1_overlap_tag), is_dup in dup_truth.items():
if is_dup:
for idx1, idx2 in tpl[img1_overlap_tag]:
img_overlap_pairs.append((img1_id, img2_id, idx1, idx2, is_dup))
# Keep a record of all duplicate image pairs for later reference.
dup_pairs.add((img1_id, img2_id))
n_dup_tile_pairs = len(img_overlap_pairs)
print(f"Number of non-dup/dup tiles: {0:>8}/{n_dup_tile_pairs}")
# For the second pass, record the non-dups as non-dups unless the hashes of
# overlapping tile are equal in which case just ignore that tile pair.
# Also, if the two images have already been flagged duplicate (possibly for
# a different overlap), then exclude all other overlaps we might have
# accidentally picked up.
done = False
for (img1_id, img2_id, img1_overlap_tag), is_dup in dup_truth.items():
if is_dup or (img1_id, img2_id) in dup_pairs:
continue
# If 2 tiles are the same then skip them since they are actually dups.
# Remember a dup corresponds to the "entire" overlay. if the overlay
# is flagged as non-dup then at least one of the tiles is different.
for idx1, idx2 in tpl[img1_overlap_tag]:
if sdcic.img_metrics['md5'][img1_id][idx1] == sdcic.img_metrics['md5'][img2_id][idx2]:
continue
img_overlap_pairs.append((img1_id, img2_id, idx1, idx2, is_dup))
if len(img_overlap_pairs) > 2 * n_dup_tile_pairs:
done = True
break
if done:
break
print(f"Number of non-dup/dup tiles: {len(img_overlap_pairs) - n_dup_tile_pairs:>8}/{n_dup_tile_pairs}")
return img_overlap_pairs
class TrainDataset(data.Dataset):
"""Characterizes a dataset for PyTorch"""
def __init__(self, img_overlaps, train_or_valid, image_transform,
in_shape=(6, 256, 256),
out_shape=(1,)):
"""Initialization"""
self.img_overlaps = img_overlaps
# TODO: handle case if train_or_valid == 'test'
self.valid = train_or_valid == 'valid'
self.image_transform = image_transform
self.ij = ((0, 0), (0, 1), (0, 2),
(1, 0), (1, 1), (1, 2),
(2, 0), (2, 1), (2, 2))
self.in_shape = in_shape
self.out_shape = out_shape
self.hls_limits = {'H': 10, 'L': 20, 'S': 20}
if self.valid:
self.img_augs = [self.get_random_augmentation() for _ in self.img_overlaps]
def __len__(self):
"""Denotes the total number of samples"""
return len(self.img_overlaps)
def __getitem__(self, index):
"""Generates one sample of data"""
if self.valid:
img_aug = self.img_augs[index]
else:
img_aug = self.get_random_augmentation()
return self.get_data_pair(self.img_overlaps[index], img_aug) # X, y
def get_random_augmentation(self):
# So, we aren't always biasing the 'second' image with hls shifting...
flip_img_order = np.random.random() > 0.5
# The first tile will always come from either a slice of the image or from the saved slice.
first_from_large = np.random.random() > 0.5
second_from_large = np.random.random() > 0.5
second_augment_hls = 0 if self.valid else np.random.random() > 0.25
flip_stacking_order = np.random.random() > 0.5
hls_idx = np.random.choice(3)
hls_chan = idx_chan_map[hls_idx]
hls_gain = np.random.choice(self.hls_limits[hls_chan]) + 1
hls_gain = hls_gain if np.random.random() > 0.5 else -1 * hls_gain
return flip_img_order, first_from_large, second_from_large, second_augment_hls, hls_chan, hls_gain, flip_stacking_order
def color_shift(self, img, chan, gain):
hls = to_hls(img)
hls_shifted = hls_shift(hls, chan, gain)
return to_bgr(hls_shifted)
def get_tile(self, img, idx, sz=256):
i, j = self.ij[idx]
return img[i * sz:(i + 1) * sz, j * sz:(j + 1) * sz, :]
def read_from_large(self, img_id, idx):
img = cv2.imread(os.path.join(train_image_dir, img_id))
return self.get_tile(img, idx)
def read_from_small(self, img_id, idx):
filebase, fileext = img_id.split('.')
tile_id = f'{filebase}_{idx}.{fileext}'
return cv2.imread(os.path.join(train_tile_dir, tile_id))
def get_data_pair(self, img_overlap, img_aug):
flip_img_order, first_from_large, second_from_large, aug_hls, chan, gain, flip_stacking_order = img_aug
if flip_img_order:
img2_id, img1_id, idx2, idx1, is_dup = img_overlap
else:
img1_id, img2_id, idx1, idx2, is_dup = img_overlap
read1 = self.read_from_large if first_from_large else self.read_from_small
read2 = self.read_from_large if second_from_large else self.read_from_small
same_image = img1_id == img2_id
if same_image: # img1_id == img2_id
if is_dup: # idx1 == idx2
tile1 = read1(img1_id, idx1)
if aug_hls:
tile2 = self.color_shift(tile1, chan, gain)
else:
tile2 = read2(img2_id, idx2)
else: # idx1 != idx2
if first_from_large and second_from_large:
img = cv2.imread(os.path.join(train_image_dir, img1_id))
tile1 = self.get_tile(img, idx1)
tile2 = self.get_tile(img, idx2)
else:
tile1 = read1(img1_id, idx1)
tile2 = read2(img2_id, idx2)
else: # img1_id != img2_id
if is_dup:
tile1 = read1(img1_id, idx1)
if aug_hls:
tile2 = self.color_shift(tile1, chan, gain)
else:
tile2 = read2(img2_id, idx2)
else:
tile1 = read1(img1_id, idx1)
tile2 = read2(img2_id, idx2)
tile1 = cv2.cvtColor(tile1, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.
tile2 = cv2.cvtColor(tile2, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.
X = np.dstack([tile2, tile1]) if flip_stacking_order else np.dstack([tile1, tile2])
X = self.image_transform(X)
y = np.array([is_dup], dtype=np.float32)
return X, y
class EvalDataset(data.Dataset):
"""Characterizes a dataset for PyTorch"""
def __init__(self, tile_pairs,
image_transform=None,
in_shape=(6, 256, 256),
out_shape=(1,)):
"""Initialization"""
self.sz = 256
self.tile_pairs = tile_pairs
self.image_transform = image_transform
self.ij = ((0, 0), (0, 1), (0, 2),
(1, 0), (1, 1), (1, 2),
(2, 0), (2, 1), (2, 2))
self.in_shape = in_shape
self.out_shape = out_shape
def __len__(self):
"""Denotes the total number of samples"""
return len(self.tile_pairs)
def __getitem__(self, index):
"""Generates one sample of data"""
tp = self.tile_pairs[index]
img1 = get_img(tp.img1_id)
img2 = get_img(tp.img2_id)
tile1 = cv2.cvtColor(self.get_tile(img1, *self.ij[tp.idx1]), cv2.COLOR_BGR2RGB).astype(np.float32) / 255.
tile2 = cv2.cvtColor(self.get_tile(img2, *self.ij[tp.idx2]), cv2.COLOR_BGR2RGB).astype(np.float32) / 255.
X = np.dstack([tile1, tile2])
X = X.transpose((2, 0, 1))
X = torch.from_numpy(X)
return X
def get_tile(self, img, i, j):
return img[i * self.sz:(i + 1) * self.sz, j * self.sz:(j + 1) * self.sz, :]
class WrappedDataLoader:
def __init__(self, dl, func):
self.dl = dl
self.func = func
def __len__(self):
return len(self.dl)
def __iter__(self):
batches = iter(self.dl)
for b in batches:
yield (self.func(b))
| [
"sdcdup.utils.to_bgr",
"sdcdup.utils.load_duplicate_truth",
"sdcdup.utils.to_hls",
"sdcdup.utils.hls_shift",
"os.path.join",
"cv2.cvtColor",
"cv2.imwrite",
"os.path.exists",
"numpy.max",
"numpy.random.choice",
"sdcdup.utils.get_hamming_distance",
"concurrent.futures.ThreadPoolExecutor",
"sdc... | [((1158, 1200), 'os.makedirs', 'os.makedirs', (['train_tile_dir'], {'exist_ok': '(True)'}), '(train_tile_dir, exist_ok=True)\n', (1169, 1200), False, 'import os\n'), ((1215, 1242), 'os.listdir', 'os.listdir', (['train_image_dir'], {}), '(train_image_dir)\n', (1225, 1242), False, 'import os\n'), ((2020, 2055), 'collections.namedtuple', 'namedtuple', (['"""keyscore"""', '"""key score"""'], {}), "('keyscore', 'key score')\n", (2030, 2055), False, 'from collections import namedtuple\n'), ((5362, 5388), 'sdcdup.utils.generate_tag_pair_lookup', 'generate_tag_pair_lookup', ([], {}), '()\n', (5386, 5388), False, 'from sdcdup.utils import generate_tag_pair_lookup\n'), ((5405, 5427), 'sdcdup.utils.load_duplicate_truth', 'load_duplicate_truth', ([], {}), '()\n', (5425, 5427), False, 'from sdcdup.utils import load_duplicate_truth\n'), ((819, 878), 'os.path.join', 'os.path.join', (['train_tile_dir', 'f"""{filebase}_{idx}.{fileext}"""'], {}), "(train_tile_dir, f'{filebase}_{idx}.{fileext}')\n", (831, 878), False, 'import os\n'), ((890, 913), 'os.path.exists', 'os.path.exists', (['outfile'], {}), '(outfile)\n', (904, 913), False, 'import os\n'), ((1043, 1061), 'sdcdup.utils.get_tile', 'get_tile', (['img', 'idx'], {}), '(img, idx)\n', (1051, 1061), False, 'from sdcdup.utils import get_tile\n'), ((1070, 1096), 'cv2.imwrite', 'cv2.imwrite', (['outfile', 'tile'], {}), '(outfile, tile)\n', (1081, 1096), False, 'import cv2\n'), ((1253, 1287), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(16)'}), '(max_workers=16)\n', (1271, 1287), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((9022, 9041), 'numpy.random.choice', 'np.random.choice', (['(3)'], {}), '(3)\n', (9038, 9041), True, 'import numpy as np\n'), ((9413, 9424), 'sdcdup.utils.to_hls', 'to_hls', (['img'], {}), '(img)\n', (9419, 9424), False, 'from sdcdup.utils import to_hls\n'), ((9447, 9473), 'sdcdup.utils.hls_shift', 'hls_shift', (['hls', 'chan', 'gain'], {}), 
'(hls, chan, gain)\n', (9456, 9473), False, 'from sdcdup.utils import hls_shift\n'), ((9489, 9508), 'sdcdup.utils.to_bgr', 'to_bgr', (['hls_shifted'], {}), '(hls_shifted)\n', (9495, 9508), False, 'from sdcdup.utils import to_bgr\n'), ((11904, 11940), 'numpy.array', 'np.array', (['[is_dup]'], {'dtype': 'np.float32'}), '([is_dup], dtype=np.float32)\n', (11912, 11940), True, 'import numpy as np\n'), ((12765, 12784), 'sdcdup.utils.get_img', 'get_img', (['tp.img1_id'], {}), '(tp.img1_id)\n', (12772, 12784), False, 'from sdcdup.utils import get_img\n'), ((12800, 12819), 'sdcdup.utils.get_img', 'get_img', (['tp.img2_id'], {}), '(tp.img2_id)\n', (12807, 12819), False, 'from sdcdup.utils import get_img\n'), ((13062, 13087), 'numpy.dstack', 'np.dstack', (['[tile1, tile2]'], {}), '([tile1, tile2])\n', (13071, 13087), True, 'import numpy as np\n'), ((13135, 13154), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (13151, 13154), False, 'import torch\n'), ((8642, 8660), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8658, 8660), True, 'import numpy as np\n'), ((8794, 8812), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8810, 8812), True, 'import numpy as np\n'), ((8847, 8865), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8863, 8865), True, 'import numpy as np\n'), ((8978, 8996), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8994, 8996), True, 'import numpy as np\n'), ((9102, 9145), 'numpy.random.choice', 'np.random.choice', (['self.hls_limits[hls_chan]'], {}), '(self.hls_limits[hls_chan])\n', (9118, 9145), True, 'import numpy as np\n'), ((9714, 9751), 'os.path.join', 'os.path.join', (['train_image_dir', 'img_id'], {}), '(train_image_dir, img_id)\n', (9726, 9751), False, 'import os\n'), ((9957, 9994), 'os.path.join', 'os.path.join', (['train_tile_dir', 'tile_id'], {}), '(train_tile_dir, tile_id)\n', (9969, 9994), False, 'import os\n'), ((11776, 11801), 'numpy.dstack', 'np.dstack', (['[tile2, tile1]'], 
{}), '([tile2, tile1])\n', (11785, 11801), True, 'import numpy as np\n'), ((11830, 11855), 'numpy.dstack', 'np.dstack', (['[tile1, tile2]'], {}), '([tile1, tile2])\n', (11839, 11855), True, 'import numpy as np\n'), ((989, 1026), 'os.path.join', 'os.path.join', (['train_image_dir', 'img_id'], {}), '(train_image_dir, img_id)\n', (1001, 1026), False, 'import os\n'), ((2362, 2413), 'numpy.all', 'np.all', (["(sdcic.img_metrics['sol'][img_id][idx1] >= 0)"], {}), "(sdcic.img_metrics['sol'][img_id][idx1] >= 0)\n", (2368, 2413), True, 'import numpy as np\n'), ((2446, 2497), 'numpy.all', 'np.all', (["(sdcic.img_metrics['sol'][img_id][idx2] >= 0)"], {}), "(sdcic.img_metrics['sol'][img_id][idx2] >= 0)\n", (2452, 2497), True, 'import numpy as np\n'), ((3140, 3203), 'sdcdup.utils.get_hamming_distance', 'get_hamming_distance', (['bmh1', 'bmh2'], {'normalize': '(True)', 'as_score': '(True)'}), '(bmh1, bmh2, normalize=True, as_score=True)\n', (3160, 3203), False, 'from sdcdup.utils import get_hamming_distance\n'), ((3902, 3930), 'operator.attrgetter', 'operator.attrgetter', (['"""score"""'], {}), "('score')\n", (3921, 3930), False, 'import operator\n'), ((8922, 8940), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8938, 8940), True, 'import numpy as np\n'), ((9181, 9199), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (9197, 9199), True, 'import numpy as np\n'), ((3408, 3432), 'sdcdup.utils.fuzzy_join', 'fuzzy_join', (['tile1', 'tile2'], {}), '(tile1, tile2)\n', (3418, 3432), False, 'from sdcdup.utils import fuzzy_join\n'), ((11617, 11655), 'cv2.cvtColor', 'cv2.cvtColor', (['tile1', 'cv2.COLOR_BGR2RGB'], {}), '(tile1, cv2.COLOR_BGR2RGB)\n', (11629, 11655), False, 'import cv2\n'), ((11698, 11736), 'cv2.cvtColor', 'cv2.cvtColor', (['tile2', 'cv2.COLOR_BGR2RGB'], {}), '(tile2, cv2.COLOR_BGR2RGB)\n', (11710, 11736), False, 'import cv2\n'), ((3536, 3566), 'numpy.max', 'np.max', (['(cts3 / (256 * 256 * 3))'], {}), '(cts3 / (256 * 256 * 3))\n', (3542, 3566), 
True, 'import numpy as np\n'), ((10959, 10997), 'os.path.join', 'os.path.join', (['train_image_dir', 'img1_id'], {}), '(train_image_dir, img1_id)\n', (10971, 10997), False, 'import os\n')] |
from typing import Union, Iterable, ValuesView
import numpy as np
import scipy as sp
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.base import BaseEstimator, TransformerMixin
from ..utils import keydefaultdict
from .madscaler import MedianAbsoluteDeviationScaler
from .quantiletransformer import EfficientQuantileTransformer
NORMALIZATION_ = ['std', 'minmax', 'mad', 'percshift']
# NOTE: This will be deprecated, it is confusing
DISTANCE_TO_NORMALIZATION = {
'euclidean': 'std',
'manhattan': 'mad',
'cityblock': 'mad',
'quantile': 'percshift',
}
NORMALIZATION_TO_NAME = {
'percshift': 'Quantile',
'std': 'Standard',
'mad': 'Median Absolute Dev.',
'Mad': 'Mean Absolute De.',
'minmax': 'Min Max',
}
def _get_method_safe(method):
if method is None or method in NORMALIZATION_:
return method
# NOTE: This will be deprecated, it is confusing
elif method in list(DISTANCE_TO_NORMALIZATION.keys()):
return DISTANCE_TO_NORMALIZATION[method]
else:
raise ValueError('Invalid normalization method.')
def get_normalization_name(method):
return NORMALIZATION_TO_NAME[_get_method_safe(method)]
class NullScaler(TransformerMixin, BaseEstimator):
def __init__(self):
pass
def fit(self, X, y=None):
self.scale_ = np.ones(X.shape[1])
return self
def transform(self, X):
return X
def inverse_transform(self, X):
return X
def get_transformer_class(method):
method = _get_method_safe(method)
if method is None:
return NullScaler
elif method == 'std':
return StandardScaler
elif method == 'minmax':
return MinMaxScaler
elif method == 'mad':
return MedianAbsoluteDeviationScaler
elif method == 'percshift':
return EfficientQuantileTransformer # PercentileShifterCached
class Normalizer:
"""Class to support the normalization of the features
Raises:
Exception: If an invalid normalization is used
"""
NORMALIZATION = NORMALIZATION_
def __init__(self, data: np.ndarray):
"""Constructor
Args:
data (pd.DataFrame): A dataframe with the features
"""
self.data = np.asarray(data)
self.transformers = keydefaultdict(lambda method: get_transformer_class(method)().fit(self.data))
def single_transformer(method, f):
return get_transformer_class(method)().fit(self.data[:, f].reshape(-1, 1))
self.single_transformers = keydefaultdict(lambda args: single_transformer(*args))
self.data_transformed = keydefaultdict(lambda method: self.transform(self.data, method))
self.covs = keydefaultdict(lambda method, lib: self.__compute_covariance_matrix(self.data, method, lib))
self.__suppress_warning = False
def suppress_warnings(self, value=True):
self.__suppress_warning = value
def __compute_covariance_matrix(self, data, method, lib):
if lib == 'np':
return sp.linalg.inv(np.cov((self.transform(data, method=method)), rowvar=False))
elif lib == 'tf':
from ..tf import inv_cov as tf_inv_cov
return tf_inv_cov(self.transform(data, method=method))
else:
raise ValueError('Invalid lib.')
def transform(self, data: np.ndarray, method: str, **kwargs):
"""Normalize the data according to the "method" passed
Args:
data (np.ndarray): The data to be normalized (nb_samples x nb_features)
method (str, optional): Normalization ('mad', 'std' or 'minmax'). Defaults to 'std'.
Raises:
ValueError: Invalid normalization
Returns:
np.ndarray: Normalized array
"""
method = _get_method_safe(method)
return self.transformers[method].transform(data)
def inverse_transform(self, data: np.ndarray, method: str = 'std'):
"""Un-normalize the data according to the "method" passes
Args:
data (np.ndarray): The data to be un-normalized (nb_samples x nb_features)
method (str, optional): Normalization ('mad', 'std' or 'minmax'). Defaults to 'std'.
Raises:
ValueError: Invalid normalization
Returns:
np.ndarray: Un-normalized array
"""
method = _get_method_safe(method)
return self.transformers[method].inverse_transform(data)
def feature_deviation(self, method: str = 'std', phi: Union[float, int] = 1):
"""Get the deviation of each feature according to the normalization method
Args:
method (str): method (str, optional): Normalization ('mad', 'std' or 'minmax'). Defaults to 'std'.
phi (Union[float, int]): The fraction of the STD/MAD/MINMAX. Default to 1.
Raises:
ValueError: Invalid normalization
Returns:
np.ndarray: Deviations, shape = (nb_features, )
"""
method = _get_method_safe(method)
transformer = self.transformers[method]
if 'scale_' in dir(transformer):
return transformer.scale_ * phi
else:
return np.ones(self.data.shape[1]) * phi
def feature_transform(self, x: np.ndarray, f: int, method: str):
x = np.asarray(x)
transformer = self.get_feature_transformer(method, f)
return transformer.transform(x.reshape(-1, 1))[:, 0]
def shift_transform(self, X, shifts, method, **kwargs):
transformer = self.get_transformer(method)
if 'shift' in dir(transformer):
return transformer.shift_transform(X, shifts=shifts, **kwargs)
else:
return X + shifts
def move_transform(self, X, costs, method, **kwargs):
transformer = self.get_transformer(method)
assert costs.shape[0] == X.shape[1]
return transformer.inverse_transform(transformer.transform(X) + np.tile(costs, (X.shape[0], 1)))
def get_transformer(self, method: str):
return self.transformers[_get_method_safe(method)]
def get_feature_transformer(self, method: str, f: int):
return self.single_transformers[(_get_method_safe(method), f)]
def single_transform(self, x, *args, **kwargs):
return self.transform(np.array([x]), *args, **kwargs)[0]
def single_inverse_transform(self, x, *args, **kwargs):
return self.inverse_transform(np.array([x]), *args, **kwargs)[0]
def single_shift_transform(self, x, shift, **kwargs):
return self.shift_transform(np.array([x]), np.array([shift]), **kwargs)[0]
# NOTE: This must be deprecated, it does not fit here.
def covariance_matrix(self, data: np.ndarray, method: Union[None, str], lib='np'):
if data is None:
return self.covs[(method, lib)]
else:
return self.__compute_covariance_matrix(data, method, lib)
# NOTE: This must be deprecated, it does not fit here.
def covariance_matrices(self, data: np.ndarray, methods=None, lib='np'):
"""Compute the covariance matrices
Args:
data (np.ndarray): The data from which to extract the covariance
Returns:
Dict[np.ndarray]: Dictionary (for each normalization method) of covariance matrices
"""
# If no method is passed we compute for all of them
if methods is None:
methods = self.NORMALIZATION
return {method: self.covariance_matrix(data, method, lib) for method in methods}
| [
"numpy.array",
"numpy.asarray",
"numpy.tile",
"numpy.ones"
] | [((1345, 1364), 'numpy.ones', 'np.ones', (['X.shape[1]'], {}), '(X.shape[1])\n', (1352, 1364), True, 'import numpy as np\n'), ((2264, 2280), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (2274, 2280), True, 'import numpy as np\n'), ((5329, 5342), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (5339, 5342), True, 'import numpy as np\n'), ((5213, 5240), 'numpy.ones', 'np.ones', (['self.data.shape[1]'], {}), '(self.data.shape[1])\n', (5220, 5240), True, 'import numpy as np\n'), ((5963, 5994), 'numpy.tile', 'np.tile', (['costs', '(X.shape[0], 1)'], {}), '(costs, (X.shape[0], 1))\n', (5970, 5994), True, 'import numpy as np\n'), ((6315, 6328), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (6323, 6328), True, 'import numpy as np\n'), ((6449, 6462), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (6457, 6462), True, 'import numpy as np\n'), ((6579, 6592), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (6587, 6592), True, 'import numpy as np\n'), ((6594, 6611), 'numpy.array', 'np.array', (['[shift]'], {}), '([shift])\n', (6602, 6611), True, 'import numpy as np\n')] |
#%%
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
# %%
# Reset to library defaults, then apply the publication style in one bulk update.
mpl.rcParams.update(mpl.rcParamsDefault)
plt.rcParams.update({
    'font.family': 'sans-serif',
    'figure.subplot.left': 0.22,
    'figure.subplot.right': 0.95,
    'figure.subplot.bottom': 0.20,
    'figure.subplot.top': 0.95,
    'xtick.direction': 'in',
    'ytick.direction': 'in',
    'xtick.major.width': 1.0,
    'ytick.major.width': 1.0,
    'axes.labelsize': 30,
    'font.size': 22,
    'axes.linewidth': 1.2,
    'savefig.dpi': 200,
    'figure.facecolor': 'white',
})
# %%
data = np.loadtxt("core_ESS_ids")
# %%
plt.clf()
n_bins = 100
# Both histograms share every option except the column and the label.
hist_kwargs = dict(bins=n_bins, range=(0.0, 1.0), log=True, alpha=0.9, density=False)
plt.hist(data[:, 2], label=r"$h_B^{\ast}$", **hist_kwargs)
plt.hist(data[:, 4], label=r"$h_G^{\ast}$", **hist_kwargs)
plt.xlim((0.0, 1.0))
plt.xticks([0.0, 0.5, 1.0])
plt.xlabel(r"$h_G^{\ast}, h_B^{\ast}$")
plt.ylabel("frequency")
plt.legend()
# plt.show()
plt.savefig("h_distribution.pdf")
# %%
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.clf",
"matplotlib.rcParams.update",
"matplotlib.pyplot.legend",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel"
] | [((86, 126), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['mpl.rcParamsDefault'], {}), '(mpl.rcParamsDefault)\n', (105, 126), True, 'import matplotlib as mpl\n'), ((691, 717), 'numpy.loadtxt', 'np.loadtxt', (['"""core_ESS_ids"""'], {}), "('core_ESS_ids')\n", (701, 717), True, 'import numpy as np\n'), ((724, 733), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (731, 733), True, 'import matplotlib.pyplot as plt\n'), ((747, 858), 'matplotlib.pyplot.hist', 'plt.hist', (['a[:, 2]'], {'bins': 'n_bins', 'range': '(0.0, 1.0)', 'log': '(True)', 'alpha': '(0.9)', 'density': '(False)', 'label': '"""$h_B^{\\\\ast}$"""'}), "(a[:, 2], bins=n_bins, range=(0.0, 1.0), log=True, alpha=0.9,\n density=False, label='$h_B^{\\\\ast}$')\n", (755, 858), True, 'import matplotlib.pyplot as plt\n'), ((853, 964), 'matplotlib.pyplot.hist', 'plt.hist', (['a[:, 4]'], {'bins': 'n_bins', 'range': '(0.0, 1.0)', 'log': '(True)', 'alpha': '(0.9)', 'density': '(False)', 'label': '"""$h_G^{\\\\ast}$"""'}), "(a[:, 4], bins=n_bins, range=(0.0, 1.0), log=True, alpha=0.9,\n density=False, label='$h_G^{\\\\ast}$')\n", (861, 964), True, 'import matplotlib.pyplot as plt\n'), ((959, 979), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.0, 1.0)'], {}), '((0.0, 1.0))\n', (967, 979), True, 'import matplotlib.pyplot as plt\n'), ((979, 1006), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0.0, 0.5, 1.0]'], {}), '([0.0, 0.5, 1.0])\n', (989, 1006), True, 'import matplotlib.pyplot as plt\n'), ((1005, 1045), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$h_G^{\\\\ast}, h_B^{\\\\ast}$"""'], {}), "('$h_G^{\\\\ast}, h_B^{\\\\ast}$')\n", (1015, 1045), True, 'import matplotlib.pyplot as plt\n'), ((1045, 1068), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {}), "('frequency')\n", (1055, 1068), True, 'import matplotlib.pyplot as plt\n'), ((1069, 1081), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1079, 1081), True, 'import matplotlib.pyplot as plt\n'), ((1095, 1128), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['"""h_distribution.pdf"""'], {}), "('h_distribution.pdf')\n", (1106, 1128), True, 'import matplotlib.pyplot as plt\n')] |
###########################################################################
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###########################################################################
import numpy as np
import csv
import scipy.interpolate as interp
# {{{ GISData
class GISData:
    # {{{ constructor
    """
    Store for all GIS data over time period. This will cache all data in
    memory so we can compute over the entire time period instead of on-demand
    loading of data for each timestep.
    """
    def __init__(self, params):
        """Load every static GIS layer described by ``params``.

        Args:
            params: Nested configuration dict; the 'gis' section provides file
                locations and the 'model' section the simulated time period.
        """
        # data will be organized as a dictionary of dictionaries:
        #
        #   year -> month -> data frame
        #
        self.data = {}
        self.params = params
        self.coordinates = None
        self.waterbodies = None
        self.villages = None
        self.fci = None
        self.paths = {}
        # mean NDVI per cell
        self.mean_ndvi_alltime = {}
        # load data for static features
        self.read_coordinates()
        self.read_fci()
        self.read_fci_new(years=(params['model']['setup']['start_date'].year, params['model']['setup']['end_date'].year))
        self.interpolate_fci_average()
        self.read_static_locations()
        self.read_paths()
    # }}}
    # {{{ load
    def load(self, start_year, start_month, end_year, end_month):
        """
        load all data over specified time period (inclusive of the end month).
        Also accumulates the all-time mean NDVI per cell.
        """
        cur_year = start_year
        cur_month = start_month
        keep_reading = True
        nmonths = 0
        while keep_reading:
            if cur_year == end_year and cur_month == end_month:
                keep_reading = False
            if cur_year not in self.data:
                self.data[cur_year] = {}
            self.data[cur_year][cur_month] = self.read_date(cur_year, cur_month)
            # accumulate NDVI per cell; converted to a mean after the loop
            for cell_id in self.data[cur_year][cur_month]:
                row = self.data[cur_year][cur_month][cell_id]
                if cell_id not in self.mean_ndvi_alltime:
                    self.mean_ndvi_alltime[cell_id] = 0.0
                self.mean_ndvi_alltime[cell_id] += row['mean_ndvi']
            if cur_month == 12:
                cur_month = 1
                cur_year = cur_year + 1
            else:
                cur_month = cur_month + 1
            nmonths = nmonths + 1
        # turn the accumulated sums into per-cell means
        # (renamed loop variable: the original shadowed the builtin `id`)
        for cell_id in self.mean_ndvi_alltime:
            self.mean_ndvi_alltime[cell_id] = self.mean_ndvi_alltime[cell_id] / nmonths
    # }}}
    def csv_helper(self, fname, rowfunc):
        """
        Replacement for pandas data frame code. Pandas CSV import
        is VERY slow and caused substantial performance degradation.

        Returns a dict mapping each CSV column name to a 1-D object array of
        the values produced by ``rowfunc`` for that column.
        """
        # `with` guarantees the handle is closed (the original leaked it).
        with open(fname) as csvfile:
            data = csv.DictReader(csvfile)
            # fieldnames must be read first: accessing it consumes the header
            fields = data.fieldnames
            rows = [rowfunc(row) for row in data.reader]
        a = np.array(rows, dtype=object)
        d = {}
        for i, field in enumerate(fields):
            d[field] = a[:, i]
        return d
    # {{{ read paths
    def read_paths(self):
        """Read precomputed movement paths: origin cell -> list of waypoint lists."""
        with open(self.params['gis']['paths'], 'r') as file:
            reader = csv.reader(file, delimiter=',')
            for row in reader:
                origin = int(row[0])
                # column 1 holds ':'-separated waypoint cell IDs
                waypoints = [origin] + [int(point) for point in row[1].split(':')]
                if origin in self.paths:
                    self.paths[origin].append(waypoints)
                else:
                    self.paths[origin] = [waypoints]
    # }}}
    # {{{ read_date
    def read_date(self, y, m):
        """Read the NDVI / precipitation / water CSVs for year ``y``, month
        ``m`` and merge them into one dict keyed by cell ID."""
        ndvi_root = self.params['gis']['fileroot'] + 'NDVIPolygons/'
        precip_root = self.params['gis']['fileroot'] + 'PrecipitationPolygons/'
        water_root = self.params['gis']['fileroot'] + 'WaterPolygons/'
        ndvi_fname = ndvi_root+'CleanMeanNDVI_{0}-{1:02d}.csv'.format(y,m)
        precip_fname = precip_root+'CleanPrec_{0}-{1:02d}-01.csv'.format(y,m)
        water_fname = water_root+'CleanWater_{0}-{1:02d}-01.csv'.format(y,m)
        def rowfunc(r):
            # strip the two-character 'ID' prefix from the cell identifier
            return (int(r[0][2:]), float(r[1]))
        ndvi_data = self.csv_helper(ndvi_fname, rowfunc)
        precip_data = self.csv_helper(precip_fname, rowfunc)
        water_data = self.csv_helper(water_fname, rowfunc)
        merged = {}
        # (renamed: the original shadowed the builtin `id`)
        ids = ndvi_data['ID']
        ndvi_mean = ndvi_data['mean']
        for i in range(len(ids)):
            merged[ids[i]] = {'mean_ndvi': ndvi_mean[i]}
        ids = precip_data['ID']
        precip_mean = precip_data['mean']
        for i in range(len(ids)):
            merged[ids[i]]['mean_precip'] = precip_mean[i]
        ids = water_data['ID']
        water_intersect = water_data['Intersect']
        for i in range(len(ids)):
            merged[ids[i]]['intersect'] = water_intersect[i]
        return merged
    # }}}
    # {{{ read_coordinates
    def read_coordinates(self):
        """Read the grid-cell table: cell ID -> (Lat, Long)."""
        def rowfunc(r):
            return (int(r[0][2:]), float(r[1]), float(r[2]))
        self.coordinates = self.csv_helper(self.params['gis']['coordinates'], rowfunc)
    # }}}
    # {{{ read_static_locations
    def read_static_locations(self):
        """Read the static village and waterbody location tables."""
        def rowfunc(r):
            return (r[0], int(r[1]))
        self.villages = self.csv_helper(self.params['gis']['villages'], rowfunc)
        self.waterbodies = self.csv_helper(self.params['gis']['waterbodies'],
                                           lambda r: (int(r[0]), float(r[1])))
    # }}}
    # {{{ read_fci
    def read_fci_new(self, years=None):
        """ Read the FCI data from the file specified in the parameter list.
            If we are given a pair of years (year_lo, year_hi), calculate the
            average FCI over the time period (not including year_hi) for each
            cell in the FCI grid. """
        data = {}
        # close the file deterministically (the original leaked the handle)
        with open(self.params['gis']['fcinew'], "r") as fcifile:
            reader = csv.reader(fcifile, delimiter=",")
            rows = list(reader)[1:]  # skip header row
        for row in rows:
            lon = float(row[1])
            lat = float(row[2])
            year = int(row[3])
            # empty fields are treated as 0.0
            fcivals = [float(i) if len(i) > 0 else 0.0 for i in row[4:]]
            if year in data:
                data[year][(lat, lon)] = fcivals
            else:
                data[year] = {(lat, lon): fcivals}
        if years is not None:
            self.fci_averages = {}
            (year_lo, year_hi) = years
            denom = float(year_hi - year_lo)
            # account for missing years for calculating the average
            for year in range(year_lo, year_hi):
                if year not in data:
                    denom = denom - 1
            ave_mult = 1.0 / denom
            for year in range(year_lo, year_hi):
                if year in data:
                    for cell in data[year]:
                        months = data[year][cell]
                        year_average = np.mean(months) * ave_mult
                        if cell not in self.fci_averages:
                            self.fci_averages[cell] = year_average
                        else:
                            self.fci_averages[cell] = self.fci_averages[cell] + year_average
        else:
            self.fci_averages = None
        self.fci = data
    def interpolate_fci_average(self):
        """ Map the FCI average data to the model grid as a dictionary from
            cell ID to average FCI. """
        data = self.fci_averages
        # pull out FCI cell locations and values
        coords = []
        values = []
        for ((lat, lon), value) in data.items():
            coords.append((lat, lon))
            values.append(value)
        coord_array = np.array(coords)
        value_array = np.array(values)
        # interpolate FCI cells to grid cells with linear method
        grid_coords = np.array([(self.coordinates['Lat'][x], self.coordinates['Long'][x]) for x in range(len(self.coordinates['Lat']))])
        fci_interp = interp.griddata(coord_array, value_array, grid_coords, method='linear')
        self.grid_fci_averages = {}
        for i in range(len(self.coordinates['ID'])):
            self.grid_fci_averages[self.coordinates['ID'][i]] = fci_interp[i]
    def get_fci_month(self, year, month):
        """ Get the FCI data for the current month interpolated to the world grid
            and return a map from cell ID to the FCI for the month/year.  Return
            None if the year is not defined.  Assume that if a year is defined we
            have a value for all months. """
        data = self.fci
        # missing year
        if year not in data:
            return None
        # pull out FCI cell locations and values for given year/month
        coords = []
        values = []
        for ((lat, lon), months) in data[year].items():
            coords.append((lat, lon))
            values.append(months[month-1])
        coord_array = np.array(coords)
        value_array = np.array(values)
        # interpolate FCI cells to grid cells with linear method
        grid_coords = np.array([(self.coordinates['Lat'][x], self.coordinates['Long'][x]) for x in range(len(self.coordinates['Lat']))])
        fci_interp = interp.griddata(coord_array, value_array, grid_coords, method='linear')
        result = {}
        for i in range(len(self.coordinates['ID'])):
            result[self.coordinates['ID'][i]] = fci_interp[i]
        return result
    def read_fci(self):
        """Read the legacy FCI table; 'NA' values become None."""
        def rowfunc(r):
            if r[2] == 'NA':
                return (int(r[0]), int(r[1]), None)
            return (int(r[0]), int(r[1]), float(r[2]))
        self.fci = self.csv_helper(self.params['gis']['fci'], rowfunc)
    # }}}
    # {{{ get_date
    def get_date(self, year, month):
        """Return the cached per-cell data frame for (year, month)."""
        return self.data[year][month]
    # }}}
# }}}
| [
"csv.reader",
"numpy.mean",
"numpy.array",
"scipy.interpolate.griddata"
] | [((3601, 3629), 'numpy.array', 'np.array', (['rows'], {'dtype': 'object'}), '(rows, dtype=object)\n', (3609, 3629), True, 'import numpy as np\n'), ((7798, 7814), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (7806, 7814), True, 'import numpy as np\n'), ((7833, 7849), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (7841, 7849), True, 'import numpy as np\n'), ((8061, 8132), 'scipy.interpolate.griddata', 'interp.griddata', (['coord_array', 'value_array', 'grid_coords'], {'method': '"""linear"""'}), "(coord_array, value_array, grid_coords, method='linear')\n", (8076, 8132), True, 'import scipy.interpolate as interp\n'), ((8929, 8945), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (8937, 8945), True, 'import numpy as np\n'), ((8964, 8980), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (8972, 8980), True, 'import numpy as np\n'), ((9192, 9263), 'scipy.interpolate.griddata', 'interp.griddata', (['coord_array', 'value_array', 'grid_coords'], {'method': '"""linear"""'}), "(coord_array, value_array, grid_coords, method='linear')\n", (9207, 9263), True, 'import scipy.interpolate as interp\n'), ((3832, 3863), 'csv.reader', 'csv.reader', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (3842, 3863), False, 'import csv\n'), ((7111, 7126), 'numpy.mean', 'np.mean', (['months'], {}), '(months)\n', (7118, 7126), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
"""Test smooth2D function."""
# --- import --------------------------------------------------------------------------------------
import WrightTools as wt
import numpy as np
# --- tests ---------------------------------------------------------------------------------------
def test_default():
    """zoom2D with default factors triples both axes and interpolates z."""
    axis = np.linspace(-10, 10, 100)
    grid = np.sin(axis[:, None]) * np.cos(axis[None, :])
    xo, yo, zo = wt.kit.zoom2D(axis, axis, grid)
    expected = np.sin(xo[:, None]) * np.cos(yo[None, :])
    assert xo.shape == (300,)
    assert yo.shape == (300,)
    assert zo.shape == (300, 300)
    # all values within 1 percent of "actual"
    assert np.isclose(zo, expected, 0.01).all()
def test_non_default():
    """zoom2D with explicit per-axis factors (2x in x, 4x in y)."""
    axis = np.linspace(-10, 10, 100)
    grid = np.sin(axis[:, None]) * np.cos(axis[None, :])
    xo, yo, zo = wt.kit.zoom2D(axis, axis, grid, 2, 4)
    expected = np.sin(xo[:, None]) * np.cos(yo[None, :])
    assert xo.shape == (200,)
    assert yo.shape == (400,)
    assert zo.shape == (200, 400)
    # all values within 1 percent of "actual"
    assert np.isclose(zo, expected, 0.01).all()
# --- run -----------------------------------------------------------------------------------------
if __name__ == "__main__":
    # run the test suite directly when invoked as a script
    for case in (test_default, test_non_default):
        case()
| [
"WrightTools.kit.zoom2D",
"numpy.isclose",
"numpy.sin",
"numpy.cos",
"numpy.linspace"
] | [((333, 358), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(100)'], {}), '(-10, 10, 100)\n', (344, 358), True, 'import numpy as np\n'), ((368, 393), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(100)'], {}), '(-10, 10, 100)\n', (379, 393), True, 'import numpy as np\n'), ((464, 489), 'WrightTools.kit.zoom2D', 'wt.kit.zoom2D', (['xi', 'yi', 'zi'], {}), '(xi, yi, zi)\n', (477, 489), True, 'import WrightTools as wt\n'), ((768, 793), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(100)'], {}), '(-10, 10, 100)\n', (779, 793), True, 'import numpy as np\n'), ((803, 828), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(100)'], {}), '(-10, 10, 100)\n', (814, 828), True, 'import numpy as np\n'), ((899, 930), 'WrightTools.kit.zoom2D', 'wt.kit.zoom2D', (['xi', 'yi', 'zi', '(2)', '(4)'], {}), '(xi, yi, zi, 2, 4)\n', (912, 930), True, 'import WrightTools as wt\n'), ((404, 423), 'numpy.sin', 'np.sin', (['xi[:, None]'], {}), '(xi[:, None])\n', (410, 423), True, 'import numpy as np\n'), ((426, 445), 'numpy.cos', 'np.cos', (['yi[None, :]'], {}), '(yi[None, :])\n', (432, 445), True, 'import numpy as np\n'), ((504, 523), 'numpy.sin', 'np.sin', (['xo[:, None]'], {}), '(xo[:, None])\n', (510, 523), True, 'import numpy as np\n'), ((526, 545), 'numpy.cos', 'np.cos', (['yo[None, :]'], {}), '(yo[None, :])\n', (532, 545), True, 'import numpy as np\n'), ((660, 688), 'numpy.isclose', 'np.isclose', (['zo', 'zcheck', '(0.01)'], {}), '(zo, zcheck, 0.01)\n', (670, 688), True, 'import numpy as np\n'), ((839, 858), 'numpy.sin', 'np.sin', (['xi[:, None]'], {}), '(xi[:, None])\n', (845, 858), True, 'import numpy as np\n'), ((861, 880), 'numpy.cos', 'np.cos', (['yi[None, :]'], {}), '(yi[None, :])\n', (867, 880), True, 'import numpy as np\n'), ((945, 964), 'numpy.sin', 'np.sin', (['xo[:, None]'], {}), '(xo[:, None])\n', (951, 964), True, 'import numpy as np\n'), ((967, 986), 'numpy.cos', 'np.cos', (['yo[None, :]'], {}), '(yo[None, :])\n', (973, 986), True, 'import numpy as 
np\n'), ((1101, 1129), 'numpy.isclose', 'np.isclose', (['zo', 'zcheck', '(0.01)'], {}), '(zo, zcheck, 0.01)\n', (1111, 1129), True, 'import numpy as np\n')] |
'''
This environment describe a fixed scene (area) to conduct end-to-end lateral control tasks
for the autonomous ego vehicle. (This environment is relative simple and is only for training)
'''
import pygame
import weakref
import collections
import numpy as np
import math
import cv2
import sys
'''
Add your path of the CARLA simulator below.
This script was originally run with CARLA(0.9.7), some functions (e.g., carla.set_velocity()) have been removed in the newer CARLA.
Please refer to CARLA official documents for detail if you want to run the script with a different version.
'''
sys.path.append('xxx/carla-0.9.X-py3.X-linux-x86_64.egg')
import carla
from carla import ColorConverter as cc
if sys.version_info >= (3, 0):
from configparser import ConfigParser
else:
from ConfigParser import RawConfigParser as ConfigParser
from utils import get_path
# Maps a longitudinal position (y) to the reference lateral position (x) of
# the driving path (see utils.get_path).
path_generator = get_path()
# Target longitudinal speed of the ego vehicle (presumably m/s — TODO confirm).
velocity_target_ego = 5
# Reference spawn coordinates of the ego vehicle in the Town01 map frame.
x_bench = 335.0
y_bench = 200.0
# Width/height (pixels) of the down-scaled semantic observation fed to the agent.
WIDTH, HEIGHT = 80, 45
class scenario(object):
    def __init__(self, random_spawn=True, pedestrian=False, no_render=False, frame=25):
        """Connect to CARLA, build the world, and initialize pygame/joystick.

        Args:
            random_spawn (bool): If True, the ego vehicle is respawned at a
                random longitudinal position on each episode.
            pedestrian (bool): If True, two walkers are spawned each episode.
            no_render (bool): Enable CARLA's no-rendering mode.
            frame (int): Simulation frame rate; the fixed timestep is 1/frame.
        """
        self.observation_size_width = WIDTH
        self.observation_size_height = HEIGHT
        self.observation_size = WIDTH * HEIGHT
        self.action_size = 1
        ## set the carla World parameters
        self.pedestrian = pedestrian
        self.random_spawn = random_spawn
        self.no_render = no_render
        ## set the vehicle actors
        self.ego_vehicle = None
        self.obs1 = None
        self.obs2 = None
        self.obs3 = None
        ## set the sensory actors
        self.collision_sensor = None
        self.seman_camera = None
        self.viz_camera = None
        self.surface = None
        # latest semantic camera frame (filled by _parse_seman_image)
        self.camera_output = np.zeros([720,1280,3])
        self.recording = False
        self.Attachment = carla.AttachmentType
        ## connect to the CARLA client
        self.client = carla.Client('localhost',2000)
        self.client.set_timeout(10.0)
        ## build the CARLA world
        self.world = self.client.load_world('Town01')
        settings = self.world.get_settings()
        # synchronous mode with a fixed timestep keeps control and sensors in lockstep
        settings.fixed_delta_seconds = 1/frame
        settings.no_rendering_mode = self.no_render
        settings.synchronous_mode = True
        self.world.apply_settings(settings)
        ## initialize the pygame settings
        pygame.init()
        pygame.font.init()
        pygame.joystick.init()
        self.display = pygame.display.set_mode((1280, 720),pygame.HWSURFACE | pygame.DOUBLEBUF)
        self.infoObject = pygame.display.Info()
        ## initilize the joystick settings
        self._joystick = pygame.joystick.Joystick(0)
        self._joystick.init()
        # axis/button indices are read from the steering-wheel config file
        self._parser = ConfigParser()
        self._parser.read('./wheel_config.ini')
        self._steer_idx = int(self._parser.get('G29 Racing Wheel', 'steering_wheel'))
        self._throttle_idx = int(self._parser.get('G29 Racing Wheel', 'throttle'))
        self._brake_idx = int(self._parser.get('G29 Racing Wheel', 'brake'))
        self._reverse_idx = int(self._parser.get('G29 Racing Wheel', 'reverse'))
        self._handbrake_idx = int(self._parser.get('G29 Racing Wheel', 'handbrake'))
        self.restart()
    def restart(self):
        """Reset the episode.

        Respawns the three surrounding vehicles (and, optionally, two
        pedestrians), respawns the ego vehicle at a fixed or random position,
        and re-creates the collision sensor and the two cameras attached to
        the ego vehicle.
        """
        ## reset the recording lists
        self.steer_history = []
        self.intervene_history = []
        ## reset the human intervention state
        self.intervention = False
        ## spawn three surrounding vehicles
        self.bp_obs1, self.spawn_point_obs1 = self._produce_vehicle_blueprint(1, 335.0+3.5, 100.0)
        self.obs1 = self.world.spawn_actor(self.bp_obs1,self.spawn_point_obs1)
        self.bp_obs2, self.spawn_point_obs2 = self._produce_vehicle_blueprint(1, 335.0, 200.0+25.0)
        self.obs2 = self.world.spawn_actor(self.bp_obs2,self.spawn_point_obs2)
        self.bp_obs3, self.spawn_point_obs3 = self._produce_vehicle_blueprint(1, 335.0+3.5, 200.0+50.0)
        self.obs3 = self.world.spawn_actor(self.bp_obs3,self.spawn_point_obs3)
        ## if pedestrians are considered, spawn two persons
        if self.pedestrian:
            self.bp_walker1, self.spawn_point_walker1 = self._produce_walker_blueprint(338.0, 200+np.random.randint(10,15))
            self.bp_walker2, self.spawn_point_walker2 = self._produce_walker_blueprint(np.random.randint(3310,3350)/10, 235)
            self.walker1 = self.world.spawn_actor(self.bp_walker1, self.spawn_point_walker1)
            self.walker2 = self.world.spawn_actor(self.bp_walker2, self.spawn_point_walker2)
            walker1_control = carla.WalkerControl()
            walker1_control.speed = 0.1
            self.walker1.apply_control(walker1_control)
            walker2_control = carla.WalkerControl()
            walker2_control.speed = 0.1
            self.walker2.apply_control(walker2_control)
        ## spawn the ego vehicle (random / fixed)
        if self.random_spawn:
            # random longitudinal position plus a small lateral disturbance
            # around the reference path
            y_spawn_random = np.random.randint(200, 240)
            random_lateral_disturb = 0.1 * (np.random.rand()-0.5)
            x_spwan_random = path_generator(y_spawn_random) + random_lateral_disturb
            self.bp_ego, self.spawn_point_ego = self._produce_vehicle_blueprint(1, x_spwan_random, y_spawn_random)
        else:
            self.bp_ego, self.spawn_point_ego = self._produce_vehicle_blueprint(1 , x_bench, y_bench)
        self.ego_vehicle = self.world.spawn_actor(self.bp_ego,self.spawn_point_ego)
        # set the initial velocity of the ego vehicle
        initial_velocity = carla.Vector3D(0, velocity_target_ego, 0)
        self.ego_vehicle.set_velocity(initial_velocity)
        # initilize the control variable for the ego vehicle
        self.control = carla.VehicleControl()
        ## configurate and spawn the collision sensor
        # clear the collision history list
        self.collision_history = []
        bp_collision = self.world.get_blueprint_library().find('sensor.other.collision')
        # spawn the collision sensor actor
        if self.collision_sensor is not None:
            self.collision_sensor.destroy()
        self.collision_sensor = self.world.spawn_actor(
            bp_collision, carla.Transform(), attach_to=self.ego_vehicle)
        # obtain the collision signal and append to the history list
        # (weakref avoids a reference cycle between sensor callback and self)
        weak_self = weakref.ref(self)
        self.collision_sensor.listen(lambda event: scenario._on_collision(weak_self, event))
        ## configurate and spawn the camera sensors
        # the candidated transform of camera's position: frontal
        self.camera_transforms = [
            (carla.Transform(carla.Location(x=-2, z=5), carla.Rotation(pitch=30.0)), self.Attachment.SpringArm),
            (carla.Transform(carla.Location(x=-2, z=5), carla.Rotation(pitch=30.0)), self.Attachment.SpringArm)]
        self.camera_transform_index = 1
        # the candidated camera type: rgb (viz_camera) and semantic (seman_camera)
        self.cameras = [
            ['sensor.camera.rgb', cc.Raw, 'Camera RGB', {}],
            ['sensor.camera.semantic_segmentation', cc.CityScapesPalette,
                'Camera Semantic Segmentation (CityScapes Palette)', {}]
        ]
        bp_viz_camera = self.world.get_blueprint_library().find('sensor.camera.rgb')
        bp_viz_camera.set_attribute('image_size_x', '1280')
        bp_viz_camera.set_attribute('image_size_y', '720')
        bp_viz_camera.set_attribute('sensor_tick', '0.02')
        self.cameras[0].append(bp_viz_camera)
        bp_seman_camera = self.world.get_blueprint_library().find('sensor.camera.semantic_segmentation')
        bp_seman_camera.set_attribute('image_size_x', '1280')
        bp_seman_camera.set_attribute('image_size_y', '720')
        bp_seman_camera.set_attribute('sensor_tick', '0.04')
        self.cameras[1].append(bp_seman_camera)
        # spawn the camera actors
        if self.seman_camera is not None:
            self.seman_camera.destroy()
            self.surface = None
        self.viz_camera = self.world.spawn_actor(
            self.cameras[0][-1],
            self.camera_transforms[self.camera_transform_index][0],
            attach_to=self.ego_vehicle,
            attachment_type=self.Attachment.SpringArm)
        self.seman_camera = self.world.spawn_actor(
            self.cameras[1][-1],
            self.camera_transforms[self.camera_transform_index - 1][0],
            attach_to=self.ego_vehicle,
            attachment_type=self.camera_transforms[self.camera_transform_index - 1][1])
        # obtain the camera image
        weak_self = weakref.ref(self)
        self.seman_camera.listen(lambda image: scenario._parse_seman_image(weak_self, image))
        self.viz_camera.listen(lambda image: scenario._parse_image(weak_self, image))
        ## reset the step counter
        self.count = 0
def render(self, display):
if self.surface is not None:
m = pygame.transform.smoothscale(self.surface,
[int(self.infoObject.current_w),
int(self.infoObject.current_h)])
display.blit(m, (0, 0))
def _parse_seman_image(weak_self, image):
self = weak_self()
if not self:
return
image.convert(self.cameras[1][1])
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = np.array(image.raw_data)
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
self.camera_output = array
def _parse_image(weak_self, image):
self = weak_self()
if not self:
return
image.convert(self.cameras[0][1])
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = np.array(image.raw_data)
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
def _on_collision(weak_self, event):
self = weak_self()
if not self:
return
impulse = event.normal_impulse
intensity = math.sqrt(impulse.x**2 + impulse.y**2 + impulse.z**2)
self.collision_history.append((event.frame, intensity))
if len(self.collision_history) > 4000:
self.collision_history.pop(0)
def get_collision_history(self):
collision_history = collections.defaultdict(int)
flag = 0
for frame, intensity in self.collision_history:
collision_history[frame] += intensity
if intensity != 0:
flag = 1
return collision_history, flag
    def run_step(self,action):
        """Advance the episode by one control step.

        Renders the display, arbitrates between the agent's steering action
        and human joystick intervention, applies the control command, and
        computes the reward and termination flags.

        Args:
            action: Agent steering action (scalar; halved before the tan map).
        Returns:
            tuple: (next_states, human_control, reward, intervention, done,
            physical_variables) — human_control is None when no human
            intervention occurred during this step.
        """
        self.render(self.display)
        pygame.display.flip()
        self.parse_events()
        human_control = None
        # retrive the signals from the joystick (steering wheel)
        numAxes = self._joystick.get_numaxes()
        jsInputs = [float(self._joystick.get_axis(i)) for i in range(numAxes)]
        ## if no human intervention
        if not self.intervention:
            steerCmd = action / 2
            self.control.steer = math.tan(1.1 * steerCmd)
        ## if intervention detected, convert the joystick signal to the steering command
        else:
            K1 = 1
            steerCmd = K1 * (2 * jsInputs[self._steer_idx])
            self.control.steer = steerCmd
            human_control = self.control.steer
        ## detect the intervention signal
        if len(self.steer_history) > 2:
            # the intervention is activated if human participants move the joystick
            if abs(self.intervene_history[-2] - self.intervene_history[-1]) > 0.02:
                self.intervention = True
        if len(self.steer_history) > 5:
            # the intervention is deactivated if the joystick continue to be stable for 0.2 seconds
            if abs(self.intervene_history[-5] - self.intervene_history[-1]) < 0.01:
                self.intervention = False
        ## record the intervention histroy (get "None" for non-intervened steps)
        self.intervene_history.append(jsInputs[0])
        # record the steering command history
        self.steer_history.append(steerCmd)
        ## configurate the control command for the ego vehicle
        # the velocity is calculated as :sqrt(vx**2+vy**2)
        velocity_ego = ((self.ego_vehicle.get_velocity().x)**2 + (self.ego_vehicle.get_velocity().y)**2)**(1/2)
        # the longitudinal control (throttle) of the ego vehicle is achieved by a proportional controller
        self.control.throttle = np.clip (velocity_target_ego - velocity_ego, 0, 1)
        self.control.brake = 0
        self.control.hand_brake = 0
        ## achieve the control to the ego vehicle
        self.ego_vehicle.apply_control(self.control)
        ## obtain the state transition and other variables after taking the action (control command)
        next_states, other_indicators = self.obtain_observation()
        ## detect if the step is the terminated step, by considering: collision, beyond the road, and episode fininsh
        collision = self.get_collision_history()[1]
        finish = (self.ego_vehicle.get_location().y > y_bench + 55.0)
        beyond = (self.ego_vehicle.get_location().x < x_bench - 1.2) or (self.ego_vehicle.get_location().x > x_bench + 4.8)
        done = collision or finish or beyond
        ## calculate the relative distance to the surrounding vehicles for the subsequent reward function
        dis_to_front = other_indicators['state_front']
        dis_to_side = min(other_indicators['state_left'],other_indicators['state_right'])
        dis_to_obs11 = other_indicators['state_corner_11']
        dis_to_obs12 = other_indicators['state_corner_12']
        dis_to_obs21 = other_indicators['state_corner_21']
        dis_to_obs22 = other_indicators['state_corner_22']
        dis_to_obs31 = other_indicators['state_corner_31']
        dis_to_obs32 = other_indicators['state_corner_32']
        ## calculate the reward signal of the step: r1-r3 distance reward, r4 terminal reward, r5-r6 smooth reward
        r1 = -1*np.square(1-dis_to_front)
        r2 = -2*np.square(1-dis_to_side)
        r3 = - (np.abs(1-dis_to_obs11)+np.abs(1-dis_to_obs12)+np.abs(1-dis_to_obs21)+np.abs(1-dis_to_obs22)+np.abs(1-dis_to_obs31)+np.abs(1-dis_to_obs32))
        r4 = finish*10 - collision*10 - beyond*10
        # NOTE(review): steer_history[-1] is the steerCmd appended just above,
        # so this smoothness term is always 0 — likely [-2] was intended; confirm.
        r5= -np.float32(abs(self.steer_history[-1]-steerCmd)>0.1)
        r6 = -3*abs(steerCmd)
        reward = r1+r2+r3+r4+r5+r6+0.2
        reward = np.clip(reward,-10,10)
        ## update the epsodic step
        self.count += 1
        ## record the physical variables
        # yaw rate only defined while moving forward (vy > 0); 0 otherwise
        yaw_rate = np.arctan(self.ego_vehicle.get_velocity().x/self.ego_vehicle.get_velocity().y) if self.ego_vehicle.get_velocity().y > 0 else 0
        physical_variables = {'velocity_y':self.ego_vehicle.get_velocity().y,
                              'velocity_x':self.ego_vehicle.get_velocity().x,
                              'position_y':self.ego_vehicle.get_location().y,
                              'position_x':self.ego_vehicle.get_location().x,
                              'yaw_rate':yaw_rate,
                              'yaw':self.ego_vehicle.get_transform().rotation.yaw,
                              'pitch':self.ego_vehicle.get_transform().rotation.pitch,
                              'roll':self.ego_vehicle.get_transform().rotation.roll,
                              'angular_velocity_y':self.ego_vehicle.get_angular_velocity().y,
                              'angular_velocity_x':self.ego_vehicle.get_angular_velocity().x
                              }
        if done:
            self.destroy()
        return next_states, human_control, reward, self.intervention, done, physical_variables
def destroy(self):
actors = [
self.ego_vehicle,
self.obs1,
self.obs2,
self.obs3,
self.seman_camera,
self.viz_camera,
self.collision_sensor]
self.seman_camera.stop()
self.viz_camera.stop()
for actor in actors:
if actor is not None:
actor.destroy()
    def obtain_observation(self):
        """Build the image-based state vector and reward-related indicators.

        Returns:
            state_space: first channel of the camera frame, resized to
                (WIDTH, HEIGHT), flattened to ``observation_size`` entries
                and scaled to [0, 1] by dividing by 255.
            other_indicators: dict of sigmoid-squashed relative distances
                (road sides, vehicle ahead, obstacle corners) consumed by the
                reward computation in the step function.
        """
        ## obtain image-based state space
        # state variable sets
        state_space = self.camera_output[:,:,0]
        # WIDTH/HEIGHT are module-level constants — assumed to match
        # observation_size = WIDTH * HEIGHT; TODO confirm.
        state_space = cv2.resize(state_space,(WIDTH, HEIGHT))
        state_space = np.resize(state_space,(self.observation_size, 1))
        state_space = np.squeeze(state_space)/255
        ## obtain space variables for reward generation
        # NOTE(review): velocity_self is computed but never used below.
        velocity_self = self.ego_vehicle.get_velocity()
        position_self = self.ego_vehicle.get_location()
        yaw_self = self.ego_vehicle.get_transform().rotation.yaw
        position_obs1 = self.obs1.get_location()
        position_obs2 = self.obs2.get_location()
        position_obs3 = self.obs3.get_location()
        ## obtain relative distance information for reward generation
        # pre-calculated parameters
        # Corner coordinates of the ego bounding box from centre + heading.
        xa,ya,xb,yb,xc,yc,xd,yd = self._to_corner_coordinate(position_self.x,position_self.y,yaw_self)
        # NOTE(review): xfc/yfc (front-centre midpoint) are unused below.
        xfc = (xa+xb)/2
        yfc = (ya+yb)/2
        # Hard-coded map-frame corner coordinates of the three parked obstacles.
        xa1,ya1,xb1,yb1,xc1,yc1,xd1,yd1 = 337.4,202.4,339.6,202.4,339.6,197.6,337.4,197.6
        xa2,ya2,xb2,yb2,xc2,yc2,xd2,yd2 = 333.9,227.4,336.1,227.4,336.1,222.6,333.9,222.6
        xa3,ya3,xb3,yb3,xc3,yc3,xd3,yd3 = 337.4,252.4,339.6,252.4,339.6,247.6,337.4,247.6
        # relative distance from ego vehicle to obstacle 1 (corner distance)
        # Corner distances only matter while the ego is within +/-4 m of the
        # obstacle along y; otherwise the indicator saturates at 1.
        if position_obs1.y - 4 < position_self.y < position_obs1.y + 4:
            state_corner_11 = self._sigmoid(np.clip(abs(xa1-xa),0,10),2.5)
            state_corner_12 = self._sigmoid(np.clip(abs(xa1-xb),0,10),2.5)
        else:
            state_corner_11 = 1
            state_corner_12 = 1
        # relative distance from ego vehicle to obstacle 2 (corner distance)
        if position_obs2.y - 4 < position_self.y < position_obs2.y + 4:
            state_corner_21 = self._sigmoid(np.clip(abs(xb2-xa),0,10),2.5)
            state_corner_22 = self._sigmoid(np.clip(abs(xb2-xb),0,10),2.5)
        else:
            state_corner_21 = 1
            state_corner_22 = 1
        # relative distance from ego vehicle to obstacle 3 (corner distance)
        if position_obs3.y - 4 < position_self.y < position_obs3.y + 4:
            state_corner_31 = self._sigmoid(np.clip(abs(xa3-xa),0,10),2.5)
            state_corner_32 = self._sigmoid(np.clip(abs(xa3-xb),0,10),2.5)
        else:
            state_corner_31 = 1
            state_corner_32 = 1
        # relative distance to both sides of road
        # Road edges appear to lie at x = 332 and x = 340 in map frame.
        state_left = self._sigmoid(np.clip(340-xb,0,10),2)
        state_right = self._sigmoid(np.clip(xb-332,0,10),2)
        # relative distance front
        # x_bench / y_bench are defined outside this method — presumably the
        # scenario's reference position; TODO confirm.
        RIGHT = 1 if position_self.x < x_bench + 1.8 else 0
        if RIGHT:
            if position_self.y < y_bench + 25.0:
                state_front = np.clip(yc2 - position_self.y - 2.6, 0, 25)
                state_front = self._sigmoid(state_front,1)
            else:
                state_front = 1
        else:
            state_front = np.clip(yc3 - position_self.y - 2.4, 0,25)
            state_front = self._sigmoid(state_front,1)
        # other indicators facilitating producing reward function signal
        other_indicators = {'state_front':state_front,
                            'state_left':state_left,
                            'state_right':state_right,
                            'state_corner_11':state_corner_11,
                            'state_corner_12':state_corner_12,
                            'state_corner_21':state_corner_21,
                            'state_corner_22':state_corner_22,
                            'state_corner_31':state_corner_31,
                            'state_corner_32':state_corner_32}
        return state_space, other_indicators
def obtain_real_observation(self):
state_space = self.camera_output[:,:,0]
return state_space
    def parse_events(self):
        """Poll pygame for joystick button presses and apply their effects.

        Button 0 clears the human-intervention flag, the configured reverse
        button flips the gear sign, button 1 cycles the camera view, and
        button 2 switches to the next sensor.
        """
        for event in pygame.event.get():
            if event.type == pygame.JOYBUTTONDOWN:
                if event.button == 0:
                    # Hand control back to the autonomous agent.
                    self.intervention = False
                elif event.button == self._reverse_idx:
                    # Toggle between forward (1) and reverse (-1) gear.
                    self.control.gear = 1 if self.control.reverse else -1
                elif event.button == 1:
                    self._toggle_camera()
                elif event.button == 2:
                    self._next_sensor()
def _produce_vehicle_blueprint(self, color, x, y, vehicle='bmw'):
if vehicle=='bmw':
bp = self.world.get_blueprint_library().filter('vehicle.bmw.*')[0]
elif vehicle=='moto':
bp = self.world.get_blueprint_library().filter('vehicle.harley-davidson.*')[0]
elif vehicle=='bike':
bp = self.world.get_blueprint_library().filter('vehicle.diamondback.century.*')[0]
elif vehicle=='bus':
bp = self.world.get_blueprint_library().filter('vehicle.volkswagen.*')[0]
else:
bp = self.world.get_blueprint_library().filter('vehicle.lincoln.*')[0]
bp.set_attribute('color', bp.get_attribute('color').recommended_values[color])
spawn_point = self.world.get_map().get_spawn_points()[0]
spawn_point.location.x = x
spawn_point.location.y = y
spawn_point.location.z += 0.1
return bp, spawn_point
def _produce_walker_blueprint(self, x, y):
bp = self.world.get_blueprint_library().filter('walker.*')[np.random.randint(2)]
spawn_point = self.world.get_map().get_spawn_points()[0]
spawn_point.location.x = x
spawn_point.location.y = y
spawn_point.location.z += 0.1
spawn_point.rotation.yaw = 0
return bp, spawn_point
def _toggle_camera(self):
self.camera_transform_index = (self.camera_transform_index + 1) % len(self.camera_transforms)
def _next_sensor(self):
self.camera_index += 1
def _dis_p_to_l(self,k,b,x,y):
dis = abs((k*x-y+b)/math.sqrt(k*k+1))
return self._sigmoid(dis,2)
def _calculate_k_b(self,x1,y1,x2,y2):
k = (y1-y2)/(x1-x2)
b = (x1*y2-x2*y1)/(x1-x2)
return k,b
def _dis_p_to_p(self,x1,y1,x2,y2):
return math.sqrt((x1-x2)**2+(y1-y2)**2)
def _to_corner_coordinate(self,x,y,yaw):
xa = x+2.64*math.cos(yaw*math.pi/180-0.43)
ya = y+2.64*math.sin(yaw*math.pi/180-0.43)
xb = x+2.64*math.cos(yaw*math.pi/180+0.43)
yb = y+2.64*math.cos(yaw*math.pi/180+0.43)
xc = x+2.64*math.cos(yaw*math.pi/180-0.43+math.pi)
yc = y+2.64*math.cos(yaw*math.pi/180-0.43+math.pi)
xd = x+2.64*math.cos(yaw*math.pi/180+0.43+math.pi)
yd = y+2.64*math.cos(yaw*math.pi/180+0.43+math.pi)
return xa,ya,xb,yb,xc,yc,xd,yd
def _sigmoid(self,x,theta):
return 2./(1+math.exp(-theta*x))-1
| [
"numpy.abs",
"numpy.resize",
"pygame.event.get",
"numpy.clip",
"collections.defaultdict",
"pygame.font.init",
"numpy.random.randint",
"pygame.display.Info",
"carla.Client",
"weakref.ref",
"carla.Vector3D",
"sys.path.append",
"pygame.display.set_mode",
"numpy.reshape",
"math.cos",
"pyga... | [((590, 647), 'sys.path.append', 'sys.path.append', (['"""xxx/carla-0.9.X-py3.X-linux-x86_64.egg"""'], {}), "('xxx/carla-0.9.X-py3.X-linux-x86_64.egg')\n", (605, 647), False, 'import sys\n'), ((887, 897), 'utils.get_path', 'get_path', ([], {}), '()\n', (895, 897), False, 'from utils import get_path\n'), ((1749, 1773), 'numpy.zeros', 'np.zeros', (['[720, 1280, 3]'], {}), '([720, 1280, 3])\n', (1757, 1773), True, 'import numpy as np\n'), ((1912, 1943), 'carla.Client', 'carla.Client', (['"""localhost"""', '(2000)'], {}), "('localhost', 2000)\n", (1924, 1943), False, 'import carla\n'), ((2365, 2378), 'pygame.init', 'pygame.init', ([], {}), '()\n', (2376, 2378), False, 'import pygame\n'), ((2387, 2405), 'pygame.font.init', 'pygame.font.init', ([], {}), '()\n', (2403, 2405), False, 'import pygame\n'), ((2414, 2436), 'pygame.joystick.init', 'pygame.joystick.init', ([], {}), '()\n', (2434, 2436), False, 'import pygame\n'), ((2460, 2533), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(1280, 720)', '(pygame.HWSURFACE | pygame.DOUBLEBUF)'], {}), '((1280, 720), pygame.HWSURFACE | pygame.DOUBLEBUF)\n', (2483, 2533), False, 'import pygame\n'), ((2559, 2580), 'pygame.display.Info', 'pygame.display.Info', ([], {}), '()\n', (2578, 2580), False, 'import pygame\n'), ((2650, 2677), 'pygame.joystick.Joystick', 'pygame.joystick.Joystick', (['(0)'], {}), '(0)\n', (2674, 2677), False, 'import pygame\n'), ((2744, 2758), 'ConfigParser.RawConfigParser', 'ConfigParser', ([], {}), '()\n', (2756, 2758), True, 'from ConfigParser import RawConfigParser as ConfigParser\n'), ((5567, 5608), 'carla.Vector3D', 'carla.Vector3D', (['(0)', 'velocity_target_ego', '(0)'], {}), '(0, velocity_target_ego, 0)\n', (5581, 5608), False, 'import carla\n'), ((5750, 5772), 'carla.VehicleControl', 'carla.VehicleControl', ([], {}), '()\n', (5770, 5772), False, 'import carla\n'), ((6352, 6369), 'weakref.ref', 'weakref.ref', (['self'], {}), '(self)\n', (6363, 6369), False, 'import weakref\n'), 
((8631, 8648), 'weakref.ref', 'weakref.ref', (['self'], {}), '(self)\n', (8642, 8648), False, 'import weakref\n'), ((9444, 9468), 'numpy.array', 'np.array', (['image.raw_data'], {}), '(image.raw_data)\n', (9452, 9468), True, 'import numpy as np\n'), ((9485, 9534), 'numpy.reshape', 'np.reshape', (['array', '(image.height, image.width, 4)'], {}), '(array, (image.height, image.width, 4))\n', (9495, 9534), True, 'import numpy as np\n'), ((9887, 9911), 'numpy.array', 'np.array', (['image.raw_data'], {}), '(image.raw_data)\n', (9895, 9911), True, 'import numpy as np\n'), ((9928, 9977), 'numpy.reshape', 'np.reshape', (['array', '(image.height, image.width, 4)'], {}), '(array, (image.height, image.width, 4))\n', (9938, 9977), True, 'import numpy as np\n'), ((10296, 10355), 'math.sqrt', 'math.sqrt', (['(impulse.x ** 2 + impulse.y ** 2 + impulse.z ** 2)'], {}), '(impulse.x ** 2 + impulse.y ** 2 + impulse.z ** 2)\n', (10305, 10355), False, 'import math\n'), ((10570, 10598), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (10593, 10598), False, 'import collections\n'), ((10901, 10922), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (10920, 10922), False, 'import pygame\n'), ((12813, 12862), 'numpy.clip', 'np.clip', (['(velocity_target_ego - velocity_ego)', '(0)', '(1)'], {}), '(velocity_target_ego - velocity_ego, 0, 1)\n', (12820, 12862), True, 'import numpy as np\n'), ((14792, 14816), 'numpy.clip', 'np.clip', (['reward', '(-10)', '(10)'], {}), '(reward, -10, 10)\n', (14799, 14816), True, 'import numpy as np\n'), ((16526, 16566), 'cv2.resize', 'cv2.resize', (['state_space', '(WIDTH, HEIGHT)'], {}), '(state_space, (WIDTH, HEIGHT))\n', (16536, 16566), False, 'import cv2\n'), ((16588, 16638), 'numpy.resize', 'np.resize', (['state_space', '(self.observation_size, 1)'], {}), '(state_space, (self.observation_size, 1))\n', (16597, 16638), True, 'import numpy as np\n'), ((20248, 20266), 'pygame.event.get', 'pygame.event.get', ([], {}), 
'()\n', (20264, 20266), False, 'import pygame\n'), ((22566, 22608), 'math.sqrt', 'math.sqrt', (['((x1 - x2) ** 2 + (y1 - y2) ** 2)'], {}), '((x1 - x2) ** 2 + (y1 - y2) ** 2)\n', (22575, 22608), False, 'import math\n'), ((4598, 4619), 'carla.WalkerControl', 'carla.WalkerControl', ([], {}), '()\n', (4617, 4619), False, 'import carla\n'), ((4747, 4768), 'carla.WalkerControl', 'carla.WalkerControl', ([], {}), '()\n', (4766, 4768), False, 'import carla\n'), ((4983, 5010), 'numpy.random.randint', 'np.random.randint', (['(200)', '(240)'], {}), '(200, 240)\n', (5000, 5010), True, 'import numpy as np\n'), ((6216, 6233), 'carla.Transform', 'carla.Transform', ([], {}), '()\n', (6231, 6233), False, 'import carla\n'), ((11328, 11352), 'math.tan', 'math.tan', (['(1.1 * steerCmd)'], {}), '(1.1 * steerCmd)\n', (11336, 11352), False, 'import math\n'), ((14359, 14386), 'numpy.square', 'np.square', (['(1 - dis_to_front)'], {}), '(1 - dis_to_front)\n', (14368, 14386), True, 'import numpy as np\n'), ((14401, 14427), 'numpy.square', 'np.square', (['(1 - dis_to_side)'], {}), '(1 - dis_to_side)\n', (14410, 14427), True, 'import numpy as np\n'), ((16660, 16683), 'numpy.squeeze', 'np.squeeze', (['state_space'], {}), '(state_space)\n', (16670, 16683), True, 'import numpy as np\n'), ((18830, 18854), 'numpy.clip', 'np.clip', (['(340 - xb)', '(0)', '(10)'], {}), '(340 - xb, 0, 10)\n', (18837, 18854), True, 'import numpy as np\n'), ((18890, 18914), 'numpy.clip', 'np.clip', (['(xb - 332)', '(0)', '(10)'], {}), '(xb - 332, 0, 10)\n', (18897, 18914), True, 'import numpy as np\n'), ((19298, 19341), 'numpy.clip', 'np.clip', (['(yc3 - position_self.y - 2.4)', '(0)', '(25)'], {}), '(yc3 - position_self.y - 2.4, 0, 25)\n', (19305, 19341), True, 'import numpy as np\n'), ((21761, 21781), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (21778, 21781), True, 'import numpy as np\n'), ((9409, 9426), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (9417, 9426), True, 
'import numpy as np\n'), ((9852, 9869), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (9860, 9869), True, 'import numpy as np\n'), ((14557, 14581), 'numpy.abs', 'np.abs', (['(1 - dis_to_obs32)'], {}), '(1 - dis_to_obs32)\n', (14563, 14581), True, 'import numpy as np\n'), ((19105, 19148), 'numpy.clip', 'np.clip', (['(yc2 - position_self.y - 2.6)', '(0)', '(25)'], {}), '(yc2 - position_self.y - 2.6, 0, 25)\n', (19112, 19148), True, 'import numpy as np\n'), ((22325, 22345), 'math.sqrt', 'math.sqrt', (['(k * k + 1)'], {}), '(k * k + 1)\n', (22334, 22345), False, 'import math\n'), ((22669, 22705), 'math.cos', 'math.cos', (['(yaw * math.pi / 180 - 0.43)'], {}), '(yaw * math.pi / 180 - 0.43)\n', (22677, 22705), False, 'import math\n'), ((22720, 22756), 'math.sin', 'math.sin', (['(yaw * math.pi / 180 - 0.43)'], {}), '(yaw * math.pi / 180 - 0.43)\n', (22728, 22756), False, 'import math\n'), ((22771, 22807), 'math.cos', 'math.cos', (['(yaw * math.pi / 180 + 0.43)'], {}), '(yaw * math.pi / 180 + 0.43)\n', (22779, 22807), False, 'import math\n'), ((22822, 22858), 'math.cos', 'math.cos', (['(yaw * math.pi / 180 + 0.43)'], {}), '(yaw * math.pi / 180 + 0.43)\n', (22830, 22858), False, 'import math\n'), ((22873, 22919), 'math.cos', 'math.cos', (['(yaw * math.pi / 180 - 0.43 + math.pi)'], {}), '(yaw * math.pi / 180 - 0.43 + math.pi)\n', (22881, 22919), False, 'import math\n'), ((22932, 22978), 'math.cos', 'math.cos', (['(yaw * math.pi / 180 - 0.43 + math.pi)'], {}), '(yaw * math.pi / 180 - 0.43 + math.pi)\n', (22940, 22978), False, 'import math\n'), ((22991, 23037), 'math.cos', 'math.cos', (['(yaw * math.pi / 180 + 0.43 + math.pi)'], {}), '(yaw * math.pi / 180 + 0.43 + math.pi)\n', (22999, 23037), False, 'import math\n'), ((23050, 23096), 'math.cos', 'math.cos', (['(yaw * math.pi / 180 + 0.43 + math.pi)'], {}), '(yaw * math.pi / 180 + 0.43 + math.pi)\n', (23058, 23096), False, 'import math\n'), ((4229, 4254), 'numpy.random.randint', 'np.random.randint', (['(10)', 
'(15)'], {}), '(10, 15)\n', (4246, 4254), True, 'import numpy as np\n'), ((4342, 4371), 'numpy.random.randint', 'np.random.randint', (['(3310)', '(3350)'], {}), '(3310, 3350)\n', (4359, 4371), True, 'import numpy as np\n'), ((5055, 5071), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5069, 5071), True, 'import numpy as np\n'), ((6654, 6679), 'carla.Location', 'carla.Location', ([], {'x': '(-2)', 'z': '(5)'}), '(x=-2, z=5)\n', (6668, 6679), False, 'import carla\n'), ((6681, 6707), 'carla.Rotation', 'carla.Rotation', ([], {'pitch': '(30.0)'}), '(pitch=30.0)\n', (6695, 6707), False, 'import carla\n'), ((6767, 6792), 'carla.Location', 'carla.Location', ([], {'x': '(-2)', 'z': '(5)'}), '(x=-2, z=5)\n', (6781, 6792), False, 'import carla\n'), ((6794, 6820), 'carla.Rotation', 'carla.Rotation', ([], {'pitch': '(30.0)'}), '(pitch=30.0)\n', (6808, 6820), False, 'import carla\n'), ((14534, 14558), 'numpy.abs', 'np.abs', (['(1 - dis_to_obs31)'], {}), '(1 - dis_to_obs31)\n', (14540, 14558), True, 'import numpy as np\n'), ((23186, 23206), 'math.exp', 'math.exp', (['(-theta * x)'], {}), '(-theta * x)\n', (23194, 23206), False, 'import math\n'), ((14511, 14535), 'numpy.abs', 'np.abs', (['(1 - dis_to_obs22)'], {}), '(1 - dis_to_obs22)\n', (14517, 14535), True, 'import numpy as np\n'), ((14488, 14512), 'numpy.abs', 'np.abs', (['(1 - dis_to_obs21)'], {}), '(1 - dis_to_obs21)\n', (14494, 14512), True, 'import numpy as np\n'), ((14442, 14466), 'numpy.abs', 'np.abs', (['(1 - dis_to_obs11)'], {}), '(1 - dis_to_obs11)\n', (14448, 14466), True, 'import numpy as np\n'), ((14465, 14489), 'numpy.abs', 'np.abs', (['(1 - dis_to_obs12)'], {}), '(1 - dis_to_obs12)\n', (14471, 14489), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import cv2
import numpy as np
import click
import click_log
import logging
from imutils.video import VideoStream
# Root-logger setup: prefix every log line with a timestamp.
logging.basicConfig(format='%(asctime)s %(message)s')
def nothing(x):
    """No-op trackbar callback required by cv2.createTrackbar."""
    return None
@click.command()
@click.option('--video', required=True, help='Video stream number', default=0, type=int)
@click_log.simple_verbosity_option(default='INFO')
def run(video):
    """Open the given video stream and interactively tune an HSV mask.

    Shows "frame", "mask" and "result" windows plus a "Trackbars" window with
    sliders for the lower H/S/V bounds and the upper H bound. Press Esc to
    quit.

    Fixes: removed the unused ``counter`` accumulator; trackbar positions are
    cast to plain int because some OpenCV builds reject numpy integer scalars.
    """
    logging.info("Process started")
    vs = VideoStream(src=video).start()
    cv2.namedWindow("Trackbars")
    # Initial HSV limits.
    # NOTE(review): OpenCV hue spans 0-179; minH=165/maxH=195 exceed that
    # range — confirm whether this is intentional (e.g. red wrap-around).
    minHSV = np.array([165, 132, 98])
    maxHSV = np.array([195, 255, 255])
    cv2.createTrackbar("minH", "Trackbars", 0, 255, nothing)
    cv2.createTrackbar("minS", "Trackbars", 0, 255, nothing)
    cv2.createTrackbar("minV", "Trackbars", 0, 255, nothing)
    cv2.setTrackbarPos("minH", "Trackbars", int(minHSV[0]))
    cv2.setTrackbarPos("minS", "Trackbars", int(minHSV[1]))
    cv2.setTrackbarPos("minV", "Trackbars", int(minHSV[2]))
    cv2.createTrackbar("maxH", "Trackbars", 0, 255, nothing)
    cv2.setTrackbarPos("maxH", "Trackbars", int(maxHSV[0]))
    time.sleep(2.0)  # let the camera warm up
    logging.info('Loop start')
    while True:
        logging.info("Frame read")
        time.sleep(0.05)
        image = vs.read()
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        # Read the current slider positions each frame.
        minH = cv2.getTrackbarPos("minH", "Trackbars")
        minS = cv2.getTrackbarPos("minS", "Trackbars")
        minV = cv2.getTrackbarPos("minV", "Trackbars")
        maxH = cv2.getTrackbarPos("maxH", "Trackbars")
        lowerLimit = np.uint8([minH, minS, minV])
        upperLimit = np.uint8([maxH, 255, 255])
        mask = cv2.inRange(hsv, lowerLimit, upperLimit)
        result = cv2.bitwise_and(image, image, mask=mask)
        cv2.imshow("frame", image)
        cv2.imshow("mask", mask)
        cv2.imshow("result", result)
        key = cv2.waitKey(1)
        if key == 27:  # Esc
            break
    vs.stop()
    cv2.destroyAllWindows()
def main():
    """Console entry point; delegates to the click-decorated ``run``."""
    # Removed dead commented-out exception handling (dead code).
    run()


if __name__ == '__main__':
    main()
| [
"cv2.bitwise_and",
"click.option",
"cv2.imshow",
"cv2.inRange",
"click_log.simple_verbosity_option",
"cv2.cvtColor",
"click.command",
"cv2.setTrackbarPos",
"cv2.getTrackbarPos",
"cv2.destroyAllWindows",
"cv2.createTrackbar",
"numpy.uint8",
"cv2.waitKey",
"time.sleep",
"imutils.video.Vide... | [((173, 226), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(message)s"""'}), "(format='%(asctime)s %(message)s')\n", (192, 226), False, 'import logging\n'), ((256, 271), 'click.command', 'click.command', ([], {}), '()\n', (269, 271), False, 'import click\n'), ((273, 365), 'click.option', 'click.option', (['"""--video"""'], {'required': '(True)', 'help': '"""Video stream number"""', 'default': '(0)', 'type': 'int'}), "('--video', required=True, help='Video stream number', default=\n 0, type=int)\n", (285, 365), False, 'import click\n'), ((362, 411), 'click_log.simple_verbosity_option', 'click_log.simple_verbosity_option', ([], {'default': '"""INFO"""'}), "(default='INFO')\n", (395, 411), False, 'import click_log\n'), ((432, 463), 'logging.info', 'logging.info', (['"""Process started"""'], {}), "('Process started')\n", (444, 463), False, 'import logging\n'), ((510, 538), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Trackbars"""'], {}), "('Trackbars')\n", (525, 538), False, 'import cv2\n'), ((553, 577), 'numpy.array', 'np.array', (['[165, 132, 98]'], {}), '([165, 132, 98])\n', (561, 577), True, 'import numpy as np\n'), ((591, 616), 'numpy.array', 'np.array', (['[195, 255, 255]'], {}), '([195, 255, 255])\n', (599, 616), True, 'import numpy as np\n'), ((623, 679), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""minH"""', '"""Trackbars"""', '(0)', '(255)', 'nothing'], {}), "('minH', 'Trackbars', 0, 255, nothing)\n", (641, 679), False, 'import cv2\n'), ((684, 740), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""minS"""', '"""Trackbars"""', '(0)', '(255)', 'nothing'], {}), "('minS', 'Trackbars', 0, 255, nothing)\n", (702, 740), False, 'import cv2\n'), ((745, 801), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""minV"""', '"""Trackbars"""', '(0)', '(255)', 'nothing'], {}), "('minV', 'Trackbars', 0, 255, nothing)\n", (763, 801), False, 'import cv2\n'), ((807, 857), 'cv2.setTrackbarPos', 'cv2.setTrackbarPos', 
(['"""minH"""', '"""Trackbars"""', 'minHSV[0]'], {}), "('minH', 'Trackbars', minHSV[0])\n", (825, 857), False, 'import cv2\n'), ((862, 912), 'cv2.setTrackbarPos', 'cv2.setTrackbarPos', (['"""minS"""', '"""Trackbars"""', 'minHSV[1]'], {}), "('minS', 'Trackbars', minHSV[1])\n", (880, 912), False, 'import cv2\n'), ((917, 967), 'cv2.setTrackbarPos', 'cv2.setTrackbarPos', (['"""minV"""', '"""Trackbars"""', 'minHSV[2]'], {}), "('minV', 'Trackbars', minHSV[2])\n", (935, 967), False, 'import cv2\n'), ((973, 1029), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""maxH"""', '"""Trackbars"""', '(0)', '(255)', 'nothing'], {}), "('maxH', 'Trackbars', 0, 255, nothing)\n", (991, 1029), False, 'import cv2\n'), ((1034, 1084), 'cv2.setTrackbarPos', 'cv2.setTrackbarPos', (['"""maxH"""', '"""Trackbars"""', 'maxHSV[0]'], {}), "('maxH', 'Trackbars', maxHSV[0])\n", (1052, 1084), False, 'import cv2\n'), ((1091, 1106), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (1101, 1106), False, 'import time\n'), ((1112, 1138), 'logging.info', 'logging.info', (['"""Loop start"""'], {}), "('Loop start')\n", (1124, 1138), False, 'import logging\n'), ((1952, 1975), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1973, 1975), False, 'import cv2\n'), ((1182, 1208), 'logging.info', 'logging.info', (['"""Frame read"""'], {}), "('Frame read')\n", (1194, 1208), False, 'import logging\n'), ((1217, 1233), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (1227, 1233), False, 'import time\n'), ((1275, 1313), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (1287, 1313), False, 'import cv2\n'), ((1330, 1369), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""minH"""', '"""Trackbars"""'], {}), "('minH', 'Trackbars')\n", (1348, 1369), False, 'import cv2\n'), ((1385, 1424), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""minS"""', '"""Trackbars"""'], {}), "('minS', 'Trackbars')\n", (1403, 1424), False, 'import cv2\n'), 
((1440, 1479), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""minV"""', '"""Trackbars"""'], {}), "('minV', 'Trackbars')\n", (1458, 1479), False, 'import cv2\n'), ((1496, 1535), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""maxH"""', '"""Trackbars"""'], {}), "('maxH', 'Trackbars')\n", (1514, 1535), False, 'import cv2\n'), ((1558, 1586), 'numpy.uint8', 'np.uint8', (['[minH, minS, minV]'], {}), '([minH, minS, minV])\n', (1566, 1586), True, 'import numpy as np\n'), ((1609, 1635), 'numpy.uint8', 'np.uint8', (['[maxH, 255, 255]'], {}), '([maxH, 255, 255])\n', (1617, 1635), True, 'import numpy as np\n'), ((1653, 1693), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lowerLimit', 'upperLimit'], {}), '(hsv, lowerLimit, upperLimit)\n', (1664, 1693), False, 'import cv2\n'), ((1712, 1752), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'image'], {'mask': 'mask'}), '(image, image, mask=mask)\n', (1727, 1752), False, 'import cv2\n'), ((1764, 1790), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'image'], {}), "('frame', image)\n", (1774, 1790), False, 'import cv2\n'), ((1799, 1823), 'cv2.imshow', 'cv2.imshow', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (1809, 1823), False, 'import cv2\n'), ((1832, 1860), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'result'], {}), "('result', result)\n", (1842, 1860), False, 'import cv2\n'), ((1876, 1890), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1887, 1890), False, 'import cv2\n'), ((474, 496), 'imutils.video.VideoStream', 'VideoStream', ([], {'src': 'video'}), '(src=video)\n', (485, 496), False, 'from imutils.video import VideoStream\n')] |
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import unittest
class TestRMSNormOp(hu.HypothesisTestCase):
    @given(
        M=st.integers(0, 8),
        N=st.integers(1, 16),
        eps=st.floats(0, 1e-3),
        dtype=st.sampled_from([np.float32, np.float64]),
        **hu.gcs,
    )
    @settings(deadline=None)
    def test_rms_norm(self, M, N, eps, dtype, gc, dc):
        """Check the RMSNorm operator against a NumPy reference, across
        devices, and via numerical gradient checks on every input."""
        X = (np.random.randn(M, N) * 2.0 + 1.0).astype(dtype)
        gamma = np.random.randn(N).astype(dtype)
        beta = np.random.randn(N).astype(dtype)
        op = core.CreateOperator(
            "RMSNorm",
            ["X", "gamma", "beta"],
            ["Y", "rrms"],
            eps=eps,
        )

        def rms_norm_ref(X, gamma, beta):
            # rrms = 1 / sqrt(mean(X^2, axis=1) + eps); Y = X * rrms * gamma + beta
            mean_sq = np.mean(np.square(X), axis=1)
            rrms = 1.0 / np.sqrt(mean_sq + eps)
            Y = X * rrms[:, None] * gamma + beta
            return Y, rrms

        inputs = [X, gamma, beta]
        self.assertReferenceChecks(gc, op, inputs, rms_norm_ref)
        self.assertDeviceChecks(dc, op, inputs, [0, 1])
        for idx in range(len(inputs)):
            self.assertGradientChecks(gc, op, inputs, idx, [0])


if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"numpy.random.randn",
"numpy.square",
"hypothesis.strategies.sampled_from",
"numpy.expand_dims",
"hypothesis.settings",
"caffe2.python.core.CreateOperator",
"hypothesis.strategies.integers",
"hypothesis.strategies.floats"
] | [((427, 450), 'hypothesis.settings', 'settings', ([], {'deadline': 'None'}), '(deadline=None)\n', (435, 450), False, 'from hypothesis import given, settings\n'), ((1309, 1324), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1322, 1324), False, 'import unittest\n'), ((679, 757), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""RMSNorm"""', "['X', 'gamma', 'beta']", "['Y', 'rrms']"], {'eps': 'eps'}), "('RMSNorm', ['X', 'gamma', 'beta'], ['Y', 'rrms'], eps=eps)\n", (698, 757), False, 'from caffe2.python import core\n'), ((260, 277), 'hypothesis.strategies.integers', 'st.integers', (['(0)', '(8)'], {}), '(0, 8)\n', (271, 277), True, 'import hypothesis.strategies as st\n'), ((289, 307), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(16)'], {}), '(1, 16)\n', (300, 307), True, 'import hypothesis.strategies as st\n'), ((321, 340), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(0.001)'], {}), '(0, 0.001)\n', (330, 340), True, 'import hypothesis.strategies as st\n'), ((355, 396), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[np.float32, np.float64]'], {}), '([np.float32, np.float64])\n', (370, 396), True, 'import hypothesis.strategies as st\n'), ((584, 602), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (599, 602), True, 'import numpy as np\n'), ((632, 650), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (647, 650), True, 'import numpy as np\n'), ((519, 540), 'numpy.random.randn', 'np.random.randn', (['M', 'N'], {}), '(M, N)\n', (534, 540), True, 'import numpy as np\n'), ((950, 978), 'numpy.expand_dims', 'np.expand_dims', (['rrms'], {'axis': '(1)'}), '(rrms, axis=1)\n', (964, 978), True, 'import numpy as np\n'), ((901, 913), 'numpy.square', 'np.square', (['X'], {}), '(X)\n', (910, 913), True, 'import numpy as np\n')] |
from __future__ import print_function
from orphics import maps,io,cosmology,stats
from pixell import enmap
import numpy as np
import os,sys
from tilec import covtools,ilc
from scipy.optimize import curve_fit
# --- Simulation geometry, theory spectra and analytic covariance setup ---
# 20 deg wide patch at 2 arcmin resolution.
deg = 20.
px = 2.0
theory = cosmology.default_theory()
shape,wcs = maps.rect_geometry(width_deg=deg,px_res_arcmin=px)
modlmap = enmap.modlmap(shape,wcs)
ells = np.arange(modlmap.max())
# Lensed CMB TT spectrum for map generation.
cltt = theory.lCl('TT',ells)
mgen = maps.MapGen(shape,wcs,cltt[None,None])
# White-noise levels (uK-arcmin) for the two simulated channels.
noise = [10,20]
ngen1 = maps.MapGen(shape,wcs,(ells*0 + (noise[0]*np.pi/180./60.)**2.)[None,None])
ngen2 = maps.MapGen(shape,wcs,(ells*0 + (noise[1]*np.pi/180./60.)**2.)[None,None])
# Analytic 2x2 signal+noise covariance per Fourier pixel: CMB on all
# elements, noise added on the diagonal.
cov = enmap.enmap(np.zeros((shape[0],shape[1],2,2)),wcs)
for i in range(2):
    for j in range(2):
        cov[...,i,j] = maps.interp(ells,cltt)(modlmap) + int(i==j) * (noise[i]*np.pi/180./60.)**2.
cinv = np.linalg.inv(cov)
nsims = 30
np.random.seed(1)
# Binning for the output power spectra.
bin_edges = np.arange(80,3000,40)
binner = stats.bin2D(modlmap,bin_edges)
s = stats.Stats()
gellmax = modlmap.max()
ells = np.arange(0,gellmax,1)
ctheory = ilc.CTheory(ells)
slmin = 80
minell = maps.minimum_ell(shape,wcs)
# Finer binning over the low-ell fit range (used by the commented-out
# curve-fit branch inside the simulation loop).
fitmax = 600
fitmin = slmin
dell = 2*minell
fbin_edges = np.arange(fitmin,fitmax,dell)
fbinner = stats.bin2D(modlmap,fbin_edges)
fcents = fbinner.centers
# --- Monte-Carlo loop: for each sim, build two noisy channels, estimate an
# empirical covariance, form ILC-style coadds with the true, empirical and
# perturbed covariances, and accumulate cross-spectra with the noiseless map.
for i in range(nsims):
    print(i)
    cmb = mgen.get_map(seed=(1,i))
    n1 = ngen1.get_map(seed=(2,i))
    n2 = ngen2.get_map(seed=(3,i))
    # kmap0 is the noiseless reference; kmap1/kmap2 are the two channels.
    kmap0 = enmap.fft(cmb,normalize='phys')
    kmap1 = enmap.fft(cmb+n1,normalize='phys')
    kmap2 = enmap.fft(cmb+n2,normalize='phys')
    kmaps = [kmap1,kmap2]
    # icov: smoothed empirical covariance; ncov: same with a small stochastic
    # perturbation at ell < 600 to probe sensitivity to covariance errors.
    icov = np.zeros((shape[0],shape[1],2,2))
    ncov = np.zeros((shape[0],shape[1],2,2))
    lmin = 80
    lmax = 7000
    for p in range(2):
        for q in range(2):
            power = np.real(kmaps[p]*kmaps[q].conj())
            # Isotropic smoothing of the empirical 2D power.
            icov[...,p,q] = covtools.signal_average(power,bin_width=80,kind=3,dlspace=True,lmin=lmin)
            #icov[...,p,q] = covtools.signal_average(enmap.enmap(cov[...,p,q],wcs),bin_width=80,kind=3,dlspace=True,lmin=lmin)
            ncov[...,p,q] = icov[...,p,q].copy()
            # ~1% multiplicative perturbation, deterministic per (sim, p, q).
            np.random.seed((4,i,p,q))
            stoch = (1+np.random.normal(scale=0.01))
            print(100-stoch*100.)
            ncov[...,p,q][modlmap<600] = icov[...,p,q][modlmap<600].copy() * stoch
            #ncov[modlmap<600,p,q] = cov[modlmap<600,p,q].copy()
            # f1 = 150 ; f2 = 150
            # ffunc = lambda d,x: fbinner.bin(maps.interp(ells,ctheory.get_theory_cls(f1,f2,a_cmb=x))(modlmap))[1]
            # res,_ = curve_fit(ffunc,fcents,fbinner.bin(power)[1],p0=[1],bounds=([0.2],[1.8]))
            # fcmb = res
            # print(fcmb)
            # cfit = maps.interp(ells,ctheory.get_theory_cls(f1,f2,a_cmb=fcmb))(modlmap)
            # ncov[modlmap<600,p,q] = cfit[modlmap<600].copy()
            if p==q:
                # Effectively exclude modes outside [lmin, lmax] by inflating
                # the diagonal covariance there.
                icov[modlmap<=lmin,p,q] = cov.max()*10000
                icov[modlmap>=lmax,p,q] = cov.max()*10000
                ncov[modlmap<=lmin,p,q] = cov.max()*10000
                ncov[modlmap>=lmax,p,q] = cov.max()*10000
            #io.power_crop(icov[...,p,q],200,"dscov_%d_%d.png" % (p,q))
            #icov[...,p,q] = cov[...,p,q]
    icinv = np.linalg.inv(icov)
    ncinv = np.linalg.inv(ncov)
    ks = np.stack([kmap1,kmap2])
    # Unit response vector (CMB appears with weight 1 in both channels).
    rs = np.ones((2,))
    # Standard ILC coadd: (r^T C^-1 k) / (r^T C^-1 r), per Fourier pixel,
    # with the true (cinv), empirical (icinv) and perturbed (ncinv) covariances.
    kcoadd = np.einsum("i,...ij,j...->...",rs,cinv,ks) / np.einsum("i,...ij,j->...",rs,cinv,rs)
    ikcoadd = np.einsum("i,...ij,j...->...",rs,icinv,ks) / np.einsum("i,...ij,j->...",rs,icinv,rs)
    nkcoadd = np.einsum("i,...ij,j...->...",rs,ncinv,ks) / np.einsum("i,...ij,j->...",rs,ncinv,rs)
    # Cross-spectra of each coadd against the noiseless map, plus the
    # noiseless auto-spectrum as a reference.
    p2d = np.real(kcoadd*kmap0.conj())
    cents,p1d = binner.bin(p2d)
    s.add_to_stats("p1d",p1d)
    p2d = np.real(kmap0*kmap0.conj())
    cents,p1d0 = binner.bin(p2d)
    s.add_to_stats("p1d0",p1d0)
    p2d = np.real(ikcoadd*kmap0.conj())
    cents,p1d = binner.bin(p2d)
    s.add_to_stats("ip1d",p1d)
    p2d = np.real(nkcoadd*kmap0.conj())
    cents,p1d = binner.bin(p2d)
    s.add_to_stats("np1d",p1d)
# --- Collect Monte-Carlo means and plot the results ---
s.get_stats()
p1d = s.stats['p1d']['mean']
p1d0 = s.stats['p1d0']['mean']
ip1d = s.stats['ip1d']['mean']
np1d = s.stats['np1d']['mean']
# Absolute spectrum of the true-covariance coadd against theory.
pl = io.Plotter(xyscale='loglog',scalefn = lambda x: x**2./2./np.pi,xlabel='l',ylabel='D')
pl.add(ells,cltt)
pl.add(cents,p1d)
pl.done("simpleilc.png")
# Fractional deviation of each coadd cross-spectrum from the noiseless
# auto-spectrum: solid = true cov, '-' = empirical cov, '--' = perturbed cov.
pl = io.Plotter(xyscale='linlin',xlabel='l',ylabel='D')
pl.add(cents,(p1d-p1d0)/p1d0)
pl.add(cents,(ip1d-p1d0)/p1d0,ls="-")
pl.add(cents,(np1d-p1d0)/p1d0,ls="--")
pl._ax.set_xlim(70,1000)
pl._ax.set_ylim(-0.02,0.02)
pl.hline()
pl.done("dsimpleilc.png")
| [
"orphics.maps.interp",
"numpy.random.seed",
"numpy.einsum",
"numpy.ones",
"orphics.maps.MapGen",
"numpy.arange",
"numpy.random.normal",
"orphics.stats.Stats",
"orphics.maps.rect_geometry",
"pixell.enmap.modlmap",
"orphics.stats.bin2D",
"tilec.ilc.CTheory",
"pixell.enmap.fft",
"numpy.stack"... | [((238, 264), 'orphics.cosmology.default_theory', 'cosmology.default_theory', ([], {}), '()\n', (262, 264), False, 'from orphics import maps, io, cosmology, stats\n'), ((277, 328), 'orphics.maps.rect_geometry', 'maps.rect_geometry', ([], {'width_deg': 'deg', 'px_res_arcmin': 'px'}), '(width_deg=deg, px_res_arcmin=px)\n', (295, 328), False, 'from orphics import maps, io, cosmology, stats\n'), ((338, 363), 'pixell.enmap.modlmap', 'enmap.modlmap', (['shape', 'wcs'], {}), '(shape, wcs)\n', (351, 363), False, 'from pixell import enmap\n'), ((433, 474), 'orphics.maps.MapGen', 'maps.MapGen', (['shape', 'wcs', 'cltt[None, None]'], {}), '(shape, wcs, cltt[None, None])\n', (444, 474), False, 'from orphics import maps, io, cosmology, stats\n'), ((496, 591), 'orphics.maps.MapGen', 'maps.MapGen', (['shape', 'wcs', '(ells * 0 + (noise[0] * np.pi / 180.0 / 60.0) ** 2.0)[None, None]'], {}), '(shape, wcs, (ells * 0 + (noise[0] * np.pi / 180.0 / 60.0) ** \n 2.0)[None, None])\n', (507, 591), False, 'from orphics import maps, io, cosmology, stats\n'), ((579, 674), 'orphics.maps.MapGen', 'maps.MapGen', (['shape', 'wcs', '(ells * 0 + (noise[1] * np.pi / 180.0 / 60.0) ** 2.0)[None, None]'], {}), '(shape, wcs, (ells * 0 + (noise[1] * np.pi / 180.0 / 60.0) ** \n 2.0)[None, None])\n', (590, 674), False, 'from orphics import maps, io, cosmology, stats\n'), ((861, 879), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (874, 879), True, 'import numpy as np\n'), ((892, 909), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (906, 909), True, 'import numpy as np\n'), ((923, 946), 'numpy.arange', 'np.arange', (['(80)', '(3000)', '(40)'], {}), '(80, 3000, 40)\n', (932, 946), True, 'import numpy as np\n'), ((954, 985), 'orphics.stats.bin2D', 'stats.bin2D', (['modlmap', 'bin_edges'], {}), '(modlmap, bin_edges)\n', (965, 985), False, 'from orphics import maps, io, cosmology, stats\n'), ((990, 1003), 'orphics.stats.Stats', 'stats.Stats', ([], {}), '()\n', 
(1001, 1003), False, 'from orphics import maps, io, cosmology, stats\n'), ((1037, 1061), 'numpy.arange', 'np.arange', (['(0)', 'gellmax', '(1)'], {}), '(0, gellmax, 1)\n', (1046, 1061), True, 'import numpy as np\n'), ((1070, 1087), 'tilec.ilc.CTheory', 'ilc.CTheory', (['ells'], {}), '(ells)\n', (1081, 1087), False, 'from tilec import covtools, ilc\n'), ((1108, 1136), 'orphics.maps.minimum_ell', 'maps.minimum_ell', (['shape', 'wcs'], {}), '(shape, wcs)\n', (1124, 1136), False, 'from orphics import maps, io, cosmology, stats\n'), ((1193, 1224), 'numpy.arange', 'np.arange', (['fitmin', 'fitmax', 'dell'], {}), '(fitmin, fitmax, dell)\n', (1202, 1224), True, 'import numpy as np\n'), ((1233, 1265), 'orphics.stats.bin2D', 'stats.bin2D', (['modlmap', 'fbin_edges'], {}), '(modlmap, fbin_edges)\n', (1244, 1265), False, 'from orphics import maps, io, cosmology, stats\n'), ((4216, 4314), 'orphics.io.Plotter', 'io.Plotter', ([], {'xyscale': '"""loglog"""', 'scalefn': '(lambda x: x ** 2.0 / 2.0 / np.pi)', 'xlabel': '"""l"""', 'ylabel': '"""D"""'}), "(xyscale='loglog', scalefn=lambda x: x ** 2.0 / 2.0 / np.pi,\n xlabel='l', ylabel='D')\n", (4226, 4314), False, 'from orphics import maps, io, cosmology, stats\n'), ((4369, 4421), 'orphics.io.Plotter', 'io.Plotter', ([], {'xyscale': '"""linlin"""', 'xlabel': '"""l"""', 'ylabel': '"""D"""'}), "(xyscale='linlin', xlabel='l', ylabel='D')\n", (4379, 4421), False, 'from orphics import maps, io, cosmology, stats\n'), ((673, 709), 'numpy.zeros', 'np.zeros', (['(shape[0], shape[1], 2, 2)'], {}), '((shape[0], shape[1], 2, 2))\n', (681, 709), True, 'import numpy as np\n'), ((1450, 1482), 'pixell.enmap.fft', 'enmap.fft', (['cmb'], {'normalize': '"""phys"""'}), "(cmb, normalize='phys')\n", (1459, 1482), False, 'from pixell import enmap\n'), ((1494, 1531), 'pixell.enmap.fft', 'enmap.fft', (['(cmb + n1)'], {'normalize': '"""phys"""'}), "(cmb + n1, normalize='phys')\n", (1503, 1531), False, 'from pixell import enmap\n'), ((1541, 1578), 
'pixell.enmap.fft', 'enmap.fft', (['(cmb + n2)'], {'normalize': '"""phys"""'}), "(cmb + n2, normalize='phys')\n", (1550, 1578), False, 'from pixell import enmap\n'), ((1614, 1650), 'numpy.zeros', 'np.zeros', (['(shape[0], shape[1], 2, 2)'], {}), '((shape[0], shape[1], 2, 2))\n', (1622, 1650), True, 'import numpy as np\n'), ((1659, 1695), 'numpy.zeros', 'np.zeros', (['(shape[0], shape[1], 2, 2)'], {}), '((shape[0], shape[1], 2, 2))\n', (1667, 1695), True, 'import numpy as np\n'), ((3239, 3258), 'numpy.linalg.inv', 'np.linalg.inv', (['icov'], {}), '(icov)\n', (3252, 3258), True, 'import numpy as np\n'), ((3271, 3290), 'numpy.linalg.inv', 'np.linalg.inv', (['ncov'], {}), '(ncov)\n', (3284, 3290), True, 'import numpy as np\n'), ((3311, 3335), 'numpy.stack', 'np.stack', (['[kmap1, kmap2]'], {}), '([kmap1, kmap2])\n', (3319, 3335), True, 'import numpy as np\n'), ((3344, 3357), 'numpy.ones', 'np.ones', (['(2,)'], {}), '((2,))\n', (3351, 3357), True, 'import numpy as np\n'), ((3371, 3415), 'numpy.einsum', 'np.einsum', (['"""i,...ij,j...->..."""', 'rs', 'cinv', 'ks'], {}), "('i,...ij,j...->...', rs, cinv, ks)\n", (3380, 3415), True, 'import numpy as np\n'), ((3415, 3456), 'numpy.einsum', 'np.einsum', (['"""i,...ij,j->..."""', 'rs', 'cinv', 'rs'], {}), "('i,...ij,j->...', rs, cinv, rs)\n", (3424, 3456), True, 'import numpy as np\n'), ((3468, 3513), 'numpy.einsum', 'np.einsum', (['"""i,...ij,j...->..."""', 'rs', 'icinv', 'ks'], {}), "('i,...ij,j...->...', rs, icinv, ks)\n", (3477, 3513), True, 'import numpy as np\n'), ((3513, 3555), 'numpy.einsum', 'np.einsum', (['"""i,...ij,j->..."""', 'rs', 'icinv', 'rs'], {}), "('i,...ij,j->...', rs, icinv, rs)\n", (3522, 3555), True, 'import numpy as np\n'), ((3567, 3612), 'numpy.einsum', 'np.einsum', (['"""i,...ij,j...->..."""', 'rs', 'ncinv', 'ks'], {}), "('i,...ij,j...->...', rs, ncinv, ks)\n", (3576, 3612), True, 'import numpy as np\n'), ((3612, 3654), 'numpy.einsum', 'np.einsum', (['"""i,...ij,j->..."""', 'rs', 'ncinv', 'rs'], {}), 
"('i,...ij,j->...', rs, ncinv, rs)\n", (3621, 3654), True, 'import numpy as np\n'), ((1855, 1932), 'tilec.covtools.signal_average', 'covtools.signal_average', (['power'], {'bin_width': '(80)', 'kind': '(3)', 'dlspace': '(True)', 'lmin': 'lmin'}), '(power, bin_width=80, kind=3, dlspace=True, lmin=lmin)\n', (1878, 1932), False, 'from tilec import covtools, ilc\n'), ((2131, 2159), 'numpy.random.seed', 'np.random.seed', (['(4, i, p, q)'], {}), '((4, i, p, q))\n', (2145, 2159), True, 'import numpy as np\n'), ((777, 800), 'orphics.maps.interp', 'maps.interp', (['ells', 'cltt'], {}), '(ells, cltt)\n', (788, 800), False, 'from orphics import maps, io, cosmology, stats\n'), ((2180, 2208), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.01)'}), '(scale=0.01)\n', (2196, 2208), True, 'import numpy as np\n')] |
#%%
import numpy as np
import scipy as sp
import pandas as pd
import ccutils
#%%
# Set random seed so the bootstrap resampling is reproducible
np.random.seed(42)
# Number of bootstrap estimates computed per quantity
n_estimates = 10000
# Percentiles at which HPD credible regions are computed and saved
percentiles = [.01, .05, .10, .25, .50, .75, .90, .95, .99]
# Read single-cell microscopy measurements
df_micro = pd.read_csv('../../../data/csv_microscopy/' +
                       'single_cell_microscopy_data.csv')
#%%
# Group measurements by experiment date (one bootstrap run per date)
df_group = df_micro.groupby(['date'])
# Column names for the output data frame of bootstrap summaries
names = ['date', 'IPTG_uM','operator', 'binding_energy',
         'repressor', 'percentile',
         'fold_change', 'fold_change_lower', 'fold_change_upper',
         'noise', 'noise_lower', 'noise_upper',
         'skewness', 'skewness_lower', 'skewness_upper']
# Initialize empty data frame to accumulate the bootstrap summaries
df_noise = pd.DataFrame(columns=names)
# Loop through experimental groups (one group per imaging date)
for date, data in df_group:
    print(f'date: {date}')
    # Extract the single-cell autofluorescence intensities for this date
    I_auto = data[data.rbs == 'auto'].mean_intensity.values
    print('bootstrapping autofluorescence')
    # Bootstrap estimates of the mean autofluorescence background
    boots_auto = ccutils.stats.bootstrap_estimate(I_auto, np.mean, n_estimates)
    # Extract ∆lacI (unregulated) data
    data_delta = data[data.rbs == 'delta']
    # Arrays to store bootstrap estimates of the background-corrected
    # ∆lacI mean, standard deviation, and skewness
    boots_mean_delta = np.zeros(n_estimates)
    boots_std_delta = np.zeros(n_estimates)
    boots_skew_delta = np.zeros(n_estimates)
    print('bootstrapping ∆lacI')
    # Loop through bootstrap estimates
    for i in range(n_estimates):
        # Resample cells with replacement
        sample = data_delta.sample(n=len(data_delta), replace=True)
        # Background-corrected statistics; the per-cell background
        # scales with cell area
        boots_mean_delta[i] = np.mean(sample.intensity.values -
                                      boots_auto[i] * sample.area.values)
        boots_std_delta[i] = np.std(sample.intensity.values -
                                     boots_auto[i] * sample.area.values,
                                     ddof=1)
        boots_skew_delta[i] = sp.stats.skew(sample.intensity.values -
                                            boots_auto[i] * sample.area.values,
                                            bias=False)
    # Compute ∆lacI noise (std / mean, i.e. the coefficient of variation)
    boots_noise_delta = boots_std_delta / boots_mean_delta
    # Save the median and the HPD credible region for each percentile
    for per in percentiles:
        per_noise = ccutils.stats.hpd(boots_noise_delta, per)
        per_skew = ccutils.stats.hpd(boots_skew_delta, per)
        strain_info = [
            date,
            None,
            data_delta.operator.unique()[0],
            data_delta.binding_energy.unique()[0],
            0,
            per,
            None,
            None,
            None,
            np.median(boots_noise_delta),
            per_noise[0],
            per_noise[1],
            np.median(boots_skew_delta),
            per_skew[0],
            per_skew[1]
        ]
        # DataFrame.append was removed in pandas 2.0; concatenating a
        # single-row frame is the equivalent, supported operation.
        df_noise = pd.concat(
            [df_noise, pd.DataFrame([strain_info], columns=names)],
            ignore_index=True)
    # Group regulated strains by inducer (IPTG) concentration
    data_group = data[(data.rbs != 'auto') &
                      (data.rbs != 'delta')].groupby('IPTG_uM')
    # Loop through inducer concentrations
    for inducer, data_inducer in data_group:
        print(f'bootstrapping {inducer} µM')
        # Arrays to store bootstrap estimates of the background-corrected
        # mean, standard deviation, and skewness
        boots_mean_inducer = np.zeros(n_estimates)
        boots_std_inducer = np.zeros(n_estimates)
        boots_skew_inducer = np.zeros(n_estimates)
        # Loop through bootstrap estimates
        for i in range(n_estimates):
            # Resample cells with replacement
            sample = data_inducer.sample(n=len(data_inducer), replace=True)
            boots_mean_inducer[i] = np.mean(sample.intensity.values -
                                            boots_auto[i] * sample.area.values)
            boots_std_inducer[i] = np.std(sample.intensity.values -
                                           boots_auto[i] * sample.area.values,
                                           ddof=1)
            boots_skew_inducer[i] = sp.stats.skew(
                sample.intensity.values -
                boots_auto[i] * sample.area.values,
                bias=False)
        # Remove negative reads (background over-subtraction)
        idx = boots_mean_inducer >= 0
        boots_mean_inducer = boots_mean_inducer[idx]
        boots_std_inducer = boots_std_inducer[idx]
        boots_skew_inducer = boots_skew_inducer[idx]
        # Compute fold-change (relative to ∆lacI) and noise.
        # NOTE(review): this pairs the surviving estimates with the first
        # sum(idx) ∆lacI estimates rather than with their matching bootstrap
        # indices — confirm this pairing is intentional.
        boots_fc_inducer = (boots_mean_inducer /
                            boots_mean_delta[0:sum(idx)])
        boots_noise_inducer = boots_std_inducer / boots_mean_inducer
        # Save the median and the HPD credible region for each percentile
        for per in percentiles:
            per_fc = ccutils.stats.hpd(boots_fc_inducer, per)
            per_noise = ccutils.stats.hpd(boots_noise_inducer, per)
            per_skew = ccutils.stats.hpd(boots_skew_inducer, per)
            strain_info = [
                date,
                inducer,
                data_inducer.operator.unique()[0],
                data_inducer.binding_energy.unique()[0],
                data_inducer.repressor.unique()[0],
                per,
                np.median(boots_fc_inducer),
                per_fc[0],
                per_fc[1],
                np.median(boots_noise_inducer),
                per_noise[0],
                per_noise[1],
                np.median(boots_skew_inducer),
                per_skew[0],
                per_skew[1]
            ]
            # DataFrame.append was removed in pandas 2.0; concatenating a
            # single-row frame is the equivalent, supported operation.
            df_noise = pd.concat(
                [df_noise, pd.DataFrame([strain_info], columns=names)],
                ignore_index=True)
# %%
# Export the bootstrap percentile summaries for downstream analysis
df_noise.to_csv('../../../data/csv_microscopy/microscopy_noise_bootstrap.csv')
| [
"pandas.DataFrame",
"numpy.random.seed",
"ccutils.stats.hpd",
"pandas.read_csv",
"numpy.std",
"numpy.median",
"numpy.zeros",
"ccutils.stats.bootstrap_estimate",
"scipy.stats.skew",
"numpy.mean",
"pandas.Series"
] | [((101, 119), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (115, 119), True, 'import numpy as np\n'), ((304, 389), 'pandas.read_csv', 'pd.read_csv', (["('../../../data/csv_microscopy/' + 'single_cell_microscopy_data.csv')"], {}), "('../../../data/csv_microscopy/' + 'single_cell_microscopy_data.csv'\n )\n", (315, 389), True, 'import pandas as pd\n'), ((853, 880), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'names'}), '(columns=names)\n', (865, 880), True, 'import pandas as pd\n'), ((1173, 1235), 'ccutils.stats.bootstrap_estimate', 'ccutils.stats.bootstrap_estimate', (['I_auto', 'np.mean', 'n_estimates'], {}), '(I_auto, np.mean, n_estimates)\n', (1205, 1235), False, 'import ccutils\n'), ((1433, 1454), 'numpy.zeros', 'np.zeros', (['n_estimates'], {}), '(n_estimates)\n', (1441, 1454), True, 'import numpy as np\n'), ((1477, 1498), 'numpy.zeros', 'np.zeros', (['n_estimates'], {}), '(n_estimates)\n', (1485, 1498), True, 'import numpy as np\n'), ((1522, 1543), 'numpy.zeros', 'np.zeros', (['n_estimates'], {}), '(n_estimates)\n', (1530, 1543), True, 'import numpy as np\n'), ((1797, 1866), 'numpy.mean', 'np.mean', (['(sample.intensity.values - boots_auto[i] * sample.area.values)'], {}), '(sample.intensity.values - boots_auto[i] * sample.area.values)\n', (1804, 1866), True, 'import numpy as np\n'), ((1929, 2005), 'numpy.std', 'np.std', (['(sample.intensity.values - boots_auto[i] * sample.area.values)'], {'ddof': '(1)'}), '(sample.intensity.values - boots_auto[i] * sample.area.values, ddof=1)\n', (1935, 2005), True, 'import numpy as np\n'), ((2065, 2156), 'scipy.stats.skew', 'sp.stats.skew', (['(sample.intensity.values - boots_auto[i] * sample.area.values)'], {'bias': '(False)'}), '(sample.intensity.values - boots_auto[i] * sample.area.values,\n bias=False)\n', (2078, 2156), True, 'import scipy as sp\n'), ((2402, 2443), 'ccutils.stats.hpd', 'ccutils.stats.hpd', (['boots_noise_delta', 'per'], {}), '(boots_noise_delta, per)\n', (2419, 2443), 
False, 'import ccutils\n'), ((2463, 2503), 'ccutils.stats.hpd', 'ccutils.stats.hpd', (['boots_skew_delta', 'per'], {}), '(boots_skew_delta, per)\n', (2480, 2503), False, 'import ccutils\n'), ((3502, 3523), 'numpy.zeros', 'np.zeros', (['n_estimates'], {}), '(n_estimates)\n', (3510, 3523), True, 'import numpy as np\n'), ((3552, 3573), 'numpy.zeros', 'np.zeros', (['n_estimates'], {}), '(n_estimates)\n', (3560, 3573), True, 'import numpy as np\n'), ((3603, 3624), 'numpy.zeros', 'np.zeros', (['n_estimates'], {}), '(n_estimates)\n', (3611, 3624), True, 'import numpy as np\n'), ((2758, 2786), 'numpy.median', 'np.median', (['boots_noise_delta'], {}), '(boots_noise_delta)\n', (2767, 2786), True, 'import numpy as np\n'), ((2852, 2879), 'numpy.median', 'np.median', (['boots_skew_delta'], {}), '(boots_skew_delta)\n', (2861, 2879), True, 'import numpy as np\n'), ((3005, 3040), 'pandas.Series', 'pd.Series', (['strain_info'], {'index': 'names'}), '(strain_info, index=names)\n', (3014, 3040), True, 'import pandas as pd\n'), ((3875, 3944), 'numpy.mean', 'np.mean', (['(sample.intensity.values - boots_auto[i] * sample.area.values)'], {}), '(sample.intensity.values - boots_auto[i] * sample.area.values)\n', (3882, 3944), True, 'import numpy as np\n'), ((4016, 4092), 'numpy.std', 'np.std', (['(sample.intensity.values - boots_auto[i] * sample.area.values)'], {'ddof': '(1)'}), '(sample.intensity.values - boots_auto[i] * sample.area.values, ddof=1)\n', (4022, 4092), True, 'import numpy as np\n'), ((4165, 4256), 'scipy.stats.skew', 'sp.stats.skew', (['(sample.intensity.values - boots_auto[i] * sample.area.values)'], {'bias': '(False)'}), '(sample.intensity.values - boots_auto[i] * sample.area.values,\n bias=False)\n', (4178, 4256), True, 'import scipy as sp\n'), ((4916, 4956), 'ccutils.stats.hpd', 'ccutils.stats.hpd', (['boots_fc_inducer', 'per'], {}), '(boots_fc_inducer, per)\n', (4933, 4956), False, 'import ccutils\n'), ((4981, 5024), 'ccutils.stats.hpd', 'ccutils.stats.hpd', 
(['boots_noise_inducer', 'per'], {}), '(boots_noise_inducer, per)\n', (4998, 5024), False, 'import ccutils\n'), ((5048, 5090), 'ccutils.stats.hpd', 'ccutils.stats.hpd', (['boots_skew_inducer', 'per'], {}), '(boots_skew_inducer, per)\n', (5065, 5090), False, 'import ccutils\n'), ((5363, 5390), 'numpy.median', 'np.median', (['boots_fc_inducer'], {}), '(boots_fc_inducer)\n', (5372, 5390), True, 'import numpy as np\n'), ((5462, 5492), 'numpy.median', 'np.median', (['boots_noise_inducer'], {}), '(boots_noise_inducer)\n', (5471, 5492), True, 'import numpy as np\n'), ((5570, 5599), 'numpy.median', 'np.median', (['boots_skew_inducer'], {}), '(boots_skew_inducer)\n', (5579, 5599), True, 'import numpy as np\n'), ((5745, 5780), 'pandas.Series', 'pd.Series', (['strain_info'], {'index': 'names'}), '(strain_info, index=names)\n', (5754, 5780), True, 'import pandas as pd\n')] |
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests NeuralStackCell, NeuralQueueCell and NeuralStackModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mock
import numpy as np
from tensor2tensor.layers import modalities
from tensor2tensor.models.research import neural_stack
import tensorflow as tf
def build_fake_controller(cell):
  """Attach a scalar timestep counter to *cell* instead of a real RNN.

  Args:
    cell: The NeuralStackCell that receives the tracking variable.
  """
  step_initializer = tf.constant_initializer(-1)
  step_counter = cell.add_variable(
      "current_step",
      [],
      initializer=step_initializer,
      dtype=tf.int32,
      trainable=False)
  cell.current_step = step_counter
def call_fake_controller(push_values, pop_values, read_values, output_values):
  """Build a stand-in controller that replays a canned output sequence.

  Args:
    push_values: Expected controller push values.
    pop_values: Expected controller pop values.
    read_values: Expected controller read values.
    output_values: Expected controller output values.

  Returns:
    A callable which behaves like the call method of an NeuralStackCell.
  """
  def call(cell, inputs, state, batch_size):
    del inputs
    del batch_size
    # Advance the fake timestep so each invocation yields the next row.
    next_step = tf.assign_add(cell.current_step, tf.constant(1))

    def row_2d(values):
      # Select the canned 2-D row for the current timestep.
      return tf.slice(tf.constant(values), [next_step, 0], [1, -1])

    def row_3d(values):
      # Select the canned 3-D row for the current timestep.
      return tf.slice(tf.constant(values), [next_step, 0, 0], [1, -1, -1])

    return (row_2d(push_values),
            row_2d(pop_values),
            row_3d(read_values),
            row_3d(output_values),
            state)
  return call
class NeuralStackCellTest(tf.test.TestCase):
  """Tests for NeuralStackCell tensor shapes and push/pop behavior."""
  def test_controller_shapes(self):
    """Check that all the NeuralStackCell tensor shapes are correct.
    """
    batch_size = 5
    embedding_size = 3
    memory_size = 6
    num_units = 8
    stack = neural_stack.NeuralStackCell(num_units, memory_size, embedding_size)
    stack.build(None)
    # Static tensors created by build().
    self.assertEqual([1, embedding_size], stack.output_size)
    self.assertEqual([1, memory_size, memory_size], stack.read_mask.shape)
    self.assertEqual([3, 3, 1, 1], stack.write_shift_convolution.shape)
    stack_input = tf.zeros([batch_size, 1, embedding_size], dtype=tf.float32)
    # The zero state is a 5-tuple; check the shape of every component.
    zero_state = stack.zero_state(batch_size, tf.float32)
    (controller_state,
     previous_values,
     memory_values,
     read_strengths,
     write_strengths) = zero_state
    self.assertEqual([batch_size, num_units], controller_state.shape)
    self.assertEqual([batch_size, 1, embedding_size], previous_values.shape)
    self.assertEqual([batch_size, memory_size, embedding_size],
                     memory_values.shape)
    self.assertEqual([batch_size, 1, memory_size, 1], read_strengths.shape)
    self.assertEqual([batch_size, 1, memory_size, 1], write_strengths.shape)
    # The controller input concatenates the previous read with the new input.
    rnn_input = tf.concat([
        tf.reshape(
            previous_values,
            shape=[batch_size, embedding_size]),
        tf.reshape(
            stack_input,
            shape=[batch_size, embedding_size])
    ], axis=1)
    self.assertEqual([batch_size, 2 * embedding_size], rnn_input.shape)
    (push_strengths,
     pop_strengths,
     new_values,
     outputs,
     controller_next_state) = stack.call_controller(rnn_input,
                                                   controller_state,
                                                   batch_size)
    self.assertEqual([batch_size, 1, 1, 1], push_strengths.shape)
    self.assertEqual([batch_size, 1, 1, 1], pop_strengths.shape)
    self.assertEqual([batch_size, 1, embedding_size], new_values.shape)
    self.assertEqual([batch_size, 1, embedding_size], outputs.shape)
    self.assertEqual([batch_size, num_units], controller_next_state.shape)
    # A full cell step returns the outputs plus the next 5-tuple state.
    (outputs, (controller_next_state,
               read_values,
               next_memory_values,
               next_read_strengths,
               next_write_strengths)) = stack.call(stack_input, zero_state)
    self.assertEqual([batch_size, 1, embedding_size], outputs.shape)
    self.assertEqual([batch_size, num_units], controller_next_state.shape)
    self.assertEqual([batch_size, 1, embedding_size], read_values.shape)
    self.assertEqual([batch_size, memory_size, embedding_size],
                     next_memory_values.shape)
    self.assertEqual([batch_size, 1, memory_size, 1], next_read_strengths.shape)
    self.assertEqual([batch_size, 1, memory_size, 1],
                     next_write_strengths.shape)
    # Make sure that stack output shapes match stack input shapes
    self.assertEqual(controller_next_state.shape, controller_state.shape)
    self.assertEqual(read_values.shape, previous_values.shape)
    self.assertEqual(next_memory_values.shape, memory_values.shape)
    self.assertEqual(next_read_strengths.shape, read_strengths.shape)
    self.assertEqual(next_write_strengths.shape, write_strengths.shape)
  @mock.patch.object(neural_stack.NeuralStackCell, "build_controller",
                    build_fake_controller)
  @mock.patch.object(neural_stack.NeuralStackCell, "call_controller",
                    call_fake_controller(
                        push_values=[[1.0], [1.0], [0.0]],
                        pop_values=[[0.0], [0.0], [1.0]],
                        read_values=[[[1.0, 0.0, 0.0]],
                                     [[0.0, 1.0, 0.0]],
                                     [[0.0, 0.0, 1.0]]],
                        output_values=[[[0.0, 0.0, 0.0]],
                                       [[0.0, 0.0, 0.0]],
                                       [[0.0, 0.0, 0.0]]]))
  def test_push_pop(self):
    """Test pushing and popping from a NeuralStackCell.

    The mocked controller pushes at timesteps 0 and 1 and pops at
    timestep 2 (push_values=[1, 1, 0], pop_values=[0, 0, 1]).
    """
    input_values = np.array([[[[1.0, 0.0, 0.0]],
                               [[0.0, 1.0, 0.0]],
                               [[0.0, 0.0, 1.0]]]])
    expected_values = np.array([[[1.0, 0.0, 0.0],
                                [0.0, 1.0, 0.0],
                                [0.0, 0.0, 1.0],
                                [0.0, 0.0, 0.0],
                                [0.0, 0.0, 0.0],
                                [0.0, 0.0, 0.0]]])
    # After two pushes and one pop, only memory slot 0 keeps read strength.
    expected_read_strengths = np.array([
        [[[1.0], [0.0], [0.0], [0.0], [0.0], [0.0]]]])
    expected_write_strengths = np.array([
        [[[0.0], [0.0], [0.], [1.0], [0.0], [0.0]]]])
    # The stack top matches the first value pushed.
    expected_top = np.array([[[1.0, 0.0, 0.0]]])
    stack = neural_stack.NeuralStackCell(8, 6, 3)
    stack_input = tf.constant(input_values, dtype=tf.float32)
    # Unroll the cell over the three timesteps.
    (outputs, state) = tf.nn.dynamic_rnn(cell=stack,
                                      inputs=stack_input,
                                      time_major=False,
                                      dtype=tf.float32)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      _, state_vals = sess.run([outputs, state])
      (_, stack_top, values, read_strengths, write_strengths) = state_vals
      self.assertAllClose(expected_top, stack_top)
      self.assertAllClose(expected_values, values)
      self.assertAllClose(expected_read_strengths, read_strengths)
      self.assertAllClose(expected_write_strengths, write_strengths)
class NeuralQueueCellTest(tf.test.TestCase):
  """Tests for NeuralQueueCell enqueue/dequeue behavior."""
  @mock.patch.object(neural_stack.NeuralQueueCell, "build_controller",
                    build_fake_controller)
  @mock.patch.object(neural_stack.NeuralQueueCell, "call_controller",
                    call_fake_controller(
                        push_values=[[1.0], [1.0], [0.0]],
                        pop_values=[[0.0], [0.0], [1.0]],
                        read_values=[[[1.0, 0.0, 0.0]],
                                     [[0.0, 1.0, 0.0]],
                                     [[0.0, 0.0, 1.0]]],
                        output_values=[[[0.0, 0.0, 0.0]],
                                       [[0.0, 0.0, 0.0]],
                                       [[0.0, 0.0, 0.0]]]))
  def test_enqueue_dequeue(self):
    """Test enqueueing and dequeueing from a NeuralQueueCell.

    The mocked controller enqueues at timesteps 0 and 1 and dequeues at
    timestep 2 (push_values=[1, 1, 0], pop_values=[0, 0, 1]).
    """
    input_values = np.array([[[[1.0, 0.0, 0.0]],
                               [[0.0, 1.0, 0.0]],
                               [[0.0, 0.0, 1.0]]]])
    expected_values = np.array([[[1.0, 0.0, 0.0],
                                [0.0, 1.0, 0.0],
                                [0.0, 0.0, 1.0],
                                [0.0, 0.0, 0.0],
                                [0.0, 0.0, 0.0],
                                [0.0, 0.0, 0.0]]])
    # After one dequeue the read strength advances to memory slot 1.
    expected_read_strengths = np.array([
        [[[0.0], [1.0], [0.0], [0.0], [0.0], [0.0]]]])
    expected_write_strengths = np.array([
        [[[0.0], [0.0], [0.0], [1.0], [0.0], [0.0]]]])
    # The queue front matches the second value enqueued.
    expected_front = np.array([[[0.0, 1.0, 0.0]]])
    queue = neural_stack.NeuralQueueCell(8, 6, 3)
    rnn_input = tf.constant(input_values, dtype=tf.float32)
    # Unroll the cell over the three timesteps.
    (outputs, state) = tf.nn.dynamic_rnn(cell=queue,
                                      inputs=rnn_input,
                                      time_major=False,
                                      dtype=tf.float32)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      _, state_vals = sess.run([outputs, state])
      (_, queue_front, values, read_strengths, write_strengths) = state_vals
      self.assertAllClose(expected_front, queue_front)
      self.assertAllClose(expected_values, values)
      self.assertAllClose(expected_read_strengths, read_strengths)
      self.assertAllClose(expected_write_strengths, write_strengths)
class NeuralStackModelTest(tf.test.TestCase):
  """Shape checks for the end-to-end NeuralStackModel."""

  def test_model_shapes(self):
    """Verify the tensor shapes produced by the model's bottom and body."""
    batch_size = 100
    seq_length = 80
    embedding_size = 64
    vocab_size = 128
    # Assemble model and problem hyperparameters.
    hparams = neural_stack.neural_stack()
    problem_hparams = tf.contrib.training.HParams()
    modality_map = {
        "inputs": modalities.ModalityType.SYMBOL,
        "targets": modalities.ModalityType.SYMBOL,
    }
    vocab_map = {"inputs": vocab_size, "targets": vocab_size}
    problem_hparams.add_hparam("modality", modality_map)
    problem_hparams.add_hparam("vocab_size", vocab_map)
    model = neural_stack.NeuralStackModel(
        hparams, problem_hparams=problem_hparams)
    # One symbol id per timestep for both inputs and targets.
    token_shape = [batch_size, seq_length, 1, 1]
    features = {
        "inputs": tf.ones(token_shape, dtype=tf.int32),
        "targets": tf.ones(token_shape, dtype=tf.int32),
    }
    embedded = model.bottom(features)
    self.assertEqual([batch_size, seq_length, 1, embedding_size],
                     embedded["inputs"].shape)
    logits = model.body(embedded)
    self.assertEqual([batch_size, seq_length, 1, embedding_size], logits.shape)
# Run the test suite when the module is executed directly.
if __name__ == "__main__":
  tf.test.main()
| [
"tensorflow.test.main",
"mock.patch.object",
"tensorflow.ones",
"tensor2tensor.models.research.neural_stack.neural_stack",
"tensorflow.contrib.training.HParams",
"tensorflow.nn.dynamic_rnn",
"tensorflow.constant_initializer",
"tensorflow.global_variables_initializer",
"tensorflow.reshape",
"tensor... | [((5472, 5566), 'mock.patch.object', 'mock.patch.object', (['neural_stack.NeuralStackCell', '"""build_controller"""', 'build_fake_controller'], {}), "(neural_stack.NeuralStackCell, 'build_controller',\n build_fake_controller)\n", (5489, 5566), False, 'import mock\n'), ((7797, 7891), 'mock.patch.object', 'mock.patch.object', (['neural_stack.NeuralQueueCell', '"""build_controller"""', 'build_fake_controller'], {}), "(neural_stack.NeuralQueueCell, 'build_controller',\n build_fake_controller)\n", (7814, 7891), False, 'import mock\n'), ((11359, 11373), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (11371, 11373), True, 'import tensorflow as tf\n'), ((2434, 2502), 'tensor2tensor.models.research.neural_stack.NeuralStackCell', 'neural_stack.NeuralStackCell', (['num_units', 'memory_size', 'embedding_size'], {}), '(num_units, memory_size, embedding_size)\n', (2462, 2502), False, 'from tensor2tensor.models.research import neural_stack\n'), ((2754, 2813), 'tensorflow.zeros', 'tf.zeros', (['[batch_size, 1, embedding_size]'], {'dtype': 'tf.float32'}), '([batch_size, 1, embedding_size], dtype=tf.float32)\n', (2762, 2813), True, 'import tensorflow as tf\n'), ((6275, 6344), 'numpy.array', 'np.array', (['[[[[1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0]]]]'], {}), '([[[[1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0]]]])\n', (6283, 6344), True, 'import numpy as np\n'), ((6428, 6547), 'numpy.array', 'np.array', (['[[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0]]]'], {}), '([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, \n 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])\n', (6436, 6547), True, 'import numpy as np\n'), ((6738, 6794), 'numpy.array', 'np.array', (['[[[[1.0], [0.0], [0.0], [0.0], [0.0], [0.0]]]]'], {}), '([[[[1.0], [0.0], [0.0], [0.0], [0.0], [0.0]]]])\n', (6746, 6794), True, 'import numpy as np\n'), ((6835, 6891), 'numpy.array', 'np.array', (['[[[[0.0], [0.0], [0.0], 
[1.0], [0.0], [0.0]]]]'], {}), '([[[[0.0], [0.0], [0.0], [1.0], [0.0], [0.0]]]])\n', (6843, 6891), True, 'import numpy as np\n'), ((6919, 6948), 'numpy.array', 'np.array', (['[[[1.0, 0.0, 0.0]]]'], {}), '([[[1.0, 0.0, 0.0]]])\n', (6927, 6948), True, 'import numpy as np\n'), ((6962, 6999), 'tensor2tensor.models.research.neural_stack.NeuralStackCell', 'neural_stack.NeuralStackCell', (['(8)', '(6)', '(3)'], {}), '(8, 6, 3)\n', (6990, 6999), False, 'from tensor2tensor.models.research import neural_stack\n'), ((7018, 7061), 'tensorflow.constant', 'tf.constant', (['input_values'], {'dtype': 'tf.float32'}), '(input_values, dtype=tf.float32)\n', (7029, 7061), True, 'import tensorflow as tf\n'), ((7085, 7175), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'stack', 'inputs': 'stack_input', 'time_major': '(False)', 'dtype': 'tf.float32'}), '(cell=stack, inputs=stack_input, time_major=False, dtype=\n tf.float32)\n', (7102, 7175), True, 'import tensorflow as tf\n'), ((8613, 8682), 'numpy.array', 'np.array', (['[[[[1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0]]]]'], {}), '([[[[1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0]]]])\n', (8621, 8682), True, 'import numpy as np\n'), ((8765, 8884), 'numpy.array', 'np.array', (['[[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0,\n 0.0, 0.0], [0.0, 0.0, 0.0]]]'], {}), '([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, \n 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])\n', (8773, 8884), True, 'import numpy as np\n'), ((9075, 9131), 'numpy.array', 'np.array', (['[[[[0.0], [1.0], [0.0], [0.0], [0.0], [0.0]]]]'], {}), '([[[[0.0], [1.0], [0.0], [0.0], [0.0], [0.0]]]])\n', (9083, 9131), True, 'import numpy as np\n'), ((9172, 9228), 'numpy.array', 'np.array', (['[[[[0.0], [0.0], [0.0], [1.0], [0.0], [0.0]]]]'], {}), '([[[[0.0], [0.0], [0.0], [1.0], [0.0], [0.0]]]])\n', (9180, 9228), True, 'import numpy as np\n'), ((9259, 9288), 'numpy.array', 'np.array', (['[[[0.0, 1.0, 0.0]]]'], 
{}), '([[[0.0, 1.0, 0.0]]])\n', (9267, 9288), True, 'import numpy as np\n'), ((9302, 9339), 'tensor2tensor.models.research.neural_stack.NeuralQueueCell', 'neural_stack.NeuralQueueCell', (['(8)', '(6)', '(3)'], {}), '(8, 6, 3)\n', (9330, 9339), False, 'from tensor2tensor.models.research import neural_stack\n'), ((9356, 9399), 'tensorflow.constant', 'tf.constant', (['input_values'], {'dtype': 'tf.float32'}), '(input_values, dtype=tf.float32)\n', (9367, 9399), True, 'import tensorflow as tf\n'), ((9423, 9511), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'queue', 'inputs': 'rnn_input', 'time_major': '(False)', 'dtype': 'tf.float32'}), '(cell=queue, inputs=rnn_input, time_major=False, dtype=tf.\n float32)\n', (9440, 9511), True, 'import tensorflow as tf\n'), ((10348, 10375), 'tensor2tensor.models.research.neural_stack.neural_stack', 'neural_stack.neural_stack', ([], {}), '()\n', (10373, 10375), False, 'from tensor2tensor.models.research import neural_stack\n'), ((10398, 10427), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {}), '()\n', (10425, 10427), True, 'import tensorflow as tf\n'), ((10709, 10780), 'tensor2tensor.models.research.neural_stack.NeuralStackModel', 'neural_stack.NeuralStackModel', (['hparams'], {'problem_hparams': 'problem_hparams'}), '(hparams, problem_hparams=problem_hparams)\n', (10738, 10780), False, 'from tensor2tensor.models.research import neural_stack\n'), ((1181, 1208), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(-1)'], {}), '(-1)\n', (1204, 1208), True, 'import tensorflow as tf\n'), ((1825, 1839), 'tensorflow.constant', 'tf.constant', (['(1)'], {}), '(1)\n', (1836, 1839), True, 'import tensorflow as tf\n'), ((10859, 10914), 'tensorflow.ones', 'tf.ones', (['[batch_size, seq_length, 1, 1]'], {'dtype': 'tf.int32'}), '([batch_size, seq_length, 1, 1], dtype=tf.int32)\n', (10866, 10914), True, 'import tensorflow as tf\n'), ((10961, 11016), 'tensorflow.ones', 'tf.ones', 
(['[batch_size, seq_length, 1, 1]'], {'dtype': 'tf.int32'}), '([batch_size, seq_length, 1, 1], dtype=tf.int32)\n', (10968, 11016), True, 'import tensorflow as tf\n'), ((1871, 1895), 'tensorflow.constant', 'tf.constant', (['push_values'], {}), '(push_values)\n', (1882, 1895), True, 'import tensorflow as tf\n'), ((1940, 1963), 'tensorflow.constant', 'tf.constant', (['pop_values'], {}), '(pop_values)\n', (1951, 1963), True, 'import tensorflow as tf\n'), ((2008, 2032), 'tensorflow.constant', 'tf.constant', (['read_values'], {}), '(read_values)\n', (2019, 2032), True, 'import tensorflow as tf\n'), ((2084, 2110), 'tensorflow.constant', 'tf.constant', (['output_values'], {}), '(output_values)\n', (2095, 2110), True, 'import tensorflow as tf\n'), ((3439, 3502), 'tensorflow.reshape', 'tf.reshape', (['previous_values'], {'shape': '[batch_size, embedding_size]'}), '(previous_values, shape=[batch_size, embedding_size])\n', (3449, 3502), True, 'import tensorflow as tf\n'), ((3537, 3596), 'tensorflow.reshape', 'tf.reshape', (['stack_input'], {'shape': '[batch_size, embedding_size]'}), '(stack_input, shape=[batch_size, embedding_size])\n', (3547, 3596), True, 'import tensorflow as tf\n'), ((7348, 7381), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7379, 7381), True, 'import tensorflow as tf\n'), ((9684, 9717), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9715, 9717), True, 'import tensorflow as tf\n')] |
#originally inspired by http://scikit-learn.org/stable/auto_examples/linear_model/plot_ols.html ,
#but substantially modified from that
import numpy as np
from sklearn import datasets, linear_model
from sklearn.cross_validation import cross_val_score, KFold
import csv
# Load the data
# TODO: allow loading from an arbitrary input
targets_basic = []
data_basic = []
labels = []
# Use a context manager so the file handle is always closed (the original
# opened the file and never closed it).
with open('../solves.csv', 'rb') as f:
    reader = csv.reader(f)
    firstRow = True
    for row in reader:
        if firstRow:
            # The first row holds the column labels, not data.
            firstRow = False
            labels = row
            continue
        convertedRow = [float(a) for a in row]
        # Column 0 is the regression target; the remaining columns are
        # the feature vector.
        targets_basic.append(convertedRow[0])
        data_basic.append(convertedRow[1:])
targets = np.array(targets_basic)
data = np.array(data_basic)
# Create linear regression object
regr = linear_model.Ridge(alpha=1.0)
# Train the model using the training sets
regr.fit(data, targets)
print("Coefficients")
print("Constant = " + str(regr.intercept_))
# labels[0] is the target column, so feature i corresponds to labels[i + 1].
for i, coef in enumerate(regr.coef_):
    print(labels[i + 1] + " = " + str(coef))
K = 20  # folds
# K-fold cross-validated coefficient of determination for the fitted model.
R2 = cross_val_score(regr, data, y=targets, cv=KFold(targets.size, K)).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
      % (K, R2))
# TODO: export the model in a way that Go can run it
"numpy.array",
"csv.reader",
"sklearn.linear_model.Ridge",
"sklearn.cross_validation.KFold"
] | [((374, 387), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (384, 387), False, 'import csv\n'), ((735, 758), 'numpy.array', 'np.array', (['targets_basic'], {}), '(targets_basic)\n', (743, 758), True, 'import numpy as np\n'), ((766, 786), 'numpy.array', 'np.array', (['data_basic'], {}), '(data_basic)\n', (774, 786), True, 'import numpy as np\n'), ((829, 858), 'sklearn.linear_model.Ridge', 'linear_model.Ridge', ([], {'alpha': '(1.0)'}), '(alpha=1.0)\n', (847, 858), False, 'from sklearn import datasets, linear_model\n'), ((1398, 1420), 'sklearn.cross_validation.KFold', 'KFold', (['targets.size', 'K'], {}), '(targets.size, K)\n', (1403, 1420), False, 'from sklearn.cross_validation import cross_val_score, KFold\n')] |
import pydicom
import numpy as np
from random import randrange
from skimage.measure import label, find_contours
import SimpleITK as sitk
class ROIContourSequence :
"""a class to represente ROIContourSequence in RTSTRUCT file
"""
def __init__(self, mask_array:np.ndarray, mask_img:sitk.Image, number_of_roi:int):
"""constructor
Args:
mask_array (np.ndarray]): [mask ndarray]
mask_img ([sitk.Image]): [mask sitk image]
number_of_roi ([int]): [total number of roi ]
"""
self.mask_array = mask_array
self.mask_img = mask_img
self.number_of_roi = number_of_roi
def __define_contour_img(self, number_roi:int) -> sitk.Image :
"""method to extract mask img per ROI number
Args:
number_roi (int): [A roi number, start at 1]
Returns:
[sitk.Image]: [mask img of ROI number 'number_roi']
"""
roi = np.zeros((self.mask_array.shape), dtype=np.uint8)
z,x,y = np.where(self.mask_array == number_roi)
roi[z,x,y] = 1
roi_img = sitk.GetImageFromArray(roi)
return roi_img
def get_spatial_coordonate(self, number_roi:int, list_all_SOPInstanceUID:list) -> tuple:
"""Per ROI number, gather spatial coordonates per slices (when there is a contour)
Args:
number_roi (int): [a ROI number, start at 1]
list_all_SOPInstanceUID (list): [list of every SOPInstanceUID from associated dicom serie]
Returns:
[tuple]: [list of spatial coordonates [ [contour 1 : [x, y, z, x, y, z...] ], [contour 2 : [x, y, z, x, y, z...]], ... ] and list of SOPInstanceUID [SOPInstanceUID_contour1, SOPINStanceUID_contour2, ...]
"""
img_roi = self.__define_contour_img(number_roi)
depth = img_roi.GetDepth()
results = []
list_SOPInstance = []
for z in range(depth):
img_slice = img_roi[:,:,z]
array_slice = sitk.GetArrayFromImage(img_slice)
contour = find_contours(array_slice, level = 0.0)
if contour != []:
for i in range(len(contour)):
liste = []
l = contour[i].tolist()
for item in l:
spatial_coord = self.mask_img.TransformIndexToPhysicalPoint([int(item[1]), int(item[0]), int(z)])
liste.append(spatial_coord[0])
liste.append(spatial_coord[1])
liste.append(spatial_coord[2])
results.append(liste)
list_SOPInstance.append(list_all_SOPInstanceUID[z])
return results, list_SOPInstance
def __create_ContourImageSequence(self, ReferencedSOPClassUID:str, ReferencedSOPInstanceUID:str) -> pydicom.Sequence:
"""method to generate ContourImageSequence from ROIContourSequence
Args:
ReferencedSOPClassUID (str): [Referenced SOP Class UID value from associated serie]
ReferencedSOPInstanceUID (str): [Reference SOPInstance UID value from associated serie]
Returns:
[pydicom.Sequence]: [return ContourImageSequence]
"""
ContourImageSequence = pydicom.sequence.Sequence()
dataset = pydicom.dataset.Dataset()
dataset.ReferencedSOPClassUID = ReferencedSOPClassUID
dataset.ReferencedSOPInstanceUID = ReferencedSOPInstanceUID
ContourImageSequence.append(dataset)
return ContourImageSequence
def __create_ContourSequence(self, ReferencedSOPClassUID:str, list_ReferencedSOPInstanceUID:list, list_ContourData:list) -> pydicom.Sequence:
"""method to generate ContourSequence from ROIContourSequence
Args:
ReferencedSOPClassUID (str): [Referenced SOP Class UID value from associated serie]
list_ReferencedSOPInstanceUID (list): [list of every SOPInstanceUID (in which we find contour), same size as list_ContourData]
list_ContourData (list): [list of every ContourData [[x,y,z,x,y,z], [x,y,z,...], ...], same size as list_ReferencedSOPInstanceUID]
Returns:
[pydicom.Sequence]: [return ContourSequence]
"""
ContourSequence = pydicom.sequence.Sequence()
for ContourData,SOPInstanceUID in zip(list_ContourData,list_ReferencedSOPInstanceUID):
dataset = pydicom.dataset.Dataset()
dataset.ContourData = ContourData
dataset.ContourImageSequence = self.__create_ContourImageSequence(ReferencedSOPClassUID, SOPInstanceUID)
dataset.NumberOfContourPoints = len(ContourData)/3
dataset.ContourGeometricType = 'CLOSED_PLANAR'
ContourSequence.append(dataset)
return ContourSequence
@classmethod
def get_random_colour(cls) -> list:
"""a class method to generate random color for ROI
Returns:
[list]: [return color [r,g,b]]
"""
max = 256
return [randrange(max), randrange(max), randrange(max)]
def create_ROIContourSequence(self, ReferencedSOPClassUID:str, list_all_SOPInstanceUID:list) -> pydicom.Sequence:
"""method to generate ROIContourSequence from RTSTRUCT file
Args:
ReferencedSOPClassUID (str): [ReferencedSOPClass UID value from associated serie]
list_all_SOPInstanceUID (list): [list of every SOPInstanceUID of each instance from associated dicom serie]
Returns:
[pydicom.Sequence]: [return ROIContourSequence]
"""
ROIContourSequence = pydicom.sequence.Sequence()
for number_roi in range(1, self.number_of_roi +1) :
dataset = pydicom.dataset.Dataset()
dataset.ROIDisplayColor = self.get_random_colour()
dataset.ReferencedROINumber = number_roi
list_contour_data, list_SOP_instance_uid = self.get_spatial_coordonate(number_roi, list_all_SOPInstanceUID )
dataset.ContourSequence = self.__create_ContourSequence(ReferencedSOPClassUID, list_SOP_instance_uid, list_contour_data)
ROIContourSequence.append(dataset)
return ROIContourSequence
| [
"pydicom.dataset.Dataset",
"numpy.zeros",
"SimpleITK.GetArrayFromImage",
"numpy.where",
"pydicom.sequence.Sequence",
"skimage.measure.find_contours",
"SimpleITK.GetImageFromArray",
"random.randrange"
] | [((961, 1008), 'numpy.zeros', 'np.zeros', (['self.mask_array.shape'], {'dtype': 'np.uint8'}), '(self.mask_array.shape, dtype=np.uint8)\n', (969, 1008), True, 'import numpy as np\n'), ((1027, 1066), 'numpy.where', 'np.where', (['(self.mask_array == number_roi)'], {}), '(self.mask_array == number_roi)\n', (1035, 1066), True, 'import numpy as np\n'), ((1108, 1135), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['roi'], {}), '(roi)\n', (1130, 1135), True, 'import SimpleITK as sitk\n'), ((3272, 3299), 'pydicom.sequence.Sequence', 'pydicom.sequence.Sequence', ([], {}), '()\n', (3297, 3299), False, 'import pydicom\n'), ((3318, 3343), 'pydicom.dataset.Dataset', 'pydicom.dataset.Dataset', ([], {}), '()\n', (3341, 3343), False, 'import pydicom\n'), ((4280, 4307), 'pydicom.sequence.Sequence', 'pydicom.sequence.Sequence', ([], {}), '()\n', (4305, 4307), False, 'import pydicom\n'), ((5652, 5679), 'pydicom.sequence.Sequence', 'pydicom.sequence.Sequence', ([], {}), '()\n', (5677, 5679), False, 'import pydicom\n'), ((2005, 2038), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img_slice'], {}), '(img_slice)\n', (2027, 2038), True, 'import SimpleITK as sitk\n'), ((2061, 2098), 'skimage.measure.find_contours', 'find_contours', (['array_slice'], {'level': '(0.0)'}), '(array_slice, level=0.0)\n', (2074, 2098), False, 'from skimage.measure import label, find_contours\n'), ((4426, 4451), 'pydicom.dataset.Dataset', 'pydicom.dataset.Dataset', ([], {}), '()\n', (4449, 4451), False, 'import pydicom\n'), ((5066, 5080), 'random.randrange', 'randrange', (['max'], {}), '(max)\n', (5075, 5080), False, 'from random import randrange\n'), ((5082, 5096), 'random.randrange', 'randrange', (['max'], {}), '(max)\n', (5091, 5096), False, 'from random import randrange\n'), ((5098, 5112), 'random.randrange', 'randrange', (['max'], {}), '(max)\n', (5107, 5112), False, 'from random import randrange\n'), ((5763, 5788), 'pydicom.dataset.Dataset', 'pydicom.dataset.Dataset', ([], {}), 
'()\n', (5786, 5788), False, 'import pydicom\n')] |
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training, datasets, iterators, report
from chainer.training import extensions
import numpy as np
class MDN(chainer.Chain):
def __init__(self, hidden_dim, output_dim, k):
self.output_dim = output_dim
self.hidden_dim = hidden_dim
self.k = k
super(MDN, self).__init__(
input_layer=L.Linear(None, hidden_dim),
coef_layer=L.Linear(hidden_dim, k * output_dim),
mu_layer=L.Linear(hidden_dim, k * output_dim),
ln_var_layer=L.Linear(hidden_dim, k * output_dim),
)
def __call__(self, input):
coef, mu, ln_var = self.fprop(input)
def sample(row_num):
cum_prod = 0
r = np.random.uniform()
index = None
for i, probability in enumerate(coef[row_num]):
cum_prod += sum(probability)
if r <= cum_prod.data:
index = i
break
return F.gaussian(mu[row_num][index], ln_var[row_num][index])
output = F.expand_dims(sample(0), 0)
for row_num in range(1, input.shape[0]):
this_output = F.expand_dims(sample(row_num), 0)
output = F.concat((output, this_output), axis=0)
return output
def fprop(self, input):
k = self.k
output_dim = self.output_dim
h = self.input_layer(input)
coef = F.softmax(self.coef_layer(h))
mu = self.mu_layer(h)
ln_var = self.ln_var_layer(h)
mu = F.reshape(mu, (-1, k, output_dim))
coef = F.reshape(coef, (-1, k, output_dim))
ln_var = F.reshape(ln_var, (-1, k, output_dim))
return coef, mu, ln_var
def get_loss_func(self):
def lf(input, output, epsilon=1e-8):
output_dim = self.output_dim
coef, mu, ln_var = self.fprop(input)
output = F.reshape(output, (-1, 1, output_dim))
mu, output = F.broadcast(mu, output)
var = F.exp(ln_var)
density = F.sum(
coef *
(1 / (np.sqrt(2 * np.pi) * F.sqrt(var))) *
F.exp(-0.5 * F.square(output - mu) / var)
, axis=1)
nll = -F.sum(F.log(density))
report({'loss': nll}, self)
return nll
return lf
class Linear(chainer.Chain):
def __init__(self, hidden_dim, output_dim):
self.output_dim = output_dim
super(Linear, self).__init__(
input_layer=L.Linear(None, hidden_dim),
output_layer=L.Linear(hidden_dim, output_dim),
)
def __call__(self, input):
return self.fprop(input)
def fprop(self, input):
h = self.input_layer(input)
return self.output_layer(h)
def get_loss_func(self):
def lf(input, output):
pred = self.fprop(input)
loss = F.mean_squared_error(output.reshape(-1, 1), pred)
report({'loss': loss}, self)
return loss
return lf
def main():
model = MDN(256, 1, 5)
# model = Linear(256, 1)
points = 500
y = np.random.rand(points).astype(np.float32)
x = np.sin(2 * np.pi * y) + 0.2 * np.random.rand(points) * (np.cos(2 * np.pi * y) + 2)
x = x.astype(np.float32)
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
dataset = datasets.tuple_dataset.TupleDataset(x.reshape(-1, 1), y)
train_iter = iterators.SerialIterator(dataset, batch_size=100)
updater = training.StandardUpdater(train_iter, optimizer, loss_func=model.get_loss_func())
trainer = training.Trainer(updater, (2000, 'epoch'))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(['epoch', 'main/loss']))
trainer.run()
plt.ylim(-0.1, 1.1)
plt.plot(x, y, "b.")
plt.savefig("result/mdn-data_only.png")
plt.clf()
x_test = np.linspace(min(x), max(x), points).astype(np.float32)
y_pred = model(x_test.reshape(-1, 1)).data
plt.ylim(-0.1, 1.1)
plt.plot(x, y, "b.")
plt.plot(x_test, y_pred, "r.")
plt.savefig("result/mdn-with_preds.png")
if __name__ == '__main__':
main()
| [
"matplotlib.pyplot.clf",
"numpy.sin",
"chainer.iterators.SerialIterator",
"chainer.training.extensions.LogReport",
"chainer.links.Linear",
"chainer.functions.sqrt",
"chainer.functions.concat",
"chainer.functions.square",
"chainer.functions.gaussian",
"matplotlib.pyplot.ylim",
"chainer.training.e... | [((33, 58), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (51, 58), True, 'import matplotlib.pyplot as plt\n'), ((3431, 3456), 'chainer.optimizers.Adam', 'chainer.optimizers.Adam', ([], {}), '()\n', (3454, 3456), False, 'import chainer\n'), ((3572, 3621), 'chainer.iterators.SerialIterator', 'iterators.SerialIterator', (['dataset'], {'batch_size': '(100)'}), '(dataset, batch_size=100)\n', (3596, 3621), False, 'from chainer import training, datasets, iterators, report\n'), ((3731, 3773), 'chainer.training.Trainer', 'training.Trainer', (['updater', "(2000, 'epoch')"], {}), "(updater, (2000, 'epoch'))\n", (3747, 3773), False, 'from chainer import training, datasets, iterators, report\n'), ((3907, 3926), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (3915, 3926), True, 'import matplotlib.pyplot as plt\n'), ((3931, 3951), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""b."""'], {}), "(x, y, 'b.')\n", (3939, 3951), True, 'import matplotlib.pyplot as plt\n'), ((3956, 3995), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""result/mdn-data_only.png"""'], {}), "('result/mdn-data_only.png')\n", (3967, 3995), True, 'import matplotlib.pyplot as plt\n'), ((4000, 4009), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4007, 4009), True, 'import matplotlib.pyplot as plt\n'), ((4131, 4150), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (4139, 4150), True, 'import matplotlib.pyplot as plt\n'), ((4155, 4175), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""b."""'], {}), "(x, y, 'b.')\n", (4163, 4175), True, 'import matplotlib.pyplot as plt\n'), ((4180, 4210), 'matplotlib.pyplot.plot', 'plt.plot', (['x_test', 'y_pred', '"""r."""'], {}), "(x_test, y_pred, 'r.')\n", (4188, 4210), True, 'import matplotlib.pyplot as plt\n'), ((4215, 4255), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""result/mdn-with_preds.png"""'], {}), 
"('result/mdn-with_preds.png')\n", (4226, 4255), True, 'import matplotlib.pyplot as plt\n'), ((1658, 1692), 'chainer.functions.reshape', 'F.reshape', (['mu', '(-1, k, output_dim)'], {}), '(mu, (-1, k, output_dim))\n', (1667, 1692), True, 'import chainer.functions as F\n'), ((1708, 1744), 'chainer.functions.reshape', 'F.reshape', (['coef', '(-1, k, output_dim)'], {}), '(coef, (-1, k, output_dim))\n', (1717, 1744), True, 'import chainer.functions as F\n'), ((1762, 1800), 'chainer.functions.reshape', 'F.reshape', (['ln_var', '(-1, k, output_dim)'], {}), '(ln_var, (-1, k, output_dim))\n', (1771, 1800), True, 'import chainer.functions as F\n'), ((3302, 3323), 'numpy.sin', 'np.sin', (['(2 * np.pi * y)'], {}), '(2 * np.pi * y)\n', (3308, 3323), True, 'import numpy as np\n'), ((3793, 3815), 'chainer.training.extensions.LogReport', 'extensions.LogReport', ([], {}), '()\n', (3813, 3815), False, 'from chainer.training import extensions\n'), ((3836, 3882), 'chainer.training.extensions.PrintReport', 'extensions.PrintReport', (["['epoch', 'main/loss']"], {}), "(['epoch', 'main/loss'])\n", (3858, 3882), False, 'from chainer.training import extensions\n'), ((849, 868), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (866, 868), True, 'import numpy as np\n'), ((1114, 1168), 'chainer.functions.gaussian', 'F.gaussian', (['mu[row_num][index]', 'ln_var[row_num][index]'], {}), '(mu[row_num][index], ln_var[row_num][index])\n', (1124, 1168), True, 'import chainer.functions as F\n'), ((1345, 1384), 'chainer.functions.concat', 'F.concat', (['(output, this_output)'], {'axis': '(0)'}), '((output, this_output), axis=0)\n', (1353, 1384), True, 'import chainer.functions as F\n'), ((2022, 2060), 'chainer.functions.reshape', 'F.reshape', (['output', '(-1, 1, output_dim)'], {}), '(output, (-1, 1, output_dim))\n', (2031, 2060), True, 'import chainer.functions as F\n'), ((2086, 2109), 'chainer.functions.broadcast', 'F.broadcast', (['mu', 'output'], {}), '(mu, output)\n', (2097, 2109), 
True, 'import chainer.functions as F\n'), ((2129, 2142), 'chainer.functions.exp', 'F.exp', (['ln_var'], {}), '(ln_var)\n', (2134, 2142), True, 'import chainer.functions as F\n'), ((2393, 2420), 'chainer.report', 'report', (["{'loss': nll}", 'self'], {}), "({'loss': nll}, self)\n", (2399, 2420), False, 'from chainer import training, datasets, iterators, report\n'), ((3083, 3111), 'chainer.report', 'report', (["{'loss': loss}", 'self'], {}), "({'loss': loss}, self)\n", (3089, 3111), False, 'from chainer import training, datasets, iterators, report\n'), ((3252, 3274), 'numpy.random.rand', 'np.random.rand', (['points'], {}), '(points)\n', (3266, 3274), True, 'import numpy as np\n'), ((480, 506), 'chainer.links.Linear', 'L.Linear', (['None', 'hidden_dim'], {}), '(None, hidden_dim)\n', (488, 506), True, 'import chainer.links as L\n'), ((531, 567), 'chainer.links.Linear', 'L.Linear', (['hidden_dim', '(k * output_dim)'], {}), '(hidden_dim, k * output_dim)\n', (539, 567), True, 'import chainer.links as L\n'), ((590, 626), 'chainer.links.Linear', 'L.Linear', (['hidden_dim', '(k * output_dim)'], {}), '(hidden_dim, k * output_dim)\n', (598, 626), True, 'import chainer.links as L\n'), ((653, 689), 'chainer.links.Linear', 'L.Linear', (['hidden_dim', '(k * output_dim)'], {}), '(hidden_dim, k * output_dim)\n', (661, 689), True, 'import chainer.links as L\n'), ((2641, 2667), 'chainer.links.Linear', 'L.Linear', (['None', 'hidden_dim'], {}), '(None, hidden_dim)\n', (2649, 2667), True, 'import chainer.links as L\n'), ((2694, 2726), 'chainer.links.Linear', 'L.Linear', (['hidden_dim', 'output_dim'], {}), '(hidden_dim, output_dim)\n', (2702, 2726), True, 'import chainer.links as L\n'), ((3332, 3354), 'numpy.random.rand', 'np.random.rand', (['points'], {}), '(points)\n', (3346, 3354), True, 'import numpy as np\n'), ((3358, 3379), 'numpy.cos', 'np.cos', (['(2 * np.pi * y)'], {}), '(2 * np.pi * y)\n', (3364, 3379), True, 'import numpy as np\n'), ((2365, 2379), 'chainer.functions.log', 
'F.log', (['density'], {}), '(density)\n', (2370, 2379), True, 'import chainer.functions as F\n'), ((2218, 2236), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2225, 2236), True, 'import numpy as np\n'), ((2239, 2250), 'chainer.functions.sqrt', 'F.sqrt', (['var'], {}), '(var)\n', (2245, 2250), True, 'import chainer.functions as F\n'), ((2284, 2305), 'chainer.functions.square', 'F.square', (['(output - mu)'], {}), '(output - mu)\n', (2292, 2305), True, 'import chainer.functions as F\n')] |
#!/usr/bin/env python3
from __future__ import print_function, division
import numpy as np
import cvxpy as cvx
def polyproject(point, polyhedron=None, vertices=None):
"""
Projects a point onto a convex polyhedron. Makes use of cvxpy.
The polyhedron can be specified either as a set of vertices, or as a matrix
of equations describing the polyhedron. In the former case, the convex hull
of the vertices is computed. In the latter case, the input is a matrix `A`
such that A * [x, y, z]^T = [0, 0, 0]^T (in 3D).
Parameters
----------
point : array of shape (ndim,)
Point to project onto the polyhedron
polyhedron : array of shape (nfacets, ndim+1), optional
Array containing the matrix of equations describing the polyhedron.
Either this option or `vertices` must be provided.
vertices : array of shape (nvertices, ndim), optional
Array containing the vertices of the polyhedron. Either this parameter
or `polyhedron` must be provided. Ignored if polyhedron is provided.
Returns
-------
proj_point : array of shape (ndim,)
Projection of the given point onto the convex polyhedron.
polyhedron : array of shape (nfacets, ndim+1), optional
Array containing the matrix of equations describing the polyhedron.
Raises
------
ValueError
If neither `vertices` nor `polyhedron` is provided.
"""
if polyhedron is None:
if vertices is None:
raise ValueError('Must provide either vertices or polyhedron.')
from scipy.spatial import ConvexHull
conv_hull = ConvexHull(vertices)
polyhedron = conv_hull.equations
# Set up the convex optimization problem
ndim = point.size
x = cvx.Variable(ndim)
objective = cvx.Minimize(cvx.sum_entries(cvx.square(x - point)))
constraints = [polyhedron[:, :-1] * x <= -polyhedron[:, -1],]
prob = cvx.Problem(objective, constraints)
final_obj = prob.solve()
#print(final_obj)
#print(x.value)
# Convert x.value from matrix type to ndarray type.
# NOTE: This will have to change after cvxpy 1.0, which is more numpythonic
return np.squeeze(np.asarray(x.value)), polyhedron
| [
"numpy.asarray",
"cvxpy.square",
"cvxpy.Problem",
"cvxpy.Variable",
"scipy.spatial.ConvexHull"
] | [((1767, 1785), 'cvxpy.Variable', 'cvx.Variable', (['ndim'], {}), '(ndim)\n', (1779, 1785), True, 'import cvxpy as cvx\n'), ((1932, 1967), 'cvxpy.Problem', 'cvx.Problem', (['objective', 'constraints'], {}), '(objective, constraints)\n', (1943, 1967), True, 'import cvxpy as cvx\n'), ((1629, 1649), 'scipy.spatial.ConvexHull', 'ConvexHull', (['vertices'], {}), '(vertices)\n', (1639, 1649), False, 'from scipy.spatial import ConvexHull\n'), ((1831, 1852), 'cvxpy.square', 'cvx.square', (['(x - point)'], {}), '(x - point)\n', (1841, 1852), True, 'import cvxpy as cvx\n'), ((2199, 2218), 'numpy.asarray', 'np.asarray', (['x.value'], {}), '(x.value)\n', (2209, 2218), True, 'import numpy as np\n')] |
import json
import argparse
import numpy as np
import pickle
import re
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-save_path', type=str,default='unknown',help='name of this run log')
parser.add_argument('-ntask', type=int, default=1, help='number of tasks')
args = parser.parse_args()
return args
def main(args):
with open(f"./gptuneband.db/{args.save_path}.json") as f:
data = json.load(f)
if args.ntask==1:
tid = 0
history = []
for item in data["func_eval"]:
budget = item['task_parameter']['budget']
fval = item['evaluation_result']['validation_loss']
history.append([budget, fval])
task = item['task_parameter']
x = []
y = []
pre_fix = 0
max_num = -999
for info in history:
if info[0] > max_num:
max_num = info[0]
for info in history:
pre_fix += info[0]/max_num
if np.isclose(info[0], max_num):
x.append(pre_fix)
y.append(info[1])
results = [tid, task, [x,y]]
else:
task_set = set()
for item in data['func_eval']:
task = item['task_parameter']['dataset']
task_set.add(task)
data_task = {}
for t in task_set:
data_task[t] = [x for x in data['func_eval'] if x['task_parameter']['dataset'] == t]
results = []
for i, task in enumerate(task_set):
data_cur = data_task[task]
history = []
for item in data_cur:
budget = item['task_parameter']['budget']
fval = item['evaluation_result']['validation_loss']
history.append([budget, fval])
x = []
y = []
pre_fix = 0
max_num = -999
for info in history:
if info[0] > max_num:
max_num = info[0]
for info in history:
pre_fix += info[0]/max_num
if np.isclose(info[0], max_num):
x.append(pre_fix)
y.append(info[1])
results.append([i, task, [x,y]])
print("Finish parseing GPTuneBand results")
print(results)
print(f"saved path: {args.save_path}_parsed.pkl")
pickle.dump(results, open(f"{args.save_path}_parsed.pkl", "wb"))
if __name__ == "__main__":
main(parse_args()) | [
"numpy.isclose",
"json.load",
"argparse.ArgumentParser"
] | [((103, 128), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (126, 128), False, 'import argparse\n'), ((444, 456), 'json.load', 'json.load', (['f'], {}), '(f)\n', (453, 456), False, 'import json\n'), ((1012, 1040), 'numpy.isclose', 'np.isclose', (['info[0]', 'max_num'], {}), '(info[0], max_num)\n', (1022, 1040), True, 'import numpy as np\n'), ((2090, 2118), 'numpy.isclose', 'np.isclose', (['info[0]', 'max_num'], {}), '(info[0], max_num)\n', (2100, 2118), True, 'import numpy as np\n')] |
import numpy as np
from pyHalo.defaults import lenscone_default
from scipy.interpolate import interp1d
from pyHalo.Cosmology.cosmology import Cosmology
def interpolate_ray_paths(x_coordinates, y_coordinates, lens_model, kwargs_lens, zsource,
terminate_at_source=False, source_x=None, source_y=None, evaluate_at_mean=False,
cosmo=None):
"""
:param x_coordinates: x coordinates to interpolate (arcsec) (list)
:param y_coordinates: y coordinates to interpolate (arcsec) (list)
Typically x_coordinates/y_coordinates would be four image positions, or the coordinate of the lens centroid
:param lens_model: instance of LensModel (lenstronomy)
:param kwargs_lens: keyword arguments for lens model
:param zsource: source redshift
:param terminate_at_source: fix the final angular coordinate to the source coordinate
:param source_x: source x coordinate (arcsec)
:param source_y: source y coordinate (arcsec)
:param evaluate_at_mean: if True, returns two single interp1d instances (one for each of x/y) that return the
average of each individual x/y coordinate evaluated at each lens plane. For example, if you pass in four images positions
the output would be an interpolation of the average x/y coordinate along the path traversed by the light
(This is useful for aligning realizations with a background source significantly offset from the lens centroid)
:return: Instances of interp1d (scipy) that return the angular coordinate of a ray given a
comoving distance
"""
angle_x = []
angle_y = []
if cosmo is None:
cosmo = Cosmology()
for i, (xpos, ypos) in enumerate(zip(x_coordinates, y_coordinates)):
theta_x = [xpos]
theta_y = [ypos]
ray_x, ray_y, d = compute_comoving_ray_path(xpos, ypos, lens_model, kwargs_lens, zsource,
terminate_at_source, source_x, source_y, cosmo=cosmo)
for rx, ry, di in zip(ray_x[1:], ray_y[1:], d[1:]):
theta_x.append(rx / di)
theta_y.append(ry / di)
distances = [0.] + list(d[1:])
distances = np.array(distances)
theta_x = np.array(theta_x)
theta_y = np.array(theta_y)
angle_x.append(interp1d(distances, theta_x))
angle_y.append(interp1d(distances, theta_y))
if evaluate_at_mean:
zrange = np.linspace(0., zsource, 100)
comoving_distance_calc = cosmo.D_C_transverse
distances = [comoving_distance_calc(zi) for zi in zrange]
angular_coordinates_x = []
angular_coordinates_y = []
for di in distances:
x_coords = [ray_x(di) for ray_x in angle_x]
y_coords = [ray_y(di) for ray_y in angle_y]
x_center = np.mean(x_coords)
y_center = np.mean(y_coords)
angular_coordinates_x.append(x_center)
angular_coordinates_y.append(y_center)
angle_x = [interp1d(distances, angular_coordinates_x)]
angle_y = [interp1d(distances, angular_coordinates_y)]
return angle_x, angle_y
def compute_comoving_ray_path(x_coordinate, y_coordinate, lens_model, kwargs_lens, zsource,
terminate_at_source=False, source_x=None, source_y=None, cosmo=None):
"""
:param x_coordinate: x coordinates to interpolate (arcsec) (float)
:param y_coordinate: y coordinates to interpolate (arcsec) (float)
Typically x_coordinates/y_coordinates would be four image positions, or the coordinate of the lens centroid
:param lens_model: instance of LensModel (lenstronomy)
:param kwargs_lens: keyword arguments for lens model
:param zsource: source redshift
:param terminate_at_source: fix the final angular coordinate to the source coordinate
:param source_x: source x coordinate (arcsec)
:param source_y: source y coordinate (arcsec)
:return: Instance of interp1d (scipy) that returns the angular coordinate of a ray given a
comoving distance
"""
if cosmo is None:
cosmo = Cosmology()
redshift_list = lens_model.redshift_list + [zsource]
zstep = lenscone_default.default_z_step
finely_sampled_redshifts = np.linspace(zstep, zsource - zstep, 50)
all_redshifts = np.unique(np.append(redshift_list, finely_sampled_redshifts))
all_redshifts_sorted = all_redshifts[np.argsort(all_redshifts)]
comoving_distance_calc = cosmo.D_C_transverse
x_start, y_start = 0., 0.
z_start = 0.
x_list = [0.]
y_list = [0.]
distances = [0.]
alpha_x_start, alpha_y_start = x_coordinate, y_coordinate
for zi in all_redshifts_sorted:
x_start, y_start, alpha_x_start, alpha_y_start = lens_model.lens_model.ray_shooting_partial(x_start, y_start,
alpha_x_start, alpha_y_start,
z_start, zi, kwargs_lens)
d = float(comoving_distance_calc(zi))
x_list.append(x_start)
y_list.append(y_start)
distances.append(d)
z_start = zi
if terminate_at_source:
d_src = comoving_distance_calc(zsource)
x_list[-1] = source_x * d_src
y_list[-1] = source_y * d_src
return np.array(x_list), np.array(y_list), np.array(distances)
def sample_density(probability_density, Nsamples, pixel_scale, x_0, y_0, Rmax, smoothing_scale=4):
"""
:param probability_density:
:param Nsamples:
:param pixel_scale:
:param x_0:
:param y_0:
:param Rmax:
:param smoothing_scale:
:return:
"""
probnorm = probability_density / probability_density.sum()
s = probnorm.shape[0]
p = probnorm.ravel()
values = np.arange(s ** 2)
x_out, y_out = np.array([]), np.array([])
ndraw = Nsamples
while ndraw > 0:
ndraw = Nsamples - len(x_out)
inds = np.random.choice(values, p=p, size=ndraw, replace=True)
pairs = np.indices(dimensions=(s, s)).T
locations = pairs.reshape(-1, 2)[inds]
x_sample_pixel, y_sample_pixel = locations[:, 0], locations[:, 1]
# transform to arcsec
x_sample_arcsec = (x_sample_pixel - s / 2) * pixel_scale
y_sample_arcsec = (y_sample_pixel - s / 2) * pixel_scale
# smooth on sub-pixel scale
pixel_smoothing_kernel = pixel_scale / smoothing_scale
# apply smoothing to remove artificial tiling
x_sample_arcsec += np.random.normal(0, pixel_smoothing_kernel, ndraw)
y_sample_arcsec += np.random.normal(0, pixel_smoothing_kernel, ndraw)
# keep circular symmetry
r = np.sqrt(x_sample_arcsec ** 2 + y_sample_arcsec ** 2)
keep = np.where(r <= Rmax)
x_out = np.append(x_out, x_sample_arcsec[keep])
y_out = np.append(y_out, y_sample_arcsec[keep])
# originally this returned coord_x and coord_y, shouldn't it return x_out and y_out?
return x_out, y_out
def sample_circle(max_rendering_range, Nsmooth, center_x, center_y):
"""
This function distributes points smoothly accross a plane.
Parameters
----------
max_rendering_range : radius of rendering area (already scaled) (arcsec)
Nsmooth : number of points to render
center_x : center x coordinate of image
center_y : center y coordinate of image
Returns
-------
coord_x_smooth : x-coordinate of point (arcsec)
coord_y_smooth : y-coordinate of point (arcsec)
"""
# SAMPLE UNIFORM POINTS IN A CIRCLE
radii = np.random.uniform(0, max_rendering_range ** 2, Nsmooth)
# note you have to sample out to r^2 and then take sqrt
angles = np.random.uniform(0, 2 * np.pi, Nsmooth)
coord_x_smooth = radii ** 0.5 * np.cos(angles) + center_x
coord_y_smooth = radii ** 0.5 * np.sin(angles) + center_y
return coord_x_smooth, coord_y_smooth
def sample_clustered(lens_model, kwargs_lens, center_x, center_y, n_samples, max_rendering_range, npix):
    """Distribute rendering points so they cluster in areas of higher projected mass.

    Parameters
    ----------
    lens_model : lens model at the lensing plane (must expose ``kappa``)
    kwargs_lens : keyword arguments for the lens model (from the realization instance)
    center_x : center x coordinate of the image (arcsec)
    center_y : center y coordinate of the image (arcsec)
    n_samples : number of points to render
    max_rendering_range : radius of the rendering area, already scaled (arcsec)
    npix : number of pixels along one axis of the sampling grid

    Returns
    -------
    (coord_x, coord_y) : point coordinates (arcsec)
    """
    # symmetric grid around the origin, later shifted onto the image center
    grid_x_base = np.linspace(-max_rendering_range, max_rendering_range, npix)
    grid_y_base = np.linspace(-max_rendering_range, max_rendering_range, npix)
    pixel_scale = 2 * max_rendering_range / npix
    xx_base, yy_base = np.meshgrid(grid_x_base, grid_y_base)
    shape0 = xx_base.shape
    xcoords, ycoords = xx_base + center_x, yy_base + center_y
    # BUG FIX: xcoords/ycoords already include the center offset; the original
    # added center_x/center_y a second time when evaluating the convergence,
    # sampling the mass map at twice the intended offset.
    projected_mass = lens_model.kappa(xcoords.ravel(), ycoords.ravel(), kwargs_lens).reshape(shape0)
    coord_x, coord_y = sample_density(projected_mass, n_samples, pixel_scale,
                                      center_x, center_y, max_rendering_range)
    return coord_x, coord_y
| [
"numpy.random.uniform",
"numpy.meshgrid",
"numpy.argsort",
"numpy.append",
"numpy.indices",
"numpy.where",
"numpy.arange",
"numpy.array",
"numpy.linspace",
"numpy.random.choice",
"numpy.random.normal",
"scipy.interpolate.interp1d",
"numpy.mean",
"numpy.cos",
"numpy.sin",
"pyHalo.Cosmol... | [((4324, 4363), 'numpy.linspace', 'np.linspace', (['zstep', '(zsource - zstep)', '(50)'], {}), '(zstep, zsource - zstep, 50)\n', (4335, 4363), True, 'import numpy as np\n'), ((5982, 5999), 'numpy.arange', 'np.arange', (['(s ** 2)'], {}), '(s ** 2)\n', (5991, 5999), True, 'import numpy as np\n'), ((7773, 7828), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(max_rendering_range ** 2)', 'Nsmooth'], {}), '(0, max_rendering_range ** 2, Nsmooth)\n', (7790, 7828), True, 'import numpy as np\n'), ((7902, 7942), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)', 'Nsmooth'], {}), '(0, 2 * np.pi, Nsmooth)\n', (7919, 7942), True, 'import numpy as np\n'), ((8847, 8907), 'numpy.linspace', 'np.linspace', (['(-max_rendering_range)', 'max_rendering_range', 'npix'], {}), '(-max_rendering_range, max_rendering_range, npix)\n', (8858, 8907), True, 'import numpy as np\n'), ((8926, 8986), 'numpy.linspace', 'np.linspace', (['(-max_rendering_range)', 'max_rendering_range', 'npix'], {}), '(-max_rendering_range, max_rendering_range, npix)\n', (8937, 8986), True, 'import numpy as np\n'), ((9059, 9096), 'numpy.meshgrid', 'np.meshgrid', (['grid_x_base', 'grid_y_base'], {}), '(grid_x_base, grid_y_base)\n', (9070, 9096), True, 'import numpy as np\n'), ((1659, 1670), 'pyHalo.Cosmology.cosmology.Cosmology', 'Cosmology', ([], {}), '()\n', (1668, 1670), False, 'from pyHalo.Cosmology.cosmology import Cosmology\n'), ((2200, 2219), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (2208, 2219), True, 'import numpy as np\n'), ((2238, 2255), 'numpy.array', 'np.array', (['theta_x'], {}), '(theta_x)\n', (2246, 2255), True, 'import numpy as np\n'), ((2274, 2291), 'numpy.array', 'np.array', (['theta_y'], {}), '(theta_y)\n', (2282, 2291), True, 'import numpy as np\n'), ((2443, 2473), 'numpy.linspace', 'np.linspace', (['(0.0)', 'zsource', '(100)'], {}), '(0.0, zsource, 100)\n', (2454, 2473), True, 'import numpy as np\n'), ((4167, 4178), 
'pyHalo.Cosmology.cosmology.Cosmology', 'Cosmology', ([], {}), '()\n', (4176, 4178), False, 'from pyHalo.Cosmology.cosmology import Cosmology\n'), ((4398, 4448), 'numpy.append', 'np.append', (['redshift_list', 'finely_sampled_redshifts'], {}), '(redshift_list, finely_sampled_redshifts)\n', (4407, 4448), True, 'import numpy as np\n'), ((4496, 4521), 'numpy.argsort', 'np.argsort', (['all_redshifts'], {}), '(all_redshifts)\n', (4506, 4521), True, 'import numpy as np\n'), ((5512, 5528), 'numpy.array', 'np.array', (['x_list'], {}), '(x_list)\n', (5520, 5528), True, 'import numpy as np\n'), ((5530, 5546), 'numpy.array', 'np.array', (['y_list'], {}), '(y_list)\n', (5538, 5546), True, 'import numpy as np\n'), ((5548, 5567), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (5556, 5567), True, 'import numpy as np\n'), ((6020, 6032), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6028, 6032), True, 'import numpy as np\n'), ((6034, 6046), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6042, 6046), True, 'import numpy as np\n'), ((6145, 6200), 'numpy.random.choice', 'np.random.choice', (['values'], {'p': 'p', 'size': 'ndraw', 'replace': '(True)'}), '(values, p=p, size=ndraw, replace=True)\n', (6161, 6200), True, 'import numpy as np\n'), ((6714, 6764), 'numpy.random.normal', 'np.random.normal', (['(0)', 'pixel_smoothing_kernel', 'ndraw'], {}), '(0, pixel_smoothing_kernel, ndraw)\n', (6730, 6764), True, 'import numpy as np\n'), ((6792, 6842), 'numpy.random.normal', 'np.random.normal', (['(0)', 'pixel_smoothing_kernel', 'ndraw'], {}), '(0, pixel_smoothing_kernel, ndraw)\n', (6808, 6842), True, 'import numpy as np\n'), ((6889, 6941), 'numpy.sqrt', 'np.sqrt', (['(x_sample_arcsec ** 2 + y_sample_arcsec ** 2)'], {}), '(x_sample_arcsec ** 2 + y_sample_arcsec ** 2)\n', (6896, 6941), True, 'import numpy as np\n'), ((6957, 6976), 'numpy.where', 'np.where', (['(r <= Rmax)'], {}), '(r <= Rmax)\n', (6965, 6976), True, 'import numpy as np\n'), ((6993, 7032), 
'numpy.append', 'np.append', (['x_out', 'x_sample_arcsec[keep]'], {}), '(x_out, x_sample_arcsec[keep])\n', (7002, 7032), True, 'import numpy as np\n'), ((7049, 7088), 'numpy.append', 'np.append', (['y_out', 'y_sample_arcsec[keep]'], {}), '(y_out, y_sample_arcsec[keep])\n', (7058, 7088), True, 'import numpy as np\n'), ((2316, 2344), 'scipy.interpolate.interp1d', 'interp1d', (['distances', 'theta_x'], {}), '(distances, theta_x)\n', (2324, 2344), False, 'from scipy.interpolate import interp1d\n'), ((2369, 2397), 'scipy.interpolate.interp1d', 'interp1d', (['distances', 'theta_y'], {}), '(distances, theta_y)\n', (2377, 2397), False, 'from scipy.interpolate import interp1d\n'), ((2828, 2845), 'numpy.mean', 'np.mean', (['x_coords'], {}), '(x_coords)\n', (2835, 2845), True, 'import numpy as np\n'), ((2869, 2886), 'numpy.mean', 'np.mean', (['y_coords'], {}), '(y_coords)\n', (2876, 2886), True, 'import numpy as np\n'), ((3009, 3051), 'scipy.interpolate.interp1d', 'interp1d', (['distances', 'angular_coordinates_x'], {}), '(distances, angular_coordinates_x)\n', (3017, 3051), False, 'from scipy.interpolate import interp1d\n'), ((3072, 3114), 'scipy.interpolate.interp1d', 'interp1d', (['distances', 'angular_coordinates_y'], {}), '(distances, angular_coordinates_y)\n', (3080, 3114), False, 'from scipy.interpolate import interp1d\n'), ((6218, 6247), 'numpy.indices', 'np.indices', ([], {'dimensions': '(s, s)'}), '(dimensions=(s, s))\n', (6228, 6247), True, 'import numpy as np\n'), ((7979, 7993), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (7985, 7993), True, 'import numpy as np\n'), ((8041, 8055), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (8047, 8055), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import sys
import gif
from os import getcwd, path
def hilbert(i):
    """Map a 1-D index ``i`` to its (x, y) grid cell on a Hilbert curve.

    Reads the module-level global ``order``; the curve covers a
    2**order x 2**order grid. Returns a length-2 integer array.
    """
    base_cells = np.array([[0, 0], [0, 1], [1, 1], [1, 0]])
    vertex = base_cells[i & 3]
    for level in range(1, order):
        i = i >> 2
        quadrant = i & 3
        side = int(2 ** level)
        if quadrant == 0:
            # lower-left quadrant: reflect across the main diagonal
            vertex[0], vertex[1] = vertex[1], vertex[0]
        elif quadrant == 1:
            # upper-left quadrant: shift up
            vertex[1] += side
        elif quadrant == 2:
            # upper-right quadrant: shift up and right
            vertex[0] += side
            vertex[1] += side
        elif quadrant == 3:
            # lower-right quadrant: reflect across the anti-diagonal, shift right
            vertex[0], vertex[1] = side - 1 - vertex[1], side - 1 - vertex[0]
            vertex[0] += side
    return vertex
@gif.frame
def plot(i):
    """Render one animation frame revealing segment ``i`` of the Hilbert curve.

    Reads the module globals ``curve``, ``img``, ``width``, ``heigth``,
    ``background`` and ``empty``; progressively sets the overlay's alpha
    channel to transparent along the curve so the image shows through.
    """
    # Fix: frameon expects a bool; the original passed the string 'False',
    # which is truthy and therefore enabled the figure frame.
    plt.figure(frameon=False)
    plt.imshow(img)
    x1 = curve[i-1, 0]
    x2 = curve[i, 0]
    y1 = curve[i-1, 1]
    y2 = curve[i, 1]
    # pixel span covered by this curve segment, clamped to the image bounds
    x = np.arange(min(x1, x2), min(max(x1, x2) + 1, width))
    y = np.arange(min(y1, y2), min(max(y1, y2) + 1, heigth))
    xx, yy = np.meshgrid(x, y)
    # punch a transparent hole in the overlay's alpha channel.
    # NOTE(review): background is indexed [x, y] while imshow treats axis 0 as
    # rows (y) — confirm the orientation is intended.
    background[xx, yy, 3] = empty
    plt.imshow(background)
    plt.axis('off')
if __name__ == '__main__':
    # CLI: <order> <input image> <output gif>; falls back to bundled defaults.
    defaultInputs = 3
    if len(sys.argv)-1 == defaultInputs:
        order = int(sys.argv[1])
        imagePath = sys.argv[2]
        gifPath = sys.argv[3]
    else:
        order = 1
        imagePath = 'Superman.png'
        gifPath = 'Superman.gif'
    # LOAD IMAGE
    img = mpimg.imread(imagePath)
    # alpha values: 0 = fully transparent, 255 = fully opaque
    empty, full = 0, 255
    width, heigth, RGBa = img.shape
    # opaque overlay covering the image (+1 guard row/col for the arange clamp)
    background = np.zeros((width+1,heigth+1,RGBa))
    background[:,:,RGBa-1] = full
    # a Hilbert curve of this order visits N*N cells of side l pixels
    N = int(2**order)
    total = N*N # 2D
    l = int(width / N) # or heigth
    curve = np.zeros((total,2)).astype(int)
    frames = []
    for i in range(0,total):
        # map 1-D index to grid cell, scale to pixels, center within the cell
        curve[i] = hilbert(i)
        curve[i] = curve[i]*l
        curve[i] += int(l/2)
        if i > 0:
            frames.append(plot(i))
gif.save(frames,path.join(getcwd(),gifPath),duration=50) | [
"matplotlib.image.imread",
"numpy.meshgrid",
"os.getcwd",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"numpy.array"
] | [((182, 224), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 1], [1, 0]]'], {}), '([[0, 0], [0, 1], [1, 1], [1, 0]])\n', (190, 224), True, 'import numpy as np\n'), ((730, 757), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'frameon': '"""False"""'}), "(frameon='False')\n", (740, 757), True, 'import matplotlib.pyplot as plt\n'), ((766, 781), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (776, 781), True, 'import matplotlib.pyplot as plt\n'), ((987, 1004), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (998, 1004), True, 'import numpy as np\n'), ((1040, 1062), 'matplotlib.pyplot.imshow', 'plt.imshow', (['background'], {}), '(background)\n', (1050, 1062), True, 'import matplotlib.pyplot as plt\n'), ((1067, 1082), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1075, 1082), True, 'import matplotlib.pyplot as plt\n'), ((1396, 1419), 'matplotlib.image.imread', 'mpimg.imread', (['imagePath'], {}), '(imagePath)\n', (1408, 1419), True, 'import matplotlib.image as mpimg\n'), ((1498, 1537), 'numpy.zeros', 'np.zeros', (['(width + 1, heigth + 1, RGBa)'], {}), '((width + 1, heigth + 1, RGBa))\n', (1506, 1537), True, 'import numpy as np\n'), ((1656, 1676), 'numpy.zeros', 'np.zeros', (['(total, 2)'], {}), '((total, 2))\n', (1664, 1676), True, 'import numpy as np\n'), ((1909, 1917), 'os.getcwd', 'getcwd', ([], {}), '()\n', (1915, 1917), False, 'from os import getcwd, path\n')] |
import sc2reader
from sc2reader.engine.plugins import SelectionTracker, APMTracker
from selection_plugin import ActiveSelection
from collections import Counter
import json
import csv
import numpy as np
import logging
import sys
from modified_rank_plugin import ModifiedRank
# --- logging setup: stream everything to stdout ---
root = logging.getLogger()
# root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
root.addHandler(handler)
# ability-name -> event-type lookup table
with open("lookup.json") as fp:
    lookup = json.load(fp)
# sc2reader plugins: selection tracking, rank, APM, active selection
sc2reader.engine.register_plugin(SelectionTracker())
sc2reader.engine.register_plugin(ModifiedRank())
sc2reader.engine.register_plugin(APMTracker())
sc2reader.engine.register_plugin(ActiveSelection())
# Fix: np.str was a deprecated alias removed in NumPy 1.24; builtin str is
# the documented replacement.
game_ids = np.loadtxt("game_ids.txt", dtype=str)
replays = {}
# Fix: the csv docs require newline='' on files opened for csv writing,
# otherwise extra blank rows appear on Windows.
with open("sc2_test_events.csv", 'w', newline='') as fp:
    events_out = csv.DictWriter(fp, fieldnames=["game_id", "uid", "frame", "type"])
    events_out.writeheader()
    for game_id in game_ids:
        r = sc2reader.load_replay("replays/gggreplays_{}.SC2Replay".format(game_id))
        print(game_id, r.build, r.datapack.id)
        # skip replays sc2reader could not fully parse
        if hasattr(r, "marked_error") and r.marked_error:
            print("skipping", r.filename, "as it contains errors")
            print(r.filename, "has build", r.build, "but best available datapack is", r.datapack.id)
            continue
        replays[game_id] = r
        commands = [e for e in r.events if "CommandEvent" in e.name and e.ability]
        n_features = len(lookup["features"])  # currently unused
        for player in r.players:
            uid = player.detail_data['bnet']['uid']
            player_commands = [c for c in commands if c.player.uid == player.uid]
            for command in player_commands:
                try:
                    com_type = lookup[command.name][command.ability.name]
                except KeyError:
                    print("lookup doesn't have {} - {} - {}".format(command.ability.name, command.name,
                                                                    command.active_selection))
                    continue
                if com_type is None:
                    continue
                selection = command.active_selection
                if com_type == "Order":
                    # classify orders by the selection: economy vs military units
                    selection = [u for u in command.active_selection if
                                 u.name not in lookup["ignoreunits"] and not u.is_building]
                    if len(selection) == 0:
                        continue
                    if all(u.name in lookup["econunits"] for u in selection):
                        com_type = "OrderEcon"
                    else:
                        com_type = "OrderMilitary"
                # if com_type.startswith("Order"):
                #     for _ in range(len(selection)):
                #         events_out.writerow({"game_id": game_id, "uid": uid, "frame": command.frame, "type": com_type})
                if any(u.name == "Larva" for u in selection) and command.ability.name.startswith("Morph"):
                    # emit one event per larva that actually morphed within 30 frames
                    larva = [u for u in selection if u.name == "Larva"]
                    morphs = [t for t in r.tracker_events if t.name == "UnitTypeChangeEvent" and
                              command.frame <= t.frame <= command.frame + 30 and t.unit in larva]
                    for _ in range(len(morphs)):
                        events_out.writerow({"game_id": game_id, "uid": uid, "frame": command.frame, "type": com_type})
                else:
                    events_out.writerow({"game_id": game_id, "uid": uid, "frame": command.frame, "type": com_type})
| [
"json.load",
"sc2reader.engine.plugins.SelectionTracker",
"logging.StreamHandler",
"logging.Formatter",
"sc2reader.engine.plugins.APMTracker",
"selection_plugin.ActiveSelection",
"modified_rank_plugin.ModifiedRank",
"numpy.loadtxt",
"logging.getLogger",
"csv.DictWriter"
] | [((282, 301), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (299, 301), False, 'import logging\n'), ((344, 377), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (365, 377), False, 'import logging\n'), ((422, 495), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (439, 495), False, 'import logging\n'), ((827, 867), 'numpy.loadtxt', 'np.loadtxt', (['"""game_ids.txt"""'], {'dtype': 'np.str'}), "('game_ids.txt', dtype=np.str)\n", (837, 867), True, 'import numpy as np\n'), ((599, 612), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (608, 612), False, 'import json\n'), ((647, 665), 'sc2reader.engine.plugins.SelectionTracker', 'SelectionTracker', ([], {}), '()\n', (663, 665), False, 'from sc2reader.engine.plugins import SelectionTracker, APMTracker\n'), ((700, 714), 'modified_rank_plugin.ModifiedRank', 'ModifiedRank', ([], {}), '()\n', (712, 714), False, 'from modified_rank_plugin import ModifiedRank\n'), ((749, 761), 'sc2reader.engine.plugins.APMTracker', 'APMTracker', ([], {}), '()\n', (759, 761), False, 'from sc2reader.engine.plugins import SelectionTracker, APMTracker\n'), ((796, 813), 'selection_plugin.ActiveSelection', 'ActiveSelection', ([], {}), '()\n', (811, 813), False, 'from selection_plugin import ActiveSelection\n'), ((943, 1009), 'csv.DictWriter', 'csv.DictWriter', (['fp'], {'fieldnames': "['game_id', 'uid', 'frame', 'type']"}), "(fp, fieldnames=['game_id', 'uid', 'frame', 'type'])\n", (957, 1009), False, 'import csv\n')] |
import os
import time
import sys
import shutil
import random
from time import strftime
from argparse import ArgumentParser
import numpy as np
import jittor as jt
from jittor import init
from jittor import optim
from jittor.dataset import Dataset
import utils.util as util
import utils.config as config
import h5py
# Use 1-4 CPU threads to train.
# Don't use too many CPU threads, which will slow down the training.
import jittor.nn as nn
class PartFeatSampler(nn.Module):
    """Samples a part latent code, optionally VAE-style.

    With ``probabilistic=True``, ``execute`` returns the reparameterized
    sample concatenated with the per-element KL-divergence term; otherwise
    it returns the deterministic mean.
    """

    def __init__(self, feature_size, probabilistic=False):
        super(PartFeatSampler, self).__init__()
        self.probabilistic = probabilistic
        self.mlp2mu = nn.Linear(feature_size, feature_size)
        self.mlp2var = nn.Linear(feature_size, feature_size)

    def execute(self, x):
        mu = self.mlp2mu(x)
        if not self.probabilistic:
            return mu
        logvar = self.mlp2var(x)
        std = (logvar * 0.5).exp()
        noise = jt.randn_like(std)
        # per-element KL term: 1 + logvar - mu^2 - exp(logvar)
        kld = 1 + logvar - mu.pow(2) - logvar.exp()
        sample = noise * std + mu
        return jt.concat([sample, kld], 1)
class VoxelEncoder(nn.Module):
    """3D CNN mapping a 32^3 voxel grid to a flat ``feat_len`` feature vector.

    Four instance-normalized conv layers progressively shrink the grid, and
    a final valid convolution collapses it to a single feature per sample.
    """

    def __init__(self, feat_len, hidden_size=32):
        super(VoxelEncoder, self).__init__()
        self.conv1 = nn.Conv3d(1, hidden_size, 4, padding=1, stride=1, bias=False)
        self.in1 = nn.InstanceNorm3d(hidden_size)
        self.conv2 = nn.Conv3d(hidden_size, hidden_size * 2, 4, padding=1, stride=2, bias=False)
        self.in2 = nn.InstanceNorm3d(hidden_size * 2)
        self.conv3 = nn.Conv3d(hidden_size * 2, hidden_size * 4, 4, padding=1, stride=2, bias=False)
        self.in3 = nn.InstanceNorm3d(hidden_size * 4)
        self.conv4 = nn.Conv3d(hidden_size * 4, hidden_size * 8, 4, padding=1, stride=2, bias=False)
        self.in4 = nn.InstanceNorm3d(hidden_size * 8)
        self.conv5 = nn.Conv3d(hidden_size * 8, feat_len, 3, padding=0, stride=1, bias=True)
        self.leaky_relu = nn.LeakyReLU(0.02)
        # Xavier init for the normalized conv stack. NOTE(review): conv5's
        # weight is left at its default init (only its bias is zeroed) —
        # confirm this asymmetry is intentional.
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            init.xavier_uniform_(conv.weight)
        init.constant_(self.conv5.bias, 0)

    def execute(self, x):
        batch_size = x.shape[0]
        out = x.reshape(batch_size, 1, 32, 32, 32)
        stack = ((self.conv1, self.in1), (self.conv2, self.in2),
                 (self.conv3, self.in3), (self.conv4, self.in4))
        for conv, norm in stack:
            out = self.leaky_relu(norm(conv(out)))
        return self.conv5(out).reshape(batch_size, -1)
class PartEncoder(nn.Module):
    """Encodes a part voxel grid plus a 3-dim vector into a latent code."""

    def __init__(self, feat_len, latent_size, probabilistic=False):
        super(PartEncoder, self).__init__()
        self.vox_enc = VoxelEncoder(feat_len)
        self.mlp1 = nn.Linear(feat_len + 3, latent_size)
        init.gauss_(self.mlp1.weight, mean=0.0, std=0.02)
        init.constant_(self.mlp1.bias, 0)
        self.leaky_relu = nn.LeakyReLU(0.02)
        # optional sampling head. NOTE(review): the sampler is built with its
        # default probabilistic=False, so it currently returns only the mean —
        # confirm whether probabilistic should be forwarded here.
        if probabilistic:
            self.sampler = PartFeatSampler(latent_size)
        else:
            self.sampler = None

    def execute(self, x, norms):
        voxel_feat = self.leaky_relu(self.vox_enc(x))
        latent = self.mlp1(jt.concat([voxel_feat, norms], -1))
        if self.sampler is None:
            return latent
        return self.sampler(latent)
###############################################################################
# Decoder
###############################################################################
class IM_Tiny(nn.Module):
    """Small IM-Net-style implicit decoder: (feature, xyz) -> value in [0, 1]."""

    def __init__(self, feat_len, hidden_size=32):
        super(IM_Tiny, self).__init__()
        self.mlp1 = nn.Linear(feat_len + 3, hidden_size * 8)
        self.mlp2 = nn.Linear(hidden_size * 8, hidden_size * 8)
        self.mlp3 = nn.Linear(hidden_size * 8, hidden_size * 8)
        self.mlp4 = nn.Linear(hidden_size * 8, hidden_size * 4)
        self.mlp5 = nn.Linear(hidden_size * 4, hidden_size * 2)
        self.mlp6 = nn.Linear(hidden_size * 2, hidden_size)
        self.mlp7 = nn.Linear(hidden_size, 1)
        # hidden layers: small gaussian weights, zero bias
        for layer in (self.mlp1, self.mlp2, self.mlp3,
                      self.mlp4, self.mlp5, self.mlp6):
            init.gauss_(layer.weight, mean=0.0, std=0.02)
            init.constant_(layer.bias, 0)
        # output layer: near-zero weights with a 0.5 bias before the sigmoid
        init.gauss_(self.mlp7.weight, mean=1e-5, std=0.02)
        init.constant_(self.mlp7.bias, 0.5)
        self.leaky_relu = nn.LeakyReLU(0.02)
        self.sigmoid = nn.Sigmoid()

    def execute(self, net):
        hidden = net
        for layer in (self.mlp1, self.mlp2, self.mlp3,
                      self.mlp4, self.mlp5, self.mlp6):
            hidden = self.leaky_relu(layer(hidden))
        return self.sigmoid(self.mlp7(hidden))
class NodeClassifier(nn.Module):
    """Single linear layer + sigmoid producing an 8-way score vector."""

    def __init__(self, feat_len):
        super(NodeClassifier, self).__init__()
        self.mlp1 = nn.Linear(feat_len, 8)
        self.sigmoid = nn.Sigmoid()

    def execute(self, x):
        return self.sigmoid(self.mlp1(x))
class PartDecoder(nn.Module):
    """Implicit part decoder: evaluates values at query points given a part feature."""

    def __init__(self, feat_len):
        super(PartDecoder, self).__init__()
        self.predictor = IM_Tiny(feat_len)
        self.classifier = NodeClassifier(feat_len)  # not used by execute()
        self.bce_loss = nn.BCELoss()

    def execute(self, x, in_feat):
        batch_size, num_points, _ = x.shape
        # tile the per-part feature so every query point is paired with it
        tiled = in_feat.view(batch_size, 1, -1).expand(-1, num_points, -1)
        queries = jt.concat([tiled, x], -1).view(batch_size * num_points, -1)
        return self.predictor(queries)

    def loss(self, pred, gt):
        """Binary cross-entropy between predicted and ground-truth values."""
        return self.bce_loss(pred, gt)
###############################################################################
# Dataset
###############################################################################
class PartNetGeoDataset(Dataset):
    """PartNet geometry dataset backed by a single HDF5 file.

    Loads the cells/points/values/normals/voxels arrays fully into memory
    and, per item, re-balances the sampled points to half inside / half
    outside (values above / below 0.5).
    """

    def __init__(self, conf, split='train', batch_size=1, shuffle=False):
        super(PartNetGeoDataset, self).__init__()
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.data_path = conf.data_path
        if split == 'train':
            item_list = conf.train_list
            self.data_h5 = conf.train_dataset
        elif split == 'val':
            item_list = conf.val_list
            self.data_h5 = conf.val_dataset
        elif split == 'test':
            item_list = conf.test_list
            self.data_h5 = conf.test_dataset
        self.keys = ['cells', 'points', 'values', 'normals', 'voxels']
        self.data_dict = self.load_h5f(conf.load_ram)

    def load_h5f(self, load_ram=True):
        # NOTE(review): load_ram is currently ignored — the arrays are always
        # materialized in memory via np.array().
        src_file = os.path.join(self.data_path, self.data_h5)
        print(src_file)
        # Fix: use a context manager so the HDF5 handle is not leaked; the
        # np.array() copies make closing the file safe.
        with h5py.File(src_file, 'r') as h5f:
            return {key: np.array(h5f.get(key)) for key in self.keys}

    def __getitem__(self, index):
        cell = self.data_dict['cells'][index]
        normals = self.data_dict['normals'][index]
        voxels = self.data_dict['voxels'][index]
        points = self.data_dict['points'][index]
        values = self.data_dict['values'][index]
        num_points = points.shape[0]
        half = num_points // 2
        pos = np.nonzero(values > 0.5)[0]
        neg = np.nonzero(values < 0.5)[0]
        # Fix: np.long was a deprecated alias removed in NumPy 1.24; use the
        # explicit int64 dtype.
        resample = np.zeros(num_points, dtype=np.int64)
        # assumes both classes are non-empty; np.random.randint raises otherwise
        resample[:half] = pos[np.random.randint(pos.shape[0], size=(half,))].reshape(-1)
        resample[half:] = neg[np.random.randint(neg.shape[0], size=(half,))].reshape(-1)
        points = points[resample]
        values = values[resample]
        return cell, points, values, normals, voxels

    def __len__(self):
        return self.data_dict['cells'].shape[0]
def train(conf):
    """Train the part encoder + implicit decoder end to end.

    WARNING: wipes any existing log/model directories for conf.exp_name,
    then trains for conf.epochs epochs, saving periodic and final
    checkpoints under conf.model_path/conf.exp_name.
    """
    # wipe any previous run with the same experiment name
    if os.path.exists(os.path.join(conf.log_path, conf.exp_name)):
        shutil.rmtree(os.path.join(conf.log_path, conf.exp_name))
    if os.path.exists(os.path.join(conf.model_path, conf.exp_name)):
        shutil.rmtree(os.path.join(conf.model_path, conf.exp_name))
    # create directories for this run
    os.makedirs(os.path.join(conf.model_path, conf.exp_name))
    os.makedirs(os.path.join(conf.log_path, conf.exp_name))
    # file log
    flog = open(os.path.join(conf.log_path, conf.exp_name, 'train.log'), 'w')
    # log the object category information
    print(f'Object Category: {conf.category}')
    flog.write(f'Object Category: {conf.category}\n')
    # control randomness (negative seed means "pick one at random")
    if conf.seed < 0:
        conf.seed = random.randint(1, 10000)
    print("Random Seed: %d" % (conf.seed))
    flog.write(f'Random Seed: {conf.seed}\n')
    random.seed(conf.seed)
    np.random.seed(conf.seed)
    # create models
    encoder = PartEncoder(feat_len=conf.geo_feat_size, latent_size=conf.geo_feat_size)
    decoder = PartDecoder(feat_len=conf.geo_feat_size)
    models = [encoder, decoder]
    model_names = ['part_pc_encoder', 'part_pc_decoder']
    # create optimizers
    optimizer = nn.Adam(encoder.parameters() + decoder.parameters(), lr=conf.lr, weight_decay=conf.weight_decay)
    # create training and validation datasets and data loaders
    train_dataloader = PartNetGeoDataset(conf, 'train', batch_size=conf.batch_size, shuffle=True)
    train_num_batch = len(train_dataloader)
    # create logs
    if not conf.no_console_log:
        header = '     Time    Epoch     Dataset    Iteration    Progress(%)      LR       ReconLoss  KLDivLoss   TotalLoss'
    # Fix: train_writer must exist even when tensorboard logging is disabled,
    # because it is unconditionally passed to forward() below (the original
    # raised NameError when conf.no_tb_log was True).
    train_writer = None
    if not conf.no_tb_log:
        # https://github.com/lanpa/tensorboard-pyjt
        from tensorboardX import SummaryWriter
        train_writer = SummaryWriter(os.path.join(conf.log_path, conf.exp_name, 'train'))
    # save config
    jt.save(conf, os.path.join(conf.model_path, conf.exp_name, 'conf.pth'))
    # start training
    print("Starting training ...... ")
    flog.write('Starting training ......\n')
    start_time = time.time()
    last_checkpoint_step = None
    last_train_console_log_step = None
    # train for every epoch
    for epoch in range(conf.epochs):
        if not conf.no_console_log:
            print(f'training run {conf.exp_name}')
            flog.write(f'training run {conf.exp_name}\n')
            print(header)
            flog.write(header+'\n')
        train_batches = enumerate(train_dataloader, 0)
        train_fraction_done = 0.0
        train_batch_ind = 0
        # train for every batch
        for train_batch_ind, batch in train_batches:
            train_fraction_done = (train_batch_ind + 1) / train_num_batch
            train_step = epoch * train_num_batch + train_batch_ind
            # throttle console logging to one line per console_log_interval steps
            log_console = not conf.no_console_log and (last_train_console_log_step is None or \
                    train_step - last_train_console_log_step >= conf.console_log_interval)
            if log_console:
                last_train_console_log_step = train_step
            # set models to training mode
            for m in models:
                m.train()
            # forward pass (including logging)
            total_loss = forward(
                batch=batch, encoder=encoder, decoder=decoder, conf=conf,
                is_valdt=False, step=train_step, epoch=epoch, batch_ind=train_batch_ind,
                num_batch=train_num_batch, start_time=start_time,
                log_console=log_console, log_tb=not conf.no_tb_log, tb_writer=train_writer, flog=flog)
            # optimize one step
            optimizer.step(total_loss)
            # periodically save a checkpoint (no gradients needed)
            with jt.no_grad():
                if last_checkpoint_step is None or \
                        train_step - last_checkpoint_step >= conf.checkpoint_interval:
                    print("Saving checkpoint ...... ", end='', flush=True)
                    flog.write("Saving checkpoint ...... ")
                    util.save_checkpoint(
                        models=models, model_names=model_names, dirname=os.path.join(conf.model_path, conf.exp_name),
                        epoch=epoch, prepend_epoch=True, optimizers=[optimizer], optimizer_names=['opt'])
                    print("DONE")
                    flog.write("DONE\n")
                    last_checkpoint_step = train_step
    # save the final models
    print("Saving final checkpoint ...... ", end='', flush=True)
    flog.write('Saving final checkpoint ...... ')
    util.save_checkpoint(
        models=models, model_names=model_names, dirname=os.path.join(conf.model_path, conf.exp_name),
        epoch=epoch, prepend_epoch=False, optimizers=[optimizer], optimizer_names=['opt'])
    print("DONE")
    flog.write("DONE\n")
    flog.close()
def forward(batch, encoder, decoder, conf,
        is_valdt=False, step=None, epoch=None, batch_ind=0, num_batch=1, start_time=0,
        log_console=False, log_tb=False, tb_writer=None, flog=None):
    """Run one encode/decode pass on a batch and return the total loss.

    Encodes voxels+normals to a per-part feature, decodes predictions at the
    sampled points, and computes the decoder's BCE reconstruction loss scaled
    by conf.loss_weight_geo. Also emits console / tensorboard / file logging.
    """
    data = [item for item in batch]
    cells, points, values, normals, voxels = data
    batch_size = cells.shape[0]
    feat = encoder(voxels, normals)
    # num_smp is currently unused (the reshape path below is commented out)
    num_smp = points.shape[1]
    # points = jt.reshape(points, (batch_size * num_smp, -1))
    # values = jt.reshape(values, (batch_size * num_smp, -1))
    pred = decoder(points, feat)
    # node_type, pred = decoder(points, feat)
    recon_loss= decoder.loss(pred, values.view(-1, 1))
    recon_loss = recon_loss.mean() * conf.loss_weight_geo
    # mask = gt_type.max(1).values.view(-1,1)
    total_loss = recon_loss
    # logging only — no gradients needed past this point
    with jt.no_grad():
        # log to console
        if log_console:
            print(
                f'''{strftime("%H:%M:%S", time.gmtime(time.time()-start_time)):>9s} '''
                f'''{epoch:>5.0f}/{conf.epochs:<5.0f} '''
                f'''{'validation' if is_valdt else 'training':^10s} '''
                f'''{batch_ind:>5.0f}/{num_batch:<5.0f} '''
                f'''{100. * (1+batch_ind+num_batch*epoch) / (num_batch*conf.epochs):>9.1f}% '''
                f'''{recon_loss.item():>11.6f} '''
                f'''{total_loss.item():>10.6f}''')
            flog.write(
                f'''{strftime("%H:%M:%S", time.gmtime(time.time()-start_time)):>9s} '''
                f'''{epoch:>5.0f}/{conf.epochs:<5.0f} '''
                f'''{'validation' if is_valdt else 'training':^10s} '''
                f'''{batch_ind:>5.0f}/{num_batch:<5.0f} '''
                f'''{100. * (1+batch_ind+num_batch*epoch) / (num_batch*conf.epochs):>9.1f}% '''
                f'''{recon_loss.item():>11.6f} '''
                f'''{total_loss.item():>10.6f}\n''')
            flog.flush()
        # log to tensorboard
        if log_tb and tb_writer is not None:
            tb_writer.add_scalar('loss', total_loss.item(), step)
            tb_writer.add_scalar('recon_loss', recon_loss.item(), step)
    return total_loss
if __name__ == '__main__':
    sys.setrecursionlimit(5000)  # this code uses recursion a lot for code simplicity
    parser = ArgumentParser()
    parser = config.add_train_vae_args(parser)
    parser.add_argument('--use_local_frame', action='store_true', default=False, help='factorize out 3-dim center + 1-dim scale')
    # Fix: the original assigned the parsed namespace to `config`, shadowing
    # the imported utils.config module for the rest of the process.
    args = parser.parse_args()
    train(conf=args)
| [
"jittor.init.gauss_",
"numpy.random.seed",
"argparse.ArgumentParser",
"jittor.no_grad",
"jittor.concat",
"utils.config.add_train_vae_args",
"numpy.random.randint",
"jittor.nn.Conv3d",
"sys.setrecursionlimit",
"os.path.join",
"jittor.init.xavier_uniform_",
"random.randint",
"jittor.nn.BCELoss... | [((9239, 9261), 'random.seed', 'random.seed', (['conf.seed'], {}), '(conf.seed)\n', (9250, 9261), False, 'import random\n'), ((9266, 9291), 'numpy.random.seed', 'np.random.seed', (['conf.seed'], {}), '(conf.seed)\n', (9280, 9291), True, 'import numpy as np\n'), ((10496, 10507), 'time.time', 'time.time', ([], {}), '()\n', (10505, 10507), False, 'import time\n'), ((15380, 15407), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(5000)'], {}), '(5000)\n', (15401, 15407), False, 'import sys\n'), ((15475, 15491), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (15489, 15491), False, 'from argparse import ArgumentParser\n'), ((15505, 15538), 'utils.config.add_train_vae_args', 'config.add_train_vae_args', (['parser'], {}), '(parser)\n', (15530, 15538), True, 'import utils.config as config\n'), ((650, 687), 'jittor.nn.Linear', 'nn.Linear', (['feature_size', 'feature_size'], {}), '(feature_size, feature_size)\n', (659, 687), True, 'import jittor.nn as nn\n'), ((711, 748), 'jittor.nn.Linear', 'nn.Linear', (['feature_size', 'feature_size'], {}), '(feature_size, feature_size)\n', (720, 748), True, 'import jittor.nn as nn\n'), ((1279, 1340), 'jittor.nn.Conv3d', 'nn.Conv3d', (['(1)', 'hidden_size', '(4)'], {'padding': '(1)', 'stride': '(1)', 'bias': '(False)'}), '(1, hidden_size, 4, padding=1, stride=1, bias=False)\n', (1288, 1340), True, 'import jittor.nn as nn\n'), ((1360, 1390), 'jittor.nn.InstanceNorm3d', 'nn.InstanceNorm3d', (['hidden_size'], {}), '(hidden_size)\n', (1377, 1390), True, 'import jittor.nn as nn\n'), ((1412, 1487), 'jittor.nn.Conv3d', 'nn.Conv3d', (['hidden_size', '(hidden_size * 2)', '(4)'], {'padding': '(1)', 'stride': '(2)', 'bias': '(False)'}), '(hidden_size, hidden_size * 2, 4, padding=1, stride=2, bias=False)\n', (1421, 1487), True, 'import jittor.nn as nn\n'), ((1507, 1541), 'jittor.nn.InstanceNorm3d', 'nn.InstanceNorm3d', (['(hidden_size * 2)'], {}), '(hidden_size * 2)\n', (1524, 1541), True, 'import jittor.nn 
as nn\n'), ((1563, 1642), 'jittor.nn.Conv3d', 'nn.Conv3d', (['(hidden_size * 2)', '(hidden_size * 4)', '(4)'], {'padding': '(1)', 'stride': '(2)', 'bias': '(False)'}), '(hidden_size * 2, hidden_size * 4, 4, padding=1, stride=2, bias=False)\n', (1572, 1642), True, 'import jittor.nn as nn\n'), ((1662, 1696), 'jittor.nn.InstanceNorm3d', 'nn.InstanceNorm3d', (['(hidden_size * 4)'], {}), '(hidden_size * 4)\n', (1679, 1696), True, 'import jittor.nn as nn\n'), ((1718, 1797), 'jittor.nn.Conv3d', 'nn.Conv3d', (['(hidden_size * 4)', '(hidden_size * 8)', '(4)'], {'padding': '(1)', 'stride': '(2)', 'bias': '(False)'}), '(hidden_size * 4, hidden_size * 8, 4, padding=1, stride=2, bias=False)\n', (1727, 1797), True, 'import jittor.nn as nn\n'), ((1817, 1851), 'jittor.nn.InstanceNorm3d', 'nn.InstanceNorm3d', (['(hidden_size * 8)'], {}), '(hidden_size * 8)\n', (1834, 1851), True, 'import jittor.nn as nn\n'), ((1873, 1944), 'jittor.nn.Conv3d', 'nn.Conv3d', (['(hidden_size * 8)', 'feat_len', '(3)'], {'padding': '(0)', 'stride': '(1)', 'bias': '(True)'}), '(hidden_size * 8, feat_len, 3, padding=0, stride=1, bias=True)\n', (1882, 1944), True, 'import jittor.nn as nn\n'), ((1971, 1989), 'jittor.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.02)'], {}), '(0.02)\n', (1983, 1989), True, 'import jittor.nn as nn\n'), ((1998, 2037), 'jittor.init.xavier_uniform_', 'init.xavier_uniform_', (['self.conv1.weight'], {}), '(self.conv1.weight)\n', (2018, 2037), False, 'from jittor import init\n'), ((2046, 2085), 'jittor.init.xavier_uniform_', 'init.xavier_uniform_', (['self.conv2.weight'], {}), '(self.conv2.weight)\n', (2066, 2085), False, 'from jittor import init\n'), ((2094, 2133), 'jittor.init.xavier_uniform_', 'init.xavier_uniform_', (['self.conv3.weight'], {}), '(self.conv3.weight)\n', (2114, 2133), False, 'from jittor import init\n'), ((2142, 2181), 'jittor.init.xavier_uniform_', 'init.xavier_uniform_', (['self.conv4.weight'], {}), '(self.conv4.weight)\n', (2162, 2181), False, 'from jittor import 
init\n'), ((2190, 2224), 'jittor.init.constant_', 'init.constant_', (['self.conv5.bias', '(0)'], {}), '(self.conv5.bias, 0)\n', (2204, 2224), False, 'from jittor import init\n'), ((2824, 2860), 'jittor.nn.Linear', 'nn.Linear', (['(feat_len + 3)', 'latent_size'], {}), '(feat_len + 3, latent_size)\n', (2833, 2860), True, 'import jittor.nn as nn\n'), ((2869, 2918), 'jittor.init.gauss_', 'init.gauss_', (['self.mlp1.weight'], {'mean': '(0.0)', 'std': '(0.02)'}), '(self.mlp1.weight, mean=0.0, std=0.02)\n', (2880, 2918), False, 'from jittor import init\n'), ((2927, 2960), 'jittor.init.constant_', 'init.constant_', (['self.mlp1.bias', '(0)'], {}), '(self.mlp1.bias, 0)\n', (2941, 2960), False, 'from jittor import init\n'), ((2987, 3005), 'jittor.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.02)'], {}), '(0.02)\n', (2999, 3005), True, 'import jittor.nn as nn\n'), ((3672, 3712), 'jittor.nn.Linear', 'nn.Linear', (['(feat_len + 3)', '(hidden_size * 8)'], {}), '(feat_len + 3, hidden_size * 8)\n', (3681, 3712), True, 'import jittor.nn as nn\n'), ((3733, 3776), 'jittor.nn.Linear', 'nn.Linear', (['(hidden_size * 8)', '(hidden_size * 8)'], {}), '(hidden_size * 8, hidden_size * 8)\n', (3742, 3776), True, 'import jittor.nn as nn\n'), ((3797, 3840), 'jittor.nn.Linear', 'nn.Linear', (['(hidden_size * 8)', '(hidden_size * 8)'], {}), '(hidden_size * 8, hidden_size * 8)\n', (3806, 3840), True, 'import jittor.nn as nn\n'), ((3861, 3904), 'jittor.nn.Linear', 'nn.Linear', (['(hidden_size * 8)', '(hidden_size * 4)'], {}), '(hidden_size * 8, hidden_size * 4)\n', (3870, 3904), True, 'import jittor.nn as nn\n'), ((3925, 3968), 'jittor.nn.Linear', 'nn.Linear', (['(hidden_size * 4)', '(hidden_size * 2)'], {}), '(hidden_size * 4, hidden_size * 2)\n', (3934, 3968), True, 'import jittor.nn as nn\n'), ((3989, 4028), 'jittor.nn.Linear', 'nn.Linear', (['(hidden_size * 2)', 'hidden_size'], {}), '(hidden_size * 2, hidden_size)\n', (3998, 4028), True, 'import jittor.nn as nn\n'), ((4049, 4074), 'jittor.nn.Linear', 
'nn.Linear', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (4058, 4074), True, 'import jittor.nn as nn\n'), ((4083, 4132), 'jittor.init.gauss_', 'init.gauss_', (['self.mlp1.weight'], {'mean': '(0.0)', 'std': '(0.02)'}), '(self.mlp1.weight, mean=0.0, std=0.02)\n', (4094, 4132), False, 'from jittor import init\n'), ((4141, 4174), 'jittor.init.constant_', 'init.constant_', (['self.mlp1.bias', '(0)'], {}), '(self.mlp1.bias, 0)\n', (4155, 4174), False, 'from jittor import init\n'), ((4183, 4232), 'jittor.init.gauss_', 'init.gauss_', (['self.mlp2.weight'], {'mean': '(0.0)', 'std': '(0.02)'}), '(self.mlp2.weight, mean=0.0, std=0.02)\n', (4194, 4232), False, 'from jittor import init\n'), ((4241, 4274), 'jittor.init.constant_', 'init.constant_', (['self.mlp2.bias', '(0)'], {}), '(self.mlp2.bias, 0)\n', (4255, 4274), False, 'from jittor import init\n'), ((4283, 4332), 'jittor.init.gauss_', 'init.gauss_', (['self.mlp3.weight'], {'mean': '(0.0)', 'std': '(0.02)'}), '(self.mlp3.weight, mean=0.0, std=0.02)\n', (4294, 4332), False, 'from jittor import init\n'), ((4341, 4374), 'jittor.init.constant_', 'init.constant_', (['self.mlp3.bias', '(0)'], {}), '(self.mlp3.bias, 0)\n', (4355, 4374), False, 'from jittor import init\n'), ((4383, 4432), 'jittor.init.gauss_', 'init.gauss_', (['self.mlp4.weight'], {'mean': '(0.0)', 'std': '(0.02)'}), '(self.mlp4.weight, mean=0.0, std=0.02)\n', (4394, 4432), False, 'from jittor import init\n'), ((4441, 4474), 'jittor.init.constant_', 'init.constant_', (['self.mlp4.bias', '(0)'], {}), '(self.mlp4.bias, 0)\n', (4455, 4474), False, 'from jittor import init\n'), ((4483, 4532), 'jittor.init.gauss_', 'init.gauss_', (['self.mlp5.weight'], {'mean': '(0.0)', 'std': '(0.02)'}), '(self.mlp5.weight, mean=0.0, std=0.02)\n', (4494, 4532), False, 'from jittor import init\n'), ((4541, 4574), 'jittor.init.constant_', 'init.constant_', (['self.mlp5.bias', '(0)'], {}), '(self.mlp5.bias, 0)\n', (4555, 4574), False, 'from jittor import init\n'), ((4583, 4632), 
'jittor.init.gauss_', 'init.gauss_', (['self.mlp6.weight'], {'mean': '(0.0)', 'std': '(0.02)'}), '(self.mlp6.weight, mean=0.0, std=0.02)\n', (4594, 4632), False, 'from jittor import init\n'), ((4641, 4674), 'jittor.init.constant_', 'init.constant_', (['self.mlp6.bias', '(0)'], {}), '(self.mlp6.bias, 0)\n', (4655, 4674), False, 'from jittor import init\n'), ((4683, 4734), 'jittor.init.gauss_', 'init.gauss_', (['self.mlp7.weight'], {'mean': '(1e-05)', 'std': '(0.02)'}), '(self.mlp7.weight, mean=1e-05, std=0.02)\n', (4694, 4734), False, 'from jittor import init\n'), ((4742, 4777), 'jittor.init.constant_', 'init.constant_', (['self.mlp7.bias', '(0.5)'], {}), '(self.mlp7.bias, 0.5)\n', (4756, 4777), False, 'from jittor import init\n'), ((4804, 4822), 'jittor.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.02)'], {}), '(0.02)\n', (4816, 4822), True, 'import jittor.nn as nn\n'), ((4846, 4858), 'jittor.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4856, 4858), True, 'import jittor.nn as nn\n'), ((5341, 5363), 'jittor.nn.Linear', 'nn.Linear', (['feat_len', '(8)'], {}), '(feat_len, 8)\n', (5350, 5363), True, 'import jittor.nn as nn\n'), ((5387, 5399), 'jittor.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (5397, 5399), True, 'import jittor.nn as nn\n'), ((5765, 5777), 'jittor.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (5775, 5777), True, 'import jittor.nn as nn\n'), ((7181, 7223), 'os.path.join', 'os.path.join', (['self.data_path', 'self.data_h5'], {}), '(self.data_path, self.data_h5)\n', (7193, 7223), False, 'import os\n'), ((7262, 7286), 'h5py.File', 'h5py.File', (['src_file', '"""r"""'], {}), "(src_file, 'r')\n", (7271, 7286), False, 'import h5py\n'), ((7934, 7969), 'numpy.zeros', 'np.zeros', (['num_points'], {'dtype': 'np.long'}), '(num_points, dtype=np.long)\n', (7942, 7969), True, 'import numpy as np\n'), ((8405, 8447), 'os.path.join', 'os.path.join', (['conf.log_path', 'conf.exp_name'], {}), '(conf.log_path, conf.exp_name)\n', (8417, 8447), False, 'import os\n'), ((8538, 8582), 
'os.path.join', 'os.path.join', (['conf.model_path', 'conf.exp_name'], {}), '(conf.model_path, conf.exp_name)\n', (8550, 8582), False, 'import os\n'), ((8708, 8752), 'os.path.join', 'os.path.join', (['conf.model_path', 'conf.exp_name'], {}), '(conf.model_path, conf.exp_name)\n', (8720, 8752), False, 'import os\n'), ((8770, 8812), 'os.path.join', 'os.path.join', (['conf.log_path', 'conf.exp_name'], {}), '(conf.log_path, conf.exp_name)\n', (8782, 8812), False, 'import os\n'), ((8846, 8901), 'os.path.join', 'os.path.join', (['conf.log_path', 'conf.exp_name', '"""train.log"""'], {}), "(conf.log_path, conf.exp_name, 'train.log')\n", (8858, 8901), False, 'import os\n'), ((9121, 9145), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (9135, 9145), False, 'import random\n'), ((10313, 10369), 'os.path.join', 'os.path.join', (['conf.model_path', 'conf.exp_name', '"""conf.pth"""'], {}), "(conf.model_path, conf.exp_name, 'conf.pth')\n", (10325, 10369), False, 'import os\n'), ((14026, 14038), 'jittor.no_grad', 'jt.no_grad', ([], {}), '()\n', (14036, 14038), True, 'import jittor as jt\n'), ((932, 950), 'jittor.randn_like', 'jt.randn_like', (['std'], {}), '(std)\n', (945, 950), True, 'import jittor as jt\n'), ((3248, 3276), 'jittor.concat', 'jt.concat', (['[feat, norms]', '(-1)'], {}), '([feat, norms], -1)\n', (3257, 3276), True, 'import jittor as jt\n'), ((7849, 7873), 'numpy.nonzero', 'np.nonzero', (['(values > 0.5)'], {}), '(values > 0.5)\n', (7859, 7873), True, 'import numpy as np\n'), ((7889, 7913), 'numpy.nonzero', 'np.nonzero', (['(values < 0.5)'], {}), '(values < 0.5)\n', (7899, 7913), True, 'import numpy as np\n'), ((8472, 8514), 'os.path.join', 'os.path.join', (['conf.log_path', 'conf.exp_name'], {}), '(conf.log_path, conf.exp_name)\n', (8484, 8514), False, 'import os\n'), ((8607, 8651), 'os.path.join', 'os.path.join', (['conf.model_path', 'conf.exp_name'], {}), '(conf.model_path, conf.exp_name)\n', (8619, 8651), False, 'import os\n'), 
((10223, 10274), 'os.path.join', 'os.path.join', (['conf.log_path', 'conf.exp_name', '"""train"""'], {}), "(conf.log_path, conf.exp_name, 'train')\n", (10235, 10274), False, 'import os\n'), ((13014, 13058), 'os.path.join', 'os.path.join', (['conf.model_path', 'conf.exp_name'], {}), '(conf.model_path, conf.exp_name)\n', (13026, 13058), False, 'import os\n'), ((6003, 6027), 'jittor.concat', 'jt.concat', (['[feat, x]', '(-1)'], {}), '([feat, x], -1)\n', (6012, 6027), True, 'import jittor as jt\n'), ((12104, 12116), 'jittor.no_grad', 'jt.no_grad', ([], {}), '()\n', (12114, 12116), True, 'import jittor as jt\n'), ((8000, 8045), 'numpy.random.randint', 'np.random.randint', (['pos.shape[0]'], {'size': '(half,)'}), '(pos.shape[0], size=(half,))\n', (8017, 8045), True, 'import numpy as np\n'), ((8088, 8133), 'numpy.random.randint', 'np.random.randint', (['neg.shape[0]'], {'size': '(half,)'}), '(neg.shape[0], size=(half,))\n', (8105, 8133), True, 'import numpy as np\n'), ((12507, 12551), 'os.path.join', 'os.path.join', (['conf.model_path', 'conf.exp_name'], {}), '(conf.model_path, conf.exp_name)\n', (12519, 12551), False, 'import os\n'), ((14162, 14173), 'time.time', 'time.time', ([], {}), '()\n', (14171, 14173), False, 'import time\n'), ((14662, 14673), 'time.time', 'time.time', ([], {}), '()\n', (14671, 14673), False, 'import time\n')] |
import unittest
import pandas as pd
import numpy as np
from microsim.population import NHANESDirectSamplePopulation
from microsim.outcome_model_repository import OutcomeModelRepository
from microsim.outcome import Outcome, OutcomeType
class TestOftenStrokeModelRepository(OutcomeModelRepository):
    """Outcome-model stub that produces non-fatal strokes at a fixed rate.

    Lets the recalibration tests control the baseline stroke frequency
    directly, independent of the fitted risk models.
    """

    def __init__(self, stroke_rate):
        super().__init__()
        self._stroke_rate = stroke_rate

    def assign_cv_outcome(self, person, years=1, manualStrokeMIProbability=None):
        """Override: draw a non-fatal stroke with probability ``stroke_rate``."""
        if np.random.random() < self._stroke_rate:
            return Outcome(OutcomeType.STROKE, False)
        return None

    def assign_cv_outcome_vectorized(self, x):
        """Override: mark a non-fatal stroke on row ``x`` with probability ``stroke_rate``."""
        had_stroke = np.random.random() < self._stroke_rate
        x.miNext = False
        x.strokeNext = had_stroke
        x.deadNext = False
        if had_stroke:
            # record age at first stroke only if no earlier stroke is on file
            if (x.ageAtFirstStroke is None) or np.isnan(x.ageAtFirstStroke):
                x.ageAtFirstStroke = x.age
        return x

    def get_risk_for_person(self, person, outcomeModelType, years=1, vectorized=False):
        """Override: report the fixed stroke rate as the risk for any person."""
        return self._stroke_rate

    def assign_non_cv_mortality(self, person):
        """Override: nobody dies of non-cardiovascular causes in this stub."""
        return False
class TestOftenMIModelRepository(OutcomeModelRepository):
    """Outcome-model stub that produces MIs at a fixed per-wave rate.

    MI fatality and non-cardiovascular mortality are drawn at their own
    fixed rates, so tests can control event and death frequencies exactly.
    """

    def __init__(self, mi_rate, fatality_rate=0.0, non_cv_mortality_rate=0.0):
        super().__init__()
        self._mi_rate = mi_rate
        self._fatality_rate = fatality_rate
        self._non_cv_mortality_rate = non_cv_mortality_rate

    def assign_cv_outcome(self, person, years=1, manualStrokeMIProbability=None):
        """Override: draw an MI with probability ``mi_rate``; if one occurs,
        draw its fatality with probability ``fatality_rate``."""
        if np.random.random() >= self._mi_rate:
            return None
        fatal = np.random.random() < self._fatality_rate
        return Outcome(OutcomeType.MI, fatal)

    def assign_cv_outcome_vectorized(self, x):
        """Override: mark an MI on row ``x`` with probability ``mi_rate``;
        fatal MIs also set ``deadNext``/``miFatal``."""
        had_mi = np.random.random() < self._mi_rate
        x.miNext = had_mi
        x.strokeNext = False
        if had_mi:
            x.deadNext = np.random.random() < self._fatality_rate
            x.miFatal = x.deadNext
            # record age at first MI only if no earlier MI is on file
            if (x.ageAtFirstMI is None) or np.isnan(x.ageAtFirstMI):
                x.ageAtFirstMI = x.age
        else:
            x.deadNext = False
        return x

    def get_risk_for_person(self, person, outcomeModelType, years=1, vectorized=False):
        """Override: report the fixed MI rate as the risk for any person."""
        return self._mi_rate

    def assign_non_cv_mortality(self, person):
        """Override: draw a non-CV death at the configured rate."""
        draw = np.random.uniform(size=1)[0]
        return draw < self._non_cv_mortality_rate

    def assign_non_cv_mortality_vectorized(self, person, years=1):
        """Override: vectorized variant; same single-draw behavior."""
        draw = np.random.uniform(size=1)[0]
        return draw < self._non_cv_mortality_rate
# Can't inherit from BaseTreatmentStrategy/AddASingleBPMedTreatmentStrategy:
# ABCs and derived classes are not `pickle`-able, which breaks multiprocess/pandarellel
class addABPMedStrokeLargeEffectSize:
    """Treatment-strategy stub: add one BP medication with a fixed SBP/DBP drop.

    The recalibration targets claim a clinically large stroke effect (RR 0.5)
    alongside a modest MI effect (RR 0.92).  Plain class (no ABC base) so the
    object stays pickle-able for multiprocess/pandarellel runs.
    """

    def __init__(self):
        self._sbp_lowering = 5.5
        self._dbp_lowering = 3.1

    def get_changes_for_person(self, person):
        """Return (count deltas, meds-added, risk-factor deltas) for one person."""
        med_count = {"_antiHypertensiveCount": 1}
        meds_added = {"_bpMedsAdded": 1}
        bp_deltas = {"_sbp": -self._sbp_lowering, "_dbp": -self._dbp_lowering}
        return (med_count, meds_added, bp_deltas)

    def get_treatment_recalibration_for_population(self):
        """Target relative risks the recalibration should reproduce."""
        recalibration = {OutcomeType.STROKE: 0.5}
        recalibration[OutcomeType.MI] = 0.92
        return recalibration

    def get_treatment_recalibration_for_person(self, person):
        """Per-person target relative risks (same as the population targets)."""
        recalibration = {OutcomeType.STROKE: 0.5}
        recalibration[OutcomeType.MI] = 0.92
        return recalibration

    def repeat_treatment_strategy(self):
        """Apply the strategy once, not on every wave."""
        return False

    def get_changes_vectorized(self, x):
        """Apply the medication to a vectorized person row in place."""
        x.antiHypertensiveCountNext += 1
        x.bpMedsAddedNext = 1
        x.sbpNext -= self._sbp_lowering
        x.dbpNext -= self._dbp_lowering
        return x

    def rollback_changes_vectorized(self, x):
        """Undo exactly what get_changes_vectorized applied."""
        x.antiHypertensiveCountNext -= 1
        x.sbpNext += self._sbp_lowering
        x.dbpNext += self._dbp_lowering
        x.bpMedsAddedNext = 0
        return x
class addABPMedStrokeHarm(addABPMedStrokeLargeEffectSize):
    """Variant whose recalibration target makes the medication HARMFUL for
    stroke (RR 1.5), forcing recalibration to add stroke events."""

    def get_treatment_recalibration_for_population(self):
        recalibration = {OutcomeType.STROKE: 1.5}
        recalibration[OutcomeType.MI] = 0.92
        return recalibration

    def get_treatment_recalibration_for_person(self, person):
        recalibration = {OutcomeType.STROKE: 1.5}
        recalibration[OutcomeType.MI] = 0.92
        return recalibration
class addABPMedMIHarm(addABPMedStrokeLargeEffectSize):
    """Variant whose recalibration target makes the medication HARMFUL for
    MI (RR 1.5), forcing recalibration to add MI events."""

    def get_treatment_recalibration_for_population(self):
        recalibration = {OutcomeType.MI: 1.5}
        recalibration[OutcomeType.STROKE] = 0.92
        return recalibration

    def get_treatment_recalibration_for_person(self, person):
        recalibration = {OutcomeType.MI: 1.5}
        recalibration[OutcomeType.STROKE] = 0.92
        return recalibration
class addABPMedMILargeEffectSize(addABPMedStrokeLargeEffectSize):
    """Variant whose recalibration target claims a clinically large MI
    effect (RR 0.5), forcing recalibration to roll back MI events."""

    def get_treatment_recalibration_for_population(self):
        recalibration = {OutcomeType.MI: 0.5}
        recalibration[OutcomeType.STROKE] = 0.92
        return recalibration

    def get_treatment_recalibration_for_person(self, person):
        recalibration = {OutcomeType.MI: 0.5}
        recalibration[OutcomeType.STROKE] = 0.92
        return recalibration
class TestTreatmentRecalibration(unittest.TestCase):
    """Tests that treatment-effect recalibration adds or rolls back events.

    Each test pairs a stub outcome-model repository (which generates events
    at a fixed rate) with a BP-medication strategy whose recalibration target
    is clinically larger or smaller than the medication's intrinsic effect
    (RR ~0.95 per med), then checks that recalibration moves the event count
    in the expected direction by rolling back or adding events.
    """

    def setUp(self):
        self.popSize = 500

    # --- helpers ----------------------------------------------------------

    def _count_strokes(self, pop):
        """Number of people in ``pop`` with a stroke during the simulation."""
        # .items() replaces Series.iteritems(), which was removed in pandas 2.0.
        return pd.Series(
            [person.has_stroke_during_simulation() for _, person in pop._people.items()]
        ).sum()

    def _count_mis(self, pop):
        """Number of people in ``pop`` with an MI during the simulation."""
        return pd.Series(
            [person.has_mi_during_simulation() for _, person in pop._people.items()]
        ).sum()

    def _count_fatal_mis(self, pop):
        """Number of people in ``pop`` with an MI who are also dead."""
        return pd.Series(
            [
                person.has_mi_during_simulation() & person.is_dead()
                for _, person in pop._people.items()
            ]
        ).sum()

    def _build_stroke_population(self, stroke_rate):
        """Population whose outcome model strokes people at ``stroke_rate`` per wave."""
        pop = NHANESDirectSamplePopulation(self.popSize, 2001)
        pop._outcome_model_repository = TestOftenStrokeModelRepository(stroke_rate)
        return pop

    def _build_mi_population(self, mi_rate, fatality_rate=0.0, non_cv_mortality_rate=0.0):
        """Population whose outcome model generates MIs at ``mi_rate`` per wave."""
        pop = NHANESDirectSamplePopulation(self.popSize, 2001)
        pop._outcome_model_repository = TestOftenMIModelRepository(
            mi_rate, fatality_rate, non_cv_mortality_rate
        )
        return pop

    # --- tests ------------------------------------------------------------

    def testRecalibrationIncreasesStrokesWhenEffectSizeIsClincallySmallerButNumericallyLarger(
        self,
    ):
        """A numerically larger stroke target (RR 1.5) must ADD stroke events."""
        basePop = self._build_stroke_population(0.5)
        basePop.advance_vectorized(1)
        # about half of the people should have a stroke at baseline
        baselineStrokes = self._count_strokes(basePop)

        # treatment intrinsically has RR ~0.95; recalibrating to RR 1.5
        # forces the recalibration logic to generate extra strokes
        treatedPop = self._build_stroke_population(0.5)
        treatedPop.set_bp_treatment_strategy(addABPMedStrokeHarm())
        treatedPop.advance_vectorized(1)
        recalibratedStrokes = self._count_strokes(treatedPop)

        self.assertLess(baselineStrokes, recalibratedStrokes)

    def testRecalibrationReducesStrokesWhenEffectSizeIsClincallyLargerButNumericallySmaller(self):
        """A clinically larger stroke target (RR 0.5) must roll back stroke events."""
        basePop = self._build_stroke_population(0.5)
        basePop.advance_vectorized(1)
        baselineStrokes = self._count_strokes(basePop)

        treatedPop = self._build_stroke_population(0.5)
        treatedPop.set_bp_treatment_strategy(addABPMedStrokeLargeEffectSize())
        treatedPop.advance_vectorized(1)
        recalibratedStrokes = self._count_strokes(treatedPop)

        self.assertGreater(baselineStrokes, recalibratedStrokes)

    def testRecalibrationIncreasesSIsWhenEffectSizeIsClincallySmallerButNumericallyLarger(self):
        """A numerically larger MI target (RR 1.5) must ADD MI events."""
        basePop = self._build_mi_population(0.5)
        basePop.advance_vectorized(1)
        # about half of the people have an MI at baseline
        baselineMIs = self._count_mis(basePop)

        treatedPop = self._build_mi_population(0.5)
        treatedPop.set_bp_treatment_strategy(addABPMedMIHarm())
        treatedPop.advance_vectorized(1)
        recalibratedMIs = self._count_mis(treatedPop)

        self.assertLess(baselineMIs, recalibratedMIs)

    def testRecalibrationReducesMIsWhenEffectSizeIsClincallyLargerButNumericallySmaller(self):
        """A clinically larger MI target (RR 0.5) must roll back MI events."""
        basePop = self._build_mi_population(0.5)
        basePop.advance_vectorized(1)
        baselineMIs = self._count_mis(basePop)

        treatedPop = self._build_mi_population(0.5)
        treatedPop.set_bp_treatment_strategy(addABPMedMILargeEffectSize())
        treatedPop.advance_vectorized(1)
        recalibratedMIs = self._count_mis(treatedPop)

        self.assertGreater(baselineMIs, recalibratedMIs)

    def testRollbackFatalEventsRollsBackDeath(self):
        """Rolling back a fatal MI must also roll back the death itself."""
        basePop = self._build_mi_population(1.0, 1.0)
        basePop.advance_vectorized(1)
        # everyone has an MI at baseline, and every MI is fatal
        baselineMIs = self._count_mis(basePop)
        self.assertEqual(self.popSize, baselineMIs)
        baselineFatalMIs = self._count_fatal_mis(basePop)
        self.assertEqual(self.popSize, baselineFatalMIs)

        # recalibrating to a large clinical effect forces many rollbacks
        treatedPop = self._build_mi_population(1.0, 1.0)
        treatedPop.set_bp_treatment_strategy(addABPMedMILargeEffectSize())
        treatedPop.advance_vectorized(1)
        recalibratedFatalMIs = self._count_fatal_mis(treatedPop)

        self.assertGreater(baselineFatalMIs, recalibratedFatalMIs)

    def testAdvanceAfterRollbackWorksOnWholePopulation(self):
        """After rollbacks and non-CV deaths, nobody should get "lost":
        every person is either dead or has complete (6-wave) follow-up."""
        pop = self._build_mi_population(0.2, 0.2, 0.2)
        # this requires that many events be rolled back
        pop.set_bp_treatment_strategy(addABPMedMILargeEffectSize())
        pop.advance_vectorized(5)
        numberWithFullFollowup = pd.Series(
            [
                person.is_dead() or len(person._age) == 6
                for _, person in pop._people.items()
            ]
        ).sum()
        self.assertEqual(self.popSize, numberWithFullFollowup)
# Run the full test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"numpy.random.uniform",
"numpy.isnan",
"microsim.outcome.Outcome",
"numpy.random.random",
"microsim.population.NHANESDirectSamplePopulation"
] | [((13623, 13638), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13636, 13638), False, 'import unittest\n'), ((5597, 5645), 'microsim.population.NHANESDirectSamplePopulation', 'NHANESDirectSamplePopulation', (['self.popSize', '(2001)'], {}), '(self.popSize, 2001)\n', (5625, 5645), False, 'from microsim.population import NHANESDirectSamplePopulation\n'), ((6150, 6198), 'microsim.population.NHANESDirectSamplePopulation', 'NHANESDirectSamplePopulation', (['self.popSize', '(2001)'], {}), '(self.popSize, 2001)\n', (6178, 6198), False, 'from microsim.population import NHANESDirectSamplePopulation\n'), ((7238, 7286), 'microsim.population.NHANESDirectSamplePopulation', 'NHANESDirectSamplePopulation', (['self.popSize', '(2001)'], {}), '(self.popSize, 2001)\n', (7266, 7286), False, 'from microsim.population import NHANESDirectSamplePopulation\n'), ((7772, 7820), 'microsim.population.NHANESDirectSamplePopulation', 'NHANESDirectSamplePopulation', (['self.popSize', '(2001)'], {}), '(self.popSize, 2001)\n', (7800, 7820), False, 'from microsim.population import NHANESDirectSamplePopulation\n'), ((8667, 8715), 'microsim.population.NHANESDirectSamplePopulation', 'NHANESDirectSamplePopulation', (['self.popSize', '(2001)'], {}), '(self.popSize, 2001)\n', (8695, 8715), False, 'from microsim.population import NHANESDirectSamplePopulation\n'), ((9130, 9178), 'microsim.population.NHANESDirectSamplePopulation', 'NHANESDirectSamplePopulation', (['self.popSize', '(2001)'], {}), '(self.popSize, 2001)\n', (9158, 9178), False, 'from microsim.population import NHANESDirectSamplePopulation\n'), ((9895, 9943), 'microsim.population.NHANESDirectSamplePopulation', 'NHANESDirectSamplePopulation', (['self.popSize', '(2001)'], {}), '(self.popSize, 2001)\n', (9923, 9943), False, 'from microsim.population import NHANESDirectSamplePopulation\n'), ((10361, 10409), 'microsim.population.NHANESDirectSamplePopulation', 'NHANESDirectSamplePopulation', (['self.popSize', '(2001)'], {}), '(self.popSize, 
2001)\n', (10389, 10409), False, 'from microsim.population import NHANESDirectSamplePopulation\n'), ((10943, 10991), 'microsim.population.NHANESDirectSamplePopulation', 'NHANESDirectSamplePopulation', (['self.popSize', '(2001)'], {}), '(self.popSize, 2001)\n', (10971, 10991), False, 'from microsim.population import NHANESDirectSamplePopulation\n'), ((11732, 11780), 'microsim.population.NHANESDirectSamplePopulation', 'NHANESDirectSamplePopulation', (['self.popSize', '(2001)'], {}), '(self.popSize, 2001)\n', (11760, 11780), False, 'from microsim.population import NHANESDirectSamplePopulation\n'), ((12618, 12666), 'microsim.population.NHANESDirectSamplePopulation', 'NHANESDirectSamplePopulation', (['self.popSize', '(2001)'], {}), '(self.popSize, 2001)\n', (12646, 12666), False, 'from microsim.population import NHANESDirectSamplePopulation\n'), ((575, 609), 'microsim.outcome.Outcome', 'Outcome', (['OutcomeType.STROKE', '(False)'], {}), '(OutcomeType.STROKE, False)\n', (582, 609), False, 'from microsim.outcome import Outcome, OutcomeType\n'), ((731, 749), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (747, 749), True, 'import numpy as np\n'), ((2047, 2065), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2063, 2065), True, 'import numpy as np\n'), ((613, 631), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (629, 631), True, 'import numpy as np\n'), ((1921, 1939), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1937, 1939), True, 'import numpy as np\n'), ((2169, 2187), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2185, 2187), True, 'import numpy as np\n'), ((2695, 2720), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1)'}), '(size=1)\n', (2712, 2720), True, 'import numpy as np\n'), ((2837, 2862), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1)'}), '(size=1)\n', (2854, 2862), True, 'import numpy as np\n'), ((972, 1000), 'numpy.isnan', 'np.isnan', 
(['x.ageAtFirstStroke'], {}), '(x.ageAtFirstStroke)\n', (980, 1000), True, 'import numpy as np\n'), ((1864, 1882), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1880, 1882), True, 'import numpy as np\n'), ((2330, 2354), 'numpy.isnan', 'np.isnan', (['x.ageAtFirstMI'], {}), '(x.ageAtFirstMI)\n', (2338, 2354), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import time
import glob
from rlkit.misc.asset_loader import load_local_or_remote_file
from copy import deepcopy
from PIL import Image
from skimage import data, color
from skimage.transform import rescale, resize, downscale_local_mean
from torchvision.transforms import ColorJitter, RandomResizedCrop, Resize, RandomAffine
import torchvision.transforms.functional as F
import rlkit.torch.pytorch_util as ptu
from torchvision.utils import save_image
import random
import math
import sys
import cv2
AUGMENT = 3  # number of augmented copies generated per trajectory
SIZE = 48  # side length (pixels) of the resized images
crop_prob = 0.95  # probability of applying the random resized crop (vs plain resize)
cond_frac = 0.15 # Use FIRST cond_frac of traj for conditioning
samples_per_traj = 25 # Number of goals sampled per traj
samples_per_trans = 1 # Number of goals sampled per transition
fixed_data_only = True # Only keep fixed script trajectories
# Running totals, updated while trajectories are processed and printed per file.
total_size = 0
total_traj = 0
total_samples = 0
# Shared augmentation transforms; parameters are re-sampled per trajectory copy.
jitter = ColorJitter((0.75,1.25), (0.9,1.1), (0.9,1.1), (-0.1,0.1))
cropper = RandomResizedCrop((SIZE, SIZE), (0.9, 1.0), (0.9, 1.1))
def filter_files(all_files):
    """Return only fixed-script trajectory files when ``fixed_data_only`` is set.

    When the module-level ``fixed_data_only`` flag is False, the input list is
    returned unchanged.
    """
    if not fixed_data_only:
        return all_files
    # keep only trajectories collected with the fixed script
    return [f for f in all_files if 'fixed' in f]
def aug(x, j, c, do_c, do_f1, do_f2):
    """Crop/resize and colour-jitter PIL image ``x``; return a flat float array.

    ``j`` is a ColorJitter transform and ``c`` the crop parameters; ``do_c``
    selects random-resized-crop vs a plain resize.  The flip flags are
    currently ignored (flips were disabled so mirrored scenes do not confuse
    the robot).
    NOTE(review): Image.ANTIALIAS is deprecated (removed in Pillow 10) --
    confirm the pinned Pillow version before upgrading.
    """
    if do_c:
        x = F.resized_crop(x, c[0], c[1], c[2], c[3], (SIZE, SIZE), Image.ANTIALIAS)
    else:
        x = F.resize(x, (SIZE, SIZE), Image.ANTIALIAS)
    # do_f1 / do_f2 intentionally unused: flips disabled
    jittered = j(x)
    scaled = np.array(jittered) / 255
    # transpose [2, 1, 0] then flatten, matching the downstream latent encoder
    return scaled.transpose([2, 1, 0]).flatten()
def filter_keys(dictionary, keep=('latent', 'state')):
    """Delete, in place, every key of ``dictionary`` containing none of the
    substrings in ``keep``.

    The default is a tuple rather than a list to avoid the shared
    mutable-default-argument pitfall.  Returns None (mutates its argument).
    """
    # snapshot the keys: deleting while iterating the live view is an error
    for key in list(dictionary.keys()):
        if not any(word in key for word in keep):
            del dictionary[key]
# Load the pretrained VQ-VAE used to encode images into latents.
pretrained_vae_path = "/home/ashvin/data/sasha/awac-exps/real-world/vqvae/run4/id0/best_vqvae.pt"
model = load_local_or_remote_file(pretrained_vae_path)
ptu.set_gpu_mode(True)
# Trajectories are bucketed by a task keyword found in the filename;
# anything that matches no keyword falls into 'general'.
catagorized_data = {'fixed_pnp': [], 'fixed_tray': [], 'fixed_pot': [], 'fixed_drawer': [], 'general': []}
all_files = glob.glob("/home/ashvin/data/s3doodad/demos/icra2021/dataset_v3/*")
all_files = filter_files(all_files)
# process the files in random order
random.shuffle(all_files)
# Main pass: for every trajectory file, create AUGMENT augmented copies,
# encode their images with the VQ-VAE, sample conditioned goals, and file
# each copy into its task category.
for filename in all_files:
    print(filename)
    try:
        data = np.load(filename, allow_pickle=True)
    # NOTE(review): bare except silently skips any failure, not just
    # unreadable files -- consider narrowing to the expected exception types.
    except:
        print("COULDNT LOAD ABOVE FILE")
        continue
    data_list = None
    # Route the trajectory to its category if a keyword matches the filename
    for key in catagorized_data.keys():
        if key in filename:
            data_list = catagorized_data[key]
    # No keyword matched: file it under 'general'
    if data_list is None:
        data_list = catagorized_data['general']
    for traj_i in range(len(data)):
        for _ in range(AUGMENT):
            # Sample one set of augmentation parameters per copy so every
            # frame of the copy is transformed consistently.
            D = deepcopy(data[traj_i])
            traj = D["observations"]
            img = traj[0]["image_observation"]
            # slice axis 1 to 50:530 and reverse the last axis
            # (presumably a width crop plus BGR->RGB swap -- TODO confirm layout)
            img = img[:, 50:530, ::-1]
            img = Image.fromarray(img, mode='RGB')
            c = cropper.get_params(img, (0.75, 1.0), (0.75, 1.25))
            j = jitter.get_params((0.75,1.25), (0.9,1.1), (0.9,1.1), (-0.1,0.1))
            do_c = np.random.uniform() < crop_prob
            do_f1 = np.random.uniform() < 0.5
            do_f2 = np.random.uniform() < 0.5
            # Apply the same augmentation to every frame of the trajectory
            for t in range(len(traj)):
                if not traj[t]:
                    # empty transition: report its index and skip it
                    print(traj_i, t)
                    continue
                img = traj[t]["image_observation"]
                img = img[:, 50:530, ::-1]
                img = Image.fromarray(img, mode='RGB')
                y = aug(img, j, c, do_c, do_f1, do_f2)
                traj[t]["image_observation"] = y
            # Encode all frames into latents in a single batch
            num_images = len(traj)
            images = np.stack([traj[i]['image_observation'] for i in range(num_images)])
            latents = model.encode_np(images)
            # Sample goals conditioned on the first cond_frac of the trajectory
            if samples_per_traj > 0:
                cond_timesteps = int(len(traj) * cond_frac)
                num_repeat = math.ceil(samples_per_traj / cond_timesteps)
                goal_context = np.repeat(latents[:cond_timesteps], num_repeat, axis=0)[:samples_per_traj]
                sampled_goals = model.sample_prior(samples_per_traj, cond=goal_context)
            # Attach latent observations and goals to every transition
            for i in range(num_images):
                if samples_per_traj > 0:
                    traj[i]["presampled_latent_goals"] = sampled_goals[i % samples_per_traj]
                traj[i]["initial_latent_state"] = latents[0]
                traj[i]["latent_observation"] = latents[i]
                traj[i]["latent_achieved_goal"] = latents[i]
                traj[i]["latent_desired_goal"] = latents[-1]
                filter_keys(traj[i]) # Delete unnecessary keys
            # Debug visualization of the sampled goals and the trajectory.
            # NOTE(review): relies on samples_per_traj > 0, otherwise
            # sampled_goals is unbound here.
            decoded_samples = model.decode(ptu.from_numpy(sampled_goals))
            decoded_traj = model.decode(ptu.from_numpy(latents))
            save_image(decoded_samples.data.view(-1, 3, 48, 48).transpose(2, 3),"/home/ashvin/data/sample_testing/decoded_samples.png")
            save_image(decoded_traj.data.view(-1, 3, 48, 48).transpose(2, 3),"/home/ashvin/data/sample_testing/decoded_traj.png")
            # NOTE(review): debugger breakpoint left inside the loop -- this
            # halts the script on every augmented trajectory; presumably
            # leftover debug code. Confirm and remove before production runs.
            import pdb; pdb.set_trace()
            # Record the augmented trajectory and update the running totals
            data_list.append(D)
            total_size += num_images
            total_samples += samples_per_traj
            total_traj += 1
            print("Trajectories:", total_traj)
            print("Datapoints:", total_size)
            print("Samples:", total_samples)
# SAVE TRAJECTORIES FOR REINFORCEMENT LEARNING #
# One .npy file of demo trajectories per task category.
for key in catagorized_data.keys():
    np.save('/media/ashvin/data2/data/val/v2/' + key + '_demos.npy', catagorized_data[key])
| [
"torchvision.transforms.ColorJitter",
"numpy.random.uniform",
"numpy.load",
"numpy.save",
"copy.deepcopy",
"math.ceil",
"random.shuffle",
"torchvision.transforms.functional.resized_crop",
"torchvision.transforms.functional.resize",
"rlkit.torch.pytorch_util.from_numpy",
"rlkit.misc.asset_loader.... | [((887, 949), 'torchvision.transforms.ColorJitter', 'ColorJitter', (['(0.75, 1.25)', '(0.9, 1.1)', '(0.9, 1.1)', '(-0.1, 0.1)'], {}), '((0.75, 1.25), (0.9, 1.1), (0.9, 1.1), (-0.1, 0.1))\n', (898, 949), False, 'from torchvision.transforms import ColorJitter, RandomResizedCrop, Resize, RandomAffine\n'), ((956, 1011), 'torchvision.transforms.RandomResizedCrop', 'RandomResizedCrop', (['(SIZE, SIZE)', '(0.9, 1.0)', '(0.9, 1.1)'], {}), '((SIZE, SIZE), (0.9, 1.0), (0.9, 1.1))\n', (973, 1011), False, 'from torchvision.transforms import ColorJitter, RandomResizedCrop, Resize, RandomAffine\n'), ((1946, 1992), 'rlkit.misc.asset_loader.load_local_or_remote_file', 'load_local_or_remote_file', (['pretrained_vae_path'], {}), '(pretrained_vae_path)\n', (1971, 1992), False, 'from rlkit.misc.asset_loader import load_local_or_remote_file\n'), ((1993, 2015), 'rlkit.torch.pytorch_util.set_gpu_mode', 'ptu.set_gpu_mode', (['(True)'], {}), '(True)\n', (2009, 2015), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((2137, 2204), 'glob.glob', 'glob.glob', (['"""/home/ashvin/data/s3doodad/demos/icra2021/dataset_v3/*"""'], {}), "('/home/ashvin/data/s3doodad/demos/icra2021/dataset_v3/*')\n", (2146, 2204), False, 'import glob\n'), ((2241, 2266), 'random.shuffle', 'random.shuffle', (['all_files'], {}), '(all_files)\n', (2255, 2266), False, 'import random\n'), ((5779, 5854), 'numpy.save', 'np.save', (["('/media/ashvin/data2/data/val/v2/' + key + '_demos.npy')", 'data_list'], {}), "('/media/ashvin/data2/data/val/v2/' + key + '_demos.npy', data_list)\n", (5786, 5854), True, 'import numpy as np\n'), ((1289, 1361), 'torchvision.transforms.functional.resized_crop', 'F.resized_crop', (['x', 'c[0]', 'c[1]', 'c[2]', 'c[3]', '(SIZE, SIZE)', 'Image.ANTIALIAS'], {}), '(x, c[0], c[1], c[2], c[3], (SIZE, SIZE), Image.ANTIALIAS)\n', (1303, 1361), True, 'import torchvision.transforms.functional as F\n'), ((1376, 1418), 'torchvision.transforms.functional.resize', 'F.resize', 
(['x', '(SIZE, SIZE)', 'Image.ANTIALIAS'], {}), '(x, (SIZE, SIZE), Image.ANTIALIAS)\n', (1384, 1418), True, 'import torchvision.transforms.functional as F\n'), ((1546, 1557), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1554, 1557), True, 'import numpy as np\n'), ((2338, 2374), 'numpy.load', 'np.load', (['filename'], {'allow_pickle': '(True)'}), '(filename, allow_pickle=True)\n', (2345, 2374), True, 'import numpy as np\n'), ((2862, 2884), 'copy.deepcopy', 'deepcopy', (['data[traj_i]'], {}), '(data[traj_i])\n', (2870, 2884), False, 'from copy import deepcopy\n'), ((3026, 3058), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {'mode': '"""RGB"""'}), "(img, mode='RGB')\n", (3041, 3058), False, 'from PIL import Image\n'), ((5344, 5359), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (5357, 5359), False, 'import pdb\n'), ((3226, 3245), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (3243, 3245), True, 'import numpy as np\n'), ((3278, 3297), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (3295, 3297), True, 'import numpy as np\n'), ((3324, 3343), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (3341, 3343), True, 'import numpy as np\n'), ((3635, 3667), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {'mode': '"""RGB"""'}), "(img, mode='RGB')\n", (3650, 3667), False, 'from PIL import Image\n'), ((4139, 4183), 'math.ceil', 'math.ceil', (['(samples_per_traj / cond_timesteps)'], {}), '(samples_per_traj / cond_timesteps)\n', (4148, 4183), False, 'import math\n'), ((4958, 4987), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['sampled_goals'], {}), '(sampled_goals)\n', (4972, 4987), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((5029, 5052), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['latents'], {}), '(latents)\n', (5043, 5052), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((4215, 4270), 'numpy.repeat', 'np.repeat', (['latents[:cond_timesteps]', 'num_repeat'], {'axis': 
'(0)'}), '(latents[:cond_timesteps], num_repeat, axis=0)\n', (4224, 4270), True, 'import numpy as np\n')] |
import numpy as np
import quaternion
def generate_trajectory_6d_quat(init_p, init_q, y_delta_p, y_delta_q):
cur_p = np.array(init_p)
cur_q = quaternion.from_float_array(init_q)
pred_p = []
pred_p.append(np.array(cur_p))
for [delta_p, delta_q] in zip(y_delta_p, y_delta_q):
cur_p = cur_p + np.matmul(quaternion.as_rotation_matrix(cur_q), delta_p.T).T
cur_q = cur_q * quaternion.from_float_array(delta_q).normalized()
pred_p.append(np.array(cur_p))
return np.reshape(pred_p, (len(pred_p), 3))
def generate_trajectory_3d(init_l, init_theta, init_psi, y_delta_l, y_delta_theta, y_delta_psi):
cur_l = np.array(init_l)
cur_theta = np.array(init_theta)
cur_psi = np.array(init_psi)
pred_l = []
pred_l.append(np.array(cur_l))
for [delta_l, delta_theta, delta_psi] in zip(y_delta_l, y_delta_theta, y_delta_psi):
cur_theta = cur_theta + delta_theta
cur_psi = cur_psi + delta_psi
cur_l[0] = cur_l[0] + delta_l * np.sin(cur_theta) * np.cos(cur_psi)
cur_l[1] = cur_l[1] + delta_l * np.sin(cur_theta) * np.sin(cur_psi)
cur_l[2] = cur_l[2] + delta_l * np.cos(cur_theta)
pred_l.append(np.array(cur_l))
return np.reshape(pred_l, (len(pred_l), 3)) | [
"quaternion.as_rotation_matrix",
"numpy.sin",
"numpy.array",
"numpy.cos",
"quaternion.from_float_array"
] | [((122, 138), 'numpy.array', 'np.array', (['init_p'], {}), '(init_p)\n', (130, 138), True, 'import numpy as np\n'), ((151, 186), 'quaternion.from_float_array', 'quaternion.from_float_array', (['init_q'], {}), '(init_q)\n', (178, 186), False, 'import quaternion\n'), ((654, 670), 'numpy.array', 'np.array', (['init_l'], {}), '(init_l)\n', (662, 670), True, 'import numpy as np\n'), ((687, 707), 'numpy.array', 'np.array', (['init_theta'], {}), '(init_theta)\n', (695, 707), True, 'import numpy as np\n'), ((722, 740), 'numpy.array', 'np.array', (['init_psi'], {}), '(init_psi)\n', (730, 740), True, 'import numpy as np\n'), ((221, 236), 'numpy.array', 'np.array', (['cur_p'], {}), '(cur_p)\n', (229, 236), True, 'import numpy as np\n'), ((775, 790), 'numpy.array', 'np.array', (['cur_l'], {}), '(cur_l)\n', (783, 790), True, 'import numpy as np\n'), ((477, 492), 'numpy.array', 'np.array', (['cur_p'], {}), '(cur_p)\n', (485, 492), True, 'import numpy as np\n'), ((1196, 1211), 'numpy.array', 'np.array', (['cur_l'], {}), '(cur_l)\n', (1204, 1211), True, 'import numpy as np\n'), ((1024, 1039), 'numpy.cos', 'np.cos', (['cur_psi'], {}), '(cur_psi)\n', (1030, 1039), True, 'import numpy as np\n'), ((1100, 1115), 'numpy.sin', 'np.sin', (['cur_psi'], {}), '(cur_psi)\n', (1106, 1115), True, 'import numpy as np\n'), ((1156, 1173), 'numpy.cos', 'np.cos', (['cur_theta'], {}), '(cur_theta)\n', (1162, 1173), True, 'import numpy as np\n'), ((330, 366), 'quaternion.as_rotation_matrix', 'quaternion.as_rotation_matrix', (['cur_q'], {}), '(cur_q)\n', (359, 366), False, 'import quaternion\n'), ((405, 441), 'quaternion.from_float_array', 'quaternion.from_float_array', (['delta_q'], {}), '(delta_q)\n', (432, 441), False, 'import quaternion\n'), ((1004, 1021), 'numpy.sin', 'np.sin', (['cur_theta'], {}), '(cur_theta)\n', (1010, 1021), True, 'import numpy as np\n'), ((1080, 1097), 'numpy.sin', 'np.sin', (['cur_theta'], {}), '(cur_theta)\n', (1086, 1097), True, 'import numpy as np\n')] |
import os
import sys
import argparse
#
# ========================
# Parser from args/configs
# ========================
#------- parser arguments ------
parser = argparse.ArgumentParser()
parser.add_argument('--configs',required=True)
parser.add_argument('--device',default=None, help='--device 0,1,2,3')
parser.add_argument('--evaluate',default=False, action='store_true')
parser.add_argument('--eval_ckpt_pth',default=None,help='ckpt for evaluate')
parser.add_argument('--debug', default=False, action='store_true')
parser.add_argument('--newconfig', default=False, action='store_true')
parser.add_argument('--best_ckpt_to_test', type=str, default=None)
args = parser.parse_args()
#--------- Device ----------
if args.device != 'cpu':
device_ids = args.device.strip()
os.environ['CUDA_VISIBLE_DEVICES'] = device_ids
device = 'cuda'
print(f'--------- Device = cuda:{device_ids} ---------')
device_ids = [int(idd) for idd in device_ids.split(',')]
else:
device = 'cpu'
print('--------- Device = cpu ---------')
from utils.config import Config, configs
import random
import torch
import torch.nn as nn
import numpy as np
import shutil
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
import tensorboardX
from tqdm import tqdm
from utils.logger import Logger
#------- load configs ----------
configs.update_from_modules(args.configs)
#-------------------------------
# ==============================
# Fix Random Seed, Creat logfile
# ==============================
print(f'------------ Fix Random Seed: {configs.seed}------------')
seed = configs.seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# =====================
# Dataset, Dataloader
# =====================
#-------- Train/Test/Valid Data --------
print('---------- Dataset, Dataloader ------------')
loaders = {}
dataset = configs.dataset()
try:
if configs.train.collate_fn is not None:
print('use custom train/valid collate function')
collate_fn = configs.train.collate_fn
except:
from torch.utils.data.dataloader import default_collate
collate_fn = default_collate
print('use default train/valid collate function')
for i, s in enumerate(['train', 'valid']): # (s=='train'),
loaders[s] = DataLoader(dataset[s], batch_size=configs.dataloader.batch_size[s],shuffle=(s=='train'),
num_workers=configs.dataloader.num_workers,
collate_fn=collate_fn,
pin_memory=configs.dataloader.pin_memory,
worker_init_fn=lambda worker_id: np.random.seed(np.random.get_state()[1][0] + worker_id + (i + 1)))
if args.evaluate and 'other_dataset' in configs.evaluate:
print('------------ Use Evaluate Dataset to Test -------------')
dataset = configs.evaluate.other_dataset()
else:
print('------------ Use Train Dataset to Test ------------')
dataset = configs.dataset()
try:
if configs.evaluate.collate_fn is not None:
print('use custom test collate function')
collate_fn = configs.evaluate.collate_fn
except:
from torch.utils.data.dataloader import default_collate
collate_fn = default_collate
print('use default test collate function')
loaders['test'] = DataLoader(dataset['test'], batch_size=configs.dataloader.batch_size['test'],shuffle=(s=='train'),
num_workers=configs.dataloader.num_workers,
collate_fn=collate_fn,
pin_memory=configs.dataloader.pin_memory,
worker_init_fn=lambda worker_id: np.random.seed(np.random.get_state()[1][0] + worker_id + 3))
# =======================================================
# Model & Device (parallel/non-parallel), cudnn Benchmark
# =======================================================
#--------- Model ----------
print('------------ model -----------')
model = configs.model()
#print(model)
#--------- Parallel ----------
if device != 'cpu':
if not configs.deterministic:
cudnn.benchmark = True
else:
cudnn.deterministic = True
cudnn.benchmark = False
#if configs.parallel and len(device_ids)>1:
# assert torch.cuda.device_count() > 1
model = nn.DataParallel(model)
print(f'Use Parallel: model on device {device_ids}')
model.to(device)
# =======================
# Train Tools (Criterion, Optimizer, Scheduler)
# =======================
print('------------ Train Tools -------------')
#------------------ Criterion ---------------------
criterion = configs.train.criterion() # loss class in pytorch or yourself
#------------------ Optimizer ---------------------
# always Adam or SGD+momentum
optimizer = configs.train.optimizer(model.parameters())
#------------------ Scheduler ---------------------
scheduler = configs.train.scheduler(optimizer)
# =======================
# Train One Epoch Kernel
# =======================
torch.autograd.set_detect_anomaly(True)
def train(model, dataloader, criterion, optimizer, scheduler, writer, current_step, device):
model.train()
for inputs, targets in tqdm(dataloader, desc='train', ncols=0):
if not isinstance(inputs, (list, tuple)):
inputs = inputs.to(device)
if not isinstance(targets, (list, tuple)):
targets = targets.to(device)
#inputs = inputs.cuda(1)
#targets = targets.cuda(1)
#print(f'inputs = {inputs[:, :3, :].permute(0, 2, 1)}')
outputs = model(inputs)
loss = criterion(outputs, targets)
optimizer.zero_grad()
loss.backward()
optimizer.step()
writer.add_scalar('loss/train',loss.item(), current_step)
current_step += 1
if scheduler is not None:
scheduler.step()
#print(loss.item())
# ======================
# Valid One Epoch Kernel
# ======================
# meters = {'meter1_{test/valid}':meter1, 'meter2_{test/valid}':meter2}
# best_results = {'meter1_{test/valid}':result1, 'meter2_{test/valid}':result2}
def valid(model, dataloader, criterion, meters, best_flags, best_results, writer, current_step):
model.eval()
results = {}
with torch.no_grad():
for inputs, targets in tqdm(dataloader, desc='valid', ncols=0):
#for inputs, targets in dataloader:
if not isinstance(inputs, (list, tuple)):
inputs = inputs.to(device)
if not isinstance(targets, (list, tuple)):
targets = targets.to(device)
outputs = model(inputs)
loss = criterion(outputs, targets)
writer.add_scalar('loss/valid',loss.item(), current_step)
current_step += 1
for meter in meters.values():
meter.update(outputs, targets)
for k, meter in meters.items():
results[k] = meter.compute()
if isinstance(results[k], dict):
if not isinstance(best_results[k], dict):
best_results[k] = {}
if not isinstance(best_flags[k], dict):
best_flags[k] = {}
for name, value in results[k].items():
writer.add_scalar(f'{k}/{name}/valid', value, current_step)
try:
if value > best_results[k][name]:
best_results[k][name] = value
best_flags[k][name] = True
else:
best_flags[k][name] = False
except KeyError:
best_results[k][name] = 0
else:
writer.add_scalar(f'{k}/valid', results[k], current_step)
#print(best_results)
if results[k] > best_results[k]:
best_results[k] = results[k]
best_flags[k] = True
else:
best_flags[k] = False
return results
def test(model, dataloader, meters, configs, device):
model.eval()
results = {}
with torch.no_grad():
for inputs, targets in tqdm(dataloader, desc='test', ncols=0):
#for inputs, targets in dataloader:
if not isinstance(inputs, (list, tuple)):
inputs = inputs.to(device)
if not isinstance(targets, (list, tuple)):
targets = targets.to(device)
outputs = model(inputs)
for meter in meters.values():
meter.update(outputs, targets)
for k, meter in meters.items():
results[k] = meter.compute()
if isinstance(results[k], dict):
for name, value in results[k].items():
print(f'results[{k}][{name}] = {value}')
else:
print(f'results[{k}] = {results[k]}')
return results
# ==========================================
# If Evaluate, turn to the evaluate function
# ==========================================
if args.evaluate:
print('------------ Evaluate Begin ------------')
if args.eval_ckpt_pth != None and os.path.exists(args.eval_ckpt_pth):
ckpt = torch.load(args.eval_ckpt_pth)
model.load_state_dict(ckpt['model'])
elif args.best_ckpt_to_test != None:
print(f'use {args.best_ckpt_to_test} best ckpt')
ckpt = torch.load(configs.train.best_ckpt_paths[args.best_ckpt_to_test])
model.load_state_dict(ckpt['model'])
elif os.path.exists(configs.train.common_ckpt_path):
print('use train common ckpt')
ckpt = torch.load(configs.train.common_ckpt_path)
model.load_state_dict(ckpt['model'])
else:
print('ERROR: No checkpoint file !')
meters = {}
for k, meter in configs.evaluate.meters.items():
meters[k.format('test')] = meter()
if configs.evaluate.fn != None:
evaluate = configs.evaluate.fn
else:
evaluate = test
evaluate(model, loaders['test'], meters, configs, device)
sys.exit()
# ===============
# Main Train
# ===============
num_epochs = configs.train.num_epochs
start_epoch = 0
#---- create checkpoint save path for this experiment
os.makedirs(os.path.join(os.getcwd(), configs.train.ckpt_dir), exist_ok=True)
try:
logfile = configs.train.logfile
except KeyError:
logfile = None
logging = Logger(logfile)
logging.info(configs.exp_name)
# -------- Read CheckPoint if given -------------
train_common_ckpt_path = configs.train.common_ckpt_path
# Mulit-best for Mulit-Metrics
# {'meter1_valid':best_path1, 'meter2_valid':best_path2, ...}
train_best_ckpt_paths = configs.train.best_ckpt_paths
# -------- Initialize Tensorboard Writer, Meters, Result recorders -------------
writer = tensorboardX.SummaryWriter(f'runs/{configs.exp_name}')
# Meters, Result recorders
meters, best_flags, best_results = {}, {}, {}
train_current_step = 0
valid_current_step = 0
# Multi-Meter for Multi-Metrics
# meters = {'meter1_valid':meter1,...}
# best_results = {'meter1_valid':0,...}
for k, meter in configs.train.meters.items():
meters[k.format('valid')] = meter()
best_flags[k.format('valid')] = False
best_results[k.format('valid')] = 0
#print(best_results)
# -------- Just for recover the train progress ------------
if os.path.exists(train_common_ckpt_path) and not args.debug:
print('------------ Use checkpoint to recover train process ----------')
ckpt = torch.load(train_common_ckpt_path)
model.load_state_dict(ckpt['model'])
optimizer.load_state_dict(ckpt['optimizer'])
scheduler.load_state_dict(ckpt['scheduler'])
start_epoch = ckpt['last_epoch'] + 1
best_results = ckpt['best_results']
train_current_step = ckpt['train_current_step']
valid_current_step = ckpt['valid_current_step']
if not args.newconfig:
configs = ckpt['configs']
# -------- Main Train Step -------------
print('------------ Begin train process ------------')
try:
if configs.train.fn is not None:
print('use custom train function')
train = configs.train.fn
except:
print('use default train function')
try:
if configs.train.valid_fn is not None:
print('use custom valid function')
valid = configs.train.valid_fn
except:
print('use default valid function')
for epoch in range(start_epoch, configs.train.num_epochs):
print(f'epoch = {epoch}/{configs.train.num_epochs}')
logging.info(f'epoch = {epoch}/{configs.train.num_epochs}')
np.random.seed(epoch)
train(model, loaders['train'], criterion, optimizer, scheduler, writer, train_current_step, device)
train_current_step += len(loaders['train'])
# 每个epoch换一个新的
for k, meter in configs.train.meters.items():
meters[k.format('valid')] = meter()
if not (epoch+1)%configs.train.valid_interval:
results = valid(model, loaders['valid'], criterion, meters, best_flags, best_results, writer, valid_current_step)
valid_current_step += len(loaders['train'])
#scheduler.step(results[''])
for k in results.keys():
if isinstance(results[k], dict):
assert isinstance(best_results[k], dict)
assert isinstance(best_flags[k], dict)
for name, value in results[k].items():
logging.info(f'results[{k}][{name}] = {value}, best_results[{k}][{name}] = {best_results[k][name]}')
else:
logging.info(f'results[{k}] = {results[k]}, best_results[{k}] = {best_results[k]}')
#--------- Save CheckPoint ----------
if not args.debug:
torch.save(
{'model':model.state_dict(),
'optimizer':optimizer.state_dict(),
'scheduler':scheduler.state_dict(),
'last_epoch':epoch,
'best_results':best_results,
'train_current_step':train_current_step,
'valid_current_step':valid_current_step,
'configs':configs
}, train_common_ckpt_path)
for k, v in best_flags.items():
if isinstance(best_flags[k], dict):
for name, _ in best_flags[k].items():
shutil.copyfile(train_common_ckpt_path, train_best_ckpt_paths.format(k + '_' + name))
else:
if v:
shutil.copyfile(train_common_ckpt_path, train_best_ckpt_paths.format(k))
writer.close()
logging.close()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"utils.config.configs.train.criterion",
"utils.config.configs.evaluate.other_dataset",
"utils.logger.Logger",
"torch.autograd.set_detect_anomaly",
"utils.config.configs.update_from_modules",
"torch.no_grad",
"utils.config.configs.train.scheduler",
"t... | [((164, 189), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (187, 189), False, 'import argparse\n'), ((1351, 1392), 'utils.config.configs.update_from_modules', 'configs.update_from_modules', (['args.configs'], {}), '(args.configs)\n', (1378, 1392), False, 'from utils.config import Config, configs\n'), ((1613, 1630), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1624, 1630), False, 'import random\n'), ((1631, 1651), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1645, 1651), True, 'import numpy as np\n'), ((1652, 1675), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1669, 1675), False, 'import torch\n'), ((1864, 1881), 'utils.config.configs.dataset', 'configs.dataset', ([], {}), '()\n', (1879, 1881), False, 'from utils.config import Config, configs\n'), ((3941, 3956), 'utils.config.configs.model', 'configs.model', ([], {}), '()\n', (3954, 3956), False, 'from utils.config import Config, configs\n'), ((4582, 4607), 'utils.config.configs.train.criterion', 'configs.train.criterion', ([], {}), '()\n', (4605, 4607), False, 'from utils.config import Config, configs\n'), ((4846, 4880), 'utils.config.configs.train.scheduler', 'configs.train.scheduler', (['optimizer'], {}), '(optimizer)\n', (4869, 4880), False, 'from utils.config import Config, configs\n'), ((4958, 4997), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (4991, 4997), False, 'import torch\n'), ((10352, 10367), 'utils.logger.Logger', 'Logger', (['logfile'], {}), '(logfile)\n', (10358, 10367), False, 'from utils.logger import Logger\n'), ((10743, 10797), 'tensorboardX.SummaryWriter', 'tensorboardX.SummaryWriter', (['f"""runs/{configs.exp_name}"""'], {}), "(f'runs/{configs.exp_name}')\n", (10769, 10797), False, 'import tensorboardX\n'), ((11045, 11073), 'utils.config.configs.train.meters.items', 'configs.train.meters.items', ([], {}), '()\n', (11071, 11073), False, 
'from utils.config import Config, configs\n'), ((2820, 2852), 'utils.config.configs.evaluate.other_dataset', 'configs.evaluate.other_dataset', ([], {}), '()\n', (2850, 2852), False, 'from utils.config import Config, configs\n'), ((2938, 2955), 'utils.config.configs.dataset', 'configs.dataset', ([], {}), '()\n', (2953, 2955), False, 'from utils.config import Config, configs\n'), ((4271, 4293), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (4286, 4293), True, 'import torch.nn as nn\n'), ((5136, 5175), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'desc': '"""train"""', 'ncols': '(0)'}), "(dataloader, desc='train', ncols=0)\n", (5140, 5175), False, 'from tqdm import tqdm\n'), ((9766, 9797), 'utils.config.configs.evaluate.meters.items', 'configs.evaluate.meters.items', ([], {}), '()\n', (9795, 9797), False, 'from utils.config import Config, configs\n'), ((10018, 10028), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10026, 10028), False, 'import sys\n'), ((11285, 11323), 'os.path.exists', 'os.path.exists', (['train_common_ckpt_path'], {}), '(train_common_ckpt_path)\n', (11299, 11323), False, 'import os\n'), ((11432, 11466), 'torch.load', 'torch.load', (['train_common_ckpt_path'], {}), '(train_common_ckpt_path)\n', (11442, 11466), False, 'import torch\n'), ((12478, 12499), 'numpy.random.seed', 'np.random.seed', (['epoch'], {}), '(epoch)\n', (12492, 12499), True, 'import numpy as np\n'), ((12691, 12719), 'utils.config.configs.train.meters.items', 'configs.train.meters.items', ([], {}), '()\n', (12717, 12719), False, 'from utils.config import Config, configs\n'), ((6195, 6210), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6208, 6210), False, 'import torch\n'), ((6243, 6282), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'desc': '"""valid"""', 'ncols': '(0)'}), "(dataloader, desc='valid', ncols=0)\n", (6247, 6282), False, 'from tqdm import tqdm\n'), ((8084, 8099), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8097, 8099), False, 'import torch\n'), 
((8132, 8170), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'desc': '"""test"""', 'ncols': '(0)'}), "(dataloader, desc='test', ncols=0)\n", (8136, 8170), False, 'from tqdm import tqdm\n'), ((9125, 9159), 'os.path.exists', 'os.path.exists', (['args.eval_ckpt_pth'], {}), '(args.eval_ckpt_pth)\n', (9139, 9159), False, 'import os\n'), ((9176, 9206), 'torch.load', 'torch.load', (['args.eval_ckpt_pth'], {}), '(args.eval_ckpt_pth)\n', (9186, 9206), False, 'import torch\n'), ((10211, 10222), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10220, 10222), False, 'import os\n'), ((9365, 9430), 'torch.load', 'torch.load', (['configs.train.best_ckpt_paths[args.best_ckpt_to_test]'], {}), '(configs.train.best_ckpt_paths[args.best_ckpt_to_test])\n', (9375, 9430), False, 'import torch\n'), ((9485, 9531), 'os.path.exists', 'os.path.exists', (['configs.train.common_ckpt_path'], {}), '(configs.train.common_ckpt_path)\n', (9499, 9531), False, 'import os\n'), ((9587, 9629), 'torch.load', 'torch.load', (['configs.train.common_ckpt_path'], {}), '(configs.train.common_ckpt_path)\n', (9597, 9629), False, 'import torch\n'), ((3643, 3664), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (3662, 3664), True, 'import numpy as np\n'), ((2626, 2647), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (2645, 2647), True, 'import numpy as np\n')] |
import numpy as np
class Layer():
def __init__(self, nConnect, nNeurons, fAct):
self.fAct=fAct
self.b=np.random.rand(1, nNeurons) +2-1
self.W=np.random.rand(nConnect, nNeurons) +2-1
| [
"numpy.random.rand"
] | [((126, 153), 'numpy.random.rand', 'np.random.rand', (['(1)', 'nNeurons'], {}), '(1, nNeurons)\n', (140, 153), True, 'import numpy as np\n'), ((175, 209), 'numpy.random.rand', 'np.random.rand', (['nConnect', 'nNeurons'], {}), '(nConnect, nNeurons)\n', (189, 209), True, 'import numpy as np\n')] |
#!/usr/bin/python3
#
# Just some handy image utilities for all instruments.
# Uses OpenCV and Numpy
#
# Library imports
import cv2
import numpy as np
# Detect partial data products.
# MER images come down top to bottom, left to right, in distinct chunks.
# So checking if the bottom left corner of the image in question is solid black should
# be a decent check of a partial image.
# Return True if partial, False if otherwise
def checkPartial(image):
# Check to see if image is large enough; our thumbnails are 64x64!
# If that is the case, check against the whole image - likely it's either all or nothing at this size.
height, width = image.shape[:2]
if (height < 150) or (width < 150):
templateImage = np.zeros((height,width,1), np.uint8)
checkCorner = image
else:
templateImage = np.zeros((150,150,1), np.uint8) # 150 pixel square black area
checkCorner = image[-151:-1, -151:-1] # Bottom right 150 x 150 area of image
# Check for full match; if every single original pixel is solid black,
# there's a preeeeetty good chance it's a partial.
if (checkCorner == templateImage).all():
return True
else:
return False
| [
"numpy.zeros"
] | [((714, 752), 'numpy.zeros', 'np.zeros', (['(height, width, 1)', 'np.uint8'], {}), '((height, width, 1), np.uint8)\n', (722, 752), True, 'import numpy as np\n'), ((798, 831), 'numpy.zeros', 'np.zeros', (['(150, 150, 1)', 'np.uint8'], {}), '((150, 150, 1), np.uint8)\n', (806, 831), True, 'import numpy as np\n')] |
import socket
from datetime import datetime
import os.path as osp
import huepy as hue
import numpy as np
import torch
from torch.backends import cudnn
from torch.utils.tensorboard import SummaryWriter
import sys
sys.path.append('./')
from configs import args_faster_rcnn_hoim
from lib.datasets import get_data_loader
from lib.model.faster_rcnn_hoim import get_hoim_model
from lib.utils.misc import Nestedspace, resume_from_checkpoint, \
get_optimizer, get_lr_scheduler
from lib.utils.distributed import init_distributed_mode, is_main_process
from lib.utils.trainer import get_trainer
from lib.utils.serialization import mkdir_if_missing
def main(args):
if args.distributed:
init_distributed_mode(args)
device = torch.device(args.device)
cudnn.benchmark = False
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if is_main_process():
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
args.path = osp.join(
args.path, current_time + '_' + socket.gethostname())
mkdir_if_missing(args.path)
print(hue.info(hue.bold(hue.lightgreen(
'Working directory: {}'.format(args.path)))))
if args.train.use_tfboard:
tfboard = SummaryWriter(log_dir=args.path)
args.export_to_json(osp.join(args.path, 'args.json'))
else:
tfboard = None
train_loader = get_data_loader(args, train=True)
model = get_hoim_model(pretrained_backbone=True,
num_features=args.num_features, num_pids=args.num_pids,
num_cq_size=args.num_cq_size, num_bg_size=args.num_bg_size,
oim_momentum=args.train.oim_momentum, oim_scalar=args.oim_scalar,
min_size=args.train.min_size, max_size=args.train.max_size,
anchor_scales=(args.anchor_scales,), anchor_ratios=(
args.anchor_ratios,),
# RPN parameters
rpn_pre_nms_top_n_train=args.train.rpn_pre_nms_top_n,
rpn_post_nms_top_n_train=args.train.rpn_post_nms_top_n,
# rpn_pre_nms_top_n_test=args.test.rpn_pre_nms_top_n,
# rpn_post_nms_top_n_test=args.test.rpn_post_nms_top_n,
rpn_nms_thresh=args.train.rpn_nms_thresh,
rpn_fg_iou_thresh=args.train.rpn_positive_overlap,
rpn_bg_iou_thresh=args.train.rpn_negative_overlap,
rpn_batch_size_per_image=args.train.rpn_batch_size,
rpn_positive_fraction=args.train.rpn_fg_fraction,
# Box parameters
box_score_thresh=args.train.fg_thresh,
# box_nms_thresh=args.test.nms, # inference only
box_detections_per_img=args.train.rpn_post_nms_top_n, # use all
box_fg_iou_thresh=args.train.bg_thresh_hi,
box_bg_iou_thresh=args.train.bg_thresh_lo,
box_batch_size_per_image=args.train.rcnn_batch_size,
box_positive_fraction=args.train.fg_fraction, # for proposals
bbox_reg_weights=args.train.box_regression_weights,
)
model.to(device)
optimizer = get_optimizer(args, model)
lr_scheduler = get_lr_scheduler(args, optimizer)
if args.apex:
from apex import amp
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
model_without_ddp = model
if args.distributed:
if args.apex:
from apex.parallel import DistributedDataParallel, convert_syncbn_model
model = convert_syncbn_model(model)
model = DistributedDataParallel(model)
else:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], find_unused_parameters=True)
model_without_ddp = model.module
if args.resume is not None:
args, model_without_ddp, optimizer, lr_scheduler = resume_from_checkpoint(
args, model_without_ddp, optimizer, lr_scheduler)
trainer = get_trainer(args, model, model_without_ddp, train_loader,
optimizer, lr_scheduler, device, tfboard)
trainer.run(train_loader, max_epochs=args.train.epochs)
if is_main_process():
tfboard.close()
if __name__ == '__main__':
arg_parser = args_faster_rcnn_hoim()
args = arg_parser.parse_args(namespace=Nestedspace())
main(args)
| [
"numpy.random.seed",
"lib.datasets.get_data_loader",
"torch.device",
"os.path.join",
"sys.path.append",
"lib.utils.distributed.is_main_process",
"torch.nn.parallel.DistributedDataParallel",
"lib.utils.misc.get_optimizer",
"socket.gethostname",
"lib.utils.misc.Nestedspace",
"lib.utils.misc.get_lr... | [((214, 235), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (229, 235), False, 'import sys\n'), ((736, 761), 'torch.device', 'torch.device', (['args.device'], {}), '(args.device)\n', (748, 761), False, 'import torch\n'), ((794, 819), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (808, 819), True, 'import numpy as np\n'), ((824, 852), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (841, 852), False, 'import torch\n'), ((861, 878), 'lib.utils.distributed.is_main_process', 'is_main_process', ([], {}), '()\n', (876, 878), False, 'from lib.utils.distributed import init_distributed_mode, is_main_process\n'), ((1387, 1420), 'lib.datasets.get_data_loader', 'get_data_loader', (['args'], {'train': '(True)'}), '(args, train=True)\n', (1402, 1420), False, 'from lib.datasets import get_data_loader\n'), ((1434, 2544), 'lib.model.faster_rcnn_hoim.get_hoim_model', 'get_hoim_model', ([], {'pretrained_backbone': '(True)', 'num_features': 'args.num_features', 'num_pids': 'args.num_pids', 'num_cq_size': 'args.num_cq_size', 'num_bg_size': 'args.num_bg_size', 'oim_momentum': 'args.train.oim_momentum', 'oim_scalar': 'args.oim_scalar', 'min_size': 'args.train.min_size', 'max_size': 'args.train.max_size', 'anchor_scales': '(args.anchor_scales,)', 'anchor_ratios': '(args.anchor_ratios,)', 'rpn_pre_nms_top_n_train': 'args.train.rpn_pre_nms_top_n', 'rpn_post_nms_top_n_train': 'args.train.rpn_post_nms_top_n', 'rpn_nms_thresh': 'args.train.rpn_nms_thresh', 'rpn_fg_iou_thresh': 'args.train.rpn_positive_overlap', 'rpn_bg_iou_thresh': 'args.train.rpn_negative_overlap', 'rpn_batch_size_per_image': 'args.train.rpn_batch_size', 'rpn_positive_fraction': 'args.train.rpn_fg_fraction', 'box_score_thresh': 'args.train.fg_thresh', 'box_detections_per_img': 'args.train.rpn_post_nms_top_n', 'box_fg_iou_thresh': 'args.train.bg_thresh_hi', 'box_bg_iou_thresh': 'args.train.bg_thresh_lo', 
'box_batch_size_per_image': 'args.train.rcnn_batch_size', 'box_positive_fraction': 'args.train.fg_fraction', 'bbox_reg_weights': 'args.train.box_regression_weights'}), '(pretrained_backbone=True, num_features=args.num_features,\n num_pids=args.num_pids, num_cq_size=args.num_cq_size, num_bg_size=args.\n num_bg_size, oim_momentum=args.train.oim_momentum, oim_scalar=args.\n oim_scalar, min_size=args.train.min_size, max_size=args.train.max_size,\n anchor_scales=(args.anchor_scales,), anchor_ratios=(args.anchor_ratios,\n ), rpn_pre_nms_top_n_train=args.train.rpn_pre_nms_top_n,\n rpn_post_nms_top_n_train=args.train.rpn_post_nms_top_n, rpn_nms_thresh=\n args.train.rpn_nms_thresh, rpn_fg_iou_thresh=args.train.\n rpn_positive_overlap, rpn_bg_iou_thresh=args.train.rpn_negative_overlap,\n rpn_batch_size_per_image=args.train.rpn_batch_size,\n rpn_positive_fraction=args.train.rpn_fg_fraction, box_score_thresh=args\n .train.fg_thresh, box_detections_per_img=args.train.rpn_post_nms_top_n,\n box_fg_iou_thresh=args.train.bg_thresh_hi, box_bg_iou_thresh=args.train\n .bg_thresh_lo, box_batch_size_per_image=args.train.rcnn_batch_size,\n box_positive_fraction=args.train.fg_fraction, bbox_reg_weights=args.\n train.box_regression_weights)\n', (1448, 2544), False, 'from lib.model.faster_rcnn_hoim import get_hoim_model\n'), ((3445, 3471), 'lib.utils.misc.get_optimizer', 'get_optimizer', (['args', 'model'], {}), '(args, model)\n', (3458, 3471), False, 'from lib.utils.misc import Nestedspace, resume_from_checkpoint, get_optimizer, get_lr_scheduler\n'), ((3491, 3524), 'lib.utils.misc.get_lr_scheduler', 'get_lr_scheduler', (['args', 'optimizer'], {}), '(args, optimizer)\n', (3507, 3524), False, 'from lib.utils.misc import Nestedspace, resume_from_checkpoint, get_optimizer, get_lr_scheduler\n'), ((4376, 4479), 'lib.utils.trainer.get_trainer', 'get_trainer', (['args', 'model', 'model_without_ddp', 'train_loader', 'optimizer', 'lr_scheduler', 'device', 'tfboard'], {}), '(args, model, 
model_without_ddp, train_loader, optimizer,\n lr_scheduler, device, tfboard)\n', (4387, 4479), False, 'from lib.utils.trainer import get_trainer\n'), ((4571, 4588), 'lib.utils.distributed.is_main_process', 'is_main_process', ([], {}), '()\n', (4586, 4588), False, 'from lib.utils.distributed import init_distributed_mode, is_main_process\n'), ((4660, 4683), 'configs.args_faster_rcnn_hoim', 'args_faster_rcnn_hoim', ([], {}), '()\n', (4681, 4683), False, 'from configs import args_faster_rcnn_hoim\n'), ((695, 722), 'lib.utils.distributed.init_distributed_mode', 'init_distributed_mode', (['args'], {}), '(args)\n', (716, 722), False, 'from lib.utils.distributed import init_distributed_mode, is_main_process\n'), ((1048, 1075), 'lib.utils.serialization.mkdir_if_missing', 'mkdir_if_missing', (['args.path'], {}), '(args.path)\n', (1064, 1075), False, 'from lib.utils.serialization import mkdir_if_missing\n'), ((3600, 3648), 'apex.amp.initialize', 'amp.initialize', (['model', 'optimizer'], {'opt_level': '"""O1"""'}), "(model, optimizer, opt_level='O1')\n", (3614, 3648), False, 'from apex import amp\n'), ((4275, 4347), 'lib.utils.misc.resume_from_checkpoint', 'resume_from_checkpoint', (['args', 'model_without_ddp', 'optimizer', 'lr_scheduler'], {}), '(args, model_without_ddp, optimizer, lr_scheduler)\n', (4297, 4347), False, 'from lib.utils.misc import Nestedspace, resume_from_checkpoint, get_optimizer, get_lr_scheduler\n'), ((1239, 1271), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'args.path'}), '(log_dir=args.path)\n', (1252, 1271), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((1300, 1332), 'os.path.join', 'osp.join', (['args.path', '"""args.json"""'], {}), "(args.path, 'args.json')\n", (1308, 1332), True, 'import os.path as osp\n'), ((3831, 3858), 'apex.parallel.convert_syncbn_model', 'convert_syncbn_model', (['model'], {}), '(model)\n', (3851, 3858), False, 'from apex.parallel import DistributedDataParallel, 
convert_syncbn_model\n'), ((3879, 3909), 'apex.parallel.DistributedDataParallel', 'DistributedDataParallel', (['model'], {}), '(model)\n', (3902, 3909), False, 'from apex.parallel import DistributedDataParallel, convert_syncbn_model\n'), ((3944, 3996), 'torch.nn.SyncBatchNorm.convert_sync_batchnorm', 'torch.nn.SyncBatchNorm.convert_sync_batchnorm', (['model'], {}), '(model)\n', (3989, 3996), False, 'import torch\n'), ((4017, 4129), 'torch.nn.parallel.DistributedDataParallel', 'torch.nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[args.local_rank]', 'find_unused_parameters': '(True)'}), '(model, device_ids=[args.\n local_rank], find_unused_parameters=True)\n', (4058, 4129), False, 'import torch\n'), ((4727, 4740), 'lib.utils.misc.Nestedspace', 'Nestedspace', ([], {}), '()\n', (4738, 4740), False, 'from lib.utils.misc import Nestedspace, resume_from_checkpoint, get_optimizer, get_lr_scheduler\n'), ((903, 917), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (915, 917), False, 'from datetime import datetime\n'), ((1018, 1038), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1036, 1038), False, 'import socket\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""__description__"""
__author__ = "<NAME>, GIScience Research Group, Heidelberg University"
__email__ = "<EMAIL>"
import logging
import pygeos
import geopandas as gpd
from modules.utils import calc_compactness
from shapely.geometry import box
import glob
import os
import pandas as pd
import numpy as np
gpd.options.use_pygeos = True
def generate_street_blocks_gpd(in_dir):
    """
    Generate street blocks using geopandas (this is 8 times slower than
    generate_street_blocks())

    :param in_dir: directory containing the traffic feature *.geojson files
    :return: GeoDataFrame with the street block polygons
    """
    # Load traffic features
    files = glob.glob(os.path.join(in_dir, "*.geojson"))
    all_features = []
    for f in files:
        features = gpd.read_file(f)
        features.drop("@snapshotTimestamp", axis=1, inplace=True)
        # The feature type is encoded in the file name, e.g. "highway.geojson"
        features["type"] = os.path.basename(f).split(".")[0]
        all_features.append(features)
    features_df = pd.concat(all_features)
    # Buffer line features so they become (thin) polygons.
    # .copy() avoids pandas' SettingWithCopyWarning: the original assigned
    # into a .loc slice of features_df, which is chained assignment.
    line_features = features_df.loc[
        features_df.geometry.map(lambda x: x.geom_type != "Polygon")
    ].copy()
    line_features.loc[:, "geometry"] = line_features.buffer(0.00001)
    poly_features = features_df.loc[
        features_df.geometry.map(lambda x: x.geom_type == "Polygon")
    ]
    all_features = pd.concat([line_features, poly_features])
    # Bounding box as polygon
    bbox_geom = box(*features_df.total_bounds)
    bbox_df = gpd.GeoDataFrame({"geometry": [bbox_geom]}, crs="epsg:4326")
    # Street blocks = bounding box minus all (buffered) traffic features
    geoms_diff = gpd.overlay(all_features, bbox_df, how="symmetric_difference")
    return geoms_diff
def polygons_from_traffic(in_dir):
    """
    Generates street blocks by intersecting traffic features with each other.

    Line features are buffered into thin polygons, unioned with the polygonal
    traffic features and subtracted from the bounding box of all features; the
    remaining parts are the street blocks.

    :param in_dir: OSM download directory containing traffic/*.geojson
    :return: GeoDataFrame (epsg:4326) with one street block polygon per row
    """
    # Load traffic features
    files = glob.glob(os.path.join(in_dir, "traffic", "*.geojson"))
    # BUG FIX: message was a double negative ("No OSM features not found")
    assert len(files) > 0, f"No OSM features found in {in_dir}"
    all_features = []
    for f in files:
        # NOTE(review): "names" is not a geopandas.read_file parameter; it is
        # forwarded to the underlying engine -- presumably ignored; verify.
        features = gpd.read_file(f, names=["geometry"])
        features = features.loc[:, ["geometry"]]
        features["type"] = os.path.basename(f).split(".")[0]
        all_features.append(features)
    features_df = pd.concat(all_features)
    # Bounding box as polygon
    bbox_geom = pygeos.from_shapely(box(*features_df.total_bounds))
    # Buffer line Features
    # todo: adjust buffers based on traffic feature type
    line_features = features_df.loc[
        features_df.geometry.map(
            lambda x: x.geom_type not in ("Polygon", "MultiPolygon")
        )
    ]
    line_geoms = line_features.apply(
        lambda x: pygeos.from_shapely(x["geometry"]), axis=1
    )
    line_geoms_buf = pygeos.buffer(line_geoms, 0.00005)
    # Merge buffered line features with polygon features
    poly_features = features_df.loc[
        features_df.geometry.map(lambda x: x.geom_type in ("Polygon", "MultiPolygon"))
    ]
    poly_geoms = poly_features.apply(
        lambda x: pygeos.from_shapely(x["geometry"]), axis=1
    )
    all_geoms = np.concatenate(
        (np.array(poly_geoms).ravel(), np.array(line_geoms_buf).ravel())
    )
    all_geoms_union = pygeos.union_all(all_geoms)
    # Calculate symmetric difference
    geoms_diff = pygeos.symmetric_difference(bbox_geom, all_geoms_union)
    geom_diff_df = gpd.GeoDataFrame(
        {"geometry": pygeos.get_parts(geoms_diff)}, crs="epsg:4326"
    )
    # BUG FIX: filter previously tested for "Multipolygon"; shapely spells the
    # type "MultiPolygon", so multipolygon blocks were silently dropped before
    # the explode() below could split them. (polygons_from_landuse uses the
    # correct spelling.)
    geom_diff_df = geom_diff_df.loc[
        geom_diff_df.geometry.map(lambda x: x.geom_type in ("Polygon", "MultiPolygon"))
    ]
    geom_diff_df = geom_diff_df.explode()
    return geom_diff_df
def polygons_from_landuse(in_dir, street_blocks, epsg):
    """
    Generates land use polygons by iteratively overlaying the street blocks
    with each land use layer.

    Every street block is split into the part that intersects a land use value
    (which gets ``(key, value)`` appended to its tag list) and the remainder.
    After all layers are processed the tag lists are serialized to
    ``"key=value;key=value"`` strings.

    :param in_dir: OSM download directory containing landuse/*.geojson
    :param street_blocks: GeoDataFrame of street block polygons
    :param epsg: EPSG code of the projected CRS used for the overlays
    :return: exploded GeoDataFrame of tagged land use polygons
    """
    # Start every block with an empty tag list (one empty list per row).
    street_blocks["tags"] = np.empty((len(street_blocks), 0)).tolist()
    street_blocks = street_blocks.to_crs(epsg=epsg)
    lu_polygons = street_blocks.copy()
    # Load land use features (one file per OSM key, e.g. "landuse.geojson")
    files = glob.glob(os.path.join(in_dir, "landuse", "*.geojson"))
    for f in files:
        key = os.path.basename(f).split(".")[0]
        features = gpd.read_file(f)
        if len(features) == 0:
            continue
        features = features.loc[:, ["geometry", key]]
        features = features.to_crs(epsg=epsg)
        values = features[key].unique()
        for val in values:
            selected_features = features.loc[features[key] == val]
            # Land use blocks which intersect selected land use features
            intersected = gpd.overlay(
                lu_polygons,
                selected_features,
                how="intersection",
                keep_geom_type=True,
            )
            # .map executes eagerly, so the closure over key/val is safe here.
            intersected["tags"] = intersected["tags"].map(lambda x: x + [(key, val)])
            # Land use blocks which don't intersect the selected features
            difference = gpd.overlay(lu_polygons, selected_features, how="difference")
            # The split parts replace the previous generation of polygons.
            lu_polygons = pd.concat(
                [intersected[["geometry", "tags"]], difference[["geometry", "tags"]]],
                axis=0,
                ignore_index=True,
            )
    # Serialize the accumulated (key, value) pairs into one string per polygon.
    lu_polygons["tags"] = lu_polygons["tags"].map(
        lambda tags: ";".join([f"{t[0]}={t[1]}" for t in tags])
    )
    # Overlays can produce point/line left-overs; keep only areal geometries.
    lu_polygons = lu_polygons.loc[
        lu_polygons.geometry.map(lambda x: x.geom_type in ("Polygon", "MultiPolygon"))
    ]
    lu_polygons = lu_polygons.explode()
    return lu_polygons
def clean_polygons(lu_polygons):
    """
    Remove sliver polygons from the land use polygons.

    A negative/positive buffer pass erases parts thinner than ~6 map units
    (presumably meters -- depends on the projected CRS; verify), and small,
    elongated remainders are dropped afterwards.

    :param lu_polygons: GeoDataFrame of land use polygons
    :return: cleaned GeoDataFrame (keeps an "area" column)
    """
    # Erode by 3 units, then dilate by 3 units: thin slivers vanish.
    eroded = lu_polygons.buffer(-3, join_style=2, resolution=2)
    lu_polygons["geometry"] = eroded.buffer(3, join_style=2, resolution=2)
    lu_polygons.reset_index(inplace=True, drop=True)
    lu_polygons = lu_polygons.explode()
    lu_polygons["compactness"] = calc_compactness(lu_polygons)
    lu_polygons["area"] = lu_polygons.area
    # Drop polygons that are both small and very elongated.
    is_sliver = (lu_polygons.area < 2500) & (lu_polygons.compactness < 0.05)
    lu_polygons = lu_polygons.loc[~is_sliver]
    return lu_polygons.drop("compactness", axis=1)
def clip_buildings(in_dir, lu_polygons):
    """
    Clip building footprints out of the land use polygons.

    All resulting geometries are reduced to plain polygons; point and line
    left-overs of the overlay are dropped, as are empty geometries.

    :param in_dir: OSM download directory containing buildings/building.geojson
    :param lu_polygons: GeoDataFrame of land use polygons
    :return: GeoDataFrame of non-empty polygons without building areas
    """
    buildings_file = os.path.join(in_dir, "buildings", "building.geojson")
    buildings = gpd.read_file(buildings_file).to_crs(lu_polygons.crs)
    # Subtract the building footprints from every land use polygon.
    clipped = gpd.overlay(lu_polygons, buildings, how="difference")
    clipped.reset_index(drop=True, inplace=True)
    # Multi-part results are split into single parts; only polygonal parts
    # survive.
    is_multi_part = clipped.geometry.map(
        lambda geom: geom.geom_type in ("GeometryCollection", "MultiPolygon")
    )
    multi_parts = clipped.loc[is_multi_part]
    multi_parts = multi_parts.explode()
    multi_parts.reset_index(drop=True, inplace=True)
    multi_parts = multi_parts.loc[
        multi_parts.geometry.map(lambda geom: geom.geom_type == "Polygon")
    ]
    # Features that already are simple polygons are kept as-is.
    simple_polygons = clipped.loc[
        clipped.geometry.map(lambda geom: geom.geom_type == "Polygon")
    ]
    merged = pd.concat([simple_polygons, multi_parts], axis=0)
    return merged.loc[~merged.is_empty]
def generate_landuse_polygons(config):
    """
    Generates landuse polygons based on traffic network and landuse features
    from OSM and writes them to ``<name>_lu_polygons.shp``.

    :param config: mapping with at least "output_dir", "name" and "epsg"
    :return: None (the result is written to disk)
    """
    osm_dir = os.path.join(config["output_dir"], config["name"], "osm")
    out_file = os.path.join(
        config["output_dir"], config["name"], f"{config['name']}_lu_polygons.shp"
    )
    # Pipeline: street blocks -> land use tagging -> sliver cleaning ->
    # building clipping.
    street_blocks = polygons_from_traffic(osm_dir)
    lu_polygons = polygons_from_landuse(osm_dir, street_blocks, config["epsg"])
    lu_polygons_clean = clean_polygons(lu_polygons)
    lu_polygons_no_building = clip_buildings(osm_dir, lu_polygons_clean)
    lu_polygons_no_building = lu_polygons_no_building.loc[
        lu_polygons_no_building.geometry.map(
            lambda x: x.geom_type in ("Polygon", "MultiPolygon")
        )
    ]
    # BUG FIX: the previous code called reset_index(drop=True) without
    # assigning the result -- a no-op, since reset_index is not in-place.
    # The explode()/reset_index() below already renumbers the rows, so the
    # redundant call is simply removed. (Commented-out intermediate .to_file
    # debug dumps were removed as dead code.)
    lu_polygons_no_building = lu_polygons_no_building.explode()
    lu_polygons_no_building.reset_index(drop=True, inplace=True)
    lu_polygons_no_building.to_file(out_file)
| [
"pygeos.symmetric_difference",
"pygeos.union_all",
"pygeos.buffer",
"modules.utils.calc_compactness",
"os.path.basename",
"geopandas.GeoDataFrame",
"geopandas.overlay",
"pygeos.from_shapely",
"pygeos.get_parts",
"numpy.array",
"os.path.join",
"pandas.concat",
"shapely.geometry.box",
"geopa... | [((939, 962), 'pandas.concat', 'pd.concat', (['all_features'], {}), '(all_features)\n', (948, 962), True, 'import pandas as pd\n'), ((1304, 1345), 'pandas.concat', 'pd.concat', (['[line_features, poly_features]'], {}), '([line_features, poly_features])\n', (1313, 1345), True, 'import pandas as pd\n'), ((1393, 1423), 'shapely.geometry.box', 'box', (['*features_df.total_bounds'], {}), '(*features_df.total_bounds)\n', (1396, 1423), False, 'from shapely.geometry import box\n'), ((1438, 1498), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (["{'geometry': [bbox_geom]}"], {'crs': '"""epsg:4326"""'}), "({'geometry': [bbox_geom]}, crs='epsg:4326')\n", (1454, 1498), True, 'import geopandas as gpd\n'), ((1554, 1616), 'geopandas.overlay', 'gpd.overlay', (['all_features', 'bbox_df'], {'how': '"""symmetric_difference"""'}), "(all_features, bbox_df, how='symmetric_difference')\n", (1565, 1616), True, 'import geopandas as gpd\n'), ((2287, 2310), 'pandas.concat', 'pd.concat', (['all_features'], {}), '(all_features)\n', (2296, 2310), True, 'import pandas as pd\n'), ((2777, 2809), 'pygeos.buffer', 'pygeos.buffer', (['line_geoms', '(5e-05)'], {}), '(line_geoms, 5e-05)\n', (2790, 2809), False, 'import pygeos\n'), ((3238, 3265), 'pygeos.union_all', 'pygeos.union_all', (['all_geoms'], {}), '(all_geoms)\n', (3254, 3265), False, 'import pygeos\n'), ((3321, 3376), 'pygeos.symmetric_difference', 'pygeos.symmetric_difference', (['bbox_geom', 'all_geoms_union'], {}), '(bbox_geom, all_geoms_union)\n', (3348, 3376), False, 'import pygeos\n'), ((6009, 6038), 'modules.utils.calc_compactness', 'calc_compactness', (['lu_polygons'], {}), '(lu_polygons)\n', (6025, 6038), False, 'from modules.utils import calc_compactness\n'), ((6499, 6552), 'os.path.join', 'os.path.join', (['in_dir', '"""buildings"""', '"""building.geojson"""'], {}), "(in_dir, 'buildings', 'building.geojson')\n", (6511, 6552), False, 'import os\n'), ((6695, 6748), 'geopandas.overlay', 'gpd.overlay', (['lu_polygons', 
'buildings'], {'how': '"""difference"""'}), "(lu_polygons, buildings, how='difference')\n", (6706, 6748), True, 'import geopandas as gpd\n'), ((7484, 7531), 'pandas.concat', 'pd.concat', (['[polygons, geom_collections]'], {'axis': '(0)'}), '([polygons, geom_collections], axis=0)\n', (7493, 7531), True, 'import pandas as pd\n'), ((7769, 7826), 'os.path.join', 'os.path.join', (["config['output_dir']", "config['name']", '"""osm"""'], {}), "(config['output_dir'], config['name'], 'osm')\n", (7781, 7826), False, 'import os\n'), ((7842, 7933), 'os.path.join', 'os.path.join', (["config['output_dir']", "config['name']", 'f"""{config[\'name\']}_lu_polygons.shp"""'], {}), '(config[\'output_dir\'], config[\'name\'],\n f"{config[\'name\']}_lu_polygons.shp")\n', (7854, 7933), False, 'import os\n'), ((643, 676), 'os.path.join', 'os.path.join', (['in_dir', '"""*.geojson"""'], {}), "(in_dir, '*.geojson')\n", (655, 676), False, 'import os\n'), ((739, 755), 'geopandas.read_file', 'gpd.read_file', (['f'], {}), '(f)\n', (752, 755), True, 'import geopandas as gpd\n'), ((1909, 1953), 'os.path.join', 'os.path.join', (['in_dir', '"""traffic"""', '"""*.geojson"""'], {}), "(in_dir, 'traffic', '*.geojson')\n", (1921, 1953), False, 'import os\n'), ((2084, 2120), 'geopandas.read_file', 'gpd.read_file', (['f'], {'names': "['geometry']"}), "(f, names=['geometry'])\n", (2097, 2120), True, 'import geopandas as gpd\n'), ((2378, 2408), 'shapely.geometry.box', 'box', (['*features_df.total_bounds'], {}), '(*features_df.total_bounds)\n', (2381, 2408), False, 'from shapely.geometry import box\n'), ((4164, 4208), 'os.path.join', 'os.path.join', (['in_dir', '"""landuse"""', '"""*.geojson"""'], {}), "(in_dir, 'landuse', '*.geojson')\n", (4176, 4208), False, 'import os\n'), ((4297, 4313), 'geopandas.read_file', 'gpd.read_file', (['f'], {}), '(f)\n', (4310, 4313), True, 'import geopandas as gpd\n'), ((2707, 2741), 'pygeos.from_shapely', 'pygeos.from_shapely', (["x['geometry']"], {}), "(x['geometry'])\n", 
(2726, 2741), False, 'import pygeos\n'), ((3056, 3090), 'pygeos.from_shapely', 'pygeos.from_shapely', (["x['geometry']"], {}), "(x['geometry'])\n", (3075, 3090), False, 'import pygeos\n'), ((3435, 3463), 'pygeos.get_parts', 'pygeos.get_parts', (['geoms_diff'], {}), '(geoms_diff)\n', (3451, 3463), False, 'import pygeos\n'), ((4700, 4788), 'geopandas.overlay', 'gpd.overlay', (['lu_polygons', 'selected_features'], {'how': '"""intersection"""', 'keep_geom_type': '(True)'}), "(lu_polygons, selected_features, how='intersection',\n keep_geom_type=True)\n", (4711, 4788), True, 'import geopandas as gpd\n'), ((5050, 5111), 'geopandas.overlay', 'gpd.overlay', (['lu_polygons', 'selected_features'], {'how': '"""difference"""'}), "(lu_polygons, selected_features, how='difference')\n", (5061, 5111), True, 'import geopandas as gpd\n'), ((5139, 5250), 'pandas.concat', 'pd.concat', (["[intersected[['geometry', 'tags']], difference[['geometry', 'tags']]]"], {'axis': '(0)', 'ignore_index': '(True)'}), "([intersected[['geometry', 'tags']], difference[['geometry',\n 'tags']]], axis=0, ignore_index=True)\n", (5148, 5250), True, 'import pandas as pd\n'), ((6569, 6598), 'geopandas.read_file', 'gpd.read_file', (['buildings_file'], {}), '(buildings_file)\n', (6582, 6598), True, 'import geopandas as gpd\n'), ((849, 868), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (865, 868), False, 'import os\n'), ((2197, 2216), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (2213, 2216), False, 'import os\n'), ((3146, 3166), 'numpy.array', 'np.array', (['poly_geoms'], {}), '(poly_geoms)\n', (3154, 3166), True, 'import numpy as np\n'), ((3176, 3200), 'numpy.array', 'np.array', (['line_geoms_buf'], {}), '(line_geoms_buf)\n', (3184, 3200), True, 'import numpy as np\n'), ((4244, 4263), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (4260, 4263), False, 'import os\n')] |
from dpsniper.attack.attack import Attack
from dpsniper.classifiers.stable_classifier import StableClassifier
import numpy as np
class MlAttack(Attack):
    """
    An attack based on membership inference.

    The attack score of a sample is the probability that the classifier's
    output exceeds a fixed threshold, with a randomized tie-break weight
    ``q`` applied exactly at the threshold.
    """

    def __init__(self, classifier: StableClassifier, thresh: float, q: float):
        """
        Create an attack.

        Args:
            classifier: trained membership inference classifier
            thresh: probability threshold
            q: threshold tie-break probability
        """
        self.classifier = classifier
        self.thresh = thresh
        self.q = q

    def check(self, b):
        # The classifier expects samples as rows: promote a flat vector of
        # n samples to shape (n_samples, 1).
        if len(b.shape) == 1:
            b = np.atleast_2d(b).T
        scores = self.classifier.predict_probabilities(b)
        return self._compute_above_thresh_probs(scores)

    def _compute_above_thresh_probs(self, classifier_probs):
        # 1.0 where the score strictly exceeds the threshold ...
        above = (classifier_probs > self.thresh).astype(float)
        # ... plus weight q where it ties the threshold exactly (the float
        # equality here is deliberate: it implements the randomized tie-break).
        equal = (classifier_probs == self.thresh).astype(float)
        return above + self.q * equal

    def __str__(self):
        return "t = {}, q = {}, CLASSIFIER = {}".format(self.thresh, self.q, str(self.classifier))
| [
"numpy.atleast_2d"
] | [((722, 738), 'numpy.atleast_2d', 'np.atleast_2d', (['b'], {}), '(b)\n', (735, 738), True, 'import numpy as np\n')] |
###Differential Evolution Validation###
##Brief explanation of the method##
"""
Validation will be done for various functions.
"""
__author__ = "<NAME> (<EMAIL>)"
__date__ = "2021"
__copyright__ = "Copyright (C) 2021 <NAME>"
__license__ = "GNU GPL Version 3.0"
##Modules##
import numpy as np
from scipy.optimize import differential_evolution
import time
import Grafics as graf
##Name of the files to save outputs##
#Logger modes: 'w' erase previous file, 'a' appending to the end of the file
output_namefile='DE_Test_Function'  # base name shared by the log file and the figures PDF
log_console = graf.Logger('Results/Log_'+output_namefile+'.log', mode="w")  # presumably tees stdout into the log file -- see Grafics.Logger
##Define the objective function##
#val=variable of the function to be optimized
#Ackley Function
# Ackley test function: global minimum f(0, 0) = 0.
solA = np.array([0, 0])            # known optimum location
fA = 0                              # optimum value, subtracted so the target is 0
boundsA = [(-5, 5), (-5, 5)]        # search box per coordinate

def Ackley(valA):
    """Evaluate the (shifted) 2-D Ackley function at point ``valA``."""
    x0, x1 = valA[0], valA[1]
    radial = -0.2 * np.sqrt(0.5 * (x0 ** 2 + x1 ** 2))
    cosine = 0.5 * (np.cos(2. * np.pi * x0) + np.cos(2. * np.pi * x1))
    value = -20. * np.exp(radial) + 20. + np.e - np.exp(cosine)
    return value - fA
#Rastrigin Function
# Rastrigin test function: global minimum f(0, ..., 0) = 0.
solR = np.array([0, 0])                 # known optimum location
fR = 0                                   # optimum value (shift)
n = 2                                    # problem dimension
boundsR = [(-5.12, 5.12)] * n            # standard Rastrigin search box

def Rastrigin(valR):
    """Evaluate the (shifted) Rastrigin function at point ``valR``."""
    point = valR
    dim = len(point)
    total = 10 * dim + np.sum(point ** 2 - 10 * np.cos(2 * np.pi * point))
    return total - fR
#Rosenbrok Function
# Rosenbrock test function: global minimum f(1, ..., 1) = 0.
solB = np.array([1, 1])     # known optimum location
fB = 0                       # optimum value (shift)

def Rosenbrok(valB):
    """Evaluate the (shifted) Rosenbrock function at point ``valB``."""
    head, tail = valB[:-1], valB[1:]
    banana = np.sum(100 * (tail - head ** 2) ** 2 + (1 - head) ** 2)
    return banana - fB
##Method input parameters for scipy.optimize.differential_evolution##
#args
strategy='best1bin' #Default strategy
max_generations=300 #maxiter: maximum number of generations
population_size=30 #popsize: population size multiplier
tolerance=1e-16 #tol: relative convergence tolerance
mutation=1 #between (0,2)
recombination=0.5 #Probability
#seed
disp=False #do not print the evaluated function at every iteration
##Callback##
epsilon = 1e-16  # halt the minimization once the objective drops below this
# Convergence history for the Ackley run, filled once per generation.
eA = []          # squared distance of the best member to the known optimum
end_timeA = []   # elapsed wall-clock time since the run started
def callback_A(xk, convergence):
    """Record Ackley convergence history; return True to stop once solved."""
    eA.append(np.sum((xk - solA) ** 2))
    end_timeA.append(time.time() - start_time)
    if Ackley(xk) < epsilon:
        return True
# Convergence history for the Rastrigin run, filled once per generation.
eR = []          # squared distance of the best member to the known optimum
end_timeR = []   # elapsed wall-clock time since the run started
def callback_R(xk, convergence):
    """Record Rastrigin convergence history; return True to stop once solved."""
    eR.append(np.sum((xk - solR) ** 2))
    end_timeR.append(time.time() - start_time)
    if Rastrigin(xk) < epsilon:
        return True
# Convergence history for the Rosenbrock run, filled once per generation.
eB = []          # squared distance of the best member to the known optimum
end_timeB = []   # elapsed wall-clock time since the run started
def callback_B(xk, convergence):
    """Record Rosenbrock convergence history; return True to stop once solved."""
    eB.append(np.sum((xk - solB) ** 2))
    end_timeB.append(time.time() - start_time)
    if Rosenbrok(xk) < epsilon:
        return True
#polish (The L-BFGS-B minimization method is used to polish the last member of the population.)
initial='latinhypercube'#It can be 'latinhypercube' (default), 'random' or array.
#atol
updating='deferred' #best solution updated once per generation; required when workers != 1
workers=-1 #use all available CPU cores for parallel evaluation
#constraints
# Echo the chosen configuration to the console (and to the log file).
print("----------------------------------------------------------------------------------")
print("Differential_evolution parameters: ")
print("----------------------------------------------------------------------------------")
print("Objective Function: Peaks, Ackley, Rastrigin and Rosenbrok Functions")
print("Bounds for the variables of the Ackley function =", boundsA[0])
print("Bounds for the variables of the Rastrigin and Rosenbrok function =", boundsR[0])
#print("args =", args)
print("Strategy =", strategy)
print("Maximum number of generations =", max_generations)
print("Total population size =", population_size)
print("Relative tolerance =", tolerance)
print("Mutation constant =", mutation)
print("Recombination constant =", recombination)
#print("Seed =", seed)
print("Prints the evaluated func at every iteration. =", disp)
print("Minimization halted value =", epsilon)
#print("polish =", polish)
print("Type of population initialization =", initial)
#print("atol =", atol)
print("Updating =", updating)
print("Workers =", workers)
#print("constraints =", constraints)
print("----------------------------------------------------------------------------------")
##Result##
# One timed differential_evolution run per test function; the callbacks
# registered above collect the per-generation error/time history.
#Result for the Ackley function
start_time = time.time()
ResultA = differential_evolution(Ackley,boundsA,strategy=strategy, maxiter=max_generations,popsize=population_size,
tol=tolerance,mutation=mutation,recombination=recombination,disp=False,callback=callback_A,polish=False,
init=initial,updating=updating,workers=workers)
end_timeA_Mem=time.time() - start_time
#Result for the Rastrigin function
start_time = time.time()
ResultR = differential_evolution(Rastrigin,boundsR,strategy=strategy, maxiter=max_generations,popsize=population_size,
tol=tolerance,mutation=mutation,recombination=recombination,disp=False,callback=callback_R,polish=False,
init=initial,updating=updating,workers=workers)
end_timeR_Mem=time.time() - start_time
#Result for the Rosenbrok function
start_time = time.time()
ResultB = differential_evolution(Rosenbrok,boundsR,strategy=strategy, maxiter=max_generations,popsize=population_size,
tol=tolerance,mutation=mutation,recombination=recombination,disp=False,callback=callback_B,polish=False,
init=initial,updating=updating,workers=workers)
end_timeB_Mem=time.time() - start_time
print("----------------------------------------------------------------------------------")
print("Differential_evolution result: ")
print("----------------------------------------------------------------------------------")
print("Result for the Ackley function\n", ResultA)
print("----------------------------------------------------------------------------------")
print("Result for the Rastrigin function\n", ResultR)
print("----------------------------------------------------------------------------------")
print("Result for the Rosenbrok function\n", ResultB)
print("----------------------------------------------------------------------------------")
##Error graph##
# Plot every 10th recorded generation: error to the optimum on the first
# axis, elapsed time on the second. range(10, len(e), 10) and e[10::10]
# yield the same number of entries.
max_generations_A=np.array(range(10,len(eA),10))
eA_list=eA[10::10]
endtimeA_list=end_timeA[10::10]
text=graf.message_convert(ResultA,epsilon)
graf.Two_axes_plot(max_generations_A,eA_list,endtimeA_list,"Error graph for the Ackley function",
'max_generations',y1label='error', y2label='Time (s)', text=text)
max_generations_R=np.array(range(10,len(eR),10))
eR_list=eR[10::10]
endtimeR_list=end_timeR[10::10]
text=graf.message_convert(ResultR,epsilon)
graf.Two_axes_plot(max_generations_R,eR_list,endtimeR_list,"Error graph for the Rastrigin function",
'max_generations',y1label='error', y2label='Time (s)', text=text)
max_generations_B=np.array(range(10,len(eB),10))
eB_list=eB[10::10]
endtimeB_list=end_timeB[10::10]
text=graf.message_convert(ResultB,epsilon)
graf.Two_axes_plot(max_generations_B,eB_list,endtimeB_list,"Error graph for the Rosenbrok function",
'max_generations',y1label='error', y2label='Time (s)', text=text)
##Table##
##Analyzing max_generations##
#Initialize#
# Repeat each optimization n times to gather statistics over the runs.
# NOTE(review): this rebinds the global n (was the Rastrigin dimension, 2);
# boundsR was already built above, so the result is unaffected.
n=50
# j mirrors the loop index i and could be replaced by it.
j=0
resultAT=[0]*n
end_timeAT=[0]*n
resultRT=[0]*n
end_timeRT=[0]*n
resultBT=[0]*n
end_timeBT=[0]*n
#Iterating max_generations#
for i in range(0,n):
    #Result for the Ackley function
    start_time = time.time()
    resultAT[j] = differential_evolution(Ackley,boundsA,strategy=strategy, maxiter=max_generations,popsize=population_size,
    tol=tolerance,mutation=mutation,recombination=recombination,disp=False,callback=callback_A,polish=False,
    init=initial,updating=updating,workers=workers)
    end_timeAT[j]=time.time() - start_time
    #Result for the Rastrigin function
    start_time = time.time()
    resultRT[j] = differential_evolution(Rastrigin,boundsR,strategy=strategy, maxiter=max_generations,popsize=population_size,
    tol=tolerance,mutation=mutation,recombination=recombination,disp=False,callback=callback_R,polish=False,
    init=initial,updating=updating,workers=workers)
    end_timeRT[j]=time.time() - start_time
    #Result for the Rosenbrok function
    start_time = time.time()
    resultBT[j] = differential_evolution(Rosenbrok,boundsR,strategy=strategy, maxiter=max_generations,popsize=population_size,
    tol=tolerance,mutation=mutation,recombination=recombination,disp=False,callback=callback_B,polish=False,
    init=initial,updating=updating,workers=workers)
    end_timeBT[j]=time.time() - start_time
    j=j+1
# Collect per-run solutions, errors, objective values, iteration counts and
# run times, and wrap each series in a univariate statistics helper.
xA=np.array([res.x for res in resultAT])
eAT=np.sum((xA-solA)**2, axis=1)
funA=np.array([res.fun for res in resultAT])
nitA=np.array([res.nit for res in resultAT])
TeA=graf.Stats_Univariate(eAT)
TfunA=graf.Stats_Univariate(funA)
TnitA=graf.Stats_Univariate(nitA)
Tend_timeA=graf.Stats_Univariate(end_timeAT)
xR=np.array([res.x for res in resultRT])
eRT=np.sum((xR-solR)**2, axis=1)
funR=np.array([res.fun for res in resultRT])
nitR=np.array([res.nit for res in resultRT])
TeR=graf.Stats_Univariate(eRT)
TfunR=graf.Stats_Univariate(funR)
TnitR=graf.Stats_Univariate(nitR)
Tend_timeR=graf.Stats_Univariate(end_timeRT)
xB=np.array([res.x for res in resultBT])
eBT=np.sum((xB-solB)**2, axis=1)
funB=np.array([res.fun for res in resultBT])
nitB=np.array([res.nit for res in resultBT])
TeB=graf.Stats_Univariate(eBT)
TfunB=graf.Stats_Univariate(funB)
TnitB=graf.Stats_Univariate(nitB)
Tend_timeB=graf.Stats_Univariate(end_timeBT)
# Print descriptive statistics tables per test function, then flush the
# collected figures to a PDF and close the log.
print("-------------------------------------------------------------------------")
print("Descriptive statistics ")
print("-------------------------------------------------------------------------")
print("Ackley Function ")
print("-------------------------------------------------------------------------")
print("Error")
TeA.Table()
print("-------------------------------------------------------------------------")
print("Function value")
TfunA.Table()
print("-------------------------------------------------------------------------")
print("Iterations number")
TnitA.Table()
print("-------------------------------------------------------------------------")
print("Execution time")
Tend_timeA.Table()
print("-------------------------------------------------------------------------")
print("Rastrigin Function ")
print("-------------------------------------------------------------------------")
print("Error")
TeR.Table()
print("-------------------------------------------------------------------------")
print("Function value")
TfunR.Table()
print("-------------------------------------------------------------------------")
print("Iterations number")
TnitR.Table()
print("-------------------------------------------------------------------------")
print("Execution time")
Tend_timeR.Table()
print("-------------------------------------------------------------------------")
print("Rosenbrok Function ")
print("-------------------------------------------------------------------------")
print("Error")
TeB.Table()
print("-------------------------------------------------------------------------")
print("Function value")
TfunB.Table()
print("-------------------------------------------------------------------------")
print("Iterations number")
TnitB.Table()
print("-------------------------------------------------------------------------")
print("Execution time")
Tend_timeB.Table()
#Save figures and log to files#
graf.multipage('Results/Figures_'+ output_namefile +'.pdf')
log_console.close()
| [
"Grafics.message_convert",
"numpy.sum",
"Grafics.Two_axes_plot",
"scipy.optimize.differential_evolution",
"time.time",
"numpy.array",
"numpy.exp",
"numpy.cos",
"Grafics.Stats_Univariate",
"Grafics.multipage",
"Grafics.Logger",
"numpy.sqrt"
] | [((554, 618), 'Grafics.Logger', 'graf.Logger', (["('Results/Log_' + output_namefile + '.log')"], {'mode': '"""w"""'}), "('Results/Log_' + output_namefile + '.log', mode='w')\n", (565, 618), True, 'import Grafics as graf\n'), ((721, 737), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (729, 737), True, 'import numpy as np\n'), ((1031, 1047), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1039, 1047), True, 'import numpy as np\n'), ((1239, 1255), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (1247, 1255), True, 'import numpy as np\n'), ((3682, 3693), 'time.time', 'time.time', ([], {}), '()\n', (3691, 3693), False, 'import time\n'), ((3704, 3986), 'scipy.optimize.differential_evolution', 'differential_evolution', (['Ackley', 'boundsA'], {'strategy': 'strategy', 'maxiter': 'max_generations', 'popsize': 'population_size', 'tol': 'tolerance', 'mutation': 'mutation', 'recombination': 'recombination', 'disp': '(False)', 'callback': 'callback_A', 'polish': '(False)', 'init': 'initial', 'updating': 'updating', 'workers': 'workers'}), '(Ackley, boundsA, strategy=strategy, maxiter=\n max_generations, popsize=population_size, tol=tolerance, mutation=\n mutation, recombination=recombination, disp=False, callback=callback_A,\n polish=False, init=initial, updating=updating, workers=workers)\n', (3726, 3986), False, 'from scipy.optimize import differential_evolution\n'), ((4117, 4128), 'time.time', 'time.time', ([], {}), '()\n', (4126, 4128), False, 'import time\n'), ((4139, 4424), 'scipy.optimize.differential_evolution', 'differential_evolution', (['Rastrigin', 'boundsR'], {'strategy': 'strategy', 'maxiter': 'max_generations', 'popsize': 'population_size', 'tol': 'tolerance', 'mutation': 'mutation', 'recombination': 'recombination', 'disp': '(False)', 'callback': 'callback_R', 'polish': '(False)', 'init': 'initial', 'updating': 'updating', 'workers': 'workers'}), '(Rastrigin, boundsR, strategy=strategy, maxiter=\n max_generations, 
popsize=population_size, tol=tolerance, mutation=\n mutation, recombination=recombination, disp=False, callback=callback_R,\n polish=False, init=initial, updating=updating, workers=workers)\n', (4161, 4424), False, 'from scipy.optimize import differential_evolution\n'), ((4555, 4566), 'time.time', 'time.time', ([], {}), '()\n', (4564, 4566), False, 'import time\n'), ((4577, 4862), 'scipy.optimize.differential_evolution', 'differential_evolution', (['Rosenbrok', 'boundsR'], {'strategy': 'strategy', 'maxiter': 'max_generations', 'popsize': 'population_size', 'tol': 'tolerance', 'mutation': 'mutation', 'recombination': 'recombination', 'disp': '(False)', 'callback': 'callback_B', 'polish': '(False)', 'init': 'initial', 'updating': 'updating', 'workers': 'workers'}), '(Rosenbrok, boundsR, strategy=strategy, maxiter=\n max_generations, popsize=population_size, tol=tolerance, mutation=\n mutation, recombination=recombination, disp=False, callback=callback_B,\n polish=False, init=initial, updating=updating, workers=workers)\n', (4599, 4862), False, 'from scipy.optimize import differential_evolution\n'), ((5779, 5817), 'Grafics.message_convert', 'graf.message_convert', (['ResultA', 'epsilon'], {}), '(ResultA, epsilon)\n', (5799, 5817), True, 'import Grafics as graf\n'), ((5817, 5993), 'Grafics.Two_axes_plot', 'graf.Two_axes_plot', (['max_generations_A', 'eA_list', 'endtimeA_list', '"""Error graph for the Ackley function"""', '"""max_generations"""'], {'y1label': '"""error"""', 'y2label': '"""Time (s)"""', 'text': 'text'}), "(max_generations_A, eA_list, endtimeA_list,\n 'Error graph for the Ackley function', 'max_generations', y1label=\n 'error', y2label='Time (s)', text=text)\n", (5835, 5993), True, 'import Grafics as graf\n'), ((6106, 6144), 'Grafics.message_convert', 'graf.message_convert', (['ResultR', 'epsilon'], {}), '(ResultR, epsilon)\n', (6126, 6144), True, 'import Grafics as graf\n'), ((6144, 6323), 'Grafics.Two_axes_plot', 'graf.Two_axes_plot', 
(['max_generations_R', 'eR_list', 'endtimeR_list', '"""Error graph for the Rastrigin function"""', '"""max_generations"""'], {'y1label': '"""error"""', 'y2label': '"""Time (s)"""', 'text': 'text'}), "(max_generations_R, eR_list, endtimeR_list,\n 'Error graph for the Rastrigin function', 'max_generations', y1label=\n 'error', y2label='Time (s)', text=text)\n", (6162, 6323), True, 'import Grafics as graf\n'), ((6436, 6474), 'Grafics.message_convert', 'graf.message_convert', (['ResultB', 'epsilon'], {}), '(ResultB, epsilon)\n', (6456, 6474), True, 'import Grafics as graf\n'), ((6474, 6653), 'Grafics.Two_axes_plot', 'graf.Two_axes_plot', (['max_generations_B', 'eB_list', 'endtimeB_list', '"""Error graph for the Rosenbrok function"""', '"""max_generations"""'], {'y1label': '"""error"""', 'y2label': '"""Time (s)"""', 'text': 'text'}), "(max_generations_B, eB_list, endtimeB_list,\n 'Error graph for the Rosenbrok function', 'max_generations', y1label=\n 'error', y2label='Time (s)', text=text)\n", (6492, 6653), True, 'import Grafics as graf\n'), ((8280, 8317), 'numpy.array', 'np.array', (['[res.x for res in resultAT]'], {}), '([res.x for res in resultAT])\n', (8288, 8317), True, 'import numpy as np\n'), ((8322, 8354), 'numpy.sum', 'np.sum', (['((xA - solA) ** 2)'], {'axis': '(1)'}), '((xA - solA) ** 2, axis=1)\n', (8328, 8354), True, 'import numpy as np\n'), ((8356, 8395), 'numpy.array', 'np.array', (['[res.fun for res in resultAT]'], {}), '([res.fun for res in resultAT])\n', (8364, 8395), True, 'import numpy as np\n'), ((8401, 8440), 'numpy.array', 'np.array', (['[res.nit for res in resultAT]'], {}), '([res.nit for res in resultAT])\n', (8409, 8440), True, 'import numpy as np\n'), ((8445, 8471), 'Grafics.Stats_Univariate', 'graf.Stats_Univariate', (['eAT'], {}), '(eAT)\n', (8466, 8471), True, 'import Grafics as graf\n'), ((8478, 8505), 'Grafics.Stats_Univariate', 'graf.Stats_Univariate', (['funA'], {}), '(funA)\n', (8499, 8505), True, 'import Grafics as graf\n'), ((8512, 
8539), 'Grafics.Stats_Univariate', 'graf.Stats_Univariate', (['nitA'], {}), '(nitA)\n', (8533, 8539), True, 'import Grafics as graf\n'), ((8551, 8584), 'Grafics.Stats_Univariate', 'graf.Stats_Univariate', (['end_timeAT'], {}), '(end_timeAT)\n', (8572, 8584), True, 'import Grafics as graf\n'), ((8589, 8626), 'numpy.array', 'np.array', (['[res.x for res in resultRT]'], {}), '([res.x for res in resultRT])\n', (8597, 8626), True, 'import numpy as np\n'), ((8631, 8663), 'numpy.sum', 'np.sum', (['((xR - solR) ** 2)'], {'axis': '(1)'}), '((xR - solR) ** 2, axis=1)\n', (8637, 8663), True, 'import numpy as np\n'), ((8665, 8704), 'numpy.array', 'np.array', (['[res.fun for res in resultRT]'], {}), '([res.fun for res in resultRT])\n', (8673, 8704), True, 'import numpy as np\n'), ((8710, 8749), 'numpy.array', 'np.array', (['[res.nit for res in resultRT]'], {}), '([res.nit for res in resultRT])\n', (8718, 8749), True, 'import numpy as np\n'), ((8754, 8780), 'Grafics.Stats_Univariate', 'graf.Stats_Univariate', (['eRT'], {}), '(eRT)\n', (8775, 8780), True, 'import Grafics as graf\n'), ((8787, 8814), 'Grafics.Stats_Univariate', 'graf.Stats_Univariate', (['funR'], {}), '(funR)\n', (8808, 8814), True, 'import Grafics as graf\n'), ((8821, 8848), 'Grafics.Stats_Univariate', 'graf.Stats_Univariate', (['nitR'], {}), '(nitR)\n', (8842, 8848), True, 'import Grafics as graf\n'), ((8860, 8893), 'Grafics.Stats_Univariate', 'graf.Stats_Univariate', (['end_timeRT'], {}), '(end_timeRT)\n', (8881, 8893), True, 'import Grafics as graf\n'), ((8898, 8935), 'numpy.array', 'np.array', (['[res.x for res in resultBT]'], {}), '([res.x for res in resultBT])\n', (8906, 8935), True, 'import numpy as np\n'), ((8940, 8972), 'numpy.sum', 'np.sum', (['((xB - solB) ** 2)'], {'axis': '(1)'}), '((xB - solB) ** 2, axis=1)\n', (8946, 8972), True, 'import numpy as np\n'), ((8974, 9013), 'numpy.array', 'np.array', (['[res.fun for res in resultBT]'], {}), '([res.fun for res in resultBT])\n', (8982, 9013), True, 'import 
numpy as np\n'), ((9019, 9058), 'numpy.array', 'np.array', (['[res.nit for res in resultBT]'], {}), '([res.nit for res in resultBT])\n', (9027, 9058), True, 'import numpy as np\n'), ((9063, 9089), 'Grafics.Stats_Univariate', 'graf.Stats_Univariate', (['eBT'], {}), '(eBT)\n', (9084, 9089), True, 'import Grafics as graf\n'), ((9096, 9123), 'Grafics.Stats_Univariate', 'graf.Stats_Univariate', (['funB'], {}), '(funB)\n', (9117, 9123), True, 'import Grafics as graf\n'), ((9130, 9157), 'Grafics.Stats_Univariate', 'graf.Stats_Univariate', (['nitB'], {}), '(nitB)\n', (9151, 9157), True, 'import Grafics as graf\n'), ((9169, 9202), 'Grafics.Stats_Univariate', 'graf.Stats_Univariate', (['end_timeBT'], {}), '(end_timeBT)\n', (9190, 9202), True, 'import Grafics as graf\n'), ((11344, 11405), 'Grafics.multipage', 'graf.multipage', (["('Results/Figures_' + output_namefile + '.pdf')"], {}), "('Results/Figures_' + output_namefile + '.pdf')\n", (11358, 11405), True, 'import Grafics as graf\n'), ((1324, 1373), 'numpy.sum', 'np.sum', (['(100 * (x1 - x0 ** 2) ** 2 + (1 - x0) ** 2)'], {}), '(100 * (x1 - x0 ** 2) ** 2 + (1 - x0) ** 2)\n', (1330, 1373), True, 'import numpy as np\n'), ((4041, 4052), 'time.time', 'time.time', ([], {}), '()\n', (4050, 4052), False, 'import time\n'), ((4479, 4490), 'time.time', 'time.time', ([], {}), '()\n', (4488, 4490), False, 'import time\n'), ((4917, 4928), 'time.time', 'time.time', ([], {}), '()\n', (4926, 4928), False, 'import time\n'), ((6933, 6944), 'time.time', 'time.time', ([], {}), '()\n', (6942, 6944), False, 'import time\n'), ((6963, 7245), 'scipy.optimize.differential_evolution', 'differential_evolution', (['Ackley', 'boundsA'], {'strategy': 'strategy', 'maxiter': 'max_generations', 'popsize': 'population_size', 'tol': 'tolerance', 'mutation': 'mutation', 'recombination': 'recombination', 'disp': '(False)', 'callback': 'callback_A', 'polish': '(False)', 'init': 'initial', 'updating': 'updating', 'workers': 'workers'}), '(Ackley, boundsA, 
strategy=strategy, maxiter=\n max_generations, popsize=population_size, tol=tolerance, mutation=\n mutation, recombination=recombination, disp=False, callback=callback_A,\n polish=False, init=initial, updating=updating, workers=workers)\n', (6985, 7245), False, 'from scipy.optimize import differential_evolution\n'), ((7392, 7403), 'time.time', 'time.time', ([], {}), '()\n', (7401, 7403), False, 'import time\n'), ((7422, 7707), 'scipy.optimize.differential_evolution', 'differential_evolution', (['Rastrigin', 'boundsR'], {'strategy': 'strategy', 'maxiter': 'max_generations', 'popsize': 'population_size', 'tol': 'tolerance', 'mutation': 'mutation', 'recombination': 'recombination', 'disp': '(False)', 'callback': 'callback_R', 'polish': '(False)', 'init': 'initial', 'updating': 'updating', 'workers': 'workers'}), '(Rastrigin, boundsR, strategy=strategy, maxiter=\n max_generations, popsize=population_size, tol=tolerance, mutation=\n mutation, recombination=recombination, disp=False, callback=callback_R,\n polish=False, init=initial, updating=updating, workers=workers)\n', (7444, 7707), False, 'from scipy.optimize import differential_evolution\n'), ((7858, 7869), 'time.time', 'time.time', ([], {}), '()\n', (7867, 7869), False, 'import time\n'), ((7888, 8173), 'scipy.optimize.differential_evolution', 'differential_evolution', (['Rosenbrok', 'boundsR'], {'strategy': 'strategy', 'maxiter': 'max_generations', 'popsize': 'population_size', 'tol': 'tolerance', 'mutation': 'mutation', 'recombination': 'recombination', 'disp': '(False)', 'callback': 'callback_B', 'polish': '(False)', 'init': 'initial', 'updating': 'updating', 'workers': 'workers'}), '(Rosenbrok, boundsR, strategy=strategy, maxiter=\n max_generations, popsize=population_size, tol=tolerance, mutation=\n mutation, recombination=recombination, disp=False, callback=callback_B,\n polish=False, init=initial, updating=updating, workers=workers)\n', (7910, 8173), False, 'from scipy.optimize import 
differential_evolution\n'), ((820, 858), 'numpy.sqrt', 'np.sqrt', (['(0.5 * (x[0] ** 2 + x[1] ** 2))'], {}), '(0.5 * (x[0] ** 2 + x[1] ** 2))\n', (827, 858), True, 'import numpy as np\n'), ((974, 986), 'numpy.exp', 'np.exp', (['arg2'], {}), '(arg2)\n', (980, 986), True, 'import numpy as np\n'), ((1674, 1698), 'numpy.sum', 'np.sum', (['((xk - solA) ** 2)'], {}), '((xk - solA) ** 2)\n', (1680, 1698), True, 'import numpy as np\n'), ((1856, 1880), 'numpy.sum', 'np.sum', (['((xk - solR) ** 2)'], {}), '((xk - solR) ** 2)\n', (1862, 1880), True, 'import numpy as np\n'), ((2041, 2065), 'numpy.sum', 'np.sum', (['((xk - solB) ** 2)'], {}), '((xk - solB) ** 2)\n', (2047, 2065), True, 'import numpy as np\n'), ((7304, 7315), 'time.time', 'time.time', ([], {}), '()\n', (7313, 7315), False, 'import time\n'), ((7766, 7777), 'time.time', 'time.time', ([], {}), '()\n', (7775, 7777), False, 'import time\n'), ((8232, 8243), 'time.time', 'time.time', ([], {}), '()\n', (8241, 8243), False, 'import time\n'), ((877, 903), 'numpy.cos', 'np.cos', (['(2.0 * np.pi * x[0])'], {}), '(2.0 * np.pi * x[0])\n', (883, 903), True, 'import numpy as np\n'), ((905, 931), 'numpy.cos', 'np.cos', (['(2.0 * np.pi * x[1])'], {}), '(2.0 * np.pi * x[1])\n', (911, 931), True, 'import numpy as np\n'), ((1718, 1729), 'time.time', 'time.time', ([], {}), '()\n', (1727, 1729), False, 'import time\n'), ((1900, 1911), 'time.time', 'time.time', ([], {}), '()\n', (1909, 1911), False, 'import time\n'), ((2085, 2096), 'time.time', 'time.time', ([], {}), '()\n', (2094, 2096), False, 'import time\n'), ((946, 958), 'numpy.exp', 'np.exp', (['arg1'], {}), '(arg1)\n', (952, 958), True, 'import numpy as np\n'), ((1177, 1198), 'numpy.cos', 'np.cos', (['(2 * np.pi * x)'], {}), '(2 * np.pi * x)\n', (1183, 1198), True, 'import numpy as np\n')] |
# approximate sum of two von mises with a single von mises
import numpy as np
from scipy.stats import vonmises
def sim(a, b, plot=False, n=int(1e8)):
    """Approximate the sum of two von Mises variates with one von Mises fit.

    Draws ``n`` samples from ``vonmises(a)`` and ``vonmises(b)``, wraps their
    sum back onto [-pi, pi), and fits a von Mises distribution (location and
    scale fixed at 0 and 1) to the wrapped sum.

    Parameters
    ----------
    a, b : float
        Concentration (kappa) parameters of the two source distributions.
    plot : bool, optional
        If True, plot a histogram of the wrapped samples with the fitted pdf.
    n : int, optional
        Number of samples to draw.

    Returns
    -------
    float
        Fitted concentration parameter kappa.
    """
    unwrapped = vonmises.rvs(a, size=n) + vonmises.rvs(b, size=n)
    # Wrap the summed angles back onto the circle [-pi, pi).
    wrapped = (unwrapped + np.pi) % (2 * np.pi) - np.pi
    kappa, _, _ = vonmises.fit(wrapped, floc=0, fscale=1)
    if plot is True:
        # Imported lazily so plotting stays optional; the original body
        # referenced an undefined name `plt` here (NameError when plot=True).
        import matplotlib.pyplot as plt
        # `normed` was removed in Matplotlib 3.1; `density` is the
        # equivalent keyword.
        plt.hist(wrapped, density=True, bins=100)
        x = np.linspace(-np.pi, np.pi)
        plt.plot(x, vonmises.pdf(x, kappa))
    return kappa
# import numpy as np
# import pymc3 as pm
# import matplotlib.pyplot as plt
# import theano.tensor as tt
# from scipy.stats import norm, vonmises
# from scipy.integrate import quad
#
#
# n = 10000
# mu = 3
# sigma = 3
#
# k = np.exp(norm.rvs(mu, sigma, size=n))
# x = vonmises.rvs(kappa=k, size=n)
#
# with pm.Model():
#
# mu = pm.Normal(name="mu", mu=0, sigma=10)
# sigma = pm.HalfCauchy(name="sigma", beta=1)
# delta = pm.Normal(name="delta", mu=0, sigma=1, shape=n)
# kappa = tt.exp(mu + delta * sigma) # IMPORTANT! Use non-centered parameterization
# pm.VonMises(name="obs", mu=0, kappa=kappa, observed=x)
# trace = pm.sample(10000, tune=5000, chains=2)
# pm.traceplot(trace, compact=True, var_names=["mu", "sigma"])
# plt.savefig("tmp.png")
#
# # hist(x, bins=100, normed=True)
# #
# # x = np.linspace(-np.pi, np.pi, 100)
# #
# # def pdf(x, mu, sigma, a):
# # g = 1
# # v = vonmises.pdf(x, kappa=mu)
# # def f(k, x):
# # g = gamma.pdf(k, mu**2 / sigma**2, scale=1. / (mu / sigma**2))
# # v = vonmises.pdf(x, kappa=k)
# # return g * v
# # return [quad(f, 0, a, _x)[0] for _x in x]
# #
# # def logpdf(x, mu, sigma, a):
# # g = 1
# # v = vonmises.pdf(x, kappa=mu)
# # def f(k, x):
# # g = gamma.logpdf(k, mu**2 / sigma**2, scale=1. / (mu / sigma**2))
# # v = vonmises.logpdf(x, kappa=k)
# # return g * v
# # return [quad(f, 0, a, _x)[0] for _x in x]
# #
# # [plot(x, pdf(x, mu, sigma, a)) for a in [500]]
# #
# #
# # plot(x, np.log(pdf(x, mu, sigma)))
#
#
#
#
#
#
# # from scipy.integrate import quad
# # import theano
# # import theano.tensor as tt
# # import numpy as np
# # import pymc3 as pm
# #
# #
# # class Integrate(theano.Op):
# # def __init__(self, expr, var, *extra_vars):
# # super().__init__()
# # self._expr = expr
# # self._var = var
# # self._extra_vars = extra_vars
# # self._func = theano.function(
# # [var] + list(extra_vars),
# # self._expr,
# # on_unused_input='ignore')
# #
# # def make_node(self, start, stop, *extra_vars):
# # self._extra_vars_node = extra_vars
# # assert len(self._extra_vars) == len(extra_vars)
# # self._start = start
# # self._stop = stop
# # vars = [start, stop] + list(extra_vars)
# # # vars = list(extra_vars)
# # return theano.Apply(self, vars, [tt.dscalar().type()])
# #
# # def perform(self, node, inputs, out):
# # start, stop, *args = inputs
# # val = quad(self._func, start, stop, args=tuple(args))[0]
# # out[0][0] = np.array(val)
# #
# # def grad(self, inputs, grads):
# # start, stop, *args = inputs
# # out, = grads
# # replace = dict(zip(self._extra_vars, args))
# #
# # replace_ = replace.copy()
# # replace_[self._var] = start
# # dstart = out * theano.clone(-self._expr, replace=replace_)
# #
# # replace_ = replace.copy()
# # replace_[self._var] = stop
# # dstop = out * theano.clone(self._expr, replace=replace_)
# #
# # grads = tt.grad(self._expr, self._extra_vars)
# # dargs = []
# # for grad in grads:
# # integrate = Integrate(grad, self._var, *self._extra_vars)
# # darg = out * integrate(start, stop, *args)
# # dargs.append(darg)
# #
# # return [dstart, dstop] + dargs
# #
# #
# # y_obs = 8.3
# #
# # start = theano.shared(1.)
# # stop = theano.shared(2.)
# # with pm.Model() as basic_model:
# # a = pm.Uniform('a', 1.5, 3.5)
# # b = pm.Uniform('b', 4., 6.)
# #
# # # Define the function to integrate in plain theano
# # t = tt.dscalar('t')
# # t.tag.test_value = np.zeros(())
# # a_ = tt.dscalar('a_')
# # a_.tag.test_value = np.ones(())*2.
# # b_ = tt.dscalar('b_')
# # b_.tag.test_value = np.ones(())*5.
# # func = t**a_ + b_
# # integrate = Integrate(func, t, a_, b_)
# #
# # # Now we plug in the values from the model.
# # # The `a_` and `b_` from above corresponds to the `a` and `b` here.
# # mu = integrate(start, stop, a, b)
# # y = pm.Normal('y', mu=mu, sd=0.4, observed=y_obs)
# # trace = pm.sample(1500, tune=500, cores=2, chains=2) | [
"scipy.stats.vonmises.rvs",
"numpy.linspace",
"scipy.stats.vonmises.fit",
"scipy.stats.vonmises.pdf"
] | [((317, 356), 'scipy.stats.vonmises.fit', 'vonmises.fit', (['wrapped'], {'floc': '(0)', 'fscale': '(1)'}), '(wrapped, floc=0, fscale=1)\n', (329, 356), False, 'from scipy.stats import vonmises\n'), ((167, 190), 'scipy.stats.vonmises.rvs', 'vonmises.rvs', (['a'], {'size': 'n'}), '(a, size=n)\n', (179, 190), False, 'from scipy.stats import vonmises\n'), ((193, 216), 'scipy.stats.vonmises.rvs', 'vonmises.rvs', (['b'], {'size': 'n'}), '(b, size=n)\n', (205, 216), False, 'from scipy.stats import vonmises\n'), ((439, 465), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi'], {}), '(-np.pi, np.pi)\n', (450, 465), True, 'import numpy as np\n'), ((478, 500), 'scipy.stats.vonmises.pdf', 'vonmises.pdf', (['x', 'kappa'], {}), '(x, kappa)\n', (490, 500), False, 'from scipy.stats import vonmises\n')] |
# Polynomial regression example.
# A polynomial model adds higher-order terms to a plain linear model.
import numpy as np
import sklearn.linear_model as lm
import sklearn.metrics as sm
import matplotlib.pyplot as mp
import sklearn.pipeline as pl  # pipeline utilities
import sklearn.preprocessing as sp

# Read the training samples from the data file.
train_x, train_y = [], []  # inputs, outputs
with open("../data/poly_sample.txt", "rt") as f:
    for line in f.readlines():
        # Split each comma-separated line and convert to floats.
        values = [float(field)
                  for field in line.split(",")]
        train_x.append(values[:-1])  # slice -> 2-D row of features
        train_y.append(values[-1])   # index -> scalar target

# Convert the lists to arrays.
train_x = np.array(train_x)
train_y = np.array(train_y)
# print(train_x.shape)
# print(train_y.shape)

# Build the model.
# A polynomial model is implemented in two steps: polynomial feature
# expansion followed by an ordinary linear regression fit.
model = pl.make_pipeline(
    sp.PolynomialFeatures(20),  # polynomial expansion, degree 20
    lm.LinearRegression())       # linear regression model
model.fit(train_x, train_y)            # train
pred_train_y = model.predict(train_x)  # predict

# Report the R^2 score on the training data.
r2 = sm.r2_score(train_y, pred_train_y)
print("r2:", r2)

# Visualisation: 100 evenly spaced points across the sample x range.
test_x = np.linspace(train_x.min(),
                     train_x.max(),
                     100)
# Reshape test_x to a 2-D column so the model can predict on it.
pre_test_y = model.predict(test_x.reshape(-1, 1))
mp.figure("Polynomial")
mp.title("Polynomial")
mp.xlabel("x", fontsize=14)
mp.ylabel("y", fontsize=14)
mp.grid(linestyle=":")
mp.scatter(train_x, train_y, c="blue", label="sample")
mp.plot(test_x, pre_test_y, c="red", label="poly")
mp.legend()
mp.show()
mp.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"sklearn.metrics.r2_score",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.PolynomialFeatures",
"matplotlib.pyplot.figure",
"numpy.array",
"mat... | [((551, 568), 'numpy.array', 'np.array', (['train_x'], {}), '(train_x)\n', (559, 568), True, 'import numpy as np\n'), ((579, 596), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (587, 596), True, 'import numpy as np\n'), ((878, 912), 'sklearn.metrics.r2_score', 'sm.r2_score', (['train_y', 'pred_train_y'], {}), '(train_y, pred_train_y)\n', (889, 912), True, 'import sklearn.metrics as sm\n'), ((1128, 1151), 'matplotlib.pyplot.figure', 'mp.figure', (['"""Polynomial"""'], {}), "('Polynomial')\n", (1137, 1151), True, 'import matplotlib.pyplot as mp\n'), ((1152, 1174), 'matplotlib.pyplot.title', 'mp.title', (['"""Polynomial"""'], {}), "('Polynomial')\n", (1160, 1174), True, 'import matplotlib.pyplot as mp\n'), ((1175, 1202), 'matplotlib.pyplot.xlabel', 'mp.xlabel', (['"""x"""'], {'fontsize': '(14)'}), "('x', fontsize=14)\n", (1184, 1202), True, 'import matplotlib.pyplot as mp\n'), ((1203, 1230), 'matplotlib.pyplot.ylabel', 'mp.ylabel', (['"""y"""'], {'fontsize': '(14)'}), "('y', fontsize=14)\n", (1212, 1230), True, 'import matplotlib.pyplot as mp\n'), ((1231, 1253), 'matplotlib.pyplot.grid', 'mp.grid', ([], {'linestyle': '""":"""'}), "(linestyle=':')\n", (1238, 1253), True, 'import matplotlib.pyplot as mp\n'), ((1255, 1309), 'matplotlib.pyplot.scatter', 'mp.scatter', (['train_x', 'train_y'], {'c': '"""blue"""', 'label': '"""sample"""'}), "(train_x, train_y, c='blue', label='sample')\n", (1265, 1309), True, 'import matplotlib.pyplot as mp\n'), ((1310, 1360), 'matplotlib.pyplot.plot', 'mp.plot', (['test_x', 'pre_test_y'], {'c': '"""red"""', 'label': '"""poly"""'}), "(test_x, pre_test_y, c='red', label='poly')\n", (1317, 1360), True, 'import matplotlib.pyplot as mp\n'), ((1362, 1373), 'matplotlib.pyplot.legend', 'mp.legend', ([], {}), '()\n', (1371, 1373), True, 'import matplotlib.pyplot as mp\n'), ((1374, 1383), 'matplotlib.pyplot.show', 'mp.show', ([], {}), '()\n', (1381, 1383), True, 'import matplotlib.pyplot as mp\n'), ((711, 736), 
'sklearn.preprocessing.PolynomialFeatures', 'sp.PolynomialFeatures', (['(20)'], {}), '(20)\n', (732, 736), True, 'import sklearn.preprocessing as sp\n'), ((756, 777), 'sklearn.linear_model.LinearRegression', 'lm.LinearRegression', ([], {}), '()\n', (775, 777), True, 'import sklearn.linear_model as lm\n')] |
"""A set of classes that take in a GPy model and optimise their respective
acquisition functions over the model's decision space.
Each class can be used as follows:
>> acq_class = EI
>> acq_optimiser = acq_class(lb, ub, acq_budget, cf=None, args)
>> acq_optimiser(gpy_model)
The basic usage is that an optimiser is instantiated with the problem bounds,
``lb`` and ``ub``, a budget of calls to the GPy model (used for predicting the
mean and variance of locations in decision space), a constraint function that
returns True or False depending on if the decision vector it is given violates
any problem constraints, and additional arguments in the form of a dictionary
containing key: value pairs that are passed into the acquisition function used
by the optimiser; e.g. for the UCB acquisition function the value of beta is
needed and can be specified: args = {'beta': 2.5}.
Note that all acquisition optimisers use the NSGA-II algorithm apart from PI
which uses a multi-restart strategy, seeded by the best locations found from
uniformly sampling decision space.
"""
import scipy
import numpy as np
from . import standard_acq_funcs_minimize
from . import egreedy_acq_funcs_minimize
from .nsga2_pareto_front import NSGA2_pygmo
class BaseOptimiser:
    """Base class for methods that maximise an acquisition function over a GPy model.

    Parameters
    ----------
    lb : (D, ) numpy.ndarray
        Lower bound box constraint on D
    ub : (D, ) numpy.ndarray
        Upper bound box constraint on D
    acq_budget : int
        Maximum number of calls to the GPy model
    cf : callable, optional
        Constraint function that returns True if it is called with a
        valid decision vector, else False.
    acquisition_args : dict, optional
        A dictionary containing key: value pairs that will be passed to the
        corresponding acquisition function, see the subclasses for
        further details.
    """

    def __init__(self, lb, ub, acq_budget, cf=None, acquisition_args=None):
        self.lb = lb
        self.ub = ub
        self.cf = cf
        # NOTE: the previous signature used a mutable default argument
        # (acquisition_args={}), which is shared across every instance and a
        # classic source of cross-instance state leaks; use None as sentinel.
        self.acquisition_args = {} if acquisition_args is None else acquisition_args
        self.acq_budget = acq_budget

    def __call__(self, model):
        """Subclasses must implement the acquisition optimisation here."""
        raise NotImplementedError()
class ParetoFrontOptimiser(BaseOptimiser):
    """Acquisition optimisers that operate on an (estimated) Pareto front.

    The front of the model's predicted mean versus predicted standard
    deviation is approximated with NSGA-II [1]_; see
    nsga2_pareto_front.NSGA2_pygmo for the full details of the method.

    References
    ----------
    .. [1] Deb et al. A fast and elitist multiobjective genetic algorithm:
       NSGA-II. IEEE Transactions on Evolutionary Computation 6, 2 (2001),
       182-197.
    """

    def get_front(self, model):
        """Return the estimated Pareto front of the model's predictions.

        Returns the front locations, their predicted means, and their
        predicted standard deviations.
        """
        locations, mu_sigma = NSGA2_pygmo(
            model, self.acq_budget, self.lb, self.ub, self.cf
        )
        return locations, mu_sigma[:, 0], mu_sigma[:, 1]

    def __call__(self, model):
        raise NotImplementedError()
class EI(ParetoFrontOptimiser):
    """Selects the point on a GPy model's Pareto front that maximises EI.

    See standard_acq_funcs_minimize.EI for details of the EI method.
    """

    def __call__(self, model):
        front, mean, std = self.get_front(model)
        # Best (lowest) observed function value so far.
        incumbent = np.min(model.Y)
        improvement = standard_acq_funcs_minimize.EI(mean, std, y_best=incumbent)
        return front[np.argmax(improvement), :]
class UCB(ParetoFrontOptimiser):
    """Selects the point on a GPy model's Pareto front that maximises UCB.

    See standard_acq_funcs_minimize.UCB for details of the UCB method and
    its optional arguments.
    """

    def __call__(self, model):
        front, mean, std = self.get_front(model)
        n_seen, n_dims = model.X.shape
        scores = standard_acq_funcs_minimize.UCB(
            mean,
            std,
            lb=self.lb,
            ub=self.ub,
            t=n_seen + 1,
            d=n_dims,
            **self.acquisition_args
        )
        return front[np.argmax(scores), :]
class eFront(ParetoFrontOptimiser):
    """Selects a point from a GPy model's Pareto front via the eFront method.

    With probability (1 - epsilon) the point on the (estimated) Pareto front
    with the best (lowest) mean predicted value is chosen greedily; with
    probability epsilon a point on the front is chosen at random.
    """

    def __call__(self, model):
        front, mean, std = self.get_front(model)
        return egreedy_acq_funcs_minimize.eFront(
            front, mean, std, **self.acquisition_args
        )
class eRandom(ParetoFrontOptimiser):
    """Selects a point from a GPy model's Pareto front via the eRandom method.

    With probability (1 - epsilon) the point on the (estimated) Pareto front
    with the best (lowest) mean predicted value is chosen greedily; with
    probability epsilon a point is drawn from the whole decision space.
    """

    def __call__(self, model):
        front, mean, std = self.get_front(model)
        return egreedy_acq_funcs_minimize.eRandom(
            front, mean, std,
            lb=self.lb, ub=self.ub, cf=self.cf,
            **self.acquisition_args
        )
class PFRandom(ParetoFrontOptimiser):
    """Selects a point at random from a GPy model's Pareto front."""

    def __call__(self, model):
        front, _, _ = self.get_front(model)
        return egreedy_acq_funcs_minimize.PFRandom(front)
class Explore(ParetoFrontOptimiser):
    """Selects the most exploratory point on a GPy model's Pareto front.

    The chosen location is the one with the largest predicted standard
    deviation.
    """

    def __call__(self, model):
        front, _, std = self.get_front(model)
        return egreedy_acq_funcs_minimize.Explore(front, std)
class Exploit(ParetoFrontOptimiser):
    """Selects the most exploitative point on a GPy model's Pareto front.

    The chosen location is the one with the best (lowest) mean predicted
    value.
    """

    def __call__(self, model):
        front, mean, _ = self.get_front(model)
        return egreedy_acq_funcs_minimize.Exploit(front, mean)
class PI(BaseOptimiser):
    """Maximises the PI acquisition function for a given GPy model.

    See standard_acq_funcs_minimize.PI for details of the PI method.

    Notes
    -----
    PI is maximised using the typical multi-restart approach of drawing a
    large number of samples from across the decision space (X), evaluating
    the locations with the acquisition function, and locally optimising the
    best 10 of these with L-BFGS-B. Here we make the assumption that each
    local optimisation run will take ~100 evaluations -- empirically we found
    this to be the case.

    PI is not maximised using NSGA-II because the location that maximises PI
    is not guaranteed to be on the Pareto front; see the paper for full
    details.
    """

    def __call__(self, model):
        # Dimensionality of the decision space and the best function value
        # observed so far (PI measures improvement over this incumbent).
        D = model.X.shape[1]
        incumbent = model.Y.min()

        # objective function wrapper for L-BFGS-B and the initial sampling;
        # returns NEGATED probability of improvement as both consumers minimise
        def min_obj(x):
            # if we have a constraint function and it is violated,
            # return a bad PI value
            # NOTE(review): `x` is a (N_samples, D) batch when called from
            # the initial sampling below but a single vector from L-BFGS-B;
            # assumes `self.cf` copes with both -- TODO confirm.
            if (self.cf is not None) and (not self.cf(x)):
                return np.inf
            mu, sigmaSQR = model.predict(np.atleast_2d(x), full_cov=False)
            # negate PI because we're using a minimiser
            pi = -standard_acq_funcs_minimize.PI(
                mu, np.sqrt(sigmaSQR), incumbent
            ).ravel()
            return pi

        # number of optimisation runs and *estimated* number of L-BFGS-B
        # function evaluations per run; note this was calculated empirically
        # and may not be true for all functions.
        N_opt_runs = 10
        fevals_assumed_per_run = 100
        # spend whatever remains of the model-call budget on the initial
        # uniform sampling, keeping at least N_opt_runs samples
        N_samples = self.acq_budget - (N_opt_runs * fevals_assumed_per_run)
        if N_samples <= N_opt_runs:
            N_samples = N_opt_runs
        # initially perform a grid search for N_samples
        x0_points = np.random.uniform(self.lb, self.ub, size=(N_samples, D))
        fx0 = min_obj(x0_points).ravel()
        # select the top N_opt_runs to evaluate with L-BFGS-B
        x0_points = x0_points[np.argsort(fx0)[:N_opt_runs], :]
        # Find the best optimum by starting from N_opt_runs different points.
        # below is equivalent to: [(l, b) for (l, b) in zip(self.lb, self.ub)]
        bounds = [*zip(self.lb, self.ub)]
        # storage for the best found location (xb) and its function value (fx)
        xb = np.zeros((N_opt_runs, D))
        fx = np.zeros((N_opt_runs, 1))
        # ensure we're using a good stopping criterion
        # ftol = factr * numpy.finfo(float).eps
        factr = 1e-15 / np.finfo(float).eps
        # run L-BFGS-B on each of the 'N_opt_runs' starting locations
        for i, x0 in enumerate(x0_points):
            xb[i, :], fx[i, :], _ = scipy.optimize.fmin_l_bfgs_b(
                min_obj, x0=x0, bounds=bounds, approx_grad=True, factr=factr
            )
        # return the best location
        best_idx = np.argmin(fx.flat)
        return xb[best_idx, :]
| [
"numpy.random.uniform",
"numpy.argmax",
"numpy.zeros",
"numpy.argmin",
"numpy.argsort",
"numpy.finfo",
"numpy.min",
"scipy.optimize.fmin_l_bfgs_b",
"numpy.sqrt",
"numpy.atleast_2d"
] | [((8291, 8347), 'numpy.random.uniform', 'np.random.uniform', (['self.lb', 'self.ub'], {'size': '(N_samples, D)'}), '(self.lb, self.ub, size=(N_samples, D))\n', (8308, 8347), True, 'import numpy as np\n'), ((8814, 8839), 'numpy.zeros', 'np.zeros', (['(N_opt_runs, D)'], {}), '((N_opt_runs, D))\n', (8822, 8839), True, 'import numpy as np\n'), ((8853, 8878), 'numpy.zeros', 'np.zeros', (['(N_opt_runs, 1)'], {}), '((N_opt_runs, 1))\n', (8861, 8878), True, 'import numpy as np\n'), ((9353, 9371), 'numpy.argmin', 'np.argmin', (['fx.flat'], {}), '(fx.flat)\n', (9362, 9371), True, 'import numpy as np\n'), ((9177, 9272), 'scipy.optimize.fmin_l_bfgs_b', 'scipy.optimize.fmin_l_bfgs_b', (['min_obj'], {'x0': 'x0', 'bounds': 'bounds', 'approx_grad': '(True)', 'factr': 'factr'}), '(min_obj, x0=x0, bounds=bounds, approx_grad=\n True, factr=factr)\n', (9205, 9272), False, 'import scipy\n'), ((3515, 3530), 'numpy.min', 'np.min', (['model.Y'], {}), '(model.Y)\n', (3521, 3530), True, 'import numpy as np\n'), ((3549, 3562), 'numpy.argmax', 'np.argmax', (['ei'], {}), '(ei)\n', (3558, 3562), True, 'import numpy as np\n'), ((4126, 4140), 'numpy.argmax', 'np.argmax', (['ucb'], {}), '(ucb)\n', (4135, 4140), True, 'import numpy as np\n'), ((7572, 7588), 'numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (7585, 7588), True, 'import numpy as np\n'), ((9007, 9022), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (9015, 9022), True, 'import numpy as np\n'), ((8482, 8497), 'numpy.argsort', 'np.argsort', (['fx0'], {}), '(fx0)\n', (8492, 8497), True, 'import numpy as np\n'), ((7733, 7750), 'numpy.sqrt', 'np.sqrt', (['sigmaSQR'], {}), '(sigmaSQR)\n', (7740, 7750), True, 'import numpy as np\n')] |
"""
Utilities
"""
from __future__ import division
import os.path as op
import logging
import numpy as np
import nibabel as nib
from nilearn import datasets
from nilearn.input_data import NiftiMasker
from .due import due
from . import references
LGR = logging.getLogger(__name__)
def get_template(space='mni152_1mm', mask=None):
    """
    Load template file.

    Parameters
    ----------
    space : {'mni152_1mm', 'mni152_2mm', 'ale_2mm'}, optional
        Template to load. Default is 'mni152_1mm'.
    mask : {None, 'brain', 'gm'}, optional
        Whether to return the raw template (None), a brain mask ('brain'), or
        a gray-matter mask ('gm'). Default is None.

    Returns
    -------
    img : :obj:`nibabel.nifti1.Nifti1Image`
        Template image object.
    """
    if space == 'mni152_1mm':
        if mask is None:
            return nib.load(datasets.fetch_icbm152_2009()['t1'])
        if mask == 'brain':
            return nib.load(datasets.fetch_icbm152_2009()['mask'])
        if mask == 'gm':
            return datasets.fetch_icbm152_brain_gm_mask(threshold=0.2)
        raise ValueError('Mask {0} not supported'.format(mask))
    if space == 'mni152_2mm':
        if mask is None:
            return datasets.load_mni152_template()
        if mask == 'brain':
            return datasets.load_mni152_brain_mask()
        if mask == 'gm':
            # this approach seems to approximate the 0.2 thresholded
            # GM mask pretty well
            template = datasets.load_mni152_template()
            flipped = template.get_data() * -1
            flipped[flipped != 0] += np.abs(np.min(flipped))
            gm_data = (flipped > 1200).astype(int)
            return nib.Nifti1Image(gm_data, template.affine)
        raise ValueError('Mask {0} not supported'.format(mask))
    if space == 'ale_2mm':
        if mask is None:
            return datasets.load_mni152_template()
        # Not the same as the nilearn brain mask, but should correspond to
        # the default "more conservative" MNI152 mask in GingerALE.
        return nib.load(op.join(get_resource_path(),
                                 'templates/MNI152_2x2x2_brainmask.nii.gz'))
    raise ValueError('Space {0} not supported'.format(space))
def get_masker(mask):
    """
    Get an initialized, fitted nilearn Masker instance from passed argument.

    Parameters
    ----------
    mask : str, :obj:`nibabel.nifti1.Nifti1Image`, or any nilearn Masker

    Returns
    -------
    masker : an initialized, fitted instance of a subclass of
        `nilearn.input_data.base_masker.BaseMasker`
    """
    # Coerce a path to an image, then an image to a masker.
    if isinstance(mask, str):
        mask = nib.load(mask)
    if isinstance(mask, nib.nifti1.Nifti1Image):
        mask = NiftiMasker(mask)

    # Anything left must already quack like a nilearn masker.
    is_masker = hasattr(mask, 'transform') and hasattr(mask, 'inverse_transform')
    if not is_masker:
        raise ValueError("mask argument must be a string, a nibabel image,"
                         " or a Nilearn Masker instance.")

    # Fit the masker if it has not been fitted yet.
    if not hasattr(mask, 'mask_img_'):
        mask.fit()
    return mask
def listify(obj):
    """Wrap any object that is not a list, tuple, or None in a list.

    Provides a simple way to accept flexible arguments.
    """
    if isinstance(obj, (list, tuple, type(None))):
        return obj
    return [obj]
def round2(ndarray):
    """Round to the nearest integer, with exact halves rounded away from zero.

    Numpy's round() sends X.5 values to the nearest even integer; this
    instead rounds halves away from zero, and returns an int array with the
    input's shape.
    """
    flat = ndarray.flatten()
    signs = np.sign(flat)
    magnitudes = np.abs(flat)
    rounded = np.round(magnitudes)
    # Locate exact .5 ties and push them up; the sign restored below makes
    # this "away from zero".
    ties = np.where(np.abs(flat - np.round(flat)) == 0.5)[0]
    rounded[ties] = np.ceil(magnitudes[ties])
    rounded *= signs
    return rounded.reshape(ndarray.shape).astype(int)
def vox2mm(ijk, affine):
    """Convert matrix subscripts (voxel indices) to world coordinates in mm.

    From here:
    http://blog.chrisgorgolewski.org/2014/12/how-to-convert-between-voxel-and-mm.html
    """
    return nib.affines.apply_affine(affine, ijk)
def mm2vox(xyz, affine):
    """Convert world coordinates in mm to matrix subscripts (voxel indices).

    From here:
    http://blog.chrisgorgolewski.org/2014/12/how-to-convert-between-voxel-and-mm.html
    """
    # Apply the inverse affine, then truncate to integer voxel indices.
    inverse = np.linalg.inv(affine)
    return nib.affines.apply_affine(inverse, xyz).astype(int)
@due.dcite(references.LANCASTER_TRANSFORM,
           description='Introduces the Lancaster MNI-to-Talairach transform, '
                       'as well as its inverse, the Talairach-to-MNI '
                       'transform.')
@due.dcite(references.LANCASTER_TRANSFORM_VALIDATION,
           description='Validates the Lancaster MNI-to-Talairach and '
                       'Talairach-to-MNI transforms.')
def tal2mni(coords):
    """
    Python version of BrainMap's tal2icbm_other.m.

    Converts coordinates from Talairach space to MNI space (normalized using
    templates other than those contained in SPM and FSL) using the tal2icbm
    transform developed and validated at the Research Imaging Center in
    San Antonio, Texas.
    http://www3.interscience.wiley.com/cgi-bin/abstract/114104479/ABSTRACT

    FORMAT outpoints = tal2icbm_other(inpoints)
    Where inpoints is N by 3 or 3 by N matrix of coordinates
    (N being the number of points)

    ric.uthscsa.edu 3/14/07
    """
    # Work out which axis holds the x/y/z triplets.
    shape = np.array(coords.shape)
    if all(shape == 3):
        LGR.info('Input is an ambiguous 3x3 matrix.\nAssuming coords are row '
                 'vectors (Nx3).')
        use_dim = 1
    elif not any(shape == 3):
        raise AttributeError('Input must be an Nx3 or 3xN matrix.')
    else:
        use_dim = np.where(shape == 3)[0][0]

    # Internally operate on coordinates as columns (3xN).
    if use_dim == 1:
        coords = coords.transpose()

    # Lancaster pooled affine for "other" (non-SPM/FSL) template
    # normalisations; maps ICBM/MNI -> Talairach.
    icbm_other = np.array([
        [0.9357, 0.0029, -0.0072, -1.0423],
        [-0.0065, 0.9396, -0.0726, -1.3940],
        [0.0103, 0.0752, 0.8967, 3.6475],
        [0.0000, 0.0000, 0.0000, 1.0000],
    ])
    # Talairach -> MNI is the inverse of the published transform.
    transform = np.linalg.inv(icbm_other)

    # Make the coordinates homogeneous and apply the transform.
    homogeneous = np.concatenate((coords, np.ones((1, coords.shape[1]))))
    transformed = np.dot(transform, homogeneous)
    out_coords = transformed[:3, :]

    # Restore the caller's original orientation.
    if use_dim == 1:
        out_coords = out_coords.transpose()
    return out_coords
@due.dcite(references.LANCASTER_TRANSFORM,
           description='Introduces the Lancaster MNI-to-Talairach transform, '
                       'as well as its inverse, the Talairach-to-MNI '
                       'transform.')
@due.dcite(references.LANCASTER_TRANSFORM_VALIDATION,
           description='Validates the Lancaster MNI-to-Talairach and '
                       'Talairach-to-MNI transforms.')
def mni2tal(coords):
    """
    Python version of BrainMap's icbm_other2tal.m.

    Converts coordinates from MNI space (normalized using templates other
    than those contained in SPM and FSL) to Talairach space using the
    icbm2tal transform developed and validated at the Research Imaging
    Center in San Antonio, Texas.
    http://www3.interscience.wiley.com/cgi-bin/abstract/114104479/ABSTRACT

    FORMAT outpoints = icbm_other2tal(inpoints)
    Where inpoints is N by 3 or 3 by N matrix of coordinates
    (N being the number of points)

    ric.uthscsa.edu 3/14/07
    """
    # Work out which axis holds the x/y/z triplets.
    shape = np.array(coords.shape)
    if all(shape == 3):
        LGR.info('Input is an ambiguous 3x3 matrix.\nAssuming coords are row '
                 'vectors (Nx3).')
        use_dim = 1
    elif not any(shape == 3):
        raise AttributeError('Input must be an Nx3 or 3xN matrix.')
    else:
        use_dim = np.where(shape == 3)[0][0]

    # Internally operate on coordinates as columns (3xN).
    if use_dim == 1:
        coords = coords.transpose()

    # Lancaster pooled affine for "other" (non-SPM/FSL) template
    # normalisations; applied directly for MNI -> Talairach.
    icbm_other = np.array([
        [0.9357, 0.0029, -0.0072, -1.0423],
        [-0.0065, 0.9396, -0.0726, -1.3940],
        [0.0103, 0.0752, 0.8967, 3.6475],
        [0.0000, 0.0000, 0.0000, 1.0000],
    ])

    # Make the coordinates homogeneous and apply the transform.
    homogeneous = np.concatenate((coords, np.ones((1, coords.shape[1]))))
    transformed = np.dot(icbm_other, homogeneous)
    out_coords = transformed[:3, :]

    # Restore the caller's original orientation.
    if use_dim == 1:
        out_coords = out_coords.transpose()
    return out_coords
def get_resource_path():
    """
    Returns the path to general resources, terminated with separator. Resources
    are kept outside package folder in "datasets".
    Based on function by <NAME> used in Neurosynth Python package.

    Fix: the separator is appended *after* ``op.abspath`` — ``abspath``
    normalizes its argument and strips a trailing separator, so the original
    ``op.abspath(path + op.sep)`` returned a path *without* the terminator
    promised by this docstring.
    """
    return op.abspath(op.join(op.dirname(__file__), 'resources')) + op.sep
def try_prepend(value, prefix):
    """Join ``prefix`` onto ``value`` when value is a string path; otherwise
    return ``value`` unchanged (e.g. None or already-loaded objects)."""
    if not isinstance(value, str):
        return value
    return op.join(prefix, value)
def find_stem(arr):
    """
    Return the longest common substring of all strings in ``arr``.

    From https://www.geeksforgeeks.org/longest-common-substring-array-strings/

    Fixes over the original:
    - a single-element array now returns that string (the original's inner
      ``for k in range(1, n)`` loop never ran, so ``k + 1 == n`` was false
      for n == 1 and "" was returned);
    - an empty array returns "" instead of raising IndexError.
    Tie-breaking is unchanged: the first maximal substring of arr[0] (in
    start-index, then length order) wins.
    """
    if not arr:
        return ""
    # All candidate substrings are taken from the first word.
    s = arr[0]
    if len(arr) == 1:
        return s
    res = ""
    ll = len(s)
    for i in range(ll):
        for j in range(i + 1, ll + 1):
            stem = s[i:j]
            # Keep the stem only if it is strictly longer than the current
            # best and appears in every other word.
            if len(stem) > len(res) and all(stem in word for word in arr[1:]):
                res = stem
    return res
| [
"numpy.abs",
"numpy.ones",
"os.path.join",
"numpy.round",
"os.path.dirname",
"nilearn.datasets.load_mni152_brain_mask",
"nilearn.datasets.load_mni152_template",
"nilearn.input_data.NiftiMasker",
"nibabel.Nifti1Image",
"numpy.ceil",
"numpy.min",
"nilearn.datasets.fetch_icbm152_brain_gm_mask",
... | [((255, 282), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (272, 282), False, 'import logging\n'), ((3592, 3610), 'numpy.sign', 'np.sign', (['onedarray'], {}), '(onedarray)\n', (3599, 3610), True, 'import numpy as np\n'), ((3718, 3735), 'numpy.abs', 'np.abs', (['onedarray'], {}), '(onedarray)\n', (3724, 3735), True, 'import numpy as np\n'), ((3744, 3755), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (3752, 3755), True, 'import numpy as np\n'), ((3769, 3784), 'numpy.ceil', 'np.ceil', (['x[idx]'], {}), '(x[idx])\n', (3776, 3784), True, 'import numpy as np\n'), ((4070, 4107), 'nibabel.affines.apply_affine', 'nib.affines.apply_affine', (['affine', 'ijk'], {}), '(affine, ijk)\n', (4094, 4107), True, 'import nibabel as nib\n'), ((5481, 5503), 'numpy.array', 'np.array', (['coords.shape'], {}), '(coords.shape)\n', (5489, 5503), True, 'import numpy as np\n'), ((5987, 6130), 'numpy.array', 'np.array', (['[[0.9357, 0.0029, -0.0072, -1.0423], [-0.0065, 0.9396, -0.0726, -1.394], [\n 0.0103, 0.0752, 0.8967, 3.6475], [0.0, 0.0, 0.0, 1.0]]'], {}), '([[0.9357, 0.0029, -0.0072, -1.0423], [-0.0065, 0.9396, -0.0726, -\n 1.394], [0.0103, 0.0752, 0.8967, 3.6475], [0.0, 0.0, 0.0, 1.0]])\n', (5995, 6130), True, 'import numpy as np\n'), ((6277, 6302), 'numpy.linalg.inv', 'np.linalg.inv', (['icbm_other'], {}), '(icbm_other)\n', (6290, 6302), True, 'import numpy as np\n'), ((6424, 6450), 'numpy.dot', 'np.dot', (['icbm_other', 'coords'], {}), '(icbm_other, coords)\n', (6430, 6450), True, 'import numpy as np\n'), ((7692, 7714), 'numpy.array', 'np.array', (['coords.shape'], {}), '(coords.shape)\n', (7700, 7714), True, 'import numpy as np\n'), ((8198, 8341), 'numpy.array', 'np.array', (['[[0.9357, 0.0029, -0.0072, -1.0423], [-0.0065, 0.9396, -0.0726, -1.394], [\n 0.0103, 0.0752, 0.8967, 3.6475], [0.0, 0.0, 0.0, 1.0]]'], {}), '([[0.9357, 0.0029, -0.0072, -1.0423], [-0.0065, 0.9396, -0.0726, -\n 1.394], [0.0103, 0.0752, 0.8967, 3.6475], [0.0, 0.0, 0.0, 
1.0]])\n', (8206, 8341), True, 'import numpy as np\n'), ((8552, 8578), 'numpy.dot', 'np.dot', (['icbm_other', 'coords'], {}), '(icbm_other, coords)\n', (8558, 8578), True, 'import numpy as np\n'), ((2754, 2768), 'nibabel.load', 'nib.load', (['mask'], {}), '(mask)\n', (2762, 2768), True, 'import nibabel as nib\n'), ((2834, 2851), 'nilearn.input_data.NiftiMasker', 'NiftiMasker', (['mask'], {}), '(mask)\n', (2845, 2851), False, 'from nilearn.input_data import NiftiMasker\n'), ((9142, 9164), 'os.path.join', 'op.join', (['prefix', 'value'], {}), '(prefix, value)\n', (9149, 9164), True, 'import os.path as op\n'), ((6379, 6408), 'numpy.ones', 'np.ones', (['(1, coords.shape[1])'], {}), '((1, coords.shape[1]))\n', (6386, 6408), True, 'import numpy as np\n'), ((8507, 8536), 'numpy.ones', 'np.ones', (['(1, coords.shape[1])'], {}), '((1, coords.shape[1]))\n', (8514, 8536), True, 'import numpy as np\n'), ((1260, 1291), 'nilearn.datasets.load_mni152_template', 'datasets.load_mni152_template', ([], {}), '()\n', (1289, 1291), False, 'from nilearn import datasets\n'), ((4348, 4369), 'numpy.linalg.inv', 'np.linalg.inv', (['affine'], {}), '(affine)\n', (4361, 4369), True, 'import numpy as np\n'), ((9017, 9037), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (9027, 9037), True, 'import os.path as op\n'), ((873, 902), 'nilearn.datasets.fetch_icbm152_2009', 'datasets.fetch_icbm152_2009', ([], {}), '()\n', (900, 902), False, 'from nilearn import datasets\n'), ((1051, 1102), 'nilearn.datasets.fetch_icbm152_brain_gm_mask', 'datasets.fetch_icbm152_brain_gm_mask', ([], {'threshold': '(0.2)'}), '(threshold=0.2)\n', (1087, 1102), False, 'from nilearn import datasets\n'), ((1340, 1373), 'nilearn.datasets.load_mni152_brain_mask', 'datasets.load_mni152_brain_mask', ([], {}), '()\n', (1371, 1373), False, 'from nilearn import datasets\n'), ((1935, 1966), 'nilearn.datasets.load_mni152_template', 'datasets.load_mni152_template', ([], {}), '()\n', (1964, 1966), False, 'from 
nilearn import datasets\n'), ((5788, 5808), 'numpy.where', 'np.where', (['(shape == 3)'], {}), '(shape == 3)\n', (5796, 5808), True, 'import numpy as np\n'), ((7999, 8019), 'numpy.where', 'np.where', (['(shape == 3)'], {}), '(shape == 3)\n', (8007, 8019), True, 'import numpy as np\n'), ((967, 996), 'nilearn.datasets.fetch_icbm152_2009', 'datasets.fetch_icbm152_2009', ([], {}), '()\n', (994, 996), False, 'from nilearn import datasets\n'), ((1527, 1558), 'nilearn.datasets.load_mni152_template', 'datasets.load_mni152_template', ([], {}), '()\n', (1556, 1558), False, 'from nilearn import datasets\n'), ((1742, 1780), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['data', 'temp_img.affine'], {}), '(data, temp_img.affine)\n', (1757, 1780), True, 'import nibabel as nib\n'), ((3678, 3697), 'numpy.round', 'np.round', (['onedarray'], {}), '(onedarray)\n', (3686, 3697), True, 'import numpy as np\n'), ((1665, 1677), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (1671, 1677), True, 'import numpy as np\n')] |
import random
import math
import numpy as np
class RBFN:
    """Radial basis function network: one constant (bias) neuron plus
    ``num_neuron`` Gaussian neurons, with output clipped to [-40, 40]."""

    def __init__(self, num_neuron, data_dim):
        self.num_neuron = num_neuron + 1  # +1 accounts for the bias neuron
        self.data_dim = data_dim
        self.dev_max = 15
        # Neuron 0 is the constant term; the remainder are Gaussian kernels.
        self.list_neuron = []
        for j in range(self.num_neuron):
            self.list_neuron.append(
                G_neuron(self.data_dim, is_constant=(j == 0), dev_max=self.dev_max))

    def output(self, data):
        """Network response for a single sample, denormalized into [-40, 40]."""
        data = np.asarray(data)
        if len(data) != self.data_dim:
            raise IndexError("data dimention must be %d, while it's %d" % (self.data_dim, len(data)))
        raw = sum(neuron.output(data) for neuron in self.list_neuron)
        # Scale by 40 and clip to the actuator range.
        return max(min(40 * raw, 40), -40)

    def update_parameters(self, parameters):
        '''
        Load a flat GA-optimized parameter vector.

        parameters: ( w0, w1 ,w2 , ..., wj , m11, ..., m1i, m21, ..., m2i,
        ..., mj1, ..., mji, sigma1, sigma2, ..., sigmaj ) where
        j = num_neuron - 1 Gaussian neurons and i = data_dim.
        '''
        n_gauss = self.num_neuron - 1
        self.list_neuron[0].weight = parameters[0]
        weights = parameters[1:self.num_neuron]
        means = parameters[self.num_neuron:-n_gauss]
        devs = parameters[-n_gauss:]
        for i, neuron in enumerate(self.list_neuron[1:]):
            neuron.weight = weights[i]
            neuron.means = np.asarray(means[i * self.data_dim:(i + 1) * self.data_dim])
            neuron.dev = devs[i]

    def load_parameters(self, parameters):
        '''
        Load row-per-neuron parameters:
        (w0, w1, m11, m12, ... sigma1, w2, m21, m22, ..., sigma2, ... wj, mj1, ... sigmaj)
        i.e. parameters[0] = (w0,), then one (w, m1..mi, sigma) row per Gaussian neuron.
        '''
        if len(parameters) != self.num_neuron:
            raise IndexError("neuron num incorrect")
        if len(parameters[1]) != self.data_dim + 2:
            raise IndexError("data dim incorrect")
        self.list_neuron[0].weight = parameters[0][0]
        rows = parameters[1:]
        for neuron, row in zip(self.list_neuron[1:], rows):
            neuron.weight = row[0]
            neuron.means = np.asarray(row[1:-1])
            neuron.dev = row[-1]
class G_neuron:
    """One RBF neuron: either a constant (bias) term, or a Gaussian kernel
    with centre ``means`` and standard deviation ``dev``."""

    def __init__(self, data_dim, dev_max, means=None, is_constant=False):
        self.is_constant = is_constant
        self.data_dim = data_dim
        if self.is_constant:
            # Bias neuron has no kernel parameters.
            self.means = None
            self.dev = None
        else:
            self.means = means
            self.dev_max = dev_max
            self.dev = random.uniform(0, self.dev_max)
        self.weight = random.uniform(-1, 1)

    def output(self, data):
        """Weighted response of this neuron for one sample."""
        if self.is_constant:
            return self.weight
        if self.means is None:
            # Lazily pick a random centre inside the working domain.
            self.means = np.random.uniform(-40, 40, size=self.data_dim)
        return self.weight * self.__Gaussian_kernel(data)

    def __Gaussian_kernel(self, data):
        # A degenerate (zero-width) kernel contributes nothing.
        if self.dev == 0:
            return 0
        diff = data - self.means
        return math.exp(-diff.dot(diff) / (2 * self.dev ** 2))
| [
"numpy.random.uniform",
"numpy.asarray",
"random.uniform"
] | [((501, 517), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (511, 517), True, 'import numpy as np\n'), ((2615, 2636), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (2629, 2636), False, 'import random\n'), ((1411, 1471), 'numpy.asarray', 'np.asarray', (['means[i * self.data_dim:(i + 1) * self.data_dim]'], {}), '(means[i * self.data_dim:(i + 1) * self.data_dim])\n', (1421, 1471), True, 'import numpy as np\n'), ((2489, 2520), 'random.uniform', 'random.uniform', (['(0)', 'self.dev_max'], {}), '(0, self.dev_max)\n', (2503, 2520), False, 'import random\n'), ((2123, 2154), 'numpy.asarray', 'np.asarray', (['parameters[n][1:-1]'], {}), '(parameters[n][1:-1])\n', (2133, 2154), True, 'import numpy as np\n'), ((2804, 2850), 'numpy.random.uniform', 'np.random.uniform', (['(-40)', '(40)'], {'size': 'self.data_dim'}), '(-40, 40, size=self.data_dim)\n', (2821, 2850), True, 'import numpy as np\n')] |
# Libraries
print("Load Libraries")
import os
import numpy as np
import pandas as pd
import tensorflow.keras.preprocessing.image as kpi
import tensorflow.keras.models as km
from tensorflow.python.client import device_lib
# Report whether TensorFlow sees a GPU (informational only).
MODE = "GPU" if "GPU" in [k.device_type for k in device_lib.list_local_devices()] else "CPU"
print(MODE)
## Argument parsing: epochs/batch_size must match the values used when the
## model was trained, since they are encoded in the saved model's filename.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=20)
DATA_DIR = '/Users/cecile/Documents/INSA/5A/AI-Frameworks/CodeDevelopment'
parser.add_argument('--data_dir', type=str,
                    default=DATA_DIR+"/data/")
parser.add_argument('--results_dir', type=str,
                    default=DATA_DIR+"/results/")
parser.add_argument('--model_dir', type=str,
                    default=DATA_DIR+"/model/")
args = parser.parse_args()
# TODO Define generator.
## Data Generator: stream test images from <data_dir>/test, resized to
## 150x150 and rescaled to [0, 1]; class_mode=None because labels are unknown.
img_width = 150
img_height = 150
data_dir_test = args.data_dir+'/test'
# Number of test images (flow_from_directory expects one sub-folder per class,
# hence the nested "test/test" layout).
N_test = len(os.listdir(data_dir_test+"/test"))
test_datagen = kpi.ImageDataGenerator(rescale=1. / 255)
test_generator = test_datagen.flow_from_directory(
    data_dir_test,
    target_size=(img_height, img_width),
    batch_size=args.batch_size,
    class_mode=None,
    shuffle=False)
## Download model
# Todo Download model saved in learning script.
args_str = "epochs_%d_batch_size_%d" %(args.epochs, args.batch_size)
model_conv = km.load_model(args.model_dir + "/" + args_str + ".h5")
## Prediction: one sigmoid probability per image.
test_prediction = model_conv.predict_generator(test_generator, N_test // args.batch_size, verbose=1)
## Save prediction in csv: filename, raw probability, and the 0/1 class
## obtained by thresholding at 0.5.
images_test = test_generator.filenames
classes = [int(t>0.5) for t in test_prediction]
array = np.vstack((images_test, test_prediction[:,0], classes)).T
df = pd.DataFrame(array, columns=["filename","probabilities","classes"])
df.to_csv(args.results_dir+"/prediction_"+args_str+".csv", index=False)
| [
"pandas.DataFrame",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.models.load_model",
"argparse.ArgumentParser",
"tensorflow.python.client.device_lib.list_local_devices",
"os.listdir",
"numpy.vstack"
] | [((370, 395), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (393, 395), False, 'import argparse\n'), ((1076, 1117), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'kpi.ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (1098, 1117), True, 'import tensorflow.keras.preprocessing.image as kpi\n'), ((1451, 1505), 'tensorflow.keras.models.load_model', 'km.load_model', (["(args.model_dir + '/' + args_str + '.h5')"], {}), "(args.model_dir + '/' + args_str + '.h5')\n", (1464, 1505), True, 'import tensorflow.keras.models as km\n'), ((1808, 1877), 'pandas.DataFrame', 'pd.DataFrame', (['array'], {'columns': "['filename', 'probabilities', 'classes']"}), "(array, columns=['filename', 'probabilities', 'classes'])\n", (1820, 1877), True, 'import pandas as pd\n'), ((1025, 1060), 'os.listdir', 'os.listdir', (["(data_dir_test + '/test')"], {}), "(data_dir_test + '/test')\n", (1035, 1060), False, 'import os\n'), ((1745, 1801), 'numpy.vstack', 'np.vstack', (['(images_test, test_prediction[:, 0], classes)'], {}), '((images_test, test_prediction[:, 0], classes))\n', (1754, 1801), True, 'import numpy as np\n'), ((275, 306), 'tensorflow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', ([], {}), '()\n', (304, 306), False, 'from tensorflow.python.client import device_lib\n')] |
#!/usr/bin/env python2
from __future__ import print_function, division
import scipy.stats, json, argparse
import numpy as np
import matplotlib
#matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
def unpack_spans(l):
    """Expand a list of spans into a flat index list.

    A span (None, count) contributes ``count`` None entries; a span (a, b)
    contributes the inclusive range a..b.
    """
    out = []
    for first, last in l:
        if first is None:
            out.extend([None] * last)
        else:
            out.extend(range(first, last + 1))
    return out
def extract_info(obj):
    """Summarize one superposition record.

    Parameters
    ----------
    obj : dict with keys
        'qpresent' / 'spresent': lists of inclusive (start, end) spans of
            residues present in query / subject;
        'distances': per-position distances, None where unaligned;
        'rmsd': alignment RMSD (may be -1 on failure — callers filter that).

    Returns
    -------
    (rmsd, coverage) where coverage = aligned positions / length of the
    longer of the two present sequences.

    Cleanup: the original carried large blocks of commented-out code and
    dead accumulators (qaligned/saligned) that never affected the result.
    """
    qpresent = sum(end - start + 1 for start, end in obj['qpresent'])
    spresent = sum(end - start + 1 for start, end in obj['spresent'])
    aligned = sum(1 for dist in obj['distances'] if dist is not None)
    return obj['rmsd'], aligned / max(qpresent, spresent)
def get_bin(bins, data):
    """Return the index of the first bin edge >= ``data``.

    ``bins`` must be sorted ascending (they are built with np.arange).
    Values larger than every edge are clamped into the last bin, matching
    the original linear scan's fall-through. Uses binary search instead of
    the original O(n) scan since this runs once per input record.
    """
    from bisect import bisect_left
    return min(bisect_left(bins, data), len(bins) - 1)
def plot_kernel(kernel, lim=None, values=None):
    """Render a 2-D density estimate over an (RMSD, coverage) grid as a heat map.

    kernel: callable taking a 2xM array of (x, y) positions and returning
        densities (e.g. scipy.stats.gaussian_kde).
    lim: [[xmin, xmax], [ymin, ymax]] axis limits; defaults to RMSD 0-10,
        coverage 0-1.
    values: raw sample points; only used by the commented-out scatter overlay.
    """
    lim = [[0., 10.], [0., 1.]] if lim is None else lim
    # Dense evaluation grid over the plotting window.
    #X, Y = np.mgrid[lim[0][0]:lim[0][1]:0.1, lim[1][0]:lim[1][1]:0.01]
    X, Y = np.mgrid[lim[0][0]:lim[0][1]:0.05, lim[1][0]:lim[1][1]:0.005]
    positions = np.vstack([X.ravel(), Y.ravel()])
    # Evaluate the kernel at every grid point and reshape back to 2-D.
    Z = np.reshape(kernel(positions).T, X.shape)
    fig, ax = plt.subplots()
    plt.tight_layout(True)
    #fig.set_figwidth(10)
    #fig.set_figheight(10)
    #ax.imshow(np.rot90(Z), cmap=cm.gist_earth_r, extent=lim[0]+lim[1], aspect=10)
    # rot90 maps the array's first axis onto the plot's horizontal axis.
    ax.imshow(np.rot90(Z), cmap=cm.magma, extent=lim[0]+lim[1], aspect=7)
    #ax.plot(values[0], values[1], '.', markersize=1)
    ax.set_xlim(lim[0])
    ax.set_ylim(lim[1])
    plt.show()
def plot(x, y, z):
    """Surface plot of the joint (RMSD, coverage) frequency distribution.

    x, y: 1-D bin-edge arrays; z: 2-D frequency matrix indexed [cov, rmsd].
    The surface shows z normalized to sum to 1; saves to qr.png then shows.
    """
    X, Y = np.meshgrid(x, y)
    plt.rc('text', usetex=True)
    fig = plt.figure()
    #ax = fig.add_subplot(111, projection='3d')
    #ax.plot_
    ax = fig.gca(projection='3d')
    #ax.contour(X, Y, z, extend3d=True, cmap=cm.coolwarm)
    # Normalize counts to a probability mass before plotting.
    surf = ax.plot_surface(X, Y, z/np.sum(z), antialiased=False, cmap=cm.magma)
    #ax.set_zscale('log')
    ax.set_zlim(0.0, np.max(z/np.sum(z)))
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.03f'))
    fig.colorbar(surf, shrink=0.5, aspect=5)
    ax.set_xlabel('RMSD')
    ax.set_ylabel('Coverage')
    ax.set_zlabel('f')
    plt.savefig('qr.png')
    plt.show()
def main(filelist, max_rmsd=4.0, min_cov=0.8):
    """Accumulate RMSD/coverage statistics from superposition files and plot a KDE.

    Each input file is tab-separated: name \\t JSON record (see extract_info).
    Records with rmsd == -1 or cov == -1 (failed alignments) are skipped.
    max_rmsd / min_cov only set the binning and plot window; the hard filters
    on them are currently commented out.
    """
    bins_rmsd = np.arange(0, max_rmsd, 0.1)
    bins_cov = np.arange(min_cov, 1.0, 0.01)
    freq_rmsd = np.zeros(len(bins_rmsd))
    freq_cov = np.zeros(len(bins_cov))
    freq_2d = np.zeros([len(freq_cov), len(freq_rmsd)])
    rmsds = []
    covs = []
    n = 0
    for fn in filelist:
        with open(fn) as f:
            for l in f:
                sl = l.split('\t')
                name = sl[0]
                obj = json.loads(sl[1])
                rmsd, cov = extract_info(obj)
                #if rmsd > max_rmsd: continue
                if rmsd == -1: continue
                #if cov < min_cov: continue
                if cov == -1: continue
                # Histogram bookkeeping; only the raw value lists feed the
                # KDE plot below, the 2-D histogram path is commented out.
                rmsdbin = get_bin(bins_rmsd, rmsd)
                covbin = get_bin(bins_cov, cov)
                freq_rmsd[rmsdbin] += 1
                freq_cov[covbin] += 1
                freq_2d[covbin,rmsdbin] += 1
                rmsds.append(rmsd)
                covs.append(cov)
                #print(name, rmsd, cov)
    #print(zip(bins_rmsd, freq_rmsd))
    #print(zip(bins_cov, freq_cov))
    #print(freq_2d)
    # Gaussian KDE over the raw (rmsd, cov) pairs, rendered as a heat map.
    values = np.vstack([rmsds, covs])
    kernel = scipy.stats.gaussian_kde(values)
    plot_kernel(lim=[[0., max_rmsd], [min_cov, 1.]], kernel=kernel, values=values)
    #plot(bins_rmsd, bins_cov, freq_2d)
if __name__ == '__main__':
    # Command-line entry point: one or more superposition stat files.
    parser = argparse.ArgumentParser()
    parser.add_argument('infile', nargs='+', help='superpositions to do stats for')
    parser.add_argument('--max-rmsd', type=float, default=7.5)
    parser.add_argument('--min-cov', type=float, default=0.0)
    args = parser.parse_args()
    main(args.infile, max_rmsd=args.max_rmsd, min_cov=args.min_cov)
| [
"numpy.meshgrid",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"numpy.sum",
"json.loads",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"numpy.rot90",
"numpy.arange",
"matplotlib.pyplot.rc",
"matplotlib.ticker.LinearLocator",
"matplotlib.ticker.FormatStrFormatter",
"numpy... | [((1730, 1744), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1742, 1744), True, 'import matplotlib.pyplot as plt\n'), ((1746, 1768), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', (['(True)'], {}), '(True)\n', (1762, 1768), True, 'import matplotlib.pyplot as plt\n'), ((2061, 2071), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2069, 2071), True, 'import matplotlib.pyplot as plt\n'), ((2101, 2118), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2112, 2118), True, 'import numpy as np\n'), ((2120, 2147), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (2126, 2147), True, 'import matplotlib.pyplot as plt\n'), ((2155, 2167), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2165, 2167), True, 'import matplotlib.pyplot as plt\n'), ((2671, 2692), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""qr.png"""'], {}), "('qr.png')\n", (2682, 2692), True, 'import matplotlib.pyplot as plt\n'), ((2694, 2704), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2702, 2704), True, 'import matplotlib.pyplot as plt\n'), ((2766, 2793), 'numpy.arange', 'np.arange', (['(0)', 'max_rmsd', '(0.1)'], {}), '(0, max_rmsd, 0.1)\n', (2775, 2793), True, 'import numpy as np\n'), ((2806, 2835), 'numpy.arange', 'np.arange', (['min_cov', '(1.0)', '(0.01)'], {}), '(min_cov, 1.0, 0.01)\n', (2815, 2835), True, 'import numpy as np\n'), ((3614, 3638), 'numpy.vstack', 'np.vstack', (['[rmsds, covs]'], {}), '([rmsds, covs])\n', (3623, 3638), True, 'import numpy as np\n'), ((3839, 3864), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3862, 3864), False, 'import scipy.stats, json, argparse\n'), ((1907, 1918), 'numpy.rot90', 'np.rot90', (['Z'], {}), '(Z)\n', (1915, 1918), True, 'import numpy as np\n'), ((2477, 2494), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(10)'], {}), '(10)\n', (2490, 2494), False, 'from matplotlib.ticker import 
LinearLocator, FormatStrFormatter\n'), ((2526, 2553), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.03f"""'], {}), "('%.03f')\n", (2544, 2553), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((2342, 2351), 'numpy.sum', 'np.sum', (['z'], {}), '(z)\n', (2348, 2351), True, 'import numpy as np\n'), ((2437, 2446), 'numpy.sum', 'np.sum', (['z'], {}), '(z)\n', (2443, 2446), True, 'import numpy as np\n'), ((3104, 3121), 'json.loads', 'json.loads', (['sl[1]'], {}), '(sl[1])\n', (3114, 3121), False, 'import scipy.stats, json, argparse\n')] |
"""
Data Utils for Sauvola Document Binarization
"""
import os
import cv2
import numpy as np
from glob import glob
from absl import logging
def collect_binarization_by_dataset(dataset_root):
    """Scan ``dataset_root`` for (source, target) binarization pairs.

    A file whose name contains 'source' is paired with the sibling file whose
    name substitutes 'target'; unmatched sources are logged and skipped.
    Returns {dataset_name -> [(source_path, target_path), ...]}, where the
    dataset name is the basename prefix before the first underscore.
    """
    candidates = glob(f'{dataset_root}/*.*')
    lut = {}
    for source_file in candidates:
        if 'source' not in source_file:
            continue
        target_file = source_file.replace('source', 'target')
        if target_file not in candidates:
            logging.warning(f"Fail to find pair\n\tsource={source_file}\n\ttarget={target_file}")
            continue
        logging.info(f"Found pair\n\tsource={source_file}\n\ttarget={target_file}")
        dname = os.path.basename(source_file).split('_')[0]
        lut.setdefault(dname, []).append((source_file, target_file))
    return lut
class DataGenerator :
"""Simple Data Generator that consumes paired (img, gt)
and outputs batch of (X, Y), where
X is of shape `output_shape + (1,)`
Y is of shape `output_shape + (1,)`
When mode='training', image flipping is applied
#TODO:
1. more data augmentations, e.g. color, size, noise, etc.
2. balanced sampling w.r.t. dataset names
"""
    def __init__(self, data_pairs,
                 output_shape=(256,256),
                 batch_size=64,
                 nb_batches_per_epoch=1000,
                 mode='training',
                 seed=123455,
                 minimum_text_rate=0,
                 ) :
        """Args:
            data_pairs: list of (image, gt) pairs; entries may be file paths
                (loaded as grayscale via cv2) or already-loaded arrays.
            output_shape: (height, width) of the random crops; None keeps the
                original size (then batch_size must be 1).
            mode: 'training' (random crops + flips), 'testing' (one sample per
                batch, sequential), anything else = validation-style
                (deterministic per-batch RNG, no flips).
            minimum_text_rate: minimum fraction of text (GT < 127) pixels a
                crop should contain before re-sampling is attempted.
        """
        self.data_pairs = self._read_data_if_necessary(data_pairs)
        self.nb_samples = len(data_pairs)
        self.output_shape = output_shape
        self.mode = mode
        self.minimum_text_rate = minimum_text_rate
        if mode != 'testing' :
            self.batch_size = batch_size
            self.nb_batches_per_epoch = nb_batches_per_epoch
        else :
            # Testing walks every sample exactly once, one per batch.
            self.batch_size = 1
            self.nb_batches_per_epoch = self.nb_samples
        self.batch_idx = 0
        self.prng = self.get_prng(seed)
def _read_data_if_necessary(self, data_pairs) :
rets = []
for src, gt in data_pairs :
if isinstance(src, str) :
src = cv2.imread(src, 0)
if isinstance(gt, str) :
gt = cv2.imread(gt, 0)
rets.append([src, gt])
return rets
def get_prng(self, seed=None) :
if (seed is not None) :
return np.random.RandomState(seed)
else :
return np.random.RandomState(None)
    def __len__(self) :
        # Length = number of batches served per epoch (fit_generator contract).
        return self.nb_batches_per_epoch
    def __iter__(self) :
        # The generator is its own (infinite) iterator; __next__ drives it.
        return self
def __next__(self) :
bidx = self.batch_idx
if (self.batch_idx >= self.nb_batches_per_epoch) :
bidx = self.batch_idx = 0
else :
self.batch_idx += 1
return self[bidx]
    def crop_sample(self, img, gt, prng, niter=1) :
        """Cut a random output_shape crop from (img, gt).

        Images smaller than the crop are tiled (stacked with themselves)
        until large enough. If the crop contains too little text (fraction of
        GT pixels < 127 below minimum_text_rate), re-draw up to 5 times, then
        accept whatever came last. Note: the tiling recursion restarts niter
        at its default of 1.
        """
        h, w = img.shape[:2]
        if self.output_shape is None :
            assert self.batch_size == 1, "ERROR: original output size is only compatible with batch_size = 1"
            return img, gt
        else :
            th, tw = self.output_shape
            if (h<th) :
                # Too short: tile vertically and retry.
                return self.crop_sample( np.row_stack([img, img]),
                                         np.row_stack([gt, gt]),
                                         prng )
            elif (w<tw) :
                # Too narrow: tile horizontally and retry.
                return self.crop_sample( np.column_stack([img, img]),
                                         np.column_stack([gt, gt]),
                                         prng )
            else :
                y0 = prng.randint(0, h-th+1)
                x0 = prng.randint(0, w-tw+1)
                cim, cgt = img[y0:y0+th, x0:x0+tw], gt[y0:y0+th, x0:x0+tw]
                # Fraction of "text" (dark) pixels in the ground truth crop.
                perc_text = np.mean(cgt < 127)
                if perc_text < self.minimum_text_rate :
                    if niter < 5 :
                        return self.crop_sample(img, gt, prng, niter+1)
                return cim, cgt
    def __getitem__(self, batch_idx) :
        """Build one batch.

        training: random sample indices from the persistent RNG, plus random
            vertical flip, horizontal flip and transpose augmentations;
        testing: exactly sample `batch_idx`, no augmentation;
        otherwise (validation): random indices from an RNG seeded with
            batch_idx, so each batch is deterministic across epochs.
        Returns (X, Y) as produced by postprocess_image / postprocess_label.
        """
        if self.mode != 'testing' :
            if self.mode == 'training' :
                prng = self.prng
            else :
                # Deterministic per-batch RNG for reproducible validation.
                prng = self.get_prng(batch_idx)
            indices = prng.randint(0, self.nb_samples, size=(self.batch_size,))
        else :
            indices = [batch_idx]
            prng = self.prng
        X, Y = [], []
        for i in indices :
            img, gt = self.data_pairs[i]
            x, y = self.crop_sample(img, gt, prng)
            if (self.mode == 'training') :
                # Random flips/transpose, applied identically to image and GT.
                if prng.randn() > 0 :
                    x = x[::-1]
                    y = y[::-1]
                if prng.randn() > 0 :
                    x = x[:,::-1]
                    y = y[:,::-1]
                if prng.randn() > 0 :
                    x = x.T
                    y = y.T
            X.append(x)
            Y.append(y)
        return self.postprocess_image(X), self.postprocess_label(Y)
def postprocess_image(self, X) :
X = [ (x-x.min())/(x.max()-x.min()+.1) for x in X]
return np.expand_dims(np.stack(X, axis=0), axis=-1).astype('float32')
def postprocess_label(self, Y) :
Y = np.expand_dims(np.stack(Y, axis=0), axis=-1).astype('float32')-127
Y = np.sign(Y)
return Y | [
"numpy.stack",
"os.path.basename",
"numpy.column_stack",
"numpy.random.RandomState",
"absl.logging.warning",
"absl.logging.info",
"cv2.imread",
"numpy.mean",
"numpy.row_stack",
"numpy.sign",
"glob.glob"
] | [((326, 353), 'glob.glob', 'glob', (['f"""{dataset_root}/*.*"""'], {}), "(f'{dataset_root}/*.*')\n", (330, 353), False, 'from glob import glob\n'), ((5491, 5501), 'numpy.sign', 'np.sign', (['Y'], {}), '(Y)\n', (5498, 5501), True, 'import numpy as np\n'), ((2633, 2660), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (2654, 2660), True, 'import numpy as np\n'), ((2695, 2722), 'numpy.random.RandomState', 'np.random.RandomState', (['None'], {}), '(None)\n', (2716, 2722), True, 'import numpy as np\n'), ((568, 643), 'absl.logging.info', 'logging.info', (['f"""Found pair\n\tsource={source_file}\n\ttarget={target_file}"""'], {}), '(f"""Found pair\n\tsource={source_file}\n\ttarget={target_file}""")\n', (580, 643), False, 'from absl import logging\n'), ((679, 769), 'absl.logging.warning', 'logging.warning', (['f"""Fail to find pair\n\tsource={source_file}\n\ttarget={target_file}"""'], {}), '(\n f"""Fail to find pair\n\tsource={source_file}\n\ttarget={target_file}""")\n', (694, 769), False, 'from absl import logging\n'), ((2396, 2414), 'cv2.imread', 'cv2.imread', (['src', '(0)'], {}), '(src, 0)\n', (2406, 2414), False, 'import cv2\n'), ((2473, 2490), 'cv2.imread', 'cv2.imread', (['gt', '(0)'], {}), '(gt, 0)\n', (2483, 2490), False, 'import cv2\n'), ((3434, 3458), 'numpy.row_stack', 'np.row_stack', (['[img, img]'], {}), '([img, img])\n', (3446, 3458), True, 'import numpy as np\n'), ((3502, 3524), 'numpy.row_stack', 'np.row_stack', (['[gt, gt]'], {}), '([gt, gt])\n', (3514, 3524), True, 'import numpy as np\n'), ((3999, 4017), 'numpy.mean', 'np.mean', (['(cgt < 127)'], {}), '(cgt < 127)\n', (4006, 4017), True, 'import numpy as np\n'), ((5315, 5334), 'numpy.stack', 'np.stack', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (5323, 5334), True, 'import numpy as np\n'), ((810, 829), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (826, 829), False, 'import os\n'), ((3641, 3668), 'numpy.column_stack', 'np.column_stack', (['[img, img]'], 
{}), '([img, img])\n', (3656, 3668), True, 'import numpy as np\n'), ((3712, 3737), 'numpy.column_stack', 'np.column_stack', (['[gt, gt]'], {}), '([gt, gt])\n', (3727, 3737), True, 'import numpy as np\n'), ((5427, 5446), 'numpy.stack', 'np.stack', (['Y'], {'axis': '(0)'}), '(Y, axis=0)\n', (5435, 5446), True, 'import numpy as np\n')] |
import numpy as np
import rospy
import math
from sensor_msgs.msg import Imu
from tf.transformations import euler_from_quaternion
rad2degrees = 180.0 / math.pi
degrees2rad = math.pi / 180.0
class ImuHandler(object):
    """
    Handler for ROS topics of type: sensor_msgs/Imu.

    Buffers incoming IMU readings (orientation as Euler angles, angular
    velocity, linear acceleration) and returns their mean on demand.

    Args:
        topic_name: Name of ROS topic to be subscribed
        buffer_size: Variable buffer, depend on frame rate of topic, default: 500
        queue_size: Subscriber queue_size
    """
    def __init__(self, topic_name, buffer_size=500, queue_size=10):
        self.imu_data = Imu()
        # Mean values, refreshed by get_value().
        [self.roll, self.pitch, self.yaw,
         self.angular_x, self.angular_y, self.angular_z,
         self.lin_acc_x, self.lin_acc_y, self.lin_acc_z] = np.zeros((9, 1)) * 0.0
        # Instantaneous values, refreshed by every callback.
        [self._roll, self._pitch, self._yaw,
         self._angular_x, self._angular_y, self._angular_z,
         self._lin_acc_x, self._lin_acc_y, self._lin_acc_z] = np.zeros((9, 1)) * 0.0
        self.topic_imu = topic_name
        self.queue_size = queue_size
        self.buffer_size = buffer_size
        # Callbacks received since the last get_value(); may exceed
        # buffer_size, in which case extra samples are dropped.
        self.counter = 0
        self.buffer = np.zeros([self.buffer_size, 9])
        self.sub = rospy.Subscriber(self.topic_imu, Imu, self.callback, queue_size=self.queue_size)

    def callback(self, msg):
        """Store the latest IMU sample; append it to the buffer while there
        is room, otherwise just log that the buffer is full."""
        self.imu_data = msg
        quat = (
            self.imu_data.orientation.x,
            self.imu_data.orientation.y,
            self.imu_data.orientation.z,
            self.imu_data.orientation.w)
        (self._roll, self._pitch, self._yaw) = euler_from_quaternion(quat)
        self._angular_x = self.imu_data.angular_velocity.x
        self._angular_y = self.imu_data.angular_velocity.y
        self._angular_z = self.imu_data.angular_velocity.z
        self._lin_acc_x = self.imu_data.linear_acceleration.x
        self._lin_acc_y = self.imu_data.linear_acceleration.y
        self._lin_acc_z = self.imu_data.linear_acceleration.z
        if self.counter < self.buffer_size:
            self.buffer[self.counter] = [self._roll, self._pitch, self._yaw,
                                         self._angular_x, self._angular_y, self._angular_z,
                                         self._lin_acc_x, self._lin_acc_y, self._lin_acc_z]
        else:
            rospy.loginfo("ImuHandler for: " + self.topic_imu + " has reached buffer size.")
        self.counter += 1

    def get_value(self):
        """Return the mean of the samples buffered since the last call, then
        reset the buffer. If no sample arrived, the previous means are
        returned unchanged.

        Fix: the divisor is the number of samples actually *stored*,
        min(counter, buffer_size). The original divided by the raw callback
        count, which diluted the mean once the buffer overflowed (the
        callback stops writing when counter >= buffer_size but keeps
        incrementing the counter).
        """
        if self.counter > 0:
            n = min(self.counter, self.buffer_size)
            (self.roll, self.pitch, self.yaw,
             self.angular_x, self.angular_y, self.angular_z,
             self.lin_acc_x, self.lin_acc_y, self.lin_acc_z) = self.buffer.sum(axis=0) / n
            self.buffer = np.zeros([self.buffer_size, 9])
            self.counter = 0
        return [self.roll, self.pitch, self.yaw,
                self.angular_x, self.angular_y, self.angular_z,
                self.lin_acc_x, self.lin_acc_y, self.lin_acc_z]
| [
"rospy.Subscriber",
"numpy.sum",
"numpy.zeros",
"sensor_msgs.msg.Imu",
"rospy.loginfo",
"tf.transformations.euler_from_quaternion"
] | [((568, 573), 'sensor_msgs.msg.Imu', 'Imu', ([], {}), '()\n', (571, 573), False, 'from sensor_msgs.msg import Imu\n'), ((1107, 1138), 'numpy.zeros', 'np.zeros', (['[self.buffer_size, 9]'], {}), '([self.buffer_size, 9])\n', (1115, 1138), True, 'import numpy as np\n'), ((1159, 1244), 'rospy.Subscriber', 'rospy.Subscriber', (['self.topic_imu', 'Imu', 'self.callback'], {'queue_size': 'self.queue_size'}), '(self.topic_imu, Imu, self.callback, queue_size=self.queue_size\n )\n', (1175, 1244), False, 'import rospy\n'), ((1528, 1555), 'tf.transformations.euler_from_quaternion', 'euler_from_quaternion', (['quat'], {}), '(quat)\n', (1549, 1555), False, 'from tf.transformations import euler_from_quaternion\n'), ((3056, 3087), 'numpy.zeros', 'np.zeros', (['[self.buffer_size, 9]'], {}), '([self.buffer_size, 9])\n', (3064, 3087), True, 'import numpy as np\n'), ((732, 748), 'numpy.zeros', 'np.zeros', (['(9, 1)'], {}), '((9, 1))\n', (740, 748), True, 'import numpy as np\n'), ((923, 939), 'numpy.zeros', 'np.zeros', (['(9, 1)'], {}), '((9, 1))\n', (931, 939), True, 'import numpy as np\n'), ((2253, 2338), 'rospy.loginfo', 'rospy.loginfo', (["('ImuHandler for: ' + self.topic_imu + ' has reached buffer size.')"], {}), "('ImuHandler for: ' + self.topic_imu + ' has reached buffer size.'\n )\n", (2266, 2338), False, 'import rospy\n'), ((2440, 2465), 'numpy.sum', 'np.sum', (['self.buffer[:, 0]'], {}), '(self.buffer[:, 0])\n', (2446, 2465), True, 'import numpy as np\n'), ((2506, 2531), 'numpy.sum', 'np.sum', (['self.buffer[:, 1]'], {}), '(self.buffer[:, 1])\n', (2512, 2531), True, 'import numpy as np\n'), ((2570, 2595), 'numpy.sum', 'np.sum', (['self.buffer[:, 2]'], {}), '(self.buffer[:, 2])\n', (2576, 2595), True, 'import numpy as np\n'), ((2641, 2666), 'numpy.sum', 'np.sum', (['self.buffer[:, 3]'], {}), '(self.buffer[:, 3])\n', (2647, 2666), True, 'import numpy as np\n'), ((2711, 2736), 'numpy.sum', 'np.sum', (['self.buffer[:, 4]'], {}), '(self.buffer[:, 4])\n', (2717, 2736), True, 
'import numpy as np\n'), ((2781, 2806), 'numpy.sum', 'np.sum', (['self.buffer[:, 5]'], {}), '(self.buffer[:, 5])\n', (2787, 2806), True, 'import numpy as np\n'), ((2852, 2877), 'numpy.sum', 'np.sum', (['self.buffer[:, 6]'], {}), '(self.buffer[:, 6])\n', (2858, 2877), True, 'import numpy as np\n'), ((2922, 2947), 'numpy.sum', 'np.sum', (['self.buffer[:, 7]'], {}), '(self.buffer[:, 7])\n', (2928, 2947), True, 'import numpy as np\n'), ((2992, 3017), 'numpy.sum', 'np.sum', (['self.buffer[:, 8]'], {}), '(self.buffer[:, 8])\n', (2998, 3017), True, 'import numpy as np\n')] |
import chainer
import numpy as np
from test.util import generate_kernel_test_case, wrap_template
from webdnn.graph.placeholder import Placeholder
from webdnn.frontend.chainer.converter import ChainerConverter
from webdnn.frontend.chainer.placeholder_variable import PlaceholderVariable
@wrap_template
def template(n=5, k=2.0, alpha=1e-4, beta=.75, description=""):
    """Build a Chainer LRN graph via webdnn and emit a kernel test case for it."""
    # Random NCHW float32 input for F.local_response_normalization.
    source = chainer.Variable(np.random.rand(2, 4, 6, 8).astype(np.float32))
    result = chainer.functions.local_response_normalization(source, n=n, k=k, alpha=alpha, beta=beta)

    graph = ChainerConverter().convert([source], [result])

    generate_kernel_test_case(
        description=f"[chainer] F.local_response_normalization {description}",
        graph=graph,
        backend=["webgpu", "webassembly", "fallback"],
        inputs={graph.inputs[0]: source.data},
        expected={graph.outputs[0]: result.data}
    )
def test():
    template()  # default LRN hyper-parameters
def test_n():
    template(n=4)  # non-default window size
def test_k():
    template(k=3.5)  # non-default bias constant
def test_alpha():
    template(alpha=1e-2)  # non-default scaling coefficient
def test_beta():
    template(beta=0.5)  # non-default exponent
def test_with_placeholder():
    """Convert LRN with a fully symbolic input shape, then bind N, C, H, W."""
    vx = chainer.Variable(np.random.rand(1, 3, 16, 16).astype(np.float32))
    vy = chainer.functions.local_response_normalization(vx)

    # Build the graph against a symbolic NCHW shape.
    N, C, H, W = [Placeholder(label=label) for label in "NCHW"]
    px = PlaceholderVariable([N, C, H, W])
    py = chainer.functions.local_response_normalization(px)

    graph = ChainerConverter().convert([px], [py])

    # Resolve the placeholders to the concrete shape used above.
    for placeholder, value in zip((N, C, H, W), (1, 3, 16, 16)):
        placeholder.value = value

    generate_kernel_test_case(
        description=f"[chainer] F.local_response_normalization with placeholder",
        graph=graph,
        backend=["webgpu", "webassembly"],
        inputs={graph.inputs[0]: vx.data},
        expected={graph.outputs[0]: vy.data},
    )
| [
"chainer.functions.local_response_normalization",
"test.util.generate_kernel_test_case",
"webdnn.frontend.chainer.placeholder_variable.PlaceholderVariable",
"numpy.random.rand",
"webdnn.frontend.chainer.converter.ChainerConverter",
"webdnn.graph.placeholder.Placeholder"
] | [((450, 538), 'chainer.functions.local_response_normalization', 'chainer.functions.local_response_normalization', (['vx'], {'n': 'n', 'k': 'k', 'alpha': 'alpha', 'beta': 'beta'}), '(vx, n=n, k=k, alpha=alpha,\n beta=beta)\n', (496, 538), False, 'import chainer\n'), ((642, 855), 'test.util.generate_kernel_test_case', 'generate_kernel_test_case', ([], {'description': 'f"""[chainer] F.local_response_normalization {description}"""', 'graph': 'graph', 'backend': "['webgpu', 'webassembly', 'fallback']", 'inputs': '{x: vx.data}', 'expected': '{y: vy.data}'}), "(description=\n f'[chainer] F.local_response_normalization {description}', graph=graph,\n backend=['webgpu', 'webassembly', 'fallback'], inputs={x: vx.data},\n expected={y: vy.data})\n", (667, 855), False, 'from test.util import generate_kernel_test_case, wrap_template\n'), ((1190, 1240), 'chainer.functions.local_response_normalization', 'chainer.functions.local_response_normalization', (['vx'], {}), '(vx)\n', (1236, 1240), False, 'import chainer\n'), ((1250, 1272), 'webdnn.graph.placeholder.Placeholder', 'Placeholder', ([], {'label': '"""N"""'}), "(label='N')\n", (1261, 1272), False, 'from webdnn.graph.placeholder import Placeholder\n'), ((1281, 1303), 'webdnn.graph.placeholder.Placeholder', 'Placeholder', ([], {'label': '"""C"""'}), "(label='C')\n", (1292, 1303), False, 'from webdnn.graph.placeholder import Placeholder\n'), ((1312, 1334), 'webdnn.graph.placeholder.Placeholder', 'Placeholder', ([], {'label': '"""H"""'}), "(label='H')\n", (1323, 1334), False, 'from webdnn.graph.placeholder import Placeholder\n'), ((1343, 1365), 'webdnn.graph.placeholder.Placeholder', 'Placeholder', ([], {'label': '"""W"""'}), "(label='W')\n", (1354, 1365), False, 'from webdnn.graph.placeholder import Placeholder\n'), ((1375, 1408), 'webdnn.frontend.chainer.placeholder_variable.PlaceholderVariable', 'PlaceholderVariable', (['[N, C, H, W]'], {}), '([N, C, H, W])\n', (1394, 1408), False, 'from 
webdnn.frontend.chainer.placeholder_variable import PlaceholderVariable\n'), ((1418, 1468), 'chainer.functions.local_response_normalization', 'chainer.functions.local_response_normalization', (['px'], {}), '(px)\n', (1464, 1468), False, 'import chainer\n'), ((1642, 1848), 'test.util.generate_kernel_test_case', 'generate_kernel_test_case', ([], {'description': 'f"""[chainer] F.local_response_normalization with placeholder"""', 'graph': 'graph', 'backend': "['webgpu', 'webassembly']", 'inputs': '{x: vx.data}', 'expected': '{y: vy.data}'}), "(description=\n f'[chainer] F.local_response_normalization with placeholder', graph=\n graph, backend=['webgpu', 'webassembly'], inputs={x: vx.data}, expected\n ={y: vy.data})\n", (1667, 1848), False, 'from test.util import generate_kernel_test_case, wrap_template\n'), ((548, 566), 'webdnn.frontend.chainer.converter.ChainerConverter', 'ChainerConverter', ([], {}), '()\n', (564, 566), False, 'from webdnn.frontend.chainer.converter import ChainerConverter\n'), ((1482, 1500), 'webdnn.frontend.chainer.converter.ChainerConverter', 'ChainerConverter', ([], {}), '()\n', (1498, 1500), False, 'from webdnn.frontend.chainer.converter import ChainerConverter\n'), ((394, 420), 'numpy.random.rand', 'np.random.rand', (['(2)', '(4)', '(6)', '(8)'], {}), '(2, 4, 6, 8)\n', (408, 420), True, 'import numpy as np\n'), ((1132, 1160), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)', '(16)', '(16)'], {}), '(1, 3, 16, 16)\n', (1146, 1160), True, 'import numpy as np\n')] |
"""
Copyright 2016 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from vocab import *
import json
import os
# Ensure the output directory for the generated .jsonl splits exists.
if not os.path.exists('./data/'):
    os.mkdir('./data/')
def one_hot(v, ndim):
    """Return the one-hot encoding of an integer label vector.

    Args:
        v: sequence (or 1-D array) of integer class indices in [0, ndim).
        ndim: number of classes, i.e. the width of each one-hot row.
    Returns:
        float array of shape (len(v), ndim) with 1.0 at position v[i] per row.
    """
    v = np.asarray(v, dtype=np.intp)  # also accepts plain Python lists now
    v_one_hot = np.zeros((len(v), ndim))
    # Vectorised scatter: one fancy-index assignment instead of a Python loop.
    v_one_hot[np.arange(len(v)), v] = 1.0
    return v_one_hot
# Accumulators for the character corpus (x/y are reset again below before use).
x = []
y = []
all_chars = []
for book in [
    'pride_and_prejudice.txt',
    'shakespeare.txt'
]:
    # Lowercase, collapse whitespace, and keep only in-vocabulary characters.
    with open('books/%s' % book, 'r') as infile:
        chars = [
            c for c in ' '.join(infile.read().lower().split())
            if c in set(vocab)
        ]
        all_chars += [' ']
        all_chars += chars
# Collapse whitespace runs introduced by the per-book separator.
all_chars = list(' '.join(''.join(all_chars).split()))
num_chars = len(all_chars)
with open('cleaned.txt', 'w') as outfile:
    outfile.write(''.join(all_chars))
x, y = [], []
# Fractions of the corpus assigned to each split (order matters: splits are
# carved out of the corpus contiguously below).
data_portions = [
    ('train', 0.8),
    ('validate', 0.05),
    ('test', 0.05),
    ('evaluate', 0.05),
]
dev = False
if dev:
    # shrink data to make things go faster
    for i in range(len(data_portions)):
        data_portions[i] = (
            data_portions[i][0],
            data_portions[i][1] * 0.1
        )
# Number of training windows: total characters used minus one window length,
# so every window has a next-character target available.
max_i = sum([
    int(round(len(all_chars) * fraction))
    for name, fraction in data_portions
]) - seq_len
for i in range(max_i):
    in_char_seq = all_chars[i: i + seq_len]
    # one hot representation
    sample_x = np.zeros((len(in_char_seq), n_vocab,))
    for j, c in enumerate(in_char_seq):
        sample_x[j][char_to_int[c]] = 1
    x.append(sample_x)
    sample_y = np.zeros(n_vocab)
    sample_y[char_to_int[all_chars[i + seq_len]]] = 1
    y.append(sample_y)
x, y = np.array(x).astype('int32'), np.array(y).astype('int32')
start_i = 0
for name, fraction in data_portions:
    end_i = start_i + int(round(len(x) * fraction))
    print(start_i, end_i)
    x0 = x[start_i: end_i]
    y0 = y[start_i: end_i]
    print('dims:')
    print(x0.shape)
    print(y0.shape)
    start_i = end_i
    # One JSON object per line: the one-hot input window and its target char.
    with open('data/%s.jsonl' % name, 'w') as outfile:
        for sample_x, sample_y in zip(x0, y0):
            outfile.write(json.dumps({
                'in_seq': sample_x.tolist(),
                'out_char': sample_y.tolist()
            }))
            outfile.write('\n')
    # Free the split arrays before building the next one.
    del x0, y0
| [
"numpy.zeros",
"os.mkdir",
"os.path.exists",
"numpy.array"
] | [((627, 652), 'os.path.exists', 'os.path.exists', (['"""./data/"""'], {}), "('./data/')\n", (641, 652), False, 'import os\n'), ((658, 677), 'os.mkdir', 'os.mkdir', (['"""./data/"""'], {}), "('./data/')\n", (666, 677), False, 'import os\n'), ((2049, 2066), 'numpy.zeros', 'np.zeros', (['n_vocab'], {}), '(n_vocab)\n', (2057, 2066), True, 'import numpy as np\n'), ((2152, 2163), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2160, 2163), True, 'import numpy as np\n'), ((2181, 2192), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2189, 2192), True, 'import numpy as np\n')] |
import numpy as np
from pyproteas.cv2_wrapper import CV2Wrapper
from pyproteas.image import Image
from pyproteas.video import Video
from pyproteas.person import Person
from pyproteas.api import APIInterface
class PeopleCounting(object):
    """Count people in a video by contour analysis of binarized frames."""

    def __init__(self, name, **kwargs):
        self.name = name
        self.kwargs = kwargs
        self.cv = CV2Wrapper()
        self.video = Video(**self.kwargs)
        self.Image = Image(**self.kwargs)

    def run(self):
        """Return per-interval averaged (rounded) people counts for the video."""
        frames = self.__split_video(self.video.to_frames(), self.video.fps)
        count = np.array([])
        for arr in frames:
            _count = np.array([])
            for f in arr:
                # Binarize, then clean noise with open/close morphology.
                f = self.Image.morph_close(self.Image.morph_open(self.Image.binarize(f)))
                _contours = self.cv.contours(f)
                _count = np.append(_count, self.count(_contours))
            count = np.append(count, np.round(np.average(_count), 0))
        return count

    def __split_video(self, frames, fps):
        """Split frames into equal chunks of 'samplingInterval' seconds each."""
        fps = fps * self.kwargs['samplingInterval']
        trim_length = int(len(frames) % fps)
        if trim_length > 0:
            frames = frames[:-trim_length]
        return np.split(frames, len(frames)//fps)

    def count(self, contours, **kwargs):
        """
        Count contours whose area exceeds 'frameAreaThreshold', optionally
        rejecting centroids outside the vertical limits.
        Example: limits = [[x_0, x_1],[y_0, y_1]] where x_* are horizontal limits and y_* are vertical limits in the frame
        """
        centroids = list()
        limits = kwargs.get('limits', [])
        area_thresh = kwargs.get('frameAreaThreshold', 500)
        for c in contours:
            if self.cv.contour_area(c) > area_thresh:
                centroid = self.cv.centroid(c)
                centroids.append(centroid[0])
                # BUG FIX: range() needs scalar bounds; the original passed the
                # [y_0, y_1] list itself (range(limits[1])), a TypeError.
                if len(limits) > 0 and centroid[1] not in range(limits[1][0], limits[1][1]):
                    centroids.pop()
        return len(centroids)

    def track(self):
        pass
class FaceRecognition(object):
    # TODO: stub -- face recognition is not implemented yet.
    def __init__(self, name, **kwargs):
        pass
class MotionRecognition(object):
    # TODO: stub -- motion recognition is not implemented yet.
    def __init__(self, name, **kwargs):
        pass
| [
"numpy.average",
"pyproteas.cv2_wrapper.CV2Wrapper",
"pyproteas.image.Image",
"pyproteas.video.Video",
"numpy.array"
] | [((353, 365), 'pyproteas.cv2_wrapper.CV2Wrapper', 'CV2Wrapper', ([], {}), '()\n', (363, 365), False, 'from pyproteas.cv2_wrapper import CV2Wrapper\n'), ((387, 407), 'pyproteas.video.Video', 'Video', ([], {}), '(**self.kwargs)\n', (392, 407), False, 'from pyproteas.video import Video\n'), ((429, 449), 'pyproteas.image.Image', 'Image', ([], {}), '(**self.kwargs)\n', (434, 449), False, 'from pyproteas.image import Image\n'), ((562, 574), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (570, 574), True, 'import numpy as np\n'), ((623, 635), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (631, 635), True, 'import numpy as np\n'), ((912, 930), 'numpy.average', 'np.average', (['_count'], {}), '(_count)\n', (922, 930), True, 'import numpy as np\n')] |
from typing import Tuple, Dict, Optional
from utils.constants import *
import numpy as np
import torch.nn.functional as F
from torch.nn import DataParallel
from torchvision.utils import save_image
def l2_regularisation(m):
l2_reg = None
for W in m.parameters():
if l2_reg is None:
l2_reg = W.norm(2)
else:
l2_reg = l2_reg + W.norm(2)
return l2_reg
def save_example_images(images, batches_done, suffix, filesort):
    """
    save some plots in PIC_DIR
    """
    # Heuristic: values above 1 (with a non-negative minimum) are treated as
    # un-normalised pixel intensities and rescaled by save_image.
    normalize = True if images.max() > 1 and images.min() >= 0 else False
    save_image(images, f'./{PREFIX_OUTPUT}/{DATA_MANAGER.stamp}/{PIC_DIR}/{batches_done}_{suffix}.{filesort}',
               nrow=4, normalize=normalize)
def scheduler(i, D_number, G_number):
    """Decide which network(s) train at step *i*.

    Returns "DG" when both counts are 1 (train jointly every step). Otherwise
    the first D_number slots of each (D_number + G_number)-step cycle go to
    the discriminator ("D") and the remaining slots to the generator ("G").
    """
    if D_number == 1 and G_number == 1:
        return "DG"
    cycle = D_number + G_number
    return "D" if i % cycle < D_number else "G"
def compute_accuracy(predictions, targets):
    """
    Fraction of discriminator outputs that fall on the same side of 0.5 as
    their targets.
    """
    hard_preds = predictions > 0.5
    matches = (hard_preds == (targets > 0.5)).type(torch.DoubleTensor)
    accuracy = torch.mean(matches)
    hard_preds.detach()
    matches.detach()
    return accuracy.item()
def unpack_batch(batch: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
    # Move batch tensors to DEVICE as floats. Two-key batches (or three-key
    # batches under 25-class labels) yield (image, label); three-key batches
    # under 2-class labels also return the "dist" tensor.
    # NOTE(review): no branch matches other key-count/LABELS_CHANNELS
    # combinations, so the function would implicitly return None -- confirm
    # those configurations cannot occur.
    if len(batch.keys()) == 2 or (len(batch.keys()) == 3 and LABELS_CHANNELS == 25):
        return batch["image"].to(DEVICE).float(), batch["label"].to(DEVICE).float()
    elif len(batch.keys()) == 3 and LABELS_CHANNELS == 2:
        return batch["image"].to(DEVICE).float(), batch["label"].to(DEVICE).float(), batch["dist"].to(DEVICE).float()
def instance_checker(model, model_type):
    """Return True iff *model* (unwrapped from DataParallel if wrapped) is a *model_type*."""
    if isinstance(model, DataParallel):
        model = model.module
    return isinstance(model, model_type)
def renormalize(input_batch, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
    # Re-normalise a 3-channel batch: (x + 1) / 2 implies the input is assumed
    # to lie in [-1, 1]; it is mapped to [0, 1] and then standardised with the
    # given per-channel mean/std (defaults are the common ImageNet statistics).
    # NOTE: the list defaults are read-only here, so the mutable-default
    # pitfall does not apply.
    assert input_batch.shape[1] == 3, f"Channel dimension C = {str(input_batch.shape[1])}. Expected C = 3"
    denorm_input = (input_batch + 1)/2  # [-1, 1] -> [0, 1]
    shape = (1, 3, 1, 1)  # broadcast over batch and spatial dimensions
    mean = torch.tensor(mean).reshape(shape).to(DEVICE)
    std = torch.tensor(std).reshape(shape).to(DEVICE)
    renorm_output = (denorm_input - mean)/std
    return renorm_output
def refactor_batch(input_batch, size=224, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], device=DEVICE):
    # Upsample a 4-D batch to (size, size) and re-normalise it (see renormalize;
    # the input is therefore assumed to be in [-1, 1]).
    # NOTE(review): upsample_bilinear is deprecated in newer torch versions;
    # F.interpolate(..., mode="bilinear") is the modern spelling.
    assert len(input_batch.shape) == 4
    output_batch = renormalize(torch.nn.functional.upsample_bilinear(input_batch, (size, size)).to(device), mean, std)
    return output_batch
def torch_comp_along_dim(funct, input: Optional, *args, multiple_idxs=False, dim=0):
    """Apply *funct* once per batch element and stack the results along *dim*.

    If *input* is a 2-tuple, iteration runs over its second element; with
    multiple_idxs=True the first element is indexed in lockstep, otherwise it
    is passed whole each time. For non-tuple input, multiple_idxs=True indexes
    the first extra positional argument in lockstep with *input*.
    """
    if type(input) == tuple:
        assert len(input) == 2, "Expected tuple of size: 2, received tuple of size: " + str(len(input))
        if multiple_idxs:
            return torch.stack([funct((input[0][i], input[1][i]), *args) for i in range(len(input[1]))], dim=dim)
        else:
            return torch.stack([funct((input[0], input[1][i]), *args) for i in range(len(input[1]))], dim=dim)
    else:
        if multiple_idxs:
            return torch.stack([funct(input[i], args[0][i], *args[1:]) for i in range(len(input))], dim=dim)
        else:
            return torch.stack([funct(input[i], *args) for i in range(len(input))], dim=dim)
def comp_along_dim(funct, input: Optional, *args, multiple_idxs=False, dim=0):
    """List-returning analogue of torch_comp_along_dim (no torch.stack).

    NOTE(review): unlike torch_comp_along_dim, the non-tuple branch ignores
    multiple_idxs entirely, and the *dim* parameter is accepted but unused --
    confirm these asymmetries are intended.
    """
    if type(input) == tuple:
        assert len(input) == 2, "Expected tuple of size: 2, received tuple of size: " + str(len(input))
        if multiple_idxs:
            return [funct((input[0][i], input[1][i]), *args) for i in range(len(input[1]))]
        else:
            return [funct((input[0], input[1][i]), *args) for i in range(len(input[1]))]
    else:
        return [funct(input[i], *args) for i in range(len(input))]
def get_ce(pred: torch.Tensor, target: torch.Tensor, dim:int=CHANNEL_DIM)-> torch.Tensor:
    """Cross-entropy terms -target * log(pred) summed over *dim*; pred is clamped away from 0."""
    out = - target * torch.log(pred.clamp(min=1e-11)) # clamp to prevent gradient explosion
    return out.sum(dim)
def get_entropy(p: torch.Tensor, dim:int=CHANNEL_DIM) -> torch.Tensor:
    """Entropy terms -p * log(p) (clamped away from 0), summed over *dim* unless dim is None.

    Accepts either a torch tensor or a numpy array and uses the matching
    clamp/clip and log implementation.
    """
    if type(p) != np.ndarray:
        out = -p * p.clamp(min=1e-7).log()
    else:
        out = -p * np.log(p.clip(min=1e-7))
    if dim == None:
        return out
    else:
        return out.sum(dim)
def tile(a, dim, n_tile):
    """
    This function is taken form PyTorch forum and mimics the behavior of tf.tile.
    Source: https://discuss.pytorch.org/t/how-to-tile-a-tensor/13853/3
    """
    init_dim = a.size(dim)
    repeat_idx = [1] * a.dim()
    repeat_idx[dim] = n_tile
    a = a.repeat(*(repeat_idx))
    # Reorder so the copies of each original slice end up adjacent along *dim*
    # ([a, b] -> [a, a, b, b] for n_tile=2) instead of torch.repeat's
    # block-wise layout ([a, b, a, b]).
    order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)])).to(DEVICE)
    out= torch.index_select(a, dim, order_index)
    return out
def calibration_net_forward_pass(calibration_net, images, bb_preds, ign_idxs, args):
    """Run the calibration net on images (optionally concatenated with bb_preds)
    and temperature-scale its softmax output.

    Returns (logits, scaled softmax predictions, images concatenated with the
    detached predictions). When ign_idxs is given, the listed pixels are
    overwritten with a one-hot for class index 24 (the 'unlabelled' class).
    """
    if bb_preds is not None:
        p_input = torch.cat((images, bb_preds), dim=1)
    else:
        p_input = images
    calnet_preds_logits = calibration_net(p_input, return_logits=True)
    calnet_preds = F.softmax(calnet_preds_logits / args.temperature.reshape(1, LABELS_CHANNELS, 1, 1).to(DEVICE), dim=1) # perform temperature scaling
    if ign_idxs is not None:
        # set unlabelled pixels to class unlabelled
        w = torch.ones(calnet_preds.shape)
        w[ign_idxs[0], :, ign_idxs[1], ign_idxs[2]] = 0.
        r = torch.zeros(calnet_preds.shape)
        r[ign_idxs[0], 24, ign_idxs[1], ign_idxs[2]] = 1.
        calnet_preds = calnet_preds * w.to(DEVICE) + r.to(DEVICE)
    calnet_labelled_images = torch.cat((images, calnet_preds.detach()), dim=CHANNEL_DIM) # condition final prediction on input images and calibration net preds
    assert not torch.isnan(calnet_preds).any(), "Calibration net output is NaN"
    assert not torch.isinf(calnet_preds).any(), "Calibration net output is Inf"
    return calnet_preds_logits, calnet_preds, calnet_labelled_images
def generator_forward_pass(generator, images, calnet_labelled_images, ign_idxs, args):
    """Sample n_cal_samples predictions from the generator.

    The generator is conditioned on the raw images when no calibration net is
    used, otherwise on the (detached) calibration-net-labelled images. Returns
    the first sample, its image-concatenated form, and the full sample stacks.
    """
    g_input = images if args.calibration_net == "EmptyCalNet" else calnet_labelled_images.detach()
    pred_dist,_,_ = generator.sample(g_input, ign_idxs=ign_idxs, n_samples=args.n_cal_samples)
    pred_dist_labelled = torch_comp_along_dim(torch.cat, (images, pred_dist), CHANNEL_DIM, dim=0)
    assert not torch.isnan(pred_dist).any(), "Generator output is NaN"
    assert not torch.isinf(pred_dist).any(), "Generator output is Inf"
    preds = pred_dist[0]
    pred_labelled = pred_dist_labelled[0]
    return preds, pred_labelled, pred_dist, pred_dist_labelled
def discriminator_forward_pass(discriminator, true_labelled, pred_labelled, args):
    """Score a fake/real batch with the discriminator.

    Fake (generator) samples are concatenated before real ones; ground-truth
    labels are therefore zeros for the first half and ones for the second.
    Returns the combined input, scores, ground-truth labels, and accuracy.
    """
    assert not args.generator == "EmptyGenerator", "Need to have an active generator to use a discriminator"
    # concat true and fake
    combined_input = torch.cat((pred_labelled.detach(), true_labelled.detach()), dim=0) # todo should this be shuffled?
    # discriminator forward pass
    scores = discriminator(combined_input)
    assert not torch.isnan(scores).any(), "Discriminator output is NaN"
    assert not torch.isinf(scores).any(), "Discriminator output is Inf"
    shape = scores.shape
    shape = (shape[0]//2, *shape[1:])  # half the batch is fake, half real
    gt_labels = torch.cat((torch.zeros(shape).to(DEVICE), torch.ones(shape).to(DEVICE)), dim=0)
    # compute discriminator accuracy
    accuracy_discriminator = compute_accuracy(scores, gt_labels)
    return combined_input, scores, gt_labels, accuracy_discriminator
| [
"torchvision.utils.save_image",
"numpy.arange"
] | [((594, 738), 'torchvision.utils.save_image', 'save_image', (['images', 'f"""./{PREFIX_OUTPUT}/{DATA_MANAGER.stamp}/{PIC_DIR}/{batches_done}_{suffix}.{filesort}"""'], {'nrow': '(4)', 'normalize': 'normalize'}), "(images,\n f'./{PREFIX_OUTPUT}/{DATA_MANAGER.stamp}/{PIC_DIR}/{batches_done}_{suffix}.{filesort}'\n , nrow=4, normalize=normalize)\n", (604, 738), False, 'from torchvision.utils import save_image\n'), ((4783, 4800), 'numpy.arange', 'np.arange', (['n_tile'], {}), '(n_tile)\n', (4792, 4800), True, 'import numpy as np\n')] |
# Copyright 2021, <NAME>.
#
# Developed as a thesis project at the TORSEC research group of the Polytechnic of Turin (Italy) under the supervision
# of professor <NAME> and engineer <NAME> and with the support of engineer <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import json # json encoder and decoder
import os # provides a portable way of using operating system dependent functionality
import sys # system-specific parameters and functions
import numpy as np # the fundamental package for scientific computing with Python
import torch # tensor library like NumPy, with strong GPU support
from logzero import logger # robust and effective logging for Python
from torch.utils import data # used to import data.Dataset
class Dataset(data.Dataset):
    """ Fresh dataset class: serves (sha, features, label) triples backed by
    on-disk numpy memmaps (X features, y labels, S sha256 hashes). """
    # list of malware tags
    # NOTE(review): 'tags' is not referenced inside this class -- presumably
    # consumed by external callers; confirm before removing.
    tags = ["adware", "flooder", "ransomware", "dropper", "spyware", "packed",
            "crypto_miner", "file_infector", "installer", "worm", "downloader"]
    def __init__(self,
                 S,  # already initialized memmap containing the sha256 hashes of samples from the Fresh Dataset
                 X,  # already initialized tensor (memmap) containing the features of samples from the Fresh Dataset
                 y,  # already initialized tensor (memmap) containing the labels of samples from the Fresh Dataset
                 sig_to_label_dict,  # signature-to-label dict
                 return_shas=False):  # whether to return the sha256 of the data points or not
        """ Initialize fresh dataset given a set of already initialized tensors (memmaps).
        Args:
            S: Already initialized memmap containing the sha256 hashes of samples from the Fresh Dataset
            X: Already initialized tensor (memmap) containing the features of samples from the Fresh Dataset
            y: Already initialized tensor (memmap) containing the labels of samples from the Fresh Dataset
            sig_to_label_dict: Signature-to-label dict
            return_shas: Whether to return the sha256 of the data points or not (default: False)
        """
        # set current instance tensors
        self.S = S
        self.X = X
        self.y = y
        # get total number of samples from the memmap containing the sample shas
        self.N = S.shape[0]
        # set current instance sig to label dict
        self.sig_to_label_dict = sig_to_label_dict
        # compute number of families of the dataset from the sig to label dict
        self.n_families = len(sig_to_label_dict.keys())
        self.return_shas = return_shas
        # generate signature-to-label inverse dictionary (label-to-signature)
        self.sig_to_label_inv_dict = {v: k for k, v in self.sig_to_label_dict.items()}
    @classmethod
    def from_file(cls,
                  ds_root,  # fresh dataset root directory (where to find .dat files)
                  return_shas=False):  # whether to return the sha256 of the data points or not
        """ Open fresh dataset from file and initialize the corresponding Fresh Dataset instance.
        Args:
            ds_root: Fresh dataset root directory (where to find .dat files)
            return_shas: Whether to return the sha256 of the data points or not (default: False)
        """
        # set feature dimension
        ndim = 2381
        # generate X (features vector), y (labels vector) and S (shas) file names
        X_path = os.path.join(ds_root, "X_fresh.dat")
        y_path = os.path.join(ds_root, "y_fresh.dat")
        S_path = os.path.join(ds_root, "S_fresh.dat")
        # generate sig-to-label filename
        sig_to_label_path = os.path.join(ds_root, "sig_to_label.json")
        # if at least one of those files does not exist -> error
        if not (os.path.exists(X_path)
                and os.path.exists(y_path)
                and os.path.exists(S_path)
                and os.path.exists(sig_to_label_path)):
            logger.error("Fresh Dataset's X, y, S files not found.")
            sys.exit(1)
        # open signature-to-label file and load its content in signature-to-label dict
        with open(sig_to_label_path, 'r') as sig_to_label_file:
            sig_to_label_dict = json.load(sig_to_label_file)
        logger.info('Opening fresh Dataset at {}.'.format(ds_root))
        # open S (shas) memory map in Read+ mode (+ because pytorch does not support read only ndarrays)
        S = np.memmap(S_path, dtype=np.dtype('U64'), mode="r+")
        # get number of elements from S vector
        N = S.shape[0]
        # open y (labels) memory map in Read+ mode (+ because pytorch does not support read only ndarrays)
        y = torch.from_numpy(np.memmap(y_path, dtype=np.float32, mode="r+", shape=(N,)))
        # open X (features) memory map in Read+ mode (+ because pytorch does not support read only ndarrays)
        X = torch.from_numpy(np.memmap(X_path, dtype=np.float32, mode="r+", shape=(N, ndim)))
        logger.info("{} samples loaded.".format(N))
        # instantiate a Fresh Dataset instance with the just opened tensors (memmaps) and return it
        return cls(S, X, y, sig_to_label_dict=sig_to_label_dict, return_shas=return_shas)
    def __len__(self):
        """ Get Dataset total length.
        Returns:
            Dataset length.
        """
        return self.N  # return the total number of samples
    def __getitem__(self,
                    index):  # index of the item to get
        """ Get item from dataset.
        Args:
            index: Index of the item to get
        Returns:
            Sha256 (if required), features and labels associated to the sample with index 'index'.
        """
        # get feature vector
        features = self.X[index]
        # get label
        label = self.y[index]
        if self.return_shas:
            # get sha256
            sha = self.S[index]
            # return sha256, features and label associated to the sample with index 'index'
            return sha, features, label
        else:
            # return features and label associated to the sample with index 'index'
            return features, label
    def sig_to_label(self,
                     sig):  # family signature
        """ Convert family signature to numerical label.
        Args:
            sig: Family signature
        Returns:
            Numerical label.
        """
        # return corresponding label
        return self.sig_to_label_dict[sig]
    def label_to_sig(self,
                     label):  # numerical label
        """ Convert numerical label to family signature.
        Args:
            label: Numerical label
        Returns:
            Family signature.
        """
        # return corresponding family signature
        return self.sig_to_label_inv_dict[label]
    def get_as_tensors(self):
        """ Get dataset tensors (numpy memmap arrays).
        Returns:
            S (shas, if requested), X (features) and y (labels) dataset tensors.
        """
        if self.return_shas:
            return self.S, self.X, self.y
        else:
            return self.X, self.y
| [
"json.load",
"numpy.dtype",
"os.path.exists",
"numpy.memmap",
"os.path.join",
"sys.exit",
"logzero.logger.error"
] | [((3958, 3994), 'os.path.join', 'os.path.join', (['ds_root', '"""X_fresh.dat"""'], {}), "(ds_root, 'X_fresh.dat')\n", (3970, 3994), False, 'import os\n'), ((4012, 4048), 'os.path.join', 'os.path.join', (['ds_root', '"""y_fresh.dat"""'], {}), "(ds_root, 'y_fresh.dat')\n", (4024, 4048), False, 'import os\n'), ((4066, 4102), 'os.path.join', 'os.path.join', (['ds_root', '"""S_fresh.dat"""'], {}), "(ds_root, 'S_fresh.dat')\n", (4078, 4102), False, 'import os\n'), ((4172, 4214), 'os.path.join', 'os.path.join', (['ds_root', '"""sig_to_label.json"""'], {}), "(ds_root, 'sig_to_label.json')\n", (4184, 4214), False, 'import os\n'), ((4474, 4530), 'logzero.logger.error', 'logger.error', (['"""Fresh Dataset\'s X, y, S files not found."""'], {}), '("Fresh Dataset\'s X, y, S files not found.")\n', (4486, 4530), False, 'from logzero import logger\n'), ((4543, 4554), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4551, 4554), False, 'import sys\n'), ((4739, 4767), 'json.load', 'json.load', (['sig_to_label_file'], {}), '(sig_to_label_file)\n', (4748, 4767), False, 'import json\n'), ((5214, 5272), 'numpy.memmap', 'np.memmap', (['y_path'], {'dtype': 'np.float32', 'mode': '"""r+"""', 'shape': '(N,)'}), "(y_path, dtype=np.float32, mode='r+', shape=(N,))\n", (5223, 5272), True, 'import numpy as np\n'), ((5413, 5476), 'numpy.memmap', 'np.memmap', (['X_path'], {'dtype': 'np.float32', 'mode': '"""r+"""', 'shape': '(N, ndim)'}), "(X_path, dtype=np.float32, mode='r+', shape=(N, ndim))\n", (5422, 5476), True, 'import numpy as np\n'), ((4297, 4319), 'os.path.exists', 'os.path.exists', (['X_path'], {}), '(X_path)\n', (4311, 4319), False, 'import os\n'), ((4340, 4362), 'os.path.exists', 'os.path.exists', (['y_path'], {}), '(y_path)\n', (4354, 4362), False, 'import os\n'), ((4383, 4405), 'os.path.exists', 'os.path.exists', (['S_path'], {}), '(S_path)\n', (4397, 4405), False, 'import os\n'), ((4426, 4459), 'os.path.exists', 'os.path.exists', (['sig_to_label_path'], {}), 
'(sig_to_label_path)\n', (4440, 4459), False, 'import os\n'), ((4979, 4994), 'numpy.dtype', 'np.dtype', (['"""U64"""'], {}), "('U64')\n", (4987, 4994), True, 'import numpy as np\n')] |
import numpy as np
from layers import Operation, MSE
class LinearRegression:
    """Single-feature linear regression (y = m*x + b) trained with an MSE loss."""

    def __init__(self):
        # Small random initialisation for the slope and the bias.
        slope = 0.01 * np.random.randn(1, 1)
        bias = 0.01 * np.random.randn(1, 1)

        # Network layers and the loss layer.
        self.layers = [Operation(slope, bias)]
        self.loss_layer = MSE()

        # Collect every parameter and gradient into flat lists.
        self.params = []
        self.grads = []
        for layer in self.layers:
            self.params += layer.params
            self.grads += layer.grads

    def predict(self, x):
        """Forward pass through every layer."""
        out = x
        for layer in self.layers:
            out = layer.forward(out)
        return out

    def forward(self, x, t):
        """Return the MSE loss between the prediction for x and target t."""
        score = self.predict(x)
        return self.loss_layer.forward(score, t)

    def backward(self, dout=1):
        """Backpropagate from the loss through all layers; return the input gradient."""
        grad = self.loss_layer.backward(dout)
        for layer in reversed(self.layers):
            grad = layer.backward(grad)
        return grad

    def get_params(self):
        """Return a freshly collected list of all layer parameters."""
        collected = []
        for layer in self.layers:
            collected += layer.params
        return collected
"layers.Operation",
"layers.MSE",
"numpy.random.randn"
] | [((312, 317), 'layers.MSE', 'MSE', ([], {}), '()\n', (315, 317), False, 'from layers import Operation, MSE\n'), ((143, 164), 'numpy.random.randn', 'np.random.randn', (['(1)', '(1)'], {}), '(1, 1)\n', (158, 164), True, 'import numpy as np\n'), ((184, 205), 'numpy.random.randn', 'np.random.randn', (['(1)', '(1)'], {}), '(1, 1)\n', (199, 205), True, 'import numpy as np\n'), ((259, 274), 'layers.Operation', 'Operation', (['m', 'b'], {}), '(m, b)\n', (268, 274), False, 'from layers import Operation, MSE\n')] |
import pickle, os, json
import numpy as np
num_args = 9
def get_data(path):
    '''
    func desc:
        reads the pickle file and returns its contents as a list of num_args arrays
        expected order in pickle file is NUMPY arrays x, l, m, L, d, r, s, n, k,
        dumped sequentially (one pickle.dump call per array)
    x: [num_instances, num_features]
    l: [num_instances, num_rules]
    m: [num_instances, num_rules]
    L: [num_instances, 1]
    d: [num_instances, 1]
    r: [num_instances, num_rules]
    s: [num_instances, num_rules]
    n: [num_rules] Mask for s
    k: [num_rules] LF classes, range 0 to num_classes-1
    '''
    data = []
    with open(path, 'rb') as file:
        # BUG FIX: the nine objects are pickled back-to-back, so pickle.load
        # must be called once per object. The original single load+append left
        # len(data) == 1, so the assertion below could never pass.
        for _ in range(num_args):
            data.append(pickle.load(file))
    assert len(data) == num_args
    return data
def analyze_w_predictions(x,l,m,L,d,weights,probs,rule_classes):
    '''
    func desc:
        analyze the rule network by computing the precisions of the rules and comparing old and new rule stats
    input:
        x: [num_instances, num_features]
        l: [num_instances, num_rules]
        m: [num_instances, num_rules]
        L: [num_instances, 1]
        d: [num_instances, 1]
        weights: [num_instances, num_rules]
        probs: [num_instances, num_classes]
        rule_classes: [num_rules,1]
    output:
        void, prints the required statistics
    '''
    num_classes = probs.shape[1]
    # Hard-threshold the learnt weights and restrict them to instances the
    # original rules covered, then derive the corresponding label matrix.
    new_m = convert_weights_to_m(weights) * m
    new_l = convert_m_to_l(new_m,rule_classes,num_classes)
    o_micro,o_marco_p,o_rp = get_rule_precision(l,L,m)
    n_mirco,new_macro_p,n_rp = get_rule_precision(new_l,L,new_m)
    # NOTE(review): the macro precisions and per-rule arrays are computed above
    # but never printed -- confirm whether they were meant to be reported too.
    print("old micro precision: ", o_micro)
    print("new micro precision: ", n_mirco)
    print("old rule firings: ", np.sum(m))
    print("new rule firings: ", np.sum(new_m))
    print("old rule coverage: ", len([i for i in m if sum(i) > 0]))
    print("new rule coverage: ", len([i for i in new_m if sum(i) > 0]))
def convert_weights_to_m(weights):
    '''
    func desc:
        threshold the rule-network weights at 0.5 to obtain a hard coverage matrix
    input:
        weights([batch_size, num_rules]) - soft coverage weights from the rule network (w_network)
    output:
        m([batch_size, num_rules]) - int32 matrix where m_ij = 1 iff weights_ij > 0.5
    '''
    return (weights > 0.5).astype(np.int32)
def convert_m_to_l(m, rule_classes, num_classes):
    '''
    func desc:
        converts the coverage matrix m to the rule-label matrix l
    input:
        m([batch_size, num_rules]) - the rule coverage matrix where m_ij = 1 if jth rule covers ith instance
        rule_classes - per-rule class ids, length num_rules
        num_classes(non_negative integer) - number of available classes
    output:
        l([batch_size, num_rules]) - class of rule j where it fires, num_classes elsewhere
    '''
    # Broadcasting a (1, num_rules) row replaces the original Python-level
    # replication np.array([rule_classes] * batch_size): same result, without
    # materialising batch_size copies of the rule-class vector.
    rule_row = np.asarray(rule_classes).reshape(1, -1)
    return m * rule_row + (1 - m) * num_classes
def get_rule_precision(l, L, m):
    '''
    func desc:
        compute micro and macro precision of the rules against the gold labels
    input:
        l([batch_size, num_rules]) - labels assigned by the rules
        L([batch_size, 1]) - gold labels
        m([batch_size, num_rules]) - the rule coverage matrix where m_ij = 1 if jth rule covers ith instance
    output:
        micro_p - total correct firings / total firings
        macro_p - mean per-rule precision over rules with non-zero support
        per-rule precision array of shape [num_rules]
    '''
    L = L.reshape([L.shape[0], 1])
    # BUG FIX: np.float was removed in NumPy 1.24; the builtin float is the
    # equivalent (float64) and works on every NumPy version.
    comp = np.equal(l, L).astype(float)
    comp = comp * m
    comp = np.sum(comp, 0)
    support = np.sum(m, 0)
    micro_p = np.sum(comp) / np.sum(support)
    per_rule = comp / (support + 1e-25)  # epsilon guards rules with zero support
    macro_p = np.mean(per_rule[support > 0])
    return micro_p, macro_p, per_rule
# from utils
def merge_dict_a_into_b(a, b):
    '''
    func desc:
        copy every entry of a into b in place; asserts that no key collides
    input:
        a, b : dicts
    output:
        void (b is mutated)
    '''
    for key in a:
        assert key not in b
    b.update(a)
def print_tf_global_variables():
    '''
    Func Desc:
        pretty-print every TensorFlow (v1 compat) global variable
    Input:
    Output:
    '''
    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()
    names = [str(foo) for foo in tf.global_variables()]
    print(json.dumps(names, indent=4))
def print_var_list(var_list):
    '''
    Func Desc:
        pretty-print the string form of every entry in the given variable list
    Input:
        var_list
    Output:
    '''
    as_strings = [str(foo) for foo in var_list]
    print(json.dumps(as_strings, indent=4))
def pretty_print(data_structure):
    '''
    Func Desc:
        print the given data structure as indented JSON
    Input:
        data_structure
    Output:
    '''
    rendered = json.dumps(data_structure, indent=4)
    print(rendered)
def get_list_or_None(s, dtype=int):
    '''
    Func Desc:
        parse a comma-separated string into a list of dtype values
    Input:
        s - string
        dtype function (default - int)
    Output:
        None when s is empty/whitespace, otherwise the parsed list
    '''
    stripped = s.strip()
    if not stripped:
        return None
    return [dtype(token) for token in stripped.split(',')]
def get_list(s):
    '''
    Func Desc:
        like get_list_or_None, but maps the None case to an empty list
    Input:
        s - comma-separated string
    Output:
        lst - list (possibly empty)
    '''
    parsed = get_list_or_None(s)
    return [] if parsed is None else parsed
def None_if_zero(n):
    '''
    Func Desc:
        map non-positive values to None
    Input:
        n - integer
    Output:
        n when n > 0, otherwise None
    '''
    return n if n > 0 else None
def boolean(s):
    '''
    Func Desc:
        parse the strings 'True'/'False' into booleans
    Input:
        s - string
    Output:
        boolean, or ValueError for any other input
    '''
    mapping = {'True': True, 'False': False}
    if s in mapping:
        return mapping[s]
    raise ValueError('Invalid boolean value: %s' % s)
def set_to_list_of_values_if_None_or_empty(lst, val, num_vals):
    '''
    Func Desc:
        return lst unchanged when non-empty (it must then hold num_vals
        entries), otherwise a num_vals-long list filled with val
    Input:
        lst - list
        val - value
        num_vals (integer) - expected length of lst
    Output:
        lst, or [val] * num_vals
    '''
    if lst:
        print(len(lst), num_vals)
        assert len(lst) == num_vals
        return lst
    return [val] * num_vals
# from snorkel_utils
def conv_l_to_lsnork(l, m):
    '''
    func desc:
        in snorkel convention
        if a rule does not cover an instance assign it label -1
        we follow the convention where we assign the label num_classes instead of -1
        valid class labels range from {0,1,...num_classes-1}
        conv_l_to_lsnork: converts l in our format to snorkel's format
    input:
        l([batch_size, num_rules]) - rule label matrix
        m([batch_size, num_rules]) - rule coverage matrix
    output:
        lsnork([batch_size, num_rules])
    '''
    lsnork = l * m + -1 * (1 - m)
    # Fix: np.int was removed in NumPy 1.24; the builtin int keeps the
    # original behaviour (platform default integer dtype).
    return lsnork.astype(int)
# from metric_utils
def compute_accuracy(support, recall):
    '''
    func desc:
        support-weighted average of per-rule recall
    input:
        support
        recall
    output:
        accuracy
    '''
    weighted = np.multiply(support, recall)
    return np.sum(weighted) / np.sum(support)
# from data_utils
def dump_labels_to_file(save_filename, x, l, m, L, d, weights=None, f_d_U_probs=None, rule_classes=None):
    '''
    Func Desc:
        dumps the given data into a pickle file, in this fixed order:
        x, l, m, L, d, then optionally weights, f_d_U_probs, rule_classes
    Input:
        save_filename - the name of the pickle file in which the arguments/data is required to be saved
        x ([batch_size x num_features])
        l ([batch_size x num_rules])
        m ([batch_size x num_rules])
        L ([batch_size x 1])
        d ([batch_size x 1])
        weights (default - None)
        f_d_U_probs (default - None)
        rule_classes (default - None)
    Output:
    '''
    # Fix: a context manager guarantees the handle is closed even when a
    # dump raises; the original open/close pair leaked it on exceptions.
    with open(save_filename, 'wb') as save_file:
        pickle.dump(x, save_file)
        pickle.dump(l, save_file)
        pickle.dump(m, save_file)
        pickle.dump(L, save_file)
        pickle.dump(d, save_file)
        if weights is not None:
            pickle.dump(weights, save_file)
        if f_d_U_probs is not None:
            pickle.dump(f_d_U_probs, save_file)
        if rule_classes is not None:
            pickle.dump(rule_classes, save_file)
def load_from_pickle_with_per_class_sampling_factor(fname, per_class_sampling_factor):
    '''
    Func Desc:
        load the data from the given pickle file, replicating each example
        per_class_sampling_factor[label] times
    Input:
        fname - name of the pickle file from which data need to be loaded
        per_class_sampling_factor - per-class replication counts, indexed by label
    Output:
        the required matrices
        x1 ([batch_size x num_features])
        l1 ([batch_size x num_rules])
        m1 ([batch_size x num_rules])
        L1 ([batch_size x 1])
        d1 ([batch_size x 1])
    '''
    with open(fname, 'rb') as f:
        x = pickle.load(f)
        l = pickle.load(f)
        m = pickle.load(f)
        L = pickle.load(f)
        d = np.squeeze(pickle.load(f))
    x1, l1, m1, L1, d1 = [], [], [], [], []
    for xx, ll, mm, LL, dd in zip(x, l, m, L, d):
        # Replicate this example according to its true label LL.
        n_copies = per_class_sampling_factor[LL]
        x1.extend([xx] * n_copies)
        l1.extend([ll] * n_copies)
        m1.extend([mm] * n_copies)
        L1.extend([LL] * n_copies)
        d1.extend([dd] * n_copies)
    return np.array(x1), np.array(l1), np.array(m1), np.array(L1), np.array(d1)
def combine_d_covered_U_pickles(d_name, infer_U_name, out_name, d_sampling_factor, U_sampling_factor):
    '''
    Func Desc:
        combine the labelled and unlabelled data, merge the corresponding
        matrices together and dump them (x, l, m, L, d) to a new pickle file
    Input:
        d_name - the pickle file storing labelled data
        infer_U_name - the pickle file storing unlabelled data
        out_name - the name of the file where merged output needs to be stored
        d_sampling_factor - the per_class_sampling_factor for labelled data
        U_sampling_factor - the per_class_sampling_factor for unlabelled data
    Output:
    '''
    d_data = load_from_pickle_with_per_class_sampling_factor(d_name, d_sampling_factor)
    U_data = load_from_pickle_with_per_class_sampling_factor(infer_U_name, U_sampling_factor)
    # Concatenate each labelled matrix with its unlabelled counterpart,
    # keeping the (x, l, m, L, d) order.
    merged = [np.concatenate(pair) for pair in zip(d_data, U_data)]
    with open(out_name, 'wb') as out_file:
        for matrix in merged:
            pickle.dump(matrix, out_file)
# from learn2reweight_utils
def updated_theta_copy(grads, variables, lr, mode):
    '''
    Func Desc:
        return a copy of the variables stepped along the gradients
    Input:
        grads - gradients
        variables
        lr - learning rate
        mode - 1 for ascent (v + lr*g), -1 for descent (v - lr*g)
    Output:
        vals - list of the updated variables
    Raises:
        ValueError - for any other mode (the original code called
        print(exit(1)) here, terminating the process via SystemExit).
    '''
    if mode == 1:
        return [v + lr * g for g, v in zip(grads, variables)]
    if mode == -1:
        return [v - lr * g for g, v in zip(grads, variables)]
    raise ValueError("invalid mode: expected 1 or -1, got %r" % (mode,))
| [
"pickle.dump",
"numpy.sum",
"json.dumps",
"numpy.equal",
"numpy.mean",
"numpy.array",
"pickle.load",
"tensorflow.compat.v1.global_variables",
"tensorflow.compat.v1.disable_v2_behavior",
"numpy.concatenate"
] | [((2503, 2540), 'numpy.array', 'np.array', (['([rule_classes] * m.shape[0])'], {}), '([rule_classes] * m.shape[0])\n', (2511, 2540), True, 'import numpy as np\n'), ((3084, 3099), 'numpy.sum', 'np.sum', (['comp', '(0)'], {}), '(comp, 0)\n', (3090, 3099), True, 'import numpy as np\n'), ((3110, 3122), 'numpy.sum', 'np.sum', (['m', '(0)'], {}), '(m, 0)\n', (3116, 3122), True, 'import numpy as np\n'), ((3327, 3343), 'numpy.mean', 'np.mean', (['macro_p'], {}), '(macro_p)\n', (3334, 3343), True, 'import numpy as np\n'), ((3764, 3788), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (3786, 3788), True, 'import tensorflow.compat.v1 as tf\n'), ((6940, 6965), 'pickle.dump', 'pickle.dump', (['x', 'save_file'], {}), '(x, save_file)\n', (6951, 6965), False, 'import pickle, os, json\n'), ((6967, 6992), 'pickle.dump', 'pickle.dump', (['l', 'save_file'], {}), '(l, save_file)\n', (6978, 6992), False, 'import pickle, os, json\n'), ((6994, 7019), 'pickle.dump', 'pickle.dump', (['m', 'save_file'], {}), '(m, save_file)\n', (7005, 7019), False, 'import pickle, os, json\n'), ((7021, 7046), 'pickle.dump', 'pickle.dump', (['L', 'save_file'], {}), '(L, save_file)\n', (7032, 7046), False, 'import pickle, os, json\n'), ((7048, 7073), 'pickle.dump', 'pickle.dump', (['d', 'save_file'], {}), '(d, save_file)\n', (7059, 7073), False, 'import pickle, os, json\n'), ((8134, 8146), 'numpy.array', 'np.array', (['x1'], {}), '(x1)\n', (8142, 8146), True, 'import numpy as np\n'), ((8153, 8165), 'numpy.array', 'np.array', (['l1'], {}), '(l1)\n', (8161, 8165), True, 'import numpy as np\n'), ((8172, 8184), 'numpy.array', 'np.array', (['m1'], {}), '(m1)\n', (8180, 8184), True, 'import numpy as np\n'), ((8191, 8203), 'numpy.array', 'np.array', (['L1'], {}), '(L1)\n', (8199, 8203), True, 'import numpy as np\n'), ((8210, 8222), 'numpy.array', 'np.array', (['d1'], {}), '(d1)\n', (8218, 8222), True, 'import numpy as np\n'), ((9145, 9171), 'numpy.concatenate', 
'np.concatenate', (['(d_x, U_x)'], {}), '((d_x, U_x))\n', (9159, 9171), True, 'import numpy as np\n'), ((9177, 9203), 'numpy.concatenate', 'np.concatenate', (['(d_l, U_l)'], {}), '((d_l, U_l))\n', (9191, 9203), True, 'import numpy as np\n'), ((9209, 9235), 'numpy.concatenate', 'np.concatenate', (['(d_m, U_m)'], {}), '((d_m, U_m))\n', (9223, 9235), True, 'import numpy as np\n'), ((9241, 9267), 'numpy.concatenate', 'np.concatenate', (['(d_L, U_L)'], {}), '((d_L, U_L))\n', (9255, 9267), True, 'import numpy as np\n'), ((9311, 9337), 'numpy.concatenate', 'np.concatenate', (['(d_d, U_d)'], {}), '((d_d, U_d))\n', (9325, 9337), True, 'import numpy as np\n'), ((606, 623), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (617, 623), False, 'import pickle, os, json\n'), ((1562, 1571), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (1568, 1571), True, 'import numpy as np\n'), ((1602, 1615), 'numpy.sum', 'np.sum', (['new_m'], {}), '(new_m)\n', (1608, 1615), True, 'import numpy as np\n'), ((3133, 3145), 'numpy.sum', 'np.sum', (['comp'], {}), '(comp)\n', (3139, 3145), True, 'import numpy as np\n'), ((3146, 3161), 'numpy.sum', 'np.sum', (['support'], {}), '(support)\n', (3152, 3161), True, 'import numpy as np\n'), ((4194, 4230), 'json.dumps', 'json.dumps', (['data_structure'], {'indent': '(4)'}), '(data_structure, indent=4)\n', (4204, 4230), False, 'import pickle, os, json\n'), ((6327, 6351), 'numpy.sum', 'np.sum', (['(support * recall)'], {}), '(support * recall)\n', (6333, 6351), True, 'import numpy as np\n'), ((6354, 6369), 'numpy.sum', 'np.sum', (['support'], {}), '(support)\n', (6360, 6369), True, 'import numpy as np\n'), ((7102, 7133), 'pickle.dump', 'pickle.dump', (['weights', 'save_file'], {}), '(weights, save_file)\n', (7113, 7133), False, 'import pickle, os, json\n'), ((7166, 7201), 'pickle.dump', 'pickle.dump', (['f_d_U_probs', 'save_file'], {}), '(f_d_U_probs, save_file)\n', (7177, 7201), False, 'import pickle, os, json\n'), ((7235, 7271), 'pickle.dump', 
'pickle.dump', (['rule_classes', 'save_file'], {}), '(rule_classes, save_file)\n', (7246, 7271), False, 'import pickle, os, json\n'), ((7789, 7803), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7800, 7803), False, 'import pickle, os, json\n'), ((7810, 7824), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7821, 7824), False, 'import pickle, os, json\n'), ((7831, 7845), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7842, 7845), False, 'import pickle, os, json\n'), ((7852, 7866), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7863, 7866), False, 'import pickle, os, json\n'), ((9381, 9405), 'pickle.dump', 'pickle.dump', (['x', 'out_file'], {}), '(x, out_file)\n', (9392, 9405), False, 'import pickle, os, json\n'), ((9408, 9432), 'pickle.dump', 'pickle.dump', (['l', 'out_file'], {}), '(l, out_file)\n', (9419, 9432), False, 'import pickle, os, json\n'), ((9435, 9459), 'pickle.dump', 'pickle.dump', (['m', 'out_file'], {}), '(m, out_file)\n', (9446, 9459), False, 'import pickle, os, json\n'), ((9462, 9486), 'pickle.dump', 'pickle.dump', (['L', 'out_file'], {}), '(L, out_file)\n', (9473, 9486), False, 'import pickle, os, json\n'), ((9489, 9513), 'pickle.dump', 'pickle.dump', (['d', 'out_file'], {}), '(d, out_file)\n', (9500, 9513), False, 'import pickle, os, json\n'), ((3028, 3042), 'numpy.equal', 'np.equal', (['l', 'L'], {}), '(l, L)\n', (3036, 3042), True, 'import numpy as np\n'), ((7884, 7898), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7895, 7898), False, 'import pickle, os, json\n'), ((3828, 3849), 'tensorflow.compat.v1.global_variables', 'tf.global_variables', ([], {}), '()\n', (3847, 3849), True, 'import tensorflow.compat.v1 as tf\n')] |
# -*- coding: utf-8 -*-
"""
Created on 2020/8/5
@project: SPAIC
@filename: Neuron
@author: <NAME>
@contact: <EMAIL>
@description:
定义神经集群和神经元模型。
神经元集群保存神经元数目、神经元编号、类型、模型、模型参数、神经元位置等信息,参与网络构建
"""
# print('__file__={0:<35} | __name__={1:<20} | __package__={2:<20}'.format(__file__,__name__,str(__package__)))
import numpy as np
from ..Network import Assembly
from abc import ABC, abstractmethod
from collections import OrderedDict
from ..Network.BaseModule import VariableAgent
import re
# from brian2 import *
class NeuronGroup(Assembly):
    '''Class for a group of neurons.

    Stores the neuron count/shape, type, position and neuron model, and
    translates the model's variables and operations into backend objects
    when ``build`` is called.
    '''
    _class_label = '<neg>'
    _is_terminal = True

    def __init__(self, neuron_number=None,
                 neuron_shape=None,
                 neuron_type=('excitatory', 'inhibitory', 'pyramidal', '...'),
                 neuron_position=('x, y, z' or 'x, y'),
                 neuron_model=None,
                 name=None,
                 **kwargs
                 ):
        '''
        Args:
            neuron_number (int, optional): number of neurons in the group.
            neuron_shape (sequence, optional): shape of the group; must be
                consistent with neuron_number when both are given.
            neuron_type: neuron type tag(s); the tuple default acts as a
                "not given" sentinel and maps to ['nontype'].
            neuron_position: per-neuron coordinates; the default acts as a
                "not given" sentinel and maps to [].
            neuron_model: model name (str) registered with NeuronModel, or
                a NeuronModel instance built with @custom_model().
            name (str, optional): assembly name.
            **kwargs: forwarded to the model constructor; 'outlayer' is
                also read here.
        '''
        super(NeuronGroup, self).__init__(name=name)
        self.set_num_shape(num=neuron_number, shape=neuron_shape)
        self.outlayer = kwargs.get("outlayer", False)
        if neuron_type == ('excitatory', 'inhibitory', 'pyramidal', '...'):
            self.type = ['nontype']
        elif isinstance(neuron_type, list):
            self.type = neuron_type
        else:
            self.type = [neuron_type]

        if neuron_position == ('x, y, z' or 'x, y'):
            self.position = []
        else:
            neuron_position = np.array(neuron_position)
            assert neuron_position.shape[0] == neuron_number, " Neuron_position not equal to neuron number"
            self.position = neuron_position

        self.parameters = kwargs
        if isinstance(neuron_model, str):
            self.model_class = NeuronModel.apply_model(neuron_model)
            self.model_name = neuron_model  # self.neuron_model -> self.model_name
            self.model = None
        elif isinstance(neuron_model, NeuronModel):
            self.model = neuron_model
            self.model_class = None
            self.model_name = 'custom_model'
        else:
            raise ValueError(" only support set neuron model with string or NeuronModel class constructed by @custom_model()")

        self._var_names = list()
        self._var_dict = dict()
        self._operations = OrderedDict()

    def set_num_shape(self, num, shape):
        '''Reconcile neuron number and shape; either one may be omitted.'''
        self.num = num
        self.shape = shape
        if self.shape is not None:
            num = np.prod(self.shape)
            if self.num is None:
                self.num = num
            else:
                assert self.num == num, "the neuron number is not accord with neuron shape"
        elif self.num is not None:
            self.shape = [self.num]
        else:
            # Fix: the original built this ValueError without raising it,
            # silently leaving num/shape as None.
            raise ValueError("neither neuron number nor neuron shape is defined")

    def set_parameter(self):
        pass

    def get_model(self):
        return self.model

    def add_neuron_label(self, key: str):
        '''Prefix variable name(s) with this group's id.

        Accepts a single name or a list/tuple of names; a trailing
        '[updated]' tag is kept outside the '{...}' wrapper.  Entries that
        are VariableAgent instances are already fully named.
        '''
        if isinstance(key, str):
            if '[updated]' in key:
                return self.id + ':' + '{' + key.replace('[updated]', "") + '}' + '[updated]'
            else:
                return self.id + ':' + '{' + key + '}'
        elif isinstance(key, list) or isinstance(key, tuple):
            keys = []
            for k in key:
                if isinstance(k, str):
                    if '[updated]' in k:
                        mk = self.id + ':' + '{' + k.replace('[updated]', "") + '}' + '[updated]'
                    else:
                        mk = self.id + ':' + '{' + k + '}'
                    keys.append(mk)
                elif isinstance(k, VariableAgent):
                    keys.append(k.var_name)
            return keys

    def build(self, backend):
        '''
        Create backend variables and operations for this group's model.

        Parameters
        ----------
        backend : Backend.Backend

        Returns
        -------
        '''
        self._backend = backend
        batch_size = self._backend.get_batch_size()
        if self.model_class is not None:
            self.model = self.model_class(**self.parameters)

        dt = backend.dt
        # Tau constants are stored as per-step decay factors exp(-dt/tau).
        for (key, tau_var) in self.model._tau_constant_variables.items():
            key = self.add_neuron_label(key)
            tau_var = np.exp(-dt / tau_var)
            shape = ()
            backend.add_variable(key, shape, value=tau_var)
            self._var_names.append(key)
            self._var_dict[key] = VariableAgent(backend, key)

        # Membrane taus are stored as dt/tau with a per-neuron shape.
        for (key, membrane_tau_var) in self.model._membrane_variables.items():
            key = self.add_neuron_label(key)
            membrane_tau_var = dt / membrane_tau_var
            shape = (1, *self.shape)  # (1, neuron_num)
            backend.add_variable(key, shape, value=membrane_tau_var)
            self._var_names.append(key)
            self._var_dict[key] = VariableAgent(backend, key)

        for (key, var) in self.model._variables.items():
            # add the rule to extend new dimension before shape (for slif model)
            # Fix: raw string so '\[' and '\d' are real regex escapes (a
            # non-raw string raises SyntaxWarning on modern Python).
            extend_tag = re.search(r"\[\d*\]", key)
            if extend_tag is not None:
                extend_tag = int(key[extend_tag.start() + 1:extend_tag.end() - 1])
            key = self.add_neuron_label(key)
            self._var_dict[key] = VariableAgent(backend, key)
            if extend_tag is not None:
                shape = (1, extend_tag, *self.shape)
            else:
                shape = (1, *self.shape)  # (batch_size, neuron_shape)
            backend.add_variable(key, shape, value=var)
            self._var_names.append(key)
            # NOTE(review): this re-creates the VariableAgent assigned a few
            # lines above; kept as-is pending confirmation it is redundant.
            self._var_dict[key] = VariableAgent(backend, key)

        for (key, var) in self.model._constant_variables.items():
            key = self.add_neuron_label(key)
            # Scalars get an empty shape; array-likes keep their own shape.
            if isinstance(var, np.ndarray):
                if var.size > 1:
                    shape = var.shape
                else:
                    shape = ()
            elif isinstance(var, torch.Tensor):
                shape = var.shape
            elif hasattr(var, '__iter__'):
                var = np.array(var)
                if var.size > 1:
                    shape = var.shape
                else:
                    shape = ()
            else:
                shape = ()
            backend.add_variable(key, shape, value=var, is_constant=False)
            self._var_names.append(key)
            self._var_dict[key] = VariableAgent(backend, key)

        op_count = 0
        for op in self.model._operations:
            addcode_op = []
            if isinstance(op[1], str):
                # Named backend operation: (output, op_name, inputs...).
                op_name = str(op_count) + ':' + op[1]
                for ind, name in enumerate(op):
                    if ind != 1:
                        addcode_op.append(self.add_neuron_label(op[ind]))
                    else:
                        addcode_op.append(op[ind])
                backend.add_operation(addcode_op)
            else:
                # Custom callable from @custom_model: op has the form
                # [input_vars, function, output_vars].
                op_name = str(op_count) + ':' + 'custom_function'
                for ind, name in enumerate(op):
                    if ind != 1:
                        addcode_op.append(self.add_neuron_label(op[ind]))
                    else:
                        addcode_op.append(op[ind])
                backend.register_standalone(addcode_op[2], addcode_op[1], addcode_op[0])
            op_count += 1
            self._operations[op_name] = addcode_op

        # slif/selif bypass the generic operation path and register their
        # own stateful initial/update callbacks on the backend.
        if self.model_name == "slif" or self.model_name == 'selif':
            self.model.build((1, *self.shape), backend)
            self.model.outlayer = self.outlayer
            update_code = self.model.update_op_code
            intital_code = self.model.initial_op_code
            backend.register_initial(intital_code[0], intital_code[1], intital_code[2])
            backend.register_standalone(self.add_neuron_label(update_code[0]), update_code[1],
                                        [self.add_neuron_label(update_code[2])])
            backend.register_standalone(self.add_neuron_label('V'), self.model.return_V, [])
            backend.register_standalone(self.add_neuron_label('S'), self.model.return_S, [])

    @staticmethod
    def custom_model(input_vars, output_vars, new_vars_dict, equation_type=('iterative','euler_iterative','exp_euler_iterative','ode'), backend='torch', custom_function_name='custom', base_model=None, add_threshold=True):
        '''
        Decorator that builds a NeuronModel from a plain update function.

        Examples:
            @NeuronGroup.custom_model(input_vars=['M', 'S', 'WgtSum'], output_vars=['V', 'M', 'S'],
            new_vars_dict={'V':0, 'M':0, 'S':0, 'WgtSum':0}, equation_type='exp_euler_iterative')
            def func(M, S, WgtSum):
                M = (WgtSum-M)/tau
                S = (WgtSum-S)/tau
                V = M - S
                return V, M, S

            NeuronGroup(...., neuron_model=func)
        '''
        assert backend == 'torch'
        if base_model is None:
            model = NeuronModel()
        elif isinstance(base_model, NeuronModel):
            model = base_model
        else:
            raise ValueError("base model is given wrong type")
        model.name = custom_function_name

        # Sort new variables into buckets: '[constant]'-tagged names are
        # constants; for the euler equation types, names containing 'tau'
        # become membrane/tau variables respectively.
        if equation_type == 'iterative' or equation_type == 'ode':
            for key, value in new_vars_dict.items():
                if '[constant]' in key:
                    model._constant_variables[key.replace('[constant]', '')] = value
                else:
                    model._variables[key] = value
        elif equation_type == 'euler_iterative':
            for key, value in new_vars_dict.items():
                if '[constant]' in key:
                    model._constant_variables[key.replace('[constant]', '')] = value
                elif 'tau' in key.lower():
                    model._membrane_variables[key] = value
                else:
                    model._variables[key] = value
        elif equation_type == 'exp_euler_iterative':
            for key, value in new_vars_dict.items():
                if '[constant]' in key:
                    model._constant_variables[key.replace('[constant]', '')] = value
                elif 'tau' in key.lower():
                    model._tau_constant_variables[key] = value
                else:
                    model._variables[key] = value

        # Validate that every input/output name is a model variable or a
        # VariableAgent from another module.
        new_vars_dict = dict()
        new_vars_dict.update(model._variables)
        new_vars_dict.update(model._tau_constant_variables)
        new_vars_dict.update(model._membrane_variables)
        new_vars_dict.update(model._constant_variables)
        for var in input_vars:
            if isinstance(var, VariableAgent):
                continue
            elif var not in new_vars_dict:
                if '[updated]' in var:
                    if var.replace('[updated]', '') in new_vars_dict:
                        continue
                else:
                    # Fix: the original formatted this message with an
                    # undefined name 'va', raising NameError instead.
                    raise ValueError("The variable %s is not in model variable dict and not a Variable of other modules" % var)
        for var in output_vars:
            if isinstance(var, VariableAgent):
                continue
            elif var not in new_vars_dict:
                if '[updated]' in var:
                    if var.replace('[updated]', '') in new_vars_dict:
                        continue
                else:
                    raise ValueError("The variable %s is not in model variable dict and not a Variable of other modules" % var)

        def model_function(func):
            # Store the custom op as [inputs, callable, outputs]; build()
            # recognises the non-string second element.
            op_code = [input_vars, func, output_vars]
            model._operations.append(op_code)
            if add_threshold == True:
                model._operations.append(('O', 'threshold', 'V[updated]', 'Vth'))
            return model
        return model_function
class NeuronModel(ABC):
    '''
    Base class for neuron models.

    Each entry of ``_operations`` has the form
    (return_name, operation_name, input_name1, input_name2...)
    '''

    #: A dictionary mapping neuron model names to `Model` objects
    neuron_models = dict()

    def __init__(self, **kwargs):
        super(NeuronModel, self).__init__()
        self.name = 'none'
        self._operations = []
        self._variables = {}
        self._tau_constant_variables = {}
        self._membrane_variables = {}
        self._constant_variables = {}
        self.neuron_parameters = {}

    @staticmethod
    def register(name, model):
        '''
        Register a neuron model so it can be referred to via its name.

        Parameters
        ----------
        name : str
            A short name for the state updater (e.g. `'lif'`)
        model : `NeuronModel`
            The neuron model object, e.g. an `CLIFModel`, 'SLIFModel'.
        '''
        # Names are stored in lower case so 'LIF' and 'lif' cannot coexist.
        key = name.lower()
        if key in NeuronModel.neuron_models:
            raise ValueError(('A neuron_model with the name "%s" has already been registered') % key)
        if not issubclass(model, NeuronModel):
            raise ValueError(('Given model of type %s does not seem to be a valid NeuronModel.' % str(type(model))))
        NeuronModel.neuron_models[key] = model
        model.name = key

    @staticmethod
    def apply_model(model_name):
        '''
        Look up a registered neuron model class by (case-insensitive) name.

        Parameters
        ----------
        model_name : str

        Returns
        -------
        The registered `NeuronModel` subclass.
        '''
        key = model_name.lower()
        if key not in NeuronModel.neuron_models:
            raise ValueError(('Given model name is not in the model list'))
        return NeuronModel.neuron_models[key]
class CLIFModel(NeuronModel):
    """
    Current LIF 3-kernel model:
    V(t) = M(t) − S(t) − E(t)
    I^n[t] = V0 * WgtSum^n[t-1] #sum(w * O^(n-1)[t])
    M^n[t] = betaM * M^n[t-1] + I^n[t-1]
    S^n[t] = betaS * S^n[t-1] + I^n[t-1]
    E^n[t] = betaM * E^n[t-1] + Vth * O^n[t-1]
    O^n[t] = spike_func(V^n[t-1])
    """

    def __init__(self, **kwargs):
        super(CLIFModel, self).__init__()
        # Hyper-parameters with their usual defaults.
        for param_name, default in (('tau_p', 12.0), ('tau_q', 8.0),
                                    ('tau_m', 20.0), ('v_th', 1.0)):
            self.neuron_parameters[param_name] = kwargs.get(param_name, default)

        # All state variables start at rest (zero).
        for state_name in ('M', 'S', 'E', 'I', 'O', 'V', 'WgtSum'):
            self._variables[state_name] = 0.0

        # Normalisation factor derived from the kernel time constants.
        beta = self.neuron_parameters['tau_p'] / self.neuron_parameters['tau_q']
        V0 = (1 / (beta - 1)) * (beta ** (beta / (beta - 1)))
        self._constant_variables['V0'] = V0
        self._constant_variables['Vth'] = self.neuron_parameters['v_th']

        self._tau_constant_variables['tauM'] = self.neuron_parameters['tau_m']
        self._tau_constant_variables['tauP'] = self.neuron_parameters['tau_p']
        self._tau_constant_variables['tauQ'] = self.neuron_parameters['tau_q']

        # One backend operation per line of the kernel equations above.
        self._operations.extend([
            ('I', 'var_mult', 'V0', 'WgtSum[updated]'),
            ('M', 'var_linear', 'tauP', 'M', 'I[updated]'),
            ('S', 'var_linear', 'tauQ', 'S', 'I[updated]'),
            ('PSP', 'minus', 'M[updated]', 'S[updated]'),
            ('V', 'minus', 'PSP', 'E'),
            ('O', 'threshold', 'V[updated]', 'Vth'),
            ('Resetting', 'var_mult', 'Vth', 'O[updated]'),
            ('E', 'var_linear', 'tauM', 'E', 'Resetting'),
        ])
NeuronModel.register("clif", CLIFModel)
class IFModel(NeuronModel):
    """
    IF model:
    V(t) = V(t-1) * (1 - O(t-1)) + WgtSum[t] - ConstantDecay
    O^n[t] = spike_func(V^n[t-1])
    """

    def __init__(self, **kwargs):
        super(IFModel, self).__init__()
        self.neuron_parameters['ConstantDecay'] = kwargs.get('ConstantDecay', 0.0)
        self.neuron_parameters['v_th'] = kwargs.get('v_th', 1.0)

        # All state variables start at rest (zero).
        for state_name in ('O', 'V', 'WgtSum'):
            self._variables[state_name] = 0.0

        self._constant_variables['ConstantDecay'] = self.neuron_parameters['ConstantDecay']
        self._constant_variables['Vth'] = self.neuron_parameters['v_th']

        # Integrate input, decay, fire, then reset V of spiking neurons.
        self._operations.extend([
            ('Vtemp', 'add', 'V', 'WgtSum[updated]'),
            ('Vtemp1', 'minus', 'Vtemp', 'ConstantDecay'),
            ('O', 'threshold', 'Vtemp1', 'Vth'),
            ('Resetting', 'var_mult', 'Vtemp1', 'O[updated]'),
            ('V', 'minus', 'Vtemp1', 'Resetting'),
        ])
NeuronModel.register("if", IFModel)
import torch
class QLIFModel(NeuronModel):
    """
    LIF neuron model for Q-Backprop learning
    E[t] = beta_m*E[t-1] + Vth*O[t-1]
    V[t] = WgtSum(PSP[t]) - E[t]
    O[t] = spike_function(V[t])
    # Q for non-spiking
    Mp[t] = beta_m*Mp[t-1] + WpSum[t]
    Sp[t] = beta_s*Sp[t-1] + WpSum[t]
    P[t] = Mp[t] - Sp[t]
    # Q for spiking
    Mq[t] = beta_m*Mq[t-1] + WqSum[t]
    Sq[t] = beta_s*Sq[t-1] + WqSum[t]
    Q[t] = Mq[t] - Sq[t]
    R[t] = ~O[t]*beta_s*R[t-1] + O[t]
    F[t] = 1 - R[t]
    QP[t] = Q[t] - P[t]
    BQ[t] = R[t]*Q[t] + F[t]*P[t]
    TP[t] = R[t]*P[t] + F[t]*Reward - P[t-1]
    TQ[t] = F[t]*Q[t] + R[t]*Reward - Q[t-1]
    """
    # WgtSum(BQ_post[t - 1]) / WgtSum)
    def __init__(self, **kwargs):
        super(QLIFModel, self).__init__()
        # Tau entries are converted to per-step decay factors exp(-dt/tau)
        # by NeuronGroup.build().
        self._tau_constant_variables['Beta_m'] = kwargs.get('tau_m', 20.0)
        self._tau_constant_variables['Beta_s'] = kwargs.get('tau_s', 8.0)
        self._constant_variables['Vth'] = kwargs.get('v_th', 1.0)
        self._constant_variables['One'] = 1.0
        # State variables, all starting at rest (zero).
        self._variables['E'] = 0.0
        self._variables['O'] = 0.0
        self._variables['V'] = 0.0
        self._variables['P'] = 0.0
        self._variables['Mp'] = 0.0
        self._variables['Sp'] = 0.0
        self._variables['Q'] = 0.0
        self._variables['Mq'] = 0.0
        self._variables['Sq'] = 0.0
        self._variables['R'] = 0.0
        self._variables['F'] = 0.0
        self._variables['QP'] = 0.0
        self._variables['BQ'] = 0.0
        self._variables['TP'] = 0.0
        self._variables['TQ'] = 0.0
        self._variables['PSP'] = 0.0
        self._variables['WpSum'] = 0.0
        self._variables['WqSum'] = 0.0
        self._variables['Reward'] = 0.0
        # Membrane/refractory dynamics: E accumulates resets, V = sum(PSP) - E.
        self._operations.append(('Resetting', 'var_mult', 'Vth', 'O'))
        self._operations.append(('E', 'var_linear', 'Beta_m', 'E', 'Resetting'))
        self._operations.append(('SumPSP', 'reduce_sum', 'PSP'))
        self._operations.append(('V', 'minus', 'SumPSP', 'E[updated]'))
        self._operations.append(('Mp', 'var_linear', 'Beta_m', 'Mp', 'WpSum'))
        self._operations.append(('Sp', 'var_linear', 'Beta_s', 'Sp', 'WpSum'))
        self._operations.append(('P', 'minus', 'Mp[updated]', 'Sp[updated]'))
        # NOTE(review): the class docstring says Mq/Sq integrate WqSum, but
        # the two operations below read 'WpSum' -- confirm intended input.
        self._operations.append(('Mq', 'var_linear', 'Beta_m', 'Mq', 'WpSum'))
        self._operations.append(('Sq', 'var_linear', 'Beta_s', 'Sq', 'WpSum'))
        self._operations.append(('Q', 'minus', 'Mq[updated]', 'Sq[updated]'))
        # NOTE(review): 'RTmp' is computed but never consumed, the next
        # operation has an empty output name, and no threshold/spike op for
        # 'O' is ever appended -- this operation list looks unfinished;
        # verify before relying on this model.
        self._operations.append(('RTmp', 'var_mult', 'Beta_s', 'R'))
        self._operations.append(('', 'var_linear', 'Beta_s', 'R'))
NeuronModel.register("qlif", QLIFModel)
class SELIFModel(NeuronModel): #Exponential Model
    """
    SpikeProp LIF 3-kernel model:
    V[t] = WgtSum[t-1] - E[t-1]
    I[t] = spike_func(V[t])
    M[t] = betaM * M[t-1] + I[t]
    S[t] = betaS * S[t-1] + I[t]
    E[t] = betaM * E[t-1] + Vth * I[t]
    O[t] = M[t] − S[t]
    """
    def __init__(self,
                 tau_m=12.0,
                 tau_p=6.0,
                 tau_q=2.0,
                 tau_r=16.0,
                 v_th=1.0,
                 v_reset=2.0,
                 outlayer=False,
                 **kwargs
                 ):
        super(SELIFModel, self).__init__()
        # NOTE(review): this import is unused in __init__ -- presumably kept
        # for its side effects or historical reasons; confirm.
        from spaic.Learning.TRUE_Learner import TRUE_SpikeProp
        # initial value for state variables
        self._variables['[2]O'] = 0.0
        self._variables['V'] = 0.0
        self._variables['dV'] = 0.0
        self._variables['S'] = 0.0
        self._variables['WgtSum'] = 0.0
        self.tau_m = tau_m
        self.tau_e = tau_p
        self.tau_s = tau_q
        self.tau_r = tau_r
        self.v_th = v_th
        # NOTE(review): the v_reset constructor argument is ignored here and
        # the reset amplitude is tied to v_th instead -- confirm intended.
        self.v_reset = 1.0 * v_th
        self.outlayer = outlayer
        self.beta = tau_p / tau_q
        # Kernel normalisation factor derived from the tau ratio.
        self.V0 = (1 / (self.beta - 1)) * (self.beta ** (self.beta / (self.beta - 1)))
        # self.delat_m = self.tau_m/(self.tau_m-self.tau_s)
        # self.delat_s = self.tau_s/(self.tau_m-self.tau_s)
        # self.delat_ms = self.tau_m*self.tau_s/(self.tau_m-self.tau_s)
        # These op codes are registered on the backend by NeuronGroup.build()
        # instead of the generic _operations path.
        self.update_op_code = ('[2]O', self.update, 'WgtSum[updated]') # 'WgtSum[updated]'
        self.return_op_code = (None, self.return_V, [])
        self.initial_op_code = (None, self.initial, [])
        '''
        V(t) = M(t) − S(t) − E(t)
        I^n[t] = V0 * WgtSum^n[t-1] #sum(w * O^(n-1)[t])
        M^n[t] = betaM * M^n[t-1] + I^n[t-1]
        S^n[t] = betaS * S^n[t-1] + I^n[t-1]
        E^n[t] = betaM * E^n[t-1] + Vth * O^n[t-1]
        O^n[t] = spike_func(V^n[t-1])
        '''
    def attach_learner(self, learner):
        # The learner supplies the surrogate threshold used in update().
        self.learner = learner
    def build(self, shape, backend):
        # Allocate state tensors and precompute per-step decay factors.
        self.dt = backend.dt
        self.M_initial = torch.zeros(shape, device=backend.device)
        self.S_initial = torch.zeros(shape, device=backend.device)
        self.R_initial = torch.zeros(shape, device=backend.device)
        self.V_initial = torch.zeros(shape, device=backend.device)
        self.beta_m = np.exp(-backend.dt / self.tau_m)
        self.beta_s = np.exp(-backend.dt / self.tau_s)
        self.beta_e = np.exp(-backend.dt / self.tau_e)
        self.beta_r = np.exp(-backend.dt / self.tau_r)
        # Running statistics used by norm_hook for gradient normalisation.
        self.running_var = None
        self.running_mean = None
        self.decay = 0.9999
        self.initial()
        self.rec_E = []
    def initial(self):
        # Reset all state back to the build-time initial tensors.
        self.M = self.M_initial
        self.S = self.S_initial
        self.R = self.R_initial
        self.V = self.V_initial
        self.O = None
        self.rec_E = []
    def norm_hook(self, grad):
        # Gradient hook: normalise by exponential running mean/variance.
        if self.running_var is None:
            self.running_var = torch.norm(grad, dim=0) * 0
            self.running_mean = torch.mean(grad, dim=0) * 0
        else:
            self.running_var = self.decay * self.running_var + (1 - self.decay) * torch.norm(grad, dim=0)
            self.running_mean = self.decay * self.running_mean + (1 - self.decay) * torch.mean(grad, dim=0)
        return (grad - self.running_mean) / (1.0e-10 + self.running_var)
    def update(self, WgtSum):
        # One simulation step; requires attach_learner() to have been called
        # since the spike is produced by self.learner.threshold.
        # with torch.no_grad():
        #     self.dV = self.E / self.tau_m +self.S / self.tau_s - self.M / self.tau_m
        I = self.V0 * WgtSum
        # WgtSum.register_hook(self.norm_hook)
        # A trailing input dimension of size 2 carries (value, derivative)
        # channels; otherwise I drives M and S directly.
        if I.dim() == self.M.dim() + 1:
            Ii = I[:, 0, ...]
            I0 = I[:, 1, ...]
            self.M = self.beta_e * self.M + (Ii - I0 / self.tau_e)
            self.S = self.beta_s * self.S + (Ii - I0 / self.tau_s)
        else:
            self.M = self.beta_e * self.M + I
            self.S = self.beta_s * self.S + I
        if self.O is not None:
            Oi = self.O[:, 0, ...] #+ 0.9*self.O[:, 0, ...].detach() # *self.O[:, 0, ...].gt(0.0)
            Ot = self.O[:, 1, ...] #+ 0.9*self.O[:, 1, ...].detach()
        else:
            Oi = 0.0
            Ot = 0.0
        # expv = torch.clamp_max(torch.exp(2.5 * self.V)-1, 12)
        # Clamped quadratic stand-in for the exponential spike-initiation term.
        expv = 2.0*torch.pow(torch.clamp(self.V,-10,self.v_th), 2.0)
        self.R = self.beta_r*self.R + (1-self.beta_r)*self.V + 10.0*(Oi-Ot/self.tau_r)
        self.dV = (expv - self.V + (self.M - self.S) + 0.5-0.5*self.R) / self.tau_m
        # with torch.no_grad():
        #     self.ddV = (self.dV * (0.2 * expv - 1) + self.S / self.tau_s - self.M / self.tau_e) / self.tau_m
        #     self.dV2 = self.dV + self.ddV * self.dt
        # Euler step plus reset driven by the previous step's spike channels.
        self.V = self.V + self.dV * self.dt - self.v_reset*(Oi-Ot/self.tau_m)# + self.ddV * self.dt ** 2 / 2.0
        # self.P = self.M + self.S
        self.O = self.learner.threshold(self.V, self.dV, self.v_th)
        #
        # self.rec_E.append(self.Vmax)
        # if (I is not None) and (I.requires_grad == True):
        #     I.retain_grad()
        #     self.rec_E.append(I)
        return self.O
    def return_V(self):
        return self.V
    def return_M(self):
        return self.M
    def return_S(self):
        return self.S
    def return_dV(self):
        return self.dV
    @property
    def E_values(self):
        # Recorded tensors stacked along a new trailing time axis.
        return torch.stack(self.rec_E, dim=-1).cpu().detach().numpy()
    @property
    def E_grads(self):
        # Gradients of the recorded tensors; zeros where no grad was stored.
        grads = []
        for v in self.rec_E:
            if v.grad is not None:
                grads.append(v.grad.cpu().numpy())
            else:
                grads.append(torch.zeros_like(v).cpu().numpy())
        grads = np.stack(grads[1:], axis=-1)
        return grads
NeuronModel.register("selif", SELIFModel)
class SLIFModel(NeuronModel):
    """
    SpikeProp LIF 3-kernel model:
    V[t] = WgtSum[t-1] - E[t-1]
    I[t] = spike_func(V[t])
    M[t] = betaM * M[t-1] + I[t]
    S[t] = betaS * S[t-1] + I[t]
    E[t] = betaM * E[t-1] + Vth * I[t]
    O[t] = M[t] − S[t]
    """
    def __init__(self,
                 tau_m=20.0,
                 tau_p=20.0,
                 tau_q=8.0,
                 v_th=1.0,
                 v_reset=2.0,
                 outlayer=False
                 ):
        super(SLIFModel, self).__init__()
        from spaic.Learning.TRUE_Learner import TRUE_SpikeProp
        # initial value for state variables
        self._variables['[2]O'] = 0.0
        self._variables['V'] = 0.0
        self._variables['dV'] = 0.0
        self._variables['S'] = 0.0
        self._variables['WgtSum'] = 0.0
        # Time constants: tau_m = membrane, tau_e (from tau_p) = slow kernel,
        # tau_s (from tau_q) = fast kernel.
        self.tau_m = tau_m
        self.tau_e = tau_p
        self.tau_s = tau_q
        self.v_th = v_th
        # NOTE(review): the v_reset argument is ignored; reset strength is
        # always 5*v_th — confirm this is intended.
        self.v_reset = 5.0 * v_th
        self.outlayer = outlayer
        # V0 normalises the double-exponential kernel built from tau_m/tau_q.
        self.beta = tau_m / tau_q
        self.V0 = (1 / (self.beta - 1)) * (self.beta ** (self.beta / (self.beta - 1)))
        # self.delat_m = self.tau_m/(self.tau_m-self.tau_s)
        # self.delat_s = self.tau_s/(self.tau_m-self.tau_s)
        # self.delat_ms = self.tau_m*self.tau_s/(self.tau_m-self.tau_s)
        # Op codes consumed by the backend scheduler.
        self.update_op_code = ('[2]O', self.update, 'WgtSum[updated]')  # 'WgtSum[updated]'
        self.return_op_code = (None, self.return_V, [])
        self.initial_op_code = (None, self.initial, [])
        '''
        V(t) = M(t) − S(t) − E(t)
        I^n[t] = V0 * WgtSum^n[t-1] #sum(w * O^(n-1)[t])
        M^n[t] = betaM * M^n[t-1] + I^n[t-1]
        S^n[t] = betaS * S^n[t-1] + I^n[t-1]
        E^n[t] = betaM * E^n[t-1] + Vth * O^n[t-1]
        O^n[t] = spike_func(V^n[t-1])
        '''

    def attach_learner(self, learner):
        # The learner supplies the surrogate-gradient threshold function.
        self.learner = learner

    def build(self, shape, backend):
        """Allocate state tensors and precompute per-dt decay factors."""
        self.dt = backend.dt
        self.M_initial = torch.zeros(shape, device=backend.device)
        self.S_initial = torch.zeros(shape, device=backend.device)
        self.E_initial = torch.zeros(shape, device=backend.device)
        self.V_initial = torch.zeros(shape, device=backend.device)
        self.O_initial = torch.zeros((1, 2, 1), device=backend.device)
        # Exponential decays per step for each time constant.
        self.beta_m = np.exp(-backend.dt / self.tau_m)
        self.beta_s = np.exp(-backend.dt / self.tau_s)
        self.beta_e = np.exp(-backend.dt / self.tau_e)
        # First-order-corrected derivative coefficients used in update().
        self.deta_m = (1 - self.dt/(2*self.tau_m))/self.tau_m
        self.deta_s = (1 - self.dt/(2*self.tau_s))/self.tau_s
        # Running statistics for the (currently unused) norm_hook.
        self.running_var = None
        self.running_mean = None
        self.decay = 0.9999
        self.initial()
        self.rec_E = []

    def initial(self):
        # Reset all state traces to their build-time initial tensors.
        self.M = self.M_initial
        self.S = self.S_initial
        self.E = self.E_initial
        self.V = self.V_initial
        self.O = None
        self.rec_E = []

    def norm_hook(self, grad):
        """Gradient hook: normalise grad by running mean/variance statistics."""
        if self.running_var is None:
            # Lazily initialise zero statistics with the right shape.
            self.running_var = torch.norm(grad, dim=0) * 0
            self.running_mean = torch.mean(grad, dim=0) * 0
        else:
            self.running_var = self.decay * self.running_var + (1 - self.decay) * torch.norm(grad, dim=0)
            self.running_mean = self.decay * self.running_mean + (1 - self.decay) * torch.mean(grad, dim=0)
        return (grad - self.running_mean) / (1.0e-10 + self.running_var)

    def update(self, WgtSum):
        """Advance the SLIF state one time step and return the spike output O."""
        # with torch.no_grad():
        #     self.dV = self.E / self.tau_m +self.S / self.tau_s - self.M / self.tau_m
        I = self.V0 * WgtSum
        # WgtSum.register_hook(self.norm_hook)
        # Oi = self.O[:, 0, ...]
        # Ot = self.O[:, 1, ...]
        if self.O is not None:
            # NOTE(review): 0.1*x + 0.9*x == x; one of the terms was likely
            # meant to be .detach()'d (cf. SELIFModel.update's comments) —
            # confirm before simplifying.
            Oi = 0.1*self.O[:, 0, ...] + 0.9*self.O[:, 0, ...]
            Ot = 0.1*self.O[:, 1, ...] + 0.9*self.O[:, 1, ...]
        else:
            Oi = 0
            Ot = 0
        if I.dim() == self.M.dim() + 1:
            # Two-channel input along dim 1; M also absorbs the reset term.
            Ii = I[:, 0, ...]
            I0 = I[:, 1, ...]
            self.M = self.beta_m * self.M + Ii - I0 / self.tau_m - self.v_reset*(Oi-Ot/self.tau_m)
            self.S = self.beta_s * self.S + Ii - I0 / self.tau_s
        else:
            self.M = self.beta_m * self.M + I - self.v_reset*(Oi-Ot/self.tau_m)
            self.S = self.beta_s * self.S + I
        # Membrane potential is the kernel difference; dV its analytic rate.
        self.V = self.M - self.S
        # self.P = self.M + self.S
        self.dV = self.S*self.deta_s - self.M*self.deta_m
        #
        # if self.O is not None:
        #     self.E = self.E*Oi.lt(1.0).float()# + (0.5*self.E.detach() - 0.5*self.E)*Oi.ge(1.0).float()
        # MSbase = torch.clamp_min(self.M*self.tau_s/(self.S*self.tau_m+1.0e-20), 0)
        # self.Vmax = self.M*MSbase**self.delat_s - self.S*MSbase**self.delat_m
        self.O = self.learner.threshold(self.V, self.dV, self.v_th)
        #
        # self.rec_E.append(self.Vmax)
        # if (I is not None) and (I.requires_grad == True):
        #     I.retain_grad()
        #     self.rec_E.append(I)
        return self.O

    def return_V(self):
        # Backend accessor: current membrane potential tensor.
        return self.V

    def return_M(self):
        # Backend accessor: current M trace.
        return self.M

    def return_S(self):
        # Backend accessor: current S trace.
        return self.S

    def return_dV(self):
        # Backend accessor: membrane-potential derivative.
        return self.dV

    @property
    def E_values(self):
        # Recorded tensors stacked along a trailing time axis (numpy).
        return torch.stack(self.rec_E, dim=-1).cpu().detach().numpy()

    @property
    def E_grads(self):
        # Gradients of recorded tensors (zeros where .grad is unset),
        # dropping the first entry, stacked along a trailing time axis.
        grads = []
        for v in self.rec_E:
            if v.grad is not None:
                grads.append(v.grad.cpu().numpy())
            else:
                grads.append(torch.zeros_like(v).cpu().numpy())
        grads = np.stack(grads[1:], axis=-1)
        return grads
# for ii in range(2):
# out.append(test.update(I))
NeuronModel.register("slif", SLIFModel)
class SELIFDebugModel(NeuronModel):
    """Debug variant of SELIFModel: same exponential-LIF style dynamics but
    without the slow recovery trace R (compare SELIFModel.update)."""

    def __init__(self,
                 tau_m=20.0, tau_p=20.0, tau_q=8.0, v_th=1.0,
                 outlayer=False
                 ):
        super(SELIFDebugModel, self).__init__()
        from spaic.Learning.TRUE_Learner import TRUE_SpikeProp
        # initial value for state variables
        self._variables['[2]O'] = 0.0
        self._variables['V'] = 0.0
        self._variables['dV'] = 0.0
        self._variables['S'] = 0.0
        self._variables['WgtSum'] = 0.0
        self._variables['cumV'] = 0.0
        self.tau_m = tau_m
        self.tau_e = tau_p
        self.tau_s = tau_q
        self.v_th = v_th
        # NOTE(review): reset strength is hard-wired to 5*v_th — confirm.
        self.v_reset = 5.0 * v_th
        self.outlayer = outlayer
        # V0 normalises the double-exponential kernel built from tau_m/tau_q.
        self.beta = tau_m / tau_q
        self.V0 = (1 / (self.beta - 1)) * (self.beta ** (self.beta / (self.beta - 1)))
        # Op codes consumed by the backend scheduler.
        self.update_op_code = ('[2]O', self.update, 'WgtSum[updated]')  # 'WgtSum[updated]'
        self.return_op_code = (None, self.return_V, [])
        self.initial_op_code = (None, self.initial, [])

    def attach_learner(self, learner):
        # The learner supplies the surrogate-gradient threshold function.
        self.learner = learner

    def build(self, shape, backend):
        """Allocate state tensors and precompute per-dt decay factors."""
        self.dt = backend.dt
        self.M_initial = torch.zeros(shape, device=backend.device)
        self.S_initial = torch.zeros(shape, device=backend.device)
        self.V_initial = torch.zeros(shape, device=backend.device)
        self.beta_m = np.exp(-backend.dt / self.tau_m)
        self.beta_s = np.exp(-backend.dt / self.tau_s)
        self.beta_e = np.exp(-backend.dt / self.tau_e)
        self.initial()
        self.rec_E = []

    def initial(self):
        # Reset all state traces to their build-time initial tensors.
        self.M = self.M_initial
        self.S = self.S_initial
        self.V = self.V_initial
        self.cumV = self.V_initial
        self.O = None
        self.rec_E = []

    def update(self, WgtSum):
        """Advance the state one time step and return the spike output O."""
        I = self.V0 * WgtSum
        if I.dim() == self.M.dim() + 1:
            # Two-channel input along dim 1 (Ii = channel 0, I0 = channel 1).
            Ii = I[:, 0, ...]
            I0 = I[:, 1, ...]
            self.M = self.beta_e * self.M + (Ii - I0 / self.tau_e)
            self.S = self.beta_s * self.S + (Ii - I0 / self.tau_s)
        else:
            self.M = self.beta_e * self.M + I
            self.S = self.beta_s * self.S + I
        if self.O is not None:
            Oi = self.O[:, 0, ...] #+ 0.9*self.O[:, 0, ...].detach() # *self.O[:, 0, ...].gt(0.0)
            Ot = self.O[:, 1, ...] #+ 0.9*self.O[:, 1, ...].detach()
        else:
            # First step after initial(): no spikes yet.
            Oi = 0.0
            Ot = 0.0
        # Quadratic drive in V, clamped to [-10, v_th].
        # expv = torch.clamp_max(torch.exp(2.5 * self.V)-1, 12)
        expv = 2.0*torch.pow(torch.clamp(self.V,-10,self.v_th), 2.0)
        self.dV = (expv - self.V + (self.M - self.S)) / self.tau_m
        # Euler step plus spike-triggered reset term.
        self.V = self.V + self.dV * self.dt - self.v_reset*(Oi-Ot/self.tau_m)
        self.O = self.learner.threshold(self.V, self.dV, self.v_th)
        return self.O

    def return_V(self):
        # Backend accessor: current membrane potential tensor.
        return self.V
class LIFModel(NeuronModel):
    """Leaky integrate-and-fire neuron (op-list formulation).

    Per step the registered pipeline computes:
        Vtemp = tauM * V + WgtSum[updated]
        O     = threshold(Vtemp, Vth)
        V     = Vtemp - Vtemp * O   (i.e. V is zeroed where a spike fired)
    """

    def __init__(self, **kwargs):
        super(LIFModel, self).__init__()
        # Tunable parameters with their defaults, overridable via kwargs.
        for key, default in (('tau_m', 20.0), ('v_th', 1), ('v_reset', 0.0)):
            self.neuron_parameters[key] = kwargs.get(key, default)
        # All state variables start at rest.
        for state in ('V', 'O', 'WgtSum'):
            self._variables[state] = 0.0
        self._constant_variables['Vth'] = self.neuron_parameters['v_th']
        self._constant_variables['Vreset'] = self.neuron_parameters['v_reset']
        self._tau_constant_variables['tauM'] = self.neuron_parameters['tau_m']
        # Op pipeline: decay + integrate, spike, then multiplicative reset.
        for op in (
            ('Vtemp', 'var_linear', 'tauM', 'V', 'WgtSum[updated]'),
            ('O', 'threshold', 'Vtemp', 'Vth'),
            ('Resetting', 'var_mult', 'Vtemp', 'O[updated]'),
            ('V', 'minus', 'Vtemp', 'Resetting'),
        ):
            self._operations.append(op)
NeuronModel.register("lif", LIFModel)
class ConstantCurrentLIFModel(NeuronModel):
    """LIF neuron driven by a constant input current.

    Per step the registered pipeline computes:
        I     = V0 * WgtSum            (V0 fixed at 1.0)
        Vtemp = V + (dt/tauM) * (I - V)
        O     = threshold(Vtemp, Vth)
        V     = Vtemp - Vtemp * O      (V zeroed where a spike fired)
    """

    def __init__(self, **kwargs):
        super(ConstantCurrentLIFModel, self).__init__()
        # Tunable parameters with their defaults, overridable via kwargs.
        for key, default in (('tau_m', 20.0), ('v_th', 1.0), ('v_reset', 0.0)):
            self.neuron_parameters[key] = kwargs.get(key, default)
        # All state variables start at rest.
        for state in ('V', 'O', 'I', 'WgtSum'):
            self._variables[state] = 0.0
        # Unit input gain; the PSP-kernel models derive a non-unit V0 from
        # their tau_p/tau_q ratio instead.
        self._constant_variables['V0'] = 1.0
        self._constant_variables['Vth'] = self.neuron_parameters['v_th']
        self._constant_variables['Vreset'] = self.neuron_parameters['v_reset']
        self._membrane_variables['tauM'] = self.neuron_parameters['tau_m']
        # Op pipeline: scale input, leaky integration, spike, reset.
        for op in (
            ('I', 'var_mult', 'V0', 'WgtSum'),
            ('decayV', 'minus', 'I', 'V'),
            ('Vtemp', 'var_linear', 'tauM', 'decayV', 'V'),
            ('O', 'threshold', 'Vtemp', 'Vth'),
            ('Resetting', 'var_mult', 'Vtemp', 'O'),
            ('V', 'minus', 'Vtemp', 'Resetting'),
        ):
            self._operations.append(op)
NeuronModel.register("constantcurrentlif", ConstantCurrentLIFModel)
class NonSpikingLIFModel(NeuronModel):
    """LIF-style membrane with a double-exponential PSP kernel and no spiking.

    Per step the registered pipeline computes:
        I   = V0 * WgtSum
        M   = tauP * M + I             (slow integration trace)
        S   = tauQ * S + I             (fast synaptic trace)
        PSP = M - S
        V   = V + (dt/tauM) * (PSP - V)
    V0 is derived from the tauP/tauQ ratio (SpikeProp kernel scaling).
    """

    def __init__(self, **kwargs):
        super(NonSpikingLIFModel, self).__init__()
        # Tunable parameters with their defaults, overridable via kwargs.
        # v_th / v_reset are recorded but unused by this non-spiking model.
        for key, default in (
            ('tau_p', 4.0),
            ('tau_q', 1.0),
            ('tau_m', 1.0),
            ('v_th', 1.0),
            ('v_reset', 0.0),
        ):
            self.neuron_parameters[key] = kwargs.get(key, default)
        # All state variables start at rest.
        for state in ('I', 'M', 'S', 'V', 'O', 'WgtSum'):
            self._variables[state] = 0.0
        ratio = self.neuron_parameters['tau_p'] / self.neuron_parameters['tau_q']
        self._constant_variables['V0'] = (1 / (ratio - 1)) * (ratio ** (ratio / (ratio - 1)))
        self._membrane_variables['tauM'] = self.neuron_parameters['tau_m']
        # self._tau_constant_variables['tauM'] = self.neuron_parameters['tau_m']
        self._tau_constant_variables['tauP'] = self.neuron_parameters['tau_p']
        self._tau_constant_variables['tauQ'] = self.neuron_parameters['tau_q']
        # Op pipeline: scale input, run both traces, form PSP, relax V to it.
        for op in (
            ('I', 'var_mult', 'V0', 'WgtSum'),
            ('M', 'var_linear', 'tauP', 'M', 'I'),
            ('S', 'var_linear', 'tauQ', 'S', 'I'),
            ('PSP', 'minus', 'M', 'S'),
            ('decayV', 'minus', 'PSP', 'V'),
            ('V', 'var_linear', 'tauM', 'decayV', 'V'),
        ):
            self._operations.append(op)
NeuronModel.register("nonspikinglif", NonSpikingLIFModel)
class LIFMModel(NeuronModel):
    """LIF neuron with a filtered chemical current plus an electrical current.

    Per step the registered pipeline computes:
        PSP   = WgtSum[updated] + b
        I_che = tauP * I_che + PSP     (low-pass filtered chemical current)
        I     = I_che + I_ele
        Vtemp = V * tauM + I
        O     = threshold(Vtemp, Vth)
        V     = Vtemp - Vtemp * O      (V zeroed where a spike fired)
    """

    def __init__(self, **kwargs):
        super(LIFMModel, self).__init__()
        # Tunable parameters with their defaults, overridable via kwargs.
        for key, default in (('tau_p', 1.0), ('tau_m', 10.0), ('v_th', 1.0)):
            self.neuron_parameters[key] = kwargs.get(key, default)
        # State variables: membrane, output, input sum, bias, and the
        # chemical/electrical current components.
        for state in ('V', 'O', 'WgtSum', 'b', 'I_che', 'I_ele', 'I'):
            self._variables[state] = 0.0
        self._constant_variables['Vth'] = self.neuron_parameters['v_th']
        # self._constant_variables['Vreset'] = v_reset
        self._tau_constant_variables['tauM'] = self.neuron_parameters['tau_m']
        self._tau_constant_variables['tauP'] = self.neuron_parameters['tau_p']
        # Op pipeline: bias + filter input, combine currents, integrate,
        # spike, then multiplicative reset.
        for op in (
            ('PSP', 'add', 'WgtSum[updated]', 'b'),
            ('I_che', 'var_linear', 'tauP', 'I_che', 'PSP'),
            ('I', 'add', 'I_che[updated]', 'I_ele'),
            ('Vtemp', 'var_linear', 'V', 'tauM', 'I[updated]'),
            ('O', 'threshold', 'Vtemp', 'Vth'),
            ('Vreset', 'var_mult', 'Vtemp', 'O[updated]'),
            ('V', 'minus', 'Vtemp', 'Vreset'),
        ):
            self._operations.append(op)
NeuronModel.register("lifm", LIFMModel)
class IZHModel(NeuronModel):
    """
    IZH (Izhikevich) model:
        V = V + dt / tauM * (C1 * V * V + C2 * V + C3 - U + PSP)
            # tauM = 1; tauM is included only so the op form stays
            # consistent with the LIF model.
        V = V + dt / tauM * (V * (C1 * V + C2) + C3 - U + PSP)
            # expanded from the line above
        U = U + a. * (b. * V - U)
        PSP = M^n[t] - S^n[t]
        M^n[t] = tauP * M^n[t-1] + I^n[t-1]   # membrane-integration trace
        S^n[t] = tauQ * S^n[t-1] + I^n[t-1]   # synaptic-current trace
        I^n[t] = V0 * WgtSum^n[t-1]           # WgtSum = sum(w * O^(n-1)[t])
        O^n[t] = spike_func(V^n[t-1])
    """
    def __init__(self, **kwargs):
        super(IZHModel, self).__init__()
        # Tunable parameters (Izhikevich a/b plus PSP kernel taus).
        self.neuron_parameters['tau_p'] = kwargs.get('tau_p', 4.0)
        self.neuron_parameters['tau_q'] = kwargs.get('tau_q', 1.0)
        self.neuron_parameters['a'] = kwargs.get('a', 0.02)
        self.neuron_parameters['b'] = kwargs.get('b', 0.2)
        self.neuron_parameters['Vrest'] = kwargs.get('Vrest', -65.0)
        self.neuron_parameters['Ureset'] = kwargs.get('Ureset', 8.0)
        # initial value for state variables
        self._variables['I'] = 0.0
        self._variables['M'] = 0.0
        self._variables['S'] = 0.0
        self._variables['O'] = 0.0
        self._variables['V'] = -65.0  # self.neuron_parameters['c']
        self._variables['U'] = 1.0  # self.neuron_parameters['b']*self._variables['V']
        self._variables['WgtSum'] = 0.0  # 1.8
        # V0 normalises the (tauP, tauQ) double-exponential PSP kernel.
        beta = self.neuron_parameters['tau_p'] / self.neuron_parameters['tau_q']
        V0 = (1 / (beta - 1)) * (beta ** (beta / (beta - 1)))
        self._constant_variables['V0'] = V0
        self._constant_variables['a'] = self.neuron_parameters['a']
        self._constant_variables['b'] = self.neuron_parameters['b']
        self._constant_variables['Vth'] = 30.0
        # Vreset is Vrest - Vth so that a spike resets V back to Vrest (-65).
        self._variables['Vreset'] = self.neuron_parameters['Vrest'] - self._constant_variables['Vth']
        self._constant_variables['Ureset'] = self.neuron_parameters['Ureset']  # 8.0
        # Quadratic-nullcline coefficients of the Izhikevich dV/dt equation.
        self._constant_variables['C1'] = 0.04
        self._constant_variables['C2'] = 5
        self._constant_variables['C3'] = 140
        self._membrane_variables['tauM'] = 1.0
        self._tau_constant_variables['tauP'] = self.neuron_parameters['tau_p']
        self._tau_constant_variables['tauQ'] = self.neuron_parameters['tau_q']
        # V = V + dt / tauM * (C1 * V * V + C2 * V + C3 - U + PSP)
        # V = V + dt / tauM * (V* (C1 * V + C2) + C3 - U + PSP)
        # U = U + dt /tauM * a. * (b. * V - U)
        # Op pipeline: PSP traces, spike test, V/U reset, then the quadratic
        # membrane update and the recovery-variable update.
        self._operations.append(('I', 'var_mult', 'V0', 'WgtSum'))
        self._operations.append(('M', 'var_linear', 'tauP', 'M', 'I[updated]'))
        self._operations.append(('S', 'var_linear', 'tauQ', 'S', 'I[updated]'))
        self._operations.append(('PSP', 'minus', 'M[updated]', 'S[updated]'))
        self._operations.append(('O', 'threshold', 'V', 'Vth'))
        self._operations.append(('VResetting', 'var_mult', 'Vreset', 'O[updated]'))
        self._operations.append(('Vtemp', 'add', 'V', 'VResetting'))
        self._operations.append(('UResetting', 'var_mult', 'Ureset', 'O[updated]'))
        self._operations.append(('Utemp', 'add', 'U', 'UResetting'))
        self._operations.append(('temp_V1', 'var_linear', 'C1', 'Vtemp', 'C2'))
        self._operations.append(('temp_V2', 'var_linear', 'temp_V1', 'Vtemp', 'C3'))
        self._operations.append(('temp_V3', 'minus', 'temp_V2', 'Utemp'))
        self._operations.append(('temp_V4', 'add', 'temp_V3', 'PSP'))
        self._operations.append(('V', 'var_linear', 'tauM', 'temp_V4', 'Vtemp'))
        self._operations.append(('temp_U1', 'var_mult', 'b', 'V'))
        self._operations.append(('temp_U2', 'minus', 'temp_U1', 'Utemp'))
        self._operations.append(('U', 'var_linear', 'a', 'temp_U2', 'Utemp'))
        # self._operations.append(('V', 'izh_v', 'V', 'U', 'PSP'))
        # self._operations.append(('U', 'izh_u', 'a', 'b', 'V[updated]', 'U'))
NeuronModel.register("izh", IZHModel)
class aEIFModel(NeuronModel):
    """
    aEIF (adaptive exponential integrate-and-fire) model:
    V = V + dt / tauM * (EL - V + EXP - U + I^n[t])
    U = U + dt / tauW * (a * (V - EL) - U)
    EXP = delta_t * delta_t2 * exp(du_th/delta_t2)
    du = V - EL
    du_th = V - Vth
    I^n[t] = V0 * WgtSum^n[t-1]
    O^n[t] = spike_func(V^n[t-1])
    """
    def __init__(self, **kwargs):
        super(aEIFModel, self).__init__()
        # Tunable parameters: PSP kernel taus, adaptation (tau_w, a, b),
        # exponential sharpness (delta_t, delta_t2) and rest potential EL.
        self.neuron_parameters['tau_p'] = kwargs.get('tau_p', 12.0)
        self.neuron_parameters['tau_q'] = kwargs.get('tau_q', 4.0)
        self.neuron_parameters['tau_w'] = kwargs.get('tau_w', 144)
        self.neuron_parameters['tau_m'] = kwargs.get('tau_m', 1.0)
        self.neuron_parameters['a'] = kwargs.get('a', 0.05)
        self.neuron_parameters['b'] = kwargs.get('b', 0.0805)
        self.neuron_parameters['delta_t'] = kwargs.get('delta_t', 30.0)
        self.neuron_parameters['delta_t2'] = kwargs.get('delta_t2', 1.0)
        self.neuron_parameters['EL'] = kwargs.get('EL', -70.6)
        # initial value for state variables
        self._variables['I'] = 0.0
        self._variables['I_che'] = 0.0
        self._variables['I_ele'] = 0.0
        self._variables['M'] = 0.0
        self._variables['S'] = 0.0
        self._variables['O'] = 0.0
        self._variables['V'] = -70.6
        # self._variables['Vt'] = -70.6
        self._variables['U'] = 0.0  # self.neuron_parameters['b'] * (-70.6)
        self._variables['WgtSum'] = 0.0
        self._variables['EXP'] = 0.0
        # V0 normalises the (tau_p, tau_q) double-exponential PSP kernel.
        beta = self.neuron_parameters['tau_p'] / self.neuron_parameters['tau_q']
        V0 = (1 / (beta - 1)) * (beta ** (beta / (beta - 1)))
        self._constant_variables['V0'] = V0
        self._constant_variables['EL'] = self.neuron_parameters['EL']
        self._constant_variables['a'] = self.neuron_parameters['a']
        self._constant_variables['b'] = self.neuron_parameters['b']
        self._constant_variables['Vth'] = -50.4
        # self._constant_variables['Ureset'] = -8.0
        # self._constant_variables['C1'] = 0.6
        self._constant_variables['delta_t'] = self.neuron_parameters['delta_t'] * self.neuron_parameters['delta_t2']
        self._constant_variables['delta_t2'] = self.neuron_parameters['delta_t2']  # *10
        self._membrane_variables['tauM'] = self.neuron_parameters['tau_m']
        self._membrane_variables['tauW'] = self.neuron_parameters['tau_w']
        # V = V + dt / tauM * (EL - V + EXP - U + I ^ n[t])
        # U = U + dt / tauW * (a * (V - EL) - U)
        # EXP = delta_t * exp(du_th / delta_t2)
        # du = V - EL
        # du_th = V - Vth
        # I ^ n[t] = V0 * WgtSum ^ n[t - 1]
        #
        # O ^ n[t] = spike_func(V ^ n[t - 1])
        # Op pipeline: input currents, exponential spike-initiation term,
        # membrane and adaptation updates, spike, then reset of V to EL and
        # the spike-triggered jump b added to U.
        self._operations.append(('I_che', 'var_mult', 'V0', 'WgtSum[updated]'))
        self._operations.append(('I', 'add', 'I_che[updated]', 'I_ele[updated]'))
        self._operations.append(('dv', 'minus', 'V', 'EL'))
        self._operations.append(('dv_th', 'minus', 'V', 'Vth'))
        self._operations.append(('EXP_T1', 'div', 'dv_th', 'delta_t2'))
        self._operations.append(('EXP_T2', 'exp', 'EXP_T1'))
        self._operations.append(('EXP', 'var_mult', 'delta_t', 'EXP_T2'))
        self._operations.append(('temp_V1', 'minus', 'EXP[updated]', 'dv'))
        self._operations.append(('temp_V2', 'minus', 'temp_V1', 'U'))
        self._operations.append(('temp_V3', 'add', 'temp_V2', 'I'))
        self._operations.append(('Vt', 'var_linear', 'tauM', 'temp_V3', 'V'))
        self._operations.append(('temp_U1', 'var_mult', 'a', 'dv'))
        self._operations.append(('temp_U2', 'minus', 'temp_U1', 'U'))
        self._operations.append(('Ut', 'var_linear', 'tauW', 'temp_U2', 'U'))
        self._operations.append(('O', 'threshold', 'Vt', 'Vth'))
        self._operations.append(('Vtemp2', 'var_mult', 'Vt', 'O[updated]'))
        self._operations.append(('Vtemp3', 'minus', 'Vt', 'Vtemp2'))
        self._operations.append(('V', 'var_linear', 'EL', 'O[updated]', 'Vtemp3'))
        self._operations.append(('U', 'var_linear', 'b', 'O[updated]', 'Ut'))
# Expose this model under both of its common names.
NeuronModel.register("aeif", aEIFModel)
NeuronModel.register("adex", aEIFModel)
class GLIFModel(NeuronModel):
    """
    Current GLIF5 model:
    V = V + dt/tau_m * (R * (I + I1 + I2) - (V - E_L))
    Theta_s = Theta_s - b_s * Theta_s
    I_j = I_j - k_j * I_j (j = 1, 2)
    Theta_v = Theta_v + a_v * (V - E_L) - b_v * Theta_v
    v_th = Theta_v + Theta_s + Theta_inf
    O = spike_func(V)
    Reset function:
    V = E_L + f_v * (V - E_L) - delta_v
    Theta_s = Theta_s + delta_Theta_s
    I_j = f_j * I_j + delta_I_j (j = 1, 2; f_j = 1)
    Theta_v = Theta_v
    After transform:
    part I:
    I^n[t] = tau_p * I + WgtSum^n[t-1]
    I_sum = I1 + I2
    I_sum = I_sum + I^n[t]
    S^n[t] = tau_q * S^n[t-1] + I_sum
    dv = V^n[t-1] - E_L
    decay = S^n[t] - dv
    V = tauM * decay + V
    Theta_s = (-b_s) * Theta_s + Theta_s
    I_j = (-k_j) * I_j + I_j
    Theta_temp = a_v * dv + Theta_v
    Theta_v = (-b_v) * Theta_v + Theta_temp
    Theta = Theta_v + Theta_s
    v_th = Theta + Theta_inf
    O = spike_func(V, v_th)
    part II(reset part):
    dv_reset = V^n[t] - E_L
    deltaV(constant) = E_L - delta_v
    Vreset = f_v * dv_reset + deltaV
    Vreset = Vreset - V
    V = Vreset * O + V
    Theta_s = Theta_s + delta_Theta_s * O
    I_j = I_j + delta_I_j * O
    """
    def __init__(self, **kwargs):
        super(GLIFModel, self).__init__()
        # Tunable parameters. Note b_s, k_1, k_2 and b_v are stored negated
        # so the "decay" ops can be expressed as var_linear additions.
        self.neuron_parameters['R'] = kwargs.get('R', 0.2)
        self.neuron_parameters['C'] = kwargs.get('C', 1.0)
        self.neuron_parameters['E_L'] = kwargs.get('E_L', 0.0)
        self.neuron_parameters['Theta_inf'] = kwargs.get('Theta_inf', 1.0)
        # self.neuron_parameters['delta_t'] = kwargs.get('delta_t', 0.0)
        self.neuron_parameters['f_v'] = kwargs.get('f_v', 0.1)
        self.neuron_parameters['delta_v'] = kwargs.get('delta_v', 0.05)
        self.neuron_parameters['b_s'] = -kwargs.get('b_s', 0.02)
        self.neuron_parameters['delta_Theta_s'] = kwargs.get('delta_Theta_s', 0.02)
        self.neuron_parameters['k_1'] = -kwargs.get('k_1', 0.1)
        self.neuron_parameters['k_2'] = -kwargs.get('k_2', 0.1)
        self.neuron_parameters['delta_I1'] = kwargs.get('delta_I1', 1.0)
        self.neuron_parameters['delta_I2'] = kwargs.get('delta_I2', 1.0)
        self.neuron_parameters['a_v'] = kwargs.get('a_v', 0.05)
        self.neuron_parameters['b_v'] = -kwargs.get('b_v', 0.1)
        self.neuron_parameters['tau_p'] = kwargs.get('tau_p', 1.0)
        self.neuron_parameters['tau_q'] = kwargs.get('tau_q', 1.0)
        # State variables: membrane V, spike threshold components Theta_*,
        # after-spike currents I1/I2, filtered input I/S, output O, v_th.
        self._variables['V'] = 0.0
        self._variables['Theta_s'] = 0.1
        self._variables['Theta_v'] = 0.2
        self._variables['I1'] = 0.08
        self._variables['I2'] = 0.12
        self._variables['I'] = 0.0
        self._variables['S'] = 0.0
        self._variables['O'] = 0.0
        self._variables['v_th'] = 0.0
        self._constant_variables['deltaV'] = self.neuron_parameters['E_L'] - self.neuron_parameters['delta_v']
        self._constant_variables['R'] = self.neuron_parameters['R']
        self._constant_variables['C'] = self.neuron_parameters['C']
        self._constant_variables['E_L'] = self.neuron_parameters['E_L']
        self._constant_variables['Theta_inf'] = self.neuron_parameters['Theta_inf']
        self._constant_variables['f_v'] = self.neuron_parameters['f_v']
        self._constant_variables['delta_v'] = self.neuron_parameters['delta_v']
        self._constant_variables['b_s'] = self.neuron_parameters['b_s']
        self._constant_variables['delta_Theta_s'] = self.neuron_parameters['delta_Theta_s']
        self._constant_variables['k1'] = self.neuron_parameters['k_1']
        self._constant_variables['k2'] = self.neuron_parameters['k_2']
        self._constant_variables['delta_I1'] = self.neuron_parameters['delta_I1']
        self._constant_variables['delta_I2'] = self.neuron_parameters['delta_I2']
        self._constant_variables['a_v'] = self.neuron_parameters['a_v']
        self._constant_variables['b_v'] = self.neuron_parameters['b_v']
        # Membrane time constant tau_m = R * C.
        self._membrane_variables['tau_m'] = self.neuron_parameters['R'] * self.neuron_parameters['C']
        self._tau_constant_variables['tauP'] = self.neuron_parameters['tau_p']
        self._tau_constant_variables['tauQ'] = self.neuron_parameters['tau_q']
        # self._tau_constant_variables['k1'] = self.neuron_parameters['k_1']
        # self._tau_constant_variables['k2'] = self.neuron_parameters['k_2']
        # Part I: integrate inputs, decay thresholds/currents, spike test.
        self._operations.append(('I', 'var_linear', 'tauP', 'I', 'WgtSum'))
        self._operations.append(('I_sum', 'add', 'I1', 'I2'))
        self._operations.append(('I_sum', 'add', 'I_sum', 'I[updated]'))
        self._operations.append(('S', 'var_linear', 'tauQ', 'S', 'I_sum'))
        self._operations.append(('dv', 'minus', 'V', 'E_L'))
        self._operations.append(('decay', 'minus', 'S[updated]', 'dv'))
        self._operations.append(('Vt', 'var_linear', 'tau_m', 'decay', 'V'))
        self._operations.append(('Theta_st', 'var_linear', 'b_s', 'Theta_s', 'Theta_s'))
        self._operations.append(('I1t', 'var_linear', 'k1', 'I1', 'I1'))
        self._operations.append(('I2t', 'var_linear', 'k2', 'I2', 'I2'))
        self._operations.append(('Theta_temp', 'var_linear', 'a_v', 'dv', 'Theta_v'))
        self._operations.append(('Theta_v', 'var_linear', 'b_v', 'Theta_v', 'Theta_temp'))
        self._operations.append(('Theta', 'add', 'Theta_v[updated]', 'Theta_st'))
        self._operations.append(('v_th', 'add', 'Theta', 'Theta_inf'))
        self._operations.append(('O', 'threshold', 'Vt', 'v_th[updated]'))
        # Part II: spike-triggered resets of I1, I2, Theta_s and V.
        self._operations.append(('I1', 'var_linear', 'delta_I1', 'O[updated]', 'I1t'))
        self._operations.append(('I2', 'var_linear', 'delta_I2', 'O[updated]', 'I2t'))
        self._operations.append(('Theta_s', 'var_linear', 'delta_Theta_s', 'O[updated]', 'Theta_st'))
        self._operations.append(('dv_reset', 'minus', 'Vt', 'E_L'))
        self._operations.append(('Vreset', 'var_linear', 'f_v', 'dv_reset', 'deltaV'))
        # NOTE(review): this extra '+ E_L' is not in the docstring's reset
        # equations (harmless when E_L == 0, the default) — confirm intended.
        self._operations.append(('Vreset', 'add', 'Vreset', 'E_L'))
        self._operations.append(('Vreset', 'minus', 'Vreset', 'Vt'))
        self._operations.append(('V', 'var_linear', 'Vreset', 'O[updated]', 'Vt'))
NeuronModel.register("glif", GLIFModel)
class HodgkinHuxleyModel(NeuronModel):
    """
    Hodgkin-Huxley model:
    V = V + dt/tau_v * (I - Ik)
    Ik = NA + K + L
    NA = g_NA * m^3 * h * (V - V_NA)
    K = g_K * n^4 * (V - V_K)
    L = g_L * (V - V_L)
    m = m + dt/tau_m * (alpha_m * (1-m) - beta_m * m)
    n = n + dt/tau_n * (alpha_n * (1-n) - beta_n * n)
    h = h + dt/tau_h * (alpha_h * (1-h) - beta_h * h)
    original function:
    alpha_m = 0.1 * (-V + 25) / (exp((-V+25)/10) - 1)
    beta_m = 4 * exp(-V/18)
    alpha_n = 0.01 * (-V + 10) / (exp((-V+10)/10) - 1)
    beta_n = 0.125 * exp(-V/80)
    alpha_h = 0.07 * exp(-V/20)
    beta_h = 1/(exp((-V+30)/10) + 1)
    O^n[t] = spike_func(V^n[t-1])
    """
    def __init__(self, **kwargs):
        super(HodgkinHuxleyModel, self).__init__()
        # Conductances and reversal potentials (classic HH values).
        self.neuron_parameters['dt'] = kwargs.get('dt', 0.1)
        self.neuron_parameters['g_NA'] = kwargs.get('g_NA', 120.0)
        self.neuron_parameters['g_K'] = kwargs.get('g_K', 36.0)
        self.neuron_parameters['g_L'] = kwargs.get('g_L', 0.3)
        self.neuron_parameters['E_NA'] = kwargs.get('E_NA', 120.0)
        self.neuron_parameters['E_K'] = kwargs.get('E_K', -12.0)
        self.neuron_parameters['E_L'] = kwargs.get('E_L', 10.6)
        # Rate-function coefficients, parameterised as:
        # a_m = a_m1 * (-V + a_m2) / (exp((-V + a_m2) / a_m3 - 1)
        # b_m = b_m1 * exp((-V + b_m2) / b_m3)
        self.neuron_parameters['a_m1'] = kwargs.get('alpha_m1', 0.1)
        self.neuron_parameters['a_m2'] = kwargs.get('alpha_m2', 25.0)
        self.neuron_parameters['a_m3'] = kwargs.get('alpha_m3', 10.0)
        # self.neuron_parameters['a_m4'] = kwargs.get('alpha_m4', -1)
        self.neuron_parameters['b_m1'] = kwargs.get('beta_m1', 4.0)
        self.neuron_parameters['b_m2'] = kwargs.get('beta_m2', 0.0)
        self.neuron_parameters['b_m3'] = kwargs.get('beta_m3', 18.0)
        # a_n = a_n1 * (-V + a_n2) / (exp((-V + a_n2) / a_n3 - 1)
        # b_n = b_n1 * exp((-V + b_n2) / b_n3)
        self.neuron_parameters['a_n1'] = kwargs.get('alpha_n1', 0.01)
        self.neuron_parameters['a_n2'] = kwargs.get('alpha_n2', 10.0)
        self.neuron_parameters['a_n3'] = kwargs.get('alpha_n3', 10.0)
        # self.neuron_parameters['a_n4'] = kwargs.get('alpha_n4', -1)
        self.neuron_parameters['b_n1'] = kwargs.get('beta_n1', 0.125)
        self.neuron_parameters['b_n2'] = kwargs.get('beta_n2', 0.0)
        self.neuron_parameters['b_n3'] = kwargs.get('beta_n3', 80.0)
        # a_h = a_h1 * exp((-V + a_h2) / a_h3)
        # b_h = b_h1 / (exp((-V + b_h2) / b_h3 + 1)
        self.neuron_parameters['a_h1'] = kwargs.get('alpha_h1', 0.07)
        self.neuron_parameters['a_h2'] = kwargs.get('alpha_h2', 0.0)
        self.neuron_parameters['a_h3'] = kwargs.get('alpha_h3', 20.0)
        self.neuron_parameters['b_h1'] = kwargs.get('beta_h1', 1.0)  # numerator
        self.neuron_parameters['b_h2'] = kwargs.get('beta_h2', 30.0)
        self.neuron_parameters['b_h3'] = kwargs.get('beta_h3', 10.0)
        # NOTE(review): key '65' is the voltage offset added to V before the
        # rate functions (kwarg 'V65'); default 0.0 — confirm naming.
        self.neuron_parameters['65'] = kwargs.get('V65', 0.0)
        self.neuron_parameters['m'] = kwargs.get('m', 0.5)
        self.neuron_parameters['n'] = kwargs.get('n', 0.5)
        self.neuron_parameters['h'] = kwargs.get('h', 0.06)
        # self.neuron_parameters['b_h4'] = kwargs.get('beta_h4', 1)
        self.neuron_parameters['V'] = kwargs.get('V', 0.0)
        self.neuron_parameters['vth'] = kwargs.get('vth', 1.0)
        # State variables: input current, output spike, membrane V and the
        # three gating variables m, n, h.
        self._variables['I'] = 0.0
        self._variables['O'] = 0.0
        self._variables['V'] = self.neuron_parameters['V']
        self._variables['m'] = self.neuron_parameters['m']
        self._variables['n'] = self.neuron_parameters['n']
        self._variables['h'] = self.neuron_parameters['h']
        # beta = self.neuron_parameters['tau_p'] / self.neuron_parameters['tau_q']
        # V0 = (1 / (beta - 1)) * (beta ** (beta / (beta - 1)))
        self._constant_variables['V0'] = 1.0
        self._constant_variables['Vth'] = self.neuron_parameters['vth']
        self._constant_variables['Vreset'] = 0.0
        # Unit time constants so each gate/membrane update is scaled by dt.
        self._membrane_variables['tauM'] = 1.0
        self._membrane_variables['tauN'] = 1.0
        self._membrane_variables['tauH'] = 1.0
        self._membrane_variables['tauV'] = 1.0
        self._constant_variables['1'] = 1.0
        self._constant_variables['65'] = self.neuron_parameters['65']
        self._constant_variables['dt'] = self.neuron_parameters['dt']
        self._constant_variables['g_NA'] = self.neuron_parameters['g_NA']
        self._constant_variables['g_K'] = self.neuron_parameters['g_K']
        self._constant_variables['g_L'] = self.neuron_parameters['g_L']
        self._constant_variables['E_NA'] = self.neuron_parameters['E_NA']
        self._constant_variables['E_K'] = self.neuron_parameters['E_K']
        self._constant_variables['E_L'] = self.neuron_parameters['E_L']
        self._constant_variables['a_m1'] = self.neuron_parameters['a_m1']
        self._constant_variables['a_m2'] = self.neuron_parameters['a_m2']
        self._constant_variables['a_m3'] = self.neuron_parameters['a_m3']
        self._constant_variables['b_m1'] = self.neuron_parameters['b_m1']
        self._constant_variables['b_m2'] = self.neuron_parameters['b_m2']
        self._constant_variables['b_m3'] = self.neuron_parameters['b_m3']
        self._constant_variables['a_n1'] = self.neuron_parameters['a_n1']
        self._constant_variables['a_n2'] = self.neuron_parameters['a_n2']
        self._constant_variables['a_n3'] = self.neuron_parameters['a_n3']
        self._constant_variables['b_n1'] = self.neuron_parameters['b_n1']
        self._constant_variables['b_n2'] = self.neuron_parameters['b_n2']
        self._constant_variables['b_n3'] = self.neuron_parameters['b_n3']
        self._constant_variables['a_h1'] = self.neuron_parameters['a_h1']
        self._constant_variables['a_h2'] = self.neuron_parameters['a_h2']
        self._constant_variables['a_h3'] = self.neuron_parameters['a_h3']
        self._constant_variables['b_h1'] = self.neuron_parameters['b_h1']
        self._constant_variables['b_h2'] = self.neuron_parameters['b_h2']
        self._constant_variables['b_h3'] = self.neuron_parameters['b_h3']
        # Shifted voltage used by all rate functions below.
        self._operations.append(('V65', 'add', 'V', '65'))
        # a_m = a_m1 * (-V + a_m2) / (exp((-V + a_m2) / a_m3) - 1)
        # b_m = b_m1 * exp((-V + b_m2) / b_m3)
        # alpha_m
        self._operations.append(('Vam', 'minus', 'a_m2', 'V65'))
        self._operations.append(('Vamd', 'div', 'Vam', 'a_m3'))
        self._operations.append(('expVamd1', 'exp', 'Vamd'))
        self._operations.append(('expVamd', 'minus', 'expVamd1', '1'))
        self._operations.append(('amtemp', 'div', 'Vam', 'expVamd'))
        self._operations.append(('a_m', 'var_mult', 'a_m1', 'amtemp'))
        # beta_m
        self._operations.append(('Vbm', 'minus', 'b_m2', 'V65'))
        self._operations.append(('Vbmd', 'div', 'Vbm', 'b_m3'))
        self._operations.append(('expVbmd', 'exp', 'Vbmd'))
        self._operations.append(('b_m', 'var_mult', 'b_m1', 'expVbmd'))
        # a_n = a_n1 * (-V + a_n2) / (exp((-V + a_n2) / a_n3) - 1)
        # b_n = b_n1 * exp((-V + b_n2) / b_n3)
        # alpha_n
        self._operations.append(('Van', 'minus', 'a_n2', 'V65'))
        self._operations.append(('Vand', 'div', 'Van', 'a_n3'))
        self._operations.append(('expVand1', 'exp', 'Vand'))
        self._operations.append(('expVand', 'minus', 'expVand1', '1'))
        self._operations.append(('antemp', 'div', 'Van', 'expVand'))
        self._operations.append(('a_n', 'var_mult', 'a_n1', 'antemp'))
        # beta_n
        self._operations.append(('Vbn', 'minus', 'b_n2', 'V65'))
        self._operations.append(('Vbnd', 'div', 'Vbn', 'b_n3'))
        self._operations.append(('expVbnd', 'exp', 'Vbnd'))
        self._operations.append(('b_n', 'var_mult', 'b_n1', 'expVbnd'))
        # a_h = a_h1 * exp((-V + a_h2) / a_h3)
        # b_h = b_h1 / (exp((-V + b_h2) / b_h3 + 1)
        # alpha_h
        self._operations.append(('Vah', 'minus', 'a_h2', 'V65'))
        self._operations.append(('Vahd', 'div', 'Vah', 'a_h3'))
        self._operations.append(('expVahd', 'exp', 'Vahd'))
        self._operations.append(('a_h', 'var_mult', 'a_h1', 'expVahd'))
        # beta_h
        self._operations.append(('Vbh', 'minus', 'b_h2', 'V65'))
        self._operations.append(('Vbhd', 'div', 'Vbh', 'b_h3'))
        self._operations.append(('expVbhd1', 'exp', 'Vbhd'))
        self._operations.append(('expVbhd', 'add', 'expVbhd1', '1'))
        self._operations.append(('b_h', 'div', 'b_h1', 'expVbhd'))
        # Gate updates (forward Euler via the tau* membrane variables):
        # m = m + alpha_m * (1 - m) - beta_m * m
        # n = n + alpha_n * (1 - n) - beta_n * n
        # h = h + alpha_h * (1 - h) - beta_h * h
        # m
        self._operations.append(('mtemp1', 'minus', '1', 'm'))
        self._operations.append(('mtemp2', 'var_mult', 'a_m', 'mtemp1'))
        self._operations.append(('betam', 'var_mult', 'b_m', 'm'))
        self._operations.append(('mtemp3', 'minus', 'mtemp2', 'betam'))
        self._operations.append(('mtemp4', 'var_mult', 'tauM', 'mtemp3'))
        self._operations.append(('m', 'add', 'mtemp4', 'm'))
        # n
        self._operations.append(('ntemp1', 'minus', '1', 'n'))
        self._operations.append(('ntemp2', 'var_mult', 'a_n', 'ntemp1'))
        self._operations.append(('betan', 'var_mult', 'b_n', 'n'))
        self._operations.append(('ntemp3', 'minus', 'ntemp2', 'betan'))
        self._operations.append(('ntemp4', 'var_mult', 'tauN', 'ntemp3'))
        self._operations.append(('n', 'add', 'ntemp4', 'n'))
        # h
        self._operations.append(('htemp1', 'minus', '1', 'h'))
        self._operations.append(('htemp2', 'var_mult', 'a_h', 'htemp1'))
        self._operations.append(('betah', 'var_mult', 'b_h', 'h'))
        self._operations.append(('htemp3', 'minus', 'htemp2', 'betah'))
        self._operations.append(('htemp4', 'var_mult', 'tauH', 'htemp3'))
        self._operations.append(('h', 'add', 'htemp4', 'h'))
        # Channel conductances: g_NA * m^3 * h
        self._operations.append(('m2', 'var_mult', 'm[updated]', 'm[updated]'))
        self._operations.append(('m3', 'var_mult', 'm2', 'm[updated]'))
        self._operations.append(('m3h', 'var_mult', 'm3', 'h[updated]'))
        self._operations.append(('g_NAm3h', 'var_mult', 'g_NA', 'm3h'))
        # g_K * n^4
        self._operations.append(('n2', 'var_mult', 'n[updated]', 'n[updated]'))
        self._operations.append(('n4', 'var_mult', 'n2', 'n2'))
        self._operations.append(('g_Kn4', 'var_mult', 'g_K', 'n4'))
        # Driving forces relative to the reversal potentials.
        self._operations.append(('d_NA', 'minus', 'V', 'E_NA'))
        self._operations.append(('d_K', 'minus', 'V', 'E_K'))
        self._operations.append(('d_L', 'minus', 'V', 'E_L'))
        # Ik, NA, K, L: total ionic current.
        self._operations.append(('NA', 'var_mult', 'g_NAm3h', 'd_NA'))
        self._operations.append(('K', 'var_mult', 'g_Kn4', 'd_K'))
        self._operations.append(('L', 'var_mult', 'g_L', 'd_L'))
        self._operations.append(('Ik1', 'add', 'NA', 'K'))
        self._operations.append(('Ik2', 'add', 'Ik1', 'L'))
        self._operations.append(('Ik', 'var_mult', 'tauV', 'Ik2'))
        # Membrane update: V += I - Ik.
        self._operations.append(('I', 'var_mult', 'V0', 'WgtSum'))
        self._operations.append(('Vtemp', 'minus', 'I[updated]', 'Ik'))
        self._operations.append(('V', 'add', 'V', 'Vtemp'))
        # Spike output.
        self._operations.append(('O', 'threshold', 'V[updated]', 'Vth'))
NeuronModel.register("hh", HodgkinHuxleyModel)
class LIFSTDPEXModel(NeuronModel):
    """
    Leaky integrate-and-fire neuron with an adaptive threshold (the
    ``_ex`` variant used together with STDP learning).

    Update equations::

        I[t]     = V0 * WgtSum[t]                       (V0 = 1)
        V[t]     = decay_v * (V[t-1] - v_rest) + v_rest + I[t]
        theta[t] = decay_th * theta[t-1]
        O[t]     = 1  if V[t] >= v_th + theta[t]  else 0
        reset:     V = O * v_reset + (1 - O) * V
                   theta = theta + O * th_inc
    """

    def __init__(self, **kwargs):
        super().__init__()

        # Tunable parameters with their defaults (overridable via kwargs).
        for name, default in (('decay_v', np.exp(-1 / 100)),
                              ('decay_th', np.exp(-1 / 1e7)),
                              ('th_inc', 0.05),
                              ('v_th', -52.0),
                              ('v_rest', -65.0),
                              ('v_reset', -60.0)):
            self.neuron_parameters[name] = kwargs.get(name, default)

        # Initial values of the state variables.
        for name, value in (('I', 0.0),
                            ('V', -65.0),
                            ('O', 0.0),
                            ('WgtSum', 0.0),
                            ('theta[stay]', 0.0),
                            ('Vth_theta', 0.0)):
            self._variables[name] = value

        # Constants exposed to the compute graph.
        self._constant_variables['V0'] = 1
        for const, param in (('Vth', 'v_th'),
                             ('Vreset', 'v_reset'),
                             ('Vrest', 'v_rest'),
                             ('th_inc', 'th_inc'),
                             ('decay_th', 'decay_th'),
                             ('decay_v', 'decay_v')):
            self._constant_variables[const] = self.neuron_parameters[param]

        # Compute-graph operations; the sequence defines evaluation order.
        for op in (('I', 'var_mult', 'V0', 'WgtSum[updated]'),
                   ('PSP1', 'minus', 'V', 'Vrest'),
                   ('PSP2', 'var_linear', 'decay_v', 'PSP1', 'Vrest'),
                   ('Vtemp', 'add', 'PSP2', 'I[updated]'),
                   ('theta_temp', 'var_mult', 'decay_th', 'theta[stay]'),
                   ('Vth_theta', 'add', 'Vth', 'theta_temp'),
                   ('O', 'threshold', 'Vtemp', 'Vth_theta'),
                   ('Resetting1', 'var_mult', 'Vreset', 'O[updated]'),
                   ('Resetting2', 'var_mult', 'Vtemp', 'O[updated]'),
                   ('Resetting3', 'minus', 'Vtemp', 'Resetting2'),
                   ('V', 'add', 'Resetting1', 'Resetting3'),
                   ('Resetting_theta', 'var_mult', 'O[updated]', 'th_inc'),
                   ('theta_stay', 'add', 'theta_temp', 'Resetting_theta')):
            self._operations.append(op)
# Register this model under the key "lifstdp_ex" so it can be instantiated by name.
NeuronModel.register("lifstdp_ex", LIFSTDPEXModel)
class LIFSTDPIHModel(NeuronModel):
    """
    Leaky integrate-and-fire neuron with a fixed threshold (the ``_ih``
    variant used together with STDP learning).

    Update equations::

        I[t] = V0 * WgtSum[t]                       (V0 = 1)
        V[t] = decay_v * (V[t-1] - v_rest) + v_rest + I[t]
        O[t] = 1  if V[t] >= v_th  else 0
        reset: V = O * v_reset + (1 - O) * V
    """

    def __init__(self, **kwargs):
        super().__init__()

        # Tunable parameters with their defaults (overridable via kwargs).
        for name, default in (('decay_v', np.exp(-1 / 10)),
                              ('v_th', -40.0),
                              ('v_rest', -60.0),
                              ('v_reset', -45.0)):
            self.neuron_parameters[name] = kwargs.get(name, default)

        # Initial values of the state variables.
        for name, value in (('I', 0.0),
                            ('V', -60.0),
                            ('O', 0.0),
                            ('WgtSum', 0.0)):
            self._variables[name] = value

        # Constants exposed to the compute graph.
        self._constant_variables['V0'] = 1
        for const, param in (('Vth', 'v_th'),
                             ('Vreset', 'v_reset'),
                             ('Vrest', 'v_rest'),
                             ('decay_v', 'decay_v')):
            self._constant_variables[const] = self.neuron_parameters[param]

        # Compute-graph operations; the sequence defines evaluation order.
        for op in (('I', 'var_mult', 'V0', 'WgtSum[updated]'),
                   ('PSP1', 'minus', 'V', 'Vrest'),
                   ('PSP2', 'var_linear', 'decay_v', 'PSP1', 'Vrest'),
                   ('Vtemp', 'add', 'PSP2', 'I[updated]'),
                   ('O', 'threshold', 'Vtemp', 'Vth'),
                   ('Resetting1', 'var_mult', 'Vreset', 'O[updated]'),
                   ('Resetting2', 'var_mult', 'Vtemp', 'O[updated]'),
                   ('Resetting3', 'minus', 'Vtemp', 'Resetting2'),
                   ('V', 'add', 'Resetting1', 'Resetting3')):
            self._operations.append(op)
# Register this model under the key "lifstdp_ih" so it can be instantiated by name.
NeuronModel.register("lifstdp_ih", LIFSTDPIHModel)
class CANN_MeanFieldModel(NeuronModel):
    """
    Mean-field model of a continuous attractor neural network (CANN).

    References: Fung CC et al., "A moving bump in a continuous manifold ...",
    Neural Comput. 2010;22(3):752-92, doi:10.1162/neco.2009.07-08-824, and
    Wu S et al., "Continuous Attractor Neural Networks: Candidate of a
    Canonical Model for Neural Information Representation", F1000Res. 2016,
    doi:10.12688/f1000research.7387.1.

    Implemented update::

        U = U + dt/tau * (Iext + rho*WgtSum - U)
        O = relu(U)^2 / (1 + k * sum(relu(U)^2))

    where ``k`` is the ``k`` kwarg (held internally as ``k_rho``) and
    ``WgtSum = weight * O_pre``.
    """

    def __init__(self, **kwargs):
        super().__init__()

        # Constants exposed to the compute graph.
        self._constant_variables['rho'] = kwargs.get('rho', 0.02)
        self._constant_variables['k_rho'] = kwargs.get('k', 0.01)
        self._constant_variables['1'] = 1
        self._constant_variables['2'] = 2

        # Membrane time constant (integrated with dt by the backend).
        self._membrane_variables['tau'] = kwargs.get('tau', 1.0)

        # State variables, all starting at zero.
        for name in ('Iext', 'WgtSum', 'U', 'O'):
            self._variables[name] = 0.0

        # Compute-graph operations; the sequence defines evaluation order.
        for op in (('Isum', 'var_linear', 'rho', 'WgtSum', 'Iext'),
                   ('dU', 'minus', 'Isum', 'U'),
                   ('U', 'var_linear', 'tau', 'dU', 'U'),
                   ('ReU', 'relu', 'U[updated]'),
                   ('U2', 'var_mult', 'ReU', 'ReU'),
                   ('SumU2', 'reduce_sum', 'U2', '1'),
                   ('RBase', 'var_linear', 'k_rho', 'SumU2', '1'),
                   ('O', 'div', 'U2', 'RBase')):
            self._operations.append(op)
# Register this model under the key "cann_field" so it can be instantiated by name.
NeuronModel.register("cann_field", CANN_MeanFieldModel)
class MeanFieldModel(NeuronModel):
    """
    Mean-field (rate) model of a LIF population.

    Implemented update::

        U = U + dt/tau * (rho*WgtSum + Iext - U)
        O = relu(U)

    with ``WgtSum = weight * O_pre``.  (The ``var_linear`` op computes
    ``rho*WgtSum + Iext``, i.e. only the recurrent input is scaled by rho.)
    """

    def __init__(self, **kwargs):
        super().__init__()

        # Constants exposed to the compute graph.
        self._constant_variables['rho'] = kwargs.get('rho', 0.1)
        self._constant_variables['1'] = 1
        self._constant_variables['2'] = 2

        # Membrane time constant (integrated with dt by the backend).
        self._membrane_variables['tau'] = kwargs.get('tau', 1.0)

        # State variables, all starting at zero.
        for name in ('Iext', 'WgtSum', 'U', 'O'):
            self._variables[name] = 0.0

        # Compute-graph operations; the sequence defines evaluation order.
        for op in (('Isum', 'var_linear', 'rho', 'WgtSum', 'Iext'),
                   ('dU', 'minus', 'Isum', 'U'),
                   ('U', 'var_linear', 'tau', 'dU', 'U'),
                   ('O', 'relu', 'U[updated]')):
            self._operations.append(op)
# Register this model under the key "meanfield" so it can be instantiated by name.
NeuronModel.register("meanfield", MeanFieldModel)
class SimpleRateModel(NeuronModel):
    """
    Simple rate model::

        U = U + dt/tau * (sigmoid(Iext + WgtSum) - U)

    with ``WgtSum = weight * O_pre``.
    """
    def __init__(self, **kwargs):
        # BUG FIX: the original called super(MeanFieldModel, self).__init__(),
        # which raises "TypeError: super(type, obj): obj must be an instance
        # or subtype of type" because self is a SimpleRateModel, not a
        # MeanFieldModel.  The zero-argument form is equivalent and correct.
        super().__init__()
        # Membrane time constant (integrated with dt by the backend).
        self._membrane_variables['tau'] = kwargs.get('tau', 1.0)
        # State variables.
        self._variables['Iext'] = 0.0
        self._variables['WgtSum'] = 0.0
        self._variables['U'] = 0.0
        # Compute-graph operations; the sequence defines evaluation order.
        self._operations.append(('Isum', 'add', 'WgtSum', 'Iext'))
        self._operations.append(('F', 'sigmoid', 'Isum'))
        self._operations.append(('dU', 'minus', 'F', 'U'))
        self._operations.append(('U', 'var_linear', 'tau', 'dU', 'U'))
| [
"numpy.stack",
"torch.mean",
"torch.stack",
"torch.zeros_like",
"torch.norm",
"torch.zeros",
"torch.clamp",
"numpy.array",
"numpy.exp",
"collections.OrderedDict",
"re.search",
"numpy.prod"
] | [((2392, 2405), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2403, 2405), False, 'from collections import OrderedDict\n'), ((21912, 21953), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'backend.device'}), '(shape, device=backend.device)\n', (21923, 21953), False, 'import torch\n'), ((21979, 22020), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'backend.device'}), '(shape, device=backend.device)\n', (21990, 22020), False, 'import torch\n'), ((22046, 22087), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'backend.device'}), '(shape, device=backend.device)\n', (22057, 22087), False, 'import torch\n'), ((22113, 22154), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'backend.device'}), '(shape, device=backend.device)\n', (22124, 22154), False, 'import torch\n'), ((22177, 22209), 'numpy.exp', 'np.exp', (['(-backend.dt / self.tau_m)'], {}), '(-backend.dt / self.tau_m)\n', (22183, 22209), True, 'import numpy as np\n'), ((22232, 22264), 'numpy.exp', 'np.exp', (['(-backend.dt / self.tau_s)'], {}), '(-backend.dt / self.tau_s)\n', (22238, 22264), True, 'import numpy as np\n'), ((22287, 22319), 'numpy.exp', 'np.exp', (['(-backend.dt / self.tau_e)'], {}), '(-backend.dt / self.tau_e)\n', (22293, 22319), True, 'import numpy as np\n'), ((22342, 22374), 'numpy.exp', 'np.exp', (['(-backend.dt / self.tau_r)'], {}), '(-backend.dt / self.tau_r)\n', (22348, 22374), True, 'import numpy as np\n'), ((25510, 25538), 'numpy.stack', 'np.stack', (['grads[1:]'], {'axis': '(-1)'}), '(grads[1:], axis=-1)\n', (25518, 25538), True, 'import numpy as np\n'), ((27602, 27643), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'backend.device'}), '(shape, device=backend.device)\n', (27613, 27643), False, 'import torch\n'), ((27669, 27710), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'backend.device'}), '(shape, device=backend.device)\n', (27680, 27710), False, 'import torch\n'), ((27736, 27777), 'torch.zeros', 'torch.zeros', (['shape'], 
{'device': 'backend.device'}), '(shape, device=backend.device)\n', (27747, 27777), False, 'import torch\n'), ((27803, 27844), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'backend.device'}), '(shape, device=backend.device)\n', (27814, 27844), False, 'import torch\n'), ((27870, 27915), 'torch.zeros', 'torch.zeros', (['(1, 2, 1)'], {'device': 'backend.device'}), '((1, 2, 1), device=backend.device)\n', (27881, 27915), False, 'import torch\n'), ((27936, 27968), 'numpy.exp', 'np.exp', (['(-backend.dt / self.tau_m)'], {}), '(-backend.dt / self.tau_m)\n', (27942, 27968), True, 'import numpy as np\n'), ((27991, 28023), 'numpy.exp', 'np.exp', (['(-backend.dt / self.tau_s)'], {}), '(-backend.dt / self.tau_s)\n', (27997, 28023), True, 'import numpy as np\n'), ((28046, 28078), 'numpy.exp', 'np.exp', (['(-backend.dt / self.tau_e)'], {}), '(-backend.dt / self.tau_e)\n', (28052, 28078), True, 'import numpy as np\n'), ((31216, 31244), 'numpy.stack', 'np.stack', (['grads[1:]'], {'axis': '(-1)'}), '(grads[1:], axis=-1)\n', (31224, 31244), True, 'import numpy as np\n'), ((32577, 32618), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'backend.device'}), '(shape, device=backend.device)\n', (32588, 32618), False, 'import torch\n'), ((32644, 32685), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'backend.device'}), '(shape, device=backend.device)\n', (32655, 32685), False, 'import torch\n'), ((32711, 32752), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'backend.device'}), '(shape, device=backend.device)\n', (32722, 32752), False, 'import torch\n'), ((32775, 32807), 'numpy.exp', 'np.exp', (['(-backend.dt / self.tau_m)'], {}), '(-backend.dt / self.tau_m)\n', (32781, 32807), True, 'import numpy as np\n'), ((32830, 32862), 'numpy.exp', 'np.exp', (['(-backend.dt / self.tau_s)'], {}), '(-backend.dt / self.tau_s)\n', (32836, 32862), True, 'import numpy as np\n'), ((32885, 32917), 'numpy.exp', 'np.exp', (['(-backend.dt / self.tau_e)'], {}), '(-backend.dt / 
self.tau_e)\n', (32891, 32917), True, 'import numpy as np\n'), ((1552, 1577), 'numpy.array', 'np.array', (['neuron_position'], {}), '(neuron_position)\n', (1560, 1577), True, 'import numpy as np\n'), ((2552, 2571), 'numpy.prod', 'np.prod', (['self.shape'], {}), '(self.shape)\n', (2559, 2571), True, 'import numpy as np\n'), ((4483, 4504), 'numpy.exp', 'np.exp', (['(-dt / tau_var)'], {}), '(-dt / tau_var)\n', (4489, 4504), True, 'import numpy as np\n'), ((5259, 5287), 're.search', 're.search', (['"""\\\\[\\\\d*\\\\]"""', 'key'], {}), "('\\\\[\\\\d*\\\\]', key)\n", (5268, 5287), False, 'import re\n'), ((67702, 67718), 'numpy.exp', 'np.exp', (['(-1 / 100)'], {}), '(-1 / 100)\n', (67708, 67718), True, 'import numpy as np\n'), ((67786, 67809), 'numpy.exp', 'np.exp', (['(-1 / 10000000.0)'], {}), '(-1 / 10000000.0)\n', (67792, 67809), True, 'import numpy as np\n'), ((70361, 70376), 'numpy.exp', 'np.exp', (['(-1 / 10)'], {}), '(-1 / 10)\n', (70367, 70376), True, 'import numpy as np\n'), ((22813, 22836), 'torch.norm', 'torch.norm', (['grad'], {'dim': '(0)'}), '(grad, dim=0)\n', (22823, 22836), False, 'import torch\n'), ((22873, 22896), 'torch.mean', 'torch.mean', (['grad'], {'dim': '(0)'}), '(grad, dim=0)\n', (22883, 22896), False, 'import torch\n'), ((24120, 24155), 'torch.clamp', 'torch.clamp', (['self.V', '(-10)', 'self.v_th'], {}), '(self.V, -10, self.v_th)\n', (24131, 24155), False, 'import torch\n'), ((28641, 28664), 'torch.norm', 'torch.norm', (['grad'], {'dim': '(0)'}), '(grad, dim=0)\n', (28651, 28664), False, 'import torch\n'), ((28701, 28724), 'torch.mean', 'torch.mean', (['grad'], {'dim': '(0)'}), '(grad, dim=0)\n', (28711, 28724), False, 'import torch\n'), ((33919, 33954), 'torch.clamp', 'torch.clamp', (['self.V', '(-10)', 'self.v_th'], {}), '(self.V, -10, self.v_th)\n', (33930, 33954), False, 'import torch\n'), ((22997, 23020), 'torch.norm', 'torch.norm', (['grad'], {'dim': '(0)'}), '(grad, dim=0)\n', (23007, 23020), False, 'import torch\n'), ((23105, 23128), 
'torch.mean', 'torch.mean', (['grad'], {'dim': '(0)'}), '(grad, dim=0)\n', (23115, 23128), False, 'import torch\n'), ((28825, 28848), 'torch.norm', 'torch.norm', (['grad'], {'dim': '(0)'}), '(grad, dim=0)\n', (28835, 28848), False, 'import torch\n'), ((28933, 28956), 'torch.mean', 'torch.mean', (['grad'], {'dim': '(0)'}), '(grad, dim=0)\n', (28943, 28956), False, 'import torch\n'), ((6282, 6295), 'numpy.array', 'np.array', (['var'], {}), '(var)\n', (6290, 6295), True, 'import numpy as np\n'), ((25185, 25216), 'torch.stack', 'torch.stack', (['self.rec_E'], {'dim': '(-1)'}), '(self.rec_E, dim=-1)\n', (25196, 25216), False, 'import torch\n'), ((30891, 30922), 'torch.stack', 'torch.stack', (['self.rec_E'], {'dim': '(-1)'}), '(self.rec_E, dim=-1)\n', (30902, 30922), False, 'import torch\n'), ((25459, 25478), 'torch.zeros_like', 'torch.zeros_like', (['v'], {}), '(v)\n', (25475, 25478), False, 'import torch\n'), ((31165, 31184), 'torch.zeros_like', 'torch.zeros_like', (['v'], {}), '(v)\n', (31181, 31184), False, 'import torch\n')] |
import os

# BUG FIX: TF_CPP_MIN_LOG_LEVEL must be set *before* TensorFlow/Keras is
# imported -- the native log level is read once at import time, so setting
# it afterwards (as the original did) has no effect.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import cv2
import numpy as np
from dotenv import load_dotenv
from keras.models import load_model

load_dotenv()

# Binary mask/no-mask classifier trained on 150x150 RGB face crops.
model = load_model("./model-010.h5")

results = {0: 'no mask', 1: 'mask'}         # class index -> label text
GR_dict = {0: (0, 0, 255), 1: (0, 255, 0)}  # BGR box colour per class (red / green)

rect_size = 4  # downscale factor: detect faces on a smaller frame for speed
cap = cv2.VideoCapture(0)
haarcascade = cv2.CascadeClassifier(os.getenv("CASCADE_CLASSIFIER"))
while True:
    (rval, im) = cap.read()
    # ROBUSTNESS FIX: a failed read returns (False, None); the original
    # crashed inside cv2.flip when the camera produced no frame.
    if not rval:
        break
    im = cv2.flip(im, 1, 1)  # mirror for a selfie-style view
    # Detect on a shrunken copy, then scale the boxes back up by rect_size.
    rerect_size = cv2.resize(im, (im.shape[1] // rect_size, im.shape[0] // rect_size))
    faces = haarcascade.detectMultiScale(rerect_size)
    for f in faces:
        (x, y, w, h) = [v * rect_size for v in f]
        face_img = im[y:y + h, x:x + w]
        rerect_sized = cv2.resize(face_img, (150, 150))
        normalized = rerect_sized / 255.0  # model expects inputs in [0, 1]
        reshaped = np.reshape(normalized, (1, 150, 150, 3))
        reshaped = np.vstack([reshaped])
        result = model.predict(reshaped)
        label = np.argmax(result, axis=1)[0]
        # Coloured box, filled label background, then the label text.
        cv2.rectangle(im, (x, y), (x + w, y + h), GR_dict[label], 2)
        cv2.rectangle(im, (x, y - 40), (x + w, y), GR_dict[label], -1)
        cv2.putText(im, results[label], (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
    cv2.imshow('LIVE', im)
    key = cv2.waitKey(10)
    if key == 27:  # ESC quits
        break
cap.release()
cv2.destroyAllWindows()
| [
"keras.models.load_model",
"cv2.putText",
"numpy.argmax",
"cv2.waitKey",
"cv2.imshow",
"dotenv.load_dotenv",
"cv2.VideoCapture",
"numpy.vstack",
"numpy.reshape",
"cv2.rectangle",
"cv2.flip",
"cv2.destroyAllWindows",
"os.getenv",
"cv2.resize"
] | [((151, 164), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (162, 164), False, 'from dotenv import load_dotenv\n'), ((173, 201), 'keras.models.load_model', 'load_model', (['"""./model-010.h5"""'], {}), "('./model-010.h5')\n", (183, 201), False, 'from keras.models import load_model\n'), ((303, 322), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (319, 322), False, 'import cv2\n'), ((1346, 1369), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1367, 1369), False, 'import cv2\n'), ((360, 391), 'os.getenv', 'os.getenv', (['"""CASCADE_CLASSIFIER"""'], {}), "('CASCADE_CLASSIFIER')\n", (369, 391), False, 'import os\n'), ((443, 461), 'cv2.flip', 'cv2.flip', (['im', '(1)', '(1)'], {}), '(im, 1, 1)\n', (451, 461), False, 'import cv2\n'), ((481, 549), 'cv2.resize', 'cv2.resize', (['im', '(im.shape[1] // rect_size, im.shape[0] // rect_size)'], {}), '(im, (im.shape[1] // rect_size, im.shape[0] // rect_size))\n', (491, 549), False, 'import cv2\n'), ((1250, 1272), 'cv2.imshow', 'cv2.imshow', (['"""LIVE"""', 'im'], {}), "('LIVE', im)\n", (1260, 1272), False, 'import cv2\n'), ((1283, 1298), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (1294, 1298), False, 'import cv2\n'), ((738, 770), 'cv2.resize', 'cv2.resize', (['face_img', '(150, 150)'], {}), '(face_img, (150, 150))\n', (748, 770), False, 'import cv2\n'), ((832, 872), 'numpy.reshape', 'np.reshape', (['normalized', '(1, 150, 150, 3)'], {}), '(normalized, (1, 150, 150, 3))\n', (842, 872), True, 'import numpy as np\n'), ((892, 913), 'numpy.vstack', 'np.vstack', (['[reshaped]'], {}), '([reshaped])\n', (901, 913), True, 'import numpy as np\n'), ((1010, 1070), 'cv2.rectangle', 'cv2.rectangle', (['im', '(x, y)', '(x + w, y + h)', 'GR_dict[label]', '(2)'], {}), '(im, (x, y), (x + w, y + h), GR_dict[label], 2)\n', (1023, 1070), False, 'import cv2\n'), ((1079, 1141), 'cv2.rectangle', 'cv2.rectangle', (['im', '(x, y - 40)', '(x + w, y)', 'GR_dict[label]', '(-1)'], {}), '(im, (x, 
y - 40), (x + w, y), GR_dict[label], -1)\n', (1092, 1141), False, 'import cv2\n'), ((1150, 1249), 'cv2.putText', 'cv2.putText', (['im', 'results[label]', '(x, y - 10)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.8)', '(255, 255, 255)', '(2)'], {}), '(im, results[label], (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8,\n (255, 255, 255), 2)\n', (1161, 1249), False, 'import cv2\n'), ((972, 997), 'numpy.argmax', 'np.argmax', (['result'], {'axis': '(1)'}), '(result, axis=1)\n', (981, 997), True, 'import numpy as np\n')] |
# coding: utf-8
import pickle
import numpy as np
from collections import OrderedDict
from ..common.layers import *
class DeepConvNet:
    """High-accuracy ConvNet (>= 99% recognition rate), 21 layers in total.

    Architecture:
        conv - relu - conv - relu - pool -
        conv - relu - conv - relu - pool -
        conv - relu - conv - relu - pool -
        affine - relu - dropout - affine - dropout - softmax
    """
    def __init__(self,input_dim=(1,28,28),
                 conv_param_1 = {'filter_num':16, 'filter_size':3, 'pad':1, 'stride':1},
                 conv_param_2 = {'filter_num':16, 'filter_size':3, 'pad':1, 'stride':1},
                 conv_param_3 = {'filter_num':32, 'filter_size':3, 'pad':1, 'stride':1},
                 conv_param_4 = {'filter_num':32, 'filter_size':3, 'pad':2, 'stride':1},
                 conv_param_5 = {'filter_num':64, 'filter_size':3, 'pad':1, 'stride':1},
                 conv_param_6 = {'filter_num':64, 'filter_size':3, 'pad':1, 'stride':1},
                 hidden_size=50, output_size=10
                 ):
        # Initialise the weights.
        # Average number of incoming connections per neuron, layer by layer
        # (TODO: compute automatically instead of hard-coding).
        pre_node_nums = np.array([1*3*3, 16*3*3, 16*3*3, 32*3*3, 32*3*3, 64*3*3,64*4*4,hidden_size])
        # Recommended init scale when using ReLU (He initialisation).
        weight_init_scales = np.sqrt(2.0 / pre_node_nums)

        self.params = {}
        pre_channel_num = input_dim[0]
        for idx ,conv_param in enumerate([conv_param_1,conv_param_2,conv_param_3,conv_param_4,conv_param_5,conv_param_6]):
            self.params['W' + str(idx+1)] = weight_init_scales[idx] * np.random.randn(conv_param['filter_num'],pre_channel_num,conv_param['filter_size'], conv_param['filter_size'])
            self.params['b'+ str(idx+1)] = np.zeros(conv_param['filter_num'])
            pre_channel_num = conv_param['filter_num']
        # Fully-connected weights; 64*4*4 is the flattened conv output size.
        self.params['W7'] = weight_init_scales[6] * np.random.randn(64*4*4 ,hidden_size)
        self.params['b7'] = np.zeros(hidden_size)
        # Output layer.
        self.params['W8'] = weight_init_scales[7] * np.random.randn(hidden_size ,output_size)
        self.params['b8'] = np.zeros(output_size)

        # Build the layers.  NOTE: the indices (0, 2, 5, 7, 10, 12, 15, 18)
        # of the parameterised layers are relied on by gradient()/load_params().
        self.layers = []
        # 1
        self.layers.append(Convolution(self.params['W1'] ,self.params['b1'] ,
                           conv_param_1['stride'], conv_param_1['pad']))
        # 2
        self.layers.append(Relu())
        # 3
        self.layers.append(Convolution(self.params['W2'] ,self.params['b2'] ,
                           conv_param_2['stride'], conv_param_2['pad']))
        # 4
        self.layers.append(Relu())
        # 5
        self.layers.append(Pooling(pool_h=2,pool_w=2,stride=2))
        # 6
        self.layers.append(Convolution(self.params['W3'] ,self.params['b3'] ,
                           conv_param_3['stride'], conv_param_3['pad']))
        # 7
        self.layers.append(Relu())
        # 8
        self.layers.append(Convolution(self.params['W4'] ,self.params['b4'] ,
                           conv_param_4['stride'], conv_param_4['pad']))
        # 9
        self.layers.append(Relu())
        # 10
        self.layers.append(Pooling(pool_h=2,pool_w=2,stride=2))
        # 11
        self.layers.append(Convolution(self.params['W5'] ,self.params['b5'] ,
                           conv_param_5['stride'], conv_param_5['pad']))
        # 12
        self.layers.append(Relu())
        # 13
        self.layers.append(Convolution(self.params['W6'] ,self.params['b6'] ,
                           conv_param_6['stride'], conv_param_6['pad']))
        # 14
        self.layers.append(Relu())
        # 15
        self.layers.append(Pooling(pool_h=2,pool_w=2,stride=2))
        # 16
        self.layers.append(Affine(self.params['W7'],self.params['b7']))
        # 17
        self.layers.append(Relu())
        # 18
        self.layers.append(Dropout(0.5))
        # 19
        self.layers.append(Affine(self.params['W8'] ,self.params['b8']))
        # 20
        self.layers.append(Dropout(0.5))
        # 21
        self.last_layer = SoftmaxWithLoss()

    def predict(self,x ,train_flg=False):
        """Forward pass; train_flg toggles Dropout between train/eval mode."""
        for layer in self.layers:
            if isinstance(layer,Dropout):
                x = layer.forward(x,train_flg)
            else:
                x = layer.forward(x)
        return x

    def loss(self,x,t):
        """Softmax cross-entropy loss of the prediction for x against labels t."""
        y =self.predict(x,train_flg=True)
        return self.last_layer.forward(y,t)

    def accuracy(self,x,t,batch_size=100):
        """Classification accuracy over x/t, evaluated in mini-batches."""
        if t.ndim != 1: t = np.argmax(t,axis = 1)  # one-hot -> class indices
        acc = 0.0
        for i in range(int(x.shape[0] / batch_size)):
            tx = x[i*batch_size: (i+1)*batch_size]
            tt = t[i*batch_size: (i+1)*batch_size]
            y= self.predict(tx,train_flg=False)
            y = np.argmax(y,axis=1)
            acc += np.sum(y == tt)
        return acc / x.shape[0]

    def gradient(self, x, t):
        """Backpropagation: return gradients {'W1'..'W8', 'b1'..'b8'}."""
        # forward
        self.loss(x, t)
        # backward
        dout = 1
        dout = self.last_layer.backward(dout)
        tmp_layers = self.layers.copy()
        tmp_layers.reverse()
        for layer in tmp_layers:
            dout = layer.backward(dout)
        # Collect gradients.
        grads = {}
        # 0, 2, 5, 7, ... are the indices of the conv / fully-connected layers.
        for i, layer_idx in enumerate((0, 2, 5, 7, 10, 12, 15, 18)):
            grads['W' + str(i+1)] = self.layers[layer_idx].dW
            grads['b' + str(i+1)] = self.layers[layer_idx].db
        return grads

    def save_params(self, file_name="params.pkl"):
        """Pickle the current parameters to file_name."""
        params = {}
        for key, val in self.params.items():
            params[key] = val
        with open(file_name, 'wb') as f:
            pickle.dump(params, f)

    def load_params(self, file_name="params.pkl"):
        """Load pickled parameters and push them into the layer objects."""
        with open(file_name, 'rb') as f:
            params = pickle.load(f)
        for key, val in params.items():
            self.params[key] = val
        # 0, 2, 5, 7, ... are the indices of the conv / fully-connected layers.
        for i, layer_idx in enumerate((0, 2, 5, 7, 10, 12, 15, 18)):
            self.layers[layer_idx].W = self.params['W' + str(i+1)]
self.layers[layer_idx].b = self.params['b' + str(i+1)] | [
"pickle.dump",
"numpy.sum",
"numpy.random.randn",
"numpy.argmax",
"numpy.zeros",
"pickle.load",
"numpy.array",
"numpy.sqrt"
] | [((1126, 1236), 'numpy.array', 'np.array', (['[1 * 3 * 3, 16 * 3 * 3, 16 * 3 * 3, 32 * 3 * 3, 32 * 3 * 3, 64 * 3 * 3, 64 *\n 4 * 4, hidden_size]'], {}), '([1 * 3 * 3, 16 * 3 * 3, 16 * 3 * 3, 32 * 3 * 3, 32 * 3 * 3, 64 * 3 *\n 3, 64 * 4 * 4, hidden_size])\n', (1134, 1236), True, 'import numpy as np\n'), ((1275, 1303), 'numpy.sqrt', 'np.sqrt', (['(2.0 / pre_node_nums)'], {}), '(2.0 / pre_node_nums)\n', (1282, 1303), True, 'import numpy as np\n'), ((2011, 2032), 'numpy.zeros', 'np.zeros', (['hidden_size'], {}), '(hidden_size)\n', (2019, 2032), True, 'import numpy as np\n'), ((2190, 2211), 'numpy.zeros', 'np.zeros', (['output_size'], {}), '(output_size)\n', (2198, 2211), True, 'import numpy as np\n'), ((1756, 1790), 'numpy.zeros', 'np.zeros', (["conv_param['filter_num']"], {}), "(conv_param['filter_num'])\n", (1764, 1790), True, 'import numpy as np\n'), ((1938, 1978), 'numpy.random.randn', 'np.random.randn', (['(64 * 4 * 4)', 'hidden_size'], {}), '(64 * 4 * 4, hidden_size)\n', (1953, 1978), True, 'import numpy as np\n'), ((2112, 2153), 'numpy.random.randn', 'np.random.randn', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (2127, 2153), True, 'import numpy as np\n'), ((5016, 5036), 'numpy.argmax', 'np.argmax', (['t'], {'axis': '(1)'}), '(t, axis=1)\n', (5025, 5036), True, 'import numpy as np\n'), ((5278, 5298), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (5287, 5298), True, 'import numpy as np\n'), ((5317, 5332), 'numpy.sum', 'np.sum', (['(y == tt)'], {}), '(y == tt)\n', (5323, 5332), True, 'import numpy as np\n'), ((6158, 6180), 'pickle.dump', 'pickle.dump', (['params', 'f'], {}), '(params, f)\n', (6169, 6180), False, 'import pickle\n'), ((6295, 6309), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6306, 6309), False, 'import pickle\n'), ((1594, 1711), 'numpy.random.randn', 'np.random.randn', (["conv_param['filter_num']", 'pre_channel_num', "conv_param['filter_size']", "conv_param['filter_size']"], {}), 
"(conv_param['filter_num'], pre_channel_num, conv_param[\n 'filter_size'], conv_param['filter_size'])\n", (1609, 1711), True, 'import numpy as np\n')] |
from .errors import SDSSFileNotSpecifiedException
from astropy.io import fits
from .convolution.convolution import Convolution
import numpy as np
class Spectrum(object):
    """A single SDSS spectrum backed by a FITS file.

    Header values (RA/DEC), flux, wavelength, errors, ids, etc. are read
    lazily from the file on first access and cached on the instance.
    """

    def __init__(self, filepath=None):
        """Create a spectrum for the FITS file at ``filepath``.

        Raises
        ------
        SDSSFileNotSpecifiedException
            If no file path is given.
        """
        if filepath is None:
            raise SDSSFileNotSpecifiedException("A spectrum file must "
                                "be specified to create a spectrum.")
        self.filepath = filepath
        # Lazily-populated caches, filled on first property access.
        self._num_hdu = None
        self._is_valid = None
        self._ra = None
        self._dec = None
        self._flux = None
        self._wavelength = None
        self._error = None
        self._id = None
        self._objecttype = None

    @property
    def num_hdu(self):
        """Number of HDUs in the FITS file."""
        # FIX: the original wrapped len() in `except NameError`, which can
        # never trigger here; the dead handler has been removed.
        with fits.open(self.filepath) as hdu_list:
            return len(hdu_list)

    @property
    def is_valid(self):
        """True if the FITS file exists and can be opened, else False.

        BUG FIX: the original unconditionally assigned ``self._is_valid =
        True`` after the try/except, so it always returned True even for a
        missing file.  The result is now computed once and cached.
        """
        if self._is_valid is None:
            try:
                with fits.open(self.filepath):
                    pass
            except FileNotFoundError:
                print(f'File {self.filepath.split("/")[-1]} does not exist in directory {"".join(self.filepath.split("/")[:-1])}')
                self._is_valid = False
            else:
                self._is_valid = True
        return self._is_valid

    @property
    def id(self):
        """BOSS spectroscopic object id (HDU 2, column ``BOSS_SPECOBJ_ID``)."""
        if getattr(self, '_id', None) is None:
            with fits.open(self.filepath) as hdu_list:
                try:
                    self._id = str(hdu_list[2].data['BOSS_SPECOBJ_ID'][0])
                except KeyError:
                    print('You need to update the code to account for the modified keyword.')
        return self._id

    @property
    def objecttype(self):
        """Object type column (HDU 2, column ``OBJTYPE``)."""
        if getattr(self, '_objecttype', None) is None:
            with fits.open(self.filepath) as hdu_list:
                try:
                    self._objecttype = hdu_list[2].data['OBJTYPE']
                except KeyError:
                    print('You need to update the code to account for the modified keyword.')
        return self._objecttype

    @property
    def ra(self):
        """RA of this spectrum in degrees (header keyword ``PLUG_RA``)."""
        if self._ra is None:
            with fits.open(self.filepath) as hdu_list:
                try:
                    self._ra = hdu_list[0].header["PLUG_RA"]
                except KeyError:
                    print('You need to update the code to account for the modified keyword.')
        return self._ra

    @property
    def dec(self):
        """DEC of this spectrum in degrees (header keyword ``PLUG_DEC``)."""
        if self._dec is None:
            with fits.open(self.filepath) as hdu_list:
                try:
                    self._dec = hdu_list[0].header["PLUG_DEC"]
                except KeyError:
                    print('You need to update the code to account for the modified keyword.')
        return self._dec

    @property
    def wavelength(self):
        """Wavelength array in Angstrom, converted from the log-lambda bins."""
        if getattr(self, '_wavelength', None) is None:
            with fits.open(self.filepath) as hdu_list:
                try:
                    self._wavelength = 10**hdu_list[1].data['loglam']
                except KeyError:
                    print('You need to update the code to account for the modified keyword.')
        return self._wavelength

    @property
    def flux(self):
        """Flux array (HDU 1, column ``flux``)."""
        if getattr(self, '_flux', None) is None:
            with fits.open(self.filepath) as hdu_list:
                try:
                    self._flux = hdu_list[1].data['flux']
                except KeyError:
                    print('You need to update the code to account for the modified keyword.')
        return self._flux

    @property
    def error(self):
        """Per-pixel error derived from the inverse variance (``ivar``).

        NOTE(review): this returns 1/ivar, i.e. the *variance* of the flux;
        the 1-sigma uncertainty would be 1/sqrt(ivar) -- confirm the
        intended units.  Pixels with ivar == 0 come out as inf.
        """
        if getattr(self, '_error', None) is None:
            with fits.open(self.filepath) as hdu_list:
                try:
                    ivar = hdu_list[1].data['ivar']
                    # FIX: removed the original's no-op `np.where(ivar)` call;
                    # suppress the divide-by-zero warning for masked pixels
                    # (ivar == 0 still yields inf, as before).
                    with np.errstate(divide='ignore'):
                        self._error = 1./ivar
                except KeyError:
                    print('You need to update the code to account for the modified keyword.')
        return self._error

    def plot(self, name_figure):
        """Save a flux + error plot of the spectrum to ``name_figure`` + '.png'."""
        import matplotlib.pyplot as plt
        # Styling so the plot looks nice; nothing functional here.
        plt.rcParams['axes.linewidth'] = 1.5
        font = {'family': 'serif', 'size': 15}
        scatter_kwargs = {"zorder": 100}
        error_kwargs = {"lw": .5, "zorder": 0}
        plt.rc('font', **font)
        fig, axes = plt.subplots(1, figsize=(8, 7))
        plt.rcParams['xtick.major.size'] = 8
        plt.rcParams['xtick.major.width'] = 1.8
        plt.rcParams['xtick.minor.size'] = 5
        plt.rcParams['xtick.minor.width'] = 1.3
        plt.rcParams['ytick.major.size'] = 8
        plt.rcParams['ytick.major.width'] = 1.8
        plt.rcParams['ytick.minor.size'] = 7
        plt.rcParams['ytick.minor.width'] = 1.8
        plt.rcParams['pdf.fonttype'] = 42
        plt.rcParams['ps.fonttype'] = 42
        plt.rcParams["legend.fancybox"] = True
        # Plot the spectrum (flux in blue, error in red).
        plt.clf()
        plt.plot(self.wavelength, self.flux, color = 'b', lw = 1.5)
        plt.plot(self.wavelength, self.error, color = 'r', lw = 1)
        plt.xlabel(r'Wavelength $(\AA)$')
        plt.ylabel(r'Flux (Some units)')
        plt.xlim([min(self.wavelength) - 1, max(self.wavelength) + 1])
        plt.ylim([min(self.flux) - 0.1 * (min(self.flux)), max(self.flux) - 0.1 * (max(self.flux))])
        plt.savefig(name_figure + '.png', dpi=200)

    def color(self, filter_name1, filter_name2):
        """Colour index: magnitude in filter_name1 minus magnitude in filter_name2."""
        mag_object1 = Convolution(self.wavelength, self.flux, filter_name1)
        mag_object2 = Convolution(self.wavelength, self.flux, filter_name2)
        return mag_object1.magnitude - mag_object2.magnitude

    def line_ew(self, name_line):
        """Equivalent width of the named line (HDU 3, ``LINENAME``/``LINEEW``).

        NOTE(review): returns the string "Value line wrong" on a ValueError;
        callers appear to depend on this, so the behaviour is kept.
        """
        with fits.open(self.filepath) as hdu_list:
            try:
                index = hdu_list[3].data['LINENAME'] == name_line
                line_value = hdu_list[3].data['LINEEW'][index]
            except ValueError:
                return "Value line wrong"
        return line_value
| [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"numpy.where",
"matplotlib.pyplot.rc",
"astropy.io.fits.open",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((4840, 4862), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (4846, 4862), True, 'import matplotlib.pyplot as plt\n'), ((4883, 4914), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(8, 7)'}), '(1, figsize=(8, 7))\n', (4895, 4914), True, 'import matplotlib.pyplot as plt\n'), ((5481, 5490), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5488, 5490), True, 'import matplotlib.pyplot as plt\n'), ((5499, 5554), 'matplotlib.pyplot.plot', 'plt.plot', (['self.wavelength', 'self.flux'], {'color': '"""b"""', 'lw': '(1.5)'}), "(self.wavelength, self.flux, color='b', lw=1.5)\n", (5507, 5554), True, 'import matplotlib.pyplot as plt\n'), ((5567, 5621), 'matplotlib.pyplot.plot', 'plt.plot', (['self.wavelength', 'self.error'], {'color': '"""r"""', 'lw': '(1)'}), "(self.wavelength, self.error, color='r', lw=1)\n", (5575, 5621), True, 'import matplotlib.pyplot as plt\n'), ((5634, 5667), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wavelength $(\\\\AA)$"""'], {}), "('Wavelength $(\\\\AA)$')\n", (5644, 5667), True, 'import matplotlib.pyplot as plt\n'), ((5676, 5707), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Flux (Some units)"""'], {}), "('Flux (Some units)')\n", (5686, 5707), True, 'import matplotlib.pyplot as plt\n'), ((5889, 5931), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(name_figure + '.png')"], {'dpi': '(200)'}), "(name_figure + '.png', dpi=200)\n", (5900, 5931), True, 'import matplotlib.pyplot as plt\n'), ((824, 848), 'astropy.io.fits.open', 'fits.open', (['self.filepath'], {}), '(self.filepath)\n', (833, 848), False, 'from astropy.io import fits\n'), ((6350, 6374), 'astropy.io.fits.open', 'fits.open', (['self.filepath'], {}), '(self.filepath)\n', (6359, 6374), False, 'from astropy.io import fits\n'), ((1126, 1150), 'astropy.io.fits.open', 'fits.open', (['self.filepath'], {}), '(self.filepath)\n', (1135, 1150), False, 'from astropy.io import fits\n'), ((1599, 1623), 'astropy.io.fits.open', 
'fits.open', (['self.filepath'], {}), '(self.filepath)\n', (1608, 1623), False, 'from astropy.io import fits\n'), ((2041, 2065), 'astropy.io.fits.open', 'fits.open', (['self.filepath'], {}), '(self.filepath)\n', (2050, 2065), False, 'from astropy.io import fits\n'), ((2465, 2489), 'astropy.io.fits.open', 'fits.open', (['self.filepath'], {}), '(self.filepath)\n', (2474, 2489), False, 'from astropy.io import fits\n'), ((2878, 2902), 'astropy.io.fits.open', 'fits.open', (['self.filepath'], {}), '(self.filepath)\n', (2887, 2902), False, 'from astropy.io import fits\n'), ((3312, 3336), 'astropy.io.fits.open', 'fits.open', (['self.filepath'], {}), '(self.filepath)\n', (3321, 3336), False, 'from astropy.io import fits\n'), ((3701, 3725), 'astropy.io.fits.open', 'fits.open', (['self.filepath'], {}), '(self.filepath)\n', (3710, 3725), False, 'from astropy.io import fits\n'), ((4117, 4141), 'astropy.io.fits.open', 'fits.open', (['self.filepath'], {}), '(self.filepath)\n', (4126, 4141), False, 'from astropy.io import fits\n'), ((4248, 4262), 'numpy.where', 'np.where', (['ivar'], {}), '(ivar)\n', (4256, 4262), True, 'import numpy as np\n')] |
"""
Implements fast ica for two components
"""
import numpy as np
import sounddevice as sd
from scipy.io import wavfile
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import Normalize
def main():
    """
    Demonstrate FastICA blind source separation.

    Loads two speech recordings, mixes them with a fixed 2x2 mixing
    matrix, runs FastICA to recover the sources, and plays the original,
    mixed and estimated signals through the default sound device.
    """
    ## Speech data ===========================================================
    # Both reads overwrite `fs`; the two files are assumed to share the
    # same sample rate -- TODO confirm.
    fs, data1 = wavfile.read('./ertain-20160302/wav/cc-01.wav')
    fs, data2 = wavfile.read('./ertain-20160302/wav/cc-02.wav')
    # Drop leading/trailing silence (zero samples).
    data1 = np.trim_zeros(data1)
    data2 = np.trim_zeros(data2)
    # Scale raw integer samples down to small floats (16-bit assumed --
    # TODO confirm sample dtype).
    data1 = data1 / 65536
    data2 = data2 / 65536
    # Truncate to a common length and stack into source matrix S (2 x length).
    length = min(len(data1), len(data2))
    S = np.mat([data1[:length], data2[:length]])
    ## =======================================================================
    ## SINE function ==========================================================
    # np.random.seed(0)
    # sample_rate = 16000
    # numseconds = .1
    # freq1 = 440
    # freq2 = 261.63
    # timebase = np.linspace(0, numseconds, numseconds * sample_rate)
    # s_1 = np.sin(2 * np.pi * freq1 * timebase) # signal 1 : sinusoidal signal
    # s_2 = np.sin(2 * np.pi * freq2 * timebase) # signal 1 : sinusoidal signal
    # s_1 = s_1 + np.random.normal(size=s_1.size)
    # s_2 = s_2 + np.random.normal(size=s_2.size)
    ## SINE function ==========================================================
    # S = np.mat([s_1, s_2])
    # S += 0.2 * np.random.normal(size=S.shape) # Add noise
    #===========================================================================
    # Generate random mixing matrix
    # A = np.random.rand(2, 2)
    # Or predefined mixing matrix
    A = np.mat([[1, 2], [1, 1]])
    X = np.dot(A, S) # Generate observations
    # Perform FastICA
    est = fast_ica(X)
    print("Playing Original")
    sd.play(np.array(S[0]).squeeze(), fs)
    sd.wait()
    sd.play(np.array(S[1]).squeeze(), fs)
    sd.wait()
    print("Playing Mixed")
    sd.play(np.array(X[0]).squeeze(), fs)
    sd.wait()
    sd.play(np.array(X[1]).squeeze(), fs)
    sd.wait()
    print("FastICA output")
    # The estimates are attenuated (/10) before playback: ICA output scale
    # is arbitrary and can clip otherwise.
    sd.play(np.array(est[0]).squeeze() / 10, fs)
    sd.wait()
    sd.play(np.array(est[1]).squeeze() / 10, fs)
    sd.wait()
    # Plot output ===================================================
    # plt.subplot(3, 1, 1)
    # plt.title('Fast ICA output')
    # plt.plot(est.T)
    # plt.subplot(3, 1, 2)
    # plt.title('Original source signal')
    # plt.plot(S.T)
    # plt.subplot(3, 1, 3)
    # plt.title('Mixed signal')
    # plt.plot(X.T)
    # plt.show()
    # ===============================================================
def eigen2by2(mat):
    """
    Eigendecomposition of a 2x2 matrix via the characteristic polynomial.

    Args:
        mat: 2x2 numpy matrix [[a, b], [c, d]]

    Returns:
        (eigval, eigvec): eigval is the diagonal matrix
        diag(lambda_small, lambda_large); eigvec holds the matching unit
        eigenvectors as columns.
    """
    trace = mat[0, 0] + mat[1, 1]
    det = mat[0, 0] * mat[1, 1] - mat[0, 1] * mat[1, 0]
    # Roots of lambda^2 - trace*lambda + det = 0.
    disc = np.sqrt(trace ** 2 - 4 * det)
    lam_hi = (trace + disc) / 2
    lam_lo = (trace - disc) / 2
    identity = np.mat([[1, 0], [0, 1]])
    # The columns of (mat - lam*I) are eigenvectors of the *other* eigenvalue.
    shifted_hi = mat - lam_hi * identity
    shifted_lo = mat - lam_lo * identity
    eigvec = np.mat([[shifted_hi[0, 0], shifted_lo[0, 0]],
                     [shifted_hi[1, 0], shifted_lo[1, 0]]])
    eigval = np.mat([[lam_lo, 0], [0, lam_hi]])
    # Normalize each eigenvector column to unit length.
    eigvec = eigvec / np.linalg.norm(eigvec, axis=0)
    return (eigval, eigvec)
def fast_ica(input_mat, max_num_iterations=1000, epsilon=0.0001):
    """Separate mixed signals with the kurtosis-based FastICA algorithm.

    Only the two-component case is supported: the first basis vector is
    found by fixed-point iteration and the second is its 90-degree
    rotation (whitened components are orthogonal). The input is expected
    as np.matrix -- np.mean then keeps the (2, 1) column shape needed for
    the centering broadcast.

    Args:
        input_mat: observation matrix of shape (num_component, num_sample).
        max_num_iterations: maximum number of fixed-point iterations.
        epsilon: convergence tolerance on the weight-vector change.

    Returns:
        Matrix of shape (num_component, num_sample) holding the
        estimated independent components.
    """
    (num_comp, num_sample) = np.shape(input_mat)
    #==== Prewhitening data ====#
    # Center the observations (zero mean per component).
    mean = np.mean(input_mat, axis=1)
    center_mat = np.mat(input_mat - mean)
    # Sample covariance matrix (equivalent to np.cov).
    cov_mat = center_mat * center_mat.T / (num_sample - 1)
    # Closed-form 2x2 eigendecomposition of the covariance matrix.
    (eig_val, eig_vec) = eigen2by2(cov_mat)
    # Whitening / dewhitening transforms; white_mat has identity covariance.
    whitening_mat = np.linalg.inv(np.sqrt(eig_val)) * eig_vec.T
    dewhitening_mat = eig_vec * np.sqrt(eig_val)
    white_mat = whitening_mat * center_mat
    # Initialize a random unit weight vector.
    weight = np.mat(np.random.normal(size=(num_comp, 1)))
    weight = weight / np.linalg.norm(weight)
    # Previous estimate, kept for the convergence test.
    weight_old = np.mat(np.zeros_like(weight))
    iteration = 0
    while iteration < max_num_iterations:
        # Converged when the weight (up to sign flip) stops changing.
        if np.linalg.norm(weight - weight_old) < epsilon \
                or np.linalg.norm(weight + weight_old) < epsilon:
            print("converged")
            break
        weight_old = weight
        # Fixed-point update: w <- E{x (w^T x)^3} - 3 w (kurtosis nonlinearity).
        weight = (white_mat * np.power(white_mat.T * weight, 3)) \
                / num_sample - 3 * weight
        weight = weight / np.linalg.norm(weight)
        # BUG FIX: the counter was never advanced, so max_num_iterations
        # was ignored and non-converging inputs looped forever.
        iteration += 1
    basis_set = np.mat(np.zeros((num_comp, num_comp)))
    basis_set[:, 0] = weight
    # Second basis vector: 90-degree rotation of the first.
    basis_set[:, 1] = np.matrix([[0, -1], [1, 0]]) * weight
    ica_fltr = basis_set.T * whitening_mat
    return ica_fltr * np.mat(input_mat)
# Entry point: run the demo only when executed directly (not on import).
if __name__ == "__main__":
    main()
| [
"numpy.matrix",
"numpy.zeros_like",
"numpy.trim_zeros",
"numpy.power",
"numpy.zeros",
"scipy.io.wavfile.read",
"numpy.shape",
"numpy.mean",
"numpy.linalg.norm",
"numpy.array",
"sounddevice.wait",
"numpy.random.normal",
"numpy.dot",
"numpy.mat",
"numpy.sqrt"
] | [((362, 409), 'scipy.io.wavfile.read', 'wavfile.read', (['"""./ertain-20160302/wav/cc-01.wav"""'], {}), "('./ertain-20160302/wav/cc-01.wav')\n", (374, 409), False, 'from scipy.io import wavfile\n'), ((426, 473), 'scipy.io.wavfile.read', 'wavfile.read', (['"""./ertain-20160302/wav/cc-02.wav"""'], {}), "('./ertain-20160302/wav/cc-02.wav')\n", (438, 473), False, 'from scipy.io import wavfile\n'), ((487, 507), 'numpy.trim_zeros', 'np.trim_zeros', (['data1'], {}), '(data1)\n', (500, 507), True, 'import numpy as np\n'), ((520, 540), 'numpy.trim_zeros', 'np.trim_zeros', (['data2'], {}), '(data2)\n', (533, 540), True, 'import numpy as np\n'), ((644, 684), 'numpy.mat', 'np.mat', (['[data1[:length], data2[:length]]'], {}), '([data1[:length], data2[:length]])\n', (650, 684), True, 'import numpy as np\n'), ((1655, 1679), 'numpy.mat', 'np.mat', (['[[1, 2], [1, 1]]'], {}), '([[1, 2], [1, 1]])\n', (1661, 1679), True, 'import numpy as np\n'), ((1689, 1701), 'numpy.dot', 'np.dot', (['A', 'S'], {}), '(A, S)\n', (1695, 1701), True, 'import numpy as np\n'), ((1849, 1858), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (1856, 1858), True, 'import sounddevice as sd\n'), ((1905, 1914), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (1912, 1914), True, 'import sounddevice as sd\n'), ((1989, 1998), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (1996, 1998), True, 'import sounddevice as sd\n'), ((2045, 2054), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (2052, 2054), True, 'import sounddevice as sd\n'), ((2137, 2146), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (2144, 2146), True, 'import sounddevice as sd\n'), ((2200, 2209), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (2207, 2209), True, 'import sounddevice as sd\n'), ((2872, 2896), 'numpy.mat', 'np.mat', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (2878, 2896), True, 'import numpy as np\n'), ((3071, 3127), 'numpy.mat', 'np.mat', (['[[ev1[0, 0], ev2[0, 0]], [ev1[1, 0], ev2[1, 0]]]'], {}), '([[ev1[0, 0], 
ev2[0, 0]], [ev1[1, 0], ev2[1, 0]]])\n', (3077, 3127), True, 'import numpy as np\n'), ((3139, 3175), 'numpy.mat', 'np.mat', (['[[eigval2, 0], [0, eigval1]]'], {}), '([[eigval2, 0], [0, eigval1]])\n', (3145, 3175), True, 'import numpy as np\n'), ((3478, 3497), 'numpy.shape', 'np.shape', (['input_mat'], {}), '(input_mat)\n', (3486, 3497), True, 'import numpy as np\n'), ((3564, 3590), 'numpy.mean', 'np.mean', (['input_mat'], {'axis': '(1)'}), '(input_mat, axis=1)\n', (3571, 3590), True, 'import numpy as np\n'), ((3608, 3632), 'numpy.mat', 'np.mat', (['(input_mat - mean)'], {}), '(input_mat - mean)\n', (3614, 3632), True, 'import numpy as np\n'), ((3198, 3228), 'numpy.linalg.norm', 'np.linalg.norm', (['eigvec'], {'axis': '(0)'}), '(eigvec, axis=0)\n', (3212, 3228), True, 'import numpy as np\n'), ((4112, 4128), 'numpy.sqrt', 'np.sqrt', (['eig_val'], {}), '(eig_val)\n', (4119, 4128), True, 'import numpy as np\n'), ((4326, 4362), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(num_comp, 1)'}), '(size=(num_comp, 1))\n', (4342, 4362), True, 'import numpy as np\n'), ((4414, 4436), 'numpy.linalg.norm', 'np.linalg.norm', (['weight'], {}), '(weight)\n', (4428, 4436), True, 'import numpy as np\n'), ((4490, 4511), 'numpy.zeros_like', 'np.zeros_like', (['weight'], {}), '(weight)\n', (4503, 4511), True, 'import numpy as np\n'), ((5043, 5073), 'numpy.zeros', 'np.zeros', (['(num_comp, num_comp)'], {}), '((num_comp, num_comp))\n', (5051, 5073), True, 'import numpy as np\n'), ((5126, 5154), 'numpy.matrix', 'np.matrix', (['[[0, -1], [1, 0]]'], {}), '([[0, -1], [1, 0]])\n', (5135, 5154), True, 'import numpy as np\n'), ((5278, 5295), 'numpy.mat', 'np.mat', (['input_mat'], {}), '(input_mat)\n', (5284, 5295), True, 'import numpy as np\n'), ((2916, 2942), 'numpy.sqrt', 'np.sqrt', (['(tr ** 2 - 4 * det)'], {}), '(tr ** 2 - 4 * det)\n', (2923, 2942), True, 'import numpy as np\n'), ((2968, 2994), 'numpy.sqrt', 'np.sqrt', (['(tr ** 2 - 4 * det)'], {}), '(tr ** 2 - 4 * det)\n', (2975, 
2994), True, 'import numpy as np\n'), ((4050, 4066), 'numpy.sqrt', 'np.sqrt', (['eig_val'], {}), '(eig_val)\n', (4057, 4066), True, 'import numpy as np\n'), ((4996, 5018), 'numpy.linalg.norm', 'np.linalg.norm', (['weight'], {}), '(weight)\n', (5010, 5018), True, 'import numpy as np\n'), ((1815, 1829), 'numpy.array', 'np.array', (['S[0]'], {}), '(S[0])\n', (1823, 1829), True, 'import numpy as np\n'), ((1871, 1885), 'numpy.array', 'np.array', (['S[1]'], {}), '(S[1])\n', (1879, 1885), True, 'import numpy as np\n'), ((1955, 1969), 'numpy.array', 'np.array', (['X[0]'], {}), '(X[0])\n', (1963, 1969), True, 'import numpy as np\n'), ((2011, 2025), 'numpy.array', 'np.array', (['X[1]'], {}), '(X[1])\n', (2019, 2025), True, 'import numpy as np\n'), ((4617, 4652), 'numpy.linalg.norm', 'np.linalg.norm', (['(weight - weight_old)'], {}), '(weight - weight_old)\n', (4631, 4652), True, 'import numpy as np\n'), ((4684, 4719), 'numpy.linalg.norm', 'np.linalg.norm', (['(weight + weight_old)'], {}), '(weight + weight_old)\n', (4698, 4719), True, 'import numpy as np\n'), ((2096, 2112), 'numpy.array', 'np.array', (['est[0]'], {}), '(est[0])\n', (2104, 2112), True, 'import numpy as np\n'), ((2159, 2175), 'numpy.array', 'np.array', (['est[1]'], {}), '(est[1])\n', (2167, 2175), True, 'import numpy as np\n'), ((4863, 4896), 'numpy.power', 'np.power', (['(white_mat.T * weight)', '(3)'], {}), '(white_mat.T * weight, 3)\n', (4871, 4896), True, 'import numpy as np\n')] |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file implements wrappers to some of the functions in fqe.bitstring
"""
from ctypes import c_int, c_bool, c_ulonglong, POINTER
import numpy
from numpy.ctypeslib import ndpointer
from fqe.lib import lib_fqe
def _count_bits(string: int):
    """Return the number of set bits in *string* via the C extension."""
    lib_fqe.count_bits.argtypes = [c_ulonglong]
    return lib_fqe.count_bits(c_ulonglong(string))
def _get_occupation(string: int):
    """Decode *string* through the C extension's get_occupation routine.

    A 64-slot int32 buffer is filled by the C function, which returns how
    many entries it wrote; only that prefix is returned.
    """
    occupations = numpy.zeros((64,), dtype=numpy.int32)
    c_func = lib_fqe.get_occupation
    c_func.argtypes = [
        ndpointer(dtype=numpy.int32, flags=('C_CONTIGUOUS', 'ALIGNED')),
        c_ulonglong,
    ]
    n_filled = c_func(occupations, string)
    return occupations[:n_filled]
def _lexicographic_bitstring_generator(out, norb: int, nele: int):
    """Fill *out* in place via the C lexicographic bitstring generator.

    *out* must be a C-contiguous uint64 array; norb and nele are passed
    through to the C routine unchanged.
    """
    generator = lib_fqe.lexicographic_bitstring_generator
    generator.argtypes = [
        ndpointer(dtype=numpy.uint64, flags=('C_CONTIGUOUS', 'ALIGNED')),
        c_int,
        c_int,
    ]
    generator(out, norb, nele)
| [
"ctypes.c_ulonglong",
"numpy.zeros",
"numpy.ctypeslib.ndpointer"
] | [((1022, 1059), 'numpy.zeros', 'numpy.zeros', (['(64,)'], {'dtype': 'numpy.int32'}), '((64,), dtype=numpy.int32)\n', (1033, 1059), False, 'import numpy\n'), ((921, 940), 'ctypes.c_ulonglong', 'c_ulonglong', (['string'], {}), '(string)\n', (932, 940), False, 'from ctypes import c_int, c_bool, c_ulonglong, POINTER\n'), ((1090, 1153), 'numpy.ctypeslib.ndpointer', 'ndpointer', ([], {'dtype': 'numpy.int32', 'flags': "('C_CONTIGUOUS', 'ALIGNED')"}), "(dtype=numpy.int32, flags=('C_CONTIGUOUS', 'ALIGNED'))\n", (1099, 1153), False, 'from numpy.ctypeslib import ndpointer\n'), ((1386, 1450), 'numpy.ctypeslib.ndpointer', 'ndpointer', ([], {'dtype': 'numpy.uint64', 'flags': "('C_CONTIGUOUS', 'ALIGNED')"}), "(dtype=numpy.uint64, flags=('C_CONTIGUOUS', 'ALIGNED'))\n", (1395, 1450), False, 'from numpy.ctypeslib import ndpointer\n')] |
"Convenience functions for randomness"
import numpy as np
def set_seed(seed):
    """
    Seed numpy's global random number generator.

    :param seed: random seed
    :rtype: void
    """
    np.random.seed(seed)
def choice(seq):
    """
    Return one item of a sequence, chosen uniformly at random.

    :param seq: choices
    :returns: randomly chosen item
    """
    return seq[np.random.randint(len(seq))]
def sample_with_replacement(seq, n):
    """
    Draw n items from a sequence uniformly, with replacement.

    :param seq: sequence to choose from
    :param n: sample size
    :returns: randomly chosen items
    :rtype: list
    """
    size = len(seq)
    return [seq[np.random.randint(0, size)] for _ in range(n)]
| [
"numpy.random.randint",
"numpy.random.seed"
] | [((173, 193), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (187, 193), True, 'import numpy as np\n'), ((394, 417), 'numpy.random.randint', 'np.random.randint', (['(0)', 'l'], {}), '(0, l)\n', (411, 417), True, 'import numpy as np\n'), ((713, 736), 'numpy.random.randint', 'np.random.randint', (['(0)', 'l'], {}), '(0, l)\n', (730, 736), True, 'import numpy as np\n')] |
from sklearn.model_selection import KFold
from sklearn.utils import shuffle
from normalizer import Normalizer
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import math
import os
class Dataloader(object):
    """
    A class that manages loading image datasets from folders
    to TensorFlow Dataset types that are ready for training.
    Handles training data normalization, splitting into train
    and val datasets based on folds, does data augmentation

    Attributes
    ----------
    img_size : int
        Image size to which all of the images will be transformed
    n_folds : int
        Number of folds to divide dataset into
    seed : int
        Number representing random SEED
    size : int
        Number of images inside the dataset
    classes : [ClassInfo]
        List of ClassInfo, each of ClassInfo contains info about
        class in dataset: its name, absolute path, size, index
    normalizer : Normalizer
        Custom class implementing Welford's online algorithm for
        calculating mean and std, then scales and centers data
        respectively to the calculated mean and standard deviation
    filenames : [str]
        List of absolute paths to all images
    labels : numpy.array
        Numpy array of shape (number of images in dataset,
        number of classes in dataset); contains labels for training

    Methods
    -------
    fit(path)
        fitting the loader with path to image data
    train(batch_size, fold, augment)
        Returns training dataset
    val(batch_size, fold)
        Returns validation dataset
    """
    def __init__(self, img_size, n_folds, seed):
        self.img_size = img_size
        self.n_folds = n_folds
        self.seed = seed
        if n_folds > 1:
            # NOTE(review): recent scikit-learn versions reject a
            # random_state without shuffle=True -- confirm the pinned
            # sklearn version accepts this call.
            self.kf = KFold(n_splits=n_folds, random_state=seed)

    def fit(self, path, png_to_jpg=False):
        """ Fitting the loader with image data
        Args:
            path: path to dataset folder, str
            png_to_jpg: if needed to convert all png
                        images to jpg, bool, optional
        Returns:
            self
        """
        self._analyze_path(path)
        if png_to_jpg:
            self._convert_png_to_jpg(path)
        self._generate_tensor_slices()
        return self

    def train(self, batch_size, fold_idx, normalize=True, augment=False):
        """ Dataset for model training
        Args:
            batch_size: int, number of images in a batch
            fold_idx: int, index of fold, from 0 to n_folds - 1
            normalize: bool, whether to normalize training data
                       with Welford's online algorithm.
            augment: bool, whether to use augmentation or not
        Returns:
            data: TensorFlow dataset
            steps: int, number of steps in train epoch
        """
        if not (fold_idx >= 0 and fold_idx < self.n_folds):
            raise Exception(('Fold index {} is out of expected range:'
                + ' [0, {}]').format(fold_idx, self.n_folds - 1))
        if normalize and augment:
            raise Exception('Both augmentations and normalization ' +
                            'with Welford algo is not supported ')
        print(' ... Generating Training Dataset ... ')
        # With a single fold there is no split: train on everything.
        if self.n_folds == 1:
            train_idx = range(0, len(self.filenames))
        else:
            train_idx, _ = list(self.kf.split(self.filenames))[fold_idx]
        filenames = np.array(self.filenames)[train_idx]
        labels = np.array(self.labels)[train_idx]
        steps = math.ceil(len(filenames)/batch_size)
        if normalize:
            # Dataset-specific channel statistics via Welford's algorithm.
            mean, std = Normalizer.calc_mean_and_std(filenames, self.img_size)
            mean = np.array([mean['red'], mean['green'], mean['blue']])
            std = np.array([std['red'], std['green'], std['blue']])
        else:
            # values taken from ImageNet Dataset
            mean = np.array([0.485, 0.456, 0.406])
            std = np.array([0.229, 0.224, 0.225])
        self.normalizer = Normalizer(mean, std)
        data = tf.data.Dataset.from_tensor_slices(
            (tf.constant(filenames), tf.constant(labels))
        )
        data = data.map(self.parse_fn)
        if augment:
            augs = [self.flip, self.color, self.rotate, self.zoom]
            for f in augs:
                data = data.map(f, num_parallel_calls=4)
            # Clip augmented pixel values back into [0, 1].
            data = data.map(self.drop, num_parallel_calls=4)
        data = data.shuffle(buffer_size=len(filenames))
        data = data.batch(batch_size)
        data = data.prefetch(1)
        return data, steps

    def val(self, batch_size, fold_idx):
        """ Dataset for data validation
        Args:
            batch_size: int, number of images in a batch
            fold_idx: int, index of fold, from 0 to n_folds - 1
        Returns:
            data : TensorFlow dataset
            steps : number of steps in val epoch
        """
        if not (fold_idx >= 0 and fold_idx < self.n_folds):
            raise Exception(('Fold index {} is out of expected range:'
                + ' [0, {}]').format(fold_idx, self.n_folds - 1))
        # NOTE(review): unlike train(), self.kf is used unconditionally
        # here, but __init__ only creates it when n_folds > 1 -- verify
        # val() is never called with a single fold.
        _, test_idx = list(self.kf.split(self.filenames))[fold_idx]
        filenames = np.array(self.filenames)[test_idx]
        labels = np.array(self.labels)[test_idx]
        steps = math.ceil(len(filenames)/batch_size)
        data = tf.data.Dataset.from_tensor_slices(
            (tf.constant(filenames), tf.constant(labels))
        )
        data = data.map(self.parse_fn).batch(batch_size)
        data = data.prefetch(1)
        return data, steps

    def parse_fn(self, filename, label):
        ''' Parsing filename, label pair into float array '''
        ''' Performing normalization, size transformation '''
        img = tf.io.read_file(filename)
        img = tf.image.decode_jpeg(img, channels=3)
        # NOTE(review): the grayscale round-trip keeps 3 channels --
        # presumably to discard colour while staying compatible with
        # RGB-input models; confirm this is intended.
        img = tf.image.rgb_to_grayscale(img)
        img = tf.image.grayscale_to_rgb(img)
        img = tf.image.resize(img, (self.img_size, self.img_size))
        img = tf.cast(img, tf.float32) / 255
        img = self.normalizer(img)
        return img, label

    def _analyze_path(self, path):
        ''' Analyzing defined path, extracting class names '''
        print(' ... Checking "{}" path '.format(path))
        if not os.path.exists(path):
            raise Exception('Specified path does not exist')
        # Every sub-folder is a class, except the special 'other' folder.
        self.classes = [ClassInfo(f.name, f.path) for f in \
                        os.scandir(path) if f.is_dir() \
                        and f.name != 'other']
        if len(self.classes) == 0:
            raise Exception('Specified path has no folders')
        print(' ... Found {} classes, \
             \n ... listing them below:'.format(len(self.classes)))
        self.size = 0
        for item in self.classes:
            self.size += item.size
            print(' ... [{}] {} images '.format(item.name, item.size))
        print(' ... Total image number: {}'.format(self.size))

    def _generate_tensor_slices(self):
        ''' Generates filenames and labels for model training '''
        print(' ... Generating {} filenames & labels'.format(self.size))
        idx, counter = 0, 0
        self.filenames = []
        # One-hot label matrix; rows follow self.filenames order.
        self.labels = np.zeros((self.size, len(self.classes)),
                        dtype='float32')
        for item in self.classes:
            item.index = counter
            # NOTE(review): ClassInfo.size counts jpg/jpeg/png files,
            # while only 'jpg' files are listed here -- the two counts
            # can disagree and misalign the label rows; verify.
            paths = [os.path.join(item.path, f) for f \
                     in os.listdir(item.path) if f.endswith('jpg')]
            self.filenames += paths
            self.labels[idx:idx + item.size, item.index] = 1.0
            idx += item.size
            counter += 1
        self.filenames, self.labels = shuffle(self.filenames, \
                                        self.labels, random_state=self.seed)

    def _convert_png_to_jpg(self, path):
        ''' Converts all png images in dataset to jpg images '''
        for item in self.classes:
            png_images = [f for f in os.listdir(item.path) \
                          if f.endswith('png')]
            print(' ... Converting {} {} PNG images to JPG ...'.\
                    format(len(png_images), item.name))
            for img in png_images:
                # NOTE(review): `img` is a bare filename, but png_to_jpg
                # opens it as a path -- this only works if the CWD is the
                # class folder; verify.
                self.png_to_jpg(img)

    @staticmethod
    def png_to_jpg(path):
        ''' Converts image from PNG to JPG
        Args:
            path: path to image, str
        '''
        # NOTE(review): PIL's Image is referenced here but never imported
        # in this module -- calling this raises NameError until
        # `from PIL import Image` is added.
        im = Image.open(path)
        rgb_im = im.convert('RGB')
        new_path = path[:-3] + 'jpg'
        rgb_im.save(new_path)
        os.remove(path)

    # ----------------------- AUGMENTATION ------------------------ #
    @staticmethod
    def flip(x: tf.Tensor, y: tf.Tensor) -> (tf.Tensor, tf.Tensor):
        """Flip augmentation
        Args:
            x: Image to flip
            y: its label
        Returns:
            (Augmented image, label)
        """
        x = tf.image.random_flip_left_right(x)
        x = tf.image.random_flip_up_down(x)
        return x, y

    @staticmethod
    def color(x: tf.Tensor, y: tf.Tensor) -> (tf.Tensor, tf.Tensor):
        """Color augmentation
        Args:
            x: Image
            y: label
        Returns:
            (Augmented image, label)
        """
        x = tf.image.random_hue(x, 0.08)
        x = tf.image.random_saturation(x, 0.6, 1.6)
        x = tf.image.random_brightness(x, 0.05)
        x = tf.image.random_contrast(x, 0.7, 1.3)
        return x, y

    @staticmethod
    def rotate(x: tf.Tensor, y: tf.Tensor) -> (tf.Tensor, tf.Tensor):
        """Rotation augmentation
        Args:
            x: Image
            y: label
        Returns:
            (Augmented image, label)
        """
        # Random number of quarter-turns (0 to 3).
        return tf.image.rot90(x, tf.random.uniform(shape=[], minval=0,
            maxval=4, dtype=tf.int32)), y

    def zoom(self, x: tf.Tensor, y: tf.Tensor) -> (tf.Tensor, tf.Tensor):
        """Zoom augmentation
        Args:
            x: Image
            y: label
        Returns:
            (Augmented image, label)
        """
        # Generate 20 crop settings, ranging from a 1% to 20% crop.
        scales = list(np.arange(0.8, 1.0, 0.01))
        boxes = np.zeros((len(scales), 4))
        for i, scale in enumerate(scales):
            x1 = y1 = 0.5 - (0.5 * scale)
            x2 = y2 = 0.5 + (0.5 * scale)
            boxes[i] = [x1, y1, x2, y2]
        def random_crop(img):
            # Create different crops for an image
            crops = tf.image.crop_and_resize([img], boxes=boxes,
                box_indices=np.zeros(len(scales)),
                crop_size=(self.img_size, self.img_size))
            # Return a random crop
            return crops[tf.random.uniform(shape=[], minval=0,
                maxval=len(scales), dtype=tf.int32)]
        choice = tf.random.uniform(shape=[], minval=0.,
            maxval=1., dtype=tf.float32)
        # Only apply cropping 50% of the time
        return tf.cond(choice < 0.5, lambda: x, lambda: random_crop(x)), y

    @staticmethod
    def drop(x: tf.Tensor, y: tf.Tensor) -> (tf.Tensor, tf.Tensor):
        # Clamp pixel values into [0, 1] after the augmentations above.
        x = tf.clip_by_value(x, 0, 1)
        return x, y
class ClassInfo(object):
    """
    Metadata holder for a single dataset class.

    Attributes
    ----------
    name : str
        class name
    path : str
        absolute path to the class folder
    size : int
        number of image files (jpg/jpeg/png) in the folder
    index : int
        class number within the dataset, in [0, n_classes); assigned
        later by the loader (initialised to None)
    """
    def __init__(self, name, path):
        self.name = name
        self.path = path
        image_files = [f for f in os.listdir(path)
                       if f.endswith(('jpg', 'jpeg', 'png'))]
        self.size = len(image_files)
        self.index = None  # filled in by the data loader
| [
"os.remove",
"tensorflow.image.rgb_to_grayscale",
"tensorflow.image.grayscale_to_rgb",
"tensorflow.clip_by_value",
"numpy.arange",
"normalizer.Normalizer",
"os.path.join",
"tensorflow.random.uniform",
"tensorflow.image.random_contrast",
"os.path.exists",
"tensorflow.cast",
"tensorflow.io.read_... | [((4053, 4074), 'normalizer.Normalizer', 'Normalizer', (['mean', 'std'], {}), '(mean, std)\n', (4063, 4074), False, 'from normalizer import Normalizer\n'), ((5793, 5818), 'tensorflow.io.read_file', 'tf.io.read_file', (['filename'], {}), '(filename)\n', (5808, 5818), True, 'import tensorflow as tf\n'), ((5833, 5870), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['img'], {'channels': '(3)'}), '(img, channels=3)\n', (5853, 5870), True, 'import tensorflow as tf\n'), ((5885, 5915), 'tensorflow.image.rgb_to_grayscale', 'tf.image.rgb_to_grayscale', (['img'], {}), '(img)\n', (5910, 5915), True, 'import tensorflow as tf\n'), ((5930, 5960), 'tensorflow.image.grayscale_to_rgb', 'tf.image.grayscale_to_rgb', (['img'], {}), '(img)\n', (5955, 5960), True, 'import tensorflow as tf\n'), ((5975, 6027), 'tensorflow.image.resize', 'tf.image.resize', (['img', '(self.img_size, self.img_size)'], {}), '(img, (self.img_size, self.img_size))\n', (5990, 6027), True, 'import tensorflow as tf\n'), ((7770, 7830), 'sklearn.utils.shuffle', 'shuffle', (['self.filenames', 'self.labels'], {'random_state': 'self.seed'}), '(self.filenames, self.labels, random_state=self.seed)\n', (7777, 7830), False, 'from sklearn.utils import shuffle\n'), ((8637, 8652), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (8646, 8652), False, 'import os\n'), ((8985, 9019), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['x'], {}), '(x)\n', (9016, 9019), True, 'import tensorflow as tf\n'), ((9032, 9063), 'tensorflow.image.random_flip_up_down', 'tf.image.random_flip_up_down', (['x'], {}), '(x)\n', (9060, 9063), True, 'import tensorflow as tf\n'), ((9337, 9365), 'tensorflow.image.random_hue', 'tf.image.random_hue', (['x', '(0.08)'], {}), '(x, 0.08)\n', (9356, 9365), True, 'import tensorflow as tf\n'), ((9378, 9417), 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['x', '(0.6)', '(1.6)'], {}), '(x, 0.6, 1.6)\n', (9404, 9417), 
True, 'import tensorflow as tf\n'), ((9430, 9465), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['x', '(0.05)'], {}), '(x, 0.05)\n', (9456, 9465), True, 'import tensorflow as tf\n'), ((9478, 9515), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['x', '(0.7)', '(1.3)'], {}), '(x, 0.7, 1.3)\n', (9502, 9515), True, 'import tensorflow as tf\n'), ((10937, 11006), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '[]', 'minval': '(0.0)', 'maxval': '(1.0)', 'dtype': 'tf.float32'}), '(shape=[], minval=0.0, maxval=1.0, dtype=tf.float32)\n', (10954, 11006), True, 'import tensorflow as tf\n'), ((11265, 11290), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (11281, 11290), True, 'import tensorflow as tf\n'), ((1790, 1832), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_folds', 'random_state': 'seed'}), '(n_splits=n_folds, random_state=seed)\n', (1795, 1832), False, 'from sklearn.model_selection import KFold\n'), ((3483, 3507), 'numpy.array', 'np.array', (['self.filenames'], {}), '(self.filenames)\n', (3491, 3507), True, 'import numpy as np\n'), ((3536, 3557), 'numpy.array', 'np.array', (['self.labels'], {}), '(self.labels)\n', (3544, 3557), True, 'import numpy as np\n'), ((3668, 3722), 'normalizer.Normalizer.calc_mean_and_std', 'Normalizer.calc_mean_and_std', (['filenames', 'self.img_size'], {}), '(filenames, self.img_size)\n', (3696, 3722), False, 'from normalizer import Normalizer\n'), ((3742, 3794), 'numpy.array', 'np.array', (["[mean['red'], mean['green'], mean['blue']]"], {}), "([mean['red'], mean['green'], mean['blue']])\n", (3750, 3794), True, 'import numpy as np\n'), ((3813, 3862), 'numpy.array', 'np.array', (["[std['red'], std['green'], std['blue']]"], {}), "([std['red'], std['green'], std['blue']])\n", (3821, 3862), True, 'import numpy as np\n'), ((3945, 3976), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', 
(3953, 3976), True, 'import numpy as np\n'), ((3995, 4026), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (4003, 4026), True, 'import numpy as np\n'), ((5241, 5265), 'numpy.array', 'np.array', (['self.filenames'], {}), '(self.filenames)\n', (5249, 5265), True, 'import numpy as np\n'), ((5293, 5314), 'numpy.array', 'np.array', (['self.labels'], {}), '(self.labels)\n', (5301, 5314), True, 'import numpy as np\n'), ((6042, 6066), 'tensorflow.cast', 'tf.cast', (['img', 'tf.float32'], {}), '(img, tf.float32)\n', (6049, 6066), True, 'import tensorflow as tf\n'), ((6304, 6324), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6318, 6324), False, 'import os\n'), ((10239, 10264), 'numpy.arange', 'np.arange', (['(0.8)', '(1.0)', '(0.01)'], {}), '(0.8, 1.0, 0.01)\n', (10248, 10264), True, 'import numpy as np\n'), ((4139, 4161), 'tensorflow.constant', 'tf.constant', (['filenames'], {}), '(filenames)\n', (4150, 4161), True, 'import tensorflow as tf\n'), ((4163, 4182), 'tensorflow.constant', 'tf.constant', (['labels'], {}), '(labels)\n', (4174, 4182), True, 'import tensorflow as tf\n'), ((5442, 5464), 'tensorflow.constant', 'tf.constant', (['filenames'], {}), '(filenames)\n', (5453, 5464), True, 'import tensorflow as tf\n'), ((5466, 5485), 'tensorflow.constant', 'tf.constant', (['labels'], {}), '(labels)\n', (5477, 5485), True, 'import tensorflow as tf\n'), ((6481, 6497), 'os.scandir', 'os.scandir', (['path'], {}), '(path)\n', (6491, 6497), False, 'import os\n'), ((7472, 7498), 'os.path.join', 'os.path.join', (['item.path', 'f'], {}), '(item.path, f)\n', (7484, 7498), False, 'import os\n'), ((9815, 9878), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '[]', 'minval': '(0)', 'maxval': '(4)', 'dtype': 'tf.int32'}), '(shape=[], minval=0, maxval=4, dtype=tf.int32)\n', (9832, 9878), True, 'import tensorflow as tf\n'), ((12008, 12024), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (12018, 12024), 
False, 'import os\n'), ((7534, 7555), 'os.listdir', 'os.listdir', (['item.path'], {}), '(item.path)\n', (7544, 7555), False, 'import os\n'), ((8043, 8064), 'os.listdir', 'os.listdir', (['item.path'], {}), '(item.path)\n', (8053, 8064), False, 'import os\n')] |
import torch
import numpy as np
import torch.nn as nn
# Evaluate a trained RNN classifier on the held-out test set: computes the
# average BCE loss and the accuracy over the whole test loader.
# BUG FIX: the original script ran the identical evaluation loop twice and
# discarded the first pass's results; a single pass produces the same
# printed statistics in half the time.

# First checking if GPU is available
train_on_gpu = torch.cuda.is_available()

if train_on_gpu:
    print('Training on GPU.')
else:
    print('No GPU available, training on CPU.')

batch_size = 50

# loss and optimization functions
lr = 0.001  # unused here; kept from the training script

criterion = nn.BCELoss()

# load trained model
net = torch.load('models/torch_rnn_trained.model')

# load test dataloader
test_loader = torch.load('data/processed/torch_rnn_test.loader')

test_losses = []  # track loss
num_correct = 0

# init hidden state
h = net.init_hidden(batch_size)

# set to eval mode
net.eval()

print('starting testing...')
# iterate over test data
for inputs, labels in test_loader:
    # Creating new variables for the hidden state, otherwise
    # we'd backprop through the entire training history
    h = tuple([each.data for each in h])

    if train_on_gpu:
        inputs, labels = inputs.cuda(), labels.cuda()

    # get predicted outputs
    output, h = net(inputs, h)

    # calculate loss
    test_loss = criterion(output.squeeze(), labels.float())
    test_losses.append(test_loss.item())

    # convert output probabilities to predicted class (0 or 1)
    pred = torch.round(output.squeeze())

    # compare predictions to true label
    correct_tensor = pred.eq(labels.float().view_as(pred))
    correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(
        correct_tensor.cpu().numpy())
    num_correct += np.sum(correct)

# -- stats! -- ##
# avg test loss
print("Test loss: {:.3f}".format(np.mean(test_losses)))

# accuracy over all test data
test_acc = num_correct/len(test_loader.dataset)
print("Test accuracy: {:.3f}".format(test_acc))
| [
"numpy.sum",
"torch.nn.BCELoss",
"torch.load",
"numpy.mean",
"torch.cuda.is_available"
] | [((107, 132), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (130, 132), False, 'import torch\n'), ((312, 324), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (322, 324), True, 'import torch.nn as nn\n'), ((353, 397), 'torch.load', 'torch.load', (['"""models/torch_rnn_trained.model"""'], {}), "('models/torch_rnn_trained.model')\n", (363, 397), False, 'import torch\n'), ((436, 486), 'torch.load', 'torch.load', (['"""data/processed/torch_rnn_test.loader"""'], {}), "('data/processed/torch_rnn_test.loader')\n", (446, 486), False, 'import torch\n'), ((1397, 1412), 'numpy.sum', 'np.sum', (['correct'], {}), '(correct)\n', (1403, 1412), True, 'import numpy as np\n'), ((2421, 2436), 'numpy.sum', 'np.sum', (['correct'], {}), '(correct)\n', (2427, 2436), True, 'import numpy as np\n'), ((2506, 2526), 'numpy.mean', 'np.mean', (['test_losses'], {}), '(test_losses)\n', (2513, 2526), True, 'import numpy as np\n')] |
# Convert VOC-style RGB segmentation label PNGs into binary 0/255 masks.
import os
import cv2
import numpy as np
# Source directory of RGB class-label PNGs; destination for the binary masks.
rgbImgDir = r"F:\IIIT-H Work\win_det_heatmaps\datasets\IIIT-H Dataset\GoogleEarth\instance_segmentation\dataset_voc\SegmentationClassPNG"
binaryImgDir = r"F:\IIIT-H Work\win_det_heatmaps\datasets\IIIT-H Dataset\GoogleEarth\instance_segmentation\dataset\labels"
allfiles = os.listdir(rgbImgDir)
# Only process PNG files from the source directory.
images = [ fname for fname in allfiles if fname.endswith('.png')]
for i in range(len(images)):
    path = os.path.join(rgbImgDir, images[i])
    print('path:', path)
    inputImg = cv2.imread(path)  # loaded as a BGR colour image
    # print('inputImg shape:', inputImg.shape)
    # print('inputImg datatype:', inputImg.dtype)
    # print('inputImg', np.amax(inputImg, axis=2))
    grayImg = cv2.cvtColor(inputImg, cv2.COLOR_BGR2GRAY)
    # print(np.amax(grayImg))
    # THRESH_BINARY with threshold 0: any non-zero (non-background) pixel -> 255.
    _, binaryImg = cv2.threshold(grayImg, 0, 255, cv2.THRESH_BINARY)
    print(np.amax(binaryImg))  # sanity check: 255 whenever the mask is non-empty
cv2.imwrite(os.path.join(binaryImgDir, images[i]), binaryImg) | [
"cv2.cvtColor",
"cv2.threshold",
"numpy.amax",
"cv2.imread",
"os.path.join",
"os.listdir"
] | [((314, 335), 'os.listdir', 'os.listdir', (['rgbImgDir'], {}), '(rgbImgDir)\n', (324, 335), False, 'import os\n'), ((443, 477), 'os.path.join', 'os.path.join', (['rgbImgDir', 'images[i]'], {}), '(rgbImgDir, images[i])\n', (455, 477), False, 'import os\n'), ((518, 534), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (528, 534), False, 'import cv2\n'), ((698, 740), 'cv2.cvtColor', 'cv2.cvtColor', (['inputImg', 'cv2.COLOR_BGR2GRAY'], {}), '(inputImg, cv2.COLOR_BGR2GRAY)\n', (710, 740), False, 'import cv2\n'), ((790, 839), 'cv2.threshold', 'cv2.threshold', (['grayImg', '(0)', '(255)', 'cv2.THRESH_BINARY'], {}), '(grayImg, 0, 255, cv2.THRESH_BINARY)\n', (803, 839), False, 'import cv2\n'), ((851, 869), 'numpy.amax', 'np.amax', (['binaryImg'], {}), '(binaryImg)\n', (858, 869), True, 'import numpy as np\n'), ((887, 924), 'os.path.join', 'os.path.join', (['binaryImgDir', 'images[i]'], {}), '(binaryImgDir, images[i])\n', (899, 924), False, 'import os\n')] |
## ------------------------------------------------------------------------------------------------------------------ ##
## ------------------------------------------------------------------------------------------------------------------ ##
## ___ ___ ________ ________ ______ ________ ________ ##
## | | | | | ____| / | / __ \ | ____|| ____| ##
## | |__| | | |__ | (----` ______| | | | | |__ | |__ ##
## | __ | | __| \ \ |______| | | | | __| | __| ##
## | | | | | |____.----) | | `--' | | | | | ##
## |__| |__| |_______|_______/ \______/ |__| |__| ##
## ##
## ------------------------------------------------------------------------------------------------------------------ ##
## ------------------------------------------------------------------------------------------------------------------ ##
# Import packages
import time
import numpy as np
import matplotlib.pyplot as plt
import hes_off_object_oriented.process_functions as pm
## ------------------------------------------------------------------------------------------------------------------ ##
## ------------------------------------------------------------------------------------------------------------------ ##
## ------------------------------------------------------------------------------------------------------------------ ##
def test_fluid_initialization():
    """Build a five-component air fluid and print its summary."""
    # Dry-air component data: molar masses in kg/mol, LHVs in J/kg.
    species = np.asarray(["N2", "O2", "CO2", "H2O", "Ar"])
    molar_masses = np.asarray([28.01, 32.00, 44.01, 18.02, 39.95]) / 1e3
    heating_values = np.asarray([0.00, 0.00, 0.00, 0.00, 0.00]) * 1e6
    carbon_ratios = np.asarray([0, 0, 1, 0, 0])
    fractions = np.asarray([0.7739, 0.2076, 0.0003, 0.0089, 0.0093])
    air = pm.create_fluid(
        name="Air",
        components=species,
        MW=molar_masses,
        LHV=heating_values,
        CR=carbon_ratios,
        fraction=fractions)
    # Echo the resulting fluid definition to stdout.
    pm.print_fluid(air)
def test_mixture_creation():
    """Check the mean molecular mass of air mixtures built on molar and mass bases."""
    species = np.asarray([pm.nitrogen, pm.oxygen, pm.carbon_dioxide, pm.water, pm.argon])
    molar_fractions = np.asarray([0.7739, 0.2076, 0.0003, 0.0089, 0.0093])
    expected_MW = 0.028865255  # precomputed mean molecular mass of air [kg/mol]
    # Mixture defined on a molar basis.
    mix_molar = pm.create_fluid_mixture("Air mix", species, molar_fractions, fraction_type="molar")
    assert np.abs(mix_molar["MW_mean"] - expected_MW) < 1e-9
    # Same mixture rebuilt from its own mass fractions must agree.
    mix_mass = pm.create_fluid_mixture("Air mix", species, mix_molar["y"], fraction_type="mass")
    assert np.abs(mix_mass["MW_mean"] - expected_MW) < 1e-9
def test_molar_to_mass_conversion():
    """Hand-checkable molar-to-mass fraction conversion."""
    molar = np.asarray([0.5, 0.5])
    weights = np.asarray([10, 20])
    mass = pm.convert_molar_to_mass_fraction(molar, weights)
    # Mean molecular weight is 15, so y_i = x_i * MW_i / 15.
    assert np.abs(mass[0] - molar[0] * 10 / 15) < 1e-9
    assert np.abs(mass[1] - molar[1] * 20 / 15) < 1e-9
def test_mass_to_molar_conversion():
    """Hand-checkable mass-to-molar fraction conversion."""
    mass = np.asarray([0.5, 0.5])
    weights = np.asarray([10, 20])
    molar = pm.convert_mass_to_molar_fraction(mass, weights)
    # Expected values worked out by hand for the 50/50 mass split.
    assert np.abs(molar[0] - mass[0] * (2 * 20) / (10 + 20)) < 1e-9
    assert np.abs(molar[1] - mass[1] * (2 * 10) / (10 + 20)) < 1e-9
def test_carbon_dioxide_emissions_computation():
    """Specific CO2 emissions: positive for natural gas, zero for hydrogen."""
    cases = [
        (pm.natural_gas, 192.6400256508272),  # reference value computed in advance
        (pm.hydrogen, 0.00),                  # hydrogen combustion emits no CO2
    ]
    for fuel, expected in cases:
        emissions = pm.compute_GT_specific_carbon_dioxide_emissions(
            1.00, fuel["x"], fuel["MW"], fuel["LHV"], fuel["CR"])
        assert np.abs(emissions - expected) < 1e-9
## ------------------------------------------------------------------------------------------------------------------ ##
## ------------------------------------------------------------------------------------------------------------------ ##
## ------------------------------------------------------------------------------------------------------------------ ##
import numba as nb
# Run the tests when this script is executed as main
if __name__ == '__main__':
    # Ad-hoc timing of a single create_fluid call (scratch benchmark).
    start = time.time()
    # NOTE(review): component names "123"/"hwh" look like dummies, and only
    # one fraction is supplied for two components.  This call also uses the
    # keyword ``fractions`` while test_fluid_initialization uses ``fraction``
    # -- confirm which spelling pm.create_fluid actually accepts.
    a = pm.create_fluid(name="Carbon dioxide",
                        components=np.asarray(["123", "hwh"]),
                        fractions=np.asarray([1.0]),
                        fraction_type="molar",
                        MW=np.asarray([44.01]) / 1e3,
                        LHV=np.asarray([0.00]) * 1e6,
                        CR=np.asarray([1]))
    end = time.time()
    # "ellapsed" typo is part of the printed string; left untouched here.
    print("ellapsed time: ", end-start)
    # print(a.fractions)
    # print(a.name)
    # test_fluid_initialization()
    # test_mixture_creation()
    # test_molar_to_mass_conversion()
    # test_mass_to_molar_conversion()
    # test_carbon_dioxide_emissions_computation()
# test_carbon_dioxide_emissions_computation() | [
"hes_off_object_oriented.process_functions.print_fluid",
"hes_off_object_oriented.process_functions.convert_molar_to_mass_fraction",
"numpy.abs",
"hes_off_object_oriented.process_functions.convert_mass_to_molar_fraction",
"hes_off_object_oriented.process_functions.compute_GT_specific_carbon_dioxide_emission... | [((2297, 2316), 'hes_off_object_oriented.process_functions.print_fluid', 'pm.print_fluid', (['air'], {}), '(air)\n', (2311, 2316), True, 'import hes_off_object_oriented.process_functions as pm\n'), ((2442, 2517), 'numpy.asarray', 'np.asarray', (['[pm.nitrogen, pm.oxygen, pm.carbon_dioxide, pm.water, pm.argon]'], {}), '([pm.nitrogen, pm.oxygen, pm.carbon_dioxide, pm.water, pm.argon])\n', (2452, 2517), True, 'import numpy as np\n'), ((2542, 2594), 'numpy.asarray', 'np.asarray', (['[0.7739, 0.2076, 0.0003, 0.0089, 0.0093]'], {}), '([0.7739, 0.2076, 0.0003, 0.0089, 0.0093])\n', (2552, 2594), True, 'import numpy as np\n'), ((2606, 2698), 'hes_off_object_oriented.process_functions.create_fluid_mixture', 'pm.create_fluid_mixture', (['"""Air mix"""', 'components', 'molar_composition'], {'fraction_type': '"""molar"""'}), "('Air mix', components, molar_composition,\n fraction_type='molar')\n", (2629, 2698), True, 'import hes_off_object_oriented.process_functions as pm\n'), ((2839, 2918), 'hes_off_object_oriented.process_functions.create_fluid_mixture', 'pm.create_fluid_mixture', (['"""Air mix"""', 'components', "air1['y']"], {'fraction_type': '"""mass"""'}), "('Air mix', components, air1['y'], fraction_type='mass')\n", (2862, 2918), True, 'import hes_off_object_oriented.process_functions as pm\n'), ((3080, 3102), 'numpy.asarray', 'np.asarray', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (3090, 3102), True, 'import numpy as np\n'), ((3112, 3132), 'numpy.asarray', 'np.asarray', (['[10, 20]'], {}), '([10, 20])\n', (3122, 3132), True, 'import numpy as np\n'), ((3142, 3182), 'hes_off_object_oriented.process_functions.convert_molar_to_mass_fraction', 'pm.convert_molar_to_mass_fraction', (['x', 'MW'], {}), '(x, MW)\n', (3175, 3182), True, 'import hes_off_object_oriented.process_functions as pm\n'), ((3376, 3398), 'numpy.asarray', 'np.asarray', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (3386, 
3398), True, 'import numpy as np\n'), ((3408, 3428), 'numpy.asarray', 'np.asarray', (['[10, 20]'], {}), '([10, 20])\n', (3418, 3428), True, 'import numpy as np\n'), ((3438, 3478), 'hes_off_object_oriented.process_functions.convert_mass_to_molar_fraction', 'pm.convert_mass_to_molar_fraction', (['y', 'MW'], {}), '(y, MW)\n', (3471, 3478), True, 'import hes_off_object_oriented.process_functions as pm\n'), ((3776, 3888), 'hes_off_object_oriented.process_functions.compute_GT_specific_carbon_dioxide_emissions', 'pm.compute_GT_specific_carbon_dioxide_emissions', (['efficiency', "fuel['x']", "fuel['MW']", "fuel['LHV']", "fuel['CR']"], {}), "(efficiency, fuel['x'], fuel\n ['MW'], fuel['LHV'], fuel['CR'])\n", (3823, 3888), True, 'import hes_off_object_oriented.process_functions as pm\n'), ((4114, 4226), 'hes_off_object_oriented.process_functions.compute_GT_specific_carbon_dioxide_emissions', 'pm.compute_GT_specific_carbon_dioxide_emissions', (['efficiency', "fuel['x']", "fuel['MW']", "fuel['LHV']", "fuel['CR']"], {}), "(efficiency, fuel['x'], fuel\n ['MW'], fuel['LHV'], fuel['CR'])\n", (4161, 4226), True, 'import hes_off_object_oriented.process_functions as pm\n'), ((4775, 4786), 'time.time', 'time.time', ([], {}), '()\n', (4784, 4786), False, 'import time\n'), ((5118, 5129), 'time.time', 'time.time', ([], {}), '()\n', (5127, 5129), False, 'import time\n'), ((2706, 2743), 'numpy.abs', 'np.abs', (["(air1['MW_mean'] - 0.028865255)"], {}), "(air1['MW_mean'] - 0.028865255)\n", (2712, 2743), True, 'import numpy as np\n'), ((2930, 2967), 'numpy.abs', 'np.abs', (["(air2['MW_mean'] - 0.028865255)"], {}), "(air2['MW_mean'] - 0.028865255)\n", (2936, 2967), True, 'import numpy as np\n'), ((3194, 3223), 'numpy.abs', 'np.abs', (['(y[0] - x[0] * 10 / 15)'], {}), '(y[0] - x[0] * 10 / 15)\n', (3200, 3223), True, 'import numpy as np\n'), ((3238, 3267), 'numpy.abs', 'np.abs', (['(y[1] - x[1] * 20 / 15)'], {}), '(y[1] - x[1] * 20 / 15)\n', (3244, 3267), True, 'import numpy as np\n'), ((3490, 
3532), 'numpy.abs', 'np.abs', (['(x[0] - y[0] * (2 * 20) / (10 + 20))'], {}), '(x[0] - y[0] * (2 * 20) / (10 + 20))\n', (3496, 3532), True, 'import numpy as np\n'), ((3543, 3585), 'numpy.abs', 'np.abs', (['(x[1] - y[1] * (2 * 10) / (10 + 20))'], {}), '(x[1] - y[1] * (2 * 10) / (10 + 20))\n', (3549, 3585), True, 'import numpy as np\n'), ((3895, 3941), 'numpy.abs', 'np.abs', (['(specific_emissions - 192.6400256508272)'], {}), '(specific_emissions - 192.6400256508272)\n', (3901, 3941), True, 'import numpy as np\n'), ((4233, 4265), 'numpy.abs', 'np.abs', (['(specific_emissions - 0.0)'], {}), '(specific_emissions - 0.0)\n', (4239, 4265), True, 'import numpy as np\n'), ((1965, 2009), 'numpy.asarray', 'np.asarray', (["['N2', 'O2', 'CO2', 'H2O', 'Ar']"], {}), "(['N2', 'O2', 'CO2', 'H2O', 'Ar'])\n", (1975, 2009), True, 'import numpy as np\n'), ((2168, 2195), 'numpy.asarray', 'np.asarray', (['[0, 0, 1, 0, 0]'], {}), '([0, 0, 1, 0, 0])\n', (2178, 2195), True, 'import numpy as np\n'), ((2219, 2271), 'numpy.asarray', 'np.asarray', (['[0.7739, 0.2076, 0.0003, 0.0089, 0.0093]'], {}), '([0.7739, 0.2076, 0.0003, 0.0089, 0.0093])\n', (2229, 2271), True, 'import numpy as np\n'), ((4862, 4888), 'numpy.asarray', 'np.asarray', (["['123', 'hwh']"], {}), "(['123', 'hwh'])\n", (4872, 4888), True, 'import numpy as np\n'), ((4917, 4934), 'numpy.asarray', 'np.asarray', (['[1.0]'], {}), '([1.0])\n', (4927, 4934), True, 'import numpy as np\n'), ((5090, 5105), 'numpy.asarray', 'np.asarray', (['[1]'], {}), '([1])\n', (5100, 5105), True, 'import numpy as np\n'), ((2026, 2072), 'numpy.asarray', 'np.asarray', (['[28.01, 32.0, 44.01, 18.02, 39.95]'], {}), '([28.01, 32.0, 44.01, 18.02, 39.95])\n', (2036, 2072), True, 'import numpy as np\n'), ((2097, 2134), 'numpy.asarray', 'np.asarray', (['[0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0])\n', (2107, 2134), True, 'import numpy as np\n'), ((4996, 5015), 'numpy.asarray', 'np.asarray', (['[44.01]'], {}), '([44.01])\n', (5006, 5015), True, 
'import numpy as np\n'), ((5044, 5061), 'numpy.asarray', 'np.asarray', (['[0.0]'], {}), '([0.0])\n', (5054, 5061), True, 'import numpy as np\n')] |
""" This module contains utility functions.
"""
import random
import numpy as np
import SimpleITK as sitk
def visualize_slice(image, mask, ax, location, width, **kwargs):
    """ Draw one CT slice, windowed in Hounsfield units, with an optional mask overlay.

    Args:
        image: A numpy array holding the slice to display.
        mask: A numpy array of the same shape (overlay), or None.
        ax: An Axis object to be used for drawing.
        location: Center point in Hounsfield unit for the observation window.
        width: Width of the observation window in Hounsfield unit.
        **kwargs: Other graphical parameters forwarded to ``ax.imshow``.
    """
    half_window = width // 2
    lower, upper = location - half_window, location + half_window
    # Clamp intensities into the observation window before display.
    windowed = np.clip(image, lower, upper)
    ax.imshow(windowed, interpolation='none', **kwargs)
    if mask is not None:
        # Hide background (zero) pixels so only the labelled region shows.
        overlay = np.ma.masked_where(mask == 0, mask)
        ax.imshow(overlay, cmap='autumn', interpolation='none', alpha=0.7)
def read_DICOM_from_dir(dir_path):
    """ Read a CT image (or its contours) from a directory of DICOM slices.

    Args:
        dir_path (str): Address of the directory containing DICOM files.

    Returns: A SimpleITK.Image.
    """
    series_reader = sitk.ImageSeriesReader()
    series_ids = series_reader.GetGDCMSeriesIDs(dir_path)
    if not series_ids:
        raise ValueError('No DICOM file in directory:\n{}'.format(dir_path))
    slice_paths = series_reader.GetGDCMSeriesFileNames(dir_path, series_ids[0])
    series_reader.SetFileNames(slice_paths)
    ct = series_reader.Execute()
    # SimpleITK does not expose per-slice metadata when reading a series, so
    # re-read the first slice on its own and copy its tags onto the volume.
    slice_reader = sitk.ImageFileReader()
    slice_reader.SetFileName(slice_paths[0])
    first_slice = slice_reader.Execute()
    for key in first_slice.GetMetaDataKeys():
        ct.SetMetaData(key, first_slice.GetMetaData(key))
    return ct
def read_catalog(catalog_file_path, sep=','):
    """ Read a Catalog file.

    A Catalog file is a tabular file containing 4 columns. These are
    (1) sample_id, (2) image_id, (3) image_src, and (4) mask_src,
    respectively.  Blank lines and lines starting with '#' are skipped.

    Args:
        catalog_file_path: Address of a catalog file.
        sep: A field seperator.

    Returns:
        A list of lists, one per record, each holding the stripped fields
        (1) sample_id, (2) image_id, (3) image_src, and (4) mask_src.
    """
    records = []
    with open(catalog_file_path) as source:
        for raw_line in source:
            stripped = raw_line.strip()
            # Ignore comments and empty lines.
            if not stripped or stripped.startswith('#'):
                continue
            records.append([field.strip() for field in stripped.split(sep)])
    return records
def partition(elements, portions):
    """ partitions samples randomly.

    Args:
        elements: A list.
        portions: A list of positive real numbers, each between 0 and 1,
            exclusively. The summation of elements in portions must be 1.

    Returns:
        A random partition of samples, where partitions[i] includes
        portions[i] * 100 percent of samples.  The partitions are disjoint
        and together cover all of ``elements``.
    """
    # Tolerate floating-point error in the sum (e.g. 0.1 * 3 != 0.3 exactly);
    # the previous exact equality check rejected valid portion lists.
    assert abs(sum(portions) - 1) < 1e-9
    index_value_map = {idx: value for idx, value in enumerate(elements)}
    random_indices = list(index_value_map.keys())
    random.shuffle(random_indices)
    partitions = []
    start = 0
    for portion in portions[:-1]:
        num_elements = int(portion * len(elements))
        # Bug fix: advance the slice window each iteration.  The old code
        # always sliced from index 0, so with three or more portions the
        # later partitions overlapped (contained) the earlier ones.
        partition_indices = random_indices[start:start + num_elements]
        start += num_elements
        partitions.append([index_value_map[idx] for idx in partition_indices])
    # The final partition absorbs any rounding remainder.
    partitions.append([index_value_map[idx] for idx in random_indices[start:]])
    return partitions
| [
"SimpleITK.ImageFileReader",
"random.shuffle",
"numpy.ma.masked_where",
"SimpleITK.ImageSeriesReader"
] | [((1201, 1225), 'SimpleITK.ImageSeriesReader', 'sitk.ImageSeriesReader', ([], {}), '()\n', (1223, 1225), True, 'import SimpleITK as sitk\n'), ((1656, 1678), 'SimpleITK.ImageFileReader', 'sitk.ImageFileReader', ([], {}), '()\n', (1676, 1678), True, 'import SimpleITK as sitk\n'), ((3265, 3295), 'random.shuffle', 'random.shuffle', (['random_indices'], {}), '(random_indices)\n', (3279, 3295), False, 'import random\n'), ((851, 886), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(mask == 0)', 'mask'], {}), '(mask == 0, mask)\n', (869, 886), True, 'import numpy as np\n')] |
# Train a random-forest Titanic-survival classifier and persist the model.
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
# NOTE(review): sklearn.cross_validation and sklearn.externals.joblib were
# removed in modern scikit-learn (use sklearn.model_selection and the
# standalone joblib package) -- this script targets an old sklearn version.
from sklearn.cross_validation import train_test_split
# Disable SettingWithCopy warnings: the replace(..., inplace=True) calls
# below deliberately mutate a slice of ``data``.
pd.options.mode.chained_assignment = None
from sklearn.externals import joblib
data = pd.read_csv("titanic_train.csv")
# Impute missing ages with the median age.
median_age = data['age'].median()
data['age'].fillna(median_age, inplace = True)
# Features: passenger class, age, sex; target: survival flag.
data_input = data[['pclass','age','sex']]
expected_output = data[['survived']]
# Encode the ordinal class labels as integers.
data_input['pclass'].replace('3rd', 3, inplace = True)
data_input['pclass'].replace('2nd', 2, inplace = True)
data_input['pclass'].replace('1st', 1, inplace = True)
# Encode sex: 'female' -> 0, anything else -> 1.
data_input['sex'] = np.where(data_input['sex'] == 'female', 0, 1)
# Hold out a third of the rows for evaluation; fixed seed for repeatability.
input_train, input_test, expected_op_train, expected_op_test = train_test_split(data_input, expected_output, test_size=0.33, random_state = 1000)
rf = RandomForestClassifier(n_estimators=100)
rf.fit(input_train, expected_op_train)
accuracy = rf.score(input_test, expected_op_test)
print("accuracy is {}%".format(accuracy*100))
# Serialize the fitted model to disk (compress=9 = maximum compression).
joblib.dump(rf, "titanic_survival_predictor_model", compress = 9)
| [
"sklearn.ensemble.RandomForestClassifier",
"sklearn.cross_validation.train_test_split",
"sklearn.externals.joblib.dump",
"pandas.read_csv",
"numpy.where"
] | [((234, 266), 'pandas.read_csv', 'pd.read_csv', (['"""titanic_train.csv"""'], {}), "('titanic_train.csv')\n", (245, 266), True, 'import pandas as pd\n'), ((616, 661), 'numpy.where', 'np.where', (["(data_input['sex'] == 'female')", '(0)', '(1)'], {}), "(data_input['sex'] == 'female', 0, 1)\n", (624, 661), True, 'import numpy as np\n'), ((726, 811), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['data_input', 'expected_output'], {'test_size': '(0.33)', 'random_state': '(1000)'}), '(data_input, expected_output, test_size=0.33, random_state=1000\n )\n', (742, 811), False, 'from sklearn.cross_validation import train_test_split\n'), ((816, 856), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (838, 856), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((995, 1058), 'sklearn.externals.joblib.dump', 'joblib.dump', (['rf', '"""titanic_survival_predictor_model"""'], {'compress': '(9)'}), "(rf, 'titanic_survival_predictor_model', compress=9)\n", (1006, 1058), False, 'from sklearn.externals import joblib\n')] |
'''
Summary
-------
This script produces two figures for each dataset
1. Plot of outputs vs inputs
2. Plot of performance score vs. num training data seen
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from FeatureTransformPolynomial import PolynomialFeatureTransform
from LinearRegressionMAPEstimator import LinearRegressionMAPEstimator
from LinearRegressionPosteriorPredictiveEstimator import LinearRegressionPosteriorPredictiveEstimator
# Driver: for each toy dataset and polynomial order, score MAP and
# posterior-predictive linear-regression estimators as a function of the
# number of training examples, then plot scores and predictions.
# NOTE(review): the "TODO fit/record" steps below are unimplemented, so the
# score arrays stay at their zero initialisation and mean_G/stddev_G are
# placeholders -- the plots are scaffolding until those TODOs are filled in.
if __name__ == '__main__':
    order_list = [0, 1, 2, 3, 4]
    n_train_list = [0, 8, 64, 512]

    alpha = 1.0 # moderate prior precision
    beta = 20.0 # strong likelihood precision

    for dataset_name in ['toyline', 'toywave']:
        # Load the train/test splits; x becomes an (N, 1) design column.
        train_df = pd.read_csv("../data/%s_train.csv" % dataset_name)
        test_df = pd.read_csv("../data/%s_test.csv" % dataset_name)
        x_train_ND, t_train_N = train_df['x'].values[:,np.newaxis], train_df['y'].values
        x_test_ND, t_test_N = test_df['x'].values[:,np.newaxis], test_df['y'].values

        # One column of axes per polynomial order, for each of the two figures.
        fig1, perf_vs_N__ax_grid = plt.subplots(nrows=1, ncols=4,
            sharex=True, sharey=True, squeeze=True,
            figsize=(4 * len(order_list), 4))
        fig2, y_vs_x__ax_grid = plt.subplots(nrows=1, ncols=4,
            sharex=True, sharey=True, squeeze=True,
            figsize=(4 * len(order_list), 4))

        for order, perfvsN_ax, xy_ax in zip(order_list, perf_vs_N__ax_grid, y_vs_x__ax_grid):
            feature_transformer = PolynomialFeatureTransform(order=order, input_dim=1)

            map_train_scores = np.zeros(len(n_train_list))
            map_test_scores = np.zeros(len(n_train_list))

            ppe_train_scores = np.zeros(len(n_train_list))
            ppe_test_scores = np.zeros(len(n_train_list))

            print("===== MAPEstimator with alpha %.3g, beta %.3g" % (alpha, beta))
            for ff, N in enumerate(n_train_list):
                estimator = LinearRegressionMAPEstimator(feature_transformer, alpha=alpha, beta=beta)

                ## TODO fit estimator on first N examples in train

                ## TODO record estimator's score on train in map_train_scores
                ## TODO record estimator's score on test in map_test_scores

                print("%6d examples : train score % 9.3f | test score % 9.3f" % (
                    N, map_train_scores[ff], map_test_scores[ff]))

            print("===== PosteriorPredictiveEstimator with alpha %.3g, beta %.3g" % (alpha, beta))
            for ff, N in enumerate(n_train_list):
                ppe_estimator = LinearRegressionPosteriorPredictiveEstimator(feature_transformer, alpha=alpha, beta=beta)

                ## TODO fit estimator on first N examples in train

                ## TODO record estimator's score on train
                ## TODO record estimator's score on test

                print("%6d examples : train score % 9.3f | test score % 9.3f" % (
                    N, ppe_train_scores[ff], ppe_test_scores[ff]))

            # Plot on log scale (manually crafted)
            int_list = np.arange(len(n_train_list))
            perfvsN_ax.plot(int_list, map_test_scores, 'b.-', label='MAP estimator')
            perfvsN_ax.plot(int_list, ppe_test_scores, 'g.-', label='PosteriorPredictive estimator')
            perfvsN_ax.legend(loc='lower right')

            # Manually crafted x scale
            perfvsN_ax.set_xticks([a for a in int_list])
            perfvsN_ax.set_xticklabels(['%d' % a for a in n_train_list])

            ## Plot inputs vs predictions
            xy_ax.plot(x_train_ND[:,0], t_train_N, 'k.', alpha=0.3)

            # Prediction grid extends one data-range R beyond each end.
            G = 200 # num grid points
            xmin = x_train_ND[:,0].min()
            xmax = x_train_ND[:,0].max()
            R = xmax - xmin
            xgrid_G = np.linspace(xmin - R, xmax + R, G)
            xgrid_G1 = np.reshape(xgrid_G, (G, 1))

            ## TODO compute mean prediction at each entry of the grid
            mean_G = np.zeros(G)

            ## TODO compute stddev of prediction at each entry of grid
            stddev_G = np.ones(G)

            ## Plot the mean as solid line, plus light fill for (-3, +3 stddev) range
            xy_ax.fill_between(xgrid_G, mean_G -3 * stddev_G, mean_G +3 * stddev_G,
                facecolor='blue', alpha=0.2, label='3 stddev range')
            xy_ax.plot(xgrid_G, mean_G, 'b.-', label='prediction')
            xy_ax.legend(loc='lower right')
            xy_ax.set_ylim([-5, 5])

    plt.show()
| [
"LinearRegressionMAPEstimator.LinearRegressionMAPEstimator",
"matplotlib.pyplot.show",
"pandas.read_csv",
"numpy.zeros",
"numpy.ones",
"LinearRegressionPosteriorPredictiveEstimator.LinearRegressionPosteriorPredictiveEstimator",
"numpy.reshape",
"numpy.linspace",
"FeatureTransformPolynomial.Polynomia... | [((4499, 4509), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4507, 4509), True, 'import matplotlib.pyplot as plt\n'), ((748, 798), 'pandas.read_csv', 'pd.read_csv', (["('../data/%s_train.csv' % dataset_name)"], {}), "('../data/%s_train.csv' % dataset_name)\n", (759, 798), True, 'import pandas as pd\n'), ((817, 866), 'pandas.read_csv', 'pd.read_csv', (["('../data/%s_test.csv' % dataset_name)"], {}), "('../data/%s_test.csv' % dataset_name)\n", (828, 866), True, 'import pandas as pd\n'), ((1514, 1566), 'FeatureTransformPolynomial.PolynomialFeatureTransform', 'PolynomialFeatureTransform', ([], {'order': 'order', 'input_dim': '(1)'}), '(order=order, input_dim=1)\n', (1540, 1566), False, 'from FeatureTransformPolynomial import PolynomialFeatureTransform\n'), ((3800, 3834), 'numpy.linspace', 'np.linspace', (['(xmin - R)', '(xmax + R)', 'G'], {}), '(xmin - R, xmax + R, G)\n', (3811, 3834), True, 'import numpy as np\n'), ((3858, 3885), 'numpy.reshape', 'np.reshape', (['xgrid_G', '(G, 1)'], {}), '(xgrid_G, (G, 1))\n', (3868, 3885), True, 'import numpy as np\n'), ((3978, 3989), 'numpy.zeros', 'np.zeros', (['G'], {}), '(G)\n', (3986, 3989), True, 'import numpy as np\n'), ((4084, 4094), 'numpy.ones', 'np.ones', (['G'], {}), '(G)\n', (4091, 4094), True, 'import numpy as np\n'), ((1964, 2037), 'LinearRegressionMAPEstimator.LinearRegressionMAPEstimator', 'LinearRegressionMAPEstimator', (['feature_transformer'], {'alpha': 'alpha', 'beta': 'beta'}), '(feature_transformer, alpha=alpha, beta=beta)\n', (1992, 2037), False, 'from LinearRegressionMAPEstimator import LinearRegressionMAPEstimator\n'), ((2590, 2684), 'LinearRegressionPosteriorPredictiveEstimator.LinearRegressionPosteriorPredictiveEstimator', 'LinearRegressionPosteriorPredictiveEstimator', (['feature_transformer'], {'alpha': 'alpha', 'beta': 'beta'}), '(feature_transformer, alpha=\n alpha, beta=beta)\n', (2634, 2684), False, 'from 
LinearRegressionPosteriorPredictiveEstimator import LinearRegressionPosteriorPredictiveEstimator\n')] |
from collections import defaultdict
import numpy as np
from rlcard.utils.euchre_utils import LEFT, NON_TRUMP, ACTION_SPACE
class EuchreRuleAgent(object):
    """Heuristic (rule-based) euchre agent for rlcard.

    Applies a fixed priority of rules: discard from the weakest suit after a
    pickup, call trump when holding at least three cards of one suit, lead
    the right bower, then low trump, then off-suit aces, then the lowest
    legal card.
    """

    def __init__(self):
        # The agent consumes the extracted (non-raw) state representation.
        self.use_raw = False

    def step(self, state):
        """Pick an action id from ACTION_SPACE for the given game state.

        ``state`` is a dict with at least the keys used below:
        raw_legal_actions, hand, trump_called, flipped, turned_down,
        trump, center.
        """
        legal_actions = state['raw_legal_actions']
        hand = state['hand']
        # Forced move: only one legal action available.
        if len(legal_actions) == 1:
            return ACTION_SPACE[legal_actions[0]]
        # Six cards in hand => dealer just picked up; discard the lowest
        # card of the least-represented suit (left bower not counted here).
        if len(hand) == 6:
            suit_counts = self.count_suits(hand, include_left=False)
            worst_suit = min(suit_counts, key = suit_counts.get)
            cards = [card for card in hand if card[0] == worst_suit]
            worst_card = [NON_TRUMP.index(card[1]) for card in cards]
            discard = cards[np.argmin(worst_card)]
            return ACTION_SPACE[f'discard-{discard}']
        # Bidding phase: pick up / call trump when holding >= 3 of a suit.
        if not state['trump_called']:
            suit_counts = self.count_suits(hand)
            best_suit = max(suit_counts, key = suit_counts.get)
            if state['flipped'] is not None:
                if suit_counts[state['flipped'][0]] >= 3:
                    return ACTION_SPACE['pick']
                return ACTION_SPACE['pass']
            else:
                if suit_counts[best_suit] >= 3 and best_suit != state['turned_down']:
                    return ACTION_SPACE[f"call-{best_suit}"]
                if 'pass' not in legal_actions:
                    # Passing is not allowed: fall back to a random call.
                    return ACTION_SPACE[np.random.choice(legal_actions)]
                return ACTION_SPACE['pass']
        # Play phase: lead the right bower (trump jack) when leading a trick.
        has_right = (state['trump'] + 'J') in legal_actions
        if has_right and len(state['center']) == 0:
            return ACTION_SPACE[state['trump'] + 'J']
        # Otherwise play the lowest-ranked legal trump card.
        playable_trump = [card for card in legal_actions if card[0] == state['trump']]
        if len(playable_trump) > 0:
            worst_card = [NON_TRUMP.index(card[1]) for card in playable_trump]
            return ACTION_SPACE[playable_trump[np.argmin(worst_card)]]
        # No trump available: play an off-suit ace if one is legal.
        aces = [card for card in legal_actions if card[0] != state['trump'] and card[1] == 'A']
        if len(aces) > 0:
            return ACTION_SPACE[aces[0]]
        # Fall back to the lowest-ranked legal card.
        worst_card = [NON_TRUMP.index(card[1]) for card in legal_actions]
        if len(worst_card) > 0:
            return ACTION_SPACE[legal_actions[np.argmin(worst_card)]]
        return ACTION_SPACE[np.random.choice(legal_actions)]

    def eval_step(self, state):
        """Evaluation interface: same policy as step(), no extra info."""
        return self.step(state), []

    @staticmethod
    def count_suits(hand, include_left=True):
        """Count cards per suit; when include_left is True, a jack is also
        counted toward the suit given by LEFT (presumably the same-colour
        suit, making the left bower count as trump -- confirm against LEFT)."""
        card_count = defaultdict(int)
        for card in hand:
            card_count[card[0]] += 1
            if include_left:
                if card[1] == 'J':
                    card_count[LEFT[card[0]][0]] += 1
return card_count | [
"collections.defaultdict",
"rlcard.utils.euchre_utils.NON_TRUMP.index",
"numpy.argmin",
"numpy.random.choice"
] | [((2528, 2544), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2539, 2544), False, 'from collections import defaultdict\n'), ((2136, 2160), 'rlcard.utils.euchre_utils.NON_TRUMP.index', 'NON_TRUMP.index', (['card[1]'], {}), '(card[1])\n', (2151, 2160), False, 'from rlcard.utils.euchre_utils import LEFT, NON_TRUMP, ACTION_SPACE\n'), ((2331, 2362), 'numpy.random.choice', 'np.random.choice', (['legal_actions'], {}), '(legal_actions)\n', (2347, 2362), True, 'import numpy as np\n'), ((664, 688), 'rlcard.utils.euchre_utils.NON_TRUMP.index', 'NON_TRUMP.index', (['card[1]'], {}), '(card[1])\n', (679, 688), False, 'from rlcard.utils.euchre_utils import LEFT, NON_TRUMP, ACTION_SPACE\n'), ((736, 757), 'numpy.argmin', 'np.argmin', (['worst_card'], {}), '(worst_card)\n', (745, 757), True, 'import numpy as np\n'), ((1817, 1841), 'rlcard.utils.euchre_utils.NON_TRUMP.index', 'NON_TRUMP.index', (['card[1]'], {}), '(card[1])\n', (1832, 1841), False, 'from rlcard.utils.euchre_utils import LEFT, NON_TRUMP, ACTION_SPACE\n'), ((1917, 1938), 'numpy.argmin', 'np.argmin', (['worst_card'], {}), '(worst_card)\n', (1926, 1938), True, 'import numpy as np\n'), ((2266, 2287), 'numpy.argmin', 'np.argmin', (['worst_card'], {}), '(worst_card)\n', (2275, 2287), True, 'import numpy as np\n'), ((1415, 1446), 'numpy.random.choice', 'np.random.choice', (['legal_actions'], {}), '(legal_actions)\n', (1431, 1446), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Compare using different versions of functional gene networks for prediction of
genetic interactions.
Created: 2015 September 22
"""
import func_net_pred
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
def get_seed_set_aucs(filenames):
    """Compute per-seed-gene AUCs for one functional gene network.

    Args:
        filenames: pair ``(netwkFile, matrixFile)`` -- the network edge-list
            filename and the cached adjacency-matrix ``.npy`` filename.

    Returns:
        Dict mapping each seed gene to its prediction AUC.
    """
    netwkFile = filenames[0]
    matrixFile = filenames[1]
    node2edgewt = func_net_pred.process_func_net(netwkFile)
    gene2idx = func_net_pred.assign_gene_indices(node2edgewt)
    matrixPath = os.path.join('..', 'data', matrixFile)
    try:
        adjMat = np.load(matrixPath)
    except OSError:
        # Narrowed from a bare ``except:`` -- only a missing/unreadable
        # cache should trigger a rebuild; the bare form also swallowed
        # KeyboardInterrupt and SystemExit.
        print('Network file not found. Creating network matrix...\n')
        adjMat = func_net_pred.build_netwk_adj_matrix(node2edgewt, gene2idx)
        np.save(matrixPath, adjMat)
    # NOTE(review): ``seedSets`` is not defined in this function or at module
    # scope (main() keeps it local), so this call raises NameError at run
    # time -- it should be passed in as a parameter; confirm intent.
    seedAUC, seed2intacts = func_net_pred.seed_set_predictability(gene2idx,
                                                           adjMat,
                                                           seedSets)
    return {p[1]: p[0] for p in seedAUC}
def plot_auc_comparison(version2aucs):
    """Plot per-gene AUCs of one network version against another.

    Args:
        version2aucs: dict mapping exactly two version names to
            {gene: AUC} dicts.  Only genes present in both versions are
            plotted.
    """
    x_coords = list()
    y_coords = list()
    # Bug fix: ``list(d.keys()).sort()`` sorts in place and returns None,
    # so the old code crashed on ``versionNums[0]``.  ``sorted`` returns
    # the ordered list.
    versionNums = sorted(version2aucs.keys())
    for gene in version2aucs[versionNums[0]]:
        if gene in version2aucs[versionNums[1]]:
            x_coords.append(version2aucs[versionNums[0]][gene])
            y_coords.append(version2aucs[versionNums[1]][gene])
    fig = plt.figure()
    plt.plot(x_coords, y_coords)
    plt.xlabel(versionNums[0] + ' AUCs')
    plt.ylabel(versionNums[1] + ' AUCs')
    plt.grid(b=True)
    plt.show()
def main():
    """Compare seed-set AUCs between two functional-network versions.

    Command line: <experimentSys> <versionA> <versionB>.
    """
    experimentSys = sys.argv[1]
    vers_A_num = 'version_' + sys.argv[2]
    vers_B_num = 'version_' + sys.argv[3]
    # Each version maps to (edge-list file, cached adjacency-matrix file).
    version2files = {vers_A_num: ('humannet1.entrez.txt',
                                  'HumanNet1_adj_matrix.npy'),
                     vers_B_num: ('H6Net_CC.net', 'HumanNet2_adj_matrix.npy')}
    biogridFile = 'BIOGRID-3.4.127-human.txt'
    # NOTE(review): seedSets is local here, but get_seed_set_aucs references
    # a name ``seedSets`` it never receives -- confirm how the seed sets are
    # meant to reach that function.
    seedSets = func_net_pred.read_biogrid(experimentSys, biogridFile)
    version2aucs = dict()
    for version in (vers_A_num, vers_B_num):
        inputFiles = version2files[version]
        version2aucs[version] = get_seed_set_aucs(inputFiles)
    # NOTE(review): version2aucs is computed but plot_auc_comparison is
    # never invoked -- presumably intentional while debugging; confirm.

if __name__=="__main__":
    main()
| [
"func_net_pred.process_func_net",
"numpy.load",
"matplotlib.pyplot.show",
"numpy.save",
"matplotlib.pyplot.plot",
"func_net_pred.seed_set_predictability",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.figure",
"func_net_pred.read_biogrid",
"func_net_pred.assign_gene_indices",
"matplotlib.pyplot... | [((369, 410), 'func_net_pred.process_func_net', 'func_net_pred.process_func_net', (['netwkFile'], {}), '(netwkFile)\n', (399, 410), False, 'import func_net_pred\n'), ((426, 472), 'func_net_pred.assign_gene_indices', 'func_net_pred.assign_gene_indices', (['node2edgewt'], {}), '(node2edgewt)\n', (459, 472), False, 'import func_net_pred\n'), ((490, 528), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', 'matrixFile'], {}), "('..', 'data', matrixFile)\n", (502, 528), False, 'import os\n'), ((798, 863), 'func_net_pred.seed_set_predictability', 'func_net_pred.seed_set_predictability', (['gene2idx', 'adjMat', 'seedSets'], {}), '(gene2idx, adjMat, seedSets)\n', (835, 863), False, 'import func_net_pred\n'), ((1415, 1427), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1425, 1427), True, 'import matplotlib.pyplot as plt\n'), ((1432, 1460), 'matplotlib.pyplot.plot', 'plt.plot', (['x_coords', 'y_coords'], {}), '(x_coords, y_coords)\n', (1440, 1460), True, 'import matplotlib.pyplot as plt\n'), ((1465, 1501), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["(versionNums[0] + ' AUCs')"], {}), "(versionNums[0] + ' AUCs')\n", (1475, 1501), True, 'import matplotlib.pyplot as plt\n'), ((1506, 1542), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["(versionNums[1] + ' AUCs')"], {}), "(versionNums[1] + ' AUCs')\n", (1516, 1542), True, 'import matplotlib.pyplot as plt\n'), ((1547, 1563), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)'}), '(b=True)\n', (1555, 1563), True, 'import matplotlib.pyplot as plt\n'), ((1568, 1578), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1576, 1578), True, 'import matplotlib.pyplot as plt\n'), ((1971, 2025), 'func_net_pred.read_biogrid', 'func_net_pred.read_biogrid', (['experimentSys', 'biogridFile'], {}), '(experimentSys, biogridFile)\n', (1997, 2025), False, 'import func_net_pred\n'), ((555, 574), 'numpy.load', 'np.load', (['matrixPath'], {}), '(matrixPath)\n', (562, 574), True, 'import numpy 
as np\n'), ((674, 733), 'func_net_pred.build_netwk_adj_matrix', 'func_net_pred.build_netwk_adj_matrix', (['node2edgewt', 'gene2idx'], {}), '(node2edgewt, gene2idx)\n', (710, 733), False, 'import func_net_pred\n'), ((742, 769), 'numpy.save', 'np.save', (['matrixPath', 'adjMat'], {}), '(matrixPath, adjMat)\n', (749, 769), True, 'import numpy as np\n')] |
from tkinter import filedialog
from tkinter import *
import numpy as np
import re
# --- GUI setup -------------------------------------------------------------
root = Tk()  # main application window
root.title("UPGMA")
matrix_filepath = ""  # path of the chosen matrix file; set by select()
T = Text(root, height=30, width=180)  # text area that displays the resulting trees
def select():
    """Ask the user for a matrix file, remember its path and run the analysis."""
    global matrix_filepath
    chosen = filedialog.askopenfilename(
        initialdir="/",
        title="Select file",
        filetypes=(("text files", "*.txt"), ("all files", "*.*")),
    )
    root.filename = chosen
    matrix_filepath = chosen
    generate()
def generate():
    """Run UPGMA on every matrix found in the selected file and display the trees."""
    names, labels, matrices = read_matrix()
    for i in range(len(names)):
        # convert the full square matrix to the ragged lower-triangular
        # form expected by UPGMA(), then cluster
        tree = UPGMA(format_matrix(matrices[i]), labels[i])
        T.insert(END, names[i] + "\n" + tree + "\n\n\n\n")
# Parses the matrix file into names, labels and value tables
def read_matrix():
    """Parse the matrix file selected by the user.

    The file contains one or more distance matrices.  Each matrix starts
    with a ``[name] N`` header line (N = dimension), followed by ``*label``
    lines and then N rows of N four-decimal float values.

    Returns:
        tuple: ``(all_names, all_labels, all_tables)`` — parallel lists with,
        for each matrix, its name string, its list of ``*label`` strings and
        its NxN table of floats.
    """
    with open(matrix_filepath, "r") as f:
        lines = f.readlines()
    name_pattern = re.compile(r'\[.*\]')
    col_number = re.compile(r'\d')
    data_pattern = re.compile(r'\*\S*')
    entry_pattern = re.compile(r'-?\d\.\d\d\d\d')
    all_tables = []
    all_labels = []
    all_names = []
    labels = []      # labels of the matrix currently being parsed
    table_ref = []   # table of the matrix currently being parsed
    rows = 0
    col_i = 0
    row_i = 0
    for line in lines:
        name = name_pattern.search(line)
        col = col_number.search(line)
        data = data_pattern.match(line)
        entry_val = entry_pattern.findall(line)  # all values on this row
        if name:
            # Header line: start a new matrix.  A *fresh* labels list is
            # created and stored right away; the following ``*label`` lines
            # append into it.  (BUG FIX: the original appended one shared
            # list and cleared it with ``del labels[:]``, so every entry of
            # ``all_labels`` aliased the same, repeatedly-emptied list.)
            rows = int(col.group(0))
            table_ref = [[0 for y in range(rows)] for x in range(rows)]
            labels = []
            all_names.append(name.group(0))
            all_tables.append(table_ref)
            all_labels.append(labels)
            col_i = 0
            row_i = 0
        if data:
            labels.append(data.group(0))
        if entry_val:
            for vals in entry_val:
                table_ref[col_i][row_i] = float(vals)
                col_i += 1
            row_i += 1
            if row_i >= rows:
                row_i = 0
            if col_i >= rows:
                col_i = 0
    return (all_names, all_labels, all_tables)
def format_matrix(table):
    """Convert a full square matrix to ragged lower-triangular form.

    Row ``i`` of the result holds the first ``i`` entries of row ``i`` of
    the lower triangle — the shape that UPGMA() operates on.
    """
    lower = np.tril(table)
    return [lower[i][:i].tolist() for i in range(len(lower))]
# Locates the position of the smallest entry in the ragged table
def min_val_cluster(table):
    """Return the (row, col) indices of the minimum value in *table*."""
    best = float("inf")
    best_pos = (-1, -1)
    for row_idx, row in enumerate(table):
        for col_idx, cell in enumerate(row):
            # strict '<' keeps the first occurrence on ties
            if cell < best:
                best = cell
                best_pos = (row_idx, col_idx)
    return best_pos
def join_labels(labels, a, b):
    """Merge labels *a* and *b* in place into a nested "(x,y)" label.

    The lower index keeps the combined label; the higher one is removed.
    """
    lo, hi = (a, b) if a < b else (b, a)
    labels[lo] = f"({labels[lo]},{labels[hi]})"
    del labels[hi]
def join_table(table, a, b):
    """Merge clusters *a* and *b* of a lower-triangular distance table in place.

    Every distance to the merged cluster becomes the arithmetic mean of the
    distances to its two members; row/column *b* is then removed.
    """
    if b < a:
        a, b = b, a
    # distances from the merged cluster to clusters before ``a``
    row = []
    for i in range(0, a):
        row.append((table[a][i] + table[b][i]) / 2)
    table[a] = row
    # clusters strictly between ``a`` and ``b``
    for i in range(a + 1, b):
        table[i][a] = (table[i][a] + table[b][i]) / 2
    # clusters after ``b``.  BUG FIX: the original computed
    # ``table[i][a] + table[i][b] / 2`` — only the second term was halved.
    # The mean of both distances is intended, matching the two branches above.
    for i in range(b + 1, len(table)):
        table[i][a] = (table[i][a] + table[i][b]) / 2
        del table[i][b]
    del table[b]
def UPGMA(table, labels):
    """Cluster until a single label remains; return the nested tree string."""
    while len(labels) > 1:
        # merge the closest pair, then collapse both table and labels
        merge_a, merge_b = min_val_cluster(table)
        join_table(table, merge_a, merge_b)
        join_labels(labels, merge_a, merge_b)
    return labels[0]
'''
# alpha_labels:
# Makes labels from a starting letter to an ending letter
def alpha_labels(start, end):
labels = []
for i in range(ord(start), ord(end)+1):
labels.append(chr(i))
return labels
# Test table data and corresponding labels
M_labels = alpha_labels("A", "G") #A through G
M = [
[], #A
[19], #B
[27, 31], #C
[8, 18, 26], #D
[33, 36, 41, 31], #E
[18, 1, 32, 17, 35], #F
[13, 13, 29, 14, 28, 12] #G
]
KIMURA_labels = ["BK001410.1","AY350716.1","AY350722.1","AY350721.1",
"AY350720.1","AY350719.1","AY350717.1","AY350718.1"]
KIMURA_M = [
[],
[0.0858],
[0.0117, 0.0713],
[0.0117, 0.0713, 0.0000],
[0.0117, 0.0713, 0.0000, 0.0000],
[0.0117, 0.0713, 0.0000, 0.0000, 0.0000],
[0.0235, 0.0591, 0.0114, 0.0114, 0.0114, 0.0114],
[0.0235, 0.0591, 0.0114, 0.0114, 0.0114, 0.0114, 0.0000]]
print(UPGMA(M, M_labels))
print(UPGMA(KIMURA_M, KIMURA_labels))
'''
T.pack()
# button that opens the file picker; choosing a file triggers the whole run
B = Button(root, width = 30, height = 30, text="select file", command=select)
B.pack()
mainloop()  # enter the Tk event loop
| [
"numpy.tril",
"re.match",
"tkinter.filedialog.askopenfilename",
"re.findall",
"re.search",
"re.compile"
] | [((204, 331), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'initialdir': '"""/"""', 'title': '"""Select file"""', 'filetypes': "(('text files', '*.txt'), ('all files', '*.*'))"}), "(initialdir='/', title='Select file', filetypes=(\n ('text files', '*.txt'), ('all files', '*.*')))\n", (230, 331), False, 'from tkinter import filedialog\n'), ((1848, 1862), 'numpy.tril', 'np.tril', (['table'], {}), '(table)\n', (1855, 1862), True, 'import numpy as np\n'), ((781, 803), 're.compile', 're.compile', (['"""\\\\[.*\\\\]"""'], {}), "('\\\\[.*\\\\]')\n", (791, 803), False, 'import re\n'), ((817, 834), 're.compile', 're.compile', (['"""\\\\d"""'], {}), "('\\\\d')\n", (827, 834), False, 'import re\n'), ((851, 872), 're.compile', 're.compile', (['"""\\\\*\\\\S*"""'], {}), "('\\\\*\\\\S*')\n", (861, 872), False, 'import re\n'), ((889, 923), 're.compile', 're.compile', (['"""-?\\\\d\\\\.\\\\d\\\\d\\\\d\\\\d"""'], {}), "('-?\\\\d\\\\.\\\\d\\\\d\\\\d\\\\d')\n", (899, 923), False, 'import re\n'), ((1097, 1126), 're.search', 're.search', (['name_pattern', 'line'], {}), '(name_pattern, line)\n', (1106, 1126), False, 'import re\n'), ((1136, 1163), 're.search', 're.search', (['col_number', 'line'], {}), '(col_number, line)\n', (1145, 1163), False, 'import re\n'), ((1174, 1202), 're.match', 're.match', (['data_pattern', 'line'], {}), '(data_pattern, line)\n', (1182, 1202), False, 'import re\n'), ((1217, 1248), 're.findall', 're.findall', (['entry_pattern', 'line'], {}), '(entry_pattern, line)\n', (1227, 1248), False, 'import re\n')] |
import cv2
import threading
import numpy as np
class VideoLoader(object):
    """ Generator of face images in the video

    Invoke an additional thread to load the video frames in the file and generate the cropped faces of the same size

    Attributes
    ----------
    path : Path
        Path to the video file
    video : cv2.VideoCapture
        Video file loader
    fd_rst : list of tuple of (int, dict of {str : float})
        Face detection results, each item in the list is a tuple of two elements. The first element is the frame ID
        and the second element is face detection results :class:`dict` with min_x, min_y, width, height, and confidence
    age_rst : container of int
        Frame IDs that passed the (age) filter; faces in frames not listed here are skipped
    target_size : tuple of (int, int)
        The tuple has two element: (height, width), which represents the size of output face images
    cache_size : int
        Size of the frame pre-loading cache, unit: number of frames
    batch_size : int
        Batch size of the output of each iteration, i.e., number of faces in the batch
    cache : None or dict of {int : np.ndarray}
        Pre-loaded video frames mapping frame ID to the frames. None if has not :func:`reset`
    last_face_loaded : None or int
        Index of :attr:`fd_rst` corresponds to the last pre-loaded frame. None if has not :func:`reset`
    num_face_generated : None or int
        Index of :attr:`fd_rst`, number of faces has been generated. None if has not :func:`reset`
    all_cached : bool
        Whether all the frames needed to generate face images are loaded in memory
    process : threading.Thread
        The thread to pre-load frames from video file
    cache_write_lock : threading.Lock
        The lock preventing concurrent write attempt to the :attr:`cache`
    """

    def __init__(self, video_path, fd_rst, age_rst, target_size, batch_size, frame_cache_size):
        self.path = video_path
        self.video = cv2.VideoCapture(str(video_path))
        self.cache_size = frame_cache_size
        self.batch_size = batch_size
        # flatten {frame_id: [face, ...]} into a list of (frame_id, face) pairs
        self.fd_rst = list()
        for frame_id, faces in fd_rst.items():
            for face in faces:
                self.fd_rst.append((frame_id, face))
        self.age_rst = age_rst
        self.target_size = target_size
        self.cache = None
        self.num_face_generated = None
        self.last_face_loaded = None
        self.all_cached = True
        self.process = threading.Thread(target=self._preload_frames)
        self.cache_write_lock = threading.Lock()

    def __iter__(self):
        self.reset()
        return self

    def __len__(self):
        # Number of batches.  BUG FIX: ``__len__`` must return a plain int;
        # ``np.ceil`` yields a numpy float, which makes ``len(loader)``
        # raise ``TypeError`` -- hence the explicit conversion.
        return int(np.ceil(len(self.fd_rst) / self.batch_size))

    def __next__(self):
        if self.num_face_generated == len(self.fd_rst):
            raise StopIteration
        else:
            # Generate the next batch of face images
            img_batch = list()
            video_frame_batch = list()
            meta_batch = list()
            while len(img_batch) != self.batch_size and self.num_face_generated != len(self.fd_rst):
                # Wait (busy-wait) for the pre-loader thread to cache the frame
                face_meta = self.fd_rst[self.num_face_generated]
                frame_id = face_meta[0]
                while not self.all_cached and frame_id not in self.cache.keys():
                    pass
                # Filter non child faces (frames not present in age_rst)
                if int(frame_id) not in self.age_rst:
                    self.num_face_generated += 1
                    # evict the frame once its last face has been consumed
                    if self.num_face_generated == len(self.fd_rst) or self.fd_rst[self.num_face_generated][0] != frame_id:
                        self.cache.pop(frame_id)
                    continue
                # Load the next image; clamp the detection box to the frame bounds
                frame = self.cache[frame_id]
                min_x = max(0, int(round(face_meta[1]['min_x'], 0)))
                min_y = max(0, int(round(face_meta[1]['min_y'], 0)))
                width = min(frame.shape[1]-min_x, int(round(face_meta[1]['width'], 0)))
                height = min(frame.shape[0]-min_y, int(round(face_meta[1]['height'], 0)))
                face = frame[min_y:min_y+height, min_x:min_x+width, :]
                face = self._resize_face(face)
                img_batch.append(face)
                meta_batch.append(face_meta)
                # Zoom out the face for iMotion Expression detection: grow a
                # square window (2 * max(width, height)) around the face centre
                # and shift it back inside the frame when it overflows an edge
                center_x = min_x + width / 2
                center_y = min_y + height / 2
                half_target_size = max(width, height)
                space_x_left = center_x - half_target_size
                space_x_right = frame.shape[1] - center_x - half_target_size
                space_y_top = center_y - half_target_size
                space_y_bot = frame.shape[0] - center_y - half_target_size
                if space_x_left + space_x_right >= 0:
                    if space_x_left < 0:
                        space_x_right += space_x_left
                        space_x_left = 0
                    if space_x_right < 0:
                        space_x_left += space_x_right
                        space_x_right = 0
                else:
                    # window wider than the frame: trade the horizontal
                    # deficit for extra vertical margin
                    diff = abs(space_x_left + space_x_right)
                    space_y_top += diff / 2
                    space_y_bot += diff / 2
                    space_x_left = 0
                    space_x_right = 0
                if space_y_top + space_y_bot >= 0:
                    if space_y_top < 0:
                        space_y_bot += space_y_top
                        space_y_top = 0
                    if space_y_bot < 0:
                        space_y_top += space_y_bot
                        space_y_bot = 0
                else:
                    diff = abs(space_y_top + space_y_bot)
                    space_x_left += diff / 2
                    space_x_right += diff / 2
                    space_y_top = 0
                    space_y_bot = 0
                space_x_left = int(round(space_x_left, 0))
                space_x_right = int(round(space_x_right, 0))
                space_y_top = int(round(space_y_top, 0))
                space_y_bot = int(round(space_y_bot, 0))
                video_frame = frame[space_y_top:(frame.shape[0]-space_y_bot), space_x_left:(frame.shape[1]-space_x_right), :]
                video_frame = self._resize_face(video_frame)
                # NOTE(review): the full frame is appended here, not the
                # zoomed-out ``video_frame`` computed above -- confirm intent
                video_frame_batch.append(frame)#video_frame)
                # Update status and evict fully-consumed frames
                self.num_face_generated += 1
                if self.num_face_generated == len(self.fd_rst) or self.fd_rst[self.num_face_generated][0] != frame_id:
                    self.cache.pop(frame_id)
            print('{}: {} faces have been generated'.format(self.path.stem, self.num_face_generated))
            return np.array(img_batch), meta_batch, np.array(video_frame_batch)

    @property
    def num_frame_in_cache(self):
        # current number of frames held in the pre-load cache
        return len(self.cache)

    def _preload_frames(self):
        """ Pre-load video frames

        Load frames from :attr:`self.video` and store into :attr:`self.cache`.
        This function will be executed by :attr:`self.process`

        Raises
        ------
        IOError
            Cannot retrieve a needed frame from the video file
        """
        while not self.all_cached:
            # only fill while the cache has room
            if self.num_frame_in_cache < self.cache_size:
                if self._load_next_frame():
                    self.all_cached = True

    def _load_next_frame(self):
        """ Load a single frame

        Load the next unloaded frame needed for face image generation from :attr:`self.video`
        and store into :attr:`self.cache`

        Returns
        -------
        hitting_end : bool
            Whether all the required video frames are loaded

        Raises
        ------
        IOError
            Cannot retrieve a needed frame from the video file
        """
        # Determine which frame to load: skip faces that share the frame
        # of the last loaded face (one decode per distinct frame)
        face_to_load = self.last_face_loaded + 1
        if self.last_face_loaded != -1:
            if face_to_load == len(self.fd_rst):
                return True
            while self.fd_rst[face_to_load][0] == self.fd_rst[self.last_face_loaded][0]:
                face_to_load += 1
                if face_to_load == len(self.fd_rst):
                    return True
        # Load the frame
        frame_to_load = self.fd_rst[face_to_load][0]
        with self.cache_write_lock:
            self.video.set(cv2.CAP_PROP_POS_FRAMES, int(frame_to_load))
            ret, frame = self.video.read()
            if ret:
                self.cache[frame_to_load] = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                self.last_face_loaded = face_to_load
                return False
            else:
                # TODO: Handle the error
                # NOTE(review): this branch falls through returning None
                # (treated as "not finished"); the failed frame is skipped
                #raise IOError('Fail to load the frame {} in the video'.format(face_to_load))
                self.last_face_loaded = face_to_load
                print(IOError('Fail to load the frame {} in the video'.format(face_to_load)))

    def _resize_face(self, face_img):
        """ Resize the face image to the target size

        Parameters
        ----------
        face_img: np.ndarray
            Face image to be resized

        Returns
        -------
        resized_img : np.ndarray
            Resized face image
        """
        return cv2.resize(face_img, self.target_size)

    def reset(self):
        """ Reset the face image generator and ready to generate images based on the current configuration """
        with self.cache_write_lock:
            # Attempt to terminate the previous pre-loading process gracefully
            # NOTE(review): the old thread is only dereferenced, not joined --
            # it exits on its own once it observes all_cached = True
            self.all_cached = True
            if self.process.is_alive():
                del self.process
            # Re-initiate the generator
            self.cache = dict()
            self.last_face_loaded = -1 # Indicate to load the first required frame
            self.num_face_generated = 0
            self.all_cached = False
            # Restart the pre-loading
            self.process = threading.Thread(target=self._preload_frames)
            self.process.start()
| [
"threading.Thread",
"cv2.cvtColor",
"threading.Lock",
"numpy.array",
"cv2.resize"
] | [((2381, 2426), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._preload_frames'}), '(target=self._preload_frames)\n', (2397, 2426), False, 'import threading\n'), ((2459, 2475), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (2473, 2475), False, 'import threading\n'), ((9509, 9547), 'cv2.resize', 'cv2.resize', (['face_img', 'self.target_size'], {}), '(face_img, self.target_size)\n', (9519, 9547), False, 'import cv2\n'), ((10204, 10249), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._preload_frames'}), '(target=self._preload_frames)\n', (10220, 10249), False, 'import threading\n'), ((6959, 6978), 'numpy.array', 'np.array', (['img_batch'], {}), '(img_batch)\n', (6967, 6978), True, 'import numpy as np\n'), ((6992, 7019), 'numpy.array', 'np.array', (['video_frame_batch'], {}), '(video_frame_batch)\n', (7000, 7019), True, 'import numpy as np\n'), ((8767, 8805), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (8779, 8805), False, 'import cv2\n')] |
# Modules
import copy
import itertools
import logging
import warnings

import numpy as np
import pandas as pd
import scipy as sp
from scipy.special import binom
from sklearn.metrics import accuracy_score, recall_score, precision_score, roc_auc_score
from tqdm.auto import tqdm
log = logging.getLogger('GSHAP')
class gshapExplainer:
    '''
    Uses the SHAP approach as implemented by Lundberg and Lee, but instead of calculating
    local feature value contributions, this method calculates global feature value
    contributions. Moreover, the feature value contributions are in terms of contribution
    to a measure of accuracy of choice.

    Parameters:
    ------------------------------------------------------------------------------------
    model : function or iml.Modle
        function that has a matrix of sample as input and outputs a predition for those
        samples
    X_background : nump.array or pandas.DataFrame
        background data used to mask features, i.e. integrate out features.
    '''
    def __init__(self, model, X_background):
        self.model = convert_to_model(model)  # standardise model
        self.data = convert_to_data(X_background)  # standardise data format
        self.N = self.data.data.shape[0]
        self.M = self.data.data.shape[1]
        if self.N > 100:
            # large background sets make the masking step expensive
            log.warning('A large background dataset of size ' + str(self.N)
                        + ' could cause slow run times. A background dataset of size 100 is recommended.')
        # initiate counters
        self.nsamplesAdded = 0
        self.nsamplesRun = 0

    def gshapValues(self, X, y, score_measure = accuracy_score, **kwargs):
        '''
        Estimates global SHAP values for the given sample, in the unit of the
        specified score measure.

        Parameters:
        ------------------------------------------------------------------------------------
        X : numpy.array or pandas.DataFrame
            matrix of samples (#samples x #features) for which the model output is explained
        y : numpy.array
            vector of samples corresponding to the observed output, given the input matrix X
        score_measure : method (default = accuracy_score)
            method to use as measure for model accuracy

        Returns:
        ------------------------------------------------------------------------------------
        A M x 1 vector, corresponding to the estimated global feature importance values.
        '''
        # Instantiate
        self.score_measure = score_measure
        # Standardise data
        if str(type(X)).endswith("pandas.core.series.Series'>"):
            X = X.values
        elif str(type(X)).endswith("pandas.core.frame.DataFrame'>"):
            X = X.values
        # Conditions on X
        assert(X.shape[0] == self.N), 'background data and feature matrix need to be of same length'
        assert len(X.shape) == 2, 'input data needs to be a matrix, i.e. at least two features'
        # Conditions on y
        assert (str(type(y)).endswith("numpy.ndarray'>") and (len(y.shape) == 1)) or str(type(y)).endswith("series.Series'>"), 'response variable needs to be of numpy array or pandas.core.series.Series format'
        assert len(y) == self.N, 'response vector needs to have same # of obs as feature matrix'
        self.ytrue = y
        # Conditions on model function/ iml.Model
        if self.score_measure == roc_auc_score:
            assert len(self.model.f(self.data.data).shape) == 2, 'if "roc_auc_score" is chosen as score measure, then provided model\
             needs to output probabilities. For most models, thus input model.predict_proba instead of model.predict'
        # Null model: score of the predictions on the fully-masked (background) data
        if self.score_measure != roc_auc_score:
            self.fnull = self.model.f(self.data.data)
            self.measure_null = self.score_measure(self.ytrue, self.fnull)
        elif self.score_measure == roc_auc_score:
            self.fnull = self.model.f(self.data.data)[:,1]
            self.measure_null = self.score_measure(self.ytrue, self.fnull)
        # Full model: score of the predictions on the unmasked data
        if self.score_measure != roc_auc_score:
            self.fX = self.model.f(X)
            self.measure_fX = self.score_measure(self.ytrue, self.fX)
        elif self.score_measure == roc_auc_score:
            self.fX = self.model.f(X)[:,1]
            self.measure_fX = self.score_measure(self.ytrue, self.fX)
        # Explanations
        explanations = []
        explanations.append(self.glob_explain(X))
        return np.array([explanations]).reshape((self.M,1))

    # Explain dataset via weighted-least-squares KernelSHAP on subset masks
    def glob_explain(self, X, **kwargs):
        # 1. BUILD SYNTHETIC DATA
        # Define number of samples to take from background data
        self.nsamples = kwargs.get('nsamples', 'auto')
        if self.nsamples == 'auto':
            self.nsamples = 2*self.M+2**11
        # make some preperations
        self.prep()
        # create weight vector
        # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is equivalent here
        num_subset_sizes = int(np.ceil(self.M - 1) / 2.0)  # only half of all possible
                                                               # subset sizes, due to symmetrical property
                                                               # of binomial coefficient
        num_paired_subset_sizes = int(np.floor(self.M - 1) / 2.0)
        # weight vector --> SHAP kernel without binomial coefficient: (M-1) / (S*(M-S))
        weight_vector = np.array([(self.M - 1.0) / (i*(self.M-i)) for i in range(1, num_subset_sizes + 1)])
        weight_vector[:num_paired_subset_sizes] *= 2  # for the inverse (e.g. inverse of S=1 is
                                                      # S=M-1 --> have same kernel)
        weight_vector /= np.sum(weight_vector)  # normalise s.t. between [0,1], to use for sampling within
                                                # subsets later on
        # create synthetic data for subsets that can be completely enumerated (i.e. fill out
        # subsets of size s, s.t. MChooseS <= nsamplesleft)
        num_full_subsets = 0
        num_samples_left = self.nsamples
        mask = np.zeros(self.M)  # vector to mask features for current subset size (later transposed)
        remaining_weight_vector = weight_vector.copy()  # to reweight weight_vector if one subset completed
        # loop over subsets (N.B. zero subset and full subset are left out, as kernel weight is infinite
        # for S = 0 and S = M)
        for subset_size in range(1, num_subset_sizes + 1):
            # find number of subsets and their inverse for current subset_size
            nsubsets = binom(self.M, subset_size)
            if subset_size <= num_paired_subset_sizes: nsubsets *= 2  # times 2 to account for inverse
            # check if have enough samples to completely enumerate number of subsets
            if num_samples_left * remaining_weight_vector[subset_size - 1] / nsubsets >= 1.0 - 1e-8:
                num_full_subsets += 1
                num_samples_left -= nsubsets
                # rescale weight vector s.t. remaining weight vector, i.e. from next subset size onwards
                # sums to 1 - iff last weight is not yet 1 already
                if remaining_weight_vector[subset_size - 1] < 1.0:
                    remaining_weight_vector /= (1 - remaining_weight_vector[subset_size - 1])
                # add samples of current subset size to the synthetic data set and create corresponding
                # masks
                w = weight_vector[subset_size - 1] / binom(self.M, subset_size)  # create kernel
                if subset_size <= num_paired_subset_sizes: w /= 2.0  # get weight for one subset
                                                                     # previously was for two
                # loop over all possible subset combinations of size subset_size
                group_indx = np.arange(self.M, dtype='int64')
                for groups in itertools.combinations(group_indx, subset_size):
                    mask[:] = 0.0  # reset to zero
                    mask[np.array(groups, dtype = 'int64')] = 1.0  # [*]
                    self.addsample(X, mask, w)  # use addsample function to add samples to the
                                                # background (masking) dataset, i.e. to unmask
                                                # features, as defined by mask
                    if subset_size <= num_paired_subset_sizes:
                        mask[:] = np.abs(mask - 1)  # get the inverse/complement to [*]
                        self.addsample(X, mask, w)
            else:
                break
        # from left-over nsamples, enumerate over random subset spaces, iff any subset spaces left
        nfixed_samples = self.nsamplesAdded
        samples_left = self.nsamples - nfixed_samples
        # check that the number of full subsets has not reached the number of subset sizes
        if num_full_subsets != num_subset_sizes:
            remaining_weight_vector = weight_vector.copy()  # reset weight vector
            remaining_weight_vector[:num_paired_subset_sizes] /= 2  # get weight for one subset
                                                                    # previously was for two
            remaining_weight_vector = remaining_weight_vector[num_full_subsets:]  # throw away
                                                                                  # weight vector for full subsizes
            remaining_weight_vector /= np.sum(remaining_weight_vector)  # reweight to sum to 1
            indx_set = np.random.choice(len(remaining_weight_vector), 6 * samples_left,\
                                        p = remaining_weight_vector)  # create a weighted
                                          # random sample of subsets of size
                                          # 6*samples_left to randomly choose from
                                          # to enumerate (weights are given by weight vector)
            indx_pos = 0
            used_masks = {}  # create a dictionary of used masks, to keep tab on used
                             # subset sizes
            # loop over left over samples
            while samples_left > 0 and indx_pos < len(indx_set):
                mask.fill(0.0)  # reset to zero
                indx = indx_set[indx_pos]  # pick a random subset size generated by indx_set
                                           # (only generated once to save time here)
                indx_pos += 1
                subset_size = indx + num_full_subsets + 1  # adjust subset size, for
                                                           # already considered subset sizes, s.t.
                                                           # already fully enumerated subset
                                                           # sizes are not considered again
                mask[np.random.permutation(self.M)[:subset_size]] = 1.0  # randomly
                                        # switch on features, s.t. total switched
                                        # on features is equal the selected subset_size
                # check if a sample of the current subset size has already been addded.
                # If so, a previous sample's weight is incremented
                mask_tuple = tuple(mask)
                new_sample = False
                if mask_tuple not in used_masks:
                    new_sample = True
                    used_masks[mask_tuple] = self.nsamplesAdded  # update dicitonary of seen
                                                                 # samples
                    samples_left -= 1
                    self.addsample(X, mask, 1.0)  # add samples to the background data set
                else:
                    self.kernelWeights[used_masks[mask_tuple]] += 1.0
                # inverse selected features and add sample
                if samples_left > 0 and subset_size <= num_paired_subset_sizes:
                    mask[:] = np.abs(mask - 1)  # inverse selected features
                    # again check if sample of current subset size has already been added
                    if new_sample:
                        samples_left -= 1
                        self.addsample(X, mask, 1.0)  # add samples to the background data set
                    else:
                        # compliment sample is always the next one thus just skip to next row
                        self.kernelWeights[used_masks[mask_tuple] + 1] += 1.0
            # normalise kernel weights
            # NOTE(review): this elementwise expression effectively sets every
            # trailing weight to weights_left_sum (and is 0/0 when a trailing
            # weight is zero) -- confirm against the reference KernelSHAP
            # normalisation, which divides by the sum of the sampled weights
            weights_left_sum = np.sum(weight_vector[num_full_subsets:])
            self.kernelWeights[nfixed_samples:] *= weights_left_sum / self.kernelWeights[nfixed_samples:]
        # 2. GET PREDICTIONS AND CORRESPONDING SCORE MEASURE
        self.run()
        # 3. SOLVE FOR ALTERNATIVE GLOBAL SHAP IMPORTANCE VALUES
        phi = np.zeros(self.M)
        vphi, vphi_var = self.solve()
        phi = vphi
        return phi

    # Auxiliary functions
    # -- create synthetic data and containers
    def prep(self):
        # synthetic data: nsamples stacked copies of the background set
        self.synth_data = np.tile(self.data.data, (self.nsamples,1))
        # containers
        self.maskMatrix = np.zeros((self.nsamples, self.M))
        self.kernelWeights = np.zeros(self.nsamples)
        self.nonzero_indx = np.zeros(self.nsamples)
        self.measure = np.zeros((self.nsamples,1))
        self.y = np.zeros((self.nsamples*self.N,1))
        # counters
        self.nsamplesAdded = 0
        self.nsamplesRun = 0

    # -- unmask the features selected by mask m (weight w) in the next
    #    block of N synthetic rows
    def addsample(self, X, m, w):
        shift = self.nsamplesAdded * self.N
        for k in range(self.M):
            if m[k] == 1.0:
                self.synth_data[shift:shift+self.N, k] = X[:, k]
        self.maskMatrix[self.nsamplesAdded, :] = m
        self.kernelWeights[self.nsamplesAdded] = w
        self.nsamplesAdded += 1

    # -- run the model on the pending synthetic rows and score each block
    def run(self):
        num_to_run = self.N * (self.nsamplesAdded - self.nsamplesRun)
        data = self.synth_data[self.nsamplesRun*self.N : self.nsamplesAdded*self.N,:]
        if self.score_measure != roc_auc_score:
            modelOut = self.model.f(data)
        elif self.score_measure == roc_auc_score:
            modelOut = self.model.f(data)[:,1]
        if isinstance(modelOut, (pd.DataFrame, pd.Series)):
            modelOut = modelOut.values
        # NOTE(review): this assumes the whole synthetic set is run in one go
        # (nsamplesAdded == nsamples); a partial run would raise a shape error
        self.y[:self.nsamples*self.N] = np.reshape(modelOut,(num_to_run, 1))
        # NOTE(review): the loop starts at 1, so the last measure entry is
        # never filled -- confirm whether range(1, nsamples + 1) was intended
        for i in range(1,self.nsamples):
            self.measure[self.nsamplesRun] = self.score_measure(self.ytrue, self.y[self.nsamplesRun*self.N: i*self.N])
            self.nsamplesRun += 1

    # -- weighted-least-squares solve for the importance values; the last
    #    feature is eliminated via the efficiency constraint
    def solve(self):
        nonzero_indx = np.arange(self.M)
        measure_k = self.measure - self.measure_null
        measure_dif = measure_k - np.array([self.maskMatrix[:, nonzero_indx[-1]]*(self.measure_fX - self.measure_null)]).reshape((self.nsamples, 1)) # nsamples x 1
        Z = np.transpose(np.transpose(self.maskMatrix[:, nonzero_indx[:-1]])\
                         - self.maskMatrix[:, nonzero_indx[-1]]) # nsamples x M
        ZW = np.transpose(np.transpose(Z) * np.transpose(self.kernelWeights)) # nsamples x M
        ZWZ_inv = np.linalg.inv(np.dot(np.transpose(ZW), Z)) # M x M
        wlsq_result = np.dot(ZWZ_inv, np.dot(np.transpose(ZW), measure_dif)) # M x 1
        result = np.zeros(self.M)
        result[nonzero_indx[:-1]] = wlsq_result.reshape(-1)
        # efficiency: the contributions sum to measure_fX - measure_null
        result[nonzero_indx[-1]] = (self.measure_fX - self.measure_null) - sum(wlsq_result)
        return np.array([result]), np.ones(len(result))

    # -- find multivariate conditional distribution parameters
    # NOTE(review): not called anywhere in this class and still carrying
    # debug prints -- appears to be experimental code
    def dist_parameters(self, X, indx):
        m = indx
        setall = np.arange(self.M)
        m_bar = [item for item in setall if item not in m]
        X_s = X[:,m]
        X_sbar = self.data.data[:,m_bar]
        mu_s = np.mean(X_s)
        mu_sbar = np.mean(X_sbar)
        self.mu_sbar_s = np.zeros((X.shape[0], self.M))
        self.sigma_sbar_s = np.zeros((self.M, self.M))
        if len(m) == 1:
            sigma_ss = np.var(X_s)
        else:
            sigma_ss = np.cov(X_s, rowvar = False ,bias = False)
        sigma_s_sbar = np.cov(X_s, y = X_sbar, rowvar = False, bias = False)
        sigma_sbar_s = np.cov(X_sbar, y = X_s, rowvar = False, bias = False)
        if len(m_bar) == 1:
            sigma_sbar_sbar = np.var(X_sbar)
        else:
            sigma_sbar_sbar = np.cov(X_sbar, rowvar = False, bias = False)
        print('m_bar: {}'.format(m_bar))
        print('m_bar: {}'.format(len(m_bar)))
        for i in range(X_s.shape[1]):
            if len(m) == 1:
                self.mu_sbar_s[:,i] = mu_sbar + (sigma_sbar_s/sigma_ss) * (X_s[:,i] - mu_s)
            else:
                self.mu_sbar_s[:,i] = mu_sbar + np.dot(sigma_sbar_s, np.linalg.inv(sigma_ss)) * (X_s[:,i] - mu_s)
        if len(m) == 1:
            self.sigma_sbar_s = sigma_sbar_sbar - np.dot((sigma_sbar_s / sigma_ss), sigma_s_sbar)
        else:
            self.sigma_sbar_s = sigma_sbar_sbar - np.dot(np.dot(sigma_sbar_s, np.linalg.inv(sigma_ss)), sigma_s_sbar)
# Additional functions
class Data:
    """Container standardising background data: raw values, column names and equal row weights."""
    def __init__(self, data, col_names):
        self.data = data
        self.col_names = col_names
        self.n = data.shape[0]
        # every background row gets the same weight, 1/n
        self.weights = np.ones(self.n)
        self.weights /= self.n

def convert_to_data(value):
    """Coerce *value* (Data / ndarray / pandas Series / pandas DataFrame) into a :class:`Data`.

    Raises an AssertionError for unsupported input types.
    """
    if isinstance(value, Data):
        return value
    elif type(value) == np.ndarray:
        return Data(value, [str(i) for i in range(value.shape[1])])
    elif str(type(value)).endswith("pandas.core.series.Series'>"):
        # BUG FIX: the original called ``len(values)`` (an undefined name) and
        # therefore raised NameError for every pandas Series input.
        return Data(value.values.reshape((1, len(value))), value.index.tolist())
    elif str(type(value)).endswith("pandas.core.frame.DataFrame'>"):
        return Data(value.values, value.columns.tolist())
    else:
        assert False, str(type(value)) + "is currently not a supported format type"
# Convert model to standard model class
class Model:
    """Thin wrapper giving every prediction callable a uniform ``.f`` attribute."""
    def __init__(self, f):
        self.f = f

def convert_to_model(value):
    """Wrap *value* in a :class:`Model` unless it already is one."""
    if isinstance(value, Model):
        return value
    return Model(value)
"numpy.sum",
"numpy.abs",
"numpy.floor",
"numpy.ones",
"numpy.mean",
"numpy.arange",
"numpy.tile",
"numpy.transpose",
"numpy.reshape",
"numpy.cov",
"numpy.var",
"numpy.ceil",
"itertools.combinations",
"numpy.linalg.inv",
"numpy.random.permutation",
"numpy.dot",
"numpy.zeros",
"nump... | [((251, 277), 'logging.getLogger', 'logging.getLogger', (['"""GSHAP"""'], {}), "('GSHAP')\n", (268, 277), False, 'import logging\n'), ((5954, 5975), 'numpy.sum', 'np.sum', (['weight_vector'], {}), '(weight_vector)\n', (5960, 5975), True, 'import numpy as np\n'), ((6341, 6357), 'numpy.zeros', 'np.zeros', (['self.M'], {}), '(self.M)\n', (6349, 6357), True, 'import numpy as np\n'), ((13068, 13084), 'numpy.zeros', 'np.zeros', (['self.M'], {}), '(self.M)\n', (13076, 13084), True, 'import numpy as np\n'), ((13372, 13415), 'numpy.tile', 'np.tile', (['self.data.data', '(self.nsamples, 1)'], {}), '(self.data.data, (self.nsamples, 1))\n', (13379, 13415), True, 'import numpy as np\n'), ((13463, 13496), 'numpy.zeros', 'np.zeros', (['(self.nsamples, self.M)'], {}), '((self.nsamples, self.M))\n', (13471, 13496), True, 'import numpy as np\n'), ((13526, 13549), 'numpy.zeros', 'np.zeros', (['self.nsamples'], {}), '(self.nsamples)\n', (13534, 13549), True, 'import numpy as np\n'), ((13578, 13601), 'numpy.zeros', 'np.zeros', (['self.nsamples'], {}), '(self.nsamples)\n', (13586, 13601), True, 'import numpy as np\n'), ((13625, 13653), 'numpy.zeros', 'np.zeros', (['(self.nsamples, 1)'], {}), '((self.nsamples, 1))\n', (13633, 13653), True, 'import numpy as np\n'), ((13670, 13707), 'numpy.zeros', 'np.zeros', (['(self.nsamples * self.N, 1)'], {}), '((self.nsamples * self.N, 1))\n', (13678, 13707), True, 'import numpy as np\n'), ((14811, 14848), 'numpy.reshape', 'np.reshape', (['modelOut', '(num_to_run, 1)'], {}), '(modelOut, (num_to_run, 1))\n', (14821, 14848), True, 'import numpy as np\n'), ((15418, 15435), 'numpy.arange', 'np.arange', (['self.M'], {}), '(self.M)\n', (15427, 15435), True, 'import numpy as np\n'), ((16075, 16091), 'numpy.zeros', 'np.zeros', (['self.M'], {}), '(self.M)\n', (16083, 16091), True, 'import numpy as np\n'), ((16416, 16433), 'numpy.arange', 'np.arange', (['self.M'], {}), '(self.M)\n', (16425, 16433), True, 'import numpy as np\n'), ((16570, 16582), 
'numpy.mean', 'np.mean', (['X_s'], {}), '(X_s)\n', (16577, 16582), True, 'import numpy as np\n'), ((16601, 16616), 'numpy.mean', 'np.mean', (['X_sbar'], {}), '(X_sbar)\n', (16608, 16616), True, 'import numpy as np\n'), ((16643, 16673), 'numpy.zeros', 'np.zeros', (['(X.shape[0], self.M)'], {}), '((X.shape[0], self.M))\n', (16651, 16673), True, 'import numpy as np\n'), ((16702, 16728), 'numpy.zeros', 'np.zeros', (['(self.M, self.M)'], {}), '((self.M, self.M))\n', (16710, 16728), True, 'import numpy as np\n'), ((16899, 16946), 'numpy.cov', 'np.cov', (['X_s'], {'y': 'X_sbar', 'rowvar': '(False)', 'bias': '(False)'}), '(X_s, y=X_sbar, rowvar=False, bias=False)\n', (16905, 16946), True, 'import numpy as np\n'), ((16976, 17023), 'numpy.cov', 'np.cov', (['X_sbar'], {'y': 'X_s', 'rowvar': '(False)', 'bias': '(False)'}), '(X_sbar, y=X_s, rowvar=False, bias=False)\n', (16982, 17023), True, 'import numpy as np\n'), ((18159, 18174), 'numpy.ones', 'np.ones', (['self.n'], {}), '(self.n)\n', (18166, 18174), True, 'import numpy as np\n'), ((9667, 9698), 'numpy.sum', 'np.sum', (['remaining_weight_vector'], {}), '(remaining_weight_vector)\n', (9673, 9698), True, 'import numpy as np\n'), ((12758, 12798), 'numpy.sum', 'np.sum', (['weight_vector[num_full_subsets:]'], {}), '(weight_vector[num_full_subsets:])\n', (12764, 12798), True, 'import numpy as np\n'), ((16260, 16278), 'numpy.array', 'np.array', (['[result]'], {}), '([result])\n', (16268, 16278), True, 'import numpy as np\n'), ((16785, 16796), 'numpy.var', 'np.var', (['X_s'], {}), '(X_s)\n', (16791, 16796), True, 'import numpy as np\n'), ((16834, 16871), 'numpy.cov', 'np.cov', (['X_s'], {'rowvar': '(False)', 'bias': '(False)'}), '(X_s, rowvar=False, bias=False)\n', (16840, 16871), True, 'import numpy as np\n'), ((17088, 17102), 'numpy.var', 'np.var', (['X_sbar'], {}), '(X_sbar)\n', (17094, 17102), True, 'import numpy as np\n'), ((17147, 17187), 'numpy.cov', 'np.cov', (['X_sbar'], {'rowvar': '(False)', 'bias': '(False)'}), '(X_sbar, 
rowvar=False, bias=False)\n', (17153, 17187), True, 'import numpy as np\n'), ((4774, 4798), 'numpy.array', 'np.array', (['[explanations]'], {}), '([explanations])\n', (4782, 4798), True, 'import numpy as np\n'), ((5238, 5257), 'numpy.ceil', 'np.ceil', (['(self.M - 1)'], {}), '(self.M - 1)\n', (5245, 5257), True, 'import numpy as np\n'), ((5524, 5544), 'numpy.floor', 'np.floor', (['(self.M - 1)'], {}), '(self.M - 1)\n', (5532, 5544), True, 'import numpy as np\n'), ((8089, 8121), 'numpy.arange', 'np.arange', (['self.M'], {'dtype': '"""int64"""'}), "(self.M, dtype='int64')\n", (8098, 8121), True, 'import numpy as np\n'), ((8152, 8199), 'itertools.combinations', 'itertools.combinations', (['group_indx', 'subset_size'], {}), '(group_indx, subset_size)\n', (8174, 8199), False, 'import itertools\n'), ((15678, 15729), 'numpy.transpose', 'np.transpose', (['self.maskMatrix[:, nonzero_indx[:-1]]'], {}), '(self.maskMatrix[:, nonzero_indx[:-1]])\n', (15690, 15729), True, 'import numpy as np\n'), ((15836, 15851), 'numpy.transpose', 'np.transpose', (['Z'], {}), '(Z)\n', (15848, 15851), True, 'import numpy as np\n'), ((15854, 15886), 'numpy.transpose', 'np.transpose', (['self.kernelWeights'], {}), '(self.kernelWeights)\n', (15866, 15886), True, 'import numpy as np\n'), ((15942, 15958), 'numpy.transpose', 'np.transpose', (['ZW'], {}), '(ZW)\n', (15954, 15958), True, 'import numpy as np\n'), ((16017, 16033), 'numpy.transpose', 'np.transpose', (['ZW'], {}), '(ZW)\n', (16029, 16033), True, 'import numpy as np\n'), ((17755, 17800), 'numpy.dot', 'np.dot', (['(sigma_sbar_s / sigma_ss)', 'sigma_s_sbar'], {}), '(sigma_sbar_s / sigma_ss, sigma_s_sbar)\n', (17761, 17800), True, 'import numpy as np\n'), ((12162, 12178), 'numpy.abs', 'np.abs', (['(mask - 1)'], {}), '(mask - 1)\n', (12168, 12178), True, 'import numpy as np\n'), ((15523, 15616), 'numpy.array', 'np.array', (['[self.maskMatrix[:, nonzero_indx[-1]] * (self.measure_fX - self.measure_null)]'], {}), '([self.maskMatrix[:, 
nonzero_indx[-1]] * (self.measure_fX - self.\n measure_null)])\n', (15531, 15616), True, 'import numpy as np\n'), ((8276, 8307), 'numpy.array', 'np.array', (['groups'], {'dtype': '"""int64"""'}), "(groups, dtype='int64')\n", (8284, 8307), True, 'import numpy as np\n'), ((8688, 8704), 'numpy.abs', 'np.abs', (['(mask - 1)'], {}), '(mask - 1)\n', (8694, 8704), True, 'import numpy as np\n'), ((11023, 11052), 'numpy.random.permutation', 'np.random.permutation', (['self.M'], {}), '(self.M)\n', (11044, 11052), True, 'import numpy as np\n'), ((17895, 17918), 'numpy.linalg.inv', 'np.linalg.inv', (['sigma_ss'], {}), '(sigma_ss)\n', (17908, 17918), True, 'import numpy as np\n'), ((17627, 17650), 'numpy.linalg.inv', 'np.linalg.inv', (['sigma_ss'], {}), '(sigma_ss)\n', (17640, 17650), True, 'import numpy as np\n')] |
'''
Created on 9. des. 2017
@author: ljb
'''
from __future__ import division, print_function
import matplotlib.pyplot as plt
from fys4150.project5.tridiagonal_solvers import (tridiagonal_solve_specific,
tridiagonal_solve_specific_periodic,
solve_specific_periodic_with_lu,
tridiagonal_solve)
from numpy import pi
import numpy as np
def psi_periodic(x, t, n=1):
    """Stream function psi(x, t) for periodic boundary conditions (mode n)."""
    phase = n * 2 * pi * x + 0.5 * t / (n * pi)
    return np.cos(phase)
def dpsi_periodic(x, t, n=1):
    """First spatial derivative d(psi)/dx of the periodic stream function."""
    amplitude = -n * 2 * pi
    phase = n * 2 * pi * x + 0.5 * t / (n * pi)
    return amplitude * np.sin(phase)
def ddpsi_periodic(x, t, n=1):
    """Second spatial derivative d^2(psi)/dx^2 of the periodic stream function."""
    wavenumber = n * 2 * pi
    phase = wavenumber * x + 0.5 * t / (n * pi)
    return -wavenumber ** 2 * np.cos(phase)
def zeta_periodic(x, t, n=1):
    """Vorticity zeta = d^2(psi_periodic)/dx^2 for periodic boundaries."""
    k = 2 * pi * n
    phase = n * 2 * pi * x + 0.5 * t / (n * pi)
    return -k ** 2 * np.cos(phase)
def psi_wall(x, t, n=1):
    """Stream function psi(x, t) satisfying rigid-wall boundary conditions."""
    envelope = np.sin(n * pi * x)
    carrier = np.cos(n * pi * x + 0.5 * t / (n * pi))
    return envelope * carrier
def zeta_wall(x, t, n=1):
    """Vorticity zeta = d^2(psi_wall)/dx^2 for rigid-wall boundaries."""
    amplitude = -2 * (pi * n) ** 2
    phase = -t / (n * 2 * pi) - 2 * pi * n * x + pi / 2
    return amplitude * np.cos(phase)
def central_diff_x(psi_n, dx):
    """Second-order central difference d(psi)/dx on the interior points."""
    right = psi_n[2:]
    left = psi_n[:-2]
    return (right - left) / (2 * dx)
def forward_diff_t(dpsi_n, zetas, dt):
    """Forward-Euler time step: zeta^{n+1} = zeta^n - dt * d(psi)/dx."""
    increment = dt * dpsi_n
    return zetas[-1] - increment
def central_diff_t(dpsi_n, zetas, dt):
    """Leapfrog (central) time step: zeta^{n+1} = zeta^{n-1} - 2*dt*d(psi)/dx."""
    increment = 2 * dt * dpsi_n
    return zetas[-2] - increment
def solve_rossby_with_walls(j_max, n_max=100, dt=0.01, method='forward'):
    """
    Solve the 1D Rossby-wave (vorticity) equation with rigid-wall boundaries.

    At each time step the stream function psi is recovered from the
    vorticity zeta via a tridiagonal Poisson solve, and zeta is advanced
    with either forward-Euler or leapfrog (central) time differencing.

    Parameters
    ----------
    j_max : int
        Number of interior grid points on [0, 1]; psi = 0 is imposed at
        the walls themselves.
    n_max : int
        Number of time steps.
    dt : float
        Time-step length.
    method : str
        'forward' for forward-Euler; any string starting with 'central'
        selects the leapfrog scheme (bootstrapped with one forward step).

    Returns
    -------
    t, x, zetas, psis
        Time grid, spatial grid, list of vorticity arrays (including the
        initial condition) and list of stream-function arrays.
    """
    dx = 1.0 / (j_max + 1)  # spatial step length
    x = np.arange(1, j_max + 1) * dx
    t = np.arange(n_max) * dt
    zetas = [zeta_wall(x, t=0)]  # initial condition
    psis = []
    diff_x = central_diff_x
    diff_t = forward_diff_t
    if method.startswith('central'):
        # Leapfrog needs two time levels: take one forward-Euler step first.
        psi_n = tridiagonal_solve_specific(-zetas[-1] * dx ** 2)  # substitute Eq 16 into Eq 20 and solve
        psis.append(psi_n)
        zetas.append(diff_t(diff_x(np.hstack((0, psi_n, 0)), dx), zetas, dt))
        diff_t = central_diff_t
        n_max = n_max - 1
    for n in range(n_max):
        psi_n = tridiagonal_solve_specific(-zetas[-1] * dx ** 2)  # substitute Eq 16 into Eq 20 and solve
        psis.append(psi_n)
        # Pad psi with the wall values (psi = 0) before differentiating.
        zeta_np1 = diff_t(diff_x(np.hstack((0, psi_n, 0)), dx), zetas, dt)
        zetas.append(zeta_np1)
    return t, x, zetas, psis
def solve_rossby_periodic1(j_max, n_max=100, dt=0.01, method='forward'):
    """
    Attempted Rossby-wave solver with periodic boundary conditions.

    NB! This does not work because the periodic tridiagonal matrix is
    singular (as noted by the original author); kept for reference.

    NOTE(review): the initial condition uses zeta_wall rather than
    zeta_periodic despite the function's name -- confirm which was intended.

    Parameters mirror solve_rossby_with_walls; returns (t, x, zetas, psis).
    """
    dx = 1.0 / (j_max + 1)  # spatial step length
    x = np.arange(0, j_max + 1) * dx
    t = np.arange(n_max) * dt
    zetas = [zeta_wall(x, t=0)]  # initial condition
    psis = []
    diff_x = central_diff_x
    diff_t = forward_diff_t
    n_start = 0
    if method.startswith('central'):
        # Leapfrog needs two time levels: take one forward-Euler step first.
        psi_n = tridiagonal_solve_specific_periodic(-zetas[-1] * dx ** 2)  # substitute Eq 16 into Eq 20 and solve
        psis.append(psi_n)
        zetas.append(diff_t(diff_x(np.hstack((0, psi_n, 0)), dx), zetas, dt))
        diff_t = central_diff_t
        n_start = 1
    for n in range(n_start, n_max):
        psi_n = tridiagonal_solve_specific_periodic(-zetas[-1] * dx ** 2)  # substitute Eq 16 into Eq 20 and solve
        psis.append(psi_n)
        zeta_np1 = diff_t(diff_x(np.hstack((0, psi_n, 0)), dx), zetas, dt)
        zetas.append(zeta_np1)
    return t, x, zetas, psis
def solve_rossby_periodic(j_max, zeta0, dpsi0=0, psi0=0, n_max=100, dt=0.01, method='forward'):
    """
    Solve the 1D Rossby-wave equation with periodic boundary conditions.

    The periodic Poisson problem is made non-singular by pinning psi and
    its derivative at x = 0 (supplied via psi0 / dpsi0), which are updated
    each step from the analytic periodic solution.

    Parameters
    ----------
    j_max : int
        Number of grid intervals; the grid has j_max + 1 points on [0, 1).
    zeta0 : callable
        zeta0(x, t=0) returning the initial vorticity field.
    dpsi0, psi0 : float
        Initial values of d(psi)/dx and psi at x = 0.
    n_max, dt, method
        As in solve_rossby_with_walls.

    Returns
    -------
    t, x, zetas, psis
        Time grid, spatial grid, vorticity arrays and stream-function arrays.

    NOTE(review): psi0/dpsi0 are overwritten each step from psi_periodic /
    dpsi_periodic (the exact solution), so this routine is tied to that
    analytic case -- confirm before reusing with other initial conditions.
    """
    dx = 1.0 / (j_max + 1)  # spatial step length
    x = np.arange(0, j_max + 1) * dx
    t = np.arange(n_max) * dt
    zetas = [zeta0(x, t=0)]  # initial condition
    psis = []
    # Tridiagonal system (-1, 2, -1) for the discrete -d^2/dx^2 operator;
    # the first diagonal entry is halved to encode the boundary pinning.
    a = c = -np.ones(j_max+1)
    b = 2.0 * np.ones(j_max+1)
    b[0] = 1
    diff_x = central_diff_x
    diff_t = forward_diff_t
    n_start = 0
    B = B_matrix(j_max+1)  # NOTE(review): computed but unused below
    psi0s = [psi0]
    if method.startswith('central'):
        # Leapfrog needs two time levels: take one forward-Euler step first.
        F = -zetas[-1] * dx ** 2
        F[0] = F[0]/2 - dpsi0 * dx
        F[-1] += psi0
        psi_n = tridiagonal_solve(a, b, c, F)
        psis.append(psi_n)
        # Wrap the end points periodically before differentiating.
        dpsi_n = diff_x(np.hstack((psi_n[-1], psi_n, psi_n[0])), dx)
        ddpsi0 = (psi_n[-1] - 2*psi_n[0] + psi_n[1]) / dx**2
        # dpsi0 = dpsi_n[0] + dt * ddpsi0
        # psi0 = psi_n[0] + dt * dpsi_n[0]
        dpsi0 = dpsi_periodic(0, dt)  # dpsi_n[0] + dt * ddpsi0
        psi0 = psi_periodic(0, dt)  # psi_n[0] + dt * dpsi_n[0]
        psi0s.append(psi0)
        zetas.append(diff_t(dpsi_n, zetas, dt))
        # zetas.append(zeta0(x, t=dt))
        diff_t = central_diff_t
        n_start = 1
    for n in range(n_start, n_max):
        F = -zetas[-1] * dx ** 2
        F[0] = F[0] / 2 - dpsi0 * dx
        F[-1] += psi0
        psi_n = tridiagonal_solve(a, b, c, F)
        psi00 = psi_n[0]  # NOTE(review): unused; left over from the iteration below
        # while np.abs(psi0-psi00)>1e-4:
        #     F[-1] += psi00-psi0
        #     psi0 = psi00
        #     psi_n = tridiagonal_solve(a, b, c, F)
        #     psi00 = psi_n[0]
        #     pass
        # psi_n2 = np.dot(F, B.T)
        psis.append(psi_n)
        dpsi_n = diff_x(np.hstack((psi_n[-1], psi_n, psi_n[0])), dx)
        if n>1:
            psi0_n1 = psis[-2][0]
            psi0 = psi0_n1 + 2*dt * dpsi0
        else:
            psi0 = psi0 + dt * dpsi0
        psi0s.append(psi0)
        ddpsi0 = (psi_n[-1] - 2*psi_n[0] + psi_n[1])/dx**2
        # The numerically advanced psi0/dpsi0 above are immediately replaced
        # by the analytic values below.
        dpsi0 = dpsi_periodic(0, (n+1)*dt)  # dpsi_n[0] + dt * ddpsi0
        psi0 = psi_periodic(0, (n+1)*dt)  #
        zeta_np1 = diff_t(dpsi_n, zetas, dt)
        zetas.append(zeta_np1)
    return t, x, zetas, psis
def B_matrix(N):
    """Return the N-by-N matrix with entries B[i, j] = min(N - i, N - j)."""
    descending = np.arange(N, 0, -1)
    rows = [descending.clip(1, upper) for upper in range(N, 0, -1)]
    return np.array(rows)
def task_5c_test():
    """Compare the numerical psi and zeta against the analytic solution.

    Runs both the forward-Euler and the leapfrog ('central') time-stepping
    schemes for task 5c, plots the numerical and exact solutions at three
    times per figure, and saves the figures to PNG files.
    """
    plot = plt.semilogy  # used only by error diagnostics (currently disabled)
    periodic = True
    if periodic:
        psi_fun = psi_periodic
        zeta_fun = zeta_periodic
        msg = 'Periodic'
    else:
        psi_fun = psi_wall
        zeta_fun = zeta_wall
        msg = 'Wall'
    dt = 1e-2
    nmax = int(10 / dt)
    for method in ['forward', 'central']:
        plt.figure()
        if periodic:
            t, x, zetas, psis = solve_rossby_periodic(j_max=40,
                                                      zeta0=zeta_fun,
                                                      dpsi0=0,
                                                      psi0=psi_fun(x=0, t=0),
                                                      method=method,
                                                      dt=1e-2, n_max=nmax)
        else:
            t, x, zetas, psis = solve_rossby_with_walls(j_max=40, method=method,
                                                        dt=dt, n_max=nmax)
        dx = x[1] - x[0]
        dt = t[1] - t[0]
        plt.title('{}, psi, dt={:2.1g}, dx={:2.1g}, {}'.format(msg, dt, dx, method))
        colors = ['r', 'g', 'b']
        indices = [nmax // 3, 2 * nmax // 3, -1]
        for i, color in zip(indices, colors):
            # BUGFIX: the legend labels were swapped -- the analytic curve
            # (psi_fun) is the exact solution, not the computed psis[i].
            plt.plot(x, psi_fun(x, t=t[i]), color + '-',
                     label='exact, t={:2.1f}'.format(t[i]))
            plt.plot(x, psis[i], color + '.', label='t={:2.1f}'.format(t[i]))
        plt.legend()
        plt.xlabel('x')
        plt.ylabel('psi')
        plt.savefig('task_5c_psi_{}_{}{}.png'.format(msg, method, nmax))
        plt.figure()
        for i, color in zip(indices, colors):
            plt.plot(x, zeta_fun(x, t=t[i]), color + '-',
                     label='exact, t={:2.1f}'.format(t[i]))
            plt.plot(x, zetas[i], color + '.', label='t={:2.1f}'.format(t[i]))
        plt.title('{}, zeta, dt={:2.1g}, dx={:2.1g} {}'.format(msg, dt, dx, method))
        plt.xlabel('x')
        plt.ylabel('zeta')
        plt.legend()
        plt.savefig('task_5c_zeta_{}_{}{}.png'.format(msg, method, nmax))
    plt.show('hold')  # NOTE(review): modern matplotlib rejects positional args here
def task_5d_stability_and_truncation_errors():
    """Study stability and truncation error of both schemes versus dt.

    For forward-Euler and leapfrog time stepping, computes the maximum
    absolute errors of psi and zeta against the analytic solution at three
    sample times for a range of time steps, then plots error vs dt on a
    log-log scale and saves one figure per method.
    """
    plot = plt.semilogy  # NOTE(review): unused here; kept for parity with task_5c_test
    periodic = False
    if periodic:
        psi_fun = psi_periodic
        zeta_fun = zeta_periodic
        msg = 'Periodic'
    else:
        psi_fun = psi_wall
        zeta_fun = zeta_wall
        msg = 'Wall'
    # dt = 1e-2
    results = dict()
    dts = [0.5, 1e-1, 1e-2, 1e-3, 1e-4]
    for method in ['forward', 'central']:
        res = []
        for dt in dts:
            nmax = int(100/dt)  # fixed total time T = 100 for every dt
            print(dt, nmax)
            if periodic:
                # NOTE(review): this branch passes the hard-coded dt=1e-2
                # instead of the loop variable `dt`; it is currently dead
                # code (periodic is False) but should be fixed if enabled.
                t, x, zetas, psis = solve_rossby_periodic(j_max=40,
                                                          zeta0=zeta_fun,
                                                          dpsi0=0,
                                                          psi0=psi_fun(x=0, t=0),
                                                          method=method,
                                                          dt=1e-2, n_max=nmax)
            else:
                t, x, zetas, psis = solve_rossby_with_walls(j_max=40, method=method, dt=dt, n_max=nmax)
            indices = [nmax // 3, 2 * nmax // 3, -1]
            dx = x[1]-x[0]
            # dt = t[1]-t[0]
            for i in indices:
                ti = t[i]
                psi0 = psi_fun(x, t=ti)
                zeta0 = zeta_fun(x, t=ti)
                res.append((dx, dt, ti,
                            np.max(np.abs(psi0- psis[i])),
                            np.max(np.abs(zeta0- zetas[i]))
                            ))
        results[method] = res
    ni = len(indices)
    colors = ['r', 'g', 'b', 'm', 'c', 'y']
    for method, values in results.items():
        plt.figure()
        vals = np.array(values)
        # Columns of `vals`: (dx, dt, ti, max|psi error|, max|zeta error|);
        # reshape so each column groups one sample time across all dts.
        dts = vals[:, 1].reshape(-1, ni)
        tis = vals[:, 2].reshape(-1, ni)
        error_psis = vals[:,3].reshape(-1, ni)
        error_zetas = vals[:,4].reshape(-1, ni)
        for dt, ti, error_psi, error_zeta, color,in zip(dts.T, tis.T, error_psis.T, error_zetas.T, colors):
            plt.loglog(dt, error_psi, color + '-', label='psi, t={:2.1f}'.format(np.mean(ti)))
            plt.loglog(dt, error_zeta, color + '-.', label='zeta, t={:2.1f}'.format(np.mean(ti)))
        plt.legend()
        plt.title('method={}, {}'.format(method, msg))
        plt.xlabel('dt')
        plt.ylabel('Max absolute error')
        plt.savefig('task_5d_stability_{}_{}.png'.format(msg, method))
    plt.show('hold')
if __name__ == '__main__':
    # Run the dt-convergence study; enable the line below for the
    # psi/zeta comparison plots instead.
    # task_5c_test()
    task_5d_stability_and_truncation_errors()
| [
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.legend",
"fys4150.project5.tridiagonal_solvers.tridiagonal_solve",
"numpy.ones",
"numpy.hstack",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.cos",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplot... | [((594, 637), 'numpy.cos', 'np.cos', (['(n * 2 * pi * x + 0.5 * t / (n * pi))'], {}), '(n * 2 * pi * x + 0.5 * t / (n * pi))\n', (600, 637), True, 'import numpy as np\n'), ((6152, 6171), 'numpy.arange', 'np.arange', (['N', '(0)', '(-1)'], {}), '(N, 0, -1)\n', (6161, 6171), True, 'import numpy as np\n'), ((6258, 6269), 'numpy.array', 'np.array', (['B'], {}), '(B)\n', (6266, 6269), True, 'import numpy as np\n'), ((8798, 8814), 'matplotlib.pyplot.show', 'plt.show', (['"""hold"""'], {}), "('hold')\n", (8806, 8814), True, 'import matplotlib.pyplot as plt\n'), ((11240, 11256), 'matplotlib.pyplot.show', 'plt.show', (['"""hold"""'], {}), "('hold')\n", (11248, 11256), True, 'import matplotlib.pyplot as plt\n'), ((778, 821), 'numpy.sin', 'np.sin', (['(n * 2 * pi * x + 0.5 * t / (n * pi))'], {}), '(n * 2 * pi * x + 0.5 * t / (n * pi))\n', (784, 821), True, 'import numpy as np\n'), ((974, 1017), 'numpy.cos', 'np.cos', (['(n * 2 * pi * x + 0.5 * t / (n * pi))'], {}), '(n * 2 * pi * x + 0.5 * t / (n * pi))\n', (980, 1017), True, 'import numpy as np\n'), ((1131, 1174), 'numpy.cos', 'np.cos', (['(n * 2 * pi * x + 0.5 * t / (n * pi))'], {}), '(n * 2 * pi * x + 0.5 * t / (n * pi))\n', (1137, 1174), True, 'import numpy as np\n'), ((1265, 1283), 'numpy.sin', 'np.sin', (['(n * pi * x)'], {}), '(n * pi * x)\n', (1271, 1283), True, 'import numpy as np\n'), ((1286, 1325), 'numpy.cos', 'np.cos', (['(n * pi * x + 0.5 * t / (n * pi))'], {}), '(n * pi * x + 0.5 * t / (n * pi))\n', (1292, 1325), True, 'import numpy as np\n'), ((1430, 1481), 'numpy.cos', 'np.cos', (['(-t / (n * 2 * pi) - 2 * pi * n * x + pi / 2)'], {}), '(-t / (n * 2 * pi) - 2 * pi * n * x + pi / 2)\n', (1436, 1481), True, 'import numpy as np\n'), ((1866, 1889), 'numpy.arange', 'np.arange', (['(1)', '(j_max + 1)'], {}), '(1, j_max + 1)\n', (1875, 1889), True, 'import numpy as np\n'), ((1903, 1919), 'numpy.arange', 'np.arange', (['n_max'], {}), '(n_max)\n', (1912, 1919), True, 'import numpy as np\n'), ((2185, 
2233), 'fys4150.project5.tridiagonal_solvers.tridiagonal_solve_specific', 'tridiagonal_solve_specific', (['(-zetas[-1] * dx ** 2)'], {}), '(-zetas[-1] * dx ** 2)\n', (2211, 2233), False, 'from fys4150.project5.tridiagonal_solvers import tridiagonal_solve_specific, tridiagonal_solve_specific_periodic, solve_specific_periodic_with_lu, tridiagonal_solve\n'), ((2483, 2531), 'fys4150.project5.tridiagonal_solvers.tridiagonal_solve_specific', 'tridiagonal_solve_specific', (['(-zetas[-1] * dx ** 2)'], {}), '(-zetas[-1] * dx ** 2)\n', (2509, 2531), False, 'from fys4150.project5.tridiagonal_solvers import tridiagonal_solve_specific, tridiagonal_solve_specific_periodic, solve_specific_periodic_with_lu, tridiagonal_solve\n'), ((2950, 2973), 'numpy.arange', 'np.arange', (['(0)', '(j_max + 1)'], {}), '(0, j_max + 1)\n', (2959, 2973), True, 'import numpy as np\n'), ((2987, 3003), 'numpy.arange', 'np.arange', (['n_max'], {}), '(n_max)\n', (2996, 3003), True, 'import numpy as np\n'), ((3285, 3342), 'fys4150.project5.tridiagonal_solvers.tridiagonal_solve_specific_periodic', 'tridiagonal_solve_specific_periodic', (['(-zetas[-1] * dx ** 2)'], {}), '(-zetas[-1] * dx ** 2)\n', (3320, 3342), False, 'from fys4150.project5.tridiagonal_solvers import tridiagonal_solve_specific, tridiagonal_solve_specific_periodic, solve_specific_periodic_with_lu, tridiagonal_solve\n'), ((3595, 3652), 'fys4150.project5.tridiagonal_solvers.tridiagonal_solve_specific_periodic', 'tridiagonal_solve_specific_periodic', (['(-zetas[-1] * dx ** 2)'], {}), '(-zetas[-1] * dx ** 2)\n', (3630, 3652), False, 'from fys4150.project5.tridiagonal_solvers import tridiagonal_solve_specific, tridiagonal_solve_specific_periodic, solve_specific_periodic_with_lu, tridiagonal_solve\n'), ((4034, 4057), 'numpy.arange', 'np.arange', (['(0)', '(j_max + 1)'], {}), '(0, j_max + 1)\n', (4043, 4057), True, 'import numpy as np\n'), ((4071, 4087), 'numpy.arange', 'np.arange', (['n_max'], {}), '(n_max)\n', (4080, 4087), True, 'import numpy as 
np\n'), ((4171, 4189), 'numpy.ones', 'np.ones', (['(j_max + 1)'], {}), '(j_max + 1)\n', (4178, 4189), True, 'import numpy as np\n'), ((4202, 4220), 'numpy.ones', 'np.ones', (['(j_max + 1)'], {}), '(j_max + 1)\n', (4209, 4220), True, 'import numpy as np\n'), ((4575, 4604), 'fys4150.project5.tridiagonal_solvers.tridiagonal_solve', 'tridiagonal_solve', (['a', 'b', 'c', 'F'], {}), '(a, b, c, F)\n', (4592, 4604), False, 'from fys4150.project5.tridiagonal_solvers import tridiagonal_solve_specific, tridiagonal_solve_specific_periodic, solve_specific_periodic_with_lu, tridiagonal_solve\n'), ((5287, 5316), 'fys4150.project5.tridiagonal_solvers.tridiagonal_solve', 'tridiagonal_solve', (['a', 'b', 'c', 'F'], {}), '(a, b, c, F)\n', (5304, 5316), False, 'from fys4150.project5.tridiagonal_solvers import tridiagonal_solve_specific, tridiagonal_solve_specific_periodic, solve_specific_periodic_with_lu, tridiagonal_solve\n'), ((6614, 6626), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6624, 6626), True, 'import matplotlib.pyplot as plt\n'), ((8106, 8118), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8116, 8118), True, 'import matplotlib.pyplot as plt\n'), ((8127, 8142), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (8137, 8142), True, 'import matplotlib.pyplot as plt\n'), ((8151, 8168), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""psi"""'], {}), "('psi')\n", (8161, 8168), True, 'import matplotlib.pyplot as plt\n'), ((8250, 8262), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8260, 8262), True, 'import matplotlib.pyplot as plt\n'), ((8656, 8671), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (8666, 8671), True, 'import matplotlib.pyplot as plt\n'), ((8680, 8698), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""zeta"""'], {}), "('zeta')\n", (8690, 8698), True, 'import matplotlib.pyplot as plt\n'), ((8707, 8719), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8717, 8719), 
True, 'import matplotlib.pyplot as plt\n'), ((10497, 10509), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10507, 10509), True, 'import matplotlib.pyplot as plt\n'), ((10525, 10541), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (10533, 10541), True, 'import numpy as np\n'), ((11030, 11042), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11040, 11042), True, 'import matplotlib.pyplot as plt\n'), ((11107, 11123), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""dt"""'], {}), "('dt')\n", (11117, 11123), True, 'import matplotlib.pyplot as plt\n'), ((11132, 11164), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Max absolute error"""'], {}), "('Max absolute error')\n", (11142, 11164), True, 'import matplotlib.pyplot as plt\n'), ((4657, 4696), 'numpy.hstack', 'np.hstack', (['(psi_n[-1], psi_n, psi_n[0])'], {}), '((psi_n[-1], psi_n, psi_n[0]))\n', (4666, 4696), True, 'import numpy as np\n'), ((5631, 5670), 'numpy.hstack', 'np.hstack', (['(psi_n[-1], psi_n, psi_n[0])'], {}), '((psi_n[-1], psi_n, psi_n[0]))\n', (5640, 5670), True, 'import numpy as np\n'), ((2635, 2659), 'numpy.hstack', 'np.hstack', (['(0, psi_n, 0)'], {}), '((0, psi_n, 0))\n', (2644, 2659), True, 'import numpy as np\n'), ((3756, 3780), 'numpy.hstack', 'np.hstack', (['(0, psi_n, 0)'], {}), '((0, psi_n, 0))\n', (3765, 3780), True, 'import numpy as np\n'), ((2338, 2362), 'numpy.hstack', 'np.hstack', (['(0, psi_n, 0)'], {}), '((0, psi_n, 0))\n', (2347, 2362), True, 'import numpy as np\n'), ((3447, 3471), 'numpy.hstack', 'np.hstack', (['(0, psi_n, 0)'], {}), '((0, psi_n, 0))\n', (3456, 3471), True, 'import numpy as np\n'), ((10910, 10921), 'numpy.mean', 'np.mean', (['ti'], {}), '(ti)\n', (10917, 10921), True, 'import numpy as np\n'), ((11008, 11019), 'numpy.mean', 'np.mean', (['ti'], {}), '(ti)\n', (11015, 11019), True, 'import numpy as np\n'), ((10234, 10256), 'numpy.abs', 'np.abs', (['(psi0 - psis[i])'], {}), '(psi0 - psis[i])\n', (10240, 10256), True, 'import numpy as 
np\n'), ((10293, 10317), 'numpy.abs', 'np.abs', (['(zeta0 - zetas[i])'], {}), '(zeta0 - zetas[i])\n', (10299, 10317), True, 'import numpy as np\n')] |
"""
demo03_balance.py 样本类别均衡化
"""
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp
data = np.loadtxt('../ml_data/imbalance.txt',
delimiter=',', dtype='f8')
x = data[:, :-1]
y = data[:, -1]
# 选择svm做分类
train_x, test_x, train_y, test_y = \
ms.train_test_split(x, y, test_size=0.25,
random_state=5)
model = svm.SVC(kernel='linear',
class_weight='balanced')
model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))
# 绘制分类边界线
n = 500
l, r = x[:, 0].min() - 1, x[:, 0].max() + 1
b, t = x[:, 1].min() - 1, x[:, 1].max() + 1
grid_x = np.meshgrid(np.linspace(l, r, n),
np.linspace(b, t, n))
flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel()))
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)
mp.figure('Class Balanced', facecolor='lightgray')
mp.title('Class Balanced', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y,
cmap='gray')
mp.scatter(x[:, 0], x[:, 1], c=y, cmap='brg', s=80)
mp.show() | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.scatter",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"matplotlib.pyplot.pcolormesh",
"sklearn.svm.SVC",
"matplotlib.pyplot.tick_params",
... | [((185, 250), 'numpy.loadtxt', 'np.loadtxt', (['"""../ml_data/imbalance.txt"""'], {'delimiter': '""","""', 'dtype': '"""f8"""'}), "('../ml_data/imbalance.txt', delimiter=',', dtype='f8')\n", (195, 250), True, 'import numpy as np\n'), ((338, 395), 'sklearn.model_selection.train_test_split', 'ms.train_test_split', (['x', 'y'], {'test_size': '(0.25)', 'random_state': '(5)'}), '(x, y, test_size=0.25, random_state=5)\n', (357, 395), True, 'import sklearn.model_selection as ms\n'), ((410, 459), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""', 'class_weight': '"""balanced"""'}), "(kernel='linear', class_weight='balanced')\n", (417, 459), True, 'import sklearn.svm as svm\n'), ((913, 963), 'matplotlib.pyplot.figure', 'mp.figure', (['"""Class Balanced"""'], {'facecolor': '"""lightgray"""'}), "('Class Balanced', facecolor='lightgray')\n", (922, 963), True, 'import matplotlib.pyplot as mp\n'), ((964, 1003), 'matplotlib.pyplot.title', 'mp.title', (['"""Class Balanced"""'], {'fontsize': '(20)'}), "('Class Balanced', fontsize=20)\n", (972, 1003), True, 'import matplotlib.pyplot as mp\n'), ((1004, 1031), 'matplotlib.pyplot.xlabel', 'mp.xlabel', (['"""x"""'], {'fontsize': '(14)'}), "('x', fontsize=14)\n", (1013, 1031), True, 'import matplotlib.pyplot as mp\n'), ((1032, 1059), 'matplotlib.pyplot.ylabel', 'mp.ylabel', (['"""y"""'], {'fontsize': '(14)'}), "('y', fontsize=14)\n", (1041, 1059), True, 'import matplotlib.pyplot as mp\n'), ((1060, 1088), 'matplotlib.pyplot.tick_params', 'mp.tick_params', ([], {'labelsize': '(10)'}), '(labelsize=10)\n', (1074, 1088), True, 'import matplotlib.pyplot as mp\n'), ((1089, 1145), 'matplotlib.pyplot.pcolormesh', 'mp.pcolormesh', (['grid_x[0]', 'grid_x[1]', 'grid_y'], {'cmap': '"""gray"""'}), "(grid_x[0], grid_x[1], grid_y, cmap='gray')\n", (1102, 1145), True, 'import matplotlib.pyplot as mp\n'), ((1147, 1198), 'matplotlib.pyplot.scatter', 'mp.scatter', (['x[:, 0]', 'x[:, 1]'], {'c': 'y', 'cmap': '"""brg"""', 's': '(80)'}), 
"(x[:, 0], x[:, 1], c=y, cmap='brg', s=80)\n", (1157, 1198), True, 'import matplotlib.pyplot as mp\n'), ((1199, 1208), 'matplotlib.pyplot.show', 'mp.show', ([], {}), '()\n', (1206, 1208), True, 'import matplotlib.pyplot as mp\n'), ((531, 576), 'sklearn.metrics.classification_report', 'sm.classification_report', (['test_y', 'pred_test_y'], {}), '(test_y, pred_test_y)\n', (555, 576), True, 'import sklearn.metrics as sm\n'), ((706, 726), 'numpy.linspace', 'np.linspace', (['l', 'r', 'n'], {}), '(l, r, n)\n', (717, 726), True, 'import numpy as np\n'), ((749, 769), 'numpy.linspace', 'np.linspace', (['b', 't', 'n'], {}), '(b, t, n)\n', (760, 769), True, 'import numpy as np\n')] |
import numpy as np
import os
from model.text.ContainsText import ContainsText
from log import get_logger
class Voogle(object):
    '''
    A query-by-voice system.

    Couples a query-by-voice similarity model with an audio dataset and
    ranks the dataset's audio files by similarity to a user's vocal query,
    optionally constrained by a text query.
    '''

    def __init__(
        self,
        model,
        dataset,
        require_text_match,
        text_handler=ContainsText(),
        matches=15):
        '''
        Voogle constructor

        Arguments:
            model: A QueryByVoiceModel. The model used to perform similarity
                calculations
            dataset: A python generator. The generator used to load audio
                representations for similarity ranking.
            require_text_match: A boolean. If true ranking is performed only on
                dataset items that match the user's text query.
            text_handler: A TextHandler object. The model for determining if
                the user's text matches the audio text description.
                NOTE(review): the default ContainsText() is instantiated once
                at class-definition time and shared across all instances
                relying on the default -- confirm this sharing is intended.
            matches: An int. The number of matches to return during search.
        '''
        self.logger = get_logger('Voogle')
        self.logger.debug('Initializing')
        self.model = model
        self.dataset = dataset
        self.require_text_match = require_text_match
        self.text_handler = text_handler
        self.matches = matches
        self.logger.debug('Initialization complete')

    def search(self, query, sampling_rate, text_input=''):
        '''
        Search the dataset for the closest match to the given vocal query.

        Arguments:
            query: A 1D numpy array. The vocal query.
            sampling_rate: An integer. The sampling rate of the query.
            text_input: A string. Optional text input describing the target
                sound.

        Returns:
            Four equal-sized lists:
                - The display names (basenames) of the best-matching audio
                  files, in descending order of similarity with the query.
                - The full filenames of those audio files within the dataset.
                - A list of booleans indicating if the audio file text
                  matches the user's text query.
                - A list of float-valued similarity scores, normalized so
                  the top match has score 1.0.
        '''
        # Construct query representation
        query = self.model.construct_representation(
            [query], [sampling_rate], is_query=True)
        # Seed the text handler with the user's text query
        self.text_handler.set_query_text(text_input)
        # Retrieve the similarity measure between query and each dataset entry
        model_output = {}
        previous_handle = ''
        previous_index = 0
        generator = self.dataset.data_generator(
            query, self.text_handler, self.require_text_match)
        for batch_query, batch_items, file_tracker in generator:
            # Run inference on this batch
            ranks = self.model.measure_similarity(batch_query, batch_items)
            # Determine the best score for each audio file. file_tracker maps
            # the batch index where a file starts to that file's handle, so a
            # file's scores span ranks[previous_index:index).
            for index, handle in file_tracker.items():
                if index != 0 and previous_handle != '':
                    max_file_rank = np.max(ranks[previous_index:index])
                    model_output = self._update_model_output(
                        model_output, previous_handle, max_file_rank)
                previous_handle = handle
                previous_index = index
        # Flush the scores of the final file in the last batch.
        # NOTE(review): if the generator yields no batches, `ranks` is unbound
        # here and this raises NameError -- confirm the dataset always
        # produces at least one batch.
        max_file_rank = np.max(ranks[previous_index:])
        model_output = self._update_model_output(
            model_output, previous_handle, max_file_rank)
        # Retrieve the top audio filenames (highest similarity first)
        match_list = sorted(model_output, key=model_output.get)[-self.matches:]
        match_list.reverse()
        filenames = [self.dataset.handle_to_filename(m) for m in match_list]
        display_names = [os.path.basename(f) for f in filenames]
        # Find the audio files also containing the user's text query; skipped
        # when the ranking was already restricted to text matches or no text
        # query was given.
        if self.require_text_match or not text_input:
            text_matches = [False] * len(match_list)
        else:
            text_features = [
                self.dataset.handle_to_text_features(m) for m in match_list]
            text_matches = [
                self.text_handler.is_match([t]) for t in text_features]
        # Retrieve the normalized similarity scores of the matches
        max_score = model_output[match_list[0]]
        similarity_scores = [model_output[m] / max_score for m in match_list]
        return display_names, filenames, text_matches, similarity_scores

    def _update_model_output(self, model_output, handle, max_file_rank):
        # Keep the best (maximum) similarity score seen so far for `handle`
        # and return the updated mapping.
        if handle in model_output:
            model_output[handle] = max(
                model_output[handle], max_file_rank)
        else:
            model_output[handle] = max_file_rank
        return model_output
| [
"log.get_logger",
"numpy.max",
"model.text.ContainsText.ContainsText",
"os.path.basename"
] | [((288, 302), 'model.text.ContainsText.ContainsText', 'ContainsText', ([], {}), '()\n', (300, 302), False, 'from model.text.ContainsText import ContainsText\n'), ((1019, 1039), 'log.get_logger', 'get_logger', (['"""Voogle"""'], {}), "('Voogle')\n", (1029, 1039), False, 'from log import get_logger\n'), ((3427, 3457), 'numpy.max', 'np.max', (['ranks[previous_index:]'], {}), '(ranks[previous_index:])\n', (3433, 3457), True, 'import numpy as np\n'), ((3829, 3848), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (3845, 3848), False, 'import os\n'), ((3149, 3184), 'numpy.max', 'np.max', (['ranks[previous_index:index]'], {}), '(ranks[previous_index:index])\n', (3155, 3184), True, 'import numpy as np\n')] |
import argparse
import os
import numpy as np
import math
import itertools
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from models import *
from datasets import *
from utils import *
import torch.nn as nn
import torch.nn.functional as F
import torch
# Output directories for generated sample images and model checkpoints.
os.makedirs('images', exist_ok=True)
os.makedirs('saved_models', exist_ok=True)
# Command-line configuration for pix2pix-style image-to-image translation training.
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=0, help='epoch to start training from')
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--dataset_name', type=str, default="facades", help='name of the dataset')
parser.add_argument('--batch_size', type=int, default=1, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')
parser.add_argument('--decay_epoch', type=int, default=100, help='epoch from which to start lr decay')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--img_height', type=int, default=128, help='size of image height')
parser.add_argument('--img_width', type=int, default=128, help='size of image width')
parser.add_argument('--channels', type=int, default=3, help='number of image channels')
parser.add_argument('--sample_interval', type=int, default=500, help='interval between sampling of images from generators')
parser.add_argument('--checkpoint_interval', type=int, default=-1, help='interval between model checkpoints')
parser.add_argument('--generator_type', type=str, default='unet', help="'resnet' or 'unet'")
parser.add_argument('--n_residual_blocks', type=int, default=6, help='number of residual blocks in resnet generator')
opt = parser.parse_args()
print(opt)
# Loss functions: MSE adversarial loss (LSGAN-style) plus L1 translation loss.
criterion_GAN = torch.nn.MSELoss()
criterion_translation = torch.nn.L1Loss()
cuda = True if torch.cuda.is_available() else False
# Calculate output of image discriminator (PatchGAN).
# NOTE(review): the 2**4 factor presumably matches four stride-2 downsampling
# layers in the Discriminator from models.py -- confirm if that changes.
patch_h, patch_w = int(opt.img_height / 2**4), int(opt.img_width / 2**4)
patch = (opt.batch_size, 1, patch_h, patch_w)
# Initialize generator and discriminator (architecture selected via CLI flag).
generator = GeneratorResNet(resblocks=opt.n_residual_blocks) if opt.generator_type == 'resnet' else GeneratorUNet()
discriminator = Discriminator()
if cuda:
    # Move models and loss modules to the GPU when one is available.
    generator = generator.cuda()
    discriminator = discriminator.cuda()
    criterion_GAN.cuda()
    criterion_translation.cuda()
if opt.epoch != 0:
    # Resume training: load the checkpoints saved at the requested epoch.
    # BUG FIX: the original passed the literal string 'saved_models/generator_%d.pth'
    # to torch.load without filling in the '%d' placeholder, so resuming always
    # failed to find the file; format with opt.epoch to mirror the save path
    # ('saved_models/generator_%d.pth' % epoch) used at checkpoint time.
    generator.load_state_dict(torch.load('saved_models/generator_%d.pth' % opt.epoch))
    discriminator.load_state_dict(torch.load('saved_models/discriminator_%d.pth' % opt.epoch))
else:
    # Fresh run: initialize weights (helper from the wildcard 'models' import).
    generator.apply(weights_init_normal)
    discriminator.apply(weights_init_normal)
# Loss weight of L1 pixel-wise loss between translated image and real image
lambda_trans = 100
# Optimizers (one Adam instance per network, shared hyperparameters).
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
# Learning rate update schedulers.
# NOTE(review): the LambdaLR helper here comes from the wildcard 'utils' import
# (it shadows nothing from torch); its exact decay shape should be confirmed
# against utils.py.
lr_scheduler_G = torch.optim.lr_scheduler.LambdaLR(optimizer_G, lr_lambda=LambdaLR(opt.n_epochs, opt.epoch, opt.decay_epoch).step)
lr_scheduler_D = torch.optim.lr_scheduler.LambdaLR(optimizer_D, lr_lambda=LambdaLR(opt.n_epochs, opt.epoch, opt.decay_epoch).step)
# Inputs & targets memory allocation -- these tensors are reused every batch
# via copy_() in the training loop below.
Tensor = torch.cuda.FloatTensor if cuda else torch.Tensor
input_A = Tensor(opt.batch_size, opt.channels, opt.img_height, opt.img_width)
input_B = Tensor(opt.batch_size, opt.channels, opt.img_height, opt.img_width)
# Adversarial ground truths (one PatchGAN score map of ones / zeros).
valid = Variable(Tensor(np.ones(patch)), requires_grad=False)
fake = Variable(Tensor(np.zeros(patch)), requires_grad=False)
# Dataset loader. Input images are side-by-side A|B pairs, hence width*2.
# NOTE(review): 'Image' (PIL) is not imported explicitly in this file; it is
# presumably re-exported by one of the wildcard imports (datasets/utils) --
# confirm, otherwise this line raises NameError.
transforms_ = [ transforms.Resize((opt.img_height, opt.img_width*2), Image.BICUBIC),
                transforms.ToTensor(),
                transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) ]
dataloader = DataLoader(ImageDataset("../../data/%s" % opt.dataset_name, transforms_=transforms_),
                        batch_size=opt.batch_size, shuffle=True, num_workers=opt.n_cpu)
# Progress logger (from the wildcard 'utils' import).
logger = Logger(opt.n_epochs, len(dataloader), opt.sample_interval)
# ----------
#  Training
# ----------
for epoch in range(opt.epoch, opt.n_epochs):
    for i, batch in enumerate(dataloader):
        # Set model input: copy the batch into the pre-allocated tensors.
        real_A = Variable(input_A.copy_(batch['A']))
        real_B = Variable(input_B.copy_(batch['B']))
        # ------------------
        #  Train Generators
        # ------------------
        optimizer_G.zero_grad()
        # GAN loss: generator translates B -> A and tries to fool the
        # discriminator into scoring the fake as 'valid'.
        fake_A = generator(real_B)
        pred_fake = discriminator(fake_A, real_B)
        loss_GAN = criterion_GAN(pred_fake, valid)
        # L1 reconstruction loss between the translated and the real image.
        loss_trans = criterion_translation(fake_A, real_A)
        # Total loss: adversarial term + weighted pixel-wise term.
        loss_G = loss_GAN + lambda_trans * loss_trans
        loss_G.backward()
        optimizer_G.step()
        # ---------------------
        #  Train Discriminator
        # ---------------------
        optimizer_D.zero_grad()
        # Real loss: real pairs should be scored as 'valid'.
        pred_real = discriminator(real_A, real_B)
        loss_real = criterion_GAN(pred_real, valid)
        # Fake loss: detach() blocks gradients from flowing back into the
        # generator during the discriminator update.
        pred_fake = discriminator(fake_A.detach(), real_B)
        loss_fake = criterion_GAN(pred_fake, fake)
        # Total loss: average of the real and fake terms.
        loss_D = 0.5 * (loss_real + loss_fake)
        loss_D.backward()
        optimizer_D.step()
        # --------------
        #  Log Progress
        # --------------
        # NOTE(review): logger is the project-local utils.Logger; the exact
        # signature of log() is assumed from this call site.
        logger.log({'loss_G': loss_G, 'loss_G_trans': loss_trans, 'loss_D': loss_D},
                   images={'real_B': real_B, 'fake_A': fake_A, 'real_A': real_A},
                   epoch=epoch, batch=i)
    # Update learning rates once per epoch.
    lr_scheduler_G.step()
    lr_scheduler_D.step()
    if opt.checkpoint_interval != -1 and epoch % opt.checkpoint_interval == 0:
        # Save model checkpoints, tagged with the epoch number.
        torch.save(generator.state_dict(), 'saved_models/generator_%d.pth' % epoch)
        torch.save(discriminator.state_dict(), 'saved_models/discriminator_%d.pth' % epoch)
| [
"torch.nn.MSELoss",
"os.makedirs",
"argparse.ArgumentParser",
"torch.nn.L1Loss",
"torch.load",
"numpy.zeros",
"numpy.ones",
"torchvision.transforms.ToTensor",
"torch.cuda.is_available",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize"
] | [((404, 440), 'os.makedirs', 'os.makedirs', (['"""images"""'], {'exist_ok': '(True)'}), "('images', exist_ok=True)\n", (415, 440), False, 'import os\n'), ((441, 483), 'os.makedirs', 'os.makedirs', (['"""saved_models"""'], {'exist_ok': '(True)'}), "('saved_models', exist_ok=True)\n", (452, 483), False, 'import os\n'), ((494, 519), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (517, 519), False, 'import argparse\n'), ((2181, 2199), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (2197, 2199), False, 'import torch\n'), ((2224, 2241), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (2239, 2241), False, 'import torch\n'), ((2258, 2283), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2281, 2283), False, 'import torch\n'), ((4153, 4222), 'torchvision.transforms.Resize', 'transforms.Resize', (['(opt.img_height, opt.img_width * 2)', 'Image.BICUBIC'], {}), '((opt.img_height, opt.img_width * 2), Image.BICUBIC)\n', (4170, 4222), True, 'import torchvision.transforms as transforms\n'), ((4238, 4259), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4257, 4259), True, 'import torchvision.transforms as transforms\n'), ((4277, 4331), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (4297, 4331), True, 'import torchvision.transforms as transforms\n'), ((2879, 2922), 'torch.load', 'torch.load', (['"""saved_models/generator_%d.pth"""'], {}), "('saved_models/generator_%d.pth')\n", (2889, 2922), False, 'import torch\n'), ((2958, 3005), 'torch.load', 'torch.load', (['"""saved_models/discriminator_%d.pth"""'], {}), "('saved_models/discriminator_%d.pth')\n", (2968, 3005), False, 'import torch\n'), ((4019, 4033), 'numpy.ones', 'np.ones', (['patch'], {}), '(patch)\n', (4026, 4033), True, 'import numpy as np\n'), ((4080, 4095), 'numpy.zeros', 'np.zeros', (['patch'], {}), '(patch)\n', (4088, 4095), 
True, 'import numpy as np\n')] |
"""
:class:`~pyUSID.io.image.ImageTranslator` class that translates conventional 2D images to USID HDF5 files
Created on Feb 9, 2016
@author: <NAME>, <NAME>
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import os
import sys
import h5py
import numpy as np
from PIL import Image
from sidpy.base.num_utils import contains_integers
from sidpy.hdf.hdf_utils import write_simple_attrs
from .numpy_translator import ArrayTranslator
from .write_utils import Dimension
# Python 2/3 compatibility shims: on Py3 map `unicode` onto `str` so the
# isinstance checks below work; on Py2 the FileExistsError/FileNotFoundError
# builtins do not exist, so fall back to ValueError for the raise sites below.
if sys.version_info.major == 3:
    unicode = str
else:
    FileExistsError = ValueError
    FileNotFoundError = ValueError
class ImageTranslator(ArrayTranslator):
    """
    Translates data from an image file to an HDF5 file
    """
    def __init__(self, *args, **kwargs):
        # All configuration is handled by the parent ArrayTranslator.
        super(ImageTranslator, self).__init__(*args, **kwargs)
    @staticmethod
    def _parse_file_path(image_path, h5_path=None):
        """
        Validates the path to the image file and derives (or validates) the
        path of the output HDF5 file.
        Parameters
        ---------------
        image_path : str
            absolute path to the image file
        h5_path : str, optional
            absolute path to the desired output HDF5 file. If nothing is provided, a valid file path will be provided
        Returns
        ----------
        image_path : str
            Absolute file path to the image
        h5_path : str
            absolute path to the desired output HDF5 file.
        """
        if not isinstance(image_path, (str, unicode)):
            raise TypeError("'image_path' argument for ImageTranslator should be a str or unicode")
        if not os.path.exists(os.path.abspath(image_path)):
            raise FileNotFoundError('Specified image does not exist.')
        else:
            image_path = os.path.abspath(image_path)
        if h5_path is not None:
            if not isinstance(h5_path, (str, unicode)):
                raise TypeError("'h5_path' argument for ImageTranslator should be a str or unicode (if provided)")
            # NOT checking the extension of the file path for simplicity
        else:
            base_name, _ = os.path.splitext(image_path)
            h5_name = base_name + '.h5'
            # NOTE(review): image_path is absolute at this point, so h5_name is
            # absolute too, and os.path.join discards its first argument when
            # the second is absolute -- this expression effectively evaluates
            # to just h5_name. Confirm before simplifying.
            h5_path = os.path.join(image_path, h5_name)
        if os.path.exists(os.path.abspath(h5_path)):
            # Refuse to overwrite an existing output file.
            raise FileExistsError("ImageTranslator: There is already a valid (output HDF5) file at:\n{}\n"
                                  "Please consider providing an alternate path or deleting the "
                                  "specified file".format(h5_path))
        return image_path, h5_path
    def translate(self, image_path, h5_path=None, bin_factor=None, interp_func=Image.BICUBIC, normalize=False,
                  **image_args):
        """
        Translates the image in the provided file into a USID HDF5 file
        Parameters
        ----------------
        image_path : str
            Absolute path to folder holding the image files
        h5_path : str, optional
            Absolute path to where the HDF5 file should be located.
            Default is None
        bin_factor : uint or array-like of uint, optional
            Down-sampling factor for each dimension. Default is None.
            If specifying different binning for each dimension, please specify as (height binning, width binning)
        interp_func : int, optional. Default = :attr:`PIL.Image.BICUBIC`
            How the image will be interpolated to provide the down-sampled or binned image.
            For more information see instructions for the `resample` argument for :meth:`PIL.Image.resize`
        normalize : boolean, optional. Default = False
            Should the raw image be normalized between the values of 0 and 1
        image_args : dict
            Arguments to be passed to read_image. Arguments depend on the type of image.
        Returns
        ----------
        h5_main : h5py.Dataset
            HDF5 Dataset object that contains the flattened images
        """
        image_path, h5_path = self._parse_file_path(image_path, h5_path=h5_path)
        image = read_image(image_path, **image_args)
        image_parms = dict()
        # First two axes are treated as (height, width); any colour channel
        # dimension beyond that is ignored here.
        usize, vsize = image.shape[:2]
        '''
        Check if a bin_factor is given. Set up binning objects if it is.
        '''
        if bin_factor is not None:
            if isinstance(bin_factor, (list, tuple)):
                if not contains_integers(bin_factor, min_val=1):
                    raise TypeError('bin_factor should contain positive whole integers')
                if len(bin_factor) == 2:
                    bin_factor = tuple(bin_factor)
                else:
                    raise ValueError('Input parameter `bin_factor` must be a length 2 array-like or an integer.\n' +
                                     '{} was given.'.format(bin_factor))
            elif isinstance(bin_factor, int):
                bin_factor = (bin_factor, bin_factor)
            else:
                raise TypeError('bin_factor should either be an integer or an iterable of positive integers')
            # NOTE(review): this check lets 0 through for an int bin_factor
            # (the contains_integers(min_val=1) guard only runs for list/tuple
            # input); a bin_factor of 0 would cause a ZeroDivision-like resize
            # failure below -- confirm whether this should be <= 0.
            if np.min(bin_factor) < 0:
                raise ValueError('bin_factor must consist of positive factors')
            if interp_func not in [Image.NEAREST, Image.BILINEAR, Image.BICUBIC, Image.LANCZOS]:
                raise ValueError("'interp_func' argument for ImageTranslator.translate must be one of "
                                 "PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC, PIL.Image.LANCZOS")
            image_parms.update({'image_binning_size': bin_factor, 'image_PIL_resample_mode': interp_func})
            usize = int(usize / bin_factor[0])
            vsize = int(vsize / bin_factor[1])
            # Unfortunately, we need to make a round-trip through PIL for the interpolation. Not possible with numpy
            # PIL.Image.resize takes (width, height), hence (vsize, usize).
            img_obj = Image.fromarray(image)
            img_obj = img_obj.resize((vsize, usize), resample=interp_func)
            image = np.asarray(img_obj)
            # Working around occasional "cannot modify read-only array" error
            image = image.copy()
        '''
        Normalize Raw Image
        '''
        if normalize:
            # Shift to zero minimum, then scale so the maximum becomes 1.
            image -= np.min(image)
            image = image / np.float32(np.max(image))
        image_parms.update({'normalized': normalize,
                            'image_min': np.min(image), 'image_max': np.max(image)})
        """
        Enable the line below if there is a need make the image "look" the right side up. This would be manipulation
        # of the original data. Therefore it remains commented
        """
        # image = np.flipud(image)
        '''
        Ready to write to h5
        '''
        # Position dimensions (Y, X) over pixels; a single dummy spectral dim.
        pos_dims = [Dimension('Y', 'a.u.', np.arange(usize)), Dimension('X', 'a.u.', np.arange(vsize))]
        spec_dims = Dimension('arb', 'a.u.', 1)
        # Need to transpose to for correct reshaping
        image = image.transpose()
        h5_path = super(ImageTranslator, self).translate(h5_path, 'Raw_Data', image.reshape((-1, 1)),
                                                         'Intensity', 'a.u.', pos_dims, spec_dims,
                                                         translator_name='ImageTranslator', parm_dict=image_parms)
        with h5py.File(h5_path, mode='r+') as h5_f:
            # For legacy reasons:
            write_simple_attrs(h5_f, {'data_type': 'ImageData'})
        return h5_path
def read_image(image_path, as_grayscale=True, as_numpy_array=True, *args, **kwargs):
    """
    Load the image at `image_path` as a numpy array or a pillow Image.

    Text-like files (.txt, .csv) are parsed with :func:`numpy.loadtxt`;
    everything else is opened with pillow.

    Parameters
    ----------
    image_path : str
        Path to the image file
    as_grayscale : bool, optional. Default = True
        Whether or not to read the image as a grayscale image
    as_numpy_array : bool, optional. Default = True
        If set to True, the image is read into a numpy array. If not, it is
        returned as a pillow Image

    Returns
    -------
    image : :class:`numpy.ndarray` or :class:`PIL.Image.Image`
        Array with the image contents when `as_numpy_array` is True,
        otherwise a PIL.Image object for the file at `image_path`.
    """
    extension = os.path.splitext(image_path)[-1]
    if extension not in ['.txt', '.csv']:
        # Regular raster image: delegate to pillow.
        img_obj = Image.open(image_path)
        if as_grayscale:
            img_obj = img_obj.convert(mode="L", **kwargs)
        return np.asarray(img_obj) if as_numpy_array else img_obj
    # Text-based "image": numbers laid out in rows and columns.
    if extension == '.csv' and 'delimiter' not in kwargs.keys():
        kwargs['delimiter'] = ','
    img_data = np.loadtxt(image_path, *args, **kwargs)
    if as_numpy_array:
        return img_data
    # Round-trip through pillow, converted to 8-bit grayscale.
    return Image.fromarray(img_data).convert(mode="L")
| [
"sidpy.hdf.hdf_utils.write_simple_attrs",
"os.path.abspath",
"h5py.File",
"numpy.asarray",
"PIL.Image.open",
"numpy.min",
"numpy.max",
"numpy.arange",
"numpy.loadtxt",
"os.path.splitext",
"sidpy.base.num_utils.contains_integers",
"PIL.Image.fromarray",
"os.path.join"
] | [((8325, 8353), 'os.path.splitext', 'os.path.splitext', (['image_path'], {}), '(image_path)\n', (8341, 8353), False, 'import os\n'), ((8510, 8549), 'numpy.loadtxt', 'np.loadtxt', (['image_path', '*args'], {}), '(image_path, *args, **kwargs)\n', (8520, 8549), True, 'import numpy as np\n'), ((8770, 8792), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (8780, 8792), False, 'from PIL import Image\n'), ((1784, 1811), 'os.path.abspath', 'os.path.abspath', (['image_path'], {}), '(image_path)\n', (1799, 1811), False, 'import os\n'), ((2130, 2158), 'os.path.splitext', 'os.path.splitext', (['image_path'], {}), '(image_path)\n', (2146, 2158), False, 'import os\n'), ((2221, 2254), 'os.path.join', 'os.path.join', (['image_path', 'h5_name'], {}), '(image_path, h5_name)\n', (2233, 2254), False, 'import os\n'), ((2282, 2306), 'os.path.abspath', 'os.path.abspath', (['h5_path'], {}), '(h5_path)\n', (2297, 2306), False, 'import os\n'), ((5868, 5890), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (5883, 5890), False, 'from PIL import Image\n'), ((5986, 6005), 'numpy.asarray', 'np.asarray', (['img_obj'], {}), '(img_obj)\n', (5996, 6005), True, 'import numpy as np\n'), ((6206, 6219), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (6212, 6219), True, 'import numpy as np\n'), ((7279, 7308), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r+"""'}), "(h5_path, mode='r+')\n", (7288, 7308), False, 'import h5py\n'), ((7365, 7417), 'sidpy.hdf.hdf_utils.write_simple_attrs', 'write_simple_attrs', (['h5_f', "{'data_type': 'ImageData'}"], {}), "(h5_f, {'data_type': 'ImageData'})\n", (7383, 7417), False, 'from sidpy.hdf.hdf_utils import write_simple_attrs\n'), ((8641, 8666), 'PIL.Image.fromarray', 'Image.fromarray', (['img_data'], {}), '(img_data)\n', (8656, 8666), False, 'from PIL import Image\n'), ((8969, 8988), 'numpy.asarray', 'np.asarray', (['img_obj'], {}), '(img_obj)\n', (8979, 8988), True, 'import numpy as np\n'), ((1644, 
1671), 'os.path.abspath', 'os.path.abspath', (['image_path'], {}), '(image_path)\n', (1659, 1671), False, 'import os\n'), ((5108, 5126), 'numpy.min', 'np.min', (['bin_factor'], {}), '(bin_factor)\n', (5114, 5126), True, 'import numpy as np\n'), ((6369, 6382), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (6375, 6382), True, 'import numpy as np\n'), ((6397, 6410), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (6403, 6410), True, 'import numpy as np\n'), ((6751, 6767), 'numpy.arange', 'np.arange', (['usize'], {}), '(usize)\n', (6760, 6767), True, 'import numpy as np\n'), ((6793, 6809), 'numpy.arange', 'np.arange', (['vsize'], {}), '(vsize)\n', (6802, 6809), True, 'import numpy as np\n'), ((4428, 4468), 'sidpy.base.num_utils.contains_integers', 'contains_integers', (['bin_factor'], {'min_val': '(1)'}), '(bin_factor, min_val=1)\n', (4445, 4468), False, 'from sidpy.base.num_utils import contains_integers\n'), ((6259, 6272), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (6265, 6272), True, 'import numpy as np\n')] |
import numpy
class Field:
    """
    A 2D matrix that holds RGB colour values.

    It has a few convenience methods that allow better expression of the
    intent, compared to a pure 2D array.
    """
    # Default colour used to initialise and clear the field.
    BLACK = [0, 0, 0]

    def __init__(self, width: int, height: int):
        self.width: int = width
        self.height: int = height
        # (Removed a dead `self.field = []` assignment that was immediately
        # overwritten by the generated grid.)
        self.field = self._generate_field(self.width, self.height)

    def set_all_pixels_to(self, color: list) -> None:
        """
        Sets all pixels of the field to the same color.
        :param color: The color to be applied to all pixels.
        :return: None. Side effect is the internal state of the field.
        """
        self.set_all_pixels_on_array(color, self.field)

    def set_all_pixels_on_array(self, color, field) -> None:
        """
        Sets every cell of the given height x width grid to `color`.
        :param color: The color to store in every cell.
        :param field: The 2D grid to modify in place.
        :return: None. The grid is modified in place.
        """
        for y in range(self.height):
            for x in range(self.width):
                field[y][x] = color

    def set_all_pixels_to_black(self) -> None:
        """
        Clears the field by setting all pixels to black.
        :return: None. Side effect is the internal state of the field.
        """
        self.set_all_pixels_to(Field.BLACK)

    def _generate_field(self, width, height) -> list:
        # Build a height x width grid where every cell starts out black.
        # Cells alias the shared BLACK constant (same as the original
        # append-based construction); set_pixel replaces the reference, so
        # this is safe as long as colour lists are never mutated in place.
        return [[Field.BLACK for _ in range(width)] for _ in range(height)]

    def set_pixel(self, x: int, y: int, color: list) -> None:
        """
        Sets the color of a pixel at a defined position.
        If the position is outside of the field, nothing happens.
        :param x: X coordinate of the pixel to be changed, 0-based
        :param y: Y coordinate of the pixel to be changed, 0-based
        :param color: Color of the pixel, a list of R, G and B
        :return: None. Side effect is the internal state of the field.
        """
        if self.pixel_is_inside_field(x, y):
            self.field[y][x] = color

    def pixel_is_inside_field(self, x: int, y: int) -> bool:
        """
        Tests whether a pixel is within the boundaries of the field.
        :param x: X coordinate of the pixel to be checked, 0-based
        :param y: Y coordinate of the pixel to be checked, 0-based
        :return: True if the pixel is on the field, False if it's outside
        """
        return 0 <= y < self.height and 0 <= x < self.width

    def is_equal(self, field_to_compare: 'Field') -> bool:
        """
        Compares the pixels of this field with a reference field.
        This can be used to check whether the state has changed, e.g. for repaint decisions.
        :param field_to_compare: Reference field
        :return: True if all pixels are equal, False if one or more pixels are different.
        """
        if field_to_compare is None:
            return False
        return numpy.array_equal(field_to_compare.field, self.field)
| [
"numpy.array_equal"
] | [((2873, 2926), 'numpy.array_equal', 'numpy.array_equal', (['field_to_compare.field', 'self.field'], {}), '(field_to_compare.field, self.field)\n', (2890, 2926), False, 'import numpy\n')] |
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
sys.path.append('/data/private/chenyutong/VLP/')
import logging
import glob
import math
import json
import argparse
from tqdm import tqdm, trange
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import random, math
import copy
from pytorch_pretrained_bert.tokenization import BertTokenizer, WhitespaceTokenizer, Indexer
from transformers import XLMTokenizer
from pytorch_pretrained_bert.modeling import BertForPreTrainingLossMask, BertForSeq2SeqDecoder
from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
from vlp.loader_utils import batch_list_to_batch_tensors
import vlp.seq2seq_loader as seq2seq_loader
from vlp.decode_img2txt import detokenize
#from vlp.seq2seq_loader import Preprocess4Seq2seqBilingual
from vlp.scst_utils import *
from vlp.lang_utils import language_eval
from misc.data_parallel import DataParallelImbalance
from mosestokenizer import MosesDetokenizer
def _get_max_step_model(output_dir):
fn_model_list = glob.glob(os.path.join(output_dir, "model.*.bin"))
fn_optim_list = glob.glob(os.path.join(output_dir, "optim.*.bin"))
if not fn_model_list:
return None
both_set = set([int(Path(fn).stem.split('.')[-1]) for fn in fn_model_list])
if both_set:
return max(both_set)
else:
return None
def _get_max_epoch_model(output_dir):
fn_model_list = glob.glob(os.path.join(output_dir, "model.*.bin"))
fn_optim_list = glob.glob(os.path.join(output_dir, "optim.*.bin"))
if (not fn_model_list) or (not fn_optim_list):
return None
both_set = set([int(Path(fn).stem.split('.')[-1]) for fn in fn_model_list]
) & set([int(Path(fn).stem.split('.')[-1]) for fn in fn_optim_list])
if both_set:
return max(both_set)
else:
return None
def main():
parser = argparse.ArgumentParser()
# Data augmentation
parser.add_argument('--dataset', default='coco', type=str, nargs='*', help='')
parser.add_argument('--sampling_alpha', default=0.5, type=float)
parser.add_argument('--sampling_beta', default=0.5, type=float, help='#samples per epoch=sampling_beta*total_num_samples')
parser.add_argument('--max_len_en', default=25, type=int, help='maximum length of English in **bilingual** corpus')
parser.add_argument('--max_len_zh', default=25, type=int, help='maximum length of Chinese in **bilingual** corpus')
parser.add_argument('--max_len_en_cap', default=25, type=int, help='maximum length of English in **img2txt** corpus')
parser.add_argument('--max_len_zh_cap', default=25, type=int, help='maximum length of Chinese in **img2txt** corpus')
parser.add_argument('--len_vis_input', type=int, default=100, help="The length of visual token input")
parser.add_argument('--wmt_N_lines', type=int, default=24752392, help='The total number of wmt lines')
parser.add_argument("--src_file", default='$DATA_ROOT/{}/annotations/{}_dataset.json',
type=str, help="The input data file name.")
parser.add_argument('--file_valid_jpgs', default='$DATA_ROOT/{}/annotations/{}_valid_jpgs.json', type=str)
parser.add_argument('--image_root', type=str, default='$DATA_ROOT/{}/region_feat_gvd_wo_bgd')
parser.add_argument('--region_bbox_file', default='raw_bbox/{}_detection_vg_100dets_vlp_checkpoint_trainval_bbox', type=str)
parser.add_argument('--region_det_file_prefix', default='feat_cls_1000/{}_detection_vg_100dets_vlp_checkpoint_trainval', type=str)
# General
parser.add_argument("--bert_model", default="bert-base-cased", type=str,
help="Bert pre-trained model selected in the list: bert-base-cased, bert-large-cased.")
parser.add_argument("--xml_vocab",type=str, default='./download_models/xml_vocab.json')
parser.add_argument("--xml_merge",type=str, default='./download_models/xml_merges.txt')
parser.add_argument("--config_path", default=None, type=str,
help="Bert config file path.")
parser.add_argument("--output_dir",
default='tmp',type=str,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--log_file",
default="training.log",
type=str,
help="The output directory where the log will be written.")
parser.add_argument("--model_recover_path",
default=None,
type=str,
help="The file of fine-tuned pretraining model.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training. This should ALWAYS be set to True.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=64,
type=int,
help="Total batch size for training.")
parser.add_argument("--learning_rate", default=3e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--label_smoothing", default=0, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay",
default=0.01,
type=float,
help="The weight decay rate for Adam.")
parser.add_argument("--finetune_decay",
action='store_true',
help="Weight decay to the original weights.")
parser.add_argument("--num_train_epochs",
default=30,
type=int,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument("--global_rank",
type=int,
default=-1,
help="global_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
# parser.add_argument('--fp16', action='store_true',
# help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--fp32_embedding', action='store_true',
help="Whether to use 32-bit float precision instead of 32-bit for embeddings")
# parser.add_argument('--loss_scale', type=float, default=0,
# help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
# "0 (default value): dynamic loss scaling.\n"
# "Positive power of 2: static loss scaling value.\n")
parser.add_argument('--amp', action='store_true',
help="Whether to use amp for fp16")
parser.add_argument('--from_scratch', action='store_true',
help="Initialize parameters with random values (i.e., training from scratch).")
parser.add_argument('--new_segment_ids', action='store_true',
help="Use new segment ids for bi-uni-directional LM.")
parser.add_argument('--tokenized_input', action='store_true',
help="Whether the input is tokenized.")
parser.add_argument('--trunc_seg', default='b',
help="Truncate_config: first truncate segment A/B (option: a, b).")
parser.add_argument('--always_truncate_tail', action='store_true',
help="Truncate_config: Whether we should always truncate tail.")
parser.add_argument("--mask_prob", default=0.15, type=float,
help="Number of prediction is sometimes less than max_pred when sequence is short.")
parser.add_argument('--max_pred', type=int, default=3,
help="Max tokens of prediction.")
parser.add_argument("--num_workers", default=4, type=int,
help="Number of workers for the data loader.")
parser.add_argument('--max_position_embeddings', type=int, default=None,
help="max position embeddings")
# Others for VLP
parser.add_argument('--enable_visdom', action='store_true')
parser.add_argument('--visdom_port', type=int, default=8888)
parser.add_argument('--enable_tensorboard', action='store_true')
parser.add_argument('--summary_steps', type=int, default=100)
parser.add_argument('--save_steps', type=int, default=5000)
# parser.add_argument('--resnet_model', type=str, default='imagenet_weights/resnet101.pth')
parser.add_argument('--split', type=str, nargs='+', default=['train', 'restval'])
parser.add_argument('--world_size', default = 1, type = int,
help = 'number of distributed processes')
parser.add_argument('--dist_url', default='file://[PT_OUTPUT_DIR]/nonexistent_file', type = str,
help = 'url used to set up distributed training')
parser.add_argument('--sche_mode', default='warmup_linear', type=str,
help="warmup_linear | warmup_constant | warmup_cosine")
parser.add_argument('--drop_prob', default=0.1, type=float)
parser.add_argument('--use_num_imgs', default=-1, type=int)
parser.add_argument('--vis_mask_prob', default=0, type=float)
parser.add_argument('--max_drop_worst_ratio', default=0, type=float)
parser.add_argument('--drop_after', default=6, type=int)
parser.add_argument('--s2s_prob', default=1, type=float,
help="Percentage of examples that are bi-uni-directional LM (seq2seq).")
parser.add_argument('--bi_prob', default=0, type=float,
help="Percentage of examples that are bidirectional LM.")
parser.add_argument('--enable_butd', action='store_true',
help='set to take in region features')
parser.add_argument('--tasks', default='img2txt',
help='img2txt | vqa2')
parser.add_argument('--relax_projection',
action='store_true',
help="Use different projection layers for tasks.")
parser.add_argument('--scst', action='store_true',
help='Self-critical sequence training')
parser.add_argument('--length_penalty', type=float, default=0,
help="Length penalty for beam search")
parser.add_argument('--ngram_size', type=int, default=3)
args = parser.parse_args()
dataset = {}
for d in args.dataset:
assert d in ['coco','aic','wmt']
if d == 'coco':
dataset[d] = {'max_len_a': args.len_vis_input, 'max_len_b': args.max_len_en_cap}
elif d == 'aic':
dataset[d] = {'max_len_a': args.len_vis_input, 'max_len_b': args.max_len_zh_cap}
else:# d == 'wmt':
dataset[d] = {'max_len_a': args.max_len_en, 'max_len_b': args.max_len_zh}
dataset[d]['max_seq_length'] = dataset[d]['max_len_a'] + dataset[d]['max_len_b'] + 3
args.dataset = dataset
print('global_rank: {}, local rank: {} Corpora: {}'.format(args.global_rank, args.local_rank, args.dataset))
#input()
args.mask_image_regions = (args.vis_mask_prob > 0) # whether to mask out image regions
args.dist_url = args.dist_url.replace('[PT_OUTPUT_DIR]', args.output_dir)
# arguments inspection
assert(args.tasks in ('img2txt', 'vqa2'))
assert args.enable_butd == True, 'only support region attn! featmap attn deprecated'
assert (not args.scst) or args.dataset == 'coco', 'scst support on coco only!'
if args.scst:
assert args.dataset == 'coco', 'scst support on coco only!'
assert args.max_pred == 0 and args.mask_prob == 0, 'no mask for scst!'
rl_crit = RewardCriterion()
if args.enable_butd:
assert(args.len_vis_input == 100)
args.region_bbox_file = os.path.join(args.image_root, args.region_bbox_file)
#args.region_det_file_prefix = os.path.join(args.image_root, args.region_det_file_prefix) if args.dataset in ('cc', 'coco') and args.region_det_file_prefix != '' else ''
args.region_det_file_prefix = os.path.join(args.image_root, args.region_det_file_prefix) # not support flickr30k now
recover_step = _get_max_step_model(args.output_dir)
# output config
if args.local_rank in [-1,0]:
os.makedirs(args.output_dir, exist_ok=True)
if recover_step:
json.dump(args.__dict__, open(os.path.join(
args.output_dir, 'opt_recover_from_{}.json'.format(recover_step)), 'w'), sort_keys=True, indent=2)
else:
json.dump(args.__dict__, open(os.path.join(
args.output_dir, 'opt.json'), 'w'), sort_keys=True, indent=2)
logging.basicConfig(
filename=os.path.join(args.output_dir, 'rank{}_'.format(args.local_rank)+args.log_file),
filemode='w',
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
logger.info(dataset)
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
# torch.distributed.init_process_group(backend='nccl', init_method = args.dist_url,
# world_size=args.world_size, rank=args.global_rank)
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, amp training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.amp))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = int(
args.train_batch_size / args.gradient_accumulation_steps)
# fix random seed
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
# plotting loss, optional
if args.enable_visdom:
import visdom
vis = visdom.Visdom(port=args.visdom_port, env=args.output_dir)
vis_window={'iter': None, 'score':None}
if args.enable_tensorboard:
from tensorboardX import SummaryWriter
if args.local_rank in [-1,0]:
writer = SummaryWriter(args.output_dir)
tokenizer_en = BertTokenizer.from_pretrained(
args.bert_model, do_lower_case=args.do_lower_case,
cache_dir=args.output_dir+'/.pretrained_model')
if args.max_position_embeddings:
tokenizer_en.max_len = args.max_position_embeddings
tokenizer_en= WhitespaceTokenizer() if args.tokenized_input else tokenizer_en
tokenizers = {'en':tokenizer_en}
if 'aic' in args.dataset or 'wmt' in args.dataset:
tokenizer_zh = XLMTokenizer(args.xml_vocab, args.xml_merge)
tokenizer_zh.tokenize = lambda x: tokenizer_zh._tokenize(x, lang='zh', bypass_tokenizer=True)
with open(args.xml_vocab,'r') as f:
tokenizer_zh.vocab = json.load(f)
tokenizers['zh'] = tokenizer_zh
indexer = Indexer([os.path.join(args.bert_model,'vocab.txt'), args.xml_vocab])
if args.do_train:
for corpus in args.dataset:
logger.info('\nCorpus {}'.format(corpus))
if corpus in ['coco', 'aic']:
tokenizer = tokenizers['en'] if corpus=='coco' else tokenizers['zh']
bi_uni_pipeline = [seq2seq_loader.Preprocess4Seq2seq(
corpus,
'zh' if corpus in ['aic'] else 'en',
args.max_pred, args.mask_prob,
list(tokenizer.vocab.keys()), indexer, max_len=args.dataset[corpus]['max_seq_length'],
preprocessed=True,
new_segment_ids=args.new_segment_ids,
truncate_config={
'max_len_b': args.dataset[corpus]['max_len_b'], 'trunc_seg': args.trunc_seg, 'always_truncate_tail':
args.always_truncate_tail},
mask_image_regions=args.mask_image_regions,
mode="s2s", len_vis_input=args.len_vis_input,
vis_mask_prob=args.vis_mask_prob, enable_butd=args.enable_butd,
region_bbox_file=args.region_bbox_file.format(corpus.upper(), corpus.lower()),
region_det_file_prefix=args.region_det_file_prefix.format(corpus.upper(), corpus.lower()),
local_rank=args.local_rank, load_vqa_ann=(args.tasks=='vqa2'))]
bi_uni_pipeline.append(seq2seq_loader.Preprocess4Seq2seq(
corpus,
'zh' if corpus in ['aic'] else 'en',
args.max_pred, args.mask_prob,
list(tokenizer.vocab.keys()), indexer, max_len=args.dataset[corpus]['max_seq_length'],
preprocessed=True,
new_segment_ids=args.new_segment_ids,
truncate_config={
'max_len_b': args.dataset[corpus]['max_len_b'], 'trunc_seg': args.trunc_seg, 'always_truncate_tail':
args.always_truncate_tail},
mask_image_regions=args.mask_image_regions,
mode="bi", len_vis_input=args.len_vis_input,
vis_mask_prob=args.vis_mask_prob, enable_butd=args.enable_butd,
region_bbox_file=args.region_bbox_file.format(corpus.upper(), corpus.lower()),
region_det_file_prefix=args.region_det_file_prefix.format(corpus.upper(), corpus.lower()),
local_rank=args.local_rank, load_vqa_ann=(args.tasks=='vqa2')))
split = args.split #'['train']
if corpus=='coco' and split[0]=='train':
split = split+['restval']
args.dataset[corpus]['train_dataset'] = seq2seq_loader.Img2txtDataset(
args.src_file.format(corpus.upper(), corpus.lower()),
args.image_root.format(corpus.upper()),
split, args.train_batch_size,
tokenizer,
args.dataset[corpus]['max_seq_length'],
preprocessed=True,
file_valid_jpgs=args.file_valid_jpgs.format(corpus.upper(), corpus.lower()),
bi_uni_pipeline=bi_uni_pipeline, use_num_imgs=args.use_num_imgs,
s2s_prob=args.s2s_prob, bi_prob=args.bi_prob,
enable_butd=args.enable_butd, tasks=args.tasks)
#----------------for validation----------------
if args.local_rank in [-1,0]:
decode_pipeline = [seq2seq_loader.Preprocess4Seq2seqDecoder(
corpus,
'zh' if corpus in ['aic'] else 'en',
list(tokenizer.vocab.keys()), indexer,
max_len=args.dataset[corpus]['max_seq_length'],
max_tgt_length=args.dataset[corpus]['max_len_b'], new_segment_ids=args.new_segment_ids,
mode='s2s', len_vis_input=args.len_vis_input, enable_butd=args.enable_butd,
region_bbox_file=args.region_bbox_file.format(corpus.upper(), corpus.lower()),
region_det_file_prefix=args.region_det_file_prefix.format(corpus.upper(), corpus.lower()))]
args.dataset[corpus]['valid_dataset'] = seq2seq_loader.Img2txtDataset(
args.src_file.format(corpus.upper(), corpus.lower()),
args.image_root.format(corpus.upper()),
'val', args.train_batch_size,
tokenizer,
args.dataset[corpus]['max_seq_length'],
preprocessed=True,
file_valid_jpgs=args.file_valid_jpgs.format(corpus.upper(), corpus.lower()),
bi_uni_pipeline=decode_pipeline, use_num_imgs=args.use_num_imgs,
s2s_prob=1, bi_prob=0,
enable_butd=args.enable_butd, tasks=args.tasks)
elif corpus == 'wmt':
#print(seq2seq_loader.__dict__)
bi_uni_pipeline = [seq2seq_loader.Preprocess4Seq2seqBilingual(
'wmt',
args.src_file.format(corpus.upper(), corpus.lower()),
args.max_pred, args.mask_prob,
list(indexer.vocab.keys()), tokenizers,
indexer, args.dataset[corpus]['max_seq_length'],
split=args.split,
preprocessed=True,
new_segment_ids=args.new_segment_ids,
truncate_config={
'max_len_a': args.dataset[corpus]['max_len_a'],
'max_len_b': args.dataset[corpus]['max_len_b'],
'trunc_seg': None, 'always_truncate_tail':args.always_truncate_tail},
mode='s2s', local_rank=args.local_rank)]
bi_uni_pipeline.append(seq2seq_loader.Preprocess4Seq2seqBilingual(
'wmt',
args.src_file.format(corpus.upper(), corpus.lower()),
args.max_pred, args.mask_prob,
list(indexer.vocab.keys()), tokenizers,
indexer, args.dataset[corpus]['max_seq_length'],
split=args.split,
preprocessed=True,
new_segment_ids=args.new_segment_ids,
truncate_config={
'max_len_a': args.dataset[corpus]['max_len_a'],
'max_len_b': args.dataset[corpus]['max_len_b'],
'trunc_seg': None, 'always_truncate_tail':args.always_truncate_tail},
mode='bi', local_rank=args.local_rank)
)
args.dataset[corpus]['train_dataset'] = seq2seq_loader.Txt2txtDataset(
args.wmt_N_lines,
args.split, args.train_batch_size,
tokenizers, args.dataset[corpus]['max_seq_length'],
preprocessed=True,
bi_uni_pipeline=bi_uni_pipeline,s2s_prob=args.s2s_prob, bi_prob=args.bi_prob)
if args.local_rank in [-1,0]:
#args.dataset[corpus+'_zh2en'] = {}
#sargs.dataset[corpus+'_en2zh'] = {}
pipeline = [seq2seq_loader.Preprocess4Seq2SeqBilingualDecoder(
corpus='wmt',file_src=args.src_file.format(corpus.upper(), corpus.lower()), src_lang='zh',
indexer=indexer, tokenizers=tokenizers,
max_len=args.max_len_en+args.max_len_zh+3, max_tgt_length=args.max_len_en,
preprocessed=True, new_segment_ids=args.new_segment_ids, mode='s2s')]
args.dataset[corpus]['valid_dataset_zh2en'] = seq2seq_loader.Txt2txtDataset(N_lines=1999, split='dev',
batch_size=1, tokenizers=tokenizers,
max_len=args.max_len_en+args.max_len_zh+3,
preprocessed=True, bi_uni_pipeline=pipeline, s2s_prob=1, bi_prob=0)
pipeline = [seq2seq_loader.Preprocess4Seq2SeqBilingualDecoder(
corpus='wmt',file_src=args.src_file.format(corpus.upper(), corpus.lower()), src_lang='en',
indexer=indexer, tokenizers=tokenizers,
max_len=args.max_len_en+args.max_len_zh+3, max_tgt_length=args.max_len_zh,
preprocessed=True, new_segment_ids=args.new_segment_ids, mode='s2s')]
args.dataset[corpus]['valid_dataset_en2zh'] = seq2seq_loader.Txt2txtDataset(N_lines=1999, split='dev',
batch_size=1, tokenizers=tokenizers,
max_len=args.max_len_en+args.max_len_zh+3,
preprocessed=True, bi_uni_pipeline=pipeline, s2s_prob=1, bi_prob=0)
train_dataset = seq2seq_loader.CombinedDataset(
datasets_dict={c: args.dataset[c]['train_dataset'] for c in args.dataset})
#train_sampler = RandomSampler(train_dataset, replacement=False)
logger.info('************Data statistics******************')
num_samples = []
total_num_samples = 0
for corpus in args.dataset:
N = len(args.dataset[corpus]['train_dataset'])
logger.info('{} #{}'.format(corpus, N))
num_samples.append(N)
total_num_samples += N
logger.info('total number samples {}'.format(total_num_samples))
logger.info('number samples per epoch {}'.format(total_num_samples*args.sampling_beta))
if args.local_rank == -1:
train_batch_sampler = seq2seq_loader.WeightedRandom_BatchSampler(
num_samples, args.train_batch_size,
args.sampling_alpha, num_batches=math.ceil(total_num_samples*args.sampling_beta
/args.train_batch_size),
drop_last=False) #to-check
# train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size,
# sampler=SequentialSampler(train_dataset), num_workers=args.num_workers,
# collate_fn=batch_list_to_batch_tensors, pin_memory=True)
#num_batch the total number of batch per epoch
else:
num_batches = math.ceil(total_num_samples*args.sampling_beta/torch.distributed.get_world_size()/args.train_batch_size)
train_batch_sampler = seq2seq_loader.WeightedRandom_DistributedBatchSampler(
num_samples, args.train_batch_size,
args.sampling_alpha, num_batches=num_batches,
drop_last=False)
train_dataloader = torch.utils.data.DataLoader(train_dataset,
batch_sampler=train_batch_sampler, num_workers=args.num_workers,
collate_fn=batch_list_to_batch_tensors, pin_memory=False)
#batch_sampler batch_sampler (Sampler or Iterable, optional)
#– like sampler, but returns a batch of indices at a time.
#Mutually exclusive with batch_size, shuffle, sampler, and drop_last.
# note: args.train_batch_size has been changed to (/= args.gradient_accumulation_steps)
t_total = int(len(train_dataloader) * args.num_train_epochs * 1. /
args.gradient_accumulation_steps) # number of backward steps
amp_handle = None
if args.amp:
from apex import amp
# amp_handle = amp.init(enable_caching=True)
# logger.info("enable fp16 with amp")
# Prepare model
cls_num_labels = 2
#type_vocab_size = 6 if args.new_segment_ids else 2
type_vocab_size = 12 if args.new_segment_ids else 12
relax_projection = 4 if args.relax_projection else 0
task_idx_proj = 3 if args.tasks == 'img2txt' else 0
mask_word_id, eos_word_ids, pad_word_ids = indexer(["[MASK]", "[SEP]", "[PAD]"]) # index in BERT vocab: 103, 102, 0
if (recover_step is None) and (args.model_recover_path is None):
# if _state_dict == {}, the parameters are randomly initialized
# if _state_dict == None, the parameters are initialized with bert-init
assert args.scst == False, 'must init from maximum likelihood training'
_state_dict = {} if args.from_scratch else None
model = BertForPreTrainingLossMask.from_pretrained(
args.bert_model, state_dict=_state_dict, num_labels=cls_num_labels,
vocab_size=len(indexer),
type_vocab_size=type_vocab_size, relax_projection=relax_projection,
config_path=args.config_path, task_idx=task_idx_proj,
max_position_embeddings=args.max_position_embeddings, label_smoothing=args.label_smoothing,
fp32_embedding=args.fp32_embedding, cache_dir=args.output_dir+'/.pretrained_model_{}'.format(args.local_rank),
drop_prob=args.drop_prob, enable_butd=args.enable_butd,
len_vis_input=args.len_vis_input, tasks=args.tasks,
mask_word_id=mask_word_id, eos_id=eos_word_ids,
length_penalty=args.length_penalty, forbid_duplicate_ngrams=False, forbid_ignore_set=None,
ngram_size=args.ngram_size, min_len=0)
global_step = 0
else:
if not recover_step==None:
logger.info("***** Recover model: %d *****", recover_step)
model_recover = torch.load(os.path.join(
args.output_dir, "model.{0}.bin".format(recover_step)))
# recover_step == number of epochs
# global_step = math.floor(
# recover_step * t_total * 1. / args.num_train_epochs)
global_step = recover_step
elif args.model_recover_path:
logger.info("***** Recover model: %s *****",
args.model_recover_path)
model_recover = torch.load(args.model_recover_path)
global_step = 0
if not args.scst:
model = BertForPreTrainingLossMask.from_pretrained(
args.bert_model, state_dict=model_recover, num_labels=cls_num_labels,
vocab_size=len(indexer),
type_vocab_size=type_vocab_size, relax_projection=relax_projection,
config_path=args.config_path, task_idx=task_idx_proj,
max_position_embeddings=args.max_position_embeddings, label_smoothing=args.label_smoothing,
fp32_embedding=args.fp32_embedding, cache_dir=args.output_dir+'/.pretrained_model_{}'.format(args.local_rank),
drop_prob=args.drop_prob, enable_butd=args.enable_butd,
mask_word_id=mask_word_id, eos_id=eos_word_ids,
length_penalty=args.length_penalty, forbid_duplicate_ngrams=False, forbid_ignore_set=None,
len_vis_input=args.len_vis_input, tasks=args.tasks)
else:
model = BertForSeq2SeqDecoder.from_pretrained(args.bert_model,
max_position_embeddings=args.max_position_embeddings, config_path=args.config_path,
state_dict=model_recover, num_labels=cls_num_labels, type_vocab_size=type_vocab_size,
task_idx=task_idx_proj, mask_word_id=mask_word_id, search_beam_size=1,
eos_id=eos_word_ids, enable_butd=args.enable_butd,
len_vis_input=args.len_vis_input)
del model_recover
torch.cuda.empty_cache()
# deprecated
# from vlp.resnet import resnet
# cnn = resnet(args.resnet_model, _num_layers=101, _fixed_block=4, pretrained=True) # no finetuning
if args.amp:
#model.half()
# cnn.half()
if args.fp32_embedding:
raise NotImplementedError
model.bert.embeddings.word_embeddings.float()
model.bert.embeddings.position_embeddings.float()
model.bert.embeddings.token_type_embeddings.float()
model.to(device)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(
nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.amp:
try:
# from apex.optimizers import FP16_Optimizer
#from pytorch_pretrained_bert.optimization_fp16 import FP16_Optimizer_State
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False)
# if args.loss_scale == 0:
# optimizer = FP16_Optimizer_State(
# optimizer, dynamic_loss_scale=True)
# else:
# optimizer = FP16_Optimizer_State(
# optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
schedule=args.sche_mode,
t_total=t_total)
if args.amp:
model, optimizer = amp.initialize(model, optimizer, opt_level='O2')#'02')
if args.local_rank != -1:
if args.amp:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError(
'Please install apex from https://www.github.com/nvidia/apex to use distributed fp16 for training.')
model = DDP(model,delay_allreduce=True)
else:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
elif n_gpu > 1:
model = DataParallelImbalance(model)
if recover_step:
logger.info("***** Recover optimizer: %d *****", recover_step)
optim_file = os.path.join(args.output_dir, "optim.{0}.bin".format(recover_step))
if os.path.exists(optim_file):
optim_recover = torch.load(optim_file)
if hasattr(optim_recover, 'state_dict'):
optim_recover = optim_recover.state_dict()
optimizer.load_state_dict(optim_recover)
else:
logger.info("{} does not exists. Fail to recover optim".format(optim_file))
#disable
# if args.loss_scale == 0:
# logger.info("***** Recover optimizer: dynamic_loss_scale *****")
# optimizer.dynamic_loss_scale = True
logger.info("***** CUDA.empty_cache() *****")
torch.cuda.empty_cache()
if args.do_train:
logger.info("***** Running training *****")
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", t_total)
logger.info(" Loader length = %d", len(train_dataloader))
model.train()
if recover_step:
step_per_epoch = len(train_dataloader)/args.gradient_accumulation_steps
start_epoch = math.ceil(recover_step/step_per_epoch)
else:
start_epoch = 1
for i_epoch in trange(start_epoch, args.num_train_epochs+1, desc="Epoch"):
if args.local_rank >= 0:
train_batch_sampler.set_epoch(i_epoch-1)
if args.local_rank in [-1,0]:
iter_bar = tqdm(train_dataloader, desc='Iter (loss=X.XXX)')
else:
iter_bar = train_dataloader
nbatches = len(train_dataloader)
train_loss = []
pretext_loss = []
vqa2_loss = []
scst_reward = []
is_first=True
corpus_count = {}
for step, iter_output in enumerate(iter_bar):
info_, batch = iter_output[0], iter_output[1]
batch = [t.to(device) for t in batch]
curr_corpus = info_[0][0]
if curr_corpus not in corpus_count:
corpus_count[curr_corpus] = 1
else:
corpus_count[curr_corpus] += 1
if info_[0][0] in ['coco','aic']:
input_ids, segment_ids, input_mask, lm_label_ids, masked_pos, masked_weights, is_next, task_idx, img, vis_masked_pos, vis_pe, ans_labels = batch
if args.enable_butd:
conv_feats = img.data # Bx100x2048
vis_pe = vis_pe.data
else:
conv_feats, _ = cnn(img.data) # Bx2048x7x7
conv_feats = conv_feats.view(conv_feats.size(0), conv_feats.size(1),
-1).permute(0,2,1).contiguous()
if not args.scst:
model.train()
loss_tuple = model('img2txt',conv_feats, vis_pe, input_ids, segment_ids,
input_mask, lm_label_ids, ans_labels, is_next, masked_pos=masked_pos,
masked_weights=masked_weights, task_idx=task_idx,
vis_masked_pos=vis_masked_pos, mask_image_regions=args.mask_image_regions,
drop_worst_ratio=args.max_drop_worst_ratio if i_epoch > args.drop_after else 0)
mean_reward = loss_tuple[0].new(1).fill_(0)
else:
# scst training
model.eval()
position_ids = torch.arange(input_ids.size(1), dtype=input_ids.dtype,
device=input_ids.device).unsqueeze(0).expand_as(input_ids)
input_dummy = input_ids[:, :args.len_vis_input + 2] # +2 for [CLS] and [SEP]
greedy_res = input_ids.new(input_ids.size(0), input_ids.size(1)-args.len_vis_input-2).fill_(0)
gen_result = input_ids.new(input_ids.size(0), input_ids.size(1)-args.len_vis_input-2).fill_(0)
with torch.no_grad():
greedy_res_raw, _ = model(conv_feats, vis_pe, input_dummy, segment_ids,
position_ids, input_mask, task_idx=task_idx, sample_mode='greedy')
for b in range(greedy_res_raw.size(0)):
for idx in range(greedy_res_raw.size(1)):
if greedy_res_raw[b][idx] not in [eos_word_ids, pad_word_ids]:
greedy_res[b][idx] = greedy_res_raw[b][idx]
else:
if greedy_res_raw[b][idx] == eos_word_ids:
greedy_res[b][idx] = eos_word_ids
break
model.train()
gen_result_raw, sample_logprobs = model(conv_feats, vis_pe, input_dummy, segment_ids,
position_ids, input_mask, task_idx=task_idx, sample_mode='sample')
for b in range(gen_result_raw.size(0)):
for idx in range(gen_result_raw.size(1)):
if gen_result_raw[b][idx] not in [eos_word_ids, pad_word_ids]:
gen_result[b][idx] = gen_result_raw[b][idx]
else:
if gen_result_raw[b][idx] == eos_word_ids:
gen_result[b][idx] = eos_word_ids
break
gt_ids = input_ids[:, args.len_vis_input+2:]
reward = get_self_critical_reward(greedy_res, gt_ids, gen_result, gt_ids.size(0))
reward = torch.from_numpy(reward).float().to(gen_result.device)
mean_reward = reward.mean()
loss = rl_crit(sample_logprobs, gen_result.data, reward)
loss_tuple = [loss, loss.new(1).fill_(0.), loss.new(1).fill_(0.)]
else: #wmt
input_ids, segment_ids, input_mask, lm_label_ids, masked_pos, masked_weights, is_next, task_idx = batch
model.train()
loss_tuple = model('txt2txt',
input_ids=input_ids, token_type_ids=segment_ids,
attention_mask=input_mask, masked_lm_labels=lm_label_ids,
ans_labels=None, next_sentence_label=is_next,
masked_pos=masked_pos, masked_weights=masked_weights, task_idx=task_idx,
drop_worst_ratio=args.max_drop_worst_ratio if i_epoch > args.drop_after else 0)
mean_reward = loss_tuple[0].new(1).fill_(0)
# disable pretext_loss_deprecated for now
masked_lm_loss, pretext_loss_deprecated, ans_loss = loss_tuple
if n_gpu > 1: # mean() to average on multi-gpu. For dist, this is done through gradient addition.
masked_lm_loss = masked_lm_loss.mean()
pretext_loss_deprecated = pretext_loss_deprecated.mean()
ans_loss = ans_loss.mean()
loss = masked_lm_loss #+ pretext_loss_deprecated + ans_loss
# logging for each step (i.e., before normalization by args.gradient_accumulation_steps)
#iter_bar.set_description('Iter (loss=%5.3f)' % loss.item())
train_loss.append(loss.item())
pretext_loss.append(pretext_loss_deprecated.item())
vqa2_loss.append(ans_loss.item())
scst_reward.append(mean_reward.item())
# if step%100 == 0:
# logger.info("Epoch {}, Iter {}, Loss {:.2f}, Pretext {:.2f}, VQA2 {:.2f}, Mean R {:.3f}\n".format(i_epoch, step, np.mean(train_loss), np.mean(pretext_loss), np.mean(vqa2_loss), np.mean(scst_reward)))
if args.enable_visdom:
if vis_window['iter'] is None:
vis_window['iter'] = vis.line(
X=np.tile(np.arange((i_epoch-1)*nbatches+step,
(i_epoch-1)*nbatches+step+1), (1,1)).T,
Y=np.column_stack((np.asarray([np.mean(train_loss)]),)),
opts=dict(title='Training Loss',
xlabel='Training Iteration',
ylabel='Loss',
legend=['total'])
)
else:
vis.line(
X=np.tile(np.arange((i_epoch-1)*nbatches+step,
(i_epoch-1)*nbatches+step+1), (1,1)).T,
Y=np.column_stack((np.asarray([np.mean(train_loss)]),)),
opts=dict(title='Training Loss',
xlabel='Training Iteration',
ylabel='Loss',
legend=['total']),
win=vis_window['iter'],
update='append'
)
# ensure that accumlated gradients are normalized
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
# if amp_handle:
# amp_handle._clear_cache()
else:
loss.backward()
if (step + 1) % args.gradient_accumulation_steps == 0:
lr_this_step = args.learning_rate * \
warmup_linear(global_step/t_total,
args.warmup_proportion)
if args.amp:
# modify learning rate with special warm up BERT uses
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
if args.enable_tensorboard and global_step%args.summary_steps==0:
if args.local_rank in [-1,0]:
writer.add_scalar('Training_Loss_{}'.format(info_[0][0]), train_loss[-1], global_step)
if global_step%args.save_steps==0:
# Save a trained model
logger.info(
"** ** * Saving fine-tuned model and optimizer ** ** * ")
model_to_save = model.module if hasattr(
model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(
args.output_dir, "model.{0}.bin".format(global_step))
output_optim_file = os.path.join(
args.output_dir, "optim.{0}.bin".format(global_step))
if args.local_rank in (-1, 0): # save model if the first device or no dist
torch.save(copy.deepcopy(model_to_save).cpu().state_dict(), output_model_file)
#torch.save(optimizer.state_dict(), output_optim_file) # disable for now, need to sanitize state and ship everthing back to cpu
if args.local_rank in [0,-1] and global_step:
print('\n')
logger.info("** ** * Validation global steps {} ** ** * ".format(global_step))
corpus_valset = []
if 'wmt' in args.dataset:
corpus_valset = [['wmt_zh2en', args.dataset['wmt']['valid_dataset_zh2en']],
['wmt_en2zh', args.dataset['wmt']['valid_dataset_en2zh']]]
if 'aic' in args.dataset:
corpus_valset.append(['aic', args.dataset['aic']['valid_dataset']])
if 'coco' in args.dataset:
corpus_valset.append(['coco', args.dataset['coco']['valid_dataset']])
for corpus, val_dataset in corpus_valset:
print('Begin decoding '+corpus)
batch_size = 1 if 'wmt' in corpus else args.train_batch_size
val_dataloader = torch.utils.data.DataLoader(
val_dataset, batch_size=batch_size,
sampler=SequentialSampler(val_dataset), num_workers=args.num_workers,
collate_fn=batch_list_to_batch_tensors, pin_memory=False)
logger.info("corpus {}".format(corpus))
model.eval()
output_lines = {}
predictions = []
val_iter_bar = tqdm(val_dataloader)
for step_val, val_iter_output in enumerate(val_iter_bar):
info_, batch = val_iter_output[0],val_iter_output[1]
with torch.no_grad():
batch = [t.to(device) for t in batch]
if 'wmt' in corpus:
input_ids, segment_ids, position_ids, input_mask, task_idx = batch
conv_feats, vis_pe = None,None
else:
input_ids, segment_ids, position_ids, input_mask, task_idx, img, vis_pe = batch
if args.enable_butd:
conv_feats = img.data # Bx100x2048
vis_pe = vis_pe.data
else:
conv_feats, _ = cnn(img.data) # Bx2048x7x7
conv_feats = conv_feats.view(conv_feats.size(0), conv_feats.size(1),
-1).permute(0,2,1).contiguous()
if args.amp:
conv_feats = conv_feats.half()
vis_pe = vis_pe.half()
beam_size = 1
traces = model.module.decode(
vis_feats=conv_feats, vis_pe=vis_pe,
input_ids=input_ids, token_type_ids=segment_ids,
position_ids=position_ids, input_mask=input_mask,
search_beam_size=beam_size, task_idx=task_idx,
mode='txt2txt' if 'wmt' in corpus else 'img2txt',
sample_mode='greedy') #validation greedy
if beam_size > 1:
traces = {k: v.tolist() for k, v in traces.items()}
output_ids = traces['pred_seq']
else:
output_ids = traces[0].tolist()
for ii,w_ids in enumerate(output_ids):
output_buf = indexer.convert_ids_to_tokens(w_ids)
output_tokens = []
for t in output_buf:
if t in ("[SEP]", "[PAD]"):
break
output_tokens.append(t)
output_sequence = ' '.join(detokenize(output_tokens))
#print(output_sequence)
if corpus in ['aic','coco']:
if corpus=='coco':
id_ = int(info_[ii][2].split('_')[2])
else:
id_ = info_[ii][2]
predictions.append({'image_id':id_,'caption':output_sequence})
else:
if corpus=='wmt_zh2en':
#output_sequence = ' '.join(detokenize(output_tokens))
logging.disable(logging.ERROR)
with MosesDetokenizer('en') as mosedetokenize:
output_sequence = mosedetokenize(detokenize(output_tokens))
logging.disable(logging.NOTSET)
output_sequence = output_sequence.replace(' @ - @ ','-')
if corpus=='wmt_en2zh':
output_sequence = ''.join(detokenize(output_tokens)).replace('</w>','')
predictions.append(output_sequence)
if corpus in ['aic','coco']:
with open(os.path.join(args.output_dir,'{}_{}_predictions.json').format(global_step, corpus),'w') as f:
json.dump(predictions, f)
print('\nBegin evaluating '+corpus)
lang_stats = language_eval(corpus, predictions,
args.model_recover_path.split('/')[-2]+'-'+'val'+'-'+args.model_recover_path.split('/')[-1].split('.')[-2],
'val',
['Bleu','METEOR','Rouge','CIDEr'])
with open(os.path.join(args.output_dir,'{}_{}_scores.json').format(global_step, corpus),'w') as f:
json.dump(lang_stats, f)
for s in lang_stats:
if s in ['Bleu_4','CIDEr','METEOR','Rouge']:
logger.info('{}:{:.4}'.format(s, lang_stats[s]))
if args.enable_tensorboard:
for s in lang_stats:
if s in ['Bleu_4','CIDEr','METEOR','Rouge']:
writer.add_scalar('{}_{}'.format(s,corpus), lang_stats[s], global_step)
else:
print('\nBegin evaluating '+corpus)
tgtlang = corpus[-2:]
with open(os.path.join(args.output_dir,'{}_{}_predictions.json').format(global_step, corpus),'w') as f:
for p in predictions:
f.writelines(p+'\n')
root = '/data/private/chenyutong/dataset/wmt/devset/newstest2017_detok.'
with open(root+tgtlang,'r') as f:
reference = f.readlines()
reference = [r.strip() for r in reference]
import sacrebleu
if tgtlang=='zh':
bleu = sacrebleu.corpus_bleu(predictions, [reference[:len(predictions)]], tokenize='zh')
else:
bleu = sacrebleu.corpus_bleu(predictions, [reference[:len(predictions)]])
bleu = bleu.score
logger.info('Sacrebleu.corpus_bleu {:.4}'.format(bleu))
if args.enable_tensorboard:
writer.add_scalar('sacrebleu_{}'.format(corpus), bleu, global_step)
global_step += 1
logger.info("***** CUDA.empty_cache() *****")
torch.cuda.empty_cache()
if args.local_rank >= 0:
torch.distributed.barrier()
logger.info("*Corpus Count:{}".format(corpus_count))
if __name__ == "__main__":
    # Script entry point: runs the full training/validation driver defined
    # above (argument parsing, distributed setup, data loading, train loop).
    main()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"pytorch_pretrained_bert.optimization.BertAdam",
"pytorch_pretrained_bert.tokenization.BertTokenizer.from_pretrained",
"visdom.Visdom",
"torch.cuda.device_count",
"transformers.XLMTokenizer",
"pytorch_pretrained_bert.optimization.warmup_linear",
"pathl... | [((162, 210), 'sys.path.append', 'sys.path.append', (['"""/data/private/chenyutong/VLP/"""'], {}), "('/data/private/chenyutong/VLP/')\n", (177, 210), False, 'import sys\n'), ((2108, 2133), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2131, 2133), False, 'import argparse\n'), ((14404, 14431), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (14421, 14431), False, 'import logging\n'), ((15575, 15597), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (15586, 15597), False, 'import random, math\n'), ((15602, 15627), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (15616, 15627), True, 'import numpy as np\n'), ((15632, 15660), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (15649, 15660), False, 'import torch\n'), ((16114, 16249), 'pytorch_pretrained_bert.tokenization.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['args.bert_model'], {'do_lower_case': 'args.do_lower_case', 'cache_dir': "(args.output_dir + '/.pretrained_model')"}), "(args.bert_model, do_lower_case=args.\n do_lower_case, cache_dir=args.output_dir + '/.pretrained_model')\n", (16143, 16249), False, 'from pytorch_pretrained_bert.tokenization import BertTokenizer, WhitespaceTokenizer, Indexer\n'), ((35998, 36022), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (36020, 36022), False, 'import torch\n'), ((1270, 1309), 'os.path.join', 'os.path.join', (['output_dir', '"""model.*.bin"""'], {}), "(output_dir, 'model.*.bin')\n", (1282, 1309), False, 'import os\n'), ((1341, 1380), 'os.path.join', 'os.path.join', (['output_dir', '"""optim.*.bin"""'], {}), "(output_dir, 'optim.*.bin')\n", (1353, 1380), False, 'import os\n'), ((1655, 1694), 'os.path.join', 'os.path.join', (['output_dir', '"""model.*.bin"""'], {}), "(output_dir, 'model.*.bin')\n", (1667, 1694), False, 'import os\n'), ((1726, 1765), 'os.path.join', 'os.path.join', 
(['output_dir', '"""optim.*.bin"""'], {}), "(output_dir, 'optim.*.bin')\n", (1738, 1765), False, 'import os\n'), ((13240, 13292), 'os.path.join', 'os.path.join', (['args.image_root', 'args.region_bbox_file'], {}), '(args.image_root, args.region_bbox_file)\n', (13252, 13292), False, 'import os\n'), ((13509, 13567), 'os.path.join', 'os.path.join', (['args.image_root', 'args.region_det_file_prefix'], {}), '(args.image_root, args.region_det_file_prefix)\n', (13521, 13567), False, 'import os\n'), ((13720, 13763), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (13731, 13763), False, 'import os\n'), ((14632, 14657), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (14655, 14657), False, 'import torch\n'), ((14676, 14714), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (14697, 14714), False, 'import torch\n'), ((14732, 14769), 'torch.device', 'torch.device', (['"""cuda"""', 'args.local_rank'], {}), "('cuda', args.local_rank)\n", (14744, 14769), False, 'import torch\n'), ((15047, 15099), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (15083, 15099), False, 'import torch\n'), ((15687, 15724), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (15713, 15724), False, 'import torch\n'), ((15819, 15876), 'visdom.Visdom', 'visdom.Visdom', ([], {'port': 'args.visdom_port', 'env': 'args.output_dir'}), '(port=args.visdom_port, env=args.output_dir)\n', (15832, 15876), False, 'import visdom\n'), ((16375, 16396), 'pytorch_pretrained_bert.tokenization.WhitespaceTokenizer', 'WhitespaceTokenizer', ([], {}), '()\n', (16394, 16396), False, 'from pytorch_pretrained_bert.tokenization import BertTokenizer, WhitespaceTokenizer, Indexer\n'), ((16555, 16599), 'transformers.XLMTokenizer', 'XLMTokenizer', (['args.xml_vocab', 
'args.xml_merge'], {}), '(args.xml_vocab, args.xml_merge)\n', (16567, 16599), False, 'from transformers import XLMTokenizer\n'), ((25928, 26038), 'vlp.seq2seq_loader.CombinedDataset', 'seq2seq_loader.CombinedDataset', ([], {'datasets_dict': "{c: args.dataset[c]['train_dataset'] for c in args.dataset}"}), "(datasets_dict={c: args.dataset[c][\n 'train_dataset'] for c in args.dataset})\n", (25958, 26038), True, 'import vlp.seq2seq_loader as seq2seq_loader\n'), ((27730, 27905), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_sampler': 'train_batch_sampler', 'num_workers': 'args.num_workers', 'collate_fn': 'batch_list_to_batch_tensors', 'pin_memory': '(False)'}), '(train_dataset, batch_sampler=\n train_batch_sampler, num_workers=args.num_workers, collate_fn=\n batch_list_to_batch_tensors, pin_memory=False)\n', (27757, 27905), False, 'import torch\n'), ((32318, 32342), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (32340, 32342), False, 'import torch\n'), ((33691, 33780), 'apex.optimizers.FusedAdam', 'FusedAdam', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'bias_correction': '(False)'}), '(optimizer_grouped_parameters, lr=args.learning_rate,\n bias_correction=False)\n', (33700, 33780), False, 'from apex.optimizers import FusedAdam\n'), ((34132, 34271), 'pytorch_pretrained_bert.optimization.BertAdam', 'BertAdam', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'warmup': 'args.warmup_proportion', 'schedule': 'args.sche_mode', 't_total': 't_total'}), '(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.\n warmup_proportion, schedule=args.sche_mode, t_total=t_total)\n', (34140, 34271), False, 'from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear\n'), ((34428, 34476), 'apex.amp.initialize', 'amp.initialize', (['model', 'optimizer'], {'opt_level': '"""O2"""'}), "(model, optimizer, opt_level='O2')\n", (34442, 34476), False, 'from apex import 
amp\n'), ((35423, 35449), 'os.path.exists', 'os.path.exists', (['optim_file'], {}), '(optim_file)\n', (35437, 35449), False, 'import os\n'), ((36540, 36600), 'tqdm.trange', 'trange', (['start_epoch', '(args.num_train_epochs + 1)'], {'desc': '"""Epoch"""'}), "(start_epoch, args.num_train_epochs + 1, desc='Epoch')\n", (36546, 36600), False, 'from tqdm import tqdm, trange\n'), ((16063, 16093), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['args.output_dir'], {}), '(args.output_dir)\n', (16076, 16093), False, 'from tensorboardX import SummaryWriter\n'), ((16779, 16791), 'json.load', 'json.load', (['f'], {}), '(f)\n', (16788, 16791), False, 'import json\n'), ((16856, 16898), 'os.path.join', 'os.path.join', (['args.bert_model', '"""vocab.txt"""'], {}), "(args.bert_model, 'vocab.txt')\n", (16868, 16898), False, 'import os\n'), ((27499, 27660), 'vlp.seq2seq_loader.WeightedRandom_DistributedBatchSampler', 'seq2seq_loader.WeightedRandom_DistributedBatchSampler', (['num_samples', 'args.train_batch_size', 'args.sampling_alpha'], {'num_batches': 'num_batches', 'drop_last': '(False)'}), '(num_samples, args.\n train_batch_size, args.sampling_alpha, num_batches=num_batches,\n drop_last=False)\n', (27552, 27660), True, 'import vlp.seq2seq_loader as seq2seq_loader\n'), ((31822, 32225), 'pytorch_pretrained_bert.modeling.BertForSeq2SeqDecoder.from_pretrained', 'BertForSeq2SeqDecoder.from_pretrained', (['args.bert_model'], {'max_position_embeddings': 'args.max_position_embeddings', 'config_path': 'args.config_path', 'state_dict': 'model_recover', 'num_labels': 'cls_num_labels', 'type_vocab_size': 'type_vocab_size', 'task_idx': 'task_idx_proj', 'mask_word_id': 'mask_word_id', 'search_beam_size': '(1)', 'eos_id': 'eos_word_ids', 'enable_butd': 'args.enable_butd', 'len_vis_input': 'args.len_vis_input'}), '(args.bert_model,\n max_position_embeddings=args.max_position_embeddings, config_path=args.\n config_path, state_dict=model_recover, num_labels=cls_num_labels,\n 
type_vocab_size=type_vocab_size, task_idx=task_idx_proj, mask_word_id=\n mask_word_id, search_beam_size=1, eos_id=eos_word_ids, enable_butd=args\n .enable_butd, len_vis_input=args.len_vis_input)\n', (31859, 32225), False, 'from pytorch_pretrained_bert.modeling import BertForPreTrainingLossMask, BertForSeq2SeqDecoder\n'), ((34834, 34866), 'apex.parallel.DistributedDataParallel', 'DDP', (['model'], {'delay_allreduce': '(True)'}), '(model, delay_allreduce=True)\n', (34837, 34866), True, 'from apex.parallel import DistributedDataParallel as DDP\n'), ((34900, 35043), 'torch.nn.parallel.DistributedDataParallel', 'torch.nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[args.local_rank]', 'output_device': 'args.local_rank', 'find_unused_parameters': '(True)'}), '(model, device_ids=[args.\n local_rank], output_device=args.local_rank, find_unused_parameters=True)\n', (34941, 35043), False, 'import torch\n'), ((35199, 35227), 'misc.data_parallel.DataParallelImbalance', 'DataParallelImbalance', (['model'], {}), '(model)\n', (35220, 35227), False, 'from misc.data_parallel import DataParallelImbalance\n'), ((35479, 35501), 'torch.load', 'torch.load', (['optim_file'], {}), '(optim_file)\n', (35489, 35501), False, 'import torch\n'), ((36436, 36476), 'math.ceil', 'math.ceil', (['(recover_step / step_per_epoch)'], {}), '(recover_step / step_per_epoch)\n', (36445, 36476), False, 'import random, math\n'), ((56545, 56569), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (56567, 56569), False, 'import torch\n'), ((30806, 30841), 'torch.load', 'torch.load', (['args.model_recover_path'], {}), '(args.model_recover_path)\n', (30816, 30841), False, 'import torch\n'), ((36763, 36811), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'desc': '"""Iter (loss=X.XXX)"""'}), "(train_dataloader, desc='Iter (loss=X.XXX)')\n", (36767, 36811), False, 'from tqdm import tqdm, trange\n'), ((56624, 56651), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], 
{}), '()\n', (56649, 56651), False, 'import torch\n'), ((14016, 14057), 'os.path.join', 'os.path.join', (['args.output_dir', '"""opt.json"""'], {}), "(args.output_dir, 'opt.json')\n", (14028, 14057), False, 'import os\n'), ((14557, 14582), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14580, 14582), False, 'import torch\n'), ((23793, 24039), 'vlp.seq2seq_loader.Txt2txtDataset', 'seq2seq_loader.Txt2txtDataset', (['args.wmt_N_lines', 'args.split', 'args.train_batch_size', 'tokenizers', "args.dataset[corpus]['max_seq_length']"], {'preprocessed': '(True)', 'bi_uni_pipeline': 'bi_uni_pipeline', 's2s_prob': 'args.s2s_prob', 'bi_prob': 'args.bi_prob'}), "(args.wmt_N_lines, args.split, args.\n train_batch_size, tokenizers, args.dataset[corpus]['max_seq_length'],\n preprocessed=True, bi_uni_pipeline=bi_uni_pipeline, s2s_prob=args.\n s2s_prob, bi_prob=args.bi_prob)\n", (23822, 24039), True, 'import vlp.seq2seq_loader as seq2seq_loader\n'), ((26850, 26923), 'math.ceil', 'math.ceil', (['(total_num_samples * args.sampling_beta / args.train_batch_size)'], {}), '(total_num_samples * args.sampling_beta / args.train_batch_size)\n', (26859, 26923), False, 'import random, math\n'), ((24813, 25029), 'vlp.seq2seq_loader.Txt2txtDataset', 'seq2seq_loader.Txt2txtDataset', ([], {'N_lines': '(1999)', 'split': '"""dev"""', 'batch_size': '(1)', 'tokenizers': 'tokenizers', 'max_len': '(args.max_len_en + args.max_len_zh + 3)', 'preprocessed': '(True)', 'bi_uni_pipeline': 'pipeline', 's2s_prob': '(1)', 'bi_prob': '(0)'}), "(N_lines=1999, split='dev', batch_size=1,\n tokenizers=tokenizers, max_len=args.max_len_en + args.max_len_zh + 3,\n preprocessed=True, bi_uni_pipeline=pipeline, s2s_prob=1, bi_prob=0)\n", (24842, 25029), True, 'import vlp.seq2seq_loader as seq2seq_loader\n'), ((25616, 25832), 'vlp.seq2seq_loader.Txt2txtDataset', 'seq2seq_loader.Txt2txtDataset', ([], {'N_lines': '(1999)', 'split': '"""dev"""', 'batch_size': '(1)', 'tokenizers': 'tokenizers', 
'max_len': '(args.max_len_en + args.max_len_zh + 3)', 'preprocessed': '(True)', 'bi_uni_pipeline': 'pipeline', 's2s_prob': '(1)', 'bi_prob': '(0)'}), "(N_lines=1999, split='dev', batch_size=1,\n tokenizers=tokenizers, max_len=args.max_len_en + args.max_len_zh + 3,\n preprocessed=True, bi_uni_pipeline=pipeline, s2s_prob=1, bi_prob=0)\n", (25645, 25832), True, 'import vlp.seq2seq_loader as seq2seq_loader\n'), ((27407, 27441), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (27439, 27441), False, 'import torch\n'), ((44891, 44922), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'optimizer'], {}), '(loss, optimizer)\n', (44905, 44922), False, 'from apex import amp\n'), ((45286, 45346), 'pytorch_pretrained_bert.optimization.warmup_linear', 'warmup_linear', (['(global_step / t_total)', 'args.warmup_proportion'], {}), '(global_step / t_total, args.warmup_proportion)\n', (45299, 45346), False, 'from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear\n'), ((39372, 39387), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (39385, 39387), False, 'import torch\n'), ((1452, 1460), 'pathlib.Path', 'Path', (['fn'], {}), '(fn)\n', (1456, 1460), False, 'from pathlib import Path\n'), ((48725, 48745), 'tqdm.tqdm', 'tqdm', (['val_dataloader'], {}), '(val_dataloader)\n', (48729, 48745), False, 'from tqdm import tqdm, trange\n'), ((1862, 1870), 'pathlib.Path', 'Path', (['fn'], {}), '(fn)\n', (1866, 1870), False, 'from pathlib import Path\n'), ((1949, 1957), 'pathlib.Path', 'Path', (['fn'], {}), '(fn)\n', (1953, 1957), False, 'from pathlib import Path\n'), ((41156, 41180), 'torch.from_numpy', 'torch.from_numpy', (['reward'], {}), '(reward)\n', (41172, 41180), False, 'import torch\n'), ((43524, 43603), 'numpy.arange', 'np.arange', (['((i_epoch - 1) * nbatches + step)', '((i_epoch - 1) * nbatches + step + 1)'], {}), '((i_epoch - 1) * nbatches + step, (i_epoch - 1) * nbatches + step + 1)\n', (43533, 43603), True, 'import numpy 
as np\n'), ((44085, 44164), 'numpy.arange', 'np.arange', (['((i_epoch - 1) * nbatches + step)', '((i_epoch - 1) * nbatches + step + 1)'], {}), '((i_epoch - 1) * nbatches + step, (i_epoch - 1) * nbatches + step + 1)\n', (44094, 44164), True, 'import numpy as np\n'), ((48297, 48327), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['val_dataset'], {}), '(val_dataset)\n', (48314, 48327), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n'), ((48966, 48981), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (48979, 48981), False, 'import torch\n'), ((53701, 53726), 'json.dump', 'json.dump', (['predictions', 'f'], {}), '(predictions, f)\n', (53710, 53726), False, 'import json\n'), ((54330, 54354), 'json.dump', 'json.dump', (['lang_stats', 'f'], {}), '(lang_stats, f)\n', (54339, 54354), False, 'import json\n'), ((43698, 43717), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (43705, 43717), True, 'import numpy as np\n'), ((44259, 44278), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (44266, 44278), True, 'import numpy as np\n'), ((46778, 46806), 'copy.deepcopy', 'copy.deepcopy', (['model_to_save'], {}), '(model_to_save)\n', (46791, 46806), False, 'import copy\n'), ((51844, 51869), 'vlp.decode_img2txt.detokenize', 'detokenize', (['output_tokens'], {}), '(output_tokens)\n', (51854, 51869), False, 'from vlp.decode_img2txt import detokenize\n'), ((52687, 52717), 'logging.disable', 'logging.disable', (['logging.ERROR'], {}), '(logging.ERROR)\n', (52702, 52717), False, 'import logging\n'), ((52985, 53016), 'logging.disable', 'logging.disable', (['logging.NOTSET'], {}), '(logging.NOTSET)\n', (53000, 53016), False, 'import logging\n'), ((53567, 53622), 'os.path.join', 'os.path.join', (['args.output_dir', '"""{}_{}_predictions.json"""'], {}), "(args.output_dir, '{}_{}_predictions.json')\n", (53579, 53622), False, 'import os\n'), ((54201, 54251), 'os.path.join', 'os.path.join', (['args.output_dir', 
'"""{}_{}_scores.json"""'], {}), "(args.output_dir, '{}_{}_scores.json')\n", (54213, 54251), False, 'import os\n'), ((55138, 55193), 'os.path.join', 'os.path.join', (['args.output_dir', '"""{}_{}_predictions.json"""'], {}), "(args.output_dir, '{}_{}_predictions.json')\n", (55150, 55193), False, 'import os\n'), ((52775, 52797), 'mosestokenizer.MosesDetokenizer', 'MosesDetokenizer', (['"""en"""'], {}), "('en')\n", (52791, 52797), False, 'from mosestokenizer import MosesDetokenizer\n'), ((52906, 52931), 'vlp.decode_img2txt.detokenize', 'detokenize', (['output_tokens'], {}), '(output_tokens)\n', (52916, 52931), False, 'from vlp.decode_img2txt import detokenize\n'), ((53329, 53354), 'vlp.decode_img2txt.detokenize', 'detokenize', (['output_tokens'], {}), '(output_tokens)\n', (53339, 53354), False, 'from vlp.decode_img2txt import detokenize\n')] |
# from https://stackoverflow.com/questions/12643079/b%C3%A9zier-curve-fitting-with-scipy
import random
import multiprocessing
import numpy as np
from scipy.misc import comb
from tqdm import tqdm
def bernstein_poly(i, n, t):
    """
    The Bernstein polynomial of n, i as a function of t.

    NOTE: following the original Stack Overflow answer this evaluates
    comb(n, i) * t**(n-i) * (1-t)**i (i.e. B_{n-i,n}(t), not the textbook
    B_{i,n}(t)); `bezier_curve` compensates by reversing the sampled curve,
    so the formula is kept as-is.
    """
    # `scipy.misc.comb` (imported at the top of this file) was removed in
    # SciPy 1.3; `scipy.special.comb` is the drop-in replacement with the
    # same default float behaviour. Imported locally so this function works
    # regardless of the (now broken) module-level import.
    from scipy.special import comb
    return comb(n, i) * (t ** (n - i)) * (1 - t) ** i
def bezier_curve(points, nTimes=1000):
    """
    Sample the Bezier curve defined by the given control points.

    points: a sequence of (x, y) control points, e.g. [[1, 1], [2, 3], ...].
    nTimes: number of sample positions along the curve (default 1000).

    Returns (xvals, yvals) as numpy arrays.
    See http://processingjs.nihongoresources.com/bezierinfo/
    """
    n_ctrl = len(points)
    xs = np.array([pt[0] for pt in points])
    ys = np.array([pt[1] for pt in points])
    ts = np.linspace(0.0, 1.0, nTimes)
    # One row of Bernstein basis values per control point, evaluated at
    # every sample position.
    basis = np.array([bernstein_poly(k, n_ctrl - 1, ts) for k in range(n_ctrl)])
    return np.dot(xs, basis), np.dot(ys, basis)
def line_line_intersection(x1,y1,x2,y2,x3,y3,x4,y4):
    """
    Intersection of the infinite lines through (x1,y1)-(x2,y2) and
    (x3,y3)-(x4,y4), using the determinant formula from
    https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection

    Returns (x, y); raises ZeroDivisionError for parallel lines.
    """
    det12 = x1 * y2 - y1 * x2
    det34 = x3 * y4 - y3 * x4
    denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    ix = (det12 * (x3 - x4) - (x1 - x2) * det34) / denom
    iy = (det12 * (y3 - y4) - (y1 - y2) * det34) / denom
    return ix, iy
def make_squiggle(plotit=False):
    """Draw a random chain of Bezier segments and count self-intersections.

    Builds 4-7 cubic Bezier segments with random control points (each segment
    starting where the previous one ended), then counts how many times the
    resulting polyline crosses itself.

    plotit: if True, show the squiggle and its intersections with matplotlib.
    Returns the number of self-intersections (int).
    """
    if plotit:
        from matplotlib import pyplot as plt
    # np.random.seed(8)
    all_xs = []
    all_ys = []
    lastPoint = None
    # Chain 4-7 random cubic Bezier segments; reuse the previous segment's
    # last control point so the curve is continuous.
    for i in range(np.random.randint(4,8)):
        points = np.random.rand(4,2)*200
        if lastPoint is not None:
            points[0] = lastPoint
        xvals, yvals = bezier_curve(points, nTimes=50)
        # bezier_curve samples run backwards (see bernstein_poly); reverse
        # so the appended points go start -> end.
        all_xs += list(reversed(xvals))
        all_ys += list(reversed(yvals))
        lastPoint = points[-1]
    if plotit:
        # plt.scatter(all_xs, all_ys,c=list(range(len(all_xs))))
        plt.plot(all_xs,all_ys)
    num = 0
    # Brute-force pairwise test of polyline segments (i-1,i) vs (j-1,j),
    # j > i, counting crossings that fall strictly inside both segments.
    for i, _ in enumerate(all_xs):
        if i < 2:
            continue
        for j, _ in enumerate(all_xs):
            if j<=i:
                continue
            x1 = all_xs[i-1]
            y1 = all_ys[i-1]
            x2 = all_xs[i]
            y2 = all_ys[i]
            x3 = all_xs[j-1]
            y3 = all_ys[j-1]
            x4 = all_xs[j]
            y4 = all_ys[j]
            # Intersection of the two infinite lines; the checks below
            # reject it unless it lies within both segments' bounding spans.
            intersection_x,intersection_y = line_line_intersection(x1,y1,x2,y2,x3,y3,x4,y4)
            minx = x1
            maxx = x2
            if x2 < x1:
                minx = x2
                maxx = x1
            if not (intersection_x > minx and intersection_x < maxx):
                continue
            miny = y1
            maxy = y2
            if y2 < y1:
                miny = y2
                maxy = y1
            if not (intersection_y > miny and intersection_y < maxy):
                continue
            minx = x3
            maxx = x4
            if x4 < x3:
                minx = x4
                maxx = x3
            if not (intersection_x > minx and intersection_x < maxx):
                continue
            miny = y3
            maxy = y4
            if y4 < y3:
                miny = y4
                maxy = y3
            if not (intersection_y > miny and intersection_y < maxy):
                continue
            num += 1
            if plotit:
                plt.plot(intersection_x, intersection_y, "ro")
            # if num == 3:
            #     print(i,j,len(all_xs))
            #     plt.plot(intersection_x, intersection_y, "ro")
            #     plt.plot(x1, y1, "go")
            #     plt.plot(x2, y2, "yo")
            #     plt.plot(x3, y3, "mo")
            #     plt.plot(x4, y4, "bo")
    # plt.plot(xpoints, ypoints, "ro")
    # for nr in range(len(points)):
    #     plt.text(points[nr][0], points[nr][1], nr)
    if plotit:
        plt.show()
    return num
if __name__ == "__main__":
# make_squiggle(True)
# make_squiggle(True)
# make_squiggle(True)
# make_squiggle(True)
with open('random.txt','w') as f:
count = 300000
pbar = tqdm(total=count)
p = multiprocessing.Pool()
for i, data in enumerate(p.imap_unordered(make_squiggle,[False]*count), 0):
pbar.update()
f.write('{}\n'.format(data % 6))
pbar.close()
print("""Now run
dieharder -f random.txt -a""")
| [
"tqdm.tqdm",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.misc.comb",
"numpy.random.randint",
"numpy.array",
"numpy.linspace",
"multiprocessing.Pool",
"numpy.random.rand",
"numpy.dot"
] | [((824, 856), 'numpy.array', 'np.array', (['[p[0] for p in points]'], {}), '([p[0] for p in points])\n', (832, 856), True, 'import numpy as np\n'), ((871, 903), 'numpy.array', 'np.array', (['[p[1] for p in points]'], {}), '([p[1] for p in points])\n', (879, 903), True, 'import numpy as np\n'), ((913, 942), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'nTimes'], {}), '(0.0, 1.0, nTimes)\n', (924, 942), True, 'import numpy as np\n'), ((1055, 1088), 'numpy.dot', 'np.dot', (['xPoints', 'polynomial_array'], {}), '(xPoints, polynomial_array)\n', (1061, 1088), True, 'import numpy as np\n'), ((1101, 1134), 'numpy.dot', 'np.dot', (['yPoints', 'polynomial_array'], {}), '(yPoints, polynomial_array)\n', (1107, 1134), True, 'import numpy as np\n'), ((1844, 1867), 'numpy.random.randint', 'np.random.randint', (['(4)', '(8)'], {}), '(4, 8)\n', (1861, 1867), True, 'import numpy as np\n'), ((2232, 2256), 'matplotlib.pyplot.plot', 'plt.plot', (['all_xs', 'all_ys'], {}), '(all_xs, all_ys)\n', (2240, 2256), True, 'from matplotlib import pyplot as plt\n'), ((4164, 4174), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4172, 4174), True, 'from matplotlib import pyplot as plt\n'), ((4399, 4416), 'tqdm.tqdm', 'tqdm', ([], {'total': 'count'}), '(total=count)\n', (4403, 4416), False, 'from tqdm import tqdm\n'), ((4429, 4451), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (4449, 4451), False, 'import multiprocessing\n'), ((313, 323), 'scipy.misc.comb', 'comb', (['n', 'i'], {}), '(n, i)\n', (317, 323), False, 'from scipy.misc import comb\n'), ((1886, 1906), 'numpy.random.rand', 'np.random.rand', (['(4)', '(2)'], {}), '(4, 2)\n', (1900, 1906), True, 'import numpy as np\n'), ((3667, 3713), 'matplotlib.pyplot.plot', 'plt.plot', (['intersection_x', 'intersection_y', '"""ro"""'], {}), "(intersection_x, intersection_y, 'ro')\n", (3675, 3713), True, 'from matplotlib import pyplot as plt\n')] |
import multiprocessing as mp
import numpy as np
import random
import copy
import os
from attackgraph import file_op as fp
from attackgraph.uniform_str_init import act_def, act_att
from baselines.deepq.load_action import load_action_class
# print(os.getcwd())
def parallel_sim(env, game, nn_att, nn_def, num_episodes):
    """Run ``num_episodes`` independent episodes of single_sim in parallel.

    Each episode gets its own deep copy of the graph, attacker and defender
    (see copy_env); returns the attacker/defender rewards averaged over all
    episodes as a numpy array of shape (2,).
    """
    graphs, attackers, defenders = copy_env(env, num_episodes)
    job_args = [
        (g, game, att, nn_att, dfd, nn_def, env.T)
        for g, att, dfd in zip(graphs, attackers, defenders)
    ]
    with mp.Pool() as pool:
        rewards = pool.map_async(single_sim, job_args).get()
    return np.sum(np.array(rewards), 0) / num_episodes
def single_sim(param): #single for single episode.
    """Simulate one episode of the attacker/defender game.

    param: tuple (G, game, attacker, nn_att, defender, nn_def, T) where
    nn_att / nn_def are either a strategy name (str) or a mixed strategy
    (np.ndarray of probabilities over game.att_str / game.def_str).
    Returns (aReward, dReward): cumulative attacker and defender rewards.
    """
    # TODO: Dealing with uniform str
    aReward = 0
    dReward = 0
    def_uniform_flag = False
    att_uniform_flag = False
    #nn_att and nn_def here can be either np.ndarray or str. np.ndarray represents a mixed strategy.
    # A str represents the name of a strategy.
    G, game, attacker, nn_att, defender, nn_def, T = param
    # Sample a pure strategy name from any mixed strategy before loading.
    if isinstance(nn_att, np.ndarray) and isinstance(nn_def, str):
        str_set = game.att_str
        nn_att = np.random.choice(str_set, p=nn_att)
    if isinstance(nn_att, str) and isinstance(nn_def, np.ndarray):
        str_set = game.def_str
        nn_def = np.random.choice(str_set, p=nn_def)
    if isinstance(nn_att, np.ndarray) and isinstance(nn_def, np.ndarray):
        str_set = game.att_str
        nn_att = np.random.choice(str_set, p=nn_att)
        str_set = game.def_str
        nn_def = np.random.choice(str_set, p=nn_def)
    # "epoch1" strategies are the uniform baselines stored as pickles,
    # not trained networks.
    if "epoch1" in nn_att:
        att_uniform_flag = True
    if "epoch1" in nn_def:
        def_uniform_flag = True
    path = os.getcwd() + "/attacker_strategies/" + nn_att
    if att_uniform_flag:
        nn_att = fp.load_pkl(path)
    else:
        training_flag = 1
        # Trained strategy: load the network plus its TF session/graph.
        nn_att, sess1, graph1 = load_action_class(path,game,training_flag)
    path = os.getcwd() + "/defender_strategies/" + nn_def
    if def_uniform_flag:
        nn_def = fp.load_pkl(path)
    else:
        training_flag = 0
        nn_def, sess2, graph2 = load_action_class(path,game,training_flag)
    # Main episode loop: both players pick actions each timestep.
    for t in range(T):
        timeleft = T - t
        # Network-backed strategies must run inside their own TF graph/session.
        if att_uniform_flag:
            attacker.att_greedy_action_builder_single(G, timeleft, nn_att)
        else:
            with graph1.as_default():
                with sess1.as_default():
                    attacker.att_greedy_action_builder_single(G, timeleft, nn_att)
        if def_uniform_flag:
            defender.def_greedy_action_builder_single(G, timeleft, nn_def)
        else:
            with graph2.as_default():
                with sess2.as_default():
                    defender.def_greedy_action_builder_single(G, timeleft, nn_def)
        att_action_set = attacker.attact
        def_action_set = defender.defact
        # print('att:', att_action_set)
        # print('def:', def_action_set)
        # Apply attacker actions: edges (tuples) attack OR nodes, node ids
        # attack AND nodes; each attempt costs and succeeds stochastically.
        for attack in att_action_set:
            if isinstance(attack, tuple):
                # check OR node
                aReward += G.edges[attack]['cost']
                if random.uniform(0, 1) <= G.edges[attack]['actProb']:
                    G.nodes[attack[-1]]['state'] = 1
            else:
                # check AND node
                aReward += G.nodes[attack]['aCost']
                if random.uniform(0, 1) <= G.nodes[attack]['actProb']:
                    G.nodes[attack]['state'] = 1
        # defender's action
        for node in def_action_set:
            G.nodes[node]['state'] = 0
            dReward += G.nodes[node]['dCost']
        # Score compromised targets: attacker gains, defender is penalised.
        _, targetset = get_Targets(G)
        for node in targetset:
            if G.nodes[node]['state'] == 1:
                aReward += G.nodes[node]['aReward']
                dReward += G.nodes[node]['dPenalty']
        # print(aReward)
    # print(aReward, dReward)
    return aReward, dReward
def get_Targets(G):
    """Return (count, set) of target nodes in G, i.e. nodes with type == 1."""
    targetset = {node for node in G.nodes if G.nodes[node]['type'] == 1}
    return len(targetset), targetset
def copy_env(env, num_episodes):
    """Reset the environment and build independent deep copies of its graph,
    attacker and defender, one per episode.

    Returns (G_list, att_list, def_list), each of length ``num_episodes``.
    """
    env.reset_everything()
    graphs = [copy.deepcopy(env.G_reserved) for _ in range(num_episodes)]
    attackers = [copy.deepcopy(env.attacker) for _ in range(num_episodes)]
    defenders = [copy.deepcopy(env.defender) for _ in range(num_episodes)]
    return graphs, attackers, defenders
| [
"copy.deepcopy",
"random.uniform",
"os.getcwd",
"attackgraph.file_op.load_pkl",
"multiprocessing.Pool",
"numpy.arange",
"baselines.deepq.load_action.load_action_class",
"numpy.array",
"numpy.random.choice"
] | [((4228, 4251), 'numpy.arange', 'np.arange', (['num_episodes'], {}), '(num_episodes)\n', (4237, 4251), True, 'import numpy as np\n'), ((524, 533), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (531, 533), True, 'import multiprocessing as mp\n'), ((1158, 1193), 'numpy.random.choice', 'np.random.choice', (['str_set'], {'p': 'nn_att'}), '(str_set, p=nn_att)\n', (1174, 1193), True, 'import numpy as np\n'), ((1310, 1345), 'numpy.random.choice', 'np.random.choice', (['str_set'], {'p': 'nn_def'}), '(str_set, p=nn_def)\n', (1326, 1345), True, 'import numpy as np\n'), ((1469, 1504), 'numpy.random.choice', 'np.random.choice', (['str_set'], {'p': 'nn_att'}), '(str_set, p=nn_att)\n', (1485, 1504), True, 'import numpy as np\n'), ((1553, 1588), 'numpy.random.choice', 'np.random.choice', (['str_set'], {'p': 'nn_def'}), '(str_set, p=nn_def)\n', (1569, 1588), True, 'import numpy as np\n'), ((1810, 1827), 'attackgraph.file_op.load_pkl', 'fp.load_pkl', (['path'], {}), '(path)\n', (1821, 1827), True, 'from attackgraph import file_op as fp\n'), ((1896, 1940), 'baselines.deepq.load_action.load_action_class', 'load_action_class', (['path', 'game', 'training_flag'], {}), '(path, game, training_flag)\n', (1913, 1940), False, 'from baselines.deepq.load_action import load_action_class\n'), ((2040, 2057), 'attackgraph.file_op.load_pkl', 'fp.load_pkl', (['path'], {}), '(path)\n', (2051, 2057), True, 'from attackgraph import file_op as fp\n'), ((2126, 2170), 'baselines.deepq.load_action.load_action_class', 'load_action_class', (['path', 'game', 'training_flag'], {}), '(path, game, training_flag)\n', (2143, 2170), False, 'from baselines.deepq.load_action import load_action_class\n'), ((626, 637), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (634, 637), True, 'import numpy as np\n'), ((1721, 1732), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1730, 1732), False, 'import os\n'), ((1951, 1962), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1960, 1962), False, 'import os\n'), ((4275, 
4304), 'copy.deepcopy', 'copy.deepcopy', (['env.G_reserved'], {}), '(env.G_reserved)\n', (4288, 4304), False, 'import copy\n'), ((4330, 4357), 'copy.deepcopy', 'copy.deepcopy', (['env.attacker'], {}), '(env.attacker)\n', (4343, 4357), False, 'import copy\n'), ((4383, 4410), 'copy.deepcopy', 'copy.deepcopy', (['env.defender'], {}), '(env.defender)\n', (4396, 4410), False, 'import copy\n'), ((3124, 3144), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3138, 3144), False, 'import random\n'), ((3351, 3371), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3365, 3371), False, 'import random\n')] |
""" This file is modified from:
https://raw.githubusercontent.com/piergiaj/pytorch-i3d/master/videotransforms.py
"""
import numpy as np
import cv2
import numbers
import random
import torch
def scales_to_point(scales, image_size, input_size):
    """Map predicted scaling factors in [-1, 1] to pixel coordinates on the
    padded input plane (e.g. 480x640), accounting for the letterbox offset
    introduced when the original image was padded-resized.

    scales: (B, 2) tensor of (scale_x, scale_y) in [-1, 1].
    Returns a (B, 2) tensor of (x, y) pixel coordinates.
    """
    sx, sy = scales[:, 0], scales[:, 1]
    ratio_r = image_size[0] / input_size[0]  # e.g. 660 / 480
    ratio_c = image_size[1] / input_size[1]  # e.g. 1584 / 640
    if ratio_r > ratio_c:
        # Height was the limiting dimension: content is narrower than the
        # input and centred horizontally.
        cols = (image_size[1] * input_size[0]) // image_size[0]
        x = torch.clamp_max(cols / 2.0 * (1 + sx), cols - 1)
        y = torch.clamp_max(input_size[0] / 2.0 * (1 - sy), input_size[0] - 1)
        x = x + (input_size[1] - cols) // 2
    else:
        # Width was the limiting dimension: content is shorter than the
        # input and centred vertically.
        rows = (image_size[0] * input_size[1]) // image_size[1]
        y = torch.clamp_max(rows / 2.0 * (1 - sy), rows - 1)
        x = torch.clamp_max(input_size[1] / 2.0 * (1 + sx), input_size[1] - 1)
        y = y + (input_size[0] - rows) // 2
    # Stack into (B, 2) as (x, y).
    return torch.cat((x.unsqueeze(1), y.unsqueeze(1)), dim=1)
def norm_fix(fixation, input_size):
    """Normalise (x, y) fixation coordinates into [0, 1] by dividing x by the
    input width and y by the input height. Returns a new tensor; the input
    is left untouched."""
    scaled = fixation.clone()
    scaled[:, 0] = scaled[:, 0] / input_size[1]  # x / width
    scaled[:, 1] = scaled[:, 1] / input_size[0]  # y / height
    return scaled
def padding_inv(pred, shape_r, shape_c):
    """Undo the padded resize: scale ``pred`` back up to cover
    (shape_r, shape_c), crop away the letterbox border, and rescale values
    into [0, 255]."""
    src_r, src_c = pred.shape[0], pred.shape[1]
    if shape_r / src_r > shape_c / src_c:
        # Target is relatively taller: match rows, crop the excess columns.
        new_cols = (src_c * shape_r) // src_r
        resized = cv2.resize(pred, (new_cols, shape_r))
        left = (resized.shape[1] - shape_c) // 2
        img = resized[:, left:left + shape_c]
    else:
        # Target is relatively wider: match columns, crop the excess rows.
        new_rows = (src_r * shape_c) // src_c
        resized = cv2.resize(pred, (shape_c, new_rows))
        top = (resized.shape[0] - shape_r) // 2
        img = resized[top:top + shape_r, :]
    # epsilon avoids division by zero on an all-zero map
    return img / (np.max(img) + 1e-6) * 255
def padding(img, shape_r=480, shape_c=640, channels=3):
    """Resize ``img`` to fit inside (shape_r, shape_c) while preserving its
    aspect ratio, centring the result on a black (zero) canvas of exactly
    that size. Returns a uint8 array."""
    if channels == 1:
        canvas = np.zeros((shape_r, shape_c), dtype=np.uint8)
    else:
        canvas = np.zeros((shape_r, shape_c, channels), dtype=np.uint8)
    src_h, src_w = img.shape[0], img.shape[1]
    if src_h / shape_r > src_w / shape_c:
        # Height is the limiting dimension: fill all rows, pad columns.
        new_cols = (src_w * shape_r) // src_h
        resized = cv2.resize(img, (new_cols, shape_r))
        new_cols = min(new_cols, shape_c)
        left = (canvas.shape[1] - new_cols) // 2
        canvas[:, left:left + new_cols] = resized
    else:
        # Width is the limiting dimension: fill all columns, pad rows.
        new_rows = (src_h * shape_c) // src_w
        resized = cv2.resize(img, (shape_c, new_rows))
        new_rows = min(new_rows, shape_r)
        top = (canvas.shape[0] - new_rows) // 2
        canvas[top:top + new_rows, :] = resized
    return canvas
def padding_point(point, img_shape, shape_r=480, shape_c=640):
    """Map an (x, y) point from the original image onto the padded-resized
    (shape_r, shape_c) plane, applying the same scaling and centring offset
    as ``padding``.

    img_shape: [height, width] of the original image.
    Returns np.array([x, y], dtype=np.int32).
    """
    def _rescale(pt, src_shape, rows, cols):
        # Scale into the resized grid and clamp exact-edge hits inside it.
        row = int(np.round(pt[1] * (rows / src_shape[0])))
        col = int(np.round(pt[0] * (cols / src_shape[1])))
        if row == rows:
            row -= 1
        if col == cols:
            col -= 1
        return row, col

    if img_shape[0] / shape_r > img_shape[1] / shape_c:
        # Height-limited fit: columns were shrunk and centred.
        cols = (img_shape[1] * shape_r) // img_shape[0]
        row, col = _rescale(point, img_shape, shape_r, cols)
        col += (shape_c - cols) // 2
    else:
        # Width-limited fit: rows were shrunk and centred.
        rows = (img_shape[0] * shape_c) // img_shape[1]
        row, col = _rescale(point, img_shape, rows, shape_c)
        row += (shape_r - rows) // 2
    return np.array([col, row], dtype=np.int32)
class ProcessImages(object):
    """Padded-resize a clip of frames to ``input_shape``, then normalize to
    [0, 1] and standardize per channel.

    Args:
        input_shape: (shape_r, shape_c) target size, or a single int for a
            square output.
        mean, std: per-channel statistics applied after scaling to [0, 1].
    """
    def __init__(self, input_shape, mean=[0, 0, 0], std=[1, 1, 1]):
        if isinstance(input_shape, numbers.Number):
            side = int(input_shape)
            self.input_shape = (side, side)
        else:
            self.input_shape = input_shape
        self.mean = mean
        self.std = std

    def __call__(self, imgs):
        """
        imgs: RGB frames shaped (T, H, W, C); returns float32 (T, C, H, W).
        """
        n_frames, _, _, channels = imgs.shape
        rows, cols = self.input_shape
        out = np.zeros((n_frames, rows, cols, channels), dtype=np.float32)
        for idx, frame in enumerate(imgs):
            resized = padding(frame, rows, cols, channels)
            if channels == 1:
                # padding returns a 2-D map for single-channel input;
                # restore the trailing channel axis.
                resized = np.expand_dims(resized, axis=-1)
            out[idx] = resized.astype(np.float32)
        out /= 255.0  # normalize to [0, 1]
        out = np.transpose(out, (0, 3, 1, 2))  # (t, h, w, c) -> (t, c, h, w)
        # standardize each channel
        for ch in range(channels):
            out[:, ch] = (out[:, ch] - self.mean[ch]) / self.std[ch]
        return out

    def __repr__(self):
        return self.__class__.__name__ + '(input_shape={0})'.format(self.input_shape)
class ProcessFixations(object):
    """Remap fixation points so they align with frames pre-processed by the
    padded-resize in ``ProcessImages``.

    Args:
        input_shape: (shape_r, shape_c) target size, or a single int for a
            square output.
        img_shape: original image size as [height, width].
    """
    def __init__(self, input_shape, img_shape):
        if isinstance(input_shape, numbers.Number):
            side = int(input_shape)
            self.input_shape = (side, side)
        else:
            self.input_shape = input_shape
        self.img_shape = img_shape

    def __call__(self, coords):
        """
        coords: (L, 2) array of (x, y) fixation points. Points with a
        non-positive coordinate are treated as invalid and left at (0, 0).
        """
        rows, cols = self.input_shape
        remapped = np.zeros_like(coords, dtype=np.int32)
        for idx, pt in enumerate(coords):
            if pt[0] > 0 and pt[1] > 0:
                remapped[idx] = padding_point(pt, self.img_shape, rows, cols)
        return remapped

    def __repr__(self):
        return self.__class__.__name__ + '(input_shape={0})'.format(self.input_shape)
"numpy.zeros_like",
"numpy.zeros",
"numpy.expand_dims",
"numpy.max",
"numpy.array",
"torch.clamp_max",
"numpy.rollaxis",
"numpy.round",
"cv2.resize"
] | [((2225, 2279), 'numpy.zeros', 'np.zeros', (['(shape_r, shape_c, channels)'], {'dtype': 'np.uint8'}), '((shape_r, shape_c, channels), dtype=np.uint8)\n', (2233, 2279), True, 'import numpy as np\n'), ((4219, 4251), 'numpy.array', 'np.array', (['[c, r]'], {'dtype': 'np.int32'}), '([c, r], dtype=np.int32)\n', (4227, 4251), True, 'import numpy as np\n'), ((681, 742), 'torch.clamp_max', 'torch.clamp_max', (['(new_cols / 2.0 * (1 + scale_x))', '(new_cols - 1)'], {}), '(new_cols / 2.0 * (1 + scale_x), new_cols - 1)\n', (696, 742), False, 'import torch\n'), ((753, 824), 'torch.clamp_max', 'torch.clamp_max', (['(input_size[0] / 2.0 * (1 - scale_y))', '(input_size[0] - 1)'], {}), '(input_size[0] / 2.0 * (1 - scale_y), input_size[0] - 1)\n', (768, 824), False, 'import torch\n'), ((968, 1029), 'torch.clamp_max', 'torch.clamp_max', (['(new_rows / 2.0 * (1 - scale_y))', '(new_rows - 1)'], {}), '(new_rows / 2.0 * (1 - scale_y), new_rows - 1)\n', (983, 1029), False, 'import torch\n'), ((1040, 1111), 'torch.clamp_max', 'torch.clamp_max', (['(input_size[1] / 2.0 * (1 + scale_x))', '(input_size[1] - 1)'], {}), '(input_size[1] / 2.0 * (1 + scale_x), input_size[1] - 1)\n', (1055, 1111), False, 'import torch\n'), ((1730, 1767), 'cv2.resize', 'cv2.resize', (['pred', '(new_cols, shape_r)'], {}), '(pred, (new_cols, shape_r))\n', (1740, 1767), False, 'import cv2\n'), ((1968, 2005), 'cv2.resize', 'cv2.resize', (['pred', '(shape_c, new_rows)'], {}), '(pred, (shape_c, new_rows))\n', (1978, 2005), False, 'import cv2\n'), ((2323, 2367), 'numpy.zeros', 'np.zeros', (['(shape_r, shape_c)'], {'dtype': 'np.uint8'}), '((shape_r, shape_c), dtype=np.uint8)\n', (2331, 2367), True, 'import numpy as np\n'), ((2599, 2635), 'cv2.resize', 'cv2.resize', (['img', '(new_cols, shape_r)'], {}), '(img, (new_cols, shape_r))\n', (2609, 2635), False, 'import cv2\n'), ((2912, 2948), 'cv2.resize', 'cv2.resize', (['img', '(shape_c, new_rows)'], {}), '(img, (shape_c, new_rows))\n', (2922, 2948), False, 'import 
cv2\n'), ((4927, 4979), 'numpy.zeros', 'np.zeros', (['(t, shape_r, shape_c, c)'], {'dtype': 'np.float32'}), '((t, shape_r, shape_c, c), dtype=np.float32)\n', (4935, 4979), True, 'import numpy as np\n'), ((5278, 5300), 'numpy.rollaxis', 'np.rollaxis', (['ims', '(3)', '(1)'], {}), '(ims, 3, 1)\n', (5289, 5300), True, 'import numpy as np\n'), ((6155, 6192), 'numpy.zeros_like', 'np.zeros_like', (['coords'], {'dtype': 'np.int32'}), '(coords, dtype=np.int32)\n', (6168, 6192), True, 'import numpy as np\n'), ((3458, 3493), 'numpy.round', 'np.round', (['(point[1] * factor_scale_r)'], {}), '(point[1] * factor_scale_r)\n', (3466, 3493), True, 'import numpy as np\n'), ((3511, 3546), 'numpy.round', 'np.round', (['(point[0] * factor_scale_c)'], {}), '(point[0] * factor_scale_c)\n', (3519, 3546), True, 'import numpy as np\n'), ((2124, 2135), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (2130, 2135), True, 'import numpy as np\n'), ((5132, 5169), 'numpy.expand_dims', 'np.expand_dims', (['padded_image'], {'axis': '(-1)'}), '(padded_image, axis=-1)\n', (5146, 5169), True, 'import numpy as np\n')] |
"""
Module Docstring
"""
__author__ = "<NAME>"
__version__ = "0.1.0"
__license__ = "MIT"
import argparse
import logging
import sys
sys.path.append('../')
from funcs import utils
from tqdm.autonotebook import tqdm,trange
import networkx as nx
import numpy as np
import random
parser = argparse.ArgumentParser(description='Evaluate paths, FC paths and random paths type A,B between source and destination nodes')
parser.add_argument('srcnodes_file', type=str, help='Source nodes IDs in one column')
parser.add_argument('destnodes_file', type=str, help='Destination nodes IDs in one column')
parser.add_argument('net_path', type=str, default=None, help='Network filepath')
parser.add_argument('flows_file', type=str, help='Flow centrality table')
parser.add_argument('out_fcpaths', type=str, help='Output filepath of FC paths between srcnodes and destnodes')
parser.add_argument('--fc_thresh', type=float, default=2, help='Flow centrality threshold')
parser.add_argument('--npath_thresh', type=int, default=1, help='Number of paths threshold')
parser.add_argument('--N_cores', type=int, default=1, help='Number of cores')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO,
format='%(module)s:%(levelname)s:%(asctime)s:%(message)s',
handlers=[logging.FileHandler("../logs/report.log"),
logging.StreamHandler()])
logging.info(args)
net = utils.read_network(args.net_path)
srcnodes = utils.read_gene_list(args.srcnodes_file)
destnodes = utils.read_gene_list(args.destnodes_file)
flows = utils.read_flows(args.flows_file)
fcnodes = flows[(flows.FCS >= args.fc_thresh) & (flows.N_paths >= args.npath_thresh)].index.tolist()
logging.info('Num of FC nodes: {}'.format(len(fcnodes)))
all_paths = []
for src_gene in tqdm(srcnodes):
def get_sps(dest_gene):
return list(nx.all_shortest_paths(net, src_gene, dest_gene))
all_paths = all_paths + sum(utils.parallel_process(get_sps, destnodes, n_jobs=args.N_cores),[])
logging.info('Num of all paths: {}'.format(len(all_paths)))
fc_paths = []
for i in trange(len(all_paths)):
fullpath = all_paths[i]
if len(fullpath) > 2:
path = all_paths[i][1:-1]
if np.all([node in fcnodes for node in path]):
fc_paths.append(fullpath)
logging.info('Num of FC paths: {}'.format(len(fc_paths)))
utils.write_paths(args.out_fcpaths, fc_paths)
| [
"sys.path.append",
"funcs.utils.read_gene_list",
"funcs.utils.read_network",
"argparse.ArgumentParser",
"logging.FileHandler",
"tqdm.autonotebook.tqdm",
"logging.StreamHandler",
"funcs.utils.write_paths",
"logging.info",
"networkx.all_shortest_paths",
"funcs.utils.read_flows",
"funcs.utils.par... | [((135, 157), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (150, 157), False, 'import sys\n'), ((290, 426), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate paths, FC paths and random paths type A,B between source and destination nodes"""'}), "(description=\n 'Evaluate paths, FC paths and random paths type A,B between source and destination nodes'\n )\n", (313, 426), False, 'import argparse\n'), ((1401, 1419), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (1413, 1419), False, 'import logging\n'), ((1426, 1459), 'funcs.utils.read_network', 'utils.read_network', (['args.net_path'], {}), '(args.net_path)\n', (1444, 1459), False, 'from funcs import utils\n'), ((1471, 1511), 'funcs.utils.read_gene_list', 'utils.read_gene_list', (['args.srcnodes_file'], {}), '(args.srcnodes_file)\n', (1491, 1511), False, 'from funcs import utils\n'), ((1524, 1565), 'funcs.utils.read_gene_list', 'utils.read_gene_list', (['args.destnodes_file'], {}), '(args.destnodes_file)\n', (1544, 1565), False, 'from funcs import utils\n'), ((1575, 1608), 'funcs.utils.read_flows', 'utils.read_flows', (['args.flows_file'], {}), '(args.flows_file)\n', (1591, 1608), False, 'from funcs import utils\n'), ((1801, 1815), 'tqdm.autonotebook.tqdm', 'tqdm', (['srcnodes'], {}), '(srcnodes)\n', (1805, 1815), False, 'from tqdm.autonotebook import tqdm, trange\n'), ((2364, 2409), 'funcs.utils.write_paths', 'utils.write_paths', (['args.out_fcpaths', 'fc_paths'], {}), '(args.out_fcpaths, fc_paths)\n', (2381, 2409), False, 'from funcs import utils\n'), ((2222, 2266), 'numpy.all', 'np.all', (['[(node in fcnodes) for node in path]'], {}), '([(node in fcnodes) for node in path])\n', (2228, 2266), True, 'import numpy as np\n'), ((1302, 1343), 'logging.FileHandler', 'logging.FileHandler', (['"""../logs/report.log"""'], {}), "('../logs/report.log')\n", (1321, 1343), False, 'import logging\n'), ((1375, 1398), 
'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1396, 1398), False, 'import logging\n'), ((1865, 1912), 'networkx.all_shortest_paths', 'nx.all_shortest_paths', (['net', 'src_gene', 'dest_gene'], {}), '(net, src_gene, dest_gene)\n', (1886, 1912), True, 'import networkx as nx\n'), ((1946, 2009), 'funcs.utils.parallel_process', 'utils.parallel_process', (['get_sps', 'destnodes'], {'n_jobs': 'args.N_cores'}), '(get_sps, destnodes, n_jobs=args.N_cores)\n', (1968, 2009), False, 'from funcs import utils\n')] |
"""
Functions to help with download and basic processing of GPS data
"""
from datetime import datetime, timedelta
import io
import json
import logging
import multiprocessing
import os
import re
from typing import cast, Dict, Iterable, Optional, Sequence, Tuple
import zipfile
import numpy
import requests
import georinex
import xarray
from laika import AstroDog
from laika.dgps import get_station_position
from laika.downloader import download_cors_station, download_and_cache_file
from laika.gps_time import GPSTime
from laika.rinex_file import DownloadError
from tid import config, tec, types, util
LOG = logging.getLogger(__name__)
# numpy structured dtype for the dense per-satellite observation rows
# produced by from_xarray_sat / consumed by populate_sat_info
DENSE_TYPE = [
    ("tick", "i4"),  # tick number the observation was made
    ("C1C", "f8"),  # GNSS measurements, if available
    ("C2C", "f8"),
    ("L1C", "f8"),
    ("L2C", "f8"),
    ("sat_pos", "3f8"),  # satellite position XYZ ECEF in meters
]
DOWNLOAD_WORKERS = 20  # how many processes to spawn for downloading files
# ecef locations for stations, so we can know what is nearby
with open(
    os.path.dirname(__file__) + "/lookup_tables/station_locations.json", "rb"
) as f:
    STATION_LOCATIONS = json.load(f)
# which network stations belong to, if we know, to speed up downloading
with open(
    os.path.dirname(__file__) + "/lookup_tables/station_networks.json", "rb"
) as f:
    STATION_NETWORKS = json.load(f)
# module-wide configuration (provides cache_dir, used by fetch/download helpers)
conf = config.Configuration()
def get_nearby_stations(
    dog: AstroDog, point: Sequence, dist: int = 400000
) -> Sequence[str]:
    """
    Find every known/downloadable station name within ``dist`` meters
    of the target point.

    Args:
        dog: laika AstroDog object
        point: tuple of ECEF xyz location, in meters
        dist: allowable distance from the target point, in meters

    Returns:
        list of station names located close to the target point
    """
    positions_file = dog.cache_dir + "cors_coord/cors_station_positions"
    with open(positions_file, "rb") as handle:
        # pylint:disable=unexpected-keyword-arg
        # (confused about numpy, I guess)
        cors_positions = numpy.load(handle, allow_pickle=True).item()
    # gather (name, position) pairs from the CORS table and our own table
    entries = [(name, pos) for name, (_, pos, _) in cors_positions.items()]
    entries.extend(STATION_LOCATIONS.items())
    names = numpy.array([name for name, _ in entries])
    coords = numpy.array([pos for _, pos in entries])
    # euclidean distance from each station to the target point
    distances = numpy.sqrt(((coords - numpy.array(point)) ** 2).sum(1))
    return list(names[numpy.where(distances < dist)[0]])
def _download_misc_igs_station(
    dog: AstroDog, time: GPSTime, station_name: str
) -> Optional[str]:
    """
    Downloader for non-CORS stations. Attempts to download rinex observables
    for the given station and time.

    Should only be used internally by data_for_station.

    Args:
        dog: laika AstroDog object
        time: laika GPSTime object
        station_name: string representation a station name

    Returns:
        string representing a path to the downloaded file
        or None, if the file was not able to be downloaded
    """
    cache_subdir = dog.cache_dir + "misc_igs_obs/"
    t = time.as_datetime()
    # the mirrors use slightly different folder layouts
    base_folder = t.strftime("%Y/%j/")
    filename = station_name + t.strftime("%j0.%yo")
    # each attempt is (mirror urls, folder layout); try them in order
    attempts = (
        (
            (
                "ftp://garner.ucsd.edu/archive/garner/rinex/",
                "ftp://data-out.unavco.org/pub/rinex/obs/",
            ),
            base_folder,
        ),
        (
            (
                "ftp://igs.gnsswhu.cn/pub/gps/data/daily/",
                "ftp://cddis.nasa.gov/gnss/data/daily/",
            ),
            base_folder + t.strftime("%yo/"),
        ),
    )
    for url_bases, folder_path in attempts:
        try:
            return download_and_cache_file(
                url_bases, folder_path, cache_subdir, filename, compression=".Z"
            )
        except IOError:
            continue
    return None
def _download_korean_station(
    dog: AstroDog, time: GPSTime, station_name: str
) -> Optional[str]:
    """
    Downloader for Korean stations. Attempts to download rinex observables
    for the given station and time.
    Should only be used internally by data_for_station
    TODO: we can download from multiple stations at once and save some time here....
    Args:
        dog: laika AstroDog object
        time: laika GPSTime object
        station_name: string representation a station name
    Returns:
        string representing a path to the downloaded file
        or None, if the file was not able to be downloaded
    Raises:
        DownloadError: if the gnssdata.or.kr API returns an empty or
            unsuccessful response
    """
    # gnssdata.or.kr two-step API: POST to have a zip prepared, then GET it by key
    json_url = "http://gnssdata.or.kr/download/createToZip.json"
    zip_url = "http://gnssdata.or.kr/download/getZip.do?key=%d"
    cache_subdir = dog.cache_dir + "korean_obs/"
    t = time.as_datetime()
    # different path formats...
    folder_path = cache_subdir + t.strftime("%Y/%j/")
    filename = folder_path + station_name + t.strftime("%j0.%yo")
    # already cached from a previous run: skip the network entirely
    if os.path.isfile(filename):
        return filename
    if not os.path.exists(folder_path):
        os.makedirs(folder_path, exist_ok=True)
    start_day = t.strftime("%Y%m%d")
    # request a single-day archive for this station at our data rate
    postdata = {
        "corsId": station_name.upper(),
        "obsStDay": start_day,
        "obsEdDay": start_day,
        "dataTyp": util.DATA_RATE,
    }
    res = requests.post(json_url, data=postdata).text
    if not res:
        raise DownloadError
    res_dat = json.loads(res)
    if not res_dat.get("result", None):
        raise DownloadError
    key = res_dat["key"]
    zipstream = requests.get(zip_url % key, stream=True)
    # the download is a zip of zips: scan every inner archive for RINEX
    # observation files (names ending in "o") and write them to the
    # expected cache path
    with zipfile.ZipFile(io.BytesIO(zipstream.content)) as zipdat:
        for zipf in zipdat.filelist:
            with zipfile.ZipFile(io.BytesIO(zipdat.read(zipf))) as station:
                for rinex in station.filelist:
                    if rinex.filename.endswith("o"):
                        with open(filename, "wb") as rinex_out:
                            rinex_out.write(station.read(rinex))
    return filename
def _download_japanese_station(
    dog: AstroDog, time: GPSTime, station_name: str
) -> Optional[str]:
    """
    Downloader for Japanese stations. Attempts to download rinex observables
    for the given station and time.

    Should only be used internally by data_for_station.

    Args:
        dog: laika AstroDog object
        time: laika GPSTime object
        station_name: string representation a station name

    Returns:
        string representing a path to the downloaded file
        or None, if the file was not able to be downloaded
    """
    t = time.as_datetime()
    try:
        return download_and_cache_file(
            ("http://copyfighter.org:6670/japan/data/GR_2.11/",),
            t.strftime("%Y/%j/"),
            dog.cache_dir + "japanese_obs/",
            station_name + t.strftime("%j0.%yo"),
            compression=".gz",
        )
    except IOError:
        # mirror unreachable or file missing
        return None
def cors_get_station_lists_for_day(date: datetime) -> Iterable[str]:
    """
    Given a date, returns the stations that the US CORS network
    reports as available.
    """
    day_url = "https://geodesy.noaa.gov/corsdata/rinex/" + date.strftime("%Y/%j/")
    listing = requests.get(day_url)
    # station names are the 4-character folder links in the directory listing
    return re.findall('<a href="..../">([a-z0-9]{4})/</a>', listing.text)
def fetch_rinex_for_station(
    dog: Optional[AstroDog], time: GPSTime, station_name: str
) -> Optional[str]:
    """
    Given a particular time and station, get the rinex obs file that
    corresponds to it
    Args:
        dog: laika AstroDog object, or None to construct a default one
            from the configured cache directory
        time: laika GPSTime object for the time in question
        station_name: string of the station in question
            station names are CORS names or similar (eg: 'slac')
    Returns:
        the string containing the file path, or None
    """
    if dog is None:
        dog = AstroDog(cache_dir=conf.cache_dir)
    # handlers for specific networks
    handlers = {
        "Korea": _download_korean_station,
        "Japan": _download_japanese_station,
    }
    network = STATION_NETWORKS.get(station_name, None)
    # no special network, so try using whatever
    if network is None:
        # step 1: get the station rinex data from CORS
        try:
            rinex_obs_file = download_cors_station(
                time, station_name, cache_dir=dog.cache_dir
            )
        except (KeyError, DownloadError):
            # station position not in CORS map, try another thing:
            # fall back to the misc IGS mirrors if we at least know the station
            if station_name in STATION_LOCATIONS:
                rinex_obs_file = _download_misc_igs_station(dog, time, station_name)
            else:
                return None
    else:
        # dedicated handler for this network; may raise DownloadError
        rinex_obs_file = handlers[network](dog, time, station_name)
    return rinex_obs_file
def location_for_station(
    dog: AstroDog, time: GPSTime, station_name: str
) -> types.ECEF_XYZ:
    """
    Get location for a particular station at a particular time.
    Time is needed so we can look at RINEX files and sanity check
    the location data.
    Args:
        dog: laika AstroDog object
        time: laika GPSTime object for the time in question
        station_name: string of the station in question
            station names are CORS names or similar (eg: 'slac')
    Returns:
        approximate x,y,z location in ECEF meters
    Raises:
        DownloadError if the RINEX could not be fetched
    """
    rinex_obs_file = fetch_rinex_for_station(dog, time, station_name)
    if rinex_obs_file is None:
        raise DownloadError
    # position as reported by the RINEX file itself
    approx_position = util.station_location_from_rinex(rinex_obs_file)
    # start with most accurate positions (from known databases)
    try:
        station_pos = get_station_position(station_name, cache_dir=dog.cache_dir)
    except KeyError:
        # not in laika's database: fall back to our own table, else the RINEX value
        station_pos = numpy.array(
            STATION_LOCATIONS.get(station_name) or approx_position
        )
    # while databases are more accurate, there are some cases of name collisions
    # (eg Korea and US CORS may pick same 4 letter name). To resolve this, favor
    # positions reported from RINEX files if there is a big (>100m) divergence
    if station_pos is not None and approx_position is not None:
        if numpy.linalg.norm(station_pos - approx_position) > 100:
            LOG.warning(
                "for station %s, we have large differences in position reports",
                station_name,
            )
            station_pos = approx_position
    return station_pos
def from_xarray_sat(rinex: xarray.Dataset, start_date: GPSTime) -> types.Observations:
    """
    Convert the georinex xarray data for one satellite to Observations.

    Args:
        rinex: georinex xarray Dataset restricted to a single satellite
        start_date: time at which tick 0 occurred

    Returns:
        Observations for the satellite
    """
    # truncate to observations with data (epochs lacking C1 are useless)
    rinex = rinex.dropna("time", how="all", subset=["C1"])
    outp = numpy.zeros(rinex.dims["time"], dtype=DENSE_TYPE)
    # map our channel names to the RINEX 2 observable names
    obs_map = {"C1C": "C1", "C2C": "C2", "C2P": "P2", "L1C": "L1", "L2C": "L2"}
    for obs in ["C1C", "C2C", "L1C", "L2C"]:
        # if the channel doesn't exist, set to NaN
        if obs_map[obs] not in rinex:
            outp[obs][:] = numpy.nan
        else:
            outp[obs][:] = rinex[obs_map[obs]]
    # if the C2C channel is empty/crap, fall back to C2P -- but only when the
    # file actually carries a P2 observable (previously this raised a KeyError
    # for files that have neither C2 nor P2; now C2C just stays NaN)
    if numpy.all(numpy.isnan(outp["C2C"])) and obs_map["C2P"] in rinex:
        outp["C2C"][:] = rinex[obs_map["C2P"]]
    # tick = number of DATA_RATE intervals elapsed since start_date
    timedeltas = rinex["time"].astype(numpy.datetime64).to_numpy() - numpy.datetime64(
        start_date.as_datetime()
    )
    outp["tick"] = (timedeltas / numpy.timedelta64(util.DATA_RATE, "s")).astype(int)
    return cast(types.Observations, outp)
def from_xarray(rinex: xarray.Dataset, start_date: GPSTime) -> types.DenseMeasurements:
    """
    Convert georinex's xarray format into our sparser per-satellite format.

    Args:
        rinex: the georinex xarray file
        start_date: when tick 0 occurred

    Returns:
        dense raw gps data
    """
    dense = {
        svid: from_xarray_sat(rinex.sel(sv=svid), start_date)
        for svid in rinex.sv.to_numpy()
    }
    return cast(types.DenseMeasurements, dense)
def data_for_station(
    dog: AstroDog,
    time: GPSTime,
    station_name: str,
    start_date: GPSTime,
) -> types.DenseMeasurements:
    """
    Get data from a particular station and time.

    Args:
        dog: laika AstroDog object
        time: laika GPSTime object for the time in question
        station_name: the station for which we want data
        start_date: when index 0 occurred

    Returns:
        dense raw gps data

    Raises:
        DownloadError if the data could not be fetched

    TODO: caching of the results on disk? or should that happen later?
    """
    rinex_path = fetch_rinex_for_station(dog, time, station_name)
    if rinex_path is None:
        raise DownloadError
    # parse at 30 second intervals, then densify into our format
    loaded = georinex.load(rinex_path, interval=30)
    return from_xarray(loaded, start_date)
def populate_sat_info(
    dog: AstroDog,
    start_time: GPSTime,
    duration: timedelta,
    station_dict: types.StationPrnMap[types.Observations],
) -> None:
    """
    Populate the satellite locations for our measurements (in place).
    Args:
        dog: laika AstroDog to use
        start_time: when the 0th tick occurs
        duration: how long until the last tick
        station_dict: mapping to the Observations that need correcting
    TODO: can numba (or something) help us parallelize the lower loops?
    """
    # map svid -> row index into the sat_info array below
    satellites = {sat: idx for idx, sat in enumerate(dog.get_all_sat_info(start_time))}
    tick_count = int(duration.total_seconds() / util.DATA_RATE)
    # get an accurate view of the satellites at 30 second intervals
    sat_info = numpy.zeros(
        (len(satellites), tick_count + 1), dtype=[("pos", "3f8"), ("vel", "3f8")]
    )
    for tick in range(tick_count + 1):
        tick_info = dog.get_all_sat_info(start_time + util.DATA_RATE * tick)
        for svid, info in tick_info.items():
            # info[0] is position, info[1] is velocity
            sat_info[satellites[svid]][tick] = (info[0], info[1])
    bad_datas = set()
    for station in station_dict:
        for sat in station_dict[station]:
            if sat not in satellites:
                # no info for this satellite, probably not orbiting, remove it
                bad_datas.add((station, sat))
                continue
            ticks = station_dict[station][sat]["tick"]
            # pseudorange / tec.C -- presumably tec.C is the speed of light, so
            # this is the signal travel time in seconds (TODO confirm)
            time_delays = station_dict[station][sat]["C1C"] / tec.C
            # rewind each satellite along its velocity by the travel time
            delta_pos = (
                sat_info[satellites[sat]]["vel"][ticks] * time_delays[:, numpy.newaxis]
            )
            corrected_pos = sat_info[satellites[sat]]["pos"][ticks] - delta_pos
            # write the corrected positions directly into the observations
            station_dict[station][sat]["sat_pos"][:] = corrected_pos
    # delete after iterating to avoid mutating dicts mid-iteration
    for station, sat in bad_datas:
        del station_dict[station][sat]
def merge_data(
    data1: types.DenseMeasurements, data2: types.DenseMeasurements
) -> types.DenseMeasurements:
    """
    Merge two sets of dense measurements together.

    Args:
        data1: the first (chronologically) set of data
        data2: the second (chronologically) set of data

    Returns:
        the combined data
    """
    merged = data1.copy()
    for prn, observations in data2.items():
        if prn in data1:
            # satellite appears in both: append the later data to the earlier
            merged[prn] = numpy.append(data1[prn], observations)
        else:
            # satellite only shows up in the second dataset
            merged[prn] = observations
    return cast(types.DenseMeasurements, merged)
def populate_data(
    stations: Iterable[str],
    start_date: GPSTime,
    duration: timedelta,
    dog: AstroDog,
) -> Tuple[Dict[str, types.ECEF_XYZ], types.StationPrnMap[types.Observations]]:
    """
    Download/populate the station data and station location info
    Args:
        stations: list of station names
        start_date: when the 0th tick occurs
        duration: how long of a window to fetch, starting at start_date
        dog: astro dog to use
    Returns:
        dictionary of station names to their locations,
        dictionary of station names to sat names to their dense data
    TODO: is this a good place to be caching results?
    """
    # dict of station names -> XYZ ECEF locations in meters
    station_locs: Dict[str, types.ECEF_XYZ] = {}
    # dict of station names -> dict of prn -> numpy observation data
    station_data = cast(types.StationPrnMap[types.Observations], {})
    for station in stations:
        gps_date = start_date
        # walk one day at a time across the requested window
        while (gps_date) < start_date + duration.total_seconds():
            try:
                latest_data = data_for_station(
                    dog, gps_date, station, start_date=start_date
                )
                if station not in station_locs:
                    station_locs[station] = location_for_station(dog, gps_date, station)
            except DownloadError:
                continue
            except IndexError:
                print("index error: ", station)
                continue
            finally:
                # always advance one day, even when the download failed
                # (the `continue`s above still pass through this finally)
                gps_date += (1 * util.DAYS).total_seconds()
            # only reached when the try body succeeded, so latest_data is bound
            if station not in station_data:
                station_data[station] = latest_data
            else:
                # we've already got some data, so merge it together
                # give mypy a hint here about our type aliases
                station_data[station] = merge_data(
                    cast(types.DenseMeasurements, station_data[station]),
                    latest_data,
                )
        # didn't download data, ignore it
        if station not in station_data:
            continue
    # fill in satellite positions for everything we gathered (in place)
    populate_sat_info(dog, start_date, duration, station_data)
    return station_locs, station_data
def download_and_process(
    argtuple: Tuple[GPSTime, str]
) -> Tuple[GPSTime, str, Optional[str]]:
    """
    Fetch the data for a station at a date, return a path to the NetCDF4
    version of it.
    Takes a single tuple argument so it can be used with Pool.map.
    Args:
        argtuple: the (date, station) for which we want the data
    Returns:
        date requested, station requested, and the path to the nc file, or
        None if it can't be retrieved
    """
    date, station = argtuple
    # first search for already processed NetCDF4 files
    path_name = date.as_datetime().strftime(f"%Y/%j/{station}%j0.%yo.nc")
    for cache_folder in ["misc_igs_obs", "japanese_obs", "korean_obs", "cors_obs"]:
        fname = f"{conf.cache_dir}/{cache_folder}/{path_name}"
        if os.path.exists(fname):
            return date, station, fname
    # no cached .nc: fetch the raw RINEX (dog=None -> default AstroDog)
    rinex_obs_file = fetch_rinex_for_station(None, date, station)
    if rinex_obs_file is not None:
        # another process may have converted it already
        if os.path.exists(rinex_obs_file + ".nc"):
            return date, station, rinex_obs_file + ".nc"
        rinex = georinex.load(rinex_obs_file, interval=30)
        # normalize the time coordinate before serializing
        rinex["time"] = rinex.time.astype(numpy.datetime64)
        rinex.to_netcdf(rinex_obs_file + ".nc")
        return date, station, rinex_obs_file + ".nc"
    return date, station, None
def parallel_populate_data(
    stations: Iterable[str],
    start_date: GPSTime,
    duration: timedelta,
    dog: AstroDog,
) -> Tuple[Dict[str, types.ECEF_XYZ], types.StationPrnMap[types.Observations]]:
    """
    Download/populate the station data and station location info, fetching
    files in parallel with a pool of DOWNLOAD_WORKERS processes.
    Args:
        stations: list of station names
        start_date: when the 0th tick occurs
        duration: how long of a window to fetch, starting at start_date
        dog: astro dog to use
    Returns:
        dictionary of station names to their locations,
        dictionary of station names to sat names to their dense data
    TODO: is this a good place to be caching results?
    NOTE(review): `stations` is iterated twice below; a one-shot iterator
    would be exhausted on the second pass -- callers appear to pass lists,
    but confirm.
    """
    # dict of station names -> XYZ ECEF locations in meters
    station_locs: Dict[str, types.ECEF_XYZ] = {}
    # dict of station names -> dict of prn -> numpy observation data
    station_data = cast(types.StationPrnMap[types.Observations], {})
    # build the full (date, station) work list, one entry per day
    to_download = []
    for station in stations:
        gps_date = start_date
        while gps_date < start_date + duration.total_seconds():
            to_download.append((gps_date, station))
            gps_date += (1 * util.DAYS).total_seconds()
    with multiprocessing.Pool(DOWNLOAD_WORKERS) as pool:
        download_res = pool.map(download_and_process, to_download)
    downloaded_map = {
        # break it up like this to deal with GPSTime not being hashable
        # (note: `start_date` here is the comprehension's loop variable,
        # not the function argument)
        (start_date.week, start_date.tow, station): result
        for start_date, station, result in download_res
    }
    for station in stations:
        gps_date = start_date
        while gps_date < start_date + duration.total_seconds():
            result = downloaded_map.get((gps_date.week, gps_date.tow, station))
            gps_date += (1 * util.DAYS).total_seconds()
            if result is None:
                continue
            latest_data = xarray.load_dataset(result)
            if station not in station_locs:
                # position attribute comes from the serialized dataset
                station_locs[station] = latest_data.position
            dense_data = from_xarray(latest_data, start_date)
            if station not in station_data:
                station_data[station] = dense_data
            else:
                # we've already got some data, so merge it together
                # give mypy a hint here about our type aliases
                station_data[station] = merge_data(
                    cast(types.DenseMeasurements, station_data[station]),
                    dense_data,
                )
        # didn't download data, ignore it
        if station not in station_data:
            continue
    # fill in satellite positions for everything we gathered (in place)
    populate_sat_info(dog, start_date, duration, station_data)
    return station_locs, station_data
| [
"numpy.load",
"typing.cast",
"numpy.isnan",
"os.path.isfile",
"laika.downloader.download_cors_station",
"numpy.linalg.norm",
"requests.post",
"json.loads",
"xarray.load_dataset",
"os.path.dirname",
"os.path.exists",
"georinex.load",
"numpy.append",
"re.findall",
"requests.get",
"laika.... | [((613, 640), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (630, 640), False, 'import logging\n'), ((1380, 1402), 'tid.config.Configuration', 'config.Configuration', ([], {}), '()\n', (1400, 1402), False, 'from tid import config, tec, types, util\n'), ((1154, 1166), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1163, 1166), False, 'import json\n'), ((1359, 1371), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1368, 1371), False, 'import json\n'), ((2486, 2512), 'numpy.array', 'numpy.array', (['station_names'], {}), '(station_names)\n', (2497, 2512), False, 'import numpy\n'), ((2534, 2558), 'numpy.array', 'numpy.array', (['station_pos'], {}), '(station_pos)\n', (2545, 2558), False, 'import numpy\n'), ((5202, 5226), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (5216, 5226), False, 'import os\n'), ((5650, 5665), 'json.loads', 'json.loads', (['res'], {}), '(res)\n', (5660, 5665), False, 'import json\n'), ((5776, 5816), 'requests.get', 'requests.get', (['(zip_url % key)'], {'stream': '(True)'}), '(zip_url % key, stream=True)\n', (5788, 5816), False, 'import requests\n'), ((7626, 7652), 're.findall', 're.findall', (['pat', 'resp.text'], {}), '(pat, resp.text)\n', (7636, 7652), False, 'import re\n'), ((9947, 9995), 'tid.util.station_location_from_rinex', 'util.station_location_from_rinex', (['rinex_obs_file'], {}), '(rinex_obs_file)\n', (9979, 9995), False, 'from tid import config, tec, types, util\n'), ((11240, 11289), 'numpy.zeros', 'numpy.zeros', (["rinex.dims['time']"], {'dtype': 'DENSE_TYPE'}), "(rinex.dims['time'], dtype=DENSE_TYPE)\n", (11251, 11289), False, 'import numpy\n'), ((11978, 12008), 'typing.cast', 'cast', (['types.Observations', 'outp'], {}), '(types.Observations, outp)\n', (11982, 12008), False, 'from typing import cast, Dict, Iterable, Optional, Sequence, Tuple\n'), ((12327, 12360), 'typing.cast', 'cast', (['types.DenseMeasurements', '{}'], {}), '(types.DenseMeasurements, 
{})\n', (12331, 12360), False, 'from typing import cast, Dict, Iterable, Optional, Sequence, Tuple\n'), ((13282, 13324), 'georinex.load', 'georinex.load', (['rinex_obs_file'], {'interval': '(30)'}), '(rinex_obs_file, interval=30)\n', (13295, 13324), False, 'import georinex\n'), ((15860, 15899), 'typing.cast', 'cast', (['types.DenseMeasurements', 'combined'], {}), '(types.DenseMeasurements, combined)\n', (15864, 15899), False, 'from typing import cast, Dict, Iterable, Optional, Sequence, Tuple\n'), ((16722, 16771), 'typing.cast', 'cast', (['types.StationPrnMap[types.Observations]', '{}'], {}), '(types.StationPrnMap[types.Observations], {})\n', (16726, 16771), False, 'from typing import cast, Dict, Iterable, Optional, Sequence, Tuple\n'), ((20141, 20190), 'typing.cast', 'cast', (['types.StationPrnMap[types.Observations]', '{}'], {}), '(types.StationPrnMap[types.Observations], {})\n', (20145, 20190), False, 'from typing import cast, Dict, Iterable, Optional, Sequence, Tuple\n'), ((3622, 3715), 'laika.downloader.download_and_cache_file', 'download_and_cache_file', (['url_bases', 'folder_path', 'cache_subdir', 'filename'], {'compression': '""".Z"""'}), "(url_bases, folder_path, cache_subdir, filename,\n compression='.Z')\n", (3645, 3715), False, 'from laika.downloader import download_cors_station, download_and_cache_file\n'), ((5263, 5290), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (5277, 5290), False, 'import os\n'), ((5300, 5339), 'os.makedirs', 'os.makedirs', (['folder_path'], {'exist_ok': '(True)'}), '(folder_path, exist_ok=True)\n', (5311, 5339), False, 'import os\n'), ((5548, 5586), 'requests.post', 'requests.post', (['json_url'], {'data': 'postdata'}), '(json_url, data=postdata)\n', (5561, 5586), False, 'import requests\n'), ((7106, 7200), 'laika.downloader.download_and_cache_file', 'download_and_cache_file', (['url_bases', 'folder_path', 'cache_subdir', 'filename'], {'compression': '""".gz"""'}), "(url_bases, folder_path, 
cache_subdir, filename,\n compression='.gz')\n", (7129, 7200), False, 'from laika.downloader import download_cors_station, download_and_cache_file\n'), ((8210, 8244), 'laika.AstroDog', 'AstroDog', ([], {'cache_dir': 'conf.cache_dir'}), '(cache_dir=conf.cache_dir)\n', (8218, 8244), False, 'from laika import AstroDog\n'), ((10027, 10086), 'laika.dgps.get_station_position', 'get_station_position', (['station_name'], {'cache_dir': 'dog.cache_dir'}), '(station_name, cache_dir=dog.cache_dir)\n', (10047, 10086), False, 'from laika.dgps import get_station_position\n'), ((11681, 11705), 'numpy.isnan', 'numpy.isnan', (["outp['C2C']"], {}), "(outp['C2C'])\n", (11692, 11705), False, 'import numpy\n'), ((18786, 18807), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (18800, 18807), False, 'import os\n'), ((18962, 19000), 'os.path.exists', 'os.path.exists', (["(rinex_obs_file + '.nc')"], {}), "(rinex_obs_file + '.nc')\n", (18976, 19000), False, 'import os\n'), ((19075, 19117), 'georinex.load', 'georinex.load', (['rinex_obs_file'], {'interval': '(30)'}), '(rinex_obs_file, interval=30)\n', (19088, 19117), False, 'import georinex\n'), ((20454, 20492), 'multiprocessing.Pool', 'multiprocessing.Pool', (['DOWNLOAD_WORKERS'], {}), '(DOWNLOAD_WORKERS)\n', (20474, 20492), False, 'import multiprocessing\n'), ((1048, 1073), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1063, 1073), False, 'import os\n'), ((1255, 1280), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1270, 1280), False, 'import os\n'), ((5842, 5871), 'io.BytesIO', 'io.BytesIO', (['zipstream.content'], {}), '(zipstream.content)\n', (5852, 5871), False, 'import io\n'), ((8610, 8676), 'laika.downloader.download_cors_station', 'download_cors_station', (['time', 'station_name'], {'cache_dir': 'dog.cache_dir'}), '(time, station_name, cache_dir=dog.cache_dir)\n', (8631, 8676), False, 'from laika.downloader import download_cors_station, 
download_and_cache_file\n'), ((10536, 10584), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(station_pos - approx_position)'], {}), '(station_pos - approx_position)\n', (10553, 10584), False, 'import numpy\n'), ((15811, 15847), 'numpy.append', 'numpy.append', (['data1[prn]', 'data2[prn]'], {}), '(data1[prn], data2[prn])\n', (15823, 15847), False, 'import numpy\n'), ((21131, 21158), 'xarray.load_dataset', 'xarray.load_dataset', (['result'], {}), '(result)\n', (21150, 21158), False, 'import xarray\n'), ((2136, 2175), 'numpy.load', 'numpy.load', (['cors_pos'], {'allow_pickle': '(True)'}), '(cors_pos, allow_pickle=True)\n', (2146, 2175), False, 'import numpy\n'), ((2670, 2695), 'numpy.where', 'numpy.where', (['(dists < dist)'], {}), '(dists < dist)\n', (2681, 2695), False, 'import numpy\n'), ((3997, 4090), 'laika.downloader.download_and_cache_file', 'download_and_cache_file', (['url_bases', 'folder_path', 'cache_subdir', 'filename'], {'compression': '""".Z"""'}), "(url_bases, folder_path, cache_subdir, filename,\n compression='.Z')\n", (4020, 4090), False, 'from laika.downloader import download_cors_station, download_and_cache_file\n'), ((11915, 11953), 'numpy.timedelta64', 'numpy.timedelta64', (['util.DATA_RATE', '"""s"""'], {}), "(util.DATA_RATE, 's')\n", (11932, 11953), False, 'import numpy\n'), ((17747, 17799), 'typing.cast', 'cast', (['types.DenseMeasurements', 'station_data[station]'], {}), '(types.DenseMeasurements, station_data[station])\n', (17751, 17799), False, 'from typing import cast, Dict, Iterable, Optional, Sequence, Tuple\n'), ((21643, 21695), 'typing.cast', 'cast', (['types.DenseMeasurements', 'station_data[station]'], {}), '(types.DenseMeasurements, station_data[station])\n', (21647, 21695), False, 'from typing import cast, Dict, Iterable, Optional, Sequence, Tuple\n'), ((2602, 2620), 'numpy.array', 'numpy.array', (['point'], {}), '(point)\n', (2613, 2620), False, 'import numpy\n')] |
'''Utility functions for BData'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from .bdata import BData
def vstack(bdata_list, successive=[]):
    '''Concatenate datasets vertically.

    Currently, `vstack` does not validate the consistency of meta-data
    among data beyond keys, descriptions, and values.

    NOTE(review): `successive=[]` is a shared mutable default argument;
    it is only read here, never mutated, so it is safe -- but None would
    be the safer convention.

    Parameters
    ----------
    bdata_list : list of BData
        Data to be concatenated

    successive : list, optional
        Successive columns. The values of columns specified here are inherited
        from the preceding data.

    Returns
    -------
    dat : BData
        Concatenated data

    Example
    -------
        data = vstack([data0, data1, data2], successive=['Session', 'Run', 'Block'])
    '''

    # Running offset added to each successive column (e.g. so Run numbers
    # continue across datasets instead of restarting at 1)
    suc_cols = {s : 0 for s in successive}
    dat = BData() # Concatenated BData
    for ds in bdata_list:
        ds_copy = copy.deepcopy(ds)
        # Update successive columns: shift by the running offset
        for s in successive:
            v = ds_copy.select(s)
            v += suc_cols[s]
            ds_copy.update(s, v)
        # Concatenate BDatas
        if dat.dataset.shape[0] == 0:
            # First dataset: adopt its dataset and metadata wholesale
            dat.dataset = ds_copy.dataset
            dat.metadata = ds_copy.metadata
        else:
            # Concatenate BDatas
            dat.dataset = np.vstack([dat.dataset, ds_copy.dataset])
            # Check metadata consistency
            if not dat.metadata.key == ds_copy.metadata.key:
                raise ValueError('Metadata keys are inconsistent. ')
            if not dat.metadata.description == ds_copy.metadata.description:
                raise ValueError('Metadata descriptions are inconsistent. ')
            # np.array_equal doesn't work because np.nan != np.nan;
            # assert_equal treats NaNs in the same positions as equal
            try:
                np.testing.assert_equal(dat.metadata.value, ds_copy.metadata.value)
            except AssertionError:
                raise ValueError('Metadata values are inconsistent. ')
        # Update the last values in successive columns for the next dataset
        for s in successive:
            v = dat.select(s)
            suc_cols[s] = np.max(v)
    return dat
def concat_dataset(data_list, successive=None):
    '''Concatenate datasets.

    Backward-compatible alias of `vstack`. Currently, `concat_dataset` does
    not fully validate the consistency of meta-data among data.

    Parameters
    ----------
    data_list : list of BData
        Data to be concatenated.
    successive : list, optional
        Successive columns. The values of columns specified here are inherited
        from the preceding data.

    Returns
    -------
    dat : BData
        Concatenated data.

    Example
    -------
    data = concat_dataset([data0, data1, data2], successive=['Session', 'Run', 'Block'])
    '''
    # None default avoids the mutable-default-argument pitfall; pass an
    # explicit list so this works with any `vstack` signature.
    if successive is None:
        successive = []
    return vstack(data_list, successive=successive)
| [
"numpy.testing.assert_equal",
"copy.deepcopy",
"numpy.max",
"numpy.vstack"
] | [((933, 950), 'copy.deepcopy', 'copy.deepcopy', (['ds'], {}), '(ds)\n', (946, 950), False, 'import copy\n'), ((1370, 1411), 'numpy.vstack', 'np.vstack', (['[dat.dataset, ds_copy.dataset]'], {}), '([dat.dataset, ds_copy.dataset])\n', (1379, 1411), True, 'import numpy as np\n'), ((2152, 2161), 'numpy.max', 'np.max', (['v'], {}), '(v)\n', (2158, 2161), True, 'import numpy as np\n'), ((1838, 1905), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['dat.metadata.value', 'ds_copy.metadata.value'], {}), '(dat.metadata.value, ds_copy.metadata.value)\n', (1861, 1905), True, 'import numpy as np\n')] |
from Extractors.PostExtractor import PostExtractor
import pprint
# Shared pretty-printer for debugging dumps.
pp = pprint.PrettyPrinter(indent=4)

# XML dump file names keyed by table name. Only 'Posts' is processed today;
# other tables (Badges, Comments, PostHistory, PostLinks, Tags, Users, Votes)
# can be re-enabled by adding their entries back.
files = {
    'Posts': 'Posts.xml',
}
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
def run(ctx, data_dir):
    """Fit a simple linear regression on extracted post data and plot it.

    Parameters
    ----------
    ctx : object
        Context object (currently unused; kept for interface compatibility).
    data_dir : str
        Directory containing the XML dumps named in ``files``.

    Returns
    -------
    ndarray
        Model predictions for the held-out test features.
    """
    extracted_posts = PostExtractor(data_dir + files['Posts'])
    pd_posts = extracted_posts.getPdSeries()

    x = pd_posts.iloc[:, :1].values   # independent variable (first column)
    y = pd_posts.iloc[:, -1:].values  # dependent variable (last column)
    print('Finished extracing series')
    pp.pprint(y)

    # Hold out one third of the data for evaluation.
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=1/3, random_state=0)
    print('Finished splitting series')

    # Both feature splits need the same (n_samples, 1) float shape for sklearn.
    x_train = np.asarray(x_train, dtype=np.float64).reshape(-1, 1)
    x_test = np.asarray(x_test, dtype=np.float64).reshape(-1, 1)

    # Fitting Simple Linear Regression to the Training set
    regressor = LinearRegression()
    regressor.fit(x_train, y_train)

    # BUG FIX: predict from the independent test features; the original code
    # passed y_test (the targets) into predict().
    predicted = regressor.predict(x_test)

    # Visualize ground truth against the fitted line.
    plt.scatter(x_test, y_test)
    plt.plot(x_test, predicted)
    plt.show()
    return predicted
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"sklearn.model_selection.train_test_split",
"Extractors.PostExtractor.PostExtractor",
"matplotlib.pyplot.scatter",
"sklearn.linear_model.LinearRegression",
"pprint.PrettyPrinter",
"numpy.array",
"numpy.reshape"
] | [((70, 100), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (90, 100), False, 'import pprint\n'), ((624, 664), 'Extractors.PostExtractor.PostExtractor', 'PostExtractor', (["(data_dir + files['Posts'])"], {}), "(data_dir + files['Posts'])\n", (637, 664), False, 'from Extractors.PostExtractor import PostExtractor\n'), ((974, 1029), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(1 / 3)', 'random_state': '(0)'}), '(x, y, test_size=1 / 3, random_state=0)\n', (990, 1029), False, 'from sklearn.model_selection import train_test_split\n'), ((1082, 1099), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (1090, 1099), True, 'import numpy as np\n'), ((1155, 1183), 'numpy.reshape', 'np.reshape', (['x_train', '(-1, 1)'], {}), '(x_train, (-1, 1))\n', (1165, 1183), True, 'import numpy as np\n'), ((1259, 1277), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1275, 1277), False, 'from sklearn.linear_model import LinearRegression\n'), ((1385, 1412), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_test', 'y_test'], {}), '(x_test, y_test)\n', (1396, 1412), True, 'import matplotlib.pyplot as plt\n'), ((1417, 1444), 'matplotlib.pyplot.plot', 'plt.plot', (['x_test', 'predicted'], {}), '(x_test, predicted)\n', (1425, 1444), True, 'import matplotlib.pyplot as plt\n'), ((1449, 1459), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1457, 1459), True, 'import matplotlib.pyplot as plt\n')] |
""" DeepF for sample loss
Keep but not tested (you-yi on 07/13/2020)
Authors: <NAME>, <NAME>
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, Function
from torch.nn.functional import grid_sample
import numpy as np
import cv2
import dsac_tools.utils_F as utils_F # If cannot find: export KITTI_UTILS_PATH='/home/ruizhu/Documents/Projects/kitti_instance_RGBD_utils'
# import utils_F.compute_epi_residual as compute_epi_residual
# import utils_F.compute_epi_residual_non_rob as compute_epi_residual_non_rob
from models.GoodCorresNet import GoodCorresNet
# from models.ConfNet import VggDepthEstimator
# from models.ConfNet import VggDepthEstimatorOLD as VggDepthEstimator
# from models.ImageFeatNet import Conv
# from models.ImageFeatNet import VggDepthEstimatorSeperate as VggDepthEstimator
from models.ErrorEstimators import *
from batch_svd import batch_svd # https://github.com/KinglittleQ/torch-batch-svd.git
class NormalizeAndExpand(nn.Module):
    """Hartley-style normalization of matched point pairs.

    Input is a [B, N, 4] tensor of (x1, y1, x2, y2) correspondences.
    ``forward`` returns the two point sets lifted to homogeneous coordinates
    and normalized to zero mean / unit mean distance ([B, 3, N]), together
    with the 3x3 similarity transforms that produced them.
    """

    def __init__(self, is_cuda=True, is_test=False):
        """``is_test`` is kept for interface compatibility only: the former
        ``Variable(..., volatile=is_test)`` wrapper has been a no-op since
        PyTorch 0.4 (use ``torch.no_grad()`` at the call site instead)."""
        super(NormalizeAndExpand, self).__init__()
        self.ones_b = torch.ones((1, 1, 1))
        self.T_b = torch.zeros(1, 3, 3)
        if is_cuda:
            self.ones_b = self.ones_b.cuda()
            self.T_b = self.T_b.cuda()

    def normalize(self, pts):
        """Center ``pts`` [B, N, 2] at the origin and scale to unit mean distance.

        Returns (pts_out, T) with pts_out = T @ pts_homogeneous^T of shape
        [B, 3, N], and T the [B, 3, 3] similarity transform.
        """
        T = self.T_b.expand(pts.size(0), 3, 3).clone()
        ones = self.ones_b.expand(pts.size(0), pts.size(1), 1)

        pts = torch.cat((pts, ones), 2)  # lift to homogeneous [B, N, 3]
        c = torch.mean(pts, 1)           # per-batch centroid
        newpts_ = (pts - c.unsqueeze(1)) # first center to zero mean
        meandist = newpts_[:, :, :2].pow(2).sum(2).sqrt().mean(1)

        scale = 1.0 / meandist

        T[:, 0, 0] = scale
        T[:, 1, 1] = scale
        T[:, 2, 2] = 1
        T[:, 0, 2] = -c[:, 0] * scale
        T[:, 1, 2] = -c[:, 1] * scale

        pts_out = torch.bmm(T, pts.permute(0, 2, 1))
        return pts_out, T

    def forward(self, pts):
        pts1, T1 = self.normalize(pts[:, :, :2])
        pts2, T2 = self.normalize(pts[:, :, 2:])
        return pts1, pts2, T1, T2
class NormalizeAndExpand_K(nn.Module):
    """Map matched point pairs into normalized camera coordinates via K^{-1}.

    Input is a [B, N, 4] tensor of (x1, y1, x2, y2) pixel correspondences plus
    the per-batch inverse intrinsics ``K_invs`` [B, 3, 3]; ``forward`` returns
    both point sets in homogeneous normalized coordinates ([B, 3, N]).
    """

    def __init__(self, is_cuda=True, is_test=False):
        """``is_test`` is kept for interface compatibility only: the former
        ``Variable(..., volatile=is_test)`` wrapper has been a no-op since
        PyTorch 0.4 (use ``torch.no_grad()`` at the call site instead)."""
        super(NormalizeAndExpand_K, self).__init__()
        self.ones_b = torch.ones((1, 1, 1))
        self.T_b = torch.zeros(1, 3, 3)
        if is_cuda:
            self.ones_b = self.ones_b.cuda()
            self.T_b = self.T_b.cuda()

    def normalize(self, pts, K_invs):
        """Lift ``pts`` [B, N, 2] to homogeneous coords and apply K^{-1}."""
        T = K_invs
        ones = self.ones_b.expand(pts.size(0), pts.size(1), 1)
        pts = torch.cat((pts, ones), 2)
        pts_out = torch.bmm(T, pts.permute(0, 2, 1))
        return pts_out, T

    def forward(self, pts, K_invs):
        pts1, T1 = self.normalize(pts[:, :, :2], K_invs)
        pts2, T2 = self.normalize(pts[:, :, 2:], K_invs)
        return pts1, pts2, T1, T2
class NormalizeAndExpand_HW(nn.Module):
    """Normalize pixel coordinates to [-1, 1] in both H and W.

    Input is a [B, N, 4] tensor of (x1, y1, x2, y2) pixel correspondences;
    ``forward`` returns both point sets as homogeneous normalized coordinates
    ([B, 3, N]) together with the fixed 3x3 normalization transform.
    """

    def __init__(self, image_size, is_cuda=True, is_test=False):
        """``image_size`` is [H, W, ...]; ``is_test`` is kept for interface
        compatibility only (the deprecated ``Variable(..., volatile=...)``
        wrapper has been a no-op since PyTorch 0.4)."""
        super(NormalizeAndExpand_HW, self).__init__()
        self.ones_b = torch.ones((1, 1, 1))
        H, W = image_size[0], image_size[1]
        # x' = 2x/W - 1, y' = 2y/H - 1, written as a homogeneous 3x3 transform.
        self.T = torch.tensor([[2./W, 0., -1.], [0., 2./H, -1.], [0., 0., 1.]]).float().unsqueeze(0)
        if is_cuda:
            self.ones_b = self.ones_b.cuda()
            self.T = self.T.cuda()

    def normalize(self, pts):
        """Lift ``pts`` [B, N, 2] to homogeneous coords and rescale to [-1, 1]."""
        ones = self.ones_b.expand(pts.size(0), pts.size(1), 1)
        pts = torch.cat((pts, ones), 2)
        pts_out = self.T @ pts.permute(0, 2, 1)
        return pts_out, self.T

    def forward(self, pts):
        pts1, T1 = self.normalize(pts[:, :, :2])
        pts2, T2 = self.normalize(pts[:, :, 2:])
        return pts1, pts2, T1, T2
class Fit(nn.Module):
    """Weighted eight-point fundamental-matrix fitting layer.

    Solves for F from weighted correspondences via SVD (per-sample torch.svd
    or the third-party ``batch_svd``) and projects it to rank 2. ``forward``
    also fits F on the top-K-weighted subset and on randomly sampled
    correspondence subsets for the sample loss.
    """
    def __init__(self, is_cuda=True, is_test=False, if_cpu_svd=False, normalize_SVD=True, if_sample_loss=False):
        # is_test is accepted but unused here; kept for interface symmetry
        # with the Normalize* modules.
        super(Fit, self).__init__()
        # self.svd = bsvd(is_cuda, is_test)
        self.ones_b = Variable(torch.ones((1, 1, 1)).float())
        self.zero_b = Variable(torch.zeros((1, 1, 1)).float())
        self.T_b = torch.zeros(1, 3, 3).float()
        # mask = diag(1, 1, 0): zeroes the smallest singular value of F,
        # i.e. the rank-2 projection used after the initial SVD solve.
        self.mask = Variable(torch.ones(3))
        self.mask[-1] = 0
        self.normalize_SVD = normalize_SVD
        self.if_cpu_svd = if_cpu_svd
        if self.if_cpu_svd:
            self.mask_cpu = self.mask.clone()
        self.if_sample_loss = if_sample_loss
        if is_cuda:
            self.ones_b = self.ones_b.cuda()
            self.zero_b = self.zero_b.cuda()
            self.T_b = self.T_b.cuda()
            self.mask = self.mask.cuda()
        self.is_cuda = is_cuda
        # self.bsvd = bsvd_torch()

    def normalize(self, pts, weights):
        """Weighted Hartley normalization.

        Builds a similarity transform T per batch so that the weighted
        centroid moves to the origin and the weighted mean distance becomes
        sqrt(2) (the 1.4142 constant below). Returns (T @ pts^T, T).
        """
        device = pts.device
        T = Variable(self.T_b.to(device).expand(pts.size(0), 3, 3)).clone()
        ones = self.ones_b.to(device).expand(pts.size(0), pts.size(1), 1)

        denom = weights.sum(1)

        #
        # c = torch.mean(pts,1)
        # newpts_ = (pts - c.unsqueeze(1))
        # meandist = newpts_[:,:,:2].pow(2).sum(2).sqrt().mean(1)
        # weighted centroid of the (already homogeneous) points
        c = torch.sum(pts*weights,1)/denom
        # print(c.size(), pts.size())
        newpts_ = (pts - c.unsqueeze(1))
        # weighted mean distance from the centroid (xy components only)
        meandist = ((weights*(newpts_[:,:,:2].pow(2).sum(2).sqrt().unsqueeze(2))).sum(1)/denom).squeeze(1)

        scale = 1.4142/meandist

        T[:,0,0] = scale
        T[:,1,1] = scale
        T[:,2,2] = 1
        T[:,0,2] = -c[:,0]*scale
        T[:,1,2] = -c[:,1]*scale

        # pts_ = torch.cat((pts, ones), 2)
        # print(pts.device, weights.device, T.device, self.T_b.device)
        pts_out = torch.bmm(T, pts.permute(0,2,1))
        return pts_out, T

    # def weighted_svd(self, pts1, pts2, weights):
    #     weights = weights.squeeze(1).unsqueeze(2)

    #     pts1n, T1 = self.normalize(pts1, weights)
    #     pts2n, T2 = self.normalize(pts2, weights)

    #     p = torch.cat((pts1n[:,0].unsqueeze(1)*pts2n,
    #                    pts1n[:,1].unsqueeze(1)*pts2n,
    #                    pts2n), 1).permute(0,2,1)

    #     X = p*weights

    #     out_b = []
    #     for b in range(X.size(0)):
    #         _, _, V = torch.svd(X[b])
    #         F = V[:,-1].view(3,3)
    #         U, S, V = torch.svd(F)
    #         F_ = U.mm((S*self.mask).diag()).mm(V.t())
    #         out_b.append(F_.unsqueeze(0))
    #     out = torch.cat(out_b, 0)

    #     out = T1.permute(0,2,1).bmm(out).bmm(T2)
    #     return out

    def weighted_svd(self, pts1, pts2, weights, if_print=False):
        """Solve the weighted 8-point system with a per-sample SVD loop.

        pts1/pts2: [B, N, 3] homogeneous points; weights: [B, 1, N].
        Returns (F_est [B, 3, 3] in the un-normalized frame, residuals
        [B, N] of the weighted epipolar system X @ f).
        """
        device = weights.device
        weights = weights.squeeze(1).unsqueeze(2)

        # NOTE: normalization here uses uniform ones, not the weights
        # (the weighted variants are kept commented out below).
        ones = torch.ones_like(weights)
        if self.is_cuda:
            ones = ones.cuda()
        pts1n, T1 = self.normalize(pts1, ones)
        pts2n, T2 = self.normalize(pts2, ones)
        # pts1n, T1 = self.normalize(pts1, weights)
        # pts2n, T2 = self.normalize(pts2, weights)

        # Rows of the 8-point design matrix: x2*x1^T flattened (Kronecker form).
        p = torch.cat((pts2n[:,0].unsqueeze(1)*pts1n,
                       pts2n[:,1].unsqueeze(1)*pts1n,
                       pts1n), 1).permute(0,2,1)

        # # if self.normalize_SVD:
        # #     p = torch.nn.functional.normalize(p, dim=2)
        # X = p*torch.sqrt(weights)

        if self.normalize_SVD:
            p = torch.nn.functional.normalize(p, dim=2)
        X = p*weights

        out_b = []
        F_vecs_list = []
        if self.if_cpu_svd:
            # CPU path: torch.svd per sample on CPU, results moved back to GPU.
            for b in range(X.size(0)):
                _, _, V = torch.svd(X[b].cpu())
                F = V[:,-1].view(3,3)
                F_vecs_list.append(V[:,-1]/(V[:,-1].norm()))
                # Rank-2 projection: zero the smallest singular value of F.
                U, S, V = torch.svd(F)
                F_ = U.mm((S*self.mask.cpu()).diag()).mm(V.t())
                out_b.append(F_.unsqueeze(0))
            out = torch.cat(out_b, 0).cuda()
            F_vecs= torch.stack(F_vecs_list).cuda()
        else:
            for b in range(X.size(0)):
                _, _, V = torch.svd(X[b])
                F = V[:,-1].view(3,3)
                F_vecs_list.append(V[:,-1]/(V[:,-1].norm()))
                U, S, V = torch.svd(F)
                F_ = U.mm((S*self.mask.to(device)).diag()).mm(V.t())
                out_b.append(F_.unsqueeze(0))
            out = torch.cat(out_b, 0)
            F_vecs = torch.stack(F_vecs_list)

        if if_print:
            print(F_vecs.size(), p.size(), weights.size())
            print('----F_vecs')
            print(F_vecs[0].detach().cpu().numpy())
            print('----p')
            print(p[0].detach().cpu().numpy())
            print('----weights')
            print(weights[:2].squeeze().detach().cpu().numpy(), torch.sum(weights[:2], dim=1).squeeze().detach().cpu().numpy())

        residual = (X @ F_vecs.unsqueeze(-1)).squeeze(-1) # [B, N, 1]
        # residual_nonWeighted = (p @ F_vecs.unsqueeze(-1)).squeeze(-1) # [B, N, 1]
        # print(residual.size())
        # print(residual.norm(p=2, dim=1).size())

        # Undo the Hartley normalization: F = T2^T F_norm T1.
        out = T2.permute(0,2,1).bmm(out).bmm(T1)
        return out, residual.squeeze(-1)

    def weighted_svd_batch(self, pts1, pts2, weights, if_print=False):
        """Batched variant of ``weighted_svd`` using the third-party
        ``batch_svd`` extension instead of a per-sample Python loop.
        Same inputs/outputs as ``weighted_svd``.
        """
        device = weights.device
        weights = weights.squeeze(1).unsqueeze(2)

        ones = torch.ones_like(weights)
        if self.is_cuda:
            ones = ones.cuda()
        pts1n, T1 = self.normalize(pts1, ones)
        pts2n, T2 = self.normalize(pts2, ones)
        # pts1n, T1 = self.normalize(pts1, weights)
        # pts2n, T2 = self.normalize(pts2, weights)

        p = torch.cat((pts2n[:,0].unsqueeze(1)*pts1n,
                       pts2n[:,1].unsqueeze(1)*pts1n,
                       pts1n), 1).permute(0,2,1)

        # # if self.normalize_SVD:
        # #     p = torch.nn.functional.normalize(p, dim=2)
        # X = p*torch.sqrt(weights)

        if self.normalize_SVD:
            p = torch.nn.functional.normalize(p, dim=2)
        X = p*weights

        # Batched SVD solve + rank-2 projection, all on GPU.
        Us, Ss, Vs = batch_svd(X)
        Fs = Vs[:, :, -1].view(-1, 3, 3)
        F_vecs = torch.nn.functional.normalize(Vs[:, :, -1], p=2, dim=1)
        Us, Ss, Vs = batch_svd(Fs)
        out = Us @ torch.diag_embed(Ss*self.mask.unsqueeze(0)) @ Vs.transpose(1, 2)

        # out_b = []
        # F_vecs_list = []
        # if self.if_cpu_svd:
        #     for b in range(X.size(0)):
        #         _, _, V = torch.svd(X[b].cpu())
        #         F = V[:,-1].view(3,3)
        #         F_vecs_list.append(V[:,-1]/(V[:,-1].norm()))
        #         U, S, V = torch.svd(F)
        #         F_ = U.mm((S*self.mask.cpu()).diag()).mm(V.t())
        #         out_b.append(F_.unsqueeze(0))
        #     out = torch.cat(out_b, 0).cuda()
        #     F_vecs= torch.stack(F_vecs_list).cuda()
        # else:
        #     for b in range(X.size(0)):
        #         _, _, V = torch.svd(X[b])
        #         F = V[:,-1].view(3,3)
        #         F_vecs_list.append(V[:,-1]/(V[:,-1].norm()))
        #         U, S, V = torch.svd(F)
        #         F_ = U.mm((S*self.mask.to(device)).diag()).mm(V.t())
        #         out_b.append(F_.unsqueeze(0))
        #     out = torch.cat(out_b, 0)
        #     F_vecs = torch.stack(F_vecs_list)

        # if if_print:
        #     print(F_vecs.size(), p.size(), weights.size())
        #     print('----F_vecs')
        #     print(F_vecs[0].detach().cpu().numpy())
        #     print('----p')
        #     print(p[0].detach().cpu().numpy())
        #     print('----weights')
        #     print(weights[:2].squeeze().detach().cpu().numpy(), torch.sum(weights[:2], dim=1).squeeze().detach().cpu().numpy())

        residual = (X @ F_vecs.unsqueeze(-1)).squeeze(-1) # [B, N, 1]
        # residual_nonWeighted = (p @ F_vecs.unsqueeze(-1)).squeeze(-1) # [B, N, 1]
        # print(residual.size())
        # print(residual.norm(p=2, dim=1).size())

        out = T2.permute(0,2,1).bmm(out).bmm(T1)
        return out, residual.squeeze(-1)

    def get_unique(self, xs, topk, matches_good_unique_nums, pts1, pts2): # [B, N]
        """Select the top-``topk`` weighted correspondences per sample,
        restricted to the first ``matches_good_unique_num`` (unique) matches.
        Returns stacked (topK weights, their indices, pts1 subset, pts2 subset).
        """
        xs_topk_list = []
        topK_indices_list = []
        pts1_list = []
        pts2_list = []
        for x, matches_good_unique_num, pt1, pt2 in zip(xs, matches_good_unique_nums, pts1, pts2):
            # x_unique = torch.unique(x) # no gradients!!!
            x_unique = x[:, :matches_good_unique_num]
            # print(x_unique_topK)
            x_unique_topK, topK_indices = torch.topk(x_unique, topk, dim=1)
            xs_topk_list.append(x_unique_topK)
            topK_indices_list.append(topK_indices.squeeze())
            pt1_topK, pt2_topK = pt1[topK_indices.squeeze(), :], pt2[topK_indices.squeeze(), :]
            pts1_list.append(pt1_topK)
            pts2_list.append(pt2_topK)
        return torch.stack(xs_topk_list), torch.stack(topK_indices_list), torch.stack(pts1_list), torch.stack(pts2_list)

    def forward(self, pts1, pts2, weights, if_print=False, matches_good_unique_nums=None):
        """Fit F three ways and return them in one dict:

        - 'out'/'residual': full weighted fit over all correspondences;
        - 'out_topK'/'residual_topK': fit on the top-20-weighted subset;
        - 'out_sample_selected_batch'/'weights_sample_selected_accu_batch':
          fits on 100 randomly sampled 20-correspondence subsets per sample
          (weights used as the sampling distribution), for the sample loss.
        NOTE(review): the sampling branch uses np.random.choice, so outputs
        are stochastic unless the global numpy seed is fixed.
        """
        out, residual = self.weighted_svd(pts1, pts2, weights, if_print=if_print)
        out_dict = {'out': out, 'residual': residual}
        # if not(self.if_sample_loss):
        #     return out, residual, None, None

        topK = 20
        selects_each_sample = 100
        # print(weights.size()) # [B, 1, N]
        weights_topK, indices_topK, pts1_topK, pts2_topK = self.get_unique(weights, topK, matches_good_unique_nums, pts1, pts2)
        # print(indices_topK, indices_topK.size())
        # print(indices_topK.size()) # [8, 10]
        weights_mask = torch.zeros(weights.size(0), weights.size(2), device=weights.device).float() # [B, topK]
        # print(indices_topK.size(), torch.max(indices_topK), weights_mask.size())
        weights_mask = weights_mask.scatter_(1, indices_topK, 1.)
        # print(torch.sum(weights_mask, dim=1))

        # print(pts1.size(), weights.size(), indices_topK.size()) # torch.Size([8, 1000, 3]) torch.Size([8, 1, 1000]) torch.Size([8, 100])
        pts1_topK = torch.gather(pts1, 1, indices_topK.unsqueeze(-1).expand(-1, -1, 3))
        pts2_topK = torch.gather(pts2, 1, indices_topK.unsqueeze(-1).expand(-1, -1, 3))
        weights_topK = torch.gather(weights, 2, indices_topK.unsqueeze(1))
        # a = torch.index_select(pts1, 1, indices_topK.unsqueeze(-1))
        # mask_select = weights_mask.byte().unsqueeze(-1)
        # a = torch.masked_select(pts1, mask_select)

        # out_topK, residual_topK = self.weighted_svd(pts1_topK, pts2_topK, weights_topK, if_print=if_print)
        out_topK, residual_topK = self.weighted_svd_batch(pts1_topK, pts2_topK, weights_topK, if_print=if_print)
        out_dict.update({'out_topK': out_topK, 'residual_topK': residual_topK})

        # out, residual = self.weighted_svd(pts1, pts2, weights * weights_mask.unsqueeze(1), if_print=if_print)

        out_sample_selected_list = []
        weights_sample_selected_accu_list = []
        for batch_idx, (matches_good_unique_num, weights_sample) in enumerate(zip(matches_good_unique_nums.cpu().numpy(), weights.detach().cpu().numpy())):
            selected_corres_idx_per_sample_list = []
            # Use the (normalized) weights as a sampling distribution over
            # the unique matches.
            p = weights_sample.flatten()[:matches_good_unique_num]
            p = p / np.sum(p)
            for select_idx in range(selects_each_sample):
                selected_corres_idx = np.random.choice(matches_good_unique_num, topK, p=p)
                # selected_corres_idx = np.random.choice(matches_good_unique_num, topK)
                selected_corres_idx_per_sample_list.append(selected_corres_idx)
            selected_corres_idx_per_sample = np.stack(selected_corres_idx_per_sample_list) # [selects_each_sample, topK]

            pts1_sample = pts1[batch_idx:batch_idx+1].expand(selects_each_sample, -1, -1)
            pts1_sample_selected = torch.gather(pts1_sample, 1, torch.from_numpy(selected_corres_idx_per_sample).unsqueeze(-1).expand(-1, -1, 3).cuda()) # [selects_each_sample, topK, 3]
            pts2_sample = pts2[batch_idx:batch_idx+1].expand(selects_each_sample, -1, -1)
            pts2_sample_selected = torch.gather(pts2_sample, 1, torch.from_numpy(selected_corres_idx_per_sample).unsqueeze(-1).expand(-1, -1, 3).cuda()) # [selects_each_sample, topK, 3]
            weights_sample = weights[batch_idx:batch_idx+1].expand(selects_each_sample, -1, -1)
            weights_sample_selected = torch.gather(weights_sample, 2, torch.from_numpy(selected_corres_idx_per_sample).unsqueeze(1).cuda()) # [selects_each_sample, 1, topK]
            weights_sample_selected_normalized = torch.nn.functional.normalize(weights_sample_selected, p=1, dim=2) # [selects_each_sample, 1, topK]
            # Product of (scaled) weights as the confidence of each sampled
            # subset, normalized over the subsets.
            weights_sample_selected_accu = torch.prod(weights_sample_selected * 1000., dim=2) # [selects_each_sample, 1]
            weights_sample_selected_accu = weights_sample_selected_accu / (torch.sum(weights_sample_selected_accu)+1e-10)
            # print(weights_sample_selected_accu, torch.sum(weights_sample_selected_accu))
            weights_sample_selected_accu_list.append(weights_sample_selected_accu)

            # out_sample_selected, _ = self.weighted_svd(pts1_sample_selected, pts2_sample_selected, weights_sample_selected_normalized, if_print=False) # [selects_each_sample, 3, 3]
            out_sample_selected, _ = self.weighted_svd_batch(pts1_sample_selected, pts2_sample_selected, weights_sample_selected, if_print=False) # [selects_each_sample, 3, 3]
            out_sample_selected_list.append(out_sample_selected)

        out_sample_selected_batch = torch.stack(out_sample_selected_list) # [B, selects_each_sample, 3, 3]
        weights_sample_selected_accu_batch = torch.stack(weights_sample_selected_accu_list) # [B, selects_each_sample, 1]

        # return out_topK, residual_topK, out_sample_selected_batch, weights_sample_selected_accu_batch
        out_dict.update({'out_sample_selected_batch': out_sample_selected_batch, 'weights_sample_selected_accu_batch': weights_sample_selected_accu_batch})
        return out_dict
class Norm8PointNet(nn.Module):
    """Iterative deep fundamental-matrix estimator (IRLS-style).

    Predicts per-correspondence weights with an ErrorEstimator network, fits F
    with the differentiable weighted 8-point layer (``Fit``), then repeats
    ``depth``-1 times, feeding the previous weights and epipolar residuals
    back into the weight network. Optional variants: descriptor features
    (``if_des``), learned coordinate offsets (``if_learn_offsets``),
    triangulated-depth features (``if_tri_depth``), and a sampled-subset loss
    (``if_sample_loss``).
    """
    def __init__(self, depth, image_size, if_quality, if_goodCorresArch=False, if_tri_depth=False, if_sample_loss=False, if_learn_offsets=False, if_des=False, des_size=None, quality_size=0, is_cuda=True, is_test=False, if_cpu_svd=False, **params):
        super(Norm8PointNet, self).__init__()
        print('====Loading Norm8PointNet@DeepFNetSampleLoss.py')
        if not if_quality:
            quality_size = 0
        self.if_quality = if_quality
        if if_quality:
            print('----Quality!!!!!!@Norm8PointNet')
        if if_learn_offsets:
            print('----if_learn_offsets!!!!!!@Norm8PointNet')
        print('----CPU svd@Norm8PointNet!!!!!!' if if_cpu_svd else '----GPU svd@Norm8PointNet!!!!!!')
        self.if_des = if_des
        self.if_goodCorresArch = if_goodCorresArch
        self.if_learn_offsets = if_learn_offsets
        self.image_size = image_size # list of [H, W, 3]
        self.if_tri_depth = if_tri_depth
        # Extra input channel for the triangulated depth when enabled.
        self.depth_size = 1 if self.if_tri_depth else 0
        if if_tri_depth:
            print('----Tri depth!!!!!!@Norm8PointNet')
        self.if_sample_loss = if_sample_loss
        if if_sample_loss:
            print('----if_sample_loss!!!!!!@Norm8PointNet')

        # Input feature sizes: 4 coords (+quality, +descriptor); the update
        # networks additionally take previous weights / residuals (and depth).
        if if_des:
            # self.input_weights = ErrorEstimatorDes(4+quality_size, des_size)
            # self.update_weights = ErrorEstimatorDes(6+quality_size, des_size)
            # self.input_weights = ErrorEstimatorFeatFusion(4+quality_size, des_size)
            # self.update_weights = ErrorEstimatorFeatFusion(6+quality_size+1, des_size) # +1 for the added in residual
            # if if_learn_offsets:
            #     self.update_offsets = ErrorEstimatorFeatFusion(6+quality_size+1, des_size, output_size=4) # +1 for the added in residual
            self.input_weights = ErrorEstimator(4+quality_size+des_size)
            self.update_weights = ErrorEstimator(6+quality_size+1+des_size) # +1 for the added in residual
            # self.input_weights = ErrorEstimatorFeatFusion2Head(4+quality_size, des_size)
            # self.update_weights = ErrorEstimatorFeatFusion2Head(6+quality_size+1, des_size) # +1 for the added in residual
            if if_learn_offsets:
                self.update_offsets = ErrorEstimator(6+quality_size+1+des_size, output_size=4) # +1 for the added in residual
            print('----DES feat@Norm8PointNet!!!!!!')
        else:
            if self.if_goodCorresArch:
                print('----goodCorresArch@Norm8PointNet!!!!!!')
                self.input_weights = GoodCorresNet(4+quality_size, bn=False)
                self.update_weights = GoodCorresNet(6+quality_size, bn=False)
            else:
                self.input_weights = ErrorEstimator(4+quality_size)
                self.update_weights = ErrorEstimator(4+quality_size+3+self.depth_size) # +3 for weights, epi_res and redisual, +1 for tri depth!
                if if_learn_offsets:
                    self.update_offsets = ErrorEstimator(4+quality_size+2+self.depth_size, output_size=4, if_bn=False) # +1 for the added in residual

        if is_test:
            self.input_weights.eval()
            self.update_weights.eval()
            if if_learn_offsets:
                self.update_offsets.eval()

        self.norm = NormalizeAndExpand(is_cuda, is_test)
        self.norm_K = NormalizeAndExpand_K(is_cuda, is_test)
        self.norm_HW = NormalizeAndExpand_HW(self.image_size, is_cuda, is_test)
        self.fit = Fit(is_cuda, is_test, if_cpu_svd, if_sample_loss=if_sample_loss)
        self.depth = depth

        # Rank-2 projection mask (kept for parity with Fit; unused here).
        self.mask = Variable(torch.ones(3)).cuda()
        self.mask[-1] = 0

    def get_input(self, data_batch, offsets=None, iter=None):
        """Build the weight-network input from raw matches.

        Normalizes the correspondences to [-1, 1] via the image size
        (``norm_HW``), optionally applies learned coordinate offsets, and
        concatenates both point sets (rescaled to [0, 1]) plus the optional
        quality channel. Returns (weight_in, pts1, pts2, T1, T2).
        """
        pts = data_batch['matches_xy_ori']  # assumed [B, N, 4] — TODO confirm
        if offsets is not None:
            # print('------ ', iter)
            # print(pts.permute(0, 2, 1)[0, :2, :].clone().detach().cpu().numpy())
            # print(offsets[0, :2, :].clone().detach().cpu().numpy())
            pts = pts + offsets.permute(0, 2, 1)
        # pts1, pts2, T1, T2 = self.norm(pts) # pts: [b, N, 2] # \in [-1, 1]
        # pts1, pts2, T1, T2 = self.norm_K(pts, data_batch['K_invs']) # pts: [b, N, 2] # \in [-1, 1]
        pts1, pts2, T1, T2 = self.norm_HW(pts)
        # print(pts1.max(-1)[0].max(0)[0], pts1.min(-1)[0].min(0)[0])
        # pts1_recover = torch.inverse(T1) @ pts1
        # print(pts1_recover.max(-1)[0].max(0)[0], pts1_recover.min(-1)[0].min(0)[0])
        pts1 = pts1.permute(0,2,1)
        pts2 = pts2.permute(0,2,1)
        if self.if_quality:
            quality = data_batch['quality']
            weight_in = torch.cat(((pts1[:,:,:2]+1)/2, (pts2[:,:,:2]+1)/2, quality), 2).permute(0,2,1) # [0, 1]
        else:
            weight_in = torch.cat(((pts1[:,:,:2]+1)/2, (pts2[:,:,:2]+1)/2), 2).permute(0,2,1) # [0, 1]

        # if self.if_quality:
        #     quality = data_batch['quality']
        #     weight_in = torch.cat((pts1[:,:,:2], pts2[:,:,:2], quality), 2).permute(0,2,1) # [0, 1]
        # else:
        #     weight_in = torch.cat((pts1[:,:,:2], pts2[:,:,:2]), 2).permute(0,2,1) # [0, 1]

        # f1 = data_batch['Ks'][:, 0, 0]
        # f2 = data_batch['Ks'][:, 1, 1]
        # w2 = data_batch['Ks'][:, 0, 2]
        # h2 = data_batch['Ks'][:, 1, 2]
        # print(w2/f1)
        # print(h2/f2)
        # print(f1, f2)

        return weight_in, pts1, pts2, T1, T2

    def get_depth(self, data_batch, F_out, T1, T2):
        """Triangulate per-match depths from the current F estimate.

        Un-normalizes F, converts to E via the intrinsics, recovers (R, t)
        with OpenCV, triangulates each correspondence, and returns the
        z-coordinates as a [B, 1, N] CUDA tensor. Runs on CPU/numpy per
        sample (non-differentiable).
        """
        F_ests = T2.permute(0,2,1) @ F_out @ T1
        E_ests = data_batch['Ks'].transpose(1, 2) @ F_ests @ data_batch['Ks']
        depth_list = []
        for E_hat, K, match in zip(E_ests, data_batch['Ks'], data_batch['matches_xy_ori']):
            K = K.cpu().numpy()
            p1p2 = match.cpu().numpy()
            x1 = p1p2[:, :2]
            x2 = p1p2[:, 2:]
            num_inlier, R, t, mask_new = cv2.recoverPose(E_hat.detach().cpu().numpy().astype(np.float64), x1, x2, focal=K[0, 0], pp=(K[0, 2], K[1, 2]))
            R1 = np.eye(3)
            t1 = np.zeros((3, 1))
            M1 = np.hstack((R1, t1))
            M2 = np.hstack((R, t))
            # print(np.linalg.norm(t))
            X_tri_homo = cv2.triangulatePoints(np.matmul(K, M1), np.matmul(K, M2), x1.T, x2.T)
            X_tri = X_tri_homo[:3, :]/X_tri_homo[-1, :]
            depth = X_tri[-1, :].T
            depth_list.append(depth)
            # print(depth.flatten()[:10])
        depths = np.stack(depth_list) # [B, N]
        return torch.from_numpy(depths).unsqueeze(1).float().cuda()

    def forward(self, data_batch):
        """Run ``depth`` rounds of weight prediction + weighted 8-point fit.

        Returns a dict with the final F estimate ('F_est'), per-iteration
        outputs (weights/logits/residual/epipolar-residual layers, plus the
        top-K and sampled-subset fits from ``Fit``), the normalization
        transforms T1/T2, and optional offsets / triangulated depths.
        """
        pts_normalized_in, pts1, pts2, T1, T2 = self.get_input(data_batch)
        if self.if_des:
            # des1, des2 = data_batch['feats_im1'], data_batch['feats_im2'] # [B, D, N]
            # des_in = torch.cat((des1, des2), 1)
            # des_in = data_batch['feats_im12_var']
            des_in = data_batch['feats_im12_groupConv']
            # logits = self.input_weights(pts_normalized_in, des_in)
            logits = self.input_weights(torch.cat((pts_normalized_in, des_in), 1))
        else:
            logits = self.input_weights(pts_normalized_in)
        weights = F.softmax(logits, dim=2)
        # weights = torch.sigmoid(logits)

        matches_good_unique_nums = data_batch['matches_good_unique_nums'] # [B]
        # matches_good_unique_num = None

        if self.if_tri_depth:
            t_scene_scale = data_batch['t_scene_scale']

        out_layers = []
        out_topK_layers = []
        epi_res_layers = []
        residual_layers = []
        weights_layers = [weights]
        logits_layers = [logits]
        out_sample_selected_batch_layers = []
        weights_sample_selected_accu_batch_layers = []

        # Iterative refinement: each round re-fits F, then updates weights
        # (and optionally offsets) from the new epipolar residuals.
        for iter in range(self.depth-1):
            out_dict = self.fit(pts1, pts2, weights, matches_good_unique_nums=matches_good_unique_nums)
            out, residual = out_dict['out'], out_dict['residual']
            residual_layers.append(residual)
            out_layers.append(out)
            out_topK_layers.append(out_dict['out_topK'])
            out_sample_selected_batch_layers.append(out_dict['out_sample_selected_batch'])
            weights_sample_selected_accu_batch_layers.append(out_dict['weights_sample_selected_accu_batch'])

            if self.if_tri_depth:
                tri_depths = self.get_depth(data_batch, out, T1, T2) # [B, 1, N]
                # Clamp scaled depths to suppress triangulation outliers.
                tri_depths = torch.clamp(tri_depths * t_scene_scale, -150., 150.)

            epi_res = utils_F.compute_epi_residual(pts1, pts2, out).unsqueeze(1)
            epi_res_layers.append(epi_res)

            if self.if_tri_depth:
                net_in = torch.cat((pts_normalized_in, weights, epi_res, tri_depths), 1)
            else:
                # net_in = torch.cat((pts_normalized_in, weights, epi_res), 1)
                net_in = torch.cat((pts_normalized_in, weights, epi_res), 1)

            if self.if_learn_offsets:
                if self.if_des:
                    offsets = self.update_offsets(net_in, des_in)
                else:
                    offsets = self.update_offsets(net_in)
                # if iter == 0:
                offsets_accu = offsets
                # else:
                #     offsets_accu += offsets

                # Re-normalize inputs with the (accumulated) offsets applied.
                pts_normalized_in, pts1, pts2, T1, T2 = self.get_input(data_batch, offsets_accu, iter)
                if self.if_tri_depth:
                    net_in = torch.cat((pts_normalized_in, weights, epi_res, tri_depths), 1)
                else:
                    # net_in = torch.cat((pts_normalized_in, weights, epi_res), 1)
                    net_in = torch.cat((pts_normalized_in, weights, epi_res), 1)

            if self.if_des:
                logits = self.update_weights(net_in, des_in)
            else:
                logits = self.update_weights(net_in)

            weights = F.softmax(logits, dim=2)
            # weights = torch.sigmoid(logits)
            weights_layers.append(weights)
            logits_layers.append(logits)

        # Final fit with the last-round weights.
        out_dict = self.fit(pts1, pts2, weights, matches_good_unique_nums=matches_good_unique_nums)
        out, residual = out_dict['out'], out_dict['residual']
        residual_layers.append(residual)
        out_layers.append(out)
        out_topK_layers.append(out_dict['out_topK'])
        out_sample_selected_batch_layers.append(out_dict['out_sample_selected_batch'])
        weights_sample_selected_accu_batch_layers.append(out_dict['weights_sample_selected_accu_batch'])

        preds = {
            # "cls_logit": cls_logit,
            "logits": logits.squeeze(1), # [batch_size, N]
            'logits_layers': logits_layers,
            'F_est': out,
            'epi_res_layers': epi_res_layers,
            'T1': T1,
            'T2': T2,
            'out_layers': out_layers,
            'out_topK_layers': out_topK_layers,
            'pts1': pts1,
            'pts2': pts2,
            'weights': weights,
            'residual_layers': residual_layers,
            'weights_layers': weights_layers,
            'out_sample_selected_batch_layers': out_sample_selected_batch_layers,
            'weights_sample_selected_accu_batch_layers': weights_sample_selected_accu_batch_layers
        }
        if self.if_learn_offsets:
            preds.update({'offsets': offsets_accu})
        if self.if_tri_depth:
            preds.update({'tri_depths': tri_depths})
        return preds
# class Norm8PointNet_bkg(nn.Module):
# def __init__(self, depth, if_quality, if_goodCorresArch=False, if_learn_offsets=False, if_des=False, des_size=None, quality_size=0, is_cuda=True, is_test=False, if_cpu_svd=False, **params):
# super(Norm8PointNet, self).__init__()
# print('====Loading Norm8PointNet@<EMAIL>F<EMAIL>.py')
# if not if_quality:
# quality_size = 0
# self.if_quality = if_quality
# if if_quality:
# print('----Quality!!!!!!')
# self.if_des = if_des
# self.if_goodCorresArch = if_goodCorresArch
# if if_des:
# # self.input_weights = ErrorEstimatorDes(4+quality_size, des_size)
# # self.update_weights = ErrorEstimatorDes(6+quality_size, des_size)
# self.input_weights = ErrorEstimatorFeatFusion(4+quality_size, des_size*2)
# self.update_weights = ErrorEstimatorFeatFusion(6+quality_size, des_size*2)
# if if_learn_offsets:
# self.update_offsets = ErrorEstimatorFeatFusion(6+quality_size+1, des_size*2, output_size=4, if_bn=False)
# print('----DES feat@Norm8PointNet!!!!!!')
# else:
# if self.if_goodCorresArch:
# print('----goodCorresArch@Norm8PointNet!!!!!!')
# self.input_weights = GoodCorresNet(4+quality_size)
# self.update_weights = GoodCorresNet(6+quality_size)
# else:
# self.input_weights = ErrorEstimator(4+quality_size)
# self.update_weights = ErrorEstimator(6+quality_size)
# if if_learn_offsets:
# self.update_offsets = ErrorEstimator(6+quality_size+1, output_size=4, if_bn=False)
# if is_test:
# self.input_weights.eval()
# self.update_weights.eval()
# if if_learn_offsets:
# self.update_offsets.eval()
# self.norm = NormalizeAndExpand(is_cuda, is_test)
# self.norm_K = NormalizeAndExpand_K(is_cuda, is_test)
# self.fit = Fit(is_cuda, is_test, if_cpu_svd)
# print('----CPU svd!!!!!!' if if_cpu_svd else '----GPU svd!!!!!!')
# self.depth = depth
# self.mask = Variable(torch.ones(3)).cuda()
# self.mask[-1] = 0
# def forward(self, data_batch):
# pts = data_batch['matches_xy_ori']
# # pts1, pts2, T1, T2 = self.norm(pts) # pts: [b, N, 2] # \in [-1, 1]
# pts1, pts2, T1, T2 = self.norm_K(pts, data_batch['K_invs']) # pts: [b, N, 2] # \in [-1, 1]
# if self.if_des:
# # des1, des2 = data_batch['des1'].transpose(1, 2), data_batch['des2'].transpose(1, 2)
# des1, des2 = data_batch['feats_im1'], data_batch['feats_im2'] # [B, D, N]
# des_in = torch.cat((des1, des2), 1)
# pts1 = pts1.permute(0,2,1)
# pts2 = pts2.permute(0,2,1)
# if self.if_quality:
# quality = data_batch['quality']
# weight_in = torch.cat(((pts1[:,:,:2]+1)/2, (pts2[:,:,:2]+1)/2, quality), 2).permute(0,2,1) # [0, 1]
# else:
# weight_in = torch.cat(((pts1[:,:,:2]+1)/2, (pts2[:,:,:2]+1)/2), 2).permute(0,2,1) # [0, 1]
# if self.if_des:
# logits = self.input_weights(weight_in, des_in)
# else:
# logits = self.input_weights(weight_in)
# weights = F.softmax(logits, dim=2)
# # weights = torch.sigmoid(logits)
# out_layers = []
# epi_res_layers = []
# residual_layers = []
# weights_layers = [weights]
# for iter in range(self.depth-1):
# out, residual = self.fit(pts1, pts2, weights)
# out_layers.append(out)
# residual_layers.append(residual)
# epi_res = utils_F.compute_epi_residual(pts1, pts2, out).unsqueeze(1)
# epi_res_layers.append(epi_res)
# net_in = torch.cat((weight_in, weights, epi_res), 1)
# if self.if_des:
# logits = self.update_weights(net_in, des_in)
# else:
# logits = self.update_weights(net_in)
# weights = F.softmax(logits, dim=2)
# # weights = torch.sigmoid(logits)
# weights_layers.append(weights)
# out, residual = self.fit(pts1, pts2, weights, if_print=False)
# residual_layers.append(residual)
# preds = {
# # "cls_logit": cls_logit,
# "logits": logits.squeeze(1), # [batch_size, N]
# 'F_est': out,
# 'epi_res_layers': epi_res_layers,
# 'T1': T1,
# 'T2': T2,
# 'out_layers': out_layers,
# 'pts1': pts1,
# 'pts2': pts2,
# 'weights': weights,
# 'residual_layers': residual_layers,
# 'weights_layers': weights_layers
# }
# return preds
# class Norm8PointNetMixWeights(nn.Module):
# def __init__(self, depth, if_quality, if_des, if_goodCorresArch, quality_size=0, is_cuda=True, is_test=False):
# super(Norm8PointNetMixWeights, self).__init__()
# if not if_quality:
# quality_size = 0
# self.if_quality = if_quality
# if if_quality:
# print('----Quality!!!!!!')
# self.if_des = if_des
# self.if_goodCorresArch = if_goodCorresArch
# self.input_weights = ErrorEstimator(4+quality_size)
# self.update_weights = ErrorEstimator(6+quality_size)
# if is_test:
# self.input_weights.eval()
# self.update_weights.eval()
# self.norm = NormalizeAndExpand(is_cuda, is_test)
# self.norm_K = NormalizeAndExpand_K(is_cuda, is_test)
# self.fit = Fit(is_cuda, is_test)
# self.depth = depth
# self.mask = Variable(torch.ones(3)).cuda()
# self.mask[-1] = 0
# def forward(self, data_batch):
# pts = data_batch['matches_xy_ori']
# # pts1, pts2, T1, T2 = self.norm(pts) # pts: [b, N, 2] # \in [-1, 1]
# pts1, pts2, T1, T2 = self.norm_K(pts, data_batch['K_invs']) # pts: [b, N, 2] # \in [-1, 1]
# pts1 = pts1.permute(0,2,1)
# pts2 = pts2.permute(0,2,1)
# weights_im1 = data_batch['feats_im1'] # [B, 1, N]
# weights_im2 = data_batch['feats_im2'] # [B, 1, N]
# if self.if_quality:
# quality = data_batch['quality']
# weight_in = torch.cat(((pts1[:,:,:2]+1)/2, (pts2[:,:,:2]+1)/2, quality), 2).permute(0,2,1) # [B, D, N]
# else:
# weight_in = torch.cat(((pts1[:,:,:2]+1)/2, (pts2[:,:,:2]+1)/2), 2).permute(0,2,1) # [0, 1]
# logits = self.input_weights(weight_in)
# weights = F.softmax(logits, dim=2) # [B, 1, N]
# weights = weights * weights_im1 * weights_im2
# out_a = []
# for iter in range(self.depth-1):
# out = self.fit(pts1, pts2, weights)
# out_a.append(out)
# res = utils_F.compute_epi_residual(pts1, pts2, out, clamp_at=0.05).unsqueeze(1)
# # res_np = res.detach().cpu().numpy().squeeze()
# # print(res_np, res_np.shape, np.amax(res_np, 1), np.amin(res_np, 1), np.mean(res_np, 1), np.median(res_np, 1))
# net_in = torch.cat((weight_in, weights, res), 1)
# logits = self.update_weights(net_in)
# weights = F.softmax(logits, dim=2) * weights_im1 * weights_im2
# out = self.fit(pts1, pts2, weights)
# preds = {
# # "cls_logit": cls_logit,
# "logits": logits.squeeze(1), # [batch_size, N]
# 'F_est': out,
# 'res': weights.squeeze(1),
# 'T1': T1,
# 'T2': T2,
# 'out_a': out_a,
# 'pts1': pts1,
# 'pts2': pts2,
# 'weights': weights
# }
# return preds
# class NWeightMixer(nn.Module):
# def __init__(self, input_size):
# super(NWeightMixer, self).__init__()
# inplace = True
# hasbias = True
# learn_affine = True
# self.fw = nn.Sequential(
# nn.Conv1d(input_size, 16, kernel_size=1, bias=hasbias),
# # nn.InstanceNorm1d(64, affine=learn_affine),
# nn.LeakyReLU(inplace=inplace),
# nn.Conv1d(16,32, kernel_size=1, bias=hasbias),
# # nn.InstanceNorm1d(128, affine=learn_affine),
# nn.LeakyReLU(inplace=inplace),
# nn.Conv1d(32,16,kernel_size=1, bias=hasbias),
# # nn.InstanceNorm1d(1024, affine=learn_affine),
# nn.LeakyReLU(inplace=inplace),
# nn.Conv1d(16,1, kernel_size=1, bias=hasbias),
# nn.
# def forward(self, data):
# # print('ErrorEstimator')
# return self.fw(data)
| [
"numpy.sum",
"torch.cat",
"torch.nn.functional.normalize",
"torch.ones",
"numpy.random.choice",
"torch.zeros",
"dsac_tools.utils_F.compute_epi_residual",
"torch.mean",
"numpy.stack",
"torch.topk",
"torch.svd",
"numpy.hstack",
"torch.clamp",
"torch.sum",
"torch.from_numpy",
"torch.ones_... | [((1529, 1554), 'torch.cat', 'torch.cat', (['(pts, ones)', '(2)'], {}), '((pts, ones), 2)\n', (1538, 1554), False, 'import torch\n'), ((1568, 1586), 'torch.mean', 'torch.mean', (['pts', '(1)'], {}), '(pts, 1)\n', (1578, 1586), False, 'import torch\n'), ((2651, 2676), 'torch.cat', 'torch.cat', (['(pts, ones)', '(2)'], {}), '((pts, ones), 2)\n', (2660, 2676), False, 'import torch\n'), ((3708, 3733), 'torch.cat', 'torch.cat', (['(pts, ones)', '(2)'], {}), '((pts, ones), 2)\n', (3717, 3733), False, 'import torch\n'), ((6829, 6853), 'torch.ones_like', 'torch.ones_like', (['weights'], {}), '(weights)\n', (6844, 6853), False, 'import torch\n'), ((9346, 9370), 'torch.ones_like', 'torch.ones_like', (['weights'], {}), '(weights)\n', (9361, 9370), False, 'import torch\n'), ((10048, 10060), 'batch_svd.batch_svd', 'batch_svd', (['X'], {}), '(X)\n', (10057, 10060), False, 'from batch_svd import batch_svd\n'), ((10119, 10174), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['Vs[:, :, -1]'], {'p': '(2)', 'dim': '(1)'}), '(Vs[:, :, -1], p=2, dim=1)\n', (10148, 10174), False, 'import torch\n'), ((10196, 10209), 'batch_svd.batch_svd', 'batch_svd', (['Fs'], {}), '(Fs)\n', (10205, 10209), False, 'from batch_svd import batch_svd\n'), ((17572, 17609), 'torch.stack', 'torch.stack', (['out_sample_selected_list'], {}), '(out_sample_selected_list)\n', (17583, 17609), False, 'import torch\n'), ((17688, 17734), 'torch.stack', 'torch.stack', (['weights_sample_selected_accu_list'], {}), '(weights_sample_selected_accu_list)\n', (17699, 17734), False, 'import torch\n'), ((24508, 24528), 'numpy.stack', 'np.stack', (['depth_list'], {}), '(depth_list)\n', (24516, 24528), True, 'import numpy as np\n'), ((25233, 25257), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(2)'}), '(logits, dim=2)\n', (25242, 25257), True, 'import torch.nn.functional as F\n'), ((1151, 1172), 'torch.ones', 'torch.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (1161, 1172), 
False, 'import torch\n'), ((1220, 1240), 'torch.zeros', 'torch.zeros', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (1231, 1240), False, 'import torch\n'), ((2302, 2323), 'torch.ones', 'torch.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (2312, 2323), False, 'import torch\n'), ((2371, 2391), 'torch.zeros', 'torch.zeros', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (2382, 2391), False, 'import torch\n'), ((3200, 3221), 'torch.ones', 'torch.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (3210, 3221), False, 'import torch\n'), ((4389, 4402), 'torch.ones', 'torch.ones', (['(3)'], {}), '(3)\n', (4399, 4402), False, 'import torch\n'), ((5303, 5330), 'torch.sum', 'torch.sum', (['(pts * weights)', '(1)'], {}), '(pts * weights, 1)\n', (5312, 5330), False, 'import torch\n'), ((7447, 7486), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['p'], {'dim': '(2)'}), '(p, dim=2)\n', (7476, 7486), False, 'import torch\n'), ((8380, 8399), 'torch.cat', 'torch.cat', (['out_b', '(0)'], {}), '(out_b, 0)\n', (8389, 8399), False, 'import torch\n'), ((8421, 8445), 'torch.stack', 'torch.stack', (['F_vecs_list'], {}), '(F_vecs_list)\n', (8432, 8445), False, 'import torch\n'), ((9964, 10003), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['p'], {'dim': '(2)'}), '(p, dim=2)\n', (9993, 10003), False, 'import torch\n'), ((12499, 12532), 'torch.topk', 'torch.topk', (['x_unique', 'topk'], {'dim': '(1)'}), '(x_unique, topk, dim=1)\n', (12509, 12532), False, 'import torch\n'), ((12831, 12856), 'torch.stack', 'torch.stack', (['xs_topk_list'], {}), '(xs_topk_list)\n', (12842, 12856), False, 'import torch\n'), ((12858, 12888), 'torch.stack', 'torch.stack', (['topK_indices_list'], {}), '(topK_indices_list)\n', (12869, 12888), False, 'import torch\n'), ((12890, 12912), 'torch.stack', 'torch.stack', (['pts1_list'], {}), '(pts1_list)\n', (12901, 12912), False, 'import torch\n'), ((12914, 12936), 'torch.stack', 'torch.stack', (['pts2_list'], {}), '(pts2_list)\n', (12925, 
12936), False, 'import torch\n'), ((15637, 15682), 'numpy.stack', 'np.stack', (['selected_corres_idx_per_sample_list'], {}), '(selected_corres_idx_per_sample_list)\n', (15645, 15682), True, 'import numpy as np\n'), ((16583, 16649), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['weights_sample_selected'], {'p': '(1)', 'dim': '(2)'}), '(weights_sample_selected, p=1, dim=2)\n', (16612, 16649), False, 'import torch\n'), ((16727, 16778), 'torch.prod', 'torch.prod', (['(weights_sample_selected * 1000.0)'], {'dim': '(2)'}), '(weights_sample_selected * 1000.0, dim=2)\n', (16737, 16778), False, 'import torch\n'), ((24071, 24080), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (24077, 24080), True, 'import numpy as np\n'), ((24098, 24114), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (24106, 24114), True, 'import numpy as np\n'), ((24132, 24151), 'numpy.hstack', 'np.hstack', (['(R1, t1)'], {}), '((R1, t1))\n', (24141, 24151), True, 'import numpy as np\n'), ((24169, 24186), 'numpy.hstack', 'np.hstack', (['(R, t)'], {}), '((R, t))\n', (24178, 24186), True, 'import numpy as np\n'), ((27924, 27948), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(2)'}), '(logits, dim=2)\n', (27933, 27948), True, 'import torch.nn.functional as F\n'), ((4330, 4350), 'torch.zeros', 'torch.zeros', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (4341, 4350), False, 'import torch\n'), ((7794, 7806), 'torch.svd', 'torch.svd', (['F'], {}), '(F)\n', (7803, 7806), False, 'import torch\n'), ((8093, 8108), 'torch.svd', 'torch.svd', (['X[b]'], {}), '(X[b])\n', (8102, 8108), False, 'import torch\n'), ((8234, 8246), 'torch.svd', 'torch.svd', (['F'], {}), '(F)\n', (8243, 8246), False, 'import torch\n'), ((15265, 15274), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (15271, 15274), True, 'import numpy as np\n'), ((15371, 15423), 'numpy.random.choice', 'np.random.choice', (['matches_good_unique_num', 'topK'], {'p': 'p'}), '(matches_good_unique_num, topK, p=p)\n', 
(15387, 15423), True, 'import numpy as np\n'), ((20628, 20669), 'models.GoodCorresNet.GoodCorresNet', 'GoodCorresNet', (['(4 + quality_size)'], {'bn': '(False)'}), '(4 + quality_size, bn=False)\n', (20641, 20669), False, 'from models.GoodCorresNet import GoodCorresNet\n'), ((20706, 20747), 'models.GoodCorresNet.GoodCorresNet', 'GoodCorresNet', (['(6 + quality_size)'], {'bn': '(False)'}), '(6 + quality_size, bn=False)\n', (20719, 20747), False, 'from models.GoodCorresNet import GoodCorresNet\n'), ((24273, 24289), 'numpy.matmul', 'np.matmul', (['K', 'M1'], {}), '(K, M1)\n', (24282, 24289), True, 'import numpy as np\n'), ((24291, 24307), 'numpy.matmul', 'np.matmul', (['K', 'M2'], {}), '(K, M2)\n', (24300, 24307), True, 'import numpy as np\n'), ((25098, 25139), 'torch.cat', 'torch.cat', (['(pts_normalized_in, des_in)', '(1)'], {}), '((pts_normalized_in, des_in), 1)\n', (25107, 25139), False, 'import torch\n'), ((26485, 26539), 'torch.clamp', 'torch.clamp', (['(tri_depths * t_scene_scale)', '(-150.0)', '(150.0)'], {}), '(tri_depths * t_scene_scale, -150.0, 150.0)\n', (26496, 26539), False, 'import torch\n'), ((26722, 26785), 'torch.cat', 'torch.cat', (['(pts_normalized_in, weights, epi_res, tri_depths)', '(1)'], {}), '((pts_normalized_in, weights, epi_res, tri_depths), 1)\n', (26731, 26785), False, 'import torch\n'), ((26908, 26959), 'torch.cat', 'torch.cat', (['(pts_normalized_in, weights, epi_res)', '(1)'], {}), '((pts_normalized_in, weights, epi_res), 1)\n', (26917, 26959), False, 'import torch\n'), ((4217, 4238), 'torch.ones', 'torch.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (4227, 4238), False, 'import torch\n'), ((4279, 4301), 'torch.zeros', 'torch.zeros', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (4290, 4301), False, 'import torch\n'), ((7935, 7954), 'torch.cat', 'torch.cat', (['out_b', '(0)'], {}), '(out_b, 0)\n', (7944, 7954), False, 'import torch\n'), ((7982, 8006), 'torch.stack', 'torch.stack', (['F_vecs_list'], {}), '(F_vecs_list)\n', (7993, 8006), False, 
'import torch\n'), ((16880, 16919), 'torch.sum', 'torch.sum', (['weights_sample_selected_accu'], {}), '(weights_sample_selected_accu)\n', (16889, 16919), False, 'import torch\n'), ((21680, 21693), 'torch.ones', 'torch.ones', (['(3)'], {}), '(3)\n', (21690, 21693), False, 'import torch\n'), ((22705, 22780), 'torch.cat', 'torch.cat', (['((pts1[:, :, :2] + 1) / 2, (pts2[:, :, :2] + 1) / 2, quality)', '(2)'], {}), '(((pts1[:, :, :2] + 1) / 2, (pts2[:, :, :2] + 1) / 2, quality), 2)\n', (22714, 22780), False, 'import torch\n'), ((22831, 22897), 'torch.cat', 'torch.cat', (['((pts1[:, :, :2] + 1) / 2, (pts2[:, :, :2] + 1) / 2)', '(2)'], {}), '(((pts1[:, :, :2] + 1) / 2, (pts2[:, :, :2] + 1) / 2), 2)\n', (22840, 22897), False, 'import torch\n'), ((26560, 26605), 'dsac_tools.utils_F.compute_epi_residual', 'utils_F.compute_epi_residual', (['pts1', 'pts2', 'out'], {}), '(pts1, pts2, out)\n', (26588, 26605), True, 'import dsac_tools.utils_F as utils_F\n'), ((27490, 27553), 'torch.cat', 'torch.cat', (['(pts_normalized_in, weights, epi_res, tri_depths)', '(1)'], {}), '((pts_normalized_in, weights, epi_res, tri_depths), 1)\n', (27499, 27553), False, 'import torch\n'), ((27688, 27739), 'torch.cat', 'torch.cat', (['(pts_normalized_in, weights, epi_res)', '(1)'], {}), '((pts_normalized_in, weights, epi_res), 1)\n', (27697, 27739), False, 'import torch\n'), ((3373, 3448), 'torch.tensor', 'torch.tensor', (['[[2.0 / W, 0.0, -1.0], [0.0, 2.0 / H, -1.0], [0.0, 0.0, 1.0]]'], {}), '([[2.0 / W, 0.0, -1.0], [0.0, 2.0 / H, -1.0], [0.0, 0.0, 1.0]])\n', (3385, 3448), False, 'import torch\n'), ((16431, 16479), 'torch.from_numpy', 'torch.from_numpy', (['selected_corres_idx_per_sample'], {}), '(selected_corres_idx_per_sample)\n', (16447, 16479), False, 'import torch\n'), ((24553, 24577), 'torch.from_numpy', 'torch.from_numpy', (['depths'], {}), '(depths)\n', (24569, 24577), False, 'import torch\n'), ((15867, 15915), 'torch.from_numpy', 'torch.from_numpy', (['selected_corres_idx_per_sample'], {}), 
'(selected_corres_idx_per_sample)\n', (15883, 15915), False, 'import torch\n'), ((16143, 16191), 'torch.from_numpy', 'torch.from_numpy', (['selected_corres_idx_per_sample'], {}), '(selected_corres_idx_per_sample)\n', (16159, 16191), False, 'import torch\n'), ((8782, 8811), 'torch.sum', 'torch.sum', (['weights[:2]'], {'dim': '(1)'}), '(weights[:2], dim=1)\n', (8791, 8811), False, 'import torch\n')] |
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import json
def tsne(data: np.ndarray, labels: list):
    """Embed *data* in 2-D with t-SNE and save a labelled scatter plot to 'x.jpg'."""
    reducer = TSNE(n_components=2)
    embedded = reducer.fit_transform(data)
    stacked = np.vstack((embedded.T, labels)).T
    frame = pd.DataFrame(stacked, columns=['Dim1', 'Dim2', 'label'])
    plt.figure(figsize=(8, 8))
    sns.scatterplot(data=frame, hue='label', x='Dim1', y='Dim2')
    plt.savefig('x.jpg', dpi=100)
def kmeans(data: np.ndarray, labels):
    """Cluster *data* with k-means and visualise the result via t-SNE.

    The data is first reduced to 64 dimensions with PCA, clustered into 8
    groups, then embedded in 2-D with t-SNE and plotted to 'x.jpg'.

    Parameters:
    - data: 2-D feature matrix, one row per sample.
    - labels: per-sample labels used only for colouring the scatter plot.

    Returns the per-sample cluster assignments (km.labels_).
    """
    km = KMeans(n_clusters=8,
                init='k-means++',
                n_init=10,
                max_iter=300,
                random_state=0)
    pca = PCA(64)
    data_pca = pca.fit_transform(data)
    km.fit(data_pca)
    # Reduce the PCA-transformed data with t-SNE and show the clustering result.
    # (The redundant in-function `from sklearn.manifold import TSNE` was removed;
    # TSNE is already imported at module level.)
    tsne = TSNE()
    X_tsne = tsne.fit_transform(data_pca)  # reduce to 2-D and return the embedding
    X_tsne_data = np.vstack((X_tsne.T, labels)).T
    df_tsne = pd.DataFrame(X_tsne_data, columns=['Dim1', 'Dim2', 'label'])
    # NOTE: t-SNE keeps row order, so labels align with the embedded points
    # positionally (the original index is not preserved as a DataFrame index).
    plt.figure(figsize=(8, 8))
    sns.scatterplot(data=df_tsne, hue='label', x='Dim1', y='Dim2')
    plt.savefig('x.jpg', dpi=100)
    return km.labels_
if __name__ == '__main__':
    # Load the feature matrix and one JSON record per line of 'train'.
    features = np.load('train.npy')
    labels = []
    with open('train') as fin:
        for line in fin:
            labels.append(json.loads(line.strip()))
    # Select only the samples flagged with the target attribute.
    idx = [i for i, rec in enumerate(labels) if rec['性感_胸部'] == 1]
    s_feature = features[idx]
    s_data = ['sexy_breast' for _ in idx]
    cluster_labels = kmeans(s_feature, s_data)
    # Write each selected record back out with its cluster assignment attached.
    with open('x.txt', 'w') as f:
        for lab, i in zip(cluster_labels, idx):
            record = labels[i]
            record['cluster_label'] = int(lab)
            f.write(json.dumps(record, ensure_ascii=False) + '\n')
| [
"pandas.DataFrame",
"numpy.load",
"sklearn.manifold.TSNE",
"seaborn.scatterplot",
"sklearn.cluster.KMeans",
"json.dumps",
"matplotlib.pyplot.figure",
"sklearn.decomposition.PCA",
"numpy.vstack",
"matplotlib.pyplot.savefig"
] | [((278, 298), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (282, 298), False, 'from sklearn.manifold import TSNE\n'), ((403, 463), 'pandas.DataFrame', 'pd.DataFrame', (['X_tsne_data'], {'columns': "['Dim1', 'Dim2', 'label']"}), "(X_tsne_data, columns=['Dim1', 'Dim2', 'label'])\n", (415, 463), True, 'import pandas as pd\n'), ((468, 494), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (478, 494), True, 'import matplotlib.pyplot as plt\n'), ((499, 561), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'df_tsne', 'hue': '"""label"""', 'x': '"""Dim1"""', 'y': '"""Dim2"""'}), "(data=df_tsne, hue='label', x='Dim1', y='Dim2')\n", (514, 561), True, 'import seaborn as sns\n'), ((566, 595), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""x.jpg"""'], {'dpi': '(100)'}), "('x.jpg', dpi=100)\n", (577, 595), True, 'import matplotlib.pyplot as plt\n'), ((645, 724), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(8)', 'init': '"""k-means++"""', 'n_init': '(10)', 'max_iter': '(300)', 'random_state': '(0)'}), "(n_clusters=8, init='k-means++', n_init=10, max_iter=300, random_state=0)\n", (651, 724), False, 'from sklearn.cluster import KMeans\n'), ((799, 806), 'sklearn.decomposition.PCA', 'PCA', (['(64)'], {}), '(64)\n', (802, 806), False, 'from sklearn.decomposition import PCA\n'), ((942, 948), 'sklearn.manifold.TSNE', 'TSNE', ([], {}), '()\n', (946, 948), False, 'from sklearn.manifold import TSNE\n'), ((1071, 1131), 'pandas.DataFrame', 'pd.DataFrame', (['X_tsne_data'], {'columns': "['Dim1', 'Dim2', 'label']"}), "(X_tsne_data, columns=['Dim1', 'Dim2', 'label'])\n", (1083, 1131), True, 'import pandas as pd\n'), ((1176, 1202), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (1186, 1202), True, 'import matplotlib.pyplot as plt\n'), ((1207, 1269), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'df_tsne', 'hue': '"""label"""', 
'x': '"""Dim1"""', 'y': '"""Dim2"""'}), "(data=df_tsne, hue='label', x='Dim1', y='Dim2')\n", (1222, 1269), True, 'import seaborn as sns\n'), ((1274, 1303), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""x.jpg"""'], {'dpi': '(100)'}), "('x.jpg', dpi=100)\n", (1285, 1303), True, 'import matplotlib.pyplot as plt\n'), ((1371, 1391), 'numpy.load', 'np.load', (['"""train.npy"""'], {}), "('train.npy')\n", (1378, 1391), True, 'import numpy as np\n'), ((357, 386), 'numpy.vstack', 'np.vstack', (['(X_tsne.T, labels)'], {}), '((X_tsne.T, labels))\n', (366, 386), True, 'import numpy as np\n'), ((1025, 1054), 'numpy.vstack', 'np.vstack', (['(X_tsne.T, labels)'], {}), '((X_tsne.T, labels))\n', (1034, 1054), True, 'import numpy as np\n'), ((1805, 1840), 'json.dumps', 'json.dumps', (['tmp'], {'ensure_ascii': '(False)'}), '(tmp, ensure_ascii=False)\n', (1815, 1840), False, 'import json\n')] |
#!/usr/bin/env python
#########################
# convert all FLUX_APER, MAG_APER, and associated errors, from 2D ldac columns to vector columns
import sys, unittest, re
import astropy, astropy.io.fits as pyfits, numpy
import ldac
#############################
__cvs_id__ = "$Id: convert_aper.py,v 1.1 2010-04-16 23:30:54 dapple Exp $"
#############################
############################################
# USER FUNCTIONS
############################################
# Matches any column name containing the substring 'APER'
# (e.g. FLUX_APER, MAG_APER, FLUXERR_APER7).
containsAper_regex = re.compile('APER')
def convertAperColumns(cat):
    """Return a copy of *cat* in which every 2-D *_APER column has been
    split into one scalar column per aperture; all other columns are kept
    unchanged. The sourcefile attribute is carried over."""
    columns = []
    for name in cat.keys():
        data = cat[name]
        if len(data.shape) == 2 and containsAper_regex.search(name):
            columns.extend(convert2DAperColumn(name, data))
        else:
            columns.append(cat.extractColumn(name))
    hdu = pyfits.BinTableHDU.from_columns(pyfits.ColDefs(columns))
    result = ldac.LDACCat(hdu)
    result.sourcefile = cat.sourcefile
    return result
###############################
def convertAper(hdulist):
    """Build a new HDU list where OBJECTS/LDAC_OBJECTS tables have their
    2-D APER columns split via convertAperColumns.

    A fresh primary HDU is prepended; HDUs lacking an EXTNAME keyword are
    dropped (matching the original behaviour).
    """
    converted = [pyfits.PrimaryHDU()]
    for hdu in hdulist:
        if 'EXTNAME' not in hdu.header:
            continue
        extname = hdu.header['EXTNAME']
        if extname == 'OBJECTS' or extname == 'LDAC_OBJECTS':
            newhdu = convertAperColumns(ldac.LDACCat(hdu)).hdu
            newhdu.header['EXTNAME'] = extname
            converted.append(newhdu)
        else:
            converted.append(hdu)
    return pyfits.HDUList(converted)
################################################
# MAIN
################################################
def main(argv = sys.argv):
    """Command-line entry point: read argv[1], split its APER columns and
    write the result to argv[2] (overwriting any existing file)."""
    if len(argv) != 3:
        sys.stderr.write('Usage: convert_aper.py inputcat outputcat\n')
        sys.exit(1)
    # Renamed locals so the builtin `input` is not shadowed.
    infile, outfile = argv[1], argv[2]
    converted = convertAper(pyfits.open(infile))
    converted.writeto(outfile, overwrite=True)
#########################################################
# Utility Functions
#########################################################
class ConfusionException(Exception):
    """Raised when a column name does not match any known APER root."""

# The recognised APER column roots and, for each, a regex capturing an
# optional suffix (e.g. a filter name) after the root.
aper_keys = ['FLUX_APER', 'FLUXERR_APER', 'MAG_APER', 'MAGERR_APER']
aper_regexs = [re.compile('^%s(.+)?' % key) for key in aper_keys]
def convert2DAperColumn(key, data):
    """Split one 2-D aperture column into per-aperture scalar columns.

    Parameters:
    - key: original column name; must start with one of the roots in
      aper_keys, optionally followed by a suffix (e.g. a filter name).
    - data: 2-D array of shape (nobjects, napers).

    Returns a list of pyfits.Column objects named root0, root1, ...,
    with any suffix kept after the aperture index
    (e.g. 'MAG_APER0-SUBARU-...').

    Raises ConfusionException if *key* matches none of the known roots.
    """
    cols = []
    match = None
    for aper_key, aper_regex in zip(aper_keys, aper_regexs):
        match = aper_regex.match(key)
        if match is not None:
            break
    if match is None:
        raise ConfusionException('Unrecognized Column Name: %s' % key)
    napers = data.shape[1]
    # range() here: the original used Python-2-only xrange(), a NameError under
    # Python 3 even though the rest of the file targets Python 3 / astropy
    # (BinTableHDU.from_columns, writeto(..., overwrite=True)).
    for i in range(napers):
        if match.group(1) is None:
            newcolname = '%s%d' % (aper_key, i)
        else:
            newcolname = '%s%d%s' % (aper_key, i, match.group(1))
        cols.append(pyfits.Column(name=newcolname,
                                    format='E',
                                    array=data[:, i]))
    return cols
########################################################
## TESTING
##############################
class TestConvert2DAperColumn(unittest.TestCase):
    """Unit tests for convert2DAperColumn."""

    def setUp(self):
        # 30 rows x 5 apertures; aperture i holds the constant value i+1 so
        # the split columns can be verified by value.
        # (xrange -> range throughout: xrange does not exist in Python 3.)
        self.napers = 5
        self.data = numpy.ones((30, self.napers))
        for i in range(self.napers):
            self.data[:, i] = i + 1

    def testTypes(self):
        colList = convert2DAperColumn('FLUX_APER', self.data)
        for col in colList:
            self.assertTrue(isinstance(col, pyfits.Column))

    def testTransform(self):
        colList = convert2DAperColumn('FLUX_APER', self.data)
        # assertEquals -> assertEqual: the former is a deprecated alias.
        self.assertEqual(len(colList), self.napers)
        for i in range(self.napers):
            self.assertEqual(colList[i].name, '%s%d' % ('FLUX_APER', i))
            self.assertTrue((colList[i].array == i + 1).all())

    def testConfusion(self):
        self.assertRaises(ConfusionException, convert2DAperColumn, 'FAKE_NAME', numpy.zeros((30, 4)))

    def testFancyFilterNames(self):
        colList = convert2DAperColumn('MAG_APER-SUBARU-10_2-1-W-J-B', self.data)
        self.assertEqual(len(colList), self.napers)
        for i in range(self.napers):
            self.assertEqual(colList[i].name, 'MAG_APER%d-SUBARU-10_2-1-W-J-B' % i)
            self.assertTrue((colList[i].array == i + 1).all())
##########################################
def tablesEqual(table1, table2):
    """True if the two table HDUs have the same length, the same number of
    columns, and element-wise equal data in every column of the first."""
    cat1 = ldac.LDACCat(table1)
    cat2 = ldac.LDACCat(table2)
    if len(cat1) != len(cat2):
        return False
    keys1 = cat1.keys()
    if len(keys1) != len(cat2.keys()):
        return False
    return all((cat1[k] == cat2[k]).all() for k in keys1)
################
class TestConvertAper(unittest.TestCase):
    """Integration tests for convertAperColumns/convertAper on a synthetic
    catalog with OBJECTS and FIELDS tables."""

    def setUp(self):
        # Scalar SeqNr column plus already-1-D columns that must pass through
        # untouched (including FLUX_APER7 etc., which carry an APER substring
        # but are not 2-D).
        cols = [pyfits.Column(name='SeqNr',
                              format='E',
                              array=numpy.arange(100))]
        self.vector_one_cols = 'MAG_ISO MAGERR_ISO FLUX_ISO FLUXERR_ISO FLUX_RADIUS FLUX_APER7 FLUXERR_APER7 MAG_APER7 MAGERR_APER7'.split()
        for col in self.vector_one_cols:
            cols.append(pyfits.Column(name=col,
                                      format='E',
                                      array=numpy.ones(100)))
        # 2-D APER columns, both plain and with a filter suffix.
        self.block_one_cols_basic = 'FLUX_APER FLUXERR_APER MAG_APER MAGERR_APER'.split()
        self.block_one_cols_fancy = ['%s-MEGAPRIME-0-1-u' % x for x in self.block_one_cols_basic]
        self.block_one_cols = self.block_one_cols_basic + self.block_one_cols_fancy
        self.napers = 5
        for col in self.block_one_cols:
            data = numpy.ones((100, self.napers))
            # Aperture i carries the constant i+1 so split columns are
            # verifiable by value.
            # (xrange -> range throughout: xrange does not exist in Python 3.)
            for i in range(self.napers):
                data[:, i] = i + 1
            cols.append(pyfits.Column(name=col,
                                      format='%dE' % self.napers,
                                      array=data))
        self.objectshdu = pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))
        self.objectshdu.header['EXTNAME'] = 'OBJECTS'
        cols = [pyfits.Column(name='Seeing',
                              format='E',
                              array=numpy.array([1.0])),
                pyfits.Column(name='Background',
                              format='E',
                              array=numpy.array([3.0]))]
        self.fieldhdu = pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))
        self.fieldhdu.header['EXTNAME'] = 'FIELDS'
        self.hdulist = [pyfits.PrimaryHDU(), self.objectshdu, self.fieldhdu]

    ##############
    def testConvertColumns_ReturnsHDU(self):
        newcat = convertAperColumns(ldac.LDACCat(self.objectshdu))
        self.assertTrue(isinstance(newcat, ldac.LDACCat))

    ###################
    def testConvertColumns_KeepOthers(self):
        newcat = convertAperColumns(ldac.LDACCat(self.objectshdu))
        headerkeys = newcat.keys()
        for key in self.vector_one_cols:
            self.assertTrue(key in headerkeys)
        self.assertTrue('SeqNr' in headerkeys)
        self.assertTrue((newcat['SeqNr'] == numpy.arange(100)).all())

    ###################
    def testConvertColumns_SplitAper(self):
        newcat = convertAperColumns(ldac.LDACCat(self.objectshdu))
        headerkeys = newcat.keys()
        for col in self.block_one_cols:
            # Renamed from `filter`, which shadowed the builtin.
            root, sep, filt = col.partition('-')
            for i in range(self.napers):
                if filt == '':
                    colname = '%s%d' % (root, i)
                else:
                    colname = '%s%d-%s' % (root, i, filt)
                self.assertTrue(colname in headerkeys)
                self.assertEqual(len(newcat[colname].shape), 1)
                self.assertTrue((newcat[colname] == i + 1).all())

    #######################
    def testConvertAper(self):
        newhdulist = convertAper(self.hdulist)
        self.assertTrue(isinstance(newhdulist, pyfits.HDUList))
        # assertEquals -> assertEqual: the former is a deprecated alias.
        self.assertEqual(len(newhdulist), 3)
        seenObjects = False
        seenFields = False
        for hdu in newhdulist:
            if 'EXTNAME' in hdu.header.keys():
                tablename = hdu.header['EXTNAME']
                if tablename == 'OBJECTS':
                    seenObjects = True
                    self.assertTrue(tablesEqual(hdu, convertAperColumns(ldac.LDACCat(self.objectshdu)).hdu))
                elif tablename == 'FIELDS':
                    seenFields = True
                    self.assertTrue(tablesEqual(hdu, self.fieldhdu))
                else:
                    # Was self.asssertFail(...), which is not a TestCase method
                    # and would raise AttributeError instead of failing cleanly.
                    self.fail('Unexpected Table')
        self.assertTrue(seenObjects)
        self.assertTrue(seenFields)

    ###################
    def testLDACObject(self):
        self.objectshdu.header['EXTNAME'] = 'LDAC_OBJECTS'
        newhdulist = convertAper(self.hdulist)
        seenObjects = False
        for hdu in newhdulist:
            if 'EXTNAME' in hdu.header.keys() and hdu.header['EXTNAME'] == 'LDAC_OBJECTS':
                seenObjects = True
                self.assertTrue(tablesEqual(hdu, convertAperColumns(ldac.LDACCat(self.objectshdu)).hdu))
        self.assertTrue(seenObjects)
##############################
def test():
    """Run this module's unit tests with verbose output."""
    loader = unittest.TestLoader()
    suite = unittest.TestSuite(
        [loader.loadTestsFromTestCase(case)
         for case in (TestConvertAper, TestConvert2DAperColumn)])
    unittest.TextTestRunner(verbosity=2).run(suite)
##############################
# COMMANDLINE EXECUTABLE
##############################
if __name__ == '__main__':
    # 'convert_aper.py test' runs the unit tests; anything else runs the
    # converter (which validates its own argv).
    run_tests = len(sys.argv) == 2 and sys.argv[1] == 'test'
    if run_tests:
        test()
    else:
        main()
| [
"ldac.LDACCat",
"unittest.TextTestRunner",
"astropy.io.fits.ColDefs",
"astropy.io.fits.PrimaryHDU",
"numpy.zeros",
"numpy.ones",
"numpy.arange",
"astropy.io.fits.open",
"unittest.TestLoader",
"numpy.array",
"astropy.io.fits.Column",
"sys.stderr.write",
"astropy.io.fits.HDUList",
"sys.exit"... | [((502, 520), 're.compile', 're.compile', (['"""APER"""'], {}), "('APER')\n", (512, 520), False, 'import sys, unittest, re\n'), ((1458, 1481), 'astropy.io.fits.HDUList', 'pyfits.HDUList', (['newhdus'], {}), '(newhdus)\n', (1472, 1481), True, 'import astropy, astropy.io.fits as pyfits, numpy\n'), ((1790, 1808), 'astropy.io.fits.open', 'pyfits.open', (['input'], {}), '(input)\n', (1801, 1808), True, 'import astropy, astropy.io.fits as pyfits, numpy\n'), ((2159, 2187), 're.compile', 're.compile', (["('^%s(.+)?' % key)"], {}), "('^%s(.+)?' % key)\n", (2169, 2187), False, 'import sys, unittest, re\n'), ((4327, 4347), 'ldac.LDACCat', 'ldac.LDACCat', (['table1'], {}), '(table1)\n', (4339, 4347), False, 'import ldac\n'), ((4359, 4379), 'ldac.LDACCat', 'ldac.LDACCat', (['table2'], {}), '(table2)\n', (4371, 4379), False, 'import ldac\n'), ((1008, 1027), 'astropy.io.fits.PrimaryHDU', 'pyfits.PrimaryHDU', ([], {}), '()\n', (1025, 1027), True, 'import astropy, astropy.io.fits as pyfits, numpy\n'), ((1648, 1712), 'sys.stderr.write', 'sys.stderr.write', (['"""Usage: convert_aper.py inputcat outputcat\n"""'], {}), "('Usage: convert_aper.py inputcat outputcat\\n')\n", (1664, 1712), False, 'import sys, unittest, re\n'), ((1721, 1732), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1729, 1732), False, 'import sys, unittest, re\n'), ((3112, 3141), 'numpy.ones', 'numpy.ones', (['(30, self.napers)'], {}), '((30, self.napers))\n', (3122, 3141), False, 'import astropy, astropy.io.fits as pyfits, numpy\n'), ((851, 871), 'astropy.io.fits.ColDefs', 'pyfits.ColDefs', (['cols'], {}), '(cols)\n', (865, 871), True, 'import astropy, astropy.io.fits as pyfits, numpy\n'), ((2747, 2807), 'astropy.io.fits.Column', 'pyfits.Column', ([], {'name': 'newcolname', 'format': '"""E"""', 'array': 'data[:, i]'}), "(name=newcolname, format='E', array=data[:, i])\n", (2760, 2807), True, 'import astropy, astropy.io.fits as pyfits, numpy\n'), ((3836, 3856), 'numpy.zeros', 'numpy.zeros', (['(30, 
4)'], {}), '((30, 4))\n', (3847, 3856), False, 'import astropy, astropy.io.fits as pyfits, numpy\n'), ((5564, 5594), 'numpy.ones', 'numpy.ones', (['(100, self.napers)'], {}), '((100, self.napers))\n', (5574, 5594), False, 'import astropy, astropy.io.fits as pyfits, numpy\n'), ((5905, 5925), 'astropy.io.fits.ColDefs', 'pyfits.ColDefs', (['cols'], {}), '(cols)\n', (5919, 5925), True, 'import astropy, astropy.io.fits as pyfits, numpy\n'), ((6317, 6337), 'astropy.io.fits.ColDefs', 'pyfits.ColDefs', (['cols'], {}), '(cols)\n', (6331, 6337), True, 'import astropy, astropy.io.fits as pyfits, numpy\n'), ((6413, 6432), 'astropy.io.fits.PrimaryHDU', 'pyfits.PrimaryHDU', ([], {}), '()\n', (6430, 6432), True, 'import astropy, astropy.io.fits as pyfits, numpy\n'), ((6569, 6598), 'ldac.LDACCat', 'ldac.LDACCat', (['self.objectshdu'], {}), '(self.objectshdu)\n', (6581, 6598), False, 'import ldac\n'), ((6775, 6804), 'ldac.LDACCat', 'ldac.LDACCat', (['self.objectshdu'], {}), '(self.objectshdu)\n', (6787, 6804), False, 'import ldac\n'), ((7163, 7192), 'ldac.LDACCat', 'ldac.LDACCat', (['self.objectshdu'], {}), '(self.objectshdu)\n', (7175, 7192), False, 'import ldac\n'), ((9462, 9498), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (9485, 9498), False, 'import sys, unittest, re\n'), ((5694, 5757), 'astropy.io.fits.Column', 'pyfits.Column', ([], {'name': 'col', 'format': "('%dE' % self.napers)", 'array': 'data'}), "(name=col, format='%dE' % self.napers, array=data)\n", (5707, 5757), True, 'import astropy, astropy.io.fits as pyfits, numpy\n'), ((9366, 9387), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (9385, 9387), False, 'import sys, unittest, re\n'), ((4833, 4850), 'numpy.arange', 'numpy.arange', (['(100)'], {}), '(100)\n', (4845, 4850), False, 'import astropy, astropy.io.fits as pyfits, numpy\n'), ((6098, 6116), 'numpy.array', 'numpy.array', (['[1.0]'], {}), '([1.0])\n', (6109, 6116), False, 'import astropy, 
astropy.io.fits as pyfits, numpy\n'), ((6240, 6258), 'numpy.array', 'numpy.array', (['[3.0]'], {}), '([3.0])\n', (6251, 6258), False, 'import astropy, astropy.io.fits as pyfits, numpy\n'), ((1246, 1263), 'ldac.LDACCat', 'ldac.LDACCat', (['hdu'], {}), '(hdu)\n', (1258, 1263), False, 'import ldac\n'), ((5179, 5194), 'numpy.ones', 'numpy.ones', (['(100)'], {}), '(100)\n', (5189, 5194), False, 'import astropy, astropy.io.fits as pyfits, numpy\n'), ((7030, 7047), 'numpy.arange', 'numpy.arange', (['(100)'], {}), '(100)\n', (7042, 7047), False, 'import astropy, astropy.io.fits as pyfits, numpy\n'), ((9061, 9090), 'ldac.LDACCat', 'ldac.LDACCat', (['self.objectshdu'], {}), '(self.objectshdu)\n', (9073, 9090), False, 'import ldac\n'), ((8299, 8328), 'ldac.LDACCat', 'ldac.LDACCat', (['self.objectshdu'], {}), '(self.objectshdu)\n', (8311, 8328), False, 'import ldac\n')] |
import cv2
import numpy as np
import maxflow
# app imports
from kivy.config import Config
Config.set('input', 'mouse', 'mouse,disable_multitouch')
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.core.window import Window
from kivy.graphics.texture import Texture
from kivy.graphics import Rectangle
from kivy.base import runTouchApp
from kivy.graphics import Color, Line
from kivy.properties import ObjectProperty
# imports
import os
import random
import sys
import math
# Graph-cut parameters: sigma controls the falloff of the smoothness
# (neighbour-edge) weights, lamb scales the region (terminal) weights.
sigma = 2
lamb = 1
# NOTE(review): k appears unused in this file — confirm before removing.
k = 1
# Attribution for createLineIterator below:
'''
taken from https://stackoverflow.com/questions/32328179/opencv-3-0-python-lineiterator
'''
def createLineIterator(P1, P2, img):
    """
    Produces an array with the coordinates of each pixel on the line between
    two points, using a vectorised form of Bresenham's algorithm.

    Parameters:
        -P1: a numpy array holding the coordinate of the first point (x,y)
        -P2: a numpy array holding the coordinate of the second point (x,y)
        -img: the image being processed (only its shape is used, for clipping)

    Returns:
        -itbuffer: int array of shape [numPixels, 2]; each row is (x, y).
         P1 itself is excluded, and points outside the image are clipped.
    """
    # define local variables for readability
    imageH = img.shape[0]
    imageW = img.shape[1]
    P1X = P1[0]
    P1Y = P1[1]
    P2X = P2[0]
    P2Y = P2[1]

    # difference and absolute difference between points,
    # used to calculate slope and relative location between points
    dX = P2X - P1X
    dY = P2Y - P1Y
    dXa = np.abs(dX)
    dYa = np.abs(dY)

    # predefine numpy array for output based on the Chebyshev distance
    ma = np.maximum(dYa, dXa)
    itbuffer = np.empty(shape=(ma, 2), dtype=np.float32)
    itbuffer.fill(np.nan)

    # Obtain coordinates along the line using a form of Bresenham's algorithm
    negY = P1Y > P2Y
    negX = P1X > P2X
    if P1X == P2X:  # vertical line segment
        itbuffer[:, 0] = P1X
        if negY:
            itbuffer[:, 1] = np.arange(P1Y - 1, P1Y - dYa - 1, -1)
        else:
            itbuffer[:, 1] = np.arange(P1Y + 1, P1Y + dYa + 1)
    elif P1Y == P2Y:  # horizontal line segment
        itbuffer[:, 1] = P1Y
        if negX:
            itbuffer[:, 0] = np.arange(P1X - 1, P1X - dXa - 1, -1)
        else:
            itbuffer[:, 0] = np.arange(P1X + 1, P1X + dXa + 1)
    else:  # diagonal line segment
        steepSlope = dYa > dXa
        if steepSlope:
            slope = dX.astype(np.float32) / dY.astype(np.float32)
            if negY:
                itbuffer[:, 1] = np.arange(P1Y - 1, P1Y - dYa - 1, -1)
            else:
                itbuffer[:, 1] = np.arange(P1Y + 1, P1Y + dYa + 1)
            # np.int was removed in NumPy 1.24; the builtin int is equivalent
            itbuffer[:, 0] = (slope * (itbuffer[:, 1] - P1Y)).astype(int) + P1X
        else:
            slope = dY.astype(np.float32) / dX.astype(np.float32)
            if negX:
                itbuffer[:, 0] = np.arange(P1X - 1, P1X - dXa - 1, -1)
            else:
                itbuffer[:, 0] = np.arange(P1X + 1, P1X + dXa + 1)
            itbuffer[:, 1] = (slope * (itbuffer[:, 0] - P1X)).astype(int) + P1Y

    # Remove points outside of image
    colX = itbuffer[:, 0]
    colY = itbuffer[:, 1]
    itbuffer = itbuffer[(colX >= 0) & (colY >= 0) & (colX < imageW) & (colY < imageH)]
    return itbuffer.astype(int)
class SegmentationWidget(Widget):
    """Interactive GrabCut segmentation widget.

    The first drag draws a bounding rectangle (GC_INIT_WITH_RECT); every
    later drag is a keep/remove scribble that refines the mask via
    GC_INIT_WITH_MASK.  Press 's' to toggle between keep and remove strokes.
    The image path is taken from sys.argv[1].
    """

    def __init__(self, *args, **kwargs):
        # init the firsttouch and call the super
        super(SegmentationWidget, self).__init__(*args, **kwargs)
        self.firstTouch = True
        self.isKeep = True
        self.bind(size=self._update_rect, pos=self._update_rect)
        # keyboard stuff
        self._keyboard = Window.request_keyboard(self._keyboard_closed, self)
        self._keyboard.bind(on_key_down=self._on_keyboard_down)
        imgName = sys.argv[1]
        img = cv2.imread(imgName).astype(np.uint8)
        # Kivy textures are bottom-up, so flip the rows once for display
        img = np.flip(img, axis=0)
        # align image
        self.imgWidth = img.shape[1]
        self.imgHeight = img.shape[0]
        self.flatShape = (self.imgHeight, self.imgWidth, 1)
        self.flatestShape = (self.imgHeight, self.imgWidth)
        # attach the image to a texture
        self.texture = Texture.create(size=(self.imgWidth, self.imgHeight), colorfmt="bgr")
        # tobytes() replaces ndarray.tostring(), which was removed in NumPy 2.0
        self.texture.blit_buffer(img.tobytes(), bufferfmt="ubyte", colorfmt="bgr")
        # create the copy
        self.imgOut = img
        self.img = img
        self.intensity = np.average(img, axis=2).astype(np.uint8).reshape(self.flatShape)
        img = np.flip(img, axis=0)
        cv2.imwrite('out2.jpg', img)
        # create the graph and do initialization;
        # f/b hold foreground/background seed marks, wf/wb the terminal weights
        self.g = maxflow.Graph[int]()
        self.nodes = self.g.add_grid_nodes(self.flatShape)
        self.f = np.zeros(self.flatShape)
        # np.float was removed in NumPy 1.24; the builtin float is equivalent
        self.b = np.full(self.flatShape, np.inf, dtype=float)
        self.wf = np.zeros(self.flatShape)
        self.wb = np.zeros(self.flatShape)
        with self.canvas.before:
            # attach the texture to the app
            self.rect = Rectangle(texture=self.texture, size=self.size, pos=self.pos)

    def on_touch_down(self, touch):
        """Remember the rectangle anchor (first touch) and start a new Line."""
        with self.canvas:
            if self.firstTouch:
                # create the first set of points for the rectangle
                self.Ax, self.Ay = touch.x, touch.y
            # start the new Line and give the first point
            touch.ud['line'] = Line(points=(touch.x, touch.y))

    def on_touch_move(self, touch):
        """Preview the selection rectangle, or extend the scribble line."""
        if self.firstTouch:
            # Assign the position of the touch at the point C
            self.Cx, self.Cy = touch.x, touch.y
            # There are two known points A (starting point) and C (endpoint);
            # derive the remaining rectangle corners B and D from them
            self.Bx, self.By = self.Cx, self.Ay
            self.Dx, self.Dy = self.Ax, self.Cy
            # Assign points positions to the last line created
            touch.ud['line'].points.clear()
            touch.ud['line'].points += [self.Ax, self.Ay]
            touch.ud['line'].points += [self.Bx, self.By]
            touch.ud['line'].points += [self.Cx, self.Cy]
            touch.ud['line'].points += [self.Dx, self.Dy]
            touch.ud['line'].points += [self.Ax, self.Ay]
        else:
            # continue the list of points
            touch.ud['line'].points += [touch.x, touch.y]

    def on_touch_up(self, touch):
        """Run GrabCut: rect init on first release, mask refinement afterwards."""
        if self.firstTouch:
            # np.int was removed in NumPy 1.24; the builtin int is equivalent
            ys = np.sort([self.yResize(self.Ay), self.yResize(self.Cy)]).astype(int)
            xs = np.sort([self.xResize(self.Ax), self.xResize(self.Cx)]).astype(int)
            self.f[ys[0]:ys[1], xs[0]:xs[1]] = 1.
            self.b[ys[0]:ys[1], xs[0]:xs[1]] = 0.
            # change the draw type
            self.firstTouch = False
            self.bgdModel = np.zeros((1, 65), np.float64)
            self.fgdModel = np.zeros((1, 65), np.float64)
            # the opencv implementation
            self.mask = np.zeros(self.flatShape, dtype=np.uint8)
            self.mask[self.f == np.inf] = 1
            self.mask[self.b == np.inf] = 0
            cv2.grabCut(self.img, self.mask, (xs[0], ys[0], xs[1], ys[1]), self.bgdModel, self.fgdModel, 1, cv2.GC_INIT_WITH_RECT)
        else:
            points = touch.ud['line'].points
            for i in range(1, len(points) // 2):
                x1 = self.xResize(points[2 * (i - 1)])
                y1 = self.yResize(points[2 * (i - 1) + 1])
                x2 = self.xResize(points[2 * i + 0])
                y2 = self.yResize(points[2 * i + 1])
                buffer = createLineIterator(np.array([x1, y1]).astype(int), np.array([x2, y2]).astype(int), self.imgOut)
                for data in buffer:
                    # The default draw is Keep. Press s to change it.
                    if self.isKeep:
                        self.f[data[1]][data[0]] = np.inf
                        self.b[data[1]][data[0]] = 0.
                    else:
                        self.f[data[1]][data[0]] = 0.
                        self.b[data[1]][data[0]] = np.inf
            self.mask[self.f == np.inf] = 1
            self.mask[self.b == np.inf] = 0
            # the opencv implementation (self.cut() is the disabled in-house one)
            self.mask, self.bgdModel, self.fgdModel = cv2.grabCut(self.img, self.mask, None, self.bgdModel, self.fgdModel, 5, cv2.GC_INIT_WITH_MASK)
        # remove the drawn lines and display the newly masked image
        self.canvas.clear()
        mask2 = np.where((self.mask == 2) | (self.mask == 0), 0, 1).astype('uint8')
        self.imgOut = self.img * mask2
        self.texture.blit_buffer(self.imgOut.tobytes(), bufferfmt="ubyte", colorfmt="bgr")
        # dump intermediate images for debugging, flipped back to file orientation
        self.imgOut = np.flip(self.imgOut, axis=0)
        cv2.imwrite('out2.jpg', self.imgOut)
        self.imgOut = np.flip(self.imgOut, axis=0)
        self.f = np.flip(self.f, axis=0)
        cv2.imwrite('fMarks2.jpg', self.f)
        self.f = np.flip(self.f, axis=0)
        self.b = np.flip(self.b, axis=0)
        cv2.imwrite('bMarks2.jpg', self.b)
        self.b = np.flip(self.b, axis=0)
        self.wf = np.flip(self.wf, axis=0)
        cv2.imwrite('fWeights2.jpg', self.wf)
        self.wf = np.flip(self.wf, axis=0)
        self.wb = np.flip(self.wb, axis=0)
        cv2.imwrite('bWeights2.jpg', self.wb)
        self.wb = np.flip(self.wb, axis=0)

    def cut(self):
        """In-house max-flow segmentation (currently unused; see on_touch_up)."""
        fMean = np.average(self.intensity[self.f >= 1])
        bMean = np.average(self.intensity[self.b >= 1])
        # terminal (region) weights from intensity likelihoods
        self.wf = -lamb * np.log(np.abs(self.intensity - fMean) / (np.abs(self.intensity - fMean) + np.abs(self.intensity - bMean)))
        self.wb = -lamb * np.log(np.abs(self.intensity - bMean) / (np.abs(self.intensity - bMean) + np.abs(self.intensity - fMean)))
        self.wf = np.maximum(self.wf, self.f)
        self.wb = np.maximum(self.wb, self.b)
        diff = np.abs(np.gradient(self.intensity.reshape(self.flatestShape)))
        # diff = np.average(diff, axis = 0).reshape(self.flatShape)
        self.wx = np.exp(-np.square(diff[0]) / (2 * np.square(sigma))).reshape(self.flatShape)
        self.wy = np.exp(-np.square(diff[1]) / (2 * np.square(sigma))).reshape(self.flatShape)
        # add the smoothness weights to the grid
        structureX = np.array([[0, 0, 0], [1, 0, 1], [0, 0, 0]])
        structureY = np.array([[0, 1, 0], [0, 0, 0], [0, 1, 0]])
        self.g.add_grid_edges(self.nodes, weights=self.wx, structure=structureX, symmetric=False)
        self.g.add_grid_edges(self.nodes, weights=self.wy, structure=structureY, symmetric=False)
        # Add the terminal edges. The [2] are the capacities
        # of the edges from the source node. The [3]
        # are the capacities of the edges to the sink node.
        self.g.add_grid_tedges(self.nodes, self.wf, self.wb)
        self.g.maxflow()
        return self.g.get_grid_segments(self.nodes)

    def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
        """Toggle keep/remove scribble mode when 's' is pressed."""
        if keycode[1] == 's':
            self.isKeep = not self.isKeep
        return True

    def _keyboard_closed(self):
        """Release the keyboard binding."""
        self._keyboard.unbind(on_key_down=self._on_keyboard_down)
        self._keyboard = None

    def xResize(self, x):
        """Map a widget x coordinate to an image column."""
        return x / self.size[0] * self.imgWidth

    def yResize(self, y):
        """Map a widget y coordinate to an image row."""
        return y / self.size[1] * self.imgHeight

    def _update_rect(self, instance, value):
        """Keep the background rectangle matched to the widget size/position."""
        self.rect.pos = instance.pos
        self.rect.size = instance.size
class SegmentationApp(App):
    """Kivy application wrapper hosting the segmentation widget."""

    def build(self):
        # The widget itself loads the image named on the command line.
        widget = SegmentationWidget()
        self.root = widget
        return widget
if __name__ == "__main__":
SegmentationApp().run() | [
"numpy.maximum",
"numpy.abs",
"kivy.config.Config.set",
"kivy.graphics.Rectangle",
"numpy.empty",
"numpy.arange",
"numpy.full",
"cv2.imwrite",
"kivy.core.window.Window.request_keyboard",
"numpy.average",
"numpy.square",
"cv2.grabCut",
"kivy.graphics.Line",
"numpy.flip",
"kivy.graphics.te... | [((101, 157), 'kivy.config.Config.set', 'Config.set', (['"""input"""', '"""mouse"""', '"""mouse,disable_multitouch"""'], {}), "('input', 'mouse', 'mouse,disable_multitouch')\n", (111, 157), False, 'from kivy.config import Config\n'), ((1512, 1522), 'numpy.abs', 'np.abs', (['dX'], {}), '(dX)\n', (1518, 1522), True, 'import numpy as np\n'), ((1533, 1543), 'numpy.abs', 'np.abs', (['dY'], {}), '(dY)\n', (1539, 1543), True, 'import numpy as np\n'), ((1630, 1650), 'numpy.maximum', 'np.maximum', (['dYa', 'dXa'], {}), '(dYa, dXa)\n', (1640, 1650), True, 'import numpy as np\n'), ((1665, 1706), 'numpy.empty', 'np.empty', ([], {'shape': '(ma, 2)', 'dtype': 'np.float32'}), '(shape=(ma, 2), dtype=np.float32)\n', (1673, 1706), True, 'import numpy as np\n'), ((3642, 3694), 'kivy.core.window.Window.request_keyboard', 'Window.request_keyboard', (['self._keyboard_closed', 'self'], {}), '(self._keyboard_closed, self)\n', (3665, 3694), False, 'from kivy.core.window import Window\n'), ((3855, 3875), 'numpy.flip', 'np.flip', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (3862, 3875), True, 'import numpy as np\n'), ((4152, 4220), 'kivy.graphics.texture.Texture.create', 'Texture.create', ([], {'size': '(self.imgWidth, self.imgHeight)', 'colorfmt': '"""bgr"""'}), "(size=(self.imgWidth, self.imgHeight), colorfmt='bgr')\n", (4166, 4220), False, 'from kivy.graphics.texture import Texture\n'), ((4494, 4514), 'numpy.flip', 'np.flip', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (4501, 4514), True, 'import numpy as np\n'), ((4522, 4550), 'cv2.imwrite', 'cv2.imwrite', (['"""out2.jpg"""', 'img'], {}), "('out2.jpg', img)\n", (4533, 4550), False, 'import cv2\n'), ((4715, 4739), 'numpy.zeros', 'np.zeros', (['self.flatShape'], {}), '(self.flatShape)\n', (4723, 4739), True, 'import numpy as np\n'), ((4757, 4804), 'numpy.full', 'np.full', (['self.flatShape', 'np.inf'], {'dtype': 'np.float'}), '(self.flatShape, np.inf, dtype=np.float)\n', (4764, 4804), True, 'import numpy as 
np\n'), ((4822, 4846), 'numpy.zeros', 'np.zeros', (['self.flatShape'], {}), '(self.flatShape)\n', (4830, 4846), True, 'import numpy as np\n'), ((4865, 4889), 'numpy.zeros', 'np.zeros', (['self.flatShape'], {}), '(self.flatShape)\n', (4873, 4889), True, 'import numpy as np\n'), ((9023, 9051), 'numpy.flip', 'np.flip', (['self.imgOut'], {'axis': '(0)'}), '(self.imgOut, axis=0)\n', (9030, 9051), True, 'import numpy as np\n'), ((9059, 9095), 'cv2.imwrite', 'cv2.imwrite', (['"""out2.jpg"""', 'self.imgOut'], {}), "('out2.jpg', self.imgOut)\n", (9070, 9095), False, 'import cv2\n'), ((9118, 9146), 'numpy.flip', 'np.flip', (['self.imgOut'], {'axis': '(0)'}), '(self.imgOut, axis=0)\n', (9125, 9146), True, 'import numpy as np\n'), ((9164, 9187), 'numpy.flip', 'np.flip', (['self.f'], {'axis': '(0)'}), '(self.f, axis=0)\n', (9171, 9187), True, 'import numpy as np\n'), ((9195, 9229), 'cv2.imwrite', 'cv2.imwrite', (['"""fMarks2.jpg"""', 'self.f'], {}), "('fMarks2.jpg', self.f)\n", (9206, 9229), False, 'import cv2\n'), ((9247, 9270), 'numpy.flip', 'np.flip', (['self.f'], {'axis': '(0)'}), '(self.f, axis=0)\n', (9254, 9270), True, 'import numpy as np\n'), ((9288, 9311), 'numpy.flip', 'np.flip', (['self.b'], {'axis': '(0)'}), '(self.b, axis=0)\n', (9295, 9311), True, 'import numpy as np\n'), ((9320, 9354), 'cv2.imwrite', 'cv2.imwrite', (['"""bMarks2.jpg"""', 'self.b'], {}), "('bMarks2.jpg', self.b)\n", (9331, 9354), False, 'import cv2\n'), ((9372, 9395), 'numpy.flip', 'np.flip', (['self.b'], {'axis': '(0)'}), '(self.b, axis=0)\n', (9379, 9395), True, 'import numpy as np\n'), ((9414, 9438), 'numpy.flip', 'np.flip', (['self.wf'], {'axis': '(0)'}), '(self.wf, axis=0)\n', (9421, 9438), True, 'import numpy as np\n'), ((9446, 9483), 'cv2.imwrite', 'cv2.imwrite', (['"""fWeights2.jpg"""', 'self.wf'], {}), "('fWeights2.jpg', self.wf)\n", (9457, 9483), False, 'import cv2\n'), ((9502, 9526), 'numpy.flip', 'np.flip', (['self.wf'], {'axis': '(0)'}), '(self.wf, axis=0)\n', (9509, 9526), True, 
'import numpy as np\n'), ((9545, 9569), 'numpy.flip', 'np.flip', (['self.wb'], {'axis': '(0)'}), '(self.wb, axis=0)\n', (9552, 9569), True, 'import numpy as np\n'), ((9578, 9615), 'cv2.imwrite', 'cv2.imwrite', (['"""bWeights2.jpg"""', 'self.wb'], {}), "('bWeights2.jpg', self.wb)\n", (9589, 9615), False, 'import cv2\n'), ((9634, 9658), 'numpy.flip', 'np.flip', (['self.wb'], {'axis': '(0)'}), '(self.wb, axis=0)\n', (9641, 9658), True, 'import numpy as np\n'), ((9749, 9788), 'numpy.average', 'np.average', (['self.intensity[self.f >= 1]'], {}), '(self.intensity[self.f >= 1])\n', (9759, 9788), True, 'import numpy as np\n'), ((9803, 9842), 'numpy.average', 'np.average', (['self.intensity[self.b >= 1]'], {}), '(self.intensity[self.b >= 1])\n', (9813, 9842), True, 'import numpy as np\n'), ((10119, 10146), 'numpy.maximum', 'np.maximum', (['self.wf', 'self.f'], {}), '(self.wf, self.f)\n', (10129, 10146), True, 'import numpy as np\n'), ((10166, 10193), 'numpy.maximum', 'np.maximum', (['self.wb', 'self.b'], {}), '(self.wb, self.b)\n', (10176, 10193), True, 'import numpy as np\n'), ((10589, 10632), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 1], [0, 0, 0]]'], {}), '([[0, 0, 0], [1, 0, 1], [0, 0, 0]])\n', (10597, 10632), True, 'import numpy as np\n'), ((10652, 10695), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [0, 0, 0], [0, 1, 0]])\n', (10660, 10695), True, 'import numpy as np\n'), ((1966, 2003), 'numpy.arange', 'np.arange', (['(P1Y - 1)', '(P1Y - dYa - 1)', '(-1)'], {}), '(P1Y - 1, P1Y - dYa - 1, -1)\n', (1975, 2003), True, 'import numpy as np\n'), ((2044, 2077), 'numpy.arange', 'np.arange', (['(P1Y + 1)', '(P1Y + dYa + 1)'], {}), '(P1Y + 1, P1Y + dYa + 1)\n', (2053, 2077), True, 'import numpy as np\n'), ((4991, 5052), 'kivy.graphics.Rectangle', 'Rectangle', ([], {'texture': 'self.texture', 'size': 'self.size', 'pos': 'self.pos'}), '(texture=self.texture, size=self.size, pos=self.pos)\n', (5000, 5052), False, 'from kivy.graphics 
import Rectangle\n'), ((5358, 5389), 'kivy.graphics.Line', 'Line', ([], {'points': '(touch.x, touch.y)'}), '(points=(touch.x, touch.y))\n', (5362, 5389), False, 'from kivy.graphics import Color, Line\n'), ((6840, 6869), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (6848, 6869), True, 'import numpy as np\n'), ((6896, 6925), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (6904, 6925), True, 'import numpy as np\n'), ((6988, 7028), 'numpy.zeros', 'np.zeros', (['self.flatShape'], {'dtype': 'np.uint8'}), '(self.flatShape, dtype=np.uint8)\n', (6996, 7028), True, 'import numpy as np\n'), ((7130, 7253), 'cv2.grabCut', 'cv2.grabCut', (['self.img', 'self.mask', '(xs[0], ys[0], xs[1], ys[1])', 'self.bgdModel', 'self.fgdModel', '(1)', 'cv2.GC_INIT_WITH_RECT'], {}), '(self.img, self.mask, (xs[0], ys[0], xs[1], ys[1]), self.\n bgdModel, self.fgdModel, 1, cv2.GC_INIT_WITH_RECT)\n', (7141, 7253), False, 'import cv2\n'), ((8313, 8412), 'cv2.grabCut', 'cv2.grabCut', (['self.img', 'self.mask', 'None', 'self.bgdModel', 'self.fgdModel', '(5)', 'cv2.GC_INIT_WITH_MASK'], {}), '(self.img, self.mask, None, self.bgdModel, self.fgdModel, 5, cv2\n .GC_INIT_WITH_MASK)\n', (8324, 8412), False, 'import cv2\n'), ((2210, 2247), 'numpy.arange', 'np.arange', (['(P1X - 1)', '(P1X - dXa - 1)', '(-1)'], {}), '(P1X - 1, P1X - dXa - 1, -1)\n', (2219, 2247), True, 'import numpy as np\n'), ((2288, 2321), 'numpy.arange', 'np.arange', (['(P1X + 1)', '(P1X + dXa + 1)'], {}), '(P1X + 1, P1X + dXa + 1)\n', (2297, 2321), True, 'import numpy as np\n'), ((3804, 3823), 'cv2.imread', 'cv2.imread', (['imgName'], {}), '(imgName)\n', (3814, 3823), False, 'import cv2\n'), ((8802, 8853), 'numpy.where', 'np.where', (['((self.mask == 2) | (self.mask == 0))', '(0)', '(1)'], {}), '((self.mask == 2) | (self.mask == 0), 0, 1)\n', (8810, 8853), True, 'import numpy as np\n'), ((2527, 2564), 'numpy.arange', 'np.arange', (['(P1Y - 1)', '(P1Y - dYa - 
1)', '(-1)'], {}), '(P1Y - 1, P1Y - dYa - 1, -1)\n', (2536, 2564), True, 'import numpy as np\n'), ((2613, 2646), 'numpy.arange', 'np.arange', (['(P1Y + 1)', '(P1Y + dYa + 1)'], {}), '(P1Y + 1, P1Y + dYa + 1)\n', (2622, 2646), True, 'import numpy as np\n'), ((2860, 2897), 'numpy.arange', 'np.arange', (['(P1X - 1)', '(P1X - dXa - 1)', '(-1)'], {}), '(P1X - 1, P1X - dXa - 1, -1)\n', (2869, 2897), True, 'import numpy as np\n'), ((2946, 2979), 'numpy.arange', 'np.arange', (['(P1X + 1)', '(P1X + dXa + 1)'], {}), '(P1X + 1, P1X + dXa + 1)\n', (2955, 2979), True, 'import numpy as np\n'), ((9875, 9905), 'numpy.abs', 'np.abs', (['(self.intensity - fMean)'], {}), '(self.intensity - fMean)\n', (9881, 9905), True, 'import numpy as np\n'), ((10004, 10034), 'numpy.abs', 'np.abs', (['(self.intensity - bMean)'], {}), '(self.intensity - bMean)\n', (10010, 10034), True, 'import numpy as np\n'), ((4415, 4438), 'numpy.average', 'np.average', (['img'], {'axis': '(2)'}), '(img, axis=2)\n', (4425, 4438), True, 'import numpy as np\n'), ((9907, 9937), 'numpy.abs', 'np.abs', (['(self.intensity - fMean)'], {}), '(self.intensity - fMean)\n', (9913, 9937), True, 'import numpy as np\n'), ((9938, 9968), 'numpy.abs', 'np.abs', (['(self.intensity - bMean)'], {}), '(self.intensity - bMean)\n', (9944, 9968), True, 'import numpy as np\n'), ((10036, 10066), 'numpy.abs', 'np.abs', (['(self.intensity - bMean)'], {}), '(self.intensity - bMean)\n', (10042, 10066), True, 'import numpy as np\n'), ((10067, 10097), 'numpy.abs', 'np.abs', (['(self.intensity - fMean)'], {}), '(self.intensity - fMean)\n', (10073, 10097), True, 'import numpy as np\n'), ((7613, 7631), 'numpy.array', 'np.array', (['[x1, y1]'], {}), '([x1, y1])\n', (7621, 7631), True, 'import numpy as np\n'), ((7646, 7664), 'numpy.array', 'np.array', (['[x2, y2]'], {}), '([x2, y2])\n', (7654, 7664), True, 'import numpy as np\n'), ((10367, 10385), 'numpy.square', 'np.square', (['diff[0]'], {}), '(diff[0])\n', (10376, 10385), True, 'import numpy as 
np\n'), ((10393, 10409), 'numpy.square', 'np.square', (['sigma'], {}), '(sigma)\n', (10402, 10409), True, 'import numpy as np\n'), ((10462, 10480), 'numpy.square', 'np.square', (['diff[1]'], {}), '(diff[1])\n', (10471, 10480), True, 'import numpy as np\n'), ((10488, 10504), 'numpy.square', 'np.square', (['sigma'], {}), '(sigma)\n', (10497, 10504), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import math
import numpy as np
#Input: latency
#Output: QoE and Slope
# Piecewise-linear QoE model: x holds the end-to-end latency breakpoints
# (presumably milliseconds — TODO confirm against callers), y the QoE score
# at each breakpoint.  slopes/b are the per-segment line coefficients
# (QoE = slope * latency + b); their first/last entries are the flat
# out-of-range segments used by QoECurve below.
x = [ 940., 1360., 2180., 3360., 4260., 5160., 6400., 8000., 9960., 12860., 20520., 30520.]
y = [4.62, 4.6, 4.58, 4.33, 3.99, 3.73, 3.06, 2.64, 2.42, 2.24, 1.85, 1.45]
slopes = [ 0.00000000e+00, -4.76190476e-05, -2.43902439e-05, -2.11864407e-04,
  -3.77777778e-04, -2.88888889e-04, -5.40322581e-04, -2.62500000e-04,
  -1.12244898e-04, -6.20689655e-05, -5.09138381e-05, -4.00000000e-05,
   0.00000000e+00]
b = [0., 4.6647619, 4.63317073, 5.04186441, 5.59933333, 5.22066667,
 6.51806452, 4.74, 3.53795918, 3.0382069, 2.89475196, 2.6708,
 0. ]
def load_curve():
    """Load the measured QoE curve; latencies are rescaled by 1000."""
    latencies = np.load("x_amazon.npy")
    scores = np.load("y_amazon.npy")
    latencies = latencies * 1000
    return latencies, scores
def QoECurve(e2e_latency):
    """Evaluate the piecewise-linear QoE model at *e2e_latency*.

    Returns a (QoE, Slope) pair, where Slope is the derivative of the
    curve at that latency (0 on the flat out-of-range segments).
    """
    n = 12
    # Count how many breakpoints in x lie strictly below the latency.
    zone = 0
    while zone < n and e2e_latency > x[zone]:
        zone += 1
    if zone == 0:
        # Below the first breakpoint: flat at the best QoE.
        return y[0], 0
    if zone == n:
        # Beyond the last breakpoint: flat at the worst QoE.
        return y[n - 1], 0
    slope = slopes[zone]
    return slope * e2e_latency + b[zone], slope
| [
"numpy.load"
] | [((693, 716), 'numpy.load', 'np.load', (['"""x_amazon.npy"""'], {}), "('x_amazon.npy')\n", (700, 716), True, 'import numpy as np\n'), ((725, 748), 'numpy.load', 'np.load', (['"""y_amazon.npy"""'], {}), "('y_amazon.npy')\n", (732, 748), True, 'import numpy as np\n')] |
import numpy
class Author:
    """A chat participant together with statistics over their messages."""

    def __init__(self, name):
        """Create an author identified by *name* with empty statistics."""
        self.name = name
        self.leave_count = 0
        # Time deltas (response times) extracted from this author's messages.
        self._time_deltas = []
        # Message objects for all messages from this author.
        self._messages = []
        # How many times this person has mentioned other people
        # (keys are Author objects, values are counts).
        self.mentions = {}

    @property
    def message_count(self):
        """Total number of messages sent by this author."""
        return len(self._messages)

    @property
    def longest_message(self):
        """The message whose text is longest."""
        return max(self._messages, key=lambda msg: len(msg.text))

    @property
    def message_length_histogram(self):
        """Message text lengths, in message order."""
        return [len(x.text) for x in self._messages]

    @property
    def message_length_stdev(self):
        """Standard deviation of message lengths, rounded to one decimal."""
        return round(numpy.std(numpy.array(self.message_length_histogram)), 1)

    @property
    def shortest_message(self):
        """The message whose text is shortest."""
        return min(self._messages, key=lambda msg: len(msg.text))

    @property
    def get_max_response_time(self):
        """Largest recorded response time."""
        return max(self._time_deltas)

    @property
    def get_min_response_time(self):
        """Smallest recorded response time."""
        return min(self._time_deltas)

    @property
    def get_avg_response_time(self):
        """Mean response time, rounded to the nearest integer.

        Raises:
            ZeroDivisionError: if no response times have been recorded.
        """
        # Catch only the division by zero from an empty list; the previous
        # bare `except` also masked unrelated errors (e.g. bad element types).
        try:
            return round(sum(self._time_deltas) / len(self._time_deltas))
        except ZeroDivisionError:
            raise ZeroDivisionError(
                'Cannot calculate average response time, as no messages from',
                self.name)

    def __repr__(self):
        return "<ChatParticipant %s>" % self.name
| [
"numpy.array"
] | [((794, 836), 'numpy.array', 'numpy.array', (['self.message_length_histogram'], {}), '(self.message_length_histogram)\n', (805, 836), False, 'import numpy\n')] |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rcParams
# Let matplotlib auto-fit axis labels/titles inside the figure canvas.
rcParams.update({'figure.autolayout': True})
# Print numpy arrays with two decimals (mutates global numpy print state).
np.set_printoptions(precision=2)
def plot_confusion_matrix(cm, classes, names=None,
                          normalize=True, title='Sekaannusmatriisi'):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py

    Parameters:
        cm: square confusion-matrix array (rows are true labels).
        classes: tick labels for both axes.
        names: optional labels that replace the y-axis ticks only.
        normalize: if True, each row is divided by its row sum.
        title: figure title (default is Finnish for "confusion matrix").
    """
    if normalize:
        # Row-normalise so each row sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    rcParams.update({'figure.autolayout': True})
    import itertools
    cmap = plt.cm.Blues
    fig, ax = plt.subplots(figsize=(8, 4))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.set_title(title)
    ax.set_ylabel('True label')
    ax.set_xlabel('Predicted label')
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=0)
    plt.yticks(tick_marks, classes)
    if names:
        # NOTE(review): this overrides the y tick labels set just above.
        plt.yticks(tick_marks, names)
    # Overlay each cell value; white text on dark (high-valued) cells.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        ax.text(j, i, format(cm[i, j], fmt),
                horizontalalignment="center",
                color="white" if cm[i, j] > thresh else "black")
| [
"numpy.set_printoptions",
"matplotlib.pyplot.imshow",
"matplotlib.rcParams.update",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots"
] | [((84, 128), 'matplotlib.rcParams.update', 'rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (99, 128), False, 'from matplotlib import rcParams\n'), ((130, 162), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (149, 162), True, 'import numpy as np\n'), ((664, 708), 'matplotlib.rcParams.update', 'rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (679, 708), False, 'from matplotlib import rcParams\n'), ((768, 796), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (780, 796), True, 'import matplotlib.pyplot as plt\n'), ((801, 851), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (811, 851), True, 'import matplotlib.pyplot as plt\n'), ((949, 963), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (961, 963), True, 'import matplotlib.pyplot as plt\n'), ((1009, 1052), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(0)'}), '(tick_marks, classes, rotation=0)\n', (1019, 1052), True, 'import matplotlib.pyplot as plt\n'), ((1057, 1088), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (1067, 1088), True, 'import matplotlib.pyplot as plt\n'), ((1111, 1140), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'names'], {}), '(tick_marks, names)\n', (1121, 1140), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from collections import Counter
import os
import numpy as np
def read_nodes(filename):
    """Collect all node ids from *filename*.

    Each line is 'n1,n2,...' optionally followed by ':timestamps'; only the
    part before the first ':' is read.
    """
    nodes = []
    with open(filename, 'r') as fh:
        for raw in fh:
            ids = raw.split(':')[0].replace('\n', '')
            nodes.extend(ids.split(','))
    return nodes
def read_graph(filename, node_to_id):
    """Load a weighted adjacency matrix from an edge-list file.

    Each line is 'source target [weight]'; the weight defaults to 1.0 and
    edges touching unknown nodes are ignored.
    """
    n = len(node_to_id)
    adj = np.zeros((n, n), dtype=np.float32)
    with open(filename, 'r') as fh:
        for raw in fh:
            parts = raw.strip().split()
            if parts[0] not in node_to_id or parts[1] not in node_to_id:
                continue
            src = node_to_id[parts[0]]
            dst = node_to_id[parts[1]]
            adj[src, dst] = float(parts[2]) if len(parts) >= 3 else 1.0
    return adj
def build_vocab(filename):
    """Build the node vocabulary from a cascade file.

    Nodes are ordered by descending frequency; id 0 is reserved for the
    padding mark '-1'.  Returns (nodes, node_to_id).
    """
    freq = Counter(read_nodes(filename))
    ordered = sorted(freq.items(), key=lambda kv: -kv[1])
    nodes = [node for node, _ in ordered]
    nodes.insert(0, '-1')
    node_to_id = {node: idx for idx, node in enumerate(nodes)}
    return nodes, node_to_id
def _file_to_node_ids(filename, node_to_id):
data = []
len_list = []
with open(filename, 'r') as f:
for line in f:
seq = line.strip().split(',')
ix_seq = [node_to_id[x] for x in seq if x in node_to_id]
if len(ix_seq)>=2:
data.append(ix_seq)
len_list.append(len(ix_seq)-1)
size = len(data)
total_num = np.sum(len_list)
return (data, len_list, size, total_num)
def _file_to_node_ids_withtime(filename, node_to_id):
cas_data = []
time_data = []
len_list = []
with open(filename, 'r') as f:
for line in f:
time = line.strip().split(':')[-1].split(',')
time = [float(i) for i in time]
seq = line.strip().split(':')[0].split(',')
ix_seq = [node_to_id[x] for x in seq if x in node_to_id]
if len(ix_seq)>=2:
cas_data.append(ix_seq)
time_data.append(time)
len_list.append(len(ix_seq)-1)
size = len(cas_data)
total_num = np.sum(len_list)
return (cas_data, time_data, len_list, size, total_num)
def to_nodes(seq, nodes):
    """Map a sequence of integer ids back to their node labels."""
    return [nodes[idx] for idx in seq]
def read_raw_data(data_path=None):
    """Load the train/val/test cascade splits for *data_path*.

    The vocabulary is built from the training split only, so validation and
    test nodes unseen at train time are dropped.
    """
    train_file = data_path + '-train'
    nodes, node_to_id = build_vocab(train_file)
    train_data = _file_to_node_ids(train_file, node_to_id)
    valid_data = _file_to_node_ids(data_path + '-val', node_to_id)
    test_data = _file_to_node_ids(data_path + '-test', node_to_id)
    return train_data, valid_data, test_data, nodes, node_to_id
def read_raw_data_withtime(data_path=None):
    """Load the timestamped train/val/test cascade splits ('-time-*' files).

    Mirrors read_raw_data: the vocabulary comes from the training split.
    """
    prefix = data_path + '-time'
    nodes, node_to_id = build_vocab(prefix + '-train')
    train_data = _file_to_node_ids_withtime(prefix + '-train', node_to_id)
    valid_data = _file_to_node_ids_withtime(prefix + '-val', node_to_id)
    test_data = _file_to_node_ids_withtime(prefix + '-test', node_to_id)
    return train_data, valid_data, test_data, nodes, node_to_id
def _pad_batch(seqs, steps, max_length):
    """Expand one batch of cascades into (inputs, targets, batch_steps).

    Every proper prefix of every cascade becomes a training sample: the
    prefix (truncated to its last *max_length* nodes) is the input and the
    next node id is the target.  Prefixes are right-padded with zeros to
    the common batch width (the batch's longest step count, capped at
    *max_length*).
    """
    batch_steps = np.array(steps)
    width = batch_steps.max()
    if width > max_length:
        width = max_length
    inputs = []
    targets = []
    for seq in seqs:
        for k in range(len(seq) - 1):
            # keep at most the last max_length nodes of the prefix
            start = k - (max_length - 1) if k + 1 > max_length else 0
            window = seq[start:k + 1]
            inputs.append(np.pad(np.array(window), (0, width - len(window)), 'constant'))
            targets.append(seq[k + 1])
    return np.array(inputs), np.array(targets), batch_steps


def batch_generator(train_data, batch_size, max_length):
    """Split (sequences, steps) into padded mini-batches.

    Returns three parallel lists — xs (padded input arrays), ys (target
    arrays) and ss (per-cascade step counts) — one entry per batch.  When
    len(sequences) is not a multiple of *batch_size*, the leftover
    cascades form one final, smaller batch.  (Refactored: the previously
    duplicated main/remainder batching code now shares _pad_batch.)
    """
    train_seq = train_data[0]
    train_steps = train_data[1]
    xs, ys, ss = [], [], []
    full_batches = len(train_seq) // batch_size
    for i in range(full_batches):
        lo, hi = i * batch_size, (i + 1) * batch_size
        x, y, steps = _pad_batch(train_seq[lo:hi], train_steps[lo:hi], max_length)
        xs.append(x)
        ys.append(y)
        ss.append(steps)
    # leftover cascades form one final, smaller batch
    rest = full_batches * batch_size
    if len(train_steps[rest:]) != 0:
        x, y, steps = _pad_batch(train_seq[rest:], train_steps[rest:], max_length)
        xs.append(x)
        ys.append(y)
        ss.append(steps)
    return xs, ys, ss
def _append_window_batch_withtime(sequences, times, batch_steps, max_length,
                                  n_ti, ti, time_unit, xs, ys, ts, ss):
    """Turn one batch of timestamped cascades into padded examples.

    Like the plain generator, every prefix of each sequence (capped to the
    last ``max_length`` ids) becomes one example with the next node id as
    target; additionally, each input position gets a discretized time gap
    to the most recent event, in bins of ``ti * time_unit``, clipped to
    ``n_ti``.  Results are appended to ``xs``/``ys``/``ts``/``ss`` in place.
    """
    max_batch_steps = batch_steps.max()
    if max_batch_steps > max_length:
        max_batch_steps = max_length
    x = []
    y = []
    t = []
    for seq, time in zip(sequences, times):
        for k in range(len(seq) - 1):
            # Keep at most the last max_length events before position k+1.
            start_id = max(0, k + 1 - max_length)
            window = np.array(seq[start_id:k + 1])
            pad_width = (0, max_batch_steps - len(window))
            x.append(np.pad(window, pad_width, 'constant'))
            y.append(seq[k + 1])
            # Binned time gap to the latest event, clipped to n_ti
            # (vectorized form of the original per-element clamp loop).
            trunc_time = np.array(time[start_id:k + 1])
            trunc_time = np.ceil((trunc_time[-1] - trunc_time) / (ti * time_unit))
            trunc_time = np.minimum(trunc_time, n_ti)
            t.append(np.pad(trunc_time, pad_width, 'constant'))
    xs.append(np.array(x))
    ys.append(np.array(y))
    ts.append(np.array(t))
    ss.append(batch_steps)


def batch_generator_withtime(train_data, batch_size, max_length, n_ti, max_time, time_unit):
    """Build padded, time-annotated batches from ``(sequences, times, steps)``.

    Args:
        train_data: triple ``(train_seq, train_time, train_steps)``.
        batch_size: cascades grouped per batch; a smaller leftover batch is
            emitted for any remainder.
        max_length: maximum history window kept per example.
        n_ti: number of discrete time bins (gaps are clipped to this value).
        max_time: total time span covered by the bins.
        time_unit: scale of the raw timestamps (e.g. 3600. for hours).

    Returns:
        ``(xs, ys, ts, ss)`` lists with one entry per batch: padded input
        windows, next-node targets, binned time gaps, and step counts.
    """
    xs = []
    ys = []
    ts = []
    ss = []
    train_seq = train_data[0]
    train_time = train_data[1]
    train_steps = train_data[2]
    ti = max_time / n_ti  # width of one time bin, in units of `time_unit`
    batch_len = len(train_seq) // batch_size
    for i in range(batch_len):
        lo, hi = i * batch_size, (i + 1) * batch_size
        _append_window_batch_withtime(train_seq[lo:hi], train_time[lo:hi],
                                      np.array(train_steps[lo:hi]), max_length,
                                      n_ti, ti, time_unit, xs, ys, ts, ss)
    # Remainder batch for cascades that do not fill a whole batch.
    lo = batch_len * batch_size
    if len(train_steps[lo:]) != 0:
        _append_window_batch_withtime(train_seq[lo:], train_time[lo:],
                                      np.array(train_steps[lo:]), max_length,
                                      n_ti, ti, time_unit, xs, ys, ts, ss)
    return xs, ys, ts, ss
def main():
    """Smoke test: load the meme-cascades test split and print one batch.

    Prints the first ``len1`` (input, target, time-bin) examples of the
    first batch plus the example at index ``len1`` as a boundary check.
    """
    train_data, valid_data, test_data, nodes, node_to_id = \
        read_raw_data_withtime('data/meme-cascades')
    # batch_size=5, max_length=5, n_ti=50 bins over max_time=100,
    # timestamps assumed to be in seconds (time_unit=3600.).
    x_train, y_train, t_train, seq_length = batch_generator_withtime(
        test_data, 5, 5, 50, 100, 3600.)
    # Step count recorded for the first cascade of the first batch.
    len1 = seq_length[0][0]
    for i in range(len1):
        print(x_train[0][i], y_train[0][i], t_train[0][i])
    print(x_train[0][len1], y_train[0][len1], t_train[0][len1])
# Run the smoke test only when executed as a script, not on import.
if __name__ == '__main__':
    main()
"numpy.sum",
"numpy.ceil",
"numpy.zeros",
"numpy.array",
"collections.Counter"
] | [((410, 444), 'numpy.zeros', 'np.zeros', (['(N, N)'], {'dtype': 'np.float32'}), '((N, N), dtype=np.float32)\n', (418, 444), True, 'import numpy as np\n'), ((972, 985), 'collections.Counter', 'Counter', (['data'], {}), '(data)\n', (979, 985), False, 'from collections import Counter\n'), ((1896, 1912), 'numpy.sum', 'np.sum', (['len_list'], {}), '(len_list)\n', (1902, 1912), True, 'import numpy as np\n'), ((2569, 2585), 'numpy.sum', 'np.sum', (['len_list'], {}), '(len_list)\n', (2575, 2585), True, 'import numpy as np\n'), ((3979, 4037), 'numpy.array', 'np.array', (['train_steps[i * batch_size:(i + 1) * batch_size]'], {}), '(train_steps[i * batch_size:(i + 1) * batch_size])\n', (3987, 4037), True, 'import numpy as np\n'), ((4650, 4661), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (4658, 4661), True, 'import numpy as np\n'), ((4675, 4686), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4683, 4686), True, 'import numpy as np\n'), ((4901, 4947), 'numpy.array', 'np.array', (['train_steps[batch_len * batch_size:]'], {}), '(train_steps[batch_len * batch_size:])\n', (4909, 4947), True, 'import numpy as np\n'), ((5566, 5577), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5574, 5577), True, 'import numpy as np\n'), ((5591, 5602), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (5599, 5602), True, 'import numpy as np\n'), ((6151, 6209), 'numpy.array', 'np.array', (['train_steps[i * batch_size:(i + 1) * batch_size]'], {}), '(train_steps[i * batch_size:(i + 1) * batch_size])\n', (6159, 6209), True, 'import numpy as np\n'), ((7337, 7348), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (7345, 7348), True, 'import numpy as np\n'), ((7362, 7373), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (7370, 7373), True, 'import numpy as np\n'), ((7387, 7398), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (7395, 7398), True, 'import numpy as np\n'), ((7651, 7697), 'numpy.array', 'np.array', (['train_steps[batch_len * batch_size:]'], {}), '(train_steps[batch_len * 
batch_size:])\n', (7659, 7697), True, 'import numpy as np\n'), ((8839, 8850), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (8847, 8850), True, 'import numpy as np\n'), ((8864, 8875), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (8872, 8875), True, 'import numpy as np\n'), ((8889, 8900), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (8897, 8900), True, 'import numpy as np\n'), ((6816, 6846), 'numpy.array', 'np.array', (['time[start_id:k + 1]'], {}), '(time[start_id:k + 1])\n', (6824, 6846), True, 'import numpy as np\n'), ((6875, 6932), 'numpy.ceil', 'np.ceil', (['((trunc_time[-1] - trunc_time) / (ti * time_unit))'], {}), '((trunc_time[-1] - trunc_time) / (ti * time_unit))\n', (6882, 6932), True, 'import numpy as np\n'), ((8318, 8348), 'numpy.array', 'np.array', (['time[start_id:k + 1]'], {}), '(time[start_id:k + 1])\n', (8326, 8348), True, 'import numpy as np\n'), ((8377, 8434), 'numpy.ceil', 'np.ceil', (['((trunc_time[-1] - trunc_time) / (ti * time_unit))'], {}), '((trunc_time[-1] - trunc_time) / (ti * time_unit))\n', (8384, 8434), True, 'import numpy as np\n'), ((4479, 4508), 'numpy.array', 'np.array', (['seq[start_id:k + 1]'], {}), '(seq[start_id:k + 1])\n', (4487, 4508), True, 'import numpy as np\n'), ((5395, 5424), 'numpy.array', 'np.array', (['seq[start_id:k + 1]'], {}), '(seq[start_id:k + 1])\n', (5403, 5424), True, 'import numpy as np\n'), ((6702, 6731), 'numpy.array', 'np.array', (['seq[start_id:k + 1]'], {}), '(seq[start_id:k + 1])\n', (6710, 6731), True, 'import numpy as np\n'), ((8204, 8233), 'numpy.array', 'np.array', (['seq[start_id:k + 1]'], {}), '(seq[start_id:k + 1])\n', (8212, 8233), True, 'import numpy as np\n')] |
# coding: utf-8
# In[12]:
'''
Support Vector Machine
'''
from sklearn import svm
import matplotlib.pyplot as plt
import pandas
import math
import numpy as np
def init():
    """Load the bitcoin dataset and derive a 'next' target column.

    Rows with non-finite values in any numeric column are dropped, the
    'next' column holds the following row's market price (looked up by
    index label; rows whose successor label was dropped keep 0), and the
    last row is removed because it has no successor.
    """
    # Load input data
    ipdata = pandas.read_csv("C://Users//User//Desktop//ML//Project//bitcoin_dataset.csv", parse_dates=['Date'])
    # Drop rows with NaN/inf column by column; np.isfinite raises TypeError
    # on non-numeric columns (e.g. 'Date'), which are simply skipped.
    for key in ipdata:
        try:
            ipdata = ipdata[np.isfinite(ipdata[key])]
        except TypeError:
            pass
    ipdata['next'] = pandas.Series([0] * len(ipdata['btc_market_price']), index=ipdata.index)
    # Keyword axis argument: the positional form was removed in pandas 2.0.
    ipdata = ipdata.drop('btc_trade_volume', axis=1)
    # Fill 'next' with the price at label ind+1.  The original used the
    # long-removed `.ix` indexer and a bare `except:`; check for the
    # successor label explicitly instead.
    prices = ipdata['btc_market_price']
    for ind in ipdata.index:
        if ind + 1 in ipdata.index:
            ipdata.loc[ind, 'next'] = prices[ind + 1]
    return ipdata.drop([max(ipdata.index)])
def accuracyStats(l1, l2, *args):
    """Print the accuracy of ``l1`` vs ``l2`` at several dollar margins.

    The default margins (100, 50, 25, 10) are merged with any extras given
    in ``args``; duplicates collapse and margins print widest first.
    """
    margins = {100, 50, 25, 10}
    margins.update(args)
    for level in sorted(margins, reverse=True):
        print("Accuracy with a margin of", str(level) + "$ : ", accuracy(l1, l2, level))
def accuracy(predicted, actual, margin=100):
    """Fraction of predictions strictly within ``margin`` of the actual value.

    Raises:
        ValueError: if the two sequences differ in length.
    """
    if len(predicted) != len(actual):
        raise ValueError('"predicted list" and "actual" list are of unequal lengths!')
    within = sum(1 for p, a in zip(predicted, actual) if math.fabs(p - a) < margin)
    return within / len(predicted)
def plot_results(predicted_data, true_data, title='', xlab='', ylab=''):
    """Overlay predicted and true series against their sample index."""
    plt.title(title)
    plt.plot(range(len(predicted_data)), predicted_data, label='Prediction')
    plt.plot(range(len(true_data)), true_data, label='True Data')
    plt.xlabel(xlab)
    plt.ylabel(ylab)
    plt.legend()
    plt.show()
# Load the cleaned dataset and drop the date column (models use numeric features only).
data = init()
ndata = data.drop('Date', 1)
# NOTE(review): positional axis in drop() is removed in pandas 2.0 — should be axis=1.
nrow1 = int(ndata.shape[0]*0.8)
nrow2 = int(ndata.shape[0]*0.2)
train_data = ndata
test_data = ndata
# NOTE(review): train and test are sampled *independently* from the same frame,
# so they overlap — this leaks training rows into the test set. A proper split
# (e.g. sampling test from the complement of train) should be confirmed/fixed.
train_data = ndata.sample(n = nrow1) # training data
test_data = ndata.sample(n = nrow2) # testing data
# Support-vector regression with an RBF kernel; predicts next-day price ('next').
model1 = svm.SVR(kernel='rbf', C = 1e4, gamma = 0.3)
model1 = model1.fit(train_data.drop('next', 1), train_data['next']) # fit the model
res1 = model1.predict(test_data.drop('next', 1))
print('Accuracy stats of SVM with radial kernel : ')
accuracyStats(res1, test_data['next'])
plot_results(res1, test_data['next'], 'SVM with radial kernel', 'Day', 'Price (in USD)')
# Per-sample absolute error, then its mean and a plot over the test samples.
errors = [math.fabs(x-y) for x,y in zip(res1, test_data['next'])]
print("Average error : ", np.average(errors))
plt.plot(errors)
plt.title('SVM (radial kernel) Errors')
plt.xlabel('Day')
plt.ylabel('Price (in USD)')
plt.show()
# In[ ]:
| [
"matplotlib.pyplot.title",
"sklearn.svm.SVR",
"matplotlib.pyplot.show",
"numpy.average",
"matplotlib.pyplot.plot",
"math.fabs",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"numpy.isfinite",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((2090, 2133), 'sklearn.svm.SVR', 'svm.SVR', ([], {'kernel': '"""rbf"""', 'C': '(10000.0)', 'gamma': '(0.3)'}), "(kernel='rbf', C=10000.0, gamma=0.3)\n", (2097, 2133), False, 'from sklearn import svm\n'), ((2570, 2586), 'matplotlib.pyplot.plot', 'plt.plot', (['errors'], {}), '(errors)\n', (2578, 2586), True, 'import matplotlib.pyplot as plt\n'), ((2587, 2626), 'matplotlib.pyplot.title', 'plt.title', (['"""SVM (radial kernel) Errors"""'], {}), "('SVM (radial kernel) Errors')\n", (2596, 2626), True, 'import matplotlib.pyplot as plt\n'), ((2627, 2644), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Day"""'], {}), "('Day')\n", (2637, 2644), True, 'import matplotlib.pyplot as plt\n'), ((2645, 2673), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price (in USD)"""'], {}), "('Price (in USD)')\n", (2655, 2673), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2684), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2682, 2684), True, 'import matplotlib.pyplot as plt\n'), ((209, 312), 'pandas.read_csv', 'pandas.read_csv', (['"""C://Users//User//Desktop//ML//Project//bitcoin_dataset.csv"""'], {'parse_dates': "['Date']"}), "('C://Users//User//Desktop//ML//Project//bitcoin_dataset.csv',\n parse_dates=['Date'])\n", (224, 312), False, 'import pandas\n'), ((1560, 1576), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1569, 1576), True, 'import matplotlib.pyplot as plt\n'), ((1724, 1740), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlab'], {}), '(xlab)\n', (1734, 1740), True, 'import matplotlib.pyplot as plt\n'), ((1745, 1761), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylab'], {}), '(ylab)\n', (1755, 1761), True, 'import matplotlib.pyplot as plt\n'), ((1766, 1778), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1776, 1778), True, 'import matplotlib.pyplot as plt\n'), ((1783, 1793), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1791, 1793), True, 'import matplotlib.pyplot as plt\n'), ((2468, 2484), 'math.fabs', 
'math.fabs', (['(x - y)'], {}), '(x - y)\n', (2477, 2484), False, 'import math\n'), ((2550, 2568), 'numpy.average', 'np.average', (['errors'], {}), '(errors)\n', (2560, 2568), True, 'import numpy as np\n'), ((1401, 1417), 'math.fabs', 'math.fabs', (['(p - a)'], {}), '(p - a)\n', (1410, 1417), False, 'import math\n'), ((400, 424), 'numpy.isfinite', 'np.isfinite', (['ipdata[key]'], {}), '(ipdata[key])\n', (411, 424), True, 'import numpy as np\n')] |
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from utils.preprocessor import csv_to_pd
def create_dataloader(data_path, is_train, scaler, batch_size, window_size, ahead):
    """Build a (DataFrame, DataLoader[, scaler]) tuple from a CSV time series.

    The series is flattened to 1-D, min-max scaled to [0, 1] (fitting a new
    scaler when ``is_train``; reusing the supplied ``scaler`` otherwise),
    reshaped back to ``(len, n_features)`` and wrapped in an ``OIEDataset``
    of sliding windows.  Training loaders shuffle; eval loaders keep order.

    Returns:
        ``(df, dataloader, scaler)`` when ``is_train`` else ``(df, dataloader)``.
    """
    df = csv_to_pd(data_path)
    print(f'df.shape: {df.shape}')
    print(df.describe())
    # Flatten to a 1-D vector because MinMaxScaler works on a single column.
    outbreaks = np.array(df[:]).reshape(-1)
    if is_train:
        scaler = MinMaxScaler()
        scaler = scaler.fit(np.expand_dims(outbreaks, axis=1))
    # Normalize to [0, 1], then restore the (len, n_features) layout.
    scaled_outbreaks = scaler.transform(np.expand_dims(outbreaks, axis=1))
    scaled_outbreaks = scaled_outbreaks.reshape(-1, df.shape[-1])
    print(f'boundary_check: {boundary_check(scaled_outbreaks)}')
    # numpy default float64 -> torch default float32
    scaled_outbreaks = torch.from_numpy(scaled_outbreaks).float()
    dataset = OIEDataset(scaled_outbreaks, window_size, ahead)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=is_train)
    if is_train:
        return df, dataloader, scaler
    return df, dataloader
def boundary_check(x):
    """Return (any value > 1.0, any value < 0, any NaN) for array ``x``."""
    above_one = np.any(x > 1.0)
    below_zero = np.any(x < 0)
    has_nan = np.any(np.isnan(x))
    return above_one, below_zero, has_nan
class OIEDataset(Dataset):
    """Torch dataset exposing a scaled series as sliding-window samples."""

    def __init__(self, data, window_size, ahead):
        self.data = data
        self.window_size = window_size
        # Pre-materialize every (input window, target horizon) pair.
        self.x, self.y = create_sequences(data, window_size, ahead)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, index):
        return self.x[index], self.y[index]
def create_sequences(data, window_size, ahead):
    """Slice ``data`` into stacked input windows and ``ahead``-step targets.

    Window i is ``data[i:i+window_size]`` and its target is the following
    ``ahead`` rows, so the outputs have shapes
    ``(n, window_size, n_features)`` and ``(n, ahead, n_features)``.
    """
    n_samples = len(data) - window_size - ahead
    xs = [data[i:i + window_size] for i in range(n_samples)]
    ys = [data[i + window_size:i + window_size + ahead] for i in range(n_samples)]
    return torch.stack(xs), torch.stack(ys)
| [
"torch.stack",
"torch.utils.data.DataLoader",
"sklearn.preprocessing.MinMaxScaler",
"numpy.expand_dims",
"numpy.isnan",
"numpy.any",
"numpy.array",
"utils.preprocessor.csv_to_pd",
"torch.from_numpy"
] | [((289, 309), 'utils.preprocessor.csv_to_pd', 'csv_to_pd', (['data_path'], {}), '(data_path)\n', (298, 309), False, 'from utils.preprocessor import csv_to_pd\n'), ((394, 409), 'numpy.array', 'np.array', (['df[:]'], {}), '(df[:])\n', (402, 409), True, 'import numpy as np\n'), ((575, 589), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (587, 589), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1246, 1302), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(dataset, batch_size=batch_size, shuffle=True)\n', (1256, 1302), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1866, 1908), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size'}), '(dataset, batch_size=batch_size)\n', (1876, 1908), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1979, 1994), 'numpy.any', 'np.any', (['(x > 1.0)'], {}), '(x > 1.0)\n', (1985, 1994), True, 'import numpy as np\n'), ((1996, 2009), 'numpy.any', 'np.any', (['(x < 0)'], {}), '(x < 0)\n', (2002, 2009), True, 'import numpy as np\n'), ((2950, 2965), 'torch.stack', 'torch.stack', (['xs'], {}), '(xs)\n', (2961, 2965), False, 'import torch\n'), ((2967, 2982), 'torch.stack', 'torch.stack', (['ys'], {}), '(ys)\n', (2978, 2982), False, 'import torch\n'), ((618, 651), 'numpy.expand_dims', 'np.expand_dims', (['outbreaks'], {'axis': '(1)'}), '(outbreaks, axis=1)\n', (632, 651), True, 'import numpy as np\n'), ((751, 784), 'numpy.expand_dims', 'np.expand_dims', (['outbreaks'], {'axis': '(1)'}), '(outbreaks, axis=1)\n', (765, 784), True, 'import numpy as np\n'), ((1467, 1500), 'numpy.expand_dims', 'np.expand_dims', (['outbreaks'], {'axis': '(1)'}), '(outbreaks, axis=1)\n', (1481, 1500), True, 'import numpy as np\n'), ((2018, 2029), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (2026, 2029), True, 'import numpy as np\n'), ((1045, 1079), 'torch.from_numpy', 'torch.from_numpy', 
(['scaled_outbreaks'], {}), '(scaled_outbreaks)\n', (1061, 1079), False, 'import torch\n'), ((1675, 1709), 'torch.from_numpy', 'torch.from_numpy', (['scaled_outbreaks'], {}), '(scaled_outbreaks)\n', (1691, 1709), False, 'import torch\n')] |
from keras.utils import Sequence
import numpy as np
import copy
import shared.agent_methods.methods as agmeth
class EnvironmentSequence(Sequence):
    """Keras ``Sequence`` producing DQN minibatches by simulating an
    environment online.

    Each ``__getitem__`` call advances the environment by
    ``grad_update_frequency`` steps, appends the transitions to a replay
    buffer, and returns a sampled minibatch of ``[states, actions]`` inputs
    with one-step bootstrapped Q-targets.
    """

    def __init__(self, policy_source, source_type, environment,
                 epsilon, batch_size, grad_update_frequency,
                 target_update_frequency, action_repeat,
                 gamma, epoch_length, replay_buffer_size=None,
                 replay_buffer_min=None, use_double_dqn=False):
        """
        We initialize the EnvironmentSequence class with a batch size and an
        environment to generate data from. The generator will have a pointer
        to the model object in order to generate policy outputs.

        Note that:
        - If replay_buffer_size > batch_size we effectively get an experience replay mechanism.
          If replay_buffer_size < batch_size we will resample points for the minibatch update.
          If replay_buffer_size == batch_size we sample the entire buffer.
        - If batch_size == grad_update_frequency each learning update uses entirely new data.
          If batch_size < grad_update_frequency (and replay_buffer_size == batch_size) then
          some experiences will be skipped and not contribute to the gradient calculation.
          If batch_size > grad_update_frequency more recent experiences get resampled.
        """
        self.policy_source = policy_source
        self.source_type = source_type
        self.environment = environment
        self.epsilon = epsilon  # This should be a function taking in the iteration number
        self.batch_size = batch_size
        self.grad_update_frequency = grad_update_frequency
        self.target_update_frequency = target_update_frequency
        self.action_repeat = action_repeat
        self.gamma = gamma
        self.epoch_length = epoch_length
        # Both buffer parameters default to batch_size when not supplied.
        self.replay_buffer_size = replay_buffer_size if replay_buffer_size is not None else batch_size
        self.replay_buffer_min = replay_buffer_min if replay_buffer_min is not None else batch_size
        self.use_double_dqn = use_double_dqn
        # A target network is kept only when an update frequency was given.
        self.use_target_model = self.target_update_frequency is not None
        # Number of warm-up simulations (and iterations) run with a random
        # policy before learning-based action selection starts.
        self.initial_sims = self.replay_buffer_min // self.grad_update_frequency
        self.initial_iterations = self.initial_sims * self.grad_update_frequency
        if self.use_double_dqn:
            assert self.use_target_model, "`use_double_dqn` cannot be set to `True` if no target model used"

        # Buffers
        self.reward_buffer = []
        self.observation_buffer = []
        self.action_buffer = []
        self.done_buffer = []

        # Iteration state variables
        self.episode = 1
        self.iteration = 0

        # Keep track of state before getting minibatch, Initialize state buffers.
        self.prev_observation, self.prev_action, self.prev_reward, self.prev_done = self.environment.reset(), None, None, None
        EnvironmentSequence.record_single(self.observation_buffer, self.prev_observation, self.replay_buffer_size)
        EnvironmentSequence.record_single(self.action_buffer, self.prev_action, self.replay_buffer_size)
        EnvironmentSequence.record_single(self.reward_buffer, self.prev_reward, self.replay_buffer_size)
        EnvironmentSequence.record_single(self.done_buffer, self.prev_done, self.replay_buffer_size)

        # Model copies
        self.current_model = policy_source
        self.target_model = copy.deepcopy(self.current_model) if self.use_target_model else self.current_model

        assert source_type in ['value', 'policy'], "Allowed policy source types are `value` and `policy`"

        # Initial simulations
        for sim in range(self.initial_sims):
            self.simulate()

    def __len__(self):
        """
        Should return the number of minibatches
        per epoch. In the case of deep RL this
        is a choice of convenience.
        This method is required by the Sequence
        interface.
        """
        return self.epoch_length

    def __getitem__(self, idx):
        """
        The second method required to be implemented by
        the Sequence interface. This is the method used to
        generate minibatches.
        This should use the environment to generate more
        observations. This would also be a good place to
        safe observations to a replay memory and to
        multi-thread asynchronous agents.
        """
        # FIXME - what happens if we have more than one worker loading minibatches? Do we have asynchrony issues?
        # Renamed from `iter` to avoid shadowing the builtin.
        iteration_count = self.simulate()
        assert ((iteration_count - self.initial_iterations) / self.grad_update_frequency - 1) % self.epoch_length == idx, \
            "Consistency check, iterations and minibatch index don't match"
        return self.get_minibatch()

    def get_latest_observations(self, n):
        """
        Gets the latest n observations and
        rewards.
        """
        assert 0 < n <= self.replay_buffer_size, "Cannot get more observations than stored in buffer"
        return self.observation_buffer[-n:]

    def get_action(self):
        """
        Gets next action either from value function or
        policy function based on whatever is passed to
        the model.
        We get the latest experiences from the observation
        buffer using get_latest_observations method. This
        allows us to stack previous states if necessary.
        :return: Next action.
        """
        # FIXME - This will need to be different when the states are not the same as the observations.
        # FIXME- also note we assume `policy_source` is a Q function. This will not work with policy gradients then.

        # Do random policy until we have sufficiently filled the replay buffer
        if self.iteration // self.grad_update_frequency < self.initial_sims:
            action = self.environment.action_space.sample()
        else:
            states = self.get_latest_observations(1)
            action = agmeth.get_action(self.current_model, self.environment, states, self.epsilon, self.iteration)

        return action

    def update_target(self):
        """
        Update target model.
        """
        self.target_model = copy.deepcopy(self.current_model) if self.use_target_model else self.current_model

    def double_dqn_model(self):
        """Return the target model when double DQN is active, else None."""
        if self.use_double_dqn:
            return self.target_model
        else:
            return None

    @staticmethod
    def record_single(buffer, new_value, length_limit):
        """Append ``new_value`` to ``buffer``, evicting the oldest entry
        once the buffer holds ``length_limit`` items (FIFO)."""
        if len(buffer) == length_limit:
            buffer.pop(0)
        buffer.append(new_value)

    def record(self, observation, reward, done, action):
        """
        Takes in observed states and rewards and handles
        storing them in a buffer (which may be an array,
        a list of files to be loaded, etc.)
        :return:
        """
        # FIXME - for large enough buffers should we overwrite this method with one that serializes states???
        # Note the observation we are saving is actually s_t. We keep an extra state so we can sample transitions.
        EnvironmentSequence.record_single(self.observation_buffer, observation, self.replay_buffer_size)
        # This is r_{t-1}
        EnvironmentSequence.record_single(self.reward_buffer, reward, self.replay_buffer_size)
        # This is a_{t-1}
        EnvironmentSequence.record_single(self.action_buffer, action, self.replay_buffer_size)
        # This denotes whether s_{t} is terminal
        EnvironmentSequence.record_single(self.done_buffer, done, self.replay_buffer_size)

    def simulate(self):
        """
        Simulate grad_update_frequency steps of the agent
        and add them to the experience buffer.
        This method handles keeping track of iterations,
        updating the target model when appropriate, updating
        actions when required, etc.
        :return:
        """
        observation = self.prev_observation
        done = self.prev_done
        action = self.prev_action

        # Simulate grad_update_frequency # of environment and action steps
        for iter in range(self.grad_update_frequency):
            self.iteration += 1

            # Update target after the appropriate number of iterations
            if self.use_target_model and (self.iteration % self.target_update_frequency) == 0\
                    and self.iteration > self.initial_iterations:
                self.update_target()

            # Check if episode done, if so draw next state with reset method on env
            if done:
                observation, action, reward, done = self.environment.reset(), None, None, None
                self.episode += 1
            # Otherwise only get action every action_repeat iterations or on restart and use action to get next state
            else:
                # Get a new action after repeating action_repeat # times
                if (self.iteration % self.action_repeat) == 0 or action is None:
                    action = self.get_action()
                observation, reward, done, info = self.environment.step(action)

            # The record method takes care of recording observed states
            self.record(observation, reward, done, action)

        self.prev_observation, self.prev_done, self.prev_action = observation, done, action

        return self.iteration

    def get_states_length(self):
        """Number of usable entries currently held in the buffers."""
        return min(len(self.observation_buffer), self.replay_buffer_size)

    def check_is_end_start_transition(self, index):
        """
        Check index does not map to an transition from
        final state to initial. These are not allowed.
        """
        action = self.action_buffer[index]
        # BUG FIX: the reward must come from reward_buffer; the original read
        # action_buffer twice, which made the consistency assert vacuous.
        reward = self.reward_buffer[index]
        both_not_none = action is not None and reward is not None
        both_none = action is None and reward is None
        assert both_none or both_not_none, "Consistency check, reward and action must both be `None` or not."
        return both_none

    def sample_indices(self):
        """Sample (with replacement) batch_size buffer indices that do not
        cross an episode boundary."""
        valid_indices = [index for index in range(1, self.get_states_length())
                         if not self.check_is_end_start_transition(index)]
        sampled_indices = np.random.choice(valid_indices, self.batch_size)
        return sampled_indices

    def get_minibatch(self):
        """
        A method for retrieving experiences from our
        experience buffer. It serves as the interface
        between our methods and the experience buffer.
        When the experience buffer is larger than
        grad_update_frequency it serves as a memory
        replay mechanism.
        :return: An experience minibatch.
        """
        # FIXME - need to replace with method that feeds features, NOT observations to model
        # FIXME- also note we assume `policy_source` is a Q function. This will not work with policy gradients then.
        # We assume the paper does sampling with replacement. Makes the most sense if we're sampling a distribution.
        sampled_indices = self.sample_indices()
        states = np.array([self.observation_buffer[index - 1] for index in sampled_indices])
        actions = np.array([self.action_buffer[index] for index in sampled_indices])
        next_states = np.array([self.observation_buffer[index] for index in sampled_indices])
        rewards = np.array([self.reward_buffer[index] for index in sampled_indices])
        is_next_terminals = np.array([self.done_buffer[index] for index in sampled_indices])
        Q_max = agmeth.evaluate_state(self.target_model, next_states, self.double_dqn_model())

        # We reshape the arrays so that it is clear to Tensorflow that each row is a datapoint
        x = [states, actions.reshape(-1, 1)]
        # Bootstrapped target: r + gamma * max_a Q(s', a), zeroed at terminals.
        y = (rewards + self.gamma * Q_max * np.logical_not(is_next_terminals)).reshape(-1, 1)
        return x, y
| [
"shared.agent_methods.methods.get_action",
"copy.deepcopy",
"numpy.logical_not",
"numpy.array",
"numpy.random.choice"
] | [((10217, 10265), 'numpy.random.choice', 'np.random.choice', (['valid_indices', 'self.batch_size'], {}), '(valid_indices, self.batch_size)\n', (10233, 10265), True, 'import numpy as np\n'), ((11080, 11155), 'numpy.array', 'np.array', (['[self.observation_buffer[index - 1] for index in sampled_indices]'], {}), '([self.observation_buffer[index - 1] for index in sampled_indices])\n', (11088, 11155), True, 'import numpy as np\n'), ((11174, 11240), 'numpy.array', 'np.array', (['[self.action_buffer[index] for index in sampled_indices]'], {}), '([self.action_buffer[index] for index in sampled_indices])\n', (11182, 11240), True, 'import numpy as np\n'), ((11263, 11334), 'numpy.array', 'np.array', (['[self.observation_buffer[index] for index in sampled_indices]'], {}), '([self.observation_buffer[index] for index in sampled_indices])\n', (11271, 11334), True, 'import numpy as np\n'), ((11353, 11419), 'numpy.array', 'np.array', (['[self.reward_buffer[index] for index in sampled_indices]'], {}), '([self.reward_buffer[index] for index in sampled_indices])\n', (11361, 11419), True, 'import numpy as np\n'), ((11448, 11512), 'numpy.array', 'np.array', (['[self.done_buffer[index] for index in sampled_indices]'], {}), '([self.done_buffer[index] for index in sampled_indices])\n', (11456, 11512), True, 'import numpy as np\n'), ((3447, 3480), 'copy.deepcopy', 'copy.deepcopy', (['self.current_model'], {}), '(self.current_model)\n', (3460, 3480), False, 'import copy\n'), ((5991, 6089), 'shared.agent_methods.methods.get_action', 'agmeth.get_action', (['self.current_model', 'self.environment', 'states', 'self.epsilon', 'self.iteration'], {}), '(self.current_model, self.environment, states, self.\n epsilon, self.iteration)\n', (6008, 6089), True, 'import shared.agent_methods.methods as agmeth\n'), ((6219, 6252), 'copy.deepcopy', 'copy.deepcopy', (['self.current_model'], {}), '(self.current_model)\n', (6232, 6252), False, 'import copy\n'), ((11794, 11827), 'numpy.logical_not', 
'np.logical_not', (['is_next_terminals'], {}), '(is_next_terminals)\n', (11808, 11827), True, 'import numpy as np\n')] |
import h5py
import os
import numpy as np
simpath = '/home/az396/project/sims'
damselpath = '/home/az396/project/damselfly'

combineddata_date = '210617'
combineddata_name = 'df1'

## create a list of simulation datasets to combine
simdata_date = '210615'
simdata_name = 'df_run'
nsimdatarun = 10

simdatalist = [f'{simdata_date}_{simdata_name}{run}.h5'
               for run in range(1, nsimdatarun + 1)]
####

## iterate through the per-run files, copying each signal into one combined
## file as its normalized, zero-frequency-centered FFT, keeping attributes.
## Context managers guarantee the HDF5 files are closed even on error
## (the original left both files open if an exception occurred mid-copy).
combined_path = os.path.join(
    damselpath, f'data/sim_data/{combineddata_date}_{combineddata_name}.h5')
with h5py.File(combined_path, 'w') as h5combined:
    combinedgrp = h5combined.create_group('signal')
    ncombine = 0
    for simdata in simdatalist:
        print(simdata)
        with h5py.File(os.path.join(simpath, f'datasets/{simdata}'), 'r') as h5simdata:
            for key in list(h5simdata['signal'].keys()):
                signal = h5simdata['signal'][key][:]
                # Normalized FFT, shifted so zero frequency is centered.
                spectrum = np.fft.fftshift(np.fft.fft(signal) / signal.size)
                combineddset = combinedgrp.create_dataset(f'{ncombine}', data=spectrum)
                ncombine += 1
                # Preserve the source dataset's metadata.
                for name, value in h5simdata['signal'][key].attrs.items():
                    combineddset.attrs.create(name, value)
####
####
| [
"numpy.fft.fft",
"os.path.join"
] | [((521, 610), 'os.path.join', 'os.path.join', (['damselpath', 'f"""data/sim_data/{combineddata_date}_{combineddata_name}.h5"""'], {}), "(damselpath,\n f'data/sim_data/{combineddata_date}_{combineddata_name}.h5')\n", (533, 610), False, 'import os\n'), ((729, 773), 'os.path.join', 'os.path.join', (['simpath', 'f"""datasets/{simdata}"""'], {}), "(simpath, f'datasets/{simdata}')\n", (741, 773), False, 'import os\n'), ((1125, 1164), 'numpy.fft.fft', 'np.fft.fft', (["h5simdata['signal'][key][:]"], {}), "(h5simdata['signal'][key][:])\n", (1135, 1164), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from stacked_single_target import StackedSingleTarget
from sklearn.preprocessing import StandardScaler
class MTRTimeSeries:
def __init__(self, time_serie, window_size=20, horizon_size=5,
init_buffer_size=200, max_buffer_size=None,
use_exp_smoothing=False, gamma=0.1, sst_method='predictions'):
self.init_buffer_size = init_buffer_size
if max_buffer_size is None:
self.max_buffer_size = self.init_buffer_size
else:
self.max_buffer_size = max_buffer_size
self._time_serie = time_serie
self.window_size = window_size
self.horizon_size = horizon_size
self.use_exp_smoothing = use_exp_smoothing
self.gamma = gamma
self.sst_method = sst_method
    def set_regressor(self, regressor, regressor_params):
        """Store the base regressor class and its constructor kwargs; both
        are consumed by ``iterate_fit_predict`` when building each
        ``StackedSingleTarget`` model, so call this before iterating."""
        self._regressor = regressor
        self._regressor_params = regressor_params
def iterate_fit_predict(self):
s_idx = 0
ml_observations = {}
ema = self._time_serie[s_idx]
while s_idx < self.init_buffer_size:
incr = (s_idx + self.window_size + self.horizon_size)
if self.use_exp_smoothing:
aux_smooth = []
for s in self._time_serie[s_idx:incr]:
ema = self.gamma * s + (1 - self.gamma) * ema
aux_smooth.append(ema)
ml_observations[s_idx] = aux_smooth
else:
ml_observations[s_idx] = self._time_serie[s_idx:incr]
s_idx += 1
i = self.init_buffer_size + self.horizon_size
stop_point = len(self._time_serie) - \
(self.window_size + self.horizon_size)
predictions = []
s_idx -= 1
while i < stop_point:
data = pd.DataFrame.from_dict(
ml_observations, orient='index'
)
sst = StackedSingleTarget(
n_targets=self.horizon_size,
default_regressor=self._regressor,
default_regressor_params=self._regressor_params,
method=self.sst_method
)
scaler_x = StandardScaler()
scaler_y = StandardScaler()
X = scaler_x.fit_transform(
data.iloc[:, :-self.horizon_size].values
)
Y = scaler_y.fit_transform(
data.iloc[:, -self.horizon_size:].values
)
sst.fit(X, Y)
if self.use_exp_smoothing:
aux_ema = ema
aux_smooth = []
for s in self._time_serie[i:(i+self.window_size)]:
aux_ema = self.gamma * s + (1 - self.gamma) * aux_ema
aux_smooth.append(ema)
new_x = np.array(aux_smooth)
else:
new_x = np.array(self._time_serie[i:(i+self.window_size)])
new_x = new_x[None, :]
new_x = scaler_x.transform(new_x)
predictions.extend(
scaler_y.inverse_transform(sst.predict(new_x)).tolist()[0]
)
old_idx = s_idx
if s_idx >= self.max_buffer_size - 1:
s_idx = 0
else:
s_idx += 1
if self.use_exp_smoothing:
aux_smooth = ml_observations[old_idx][self.horizon_size:]
for s in self._time_serie[
(i + self.window_size):
(i + self.window_size + self.horizon_size)
]:
ema = self.gamma * s + (1 - self.gamma) * ema
aux_smooth.append(ema)
ml_observations[s_idx] = aux_smooth
else:
ml_observations[s_idx] = self._time_serie[
i:(i + self.window_size + self.horizon_size)
]
i += self.horizon_size
return predictions
def init_prediction_index(self):
return self.init_buffer_size + self.window_size
| [
"numpy.array",
"sklearn.preprocessing.StandardScaler",
"pandas.DataFrame.from_dict",
"stacked_single_target.StackedSingleTarget"
] | [((1837, 1892), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['ml_observations'], {'orient': '"""index"""'}), "(ml_observations, orient='index')\n", (1859, 1892), True, 'import pandas as pd\n'), ((1941, 2107), 'stacked_single_target.StackedSingleTarget', 'StackedSingleTarget', ([], {'n_targets': 'self.horizon_size', 'default_regressor': 'self._regressor', 'default_regressor_params': 'self._regressor_params', 'method': 'self.sst_method'}), '(n_targets=self.horizon_size, default_regressor=self.\n _regressor, default_regressor_params=self._regressor_params, method=\n self.sst_method)\n', (1960, 2107), False, 'from stacked_single_target import StackedSingleTarget\n'), ((2199, 2215), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2213, 2215), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2239, 2255), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2253, 2255), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2813, 2833), 'numpy.array', 'np.array', (['aux_smooth'], {}), '(aux_smooth)\n', (2821, 2833), True, 'import numpy as np\n'), ((2876, 2926), 'numpy.array', 'np.array', (['self._time_serie[i:i + self.window_size]'], {}), '(self._time_serie[i:i + self.window_size])\n', (2884, 2926), True, 'import numpy as np\n')] |
from typing import Any, Optional
import numpy as np
from flatland.core.env_observation_builder import ObservationBuilder
from flatland.envs.agent_utils import RailAgentStatus
from flatland.envs.observations import TreeObsForRailEnv
from flatlander.envs.observations.common.tree_flatter import TreeFlattener
from flatlander.envs.observations.common.utils import norm_obs_clip
class GroupingTreeFlattener(TreeFlattener):
def __init__(self, tree_depth=2, normalize_fixed=True, num_agents=5,
builder: Optional[ObservationBuilder] = None):
self.tree_depth = tree_depth
self.normalize_fixed = normalize_fixed
self.num_agents = num_agents
self.builder = builder
@staticmethod
def _split_node_into_feature_groups(node: Any) -> (np.ndarray, np.ndarray, np.ndarray):
data = np.zeros(6)
distance = np.zeros(1)
agent_data = np.zeros(4)
data[0] = node.dist_own_target_encountered
data[1] = node.dist_other_target_encountered
data[2] = node.dist_other_agent_encountered
data[3] = node.dist_potential_conflict
data[4] = node.dist_unusable_switch
data[5] = node.dist_to_next_branch
distance[0] = node.dist_min_to_target
agent_data[0] = node.num_agents_same_direction
agent_data[1] = node.num_agents_opposite_direction
agent_data[2] = node.num_agents_malfunctioning
agent_data[3] = node.speed_min_fractional
return data, distance, agent_data
def _split_subtree_into_feature_groups(self, node: Any, current_tree_depth: int, max_tree_depth: int) -> (
np.ndarray, np.ndarray, np.ndarray):
if node == -np.inf:
remaining_depth = max_tree_depth - current_tree_depth
# reference: https://stackoverflow.com/questions/515214/total-number-of-nodes-in-a-tree-data-structure
num_remaining_nodes = int((4 ** (remaining_depth + 1) - 1) / (4 - 1))
return [-np.inf] * num_remaining_nodes * 6, [-np.inf] * num_remaining_nodes, [
-np.inf] * num_remaining_nodes * 4
data, distance, agent_data = self._split_node_into_feature_groups(node)
if not node.childs:
return data, distance, agent_data
for direction in TreeObsForRailEnv.tree_explored_actions_char:
sub_data, sub_distance, sub_agent_data = self._split_subtree_into_feature_groups(node.childs[direction],
current_tree_depth + 1,
max_tree_depth)
data = np.concatenate((data, sub_data))
distance = np.concatenate((distance, sub_distance))
agent_data = np.concatenate((agent_data, sub_agent_data))
return data, distance, agent_data
def split_tree_into_feature_groups(self, tree: Any, max_tree_depth: int) -> (np.ndarray, np.ndarray, np.ndarray):
"""
This function splits the tree into three difference arrays of values
"""
data, distance, agent_data = self._split_node_into_feature_groups(tree)
for direction in TreeObsForRailEnv.tree_explored_actions_char:
sub_data, sub_distance, sub_agent_data = self._split_subtree_into_feature_groups(tree.childs[direction], 1,
max_tree_depth)
data = np.concatenate((data, sub_data))
distance = np.concatenate((distance, sub_distance))
agent_data = np.concatenate((agent_data, sub_agent_data))
return data, distance, agent_data
def normalize_observation(self, observation: Any, tree_depth: int, observation_radius=0,
normalize_fixed=None):
"""
This function normalizes the observation used by the RL algorithm
"""
data, distance, agent_data = self.split_tree_into_feature_groups(observation, tree_depth)
data = norm_obs_clip(data, fixed_radius=observation_radius)
if normalize_fixed is not None:
distance = norm_obs_clip(distance, fixed_radius=normalize_fixed)
else:
distance = norm_obs_clip(distance, normalize_to_range=True)
agent_data = np.clip(agent_data, -1, 1)
normalized_obs = np.concatenate((np.concatenate((data, distance)), agent_data))
return np.clip(normalized_obs, -1, 1)
def flatten(self, root: Any, handle, concat_agent_id, concat_status, **kwargs):
obs = self.normalize_observation(observation=root,
tree_depth=self.tree_depth,
observation_radius=10,
normalize_fixed=self.normalize_fixed)
if concat_agent_id:
agent_one_hot = np.zeros(self.num_agents)
agent_one_hot[handle % self.num_agents] = 1
obs = np.concatenate([obs, agent_one_hot])
if concat_status:
status = self.builder.env.agents[handle].status.value == RailAgentStatus.READY_TO_DEPART.value
obs = np.concatenate([[status], obs])
return obs
| [
"flatlander.envs.observations.common.utils.norm_obs_clip",
"numpy.zeros",
"numpy.concatenate",
"numpy.clip"
] | [((839, 850), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (847, 850), True, 'import numpy as np\n'), ((870, 881), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (878, 881), True, 'import numpy as np\n'), ((903, 914), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (911, 914), True, 'import numpy as np\n'), ((4099, 4151), 'flatlander.envs.observations.common.utils.norm_obs_clip', 'norm_obs_clip', (['data'], {'fixed_radius': 'observation_radius'}), '(data, fixed_radius=observation_radius)\n', (4112, 4151), False, 'from flatlander.envs.observations.common.utils import norm_obs_clip\n'), ((4376, 4402), 'numpy.clip', 'np.clip', (['agent_data', '(-1)', '(1)'], {}), '(agent_data, -1, 1)\n', (4383, 4402), True, 'import numpy as np\n'), ((4506, 4536), 'numpy.clip', 'np.clip', (['normalized_obs', '(-1)', '(1)'], {}), '(normalized_obs, -1, 1)\n', (4513, 4536), True, 'import numpy as np\n'), ((2700, 2732), 'numpy.concatenate', 'np.concatenate', (['(data, sub_data)'], {}), '((data, sub_data))\n', (2714, 2732), True, 'import numpy as np\n'), ((2756, 2796), 'numpy.concatenate', 'np.concatenate', (['(distance, sub_distance)'], {}), '((distance, sub_distance))\n', (2770, 2796), True, 'import numpy as np\n'), ((2822, 2866), 'numpy.concatenate', 'np.concatenate', (['(agent_data, sub_agent_data)'], {}), '((agent_data, sub_agent_data))\n', (2836, 2866), True, 'import numpy as np\n'), ((3530, 3562), 'numpy.concatenate', 'np.concatenate', (['(data, sub_data)'], {}), '((data, sub_data))\n', (3544, 3562), True, 'import numpy as np\n'), ((3586, 3626), 'numpy.concatenate', 'np.concatenate', (['(distance, sub_distance)'], {}), '((distance, sub_distance))\n', (3600, 3626), True, 'import numpy as np\n'), ((3652, 3696), 'numpy.concatenate', 'np.concatenate', (['(agent_data, sub_agent_data)'], {}), '((agent_data, sub_agent_data))\n', (3666, 3696), True, 'import numpy as np\n'), ((4215, 4268), 'flatlander.envs.observations.common.utils.norm_obs_clip', 'norm_obs_clip', 
(['distance'], {'fixed_radius': 'normalize_fixed'}), '(distance, fixed_radius=normalize_fixed)\n', (4228, 4268), False, 'from flatlander.envs.observations.common.utils import norm_obs_clip\n'), ((4306, 4354), 'flatlander.envs.observations.common.utils.norm_obs_clip', 'norm_obs_clip', (['distance'], {'normalize_to_range': '(True)'}), '(distance, normalize_to_range=True)\n', (4319, 4354), False, 'from flatlander.envs.observations.common.utils import norm_obs_clip\n'), ((4950, 4975), 'numpy.zeros', 'np.zeros', (['self.num_agents'], {}), '(self.num_agents)\n', (4958, 4975), True, 'import numpy as np\n'), ((5050, 5086), 'numpy.concatenate', 'np.concatenate', (['[obs, agent_one_hot]'], {}), '([obs, agent_one_hot])\n', (5064, 5086), True, 'import numpy as np\n'), ((5239, 5270), 'numpy.concatenate', 'np.concatenate', (['[[status], obs]'], {}), '([[status], obs])\n', (5253, 5270), True, 'import numpy as np\n'), ((4444, 4476), 'numpy.concatenate', 'np.concatenate', (['(data, distance)'], {}), '((data, distance))\n', (4458, 4476), True, 'import numpy as np\n')] |
import os
import tensorflow as tf
from tensorflow import keras
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
tf.random.set_seed(11)
np.random.seed(11)
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
assert tf.__version__.startswith('2.')
def save_images(imgs,name):
new_im=Image.new('L',(280,280))
index=0
for i in range(0,280,28):
for j in range(0,280,28):
im=imgs[index]
im=Image.fromarray(im,mode='L')
new_im.paste(im,(i,j))
index+=1
new_im.save(name)
h_dim=20
batch_sz=512
lr=1e-3
(x_train,y_train),(x_test,y_test)=keras.datasets.fashion_mnist.load_data()
x_train,x_test=x_train.astype(np.float32)/255.,x_test.astype(np.float32)/255.
train_db=tf.data.Dataset.from_tensor_slices(x_train)
train_db=train_db.shuffle(batch_sz*5).batch(batch_sz)
test_db=tf.data.Dataset.from_tensor_slices(x_test)
test_db=test_db.batch(batch_sz)
print(x_train.shape,y_train.shape)
print(x_test.shape,y_test.shape)
class AE(keras.Model):
def __init__(self):
super(AE,self).__init__()
#encoder
self.encoder=keras.Sequential([
keras.layers.Dense(256,activation=tf.nn.relu),
keras.layers.Dense(128,activation=tf.nn.relu),
keras.layers.Dense(h_dim)
])
self.decoder=keras.Sequential([
keras.layers.Dense(128,activation=tf.nn.relu),
keras.layers.Dense(256,activation=tf.nn.relu),
keras.layers.Dense(784)
])
def call(self,inputs,training=None):
h=self.encoder(inputs)
h_hat=self.decoder(h)
return h_hat
model=AE()
model.build(input_shape=(None,784))
model.summary()
optimazer=tf.optimizers.Adam(lr=lr)
for epoch in range(100):
for step,x in enumerate(train_db):
x=tf.reshape(x,[-1,784])
with tf.GradientTape() as tape:
x_rec_logits=model(x)
rec_loss=tf.losses.binary_crossentropy(x,x_rec_logits,from_logits=True)
rec_loss=tf.reduce_mean(rec_loss)
grads=tape.gradient(rec_loss,model.trainable_variables)
optimazer.apply_gradients(zip(grads,model.trainable_variables))
if step%100==0:
print(epoch,step,'loss:',float(rec_loss))
x=next(iter(test_db))
# x=tf.reshape(x,[-1,784])
logits=model(tf.reshape(x,[-1,784]))
x_hat=tf.sigmoid(logits)
x_hat=tf.reshape(x_hat,[-1,28,28])
x_concat=tf.concat([x,x_hat],axis=0)
x_concat=x_concat.numpy()*255.
x_concat=x_concat.astype(np.uint8)
save_images(x_concat,'ae_images/rec_epoch_%d.png'%epoch)
| [
"tensorflow.random.set_seed",
"PIL.Image.new",
"numpy.random.seed",
"tensorflow.keras.layers.Dense",
"tensorflow.losses.binary_crossentropy",
"tensorflow.reshape",
"tensorflow.reduce_mean",
"tensorflow.concat",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.optimizers.Adam",
"tensorfl... | [((137, 159), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(11)'], {}), '(11)\n', (155, 159), True, 'import tensorflow as tf\n'), ((160, 178), 'numpy.random.seed', 'np.random.seed', (['(11)'], {}), '(11)\n', (174, 178), True, 'import numpy as np\n'), ((225, 256), 'tensorflow.__version__.startswith', 'tf.__version__.startswith', (['"""2."""'], {}), "('2.')\n", (250, 256), True, 'import tensorflow as tf\n'), ((613, 653), 'tensorflow.keras.datasets.fashion_mnist.load_data', 'keras.datasets.fashion_mnist.load_data', ([], {}), '()\n', (651, 653), False, 'from tensorflow import keras\n'), ((741, 784), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['x_train'], {}), '(x_train)\n', (775, 784), True, 'import tensorflow as tf\n'), ((847, 889), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['x_test'], {}), '(x_test)\n', (881, 889), True, 'import tensorflow as tf\n'), ((1701, 1726), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (1719, 1726), True, 'import tensorflow as tf\n'), ((297, 323), 'PIL.Image.new', 'Image.new', (['"""L"""', '(280, 280)'], {}), "('L', (280, 280))\n", (306, 323), False, 'from PIL import Image\n'), ((1801, 1825), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, 784]'], {}), '(x, [-1, 784])\n', (1811, 1825), True, 'import tensorflow as tf\n'), ((2368, 2386), 'tensorflow.sigmoid', 'tf.sigmoid', (['logits'], {}), '(logits)\n', (2378, 2386), True, 'import tensorflow as tf\n'), ((2401, 2432), 'tensorflow.reshape', 'tf.reshape', (['x_hat', '[-1, 28, 28]'], {}), '(x_hat, [-1, 28, 28])\n', (2411, 2432), True, 'import tensorflow as tf\n'), ((2447, 2476), 'tensorflow.concat', 'tf.concat', (['[x, x_hat]'], {'axis': '(0)'}), '([x, x_hat], axis=0)\n', (2456, 2476), True, 'import tensorflow as tf\n'), ((440, 469), 'PIL.Image.fromarray', 'Image.fromarray', (['im'], {'mode': '"""L"""'}), "(im, mode='L')\n", (455, 469), False, 'from 
PIL import Image\n'), ((1837, 1854), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (1852, 1854), True, 'import tensorflow as tf\n'), ((1919, 1983), 'tensorflow.losses.binary_crossentropy', 'tf.losses.binary_crossentropy', (['x', 'x_rec_logits'], {'from_logits': '(True)'}), '(x, x_rec_logits, from_logits=True)\n', (1948, 1983), True, 'import tensorflow as tf\n'), ((2003, 2027), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['rec_loss'], {}), '(rec_loss)\n', (2017, 2027), True, 'import tensorflow as tf\n'), ((2330, 2354), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, 784]'], {}), '(x, [-1, 784])\n', (2340, 2354), True, 'import tensorflow as tf\n'), ((1142, 1188), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(256)'], {'activation': 'tf.nn.relu'}), '(256, activation=tf.nn.relu)\n', (1160, 1188), False, 'from tensorflow import keras\n'), ((1201, 1247), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(128)'], {'activation': 'tf.nn.relu'}), '(128, activation=tf.nn.relu)\n', (1219, 1247), False, 'from tensorflow import keras\n'), ((1260, 1285), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['h_dim'], {}), '(h_dim)\n', (1278, 1285), False, 'from tensorflow import keras\n'), ((1350, 1396), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(128)'], {'activation': 'tf.nn.relu'}), '(128, activation=tf.nn.relu)\n', (1368, 1396), False, 'from tensorflow import keras\n'), ((1409, 1455), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(256)'], {'activation': 'tf.nn.relu'}), '(256, activation=tf.nn.relu)\n', (1427, 1455), False, 'from tensorflow import keras\n'), ((1468, 1491), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(784)'], {}), '(784)\n', (1486, 1491), False, 'from tensorflow import keras\n')] |
from __future__ import print_function
import math
try:
import fst
except ImportError:
print("No PyFST module, trying to work without it. If you want to run the "
"language model, please install openfst and PyFST")
import numpy
import theano
import itertools
from theano import tensor, Op
from theano.gradient import disconnected_type
from fuel.utils import do_not_pickle_attributes
from picklable_itertools.extras import equizip
from collections import defaultdict, deque
from toposort import toposort_flatten
from lvsr.error_rate import reward_matrix, gain_matrix
EPSILON = 0
MAX_STATES = 7
NOT_STATE = -1
def read_symbols(fname):
syms = fst.SymbolTable('eps')
with open(fname) as sf:
for line in sf:
s,i = line.strip().split()
syms[s] = int(i)
return syms
@do_not_pickle_attributes('fst')
class FST(object):
"""Picklable wrapper around FST."""
def __init__(self, path):
self.path = path
def load(self):
self.fst = fst.read(self.path)
self.isyms = dict(self.fst.isyms.items())
def __getitem__(self, state):
"""Returns all arcs of the state i"""
return self.fst[state]
def combine_weights(self, *args):
# Protection from underflow when -x is too small
m = max(args)
return m - math.log(sum(math.exp(m - x) for x in args if x is not None))
def get_arcs(self, state, character):
return [(state, arc.nextstate, arc.ilabel, float(arc.weight))
for arc in self[state] if arc.ilabel == character]
def transition(self, states, character):
arcs = list(itertools.chain(
*[self.get_arcs(state, character) for state in states]))
next_states = {}
for next_state in {arc[1] for arc in arcs}:
next_states[next_state] = self.combine_weights(
*[states[arc[0]] + arc[3] for arc in arcs
if arc[1] == next_state])
return next_states
def expand(self, states):
seen = set()
depends = defaultdict(list)
queue = deque()
for state in states:
queue.append(state)
seen.add(state)
while len(queue):
state = queue.popleft()
for arc in self.get_arcs(state, EPSILON):
depends[arc[1]].append((arc[0], arc[3]))
if arc[1] in seen:
continue
queue.append(arc[1])
seen.add(arc[1])
depends_for_toposort = {key: {state for state, weight in value}
for key, value in depends.items()}
order = toposort_flatten(depends_for_toposort)
next_states = states
for next_state in order:
next_states[next_state] = self.combine_weights(
*([next_states.get(next_state)] +
[next_states[prev_state] + weight
for prev_state, weight in depends[next_state]]))
return next_states
def explain(self, input_):
input_ = list(input_)
states = {self.fst.start: 0}
print("Initial states: {}".format(states))
states = self.expand(states)
print("Expanded states: {}".format(states))
for char, ilabel in zip(input_, [self.isyms[char] for char in input_]):
states = self.transition(states, ilabel)
print("{} consumed: {}".format(char, states))
states = self.expand(states)
print("Expanded states: {}".format(states))
result = None
for state, weight in states.items():
if numpy.isfinite(weight + float(self.fst[state].final)):
print("Finite state {} with path weight {} and its own weight {}".format(
state, weight, self.fst[state].final))
result = self.combine_weights(
result, weight + float(self.fst[state].final))
print("Total weight: {}".format(result))
return result
class FSTTransitionOp(Op):
"""Performs transition in an FST.
Given a state and an input symbol (character) returns the next state.
Parameters
----------
fst : FST instance
remap_table : dict
Maps neutral network characters to FST characters.
"""
__props__ = ()
def __init__(self, fst, remap_table):
self.fst = fst
self.remap_table = remap_table
def pad(self, arr, value):
return numpy.pad(arr, (0, MAX_STATES - len(arr)),
mode='constant', constant_values=value)
def perform(self, node, inputs, output_storage):
all_states, all_weights, all_inputs = inputs
# Each row of all_states contains a set of states
# padded with NOT_STATE.
all_next_states = []
all_next_weights = []
for states, weights, input_ in equizip(all_states, all_weights, all_inputs):
states_dict = dict(zip(states, weights))
del states_dict[NOT_STATE]
next_states_dict = self.fst.transition(
states_dict, self.remap_table[input_])
next_states_dict = self.fst.expand(next_states_dict)
if next_states_dict:
next_states, next_weights = zip(*next_states_dict.items())
else:
# No adequate state when no arc exists for now
next_states, next_weights = [], []
all_next_states.append(self.pad(next_states, NOT_STATE))
all_next_weights.append(self.pad(next_weights, 0))
output_storage[0][0] = numpy.array(all_next_states, dtype='int64')
output_storage[1][0] = numpy.array(all_next_weights)
def make_node(self, states, weights, input_):
# check that the theano version has support for __props__
assert hasattr(self, '_props')
states = theano.tensor.as_tensor_variable(states)
weights = theano.tensor.as_tensor_variable(weights)
input_ = theano.tensor.as_tensor_variable(input_)
return theano.Apply(self,
[states, weights, input_],
[states.type(), weights.type()])
class FSTCostsOp(Op):
"""Returns transition costs for all possible input symbols.
Parameters
----------
fst : FST instance
remap_table : dict
Maps neutral network characters to FST characters.
no_transition_cost : float
Cost of going to the start state when no arc for an input
symbol is available.
Notes
-----
It is assumed that neural network characters start from zero.
"""
__props__ = ()
def __init__(self, fst, remap_table, no_transition_cost):
self.fst = fst
self.remap_table = remap_table
self.no_transition_cost = no_transition_cost
def perform(self, node, inputs, output_storage):
all_states, all_weights = inputs
all_costs = []
for states, weights in zip(all_states, all_weights):
states_dict = dict(zip(states, weights))
del states_dict[NOT_STATE]
costs = (numpy.ones(len(self.remap_table), dtype=theano.config.floatX)
* self.no_transition_cost)
if states_dict:
total_weight = self.fst.combine_weights(*states_dict.values())
for nn_character, fst_character in self.remap_table.items():
next_states_dict = self.fst.transition(states_dict, fst_character)
next_states_dict = self.fst.expand(next_states_dict)
if next_states_dict:
next_total_weight = self.fst.combine_weights(*next_states_dict.values())
costs[nn_character] = next_total_weight - total_weight
all_costs.append(costs)
output_storage[0][0] = numpy.array(all_costs)
def make_node(self, states, weights):
# check that the theano version has support for __props__
assert hasattr(self, '_props')
states = theano.tensor.as_tensor_variable(states)
weights = theano.tensor.as_tensor_variable(weights)
return theano.Apply(self,
[states, weights], [theano.tensor.matrix()])
class RewardOp(Op):
__props__ = ()
def __init__(self, eos_label, alphabet_size):
"""Computes matrices of rewards and gains."""
self.eos_label = eos_label
self.alphabet_size = alphabet_size
def perform(self, node, inputs, output_storage):
groundtruth, recognized = inputs
if (groundtruth.ndim != 2 or recognized.ndim != 2
or groundtruth.shape[1] != recognized.shape[1]):
raise ValueError
batch_size = groundtruth.shape[1]
all_rewards = numpy.zeros(
recognized.shape + (self.alphabet_size,), dtype='int64')
all_gains = numpy.zeros(
recognized.shape + (self.alphabet_size,), dtype='int64')
alphabet = list(range(self.alphabet_size))
for index in range(batch_size):
y = list(groundtruth[:, index])
y_hat = list(recognized[:, index])
try:
eos_pos = y.index(self.eos_label)
y = y[:eos_pos + 1]
except:
# Sometimes groundtruth is in fact also a prediction
# and in this case it might not have EOS label
pass
if self.eos_label in y_hat:
y_hat_eos_pos = y_hat.index(self.eos_label)
y_hat_trunc = y_hat[:y_hat_eos_pos + 1]
else:
y_hat_trunc = y_hat
rewards_trunc = reward_matrix(
y, y_hat_trunc, alphabet, self.eos_label)
# pass freshly computed rewards to gain_matrix to speed things up
# a bit
gains_trunc = gain_matrix(y, y_hat_trunc, alphabet,
given_reward_matrix=rewards_trunc)
gains = numpy.ones((len(y_hat), len(alphabet))) * -1000
gains[:(gains_trunc.shape[0] - 1), :] = gains_trunc[:-1, :]
rewards = numpy.ones((len(y_hat), len(alphabet))) * -1
rewards[:(rewards_trunc.shape[0] - 1), :] = rewards_trunc[:-1, :]
all_rewards[:, index, :] = rewards
all_gains[:, index, :] = gains
output_storage[0][0] = all_rewards
output_storage[1][0] = all_gains
def grad(self, *args, **kwargs):
return disconnected_type(), disconnected_type()
def make_node(self, groundtruth, recognized):
recognized = tensor.as_tensor_variable(recognized)
groundtruth = tensor.as_tensor_variable(groundtruth)
return theano.Apply(
self, [groundtruth, recognized], [tensor.ltensor3(), tensor.ltensor3()])
| [
"toposort.toposort_flatten",
"fst.read",
"theano.tensor.as_tensor_variable",
"math.exp",
"theano.gradient.disconnected_type",
"fst.SymbolTable",
"lvsr.error_rate.reward_matrix",
"numpy.zeros",
"lvsr.error_rate.gain_matrix",
"collections.defaultdict",
"picklable_itertools.extras.equizip",
"thea... | [((829, 860), 'fuel.utils.do_not_pickle_attributes', 'do_not_pickle_attributes', (['"""fst"""'], {}), "('fst')\n", (853, 860), False, 'from fuel.utils import do_not_pickle_attributes\n'), ((667, 689), 'fst.SymbolTable', 'fst.SymbolTable', (['"""eps"""'], {}), "('eps')\n", (682, 689), False, 'import fst\n'), ((1016, 1035), 'fst.read', 'fst.read', (['self.path'], {}), '(self.path)\n', (1024, 1035), False, 'import fst\n'), ((2065, 2082), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2076, 2082), False, 'from collections import defaultdict, deque\n'), ((2099, 2106), 'collections.deque', 'deque', ([], {}), '()\n', (2104, 2106), False, 'from collections import defaultdict, deque\n'), ((2659, 2697), 'toposort.toposort_flatten', 'toposort_flatten', (['depends_for_toposort'], {}), '(depends_for_toposort)\n', (2675, 2697), False, 'from toposort import toposort_flatten\n'), ((4884, 4928), 'picklable_itertools.extras.equizip', 'equizip', (['all_states', 'all_weights', 'all_inputs'], {}), '(all_states, all_weights, all_inputs)\n', (4891, 4928), False, 'from picklable_itertools.extras import equizip\n'), ((5598, 5641), 'numpy.array', 'numpy.array', (['all_next_states'], {'dtype': '"""int64"""'}), "(all_next_states, dtype='int64')\n", (5609, 5641), False, 'import numpy\n'), ((5673, 5702), 'numpy.array', 'numpy.array', (['all_next_weights'], {}), '(all_next_weights)\n', (5684, 5702), False, 'import numpy\n'), ((5876, 5916), 'theano.tensor.as_tensor_variable', 'theano.tensor.as_tensor_variable', (['states'], {}), '(states)\n', (5908, 5916), False, 'import theano\n'), ((5935, 5976), 'theano.tensor.as_tensor_variable', 'theano.tensor.as_tensor_variable', (['weights'], {}), '(weights)\n', (5967, 5976), False, 'import theano\n'), ((5994, 6034), 'theano.tensor.as_tensor_variable', 'theano.tensor.as_tensor_variable', (['input_'], {}), '(input_)\n', (6026, 6034), False, 'import theano\n'), ((7828, 7850), 'numpy.array', 'numpy.array', (['all_costs'], 
{}), '(all_costs)\n', (7839, 7850), False, 'import numpy\n'), ((8016, 8056), 'theano.tensor.as_tensor_variable', 'theano.tensor.as_tensor_variable', (['states'], {}), '(states)\n', (8048, 8056), False, 'import theano\n'), ((8075, 8116), 'theano.tensor.as_tensor_variable', 'theano.tensor.as_tensor_variable', (['weights'], {}), '(weights)\n', (8107, 8116), False, 'import theano\n'), ((8743, 8811), 'numpy.zeros', 'numpy.zeros', (['(recognized.shape + (self.alphabet_size,))'], {'dtype': '"""int64"""'}), "(recognized.shape + (self.alphabet_size,), dtype='int64')\n", (8754, 8811), False, 'import numpy\n'), ((8845, 8913), 'numpy.zeros', 'numpy.zeros', (['(recognized.shape + (self.alphabet_size,))'], {'dtype': '"""int64"""'}), "(recognized.shape + (self.alphabet_size,), dtype='int64')\n", (8856, 8913), False, 'import numpy\n'), ((10558, 10595), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['recognized'], {}), '(recognized)\n', (10583, 10595), False, 'from theano import tensor, Op\n'), ((10618, 10656), 'theano.tensor.as_tensor_variable', 'tensor.as_tensor_variable', (['groundtruth'], {}), '(groundtruth)\n', (10643, 10656), False, 'from theano import tensor, Op\n'), ((9623, 9678), 'lvsr.error_rate.reward_matrix', 'reward_matrix', (['y', 'y_hat_trunc', 'alphabet', 'self.eos_label'], {}), '(y, y_hat_trunc, alphabet, self.eos_label)\n', (9636, 9678), False, 'from lvsr.error_rate import reward_matrix, gain_matrix\n'), ((9820, 9892), 'lvsr.error_rate.gain_matrix', 'gain_matrix', (['y', 'y_hat_trunc', 'alphabet'], {'given_reward_matrix': 'rewards_trunc'}), '(y, y_hat_trunc, alphabet, given_reward_matrix=rewards_trunc)\n', (9831, 9892), False, 'from lvsr.error_rate import reward_matrix, gain_matrix\n'), ((10445, 10464), 'theano.gradient.disconnected_type', 'disconnected_type', ([], {}), '()\n', (10462, 10464), False, 'from theano.gradient import disconnected_type\n'), ((10466, 10485), 'theano.gradient.disconnected_type', 'disconnected_type', ([], {}), '()\n', 
(10483, 10485), False, 'from theano.gradient import disconnected_type\n'), ((8183, 8205), 'theano.tensor.matrix', 'theano.tensor.matrix', ([], {}), '()\n', (8203, 8205), False, 'import theano\n'), ((10732, 10749), 'theano.tensor.ltensor3', 'tensor.ltensor3', ([], {}), '()\n', (10747, 10749), False, 'from theano import tensor, Op\n'), ((10751, 10768), 'theano.tensor.ltensor3', 'tensor.ltensor3', ([], {}), '()\n', (10766, 10768), False, 'from theano import tensor, Op\n'), ((1348, 1363), 'math.exp', 'math.exp', (['(m - x)'], {}), '(m - x)\n', (1356, 1363), False, 'import math\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.