code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
"""
import numpy as np
import matplotlib.pyplot as plt
from astropy.coordinates import SkyCoord, search_around_sky
import astropy.units as u
def overlap(b4_reg, b7_reg, sep):
    """Find ds9 ellipse regions that match or overlap between band 4 and band 7.

    Parameters
    ----------
    b4_reg : str
        Path to the band 4 ds9 region file in degrees ('.deg.reg'); the
        matching SExtractor catalog is found by replacing '.deg.reg' with '.cat'.
    b7_reg : str
        Same as `b4_reg`, for band 7.
    sep : float
        Maximum separation between matched regions, in arcseconds
        (e.g. 2 x beam size).

    Side effects
    ------------
    Writes 'band4.overlap.cat' / 'band7.overlap.cat' catalogs and
    'band4.overlap.deg.reg' / 'band7.overlap.deg.reg' region files for the
    regions matched between the two bands.
    """

    def match_function(c, c_array, sep):
        """Return (c, nearest member of c_array) if any member of `c_array`
        lies within `sep` of the single coordinate `c`; otherwise None.
        """
        s = c.separation(c_array)
        w = np.where(s < sep)[0]
        if len(w) > 0:
            i = np.argmin(s)
            return c, c_array[i]
        return None  # explicit: no counterpart within `sep`

    def make_ds9_region(ra, dec, a, b, pa, filename, color):
        """Write an fk5 ds9 region file of ellipses at (ra, dec) with
        semi-axes a, b (arcsec) and position angles pa.
        """
        # `with` guarantees the file is closed even if a write fails
        with open(filename, 'w') as f:
            f.write('# Region file format: DS9 version 4.1\n')
            f.write('global color=%s dashlist=8 3 width=1 font="helvetica 10 normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\n' % color)
            f.write('fk5\n')
            for r, d, at, bt, pat in zip(ra, dec, a, b, pa):
                f.write('ellipse(%1.6f,%1.6f,%1.2f",%1.2f",%1.6e)\n' % (r, d, at, bt, pat))

    # read in ds9 degree region files
    xstr4, ystr4 = np.loadtxt(b4_reg, skiprows=3, usecols=[0, 1], dtype='str', delimiter=',', unpack=True)
    xstr7, ystr7 = np.loadtxt(b7_reg, skiprows=3, usecols=[0, 1], dtype='str', delimiter=',', unpack=True)
    # get rid of the 'ellipse(' prefix and convert to float
    x4 = np.array([float(s[8:]) for s in xstr4])
    y4 = np.array([float(s) for s in ystr4])
    x7 = np.array([float(s[8:]) for s in xstr7])
    y7 = np.array([float(s) for s in ystr7])
    # make astropy SkyCoords; distance to ngc0628 is 10 Mpc (1e7 pc)
    c4 = SkyCoord(x4 * u.deg, y4 * u.deg, distance=10 * u.Mpc)
    c7 = SkyCoord(x7 * u.deg, y7 * u.deg, distance=10 * u.Mpc)
    # max separation between matched regions, as an astropy quantity
    sep = sep * u.arcsec
    matches = [match_function(c, c7, sep.to(u.degree)) for c in c4]
    # BUG FIX: boolean-filtering an object array with np.not_equal flattens
    # the (n, 2) array of pairs when no entry is None, which breaks the
    # c[0]/c[1] indexing below; a plain list keeps the pairs intact always.
    coord_match = [m for m in matches if m is not None]
    # separate out into ra and dec arrays
    ra4 = np.array([c[0].ra.deg for c in coord_match])
    dec4 = np.array([c[0].dec.deg for c in coord_match])
    ra7 = np.array([c[1].ra.deg for c in coord_match])
    dec7 = np.array([c[1].dec.deg for c in coord_match])
    # since the separation was done c4 against c7, unique c4's can share the
    # same c7, so repeat the match in the opposite direction (c7 against c4)
    # using only the matched coordinates found just above
    c4 = SkyCoord(ra4 * u.deg, dec4 * u.deg, distance=10 * u.Mpc)
    c7 = SkyCoord(ra7 * u.deg, dec7 * u.deg, distance=10 * u.Mpc)
    coord_match = [m for m in (match_function(c, c4, sep.to(u.degree)) for c in c7)
                   if m is not None]
    # separate out into ra and dec arrays (each pair is now (band7, band4))
    ra4 = np.array([c[1].ra.deg for c in coord_match])
    dec4 = np.array([c[1].dec.deg for c in coord_match])
    ra7 = np.array([c[0].ra.deg for c in coord_match])
    dec7 = np.array([c[0].dec.deg for c in coord_match])
    # grab the SExtractor fluxes for the matched band 4 regions
    flux, flux_err, kron, background, x_world, y_world, a_image, b_image, pa = np.loadtxt(
        b4_reg.replace('.deg.reg', '.cat'),
        usecols=[3, 4, 5, 6, 10, 11, 18, 19, 20], unpack=True)
    coords_all = SkyCoord(x_world * u.deg, y_world * u.deg)
    c4 = SkyCoord(ra4 * u.deg, dec4 * u.deg)
    # indices (m1) of the matched dust clumps in the full catalog
    m0, m1, m2, m3 = coords_all.search_around_sky(c4, 0.1 * u.arcsec)
    flux = flux[m1]
    flux_err = flux_err[m1]
    kron = kron[m1]
    background = background[m1]
    x_world = x_world[m1]
    y_world = y_world[m1]
    a_image = a_image[m1]
    b_image = b_image[m1]
    pa = pa[m1]
    header = 'flux \t flux_err \t kron \t background \t RA \t DEC \t A_image \t b_image \t PA'
    np.savetxt('band4.overlap.cat',
               np.transpose([flux, flux_err, kron, background, x_world, y_world,
                             a_image, b_image, pa]),
               header=header)
    # convert a_image and b_image to arcsecs
    pixsize = 0.06  # arcsec/pixel
    a_arcsec = kron * a_image * pixsize
    b_arcsec = kron * b_image * pixsize
    make_ds9_region(x_world, y_world, a_arcsec, b_arcsec, pa, 'band4.overlap.deg.reg', 'magenta')
    # now the same for band 7
    flux, flux_err, kron, background, x_world, y_world, a_image, b_image, pa = np.loadtxt(
        b7_reg.replace('.deg.reg', '.cat'), skiprows=25,
        usecols=[3, 4, 5, 6, 10, 11, 18, 19, 20], unpack=True)
    coords_all = SkyCoord(x_world * u.deg, y_world * u.deg)
    c7 = SkyCoord(ra7 * u.deg, dec7 * u.deg)
    # find the indices of the matched dust clumps in the full band 7 catalog
    m0, m1, m2, m3 = coords_all.search_around_sky(c7, 0.1 * u.arcsec)
    flux = flux[m1]
    flux_err = flux_err[m1]
    kron = kron[m1]
    background = background[m1]
    x_world = x_world[m1]
    y_world = y_world[m1]
    a_image = a_image[m1]
    b_image = b_image[m1]
    pa = pa[m1]
    # BUG FIX: this header previously omitted 'kron' even though the kron
    # column is saved below; keep it consistent with the band 4 catalog.
    header = 'flux \t flux_err \t kron \t background \t RA \t DEC \t A_image \t b_image \t PA'
    np.savetxt('band7.overlap.cat',
               np.transpose([flux, flux_err, kron, background, x_world, y_world,
                             a_image, b_image, pa]),
               header=header)
    # convert a_image and b_image to arcsecs
    a_arcsec = kron * a_image * pixsize
    b_arcsec = kron * b_image * pixsize
    make_ds9_region(x_world, y_world, a_arcsec, b_arcsec, pa, 'band7.overlap.deg.reg', 'cyan')
| [
"numpy.where",
"astropy.coordinates.SkyCoord",
"numpy.not_equal",
"numpy.array",
"numpy.argmin",
"numpy.loadtxt",
"numpy.transpose"
] | [((1171, 1262), 'numpy.loadtxt', 'np.loadtxt', (['b4_reg'], {'skiprows': '(3)', 'usecols': '[0, 1]', 'dtype': '"""str"""', 'delimiter': '""","""', 'unpack': '(True)'}), "(b4_reg, skiprows=3, usecols=[0, 1], dtype='str', delimiter=',',\n unpack=True)\n", (1181, 1262), True, 'import numpy as np\n'), ((1274, 1365), 'numpy.loadtxt', 'np.loadtxt', (['b7_reg'], {'skiprows': '(3)', 'usecols': '[0, 1]', 'dtype': '"""str"""', 'delimiter': '""","""', 'unpack': '(True)'}), "(b7_reg, skiprows=3, usecols=[0, 1], dtype='str', delimiter=',',\n unpack=True)\n", (1284, 1365), True, 'import numpy as np\n'), ((1699, 1752), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(x4 * u.deg)', '(y4 * u.deg)'], {'distance': '(10 * u.Mpc)'}), '(x4 * u.deg, y4 * u.deg, distance=10 * u.Mpc)\n', (1707, 1752), False, 'from astropy.coordinates import SkyCoord, search_around_sky\n'), ((1753, 1806), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(x7 * u.deg)', '(y7 * u.deg)'], {'distance': '(10 * u.Mpc)'}), '(x7 * u.deg, y7 * u.deg, distance=10 * u.Mpc)\n', (1761, 1806), False, 'from astropy.coordinates import SkyCoord, search_around_sky\n'), ((2104, 2148), 'numpy.array', 'np.array', (['[c[0].ra.deg for c in coord_match]'], {}), '([c[0].ra.deg for c in coord_match])\n', (2112, 2148), True, 'import numpy as np\n'), ((2157, 2202), 'numpy.array', 'np.array', (['[c[0].dec.deg for c in coord_match]'], {}), '([c[0].dec.deg for c in coord_match])\n', (2165, 2202), True, 'import numpy as np\n'), ((2210, 2254), 'numpy.array', 'np.array', (['[c[1].ra.deg for c in coord_match]'], {}), '([c[1].ra.deg for c in coord_match])\n', (2218, 2254), True, 'import numpy as np\n'), ((2263, 2308), 'numpy.array', 'np.array', (['[c[1].dec.deg for c in coord_match]'], {}), '([c[1].dec.deg for c in coord_match])\n', (2271, 2308), True, 'import numpy as np\n'), ((2508, 2564), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(ra4 * u.deg)', '(dec4 * u.deg)'], {'distance': '(10 * u.Mpc)'}), '(ra4 * u.deg, dec4 * u.deg, distance=10 
* u.Mpc)\n', (2516, 2564), False, 'from astropy.coordinates import SkyCoord, search_around_sky\n'), ((2565, 2621), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(ra7 * u.deg)', '(dec7 * u.deg)'], {'distance': '(10 * u.Mpc)'}), '(ra7 * u.deg, dec7 * u.deg, distance=10 * u.Mpc)\n', (2573, 2621), False, 'from astropy.coordinates import SkyCoord, search_around_sky\n'), ((2744, 2788), 'numpy.array', 'np.array', (['[c[1].ra.deg for c in coord_match]'], {}), '([c[1].ra.deg for c in coord_match])\n', (2752, 2788), True, 'import numpy as np\n'), ((2797, 2842), 'numpy.array', 'np.array', (['[c[1].dec.deg for c in coord_match]'], {}), '([c[1].dec.deg for c in coord_match])\n', (2805, 2842), True, 'import numpy as np\n'), ((2850, 2894), 'numpy.array', 'np.array', (['[c[0].ra.deg for c in coord_match]'], {}), '([c[0].ra.deg for c in coord_match])\n', (2858, 2894), True, 'import numpy as np\n'), ((2903, 2948), 'numpy.array', 'np.array', (['[c[0].dec.deg for c in coord_match]'], {}), '([c[0].dec.deg for c in coord_match])\n', (2911, 2948), True, 'import numpy as np\n'), ((3256, 3298), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(x_world * u.deg)', '(y_world * u.deg)'], {}), '(x_world * u.deg, y_world * u.deg)\n', (3264, 3298), False, 'from astropy.coordinates import SkyCoord, search_around_sky\n'), ((3302, 3337), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(ra4 * u.deg)', '(dec4 * u.deg)'], {}), '(ra4 * u.deg, dec4 * u.deg)\n', (3310, 3337), False, 'from astropy.coordinates import SkyCoord, search_around_sky\n'), ((4284, 4326), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(x_world * u.deg)', '(y_world * u.deg)'], {}), '(x_world * u.deg, y_world * u.deg)\n', (4292, 4326), False, 'from astropy.coordinates import SkyCoord, search_around_sky\n'), ((4331, 4366), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(ra7 * u.deg)', '(dec7 * u.deg)'], {}), '(ra7 * u.deg, dec7 * u.deg)\n', (4339, 4366), False, 'from astropy.coordinates import SkyCoord, search_around_sky\n'), ((2024, 
2055), 'numpy.not_equal', 'np.not_equal', (['coord_match', 'None'], {}), '(coord_match, None)\n', (2036, 2055), True, 'import numpy as np\n'), ((3719, 3811), 'numpy.transpose', 'np.transpose', (['[flux, flux_err, kron, background, x_world, y_world, a_image, b_image, pa]'], {}), '([flux, flux_err, kron, background, x_world, y_world, a_image,\n b_image, pa])\n', (3731, 3811), True, 'import numpy as np\n'), ((4839, 4931), 'numpy.transpose', 'np.transpose', (['[flux, flux_err, kron, background, x_world, y_world, a_image, b_image, pa]'], {}), '([flux, flux_err, kron, background, x_world, y_world, a_image,\n b_image, pa])\n', (4851, 4931), True, 'import numpy as np\n'), ((579, 596), 'numpy.where', 'np.where', (['(s < sep)'], {}), '(s < sep)\n', (587, 596), True, 'import numpy as np\n'), ((624, 636), 'numpy.argmin', 'np.argmin', (['s'], {}), '(s)\n', (633, 636), True, 'import numpy as np\n')] |
import shap
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
try:
import plotly.express as px
import plotly.graph_objects as go
except ModuleNotFoundError:
_has_plotly = False
_plotly_exception_message = (
'Plotly is required to run this pydrift functionality.'
)
else:
_has_plotly = True
_plotly_exception_message = None
from typing import List, Union, Dict, Tuple
from sklearn.pipeline import Pipeline
from pathlib import Path
from ..models import ScikitModel
from ..decorators import check_optional_module
class InterpretableDrift:
    """Explains drift between train and test datasets through a fitted model,
    using shap values and (optionally) plotly visualizations.
    """
    def __init__(self,
                 model: ScikitModel,
                 X_train: pd.DataFrame,
                 X_test: pd.DataFrame,
                 y_train: pd.DataFrame,
                 y_test: pd.DataFrame,
                 column_names: List[str]):
        """Inits `InterpretableDrift` for a given `model`,
        `X_train` and `X_test` datasets and `column_names`
        """
        # For sklearn pipelines, explain the final estimator on the
        # already-transformed features; otherwise explain the model directly
        if isinstance(model, Pipeline):
            X_train_to_shap = model[:-1].transform(X_train)
            X_test_to_shap = model[:-1].transform(X_test)
            model_to_shap = model.steps[-1][1]
        else:
            X_train_to_shap = X_train.copy()
            X_test_to_shap = X_test.copy()
            model_to_shap = model
        self.model = model_to_shap
        self.X_train_to_shap = pd.DataFrame(X_train_to_shap,
                                            columns=column_names)
        self.X_test_to_shap = pd.DataFrame(X_test_to_shap,
                                           columns=column_names)
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test
        self.column_names = column_names
        # Lazily filled by `compute_shap_values`
        self.shap_values = np.empty(0)

    def compute_shap_values(self) -> None:
        """Shap values depending on what model we are using
        `shap.TreeExplainer` by default and if not it uses
        `KernelExplainer`
        Also provides compatibility with sklearn pipelines
        `shap_values` are stored in `self.shap_values`
        """
        with warnings.catch_warnings():
            # Some `shap` warnings are not useful for this implementation
            warnings.simplefilter("ignore")
            try:
                explainer = shap.TreeExplainer(
                    model=self.model,
                    feature_perturbation='tree_path_dependent'
                )
                shap_values_arguments = dict(X=self.X_test_to_shap)
            except Exception:
                # Non-tree model: fall back to the model-agnostic
                # KernelExplainer on a 100-row sample of the train data
                def model_predict(data_array):
                    data_frame = pd.DataFrame(data_array,
                                              columns=self.column_names)
                    return self.model.predict_proba(data_frame)[:, 1]
                explainer = shap.KernelExplainer(model=model_predict,
                                                  data=shap.sample(
                                                      self.X_train_to_shap,
                                                      100
                                                  ),
                                                  link='logit')
                shap_values_arguments = dict(X=self.X_test_to_shap,
                                             l1_reg='aic')
            self.shap_values = explainer.shap_values(**shap_values_arguments)

    def most_discriminative_features_plot(self,
                                          save_plot_path: Path = None) -> None:
        """Plots most discriminative features with its
        shap values
        You can save the plot in `save_plot_path` path
        """
        if self.shap_values.size == 0:
            self.compute_shap_values()
        shap.summary_plot(self.shap_values,
                          self.X_test_to_shap,
                          plot_type='bar',
                          title='Most Discriminative Features',
                          show=True if not save_plot_path else False)
        if save_plot_path:
            plt.savefig(save_plot_path, bbox_inches='tight')

    @check_optional_module(has_module=_has_plotly,
                           exception_message=_plotly_exception_message)
    def both_histogram_plot(self,
                            column: str,
                            fillna_value: Union[str, float, int] = None,
                            nbins: int = None,
                            save_plot_path: Path = None) -> None:
        """Plots histogram for the column passed
        in `column`
        You can set `nbins` to any number that makes
        your plot better
        You can save the plot in `save_plot_path` path
        Requires `plotly`
        """
        if not _has_plotly:
            # BUG FIX: previously raised with no message; pass the prepared
            # explanation so users know why the call failed
            raise ModuleNotFoundError(_plotly_exception_message)
        X_train_column = self.X_train.loc[:, [column]]
        X_test_column = self.X_test.loc[:, [column]]
        if fillna_value:
            X_train_column.fillna(fillna_value, inplace=True)
            X_test_column.fillna(fillna_value, inplace=True)
        X_train_total_nans = X_train_column[column].isna().sum()
        X_test_total_nans = X_test_column[column].isna().sum()
        if X_train_total_nans or X_test_total_nans:
            warnings.warn(
                f'Column {column} has '
                f'{X_train_total_nans + X_test_total_nans} nan values, '
                f'you can use `fillna_value` if you need it'
            )
        X_train_column['is_left'] = self.y_train.to_numpy()
        X_test_column['is_left'] = self.y_test.to_numpy()
        X_train_and_test = pd.concat([X_train_column, X_test_column])
        fig = px.histogram(X_train_and_test,
                           title=f'Both Histogram Normalized For {column}',
                           x=column,
                           color='is_left',
                           barmode='group',
                           nbins=nbins,
                           histnorm='probability density')
        fig.update_layout(bargroupgap=.1)
        if save_plot_path:
            fig.write_html(save_plot_path)
        else:
            fig.show()

    @check_optional_module(has_module=_has_plotly,
                           exception_message=_plotly_exception_message)
    def feature_importance_vs_drift_map_plot(
            self,
            dict_each_column_drift_coefficient: Dict[str, float],
            top: int = 10,
            save_plot_path: Path = None) -> None:
        """Feature importance versus drift coefficient map,
        with this plot you can visualize the most critical
        features involved in your model drift process
        By default shows you the top 10 most important features
        but you can customize it with `top` parameter
        You can save the plot in `save_plot_path` path
        """
        # ROBUSTNESS FIX: compute shap values on demand, consistent with
        # `most_discriminative_features_plot` (previously crashed if this
        # was called before `compute_shap_values`)
        if self.shap_values.size == 0:
            self.compute_shap_values()
        df_feature_importance = pd.DataFrame(
            zip(self.column_names,
                np.abs(self.shap_values).mean(axis=0)),
            columns=['Feature Name', 'Feature Importance']
        )
        df_feature_importance['Drift Coefficient'] = (
            (df_feature_importance['Feature Name']
             .map(dict_each_column_drift_coefficient))
        )
        # Min-max scale importances into [0, 1] so they share axes with the
        # drift coefficient
        value_min = df_feature_importance['Feature Importance'].min()
        value_max = df_feature_importance['Feature Importance'].max()
        df_feature_importance['Feature Importance Scaled'] = (
            (df_feature_importance['Feature Importance'] - value_min)
            / (value_max - value_min)
        )
        df_feature_importance_to_plot = (
            df_feature_importance
            .sort_values('Feature Importance Scaled', ascending=False)
            .nlargest(top, columns='Feature Importance Scaled')
        )
        fig = px.scatter(df_feature_importance_to_plot,
                         x='Feature Importance Scaled',
                         y='Drift Coefficient',
                         text='Feature Name',
                         hover_name='Feature Name',
                         hover_data={'Feature Importance Scaled': ':.2f',
                                     'Drift Coefficient': ':.2f',
                                     'Feature Importance': False,
                                     'Feature Name': False},
                         title='Feature Importance vs Drift Map')
        fig.update_traces(marker=dict(size=10, opacity=.75))
        # Quadrant labels and colored backgrounds splitting the map at 0.5
        axis_value_min, axis_value_medium, axis_value_max = 0, .5, 1
        fig.add_trace(
            go.Scatter(
                x=[axis_value_min + .15, axis_value_max - .15,
                   axis_value_max - .15, axis_value_min + .15],
                y=[axis_value_max + .05, axis_value_max + .05,
                   axis_value_min - .05, axis_value_min - .05],
                text=['NON-IMPORTANT FEATURES DRIFTED',
                      'IMPORTANT FEATURES AND DRIFTED',
                      'IMPORTANT FEATURES NON-DRIFTED',
                      'NON-IMPORTANT FEATURES NON-DRIFTED'],
                mode="text",
                showlegend=False
            )
        )
        fig.add_shape(
            type="rect",
            x0=axis_value_min,
            y0=axis_value_min,
            x1=axis_value_medium,
            y1=axis_value_medium,
            fillcolor="khaki",
            opacity=.25
        )
        fig.add_shape(
            type="rect",
            x0=axis_value_min,
            y0=axis_value_medium,
            x1=axis_value_medium,
            y1=axis_value_max,
            fillcolor="coral",
            opacity=.25
        )
        fig.add_shape(
            type="rect",
            x0=axis_value_medium,
            y0=axis_value_min,
            x1=axis_value_max,
            y1=axis_value_medium,
            fillcolor="limegreen",
            opacity=.25
        )
        fig.add_shape(
            type="rect",
            x0=axis_value_medium,
            y0=axis_value_medium,
            x1=axis_value_max,
            y1=axis_value_max,
            fillcolor="crimson",
            opacity=.25
        )
        fig.update_layout(
            xaxis=dict(range=[axis_value_min - .05, axis_value_max + .05]),
            yaxis=dict(range=[axis_value_min - .1, axis_value_max + .1])
        )
        if save_plot_path:
            fig.write_html(save_plot_path)
        else:
            fig.show()

    @staticmethod
    @check_optional_module(has_module=_has_plotly,
                           exception_message=_plotly_exception_message)
    def weights_plot(weights: np.array, save_plot_path: Path = None) -> None:
        """Weights plot, the higher the weight, the more
        similar the train data is to the test data
        This will be used to retrain the model
        You can save the plot in `save_plot_path` path
        """
        fig = px.histogram(weights,
                           title='Weights From The Discriminative Model')
        fig.update_layout(showlegend=False)
        if save_plot_path:
            fig.write_html(save_plot_path)
        else:
            fig.show()

    @staticmethod
    def _drop_outliers_between(
            df: pd.DataFrame,
            feature: str,
            percentiles: Tuple[float,
                               float] = (.05, .95)) -> pd.DataFrame:
        """Drop outliers for column `feature` of
        `df` between `percentiles`
        """
        lower, upper = percentiles
        return df[df[feature].between(df[feature].quantile(lower),
                                      df[feature].quantile(upper))]

    @staticmethod
    def _convert_to_mid_interval_with_max_bins(
            serie: pd.Series,
            bins: int = 25) -> pd.DataFrame:
        """Convert `series` values to a binned version
        of it in `bins` as number of
        intervals
        """
        # Each value is replaced by the midpoint of its bin interval
        return (pd
                .cut(serie, bins=bins)
                .apply(lambda x: x.mid)
                .astype(float))

    @check_optional_module(has_module=_has_plotly,
                           exception_message=_plotly_exception_message)
    def partial_dependence_comparison_plot(
            self,
            feature: str,
            percentiles: Tuple[float, float] = (.05, .95),
            max_bins: int = 25,
            save_plot_path: Path = None) -> None:
        """Partial dependence plot for `feature` in
        both datasets predictions
        You can save the plot in `save_plot_path` path
        """
        X_train_copy = self.X_train.copy()
        X_test_copy = self.X_test.copy()
        X_train_copy['is_left'] = '1'
        X_test_copy['is_left'] = '0'
        X_train_copy['Prediction'] = (
            self.model.predict_proba(X_train_copy)[:, 1]
        )
        X_test_copy['Prediction'] = (
            self.model.predict_proba(X_test_copy)[:, 1]
        )
        is_numeric = pd.api.types.is_numeric_dtype(
            X_train_copy[feature]
        )
        if is_numeric:
            # Trim outliers and discretize so the dependence curve is
            # readable for continuous features
            X_train_copy = (
                self._drop_outliers_between(X_train_copy,
                                            feature=feature,
                                            percentiles=percentiles)
            )
            X_test_copy = (
                self._drop_outliers_between(X_test_copy,
                                            feature=feature,
                                            percentiles=percentiles)
            )
            bins = min(X_train_copy[feature].nunique(),
                       max_bins)
            X_train_copy[feature] = (
                self._convert_to_mid_interval_with_max_bins(
                    X_train_copy[feature],
                    bins
                )
            )
            X_test_copy[feature] = (
                self._convert_to_mid_interval_with_max_bins(
                    X_test_copy[feature],
                    bins
                )
            )
        X_both = pd.concat([X_train_copy, X_test_copy])
        # Mean prediction per (dataset, feature value) pair
        data_to_plot = (
            X_both
            .groupby(['is_left', feature])
            .Prediction
            .mean()
            .reset_index()
        )
        if is_numeric:
            fig = px.scatter(data_to_plot,
                             x=feature,
                             y='Prediction',
                             color='is_left',
                             trendline="ols")
        else:
            fig = px.bar(data_to_plot,
                         x=feature,
                         y='Prediction',
                         color='is_left',
                         barmode='group')
        fig.update_layout(title=f'Partial Dependence For {feature}',
                          bargroupgap=.1)
        if save_plot_path:
            fig.write_html(save_plot_path)
        else:
            fig.show()

    @check_optional_module(has_module=_has_plotly,
                           exception_message=_plotly_exception_message)
    def drift_by_sorted_bins_plot(self,
                                  feature: str,
                                  bins: int = 10,
                                  save_plot_path: Path = None) -> None:
        """Concat all the data in both dataframes and
        sort it by `feature`, then it cuts in `bins`
        number of bins and computes quantity of registers
        in each bin
        You can save the plot in `save_plot_path` path
        """
        X_train_copy = self.X_train.copy()
        X_test_copy = self.X_test.copy()
        X_train_copy['is_left'] = '1'
        X_test_copy['is_left'] = '0'
        # Shuffle (sample frac=1) so ties are broken randomly before ranking
        X_both = (
            pd
            .concat([X_train_copy[[feature, 'is_left']],
                     X_test_copy[[feature, 'is_left']]])
            .sample(frac=1)
            .reset_index()
        )
        is_categorical = not pd.api.types.is_numeric_dtype(
            X_both[feature]
        )
        if is_categorical:
            X_both[feature] = X_both[feature].astype('category')
        # Rank values (category codes for categoricals) so qcut can build
        # equally-populated bins
        X_both['rank'] = (
            X_both[feature].cat.codes.rank(method='first') if is_categorical
            else X_both[feature].rank(method='first')
        )
        X_both['Bin Number'] = pd.qcut(X_both['rank'],
                                        q=bins,
                                        labels=range(1, bins + 1))
        fig = px.histogram(X_both,
                           x='Bin Number',
                           color='is_left',
                           nbins=bins,
                           barmode='group')
        fig.update_layout(title=f'Drift By Bin For {feature}',
                          bargroupgap=.1,
                          xaxis=dict(tickmode='linear'))
        if save_plot_path:
            fig.write_html(save_plot_path)
        else:
            fig.show()
| [
"numpy.abs",
"plotly.express.scatter",
"matplotlib.pyplot.savefig",
"plotly.express.histogram",
"shap.summary_plot",
"pandas.api.types.is_numeric_dtype",
"plotly.express.bar",
"warnings.catch_warnings",
"pandas.cut",
"warnings.simplefilter",
"plotly.graph_objects.Scatter",
"numpy.empty",
"sh... | [((1411, 1462), 'pandas.DataFrame', 'pd.DataFrame', (['X_train_to_shap'], {'columns': 'column_names'}), '(X_train_to_shap, columns=column_names)\n', (1423, 1462), True, 'import pandas as pd\n'), ((1537, 1587), 'pandas.DataFrame', 'pd.DataFrame', (['X_test_to_shap'], {'columns': 'column_names'}), '(X_test_to_shap, columns=column_names)\n', (1549, 1587), True, 'import pandas as pd\n'), ((1819, 1830), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (1827, 1830), True, 'import numpy as np\n'), ((3785, 3948), 'shap.summary_plot', 'shap.summary_plot', (['self.shap_values', 'self.X_test_to_shap'], {'plot_type': '"""bar"""', 'title': '"""Most Discriminative Features"""', 'show': '(True if not save_plot_path else False)'}), "(self.shap_values, self.X_test_to_shap, plot_type='bar',\n title='Most Discriminative Features', show=True if not save_plot_path else\n False)\n", (3802, 3948), False, 'import shap\n'), ((5646, 5688), 'pandas.concat', 'pd.concat', (['[X_train_column, X_test_column]'], {}), '([X_train_column, X_test_column])\n', (5655, 5688), True, 'import pandas as pd\n'), ((5704, 5881), 'plotly.express.histogram', 'px.histogram', (['X_train_and_test'], {'title': 'f"""Both Histogram Normalized For {column}"""', 'x': 'column', 'color': '"""is_left"""', 'barmode': '"""group"""', 'nbins': 'nbins', 'histnorm': '"""probability density"""'}), "(X_train_and_test, title=\n f'Both Histogram Normalized For {column}', x=column, color='is_left',\n barmode='group', nbins=nbins, histnorm='probability density')\n", (5716, 5881), True, 'import plotly.express as px\n'), ((7823, 8156), 'plotly.express.scatter', 'px.scatter', (['df_feature_importance_to_plot'], {'x': '"""Feature Importance Scaled"""', 'y': '"""Drift Coefficient"""', 'text': '"""Feature Name"""', 'hover_name': '"""Feature Name"""', 'hover_data': "{'Feature Importance Scaled': ':.2f', 'Drift Coefficient': ':.2f',\n 'Feature Importance': False, 'Feature Name': False}", 'title': '"""Feature Importance vs Drift 
Map"""'}), "(df_feature_importance_to_plot, x='Feature Importance Scaled', y=\n 'Drift Coefficient', text='Feature Name', hover_name='Feature Name',\n hover_data={'Feature Importance Scaled': ':.2f', 'Drift Coefficient':\n ':.2f', 'Feature Importance': False, 'Feature Name': False}, title=\n 'Feature Importance vs Drift Map')\n", (7833, 8156), True, 'import plotly.express as px\n'), ((10884, 10952), 'plotly.express.histogram', 'px.histogram', (['weights'], {'title': '"""Weights From The Discriminative Model"""'}), "(weights, title='Weights From The Discriminative Model')\n", (10896, 10952), True, 'import plotly.express as px\n'), ((12909, 12961), 'pandas.api.types.is_numeric_dtype', 'pd.api.types.is_numeric_dtype', (['X_train_copy[feature]'], {}), '(X_train_copy[feature])\n', (12938, 12961), True, 'import pandas as pd\n'), ((13975, 14013), 'pandas.concat', 'pd.concat', (['[X_train_copy, X_test_copy]'], {}), '([X_train_copy, X_test_copy])\n', (13984, 14013), True, 'import pandas as pd\n'), ((16368, 16455), 'plotly.express.histogram', 'px.histogram', (['X_both'], {'x': '"""Bin Number"""', 'color': '"""is_left"""', 'nbins': 'bins', 'barmode': '"""group"""'}), "(X_both, x='Bin Number', color='is_left', nbins=bins, barmode=\n 'group')\n", (16380, 16455), True, 'import plotly.express as px\n'), ((2162, 2187), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2185, 2187), False, 'import warnings\n'), ((2275, 2306), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2296, 2306), False, 'import warnings\n'), ((4085, 4133), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_plot_path'], {'bbox_inches': '"""tight"""'}), "(save_plot_path, bbox_inches='tight')\n", (4096, 4133), True, 'import matplotlib.pyplot as plt\n'), ((5296, 5438), 'warnings.warn', 'warnings.warn', (['f"""Column {column} has {X_train_total_nans + X_test_total_nans} nan values, you can use `fillna_value` if you need it"""'], {}), "(\n f'Column 
{column} has {X_train_total_nans + X_test_total_nans} nan values, you can use `fillna_value` if you need it'\n )\n", (5309, 5438), False, 'import warnings\n'), ((8568, 8969), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': '[axis_value_min + 0.15, axis_value_max - 0.15, axis_value_max - 0.15, \n axis_value_min + 0.15]', 'y': '[axis_value_max + 0.05, axis_value_max + 0.05, axis_value_min - 0.05, \n axis_value_min - 0.05]', 'text': "['NON-IMPORTANT FEATURES DRIFTED', 'IMPORTANT FEATURES AND DRIFTED',\n 'IMPORTANT FEATURES NON-DRIFTED', 'NON-IMPORTANT FEATURES NON-DRIFTED']", 'mode': '"""text"""', 'showlegend': '(False)'}), "(x=[axis_value_min + 0.15, axis_value_max - 0.15, axis_value_max -\n 0.15, axis_value_min + 0.15], y=[axis_value_max + 0.05, axis_value_max +\n 0.05, axis_value_min - 0.05, axis_value_min - 0.05], text=[\n 'NON-IMPORTANT FEATURES DRIFTED', 'IMPORTANT FEATURES AND DRIFTED',\n 'IMPORTANT FEATURES NON-DRIFTED', 'NON-IMPORTANT FEATURES NON-DRIFTED'],\n mode='text', showlegend=False)\n", (8578, 8969), True, 'import plotly.graph_objects as go\n'), ((14225, 14314), 'plotly.express.scatter', 'px.scatter', (['data_to_plot'], {'x': 'feature', 'y': '"""Prediction"""', 'color': '"""is_left"""', 'trendline': '"""ols"""'}), "(data_to_plot, x=feature, y='Prediction', color='is_left',\n trendline='ols')\n", (14235, 14314), True, 'import plotly.express as px\n'), ((14459, 14545), 'plotly.express.bar', 'px.bar', (['data_to_plot'], {'x': 'feature', 'y': '"""Prediction"""', 'color': '"""is_left"""', 'barmode': '"""group"""'}), "(data_to_plot, x=feature, y='Prediction', color='is_left', barmode=\n 'group')\n", (14465, 14545), True, 'import plotly.express as px\n'), ((15852, 15898), 'pandas.api.types.is_numeric_dtype', 'pd.api.types.is_numeric_dtype', (['X_both[feature]'], {}), '(X_both[feature])\n', (15881, 15898), True, 'import pandas as pd\n'), ((2352, 2437), 'shap.TreeExplainer', 'shap.TreeExplainer', ([], {'model': 'self.model', 'feature_perturbation': 
'"""tree_path_dependent"""'}), "(model=self.model, feature_perturbation='tree_path_dependent'\n )\n", (2370, 2437), False, 'import shap\n'), ((2670, 2721), 'pandas.DataFrame', 'pd.DataFrame', (['data_array'], {'columns': 'self.column_names'}), '(data_array, columns=self.column_names)\n', (2682, 2721), True, 'import pandas as pd\n'), ((6974, 6998), 'numpy.abs', 'np.abs', (['self.shap_values'], {}), '(self.shap_values)\n', (6980, 6998), True, 'import numpy as np\n'), ((11894, 11918), 'pandas.cut', 'pd.cut', (['serie'], {'bins': 'bins'}), '(serie, bins=bins)\n', (11900, 11918), True, 'import pandas as pd\n'), ((15640, 15726), 'pandas.concat', 'pd.concat', (["[X_train_copy[[feature, 'is_left']], X_test_copy[[feature, 'is_left']]]"], {}), "([X_train_copy[[feature, 'is_left']], X_test_copy[[feature,\n 'is_left']]])\n", (15649, 15726), True, 'import pandas as pd\n'), ((2963, 3001), 'shap.sample', 'shap.sample', (['self.X_train_to_shap', '(100)'], {}), '(self.X_train_to_shap, 100)\n', (2974, 3001), False, 'import shap\n')] |
#Reference: https://github.com/openai/baselines/tree/master/baselines/ppo2 (GPU-enabled PPO, compared to PPO1)
#add parent dir to find package. Only needed for source code build, pip install doesn't need it.
import os, inspect
import sys

# Locate the directory containing this file and put its parent on the module
# search path so sibling packages resolve when running from a source checkout
# (pip installs don't need this).
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
print('CURRENT DIR:', currentdir)
parentdir = os.path.dirname(currentdir)
print('PARENT DIR:', parentdir)
# FIX: `os.sys` is an undocumented accident of os's implementation;
# import and use `sys` directly.
sys.path.insert(0, parentdir)
import gym
from RobotOperationEnv import RobotOperationEnvironment
import robotCommon as RC
from gym import utils, spaces
from gym.utils import seeding
from std_srvs.srv import Empty
#from baselines import deepq
import time
import datetime
import numpy as np
import random
import argparse
import tensorflow as tf
#from keras.engine.training import collect_trainable_weights
import json
# PPO2
from baselines.common import set_global_seeds
from baselines.common.vec_env.vec_normalize import VecNormalize
from ppo.ppo2 import ppo2
from ppo.ppo2.policies import MlpPolicy
from baselines import bench, logger
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
import timeit
try:
import vrep
except:
print ('--------------------------------------------------------------')
print ('"vrep.py" could not be imported. This means very probably that')
print ('either "vrep.py" or the remoteApi library could not be found.')
print ('Make sure both are in the same folder as this file,')
print ('or appropriately adjust the file "vrep.py"')
print ('--------------------------------------------------------------')
print ('')
##############################################################################################################################################################
##############################################################################################################################################################
def startTrainingPPO2(num_timesteps, seed):
    """Build the V-REP robot environment and train a PPO2 MLP policy on it.

    num_timesteps -- total number of environment steps to train for
    seed          -- seed forwarded to baselines' global RNG seeding
    """
    # Create the simulated robot environment from the shared V-REP handles
    print('START ENV', RC.GB_CLIENT_ID(), RC.gbRobotHandle())
    env = RobotOperationEnvironment(RC.GB_CLIENT_ID(),
                                    RC.GB_CSERVER_ROBOT_ID,
                                    RC.gbRobotHandle())
    #env = bench.Monitor(env, logger.get_dir())

    # Single-threaded TensorFlow session, entered for the whole training run
    n_threads = 1
    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    intra_op_parallelism_threads=n_threads,
                                    inter_op_parallelism_threads=n_threads)
    tf.Session(config=session_config).__enter__()

    #env = DummyVecEnv(env)
    #env = VecNormalize(env)
    set_global_seeds(seed)

    # Standard PPO2 hyper-parameters: clip range 0.2, GAE lambda 0.95,
    # discount 0.99, checkpoint every iteration
    ppo2.learn(policy=MlpPolicy, env=env, nsteps=512, nminibatches=32,
               lam=0.95, gamma=0.99, noptepochs=10, log_interval=1,
               ent_coef=0.0,
               lr=3e-4,
               cliprange=0.2,
               total_timesteps=num_timesteps, save_interval=1)
if __name__ == '__main__':
    RC.initialize_vrep()
    logger.configure()
    # BUG FIX: `np.random.seed()` returns None, so the previous call
    # `startTrainingPPO2(10000, np.random.seed(1337))` passed seed=None.
    # Seed numpy here and hand the intended integer seed to the trainer.
    np.random.seed(1337)
    startTrainingPPO2(10000, 1337)
    RC.finalize_vrep()
| [
"baselines.common.set_global_seeds",
"robotCommon.finalize_vrep",
"baselines.logger.configure",
"inspect.currentframe",
"tensorflow.Session",
"os.sys.path.insert",
"robotCommon.gbRobotHandle",
"os.path.dirname",
"numpy.random.seed",
"robotCommon.GB_CLIENT_ID",
"tensorflow.ConfigProto",
"ppo.pp... | [((361, 388), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (376, 388), False, 'import os, inspect\n'), ((421, 453), 'os.sys.path.insert', 'os.sys.path.insert', (['(0)', 'parentdir'], {}), '(0, parentdir)\n', (439, 453), False, 'import os, inspect\n'), ((2245, 2360), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'intra_op_parallelism_threads': 'ncpu', 'inter_op_parallelism_threads': 'ncpu'}), '(allow_soft_placement=True, intra_op_parallelism_threads=ncpu,\n inter_op_parallelism_threads=ncpu)\n', (2259, 2360), True, 'import tensorflow as tf\n'), ((2517, 2539), 'baselines.common.set_global_seeds', 'set_global_seeds', (['seed'], {}), '(seed)\n', (2533, 2539), False, 'from baselines.common import set_global_seeds\n'), ((2567, 2779), 'ppo.ppo2.ppo2.learn', 'ppo2.learn', ([], {'policy': 'policy', 'env': 'env', 'nsteps': '(512)', 'nminibatches': '(32)', 'lam': '(0.95)', 'gamma': '(0.99)', 'noptepochs': '(10)', 'log_interval': '(1)', 'ent_coef': '(0.0)', 'lr': '(0.0003)', 'cliprange': '(0.2)', 'total_timesteps': 'num_timesteps', 'save_interval': '(1)'}), '(policy=policy, env=env, nsteps=512, nminibatches=32, lam=0.95,\n gamma=0.99, noptepochs=10, log_interval=1, ent_coef=0.0, lr=0.0003,\n cliprange=0.2, total_timesteps=num_timesteps, save_interval=1)\n', (2577, 2779), False, 'from ppo.ppo2 import ppo2\n'), ((2842, 2862), 'robotCommon.initialize_vrep', 'RC.initialize_vrep', ([], {}), '()\n', (2860, 2862), True, 'import robotCommon as RC\n'), ((2867, 2885), 'baselines.logger.configure', 'logger.configure', ([], {}), '()\n', (2883, 2885), False, 'from baselines import bench, logger\n'), ((2941, 2959), 'robotCommon.finalize_vrep', 'RC.finalize_vrep', ([], {}), '()\n', (2957, 2959), True, 'import robotCommon as RC\n'), ((2032, 2049), 'robotCommon.GB_CLIENT_ID', 'RC.GB_CLIENT_ID', ([], {}), '()\n', (2047, 2049), True, 'import robotCommon as RC\n'), ((2051, 2069), 'robotCommon.gbRobotHandle', 
'RC.gbRobotHandle', ([], {}), '()\n', (2067, 2069), True, 'import robotCommon as RC\n'), ((2107, 2124), 'robotCommon.GB_CLIENT_ID', 'RC.GB_CLIENT_ID', ([], {}), '()\n', (2122, 2124), True, 'import robotCommon as RC\n'), ((2150, 2168), 'robotCommon.gbRobotHandle', 'RC.gbRobotHandle', ([], {}), '()\n', (2166, 2168), True, 'import robotCommon as RC\n'), ((2915, 2935), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (2929, 2935), True, 'import numpy as np\n'), ((289, 311), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (309, 311), False, 'import os, inspect\n'), ((2417, 2442), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2427, 2442), True, 'import tensorflow as tf\n')] |
# Plot the estimated entropy production rate (EPR, "sdot") against the amount
# of data used (N_traj * T), for several noise amplitudes (sigma) and
# dimensionalities (marker shape encodes 2D/3D/4D), at a fixed alpha.
import numpy as np
import matplotlib.pyplot as plt
import h5py
import matplotlib as mpl
import os
import sys
from datetime import datetime
plt.close('all')
# Embed fonts as TrueType so the saved PDF stays editable
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['font.size'] = 12
mpl.rcParams['axes.linewidth'] = 1
mpl.rcParams['xtick.major.width'] = 1
mpl.rcParams['ytick.major.width'] = 1
mpl.rcParams['xtick.minor.width'] = 1
fig, ax = plt.subplots(figsize=(6, 5))
# Machine-dependent input/output locations
if sys.platform == 'linux':
    datapath = '/mnt/llmStorage203/Danny/freqent/spinOsc/190709/'
    savepath = '/media/daniel/storage11/Dropbox/LLM_Danny/freqent/spinOsc/'
elif sys.platform == 'darwin':
    datapath = '/Volumes/Storage/Danny/freqent/spinOsc/190709/'
    savepath = '/Users/Danny/Dropbox/LLM_Danny/freqent/spinOsc/'
alpha = 2  # pick which value of alpha to plot with
sdot_array = []
ndim_array = []
sdot_thry = 2 * alpha**2  # theoretical EPR (NOTE: recomputed inline below)
# Collect sdot estimates for every simulation file matching this alpha.
# t_epr, sigma and n_epr are taken from the last matching file read;
# presumably identical across files -- TODO confirm.
for file in os.listdir(datapath):
    if file.endswith('.hdf5'):
        with h5py.File(os.path.join(datapath, file), 'r') as f:
            if f['params']['alpha'][()] == alpha:
                ndim_array.append(f['params']['ndim'][()])
                sdot_array.append(f['data']['sdot_array'][:])
                t_epr = f['params']['t_epr'][()]
                sigma = f['params']['sigma_array'][:]
                n_epr = f['params']['n_epr'][:]
# One color per sigma value
cmap = mpl.cm.get_cmap('viridis')
normalize = mpl.colors.Normalize(vmin=min(sigma), vmax=max(sigma))
colors = [cmap(normalize(s)) for s in sigma]
ndim_inds = np.argsort(ndim_array)  # get indices of number of dimensions in order
xscale_array = [0.9, 1, 1.1]  # scale x-axis for plotting distinguishability
for dimInd, ind in enumerate(ndim_inds):
    sdot = sdot_array[ind]
    ndim = ndim_array[ind]
    for nInd, n in enumerate(n_epr):
        for sInd, s in enumerate(sigma):
            # marker=(ndim, 0, 45): ndim-sided polygon rotated 45 degrees
            ax.semilogx(n * t_epr * xscale_array[dimInd], sdot[nInd, sInd],
                        marker=(ndim, 0, 45),
                        color=colors[sInd],
                        markersize=10)
# Theoretical EPR reference line
ax.plot([n_epr[0] * t_epr, n_epr[-1] * t_epr], [2 * alpha**2, 2 * alpha**2], 'k--', lw=2)
handles = [mpl.lines.Line2D([0], [0], color='k', linestyle='', marker=(2, 0, 45), markersize=10, label='2D'),
           mpl.lines.Line2D([0], [0], color='k', linestyle='', marker=(3, 0, 45), markersize=10, label='3D'),
           mpl.lines.Line2D([0], [0], color='k', linestyle='', marker=(4, 0, 45), markersize=10, label='4D'),
           mpl.lines.Line2D([0], [0], color='k', linestyle='--', label=r'$\dot{S}_{thry} = 8$')]
cax, _ = mpl.colorbar.make_axes(ax)
cbar = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=normalize)
cbar.ax.set_title(r'$\sigma$')
ax.legend(handles=handles, loc='best')
ax.set(xlabel=r'$N_{traj} T$', ylabel=r'$\dot{\hat{S}}$',
       xticks=[500, 1000, 5000],
       xticklabels=[r'$5 \times 10^2$', r'$10^3$', r'$5 \times 10^3$'])
ax.tick_params(axis='both', which='both', direction='in')
fig.savefig(os.path.join(savepath, datetime.now().strftime('%y%m%d') + '_alpha{a}_epr_vs_dataSize.pdf'.format(a=alpha)), format='pdf')
plt.show()
| [
"os.listdir",
"matplotlib.colorbar.ColorbarBase",
"os.path.join",
"matplotlib.pyplot.close",
"numpy.argsort",
"datetime.datetime.now",
"matplotlib.pyplot.subplots",
"matplotlib.colorbar.make_axes",
"matplotlib.lines.Line2D",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.show"
] | [((140, 156), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (149, 156), True, 'import matplotlib.pyplot as plt\n'), ((382, 410), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 5)'}), '(figsize=(6, 5))\n', (394, 410), True, 'import matplotlib.pyplot as plt\n'), ((864, 884), 'os.listdir', 'os.listdir', (['datapath'], {}), '(datapath)\n', (874, 884), False, 'import os\n'), ((1312, 1338), 'matplotlib.cm.get_cmap', 'mpl.cm.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (1327, 1338), True, 'import matplotlib as mpl\n'), ((1464, 1486), 'numpy.argsort', 'np.argsort', (['ndim_array'], {}), '(ndim_array)\n', (1474, 1486), True, 'import numpy as np\n'), ((2520, 2546), 'matplotlib.colorbar.make_axes', 'mpl.colorbar.make_axes', (['ax'], {}), '(ax)\n', (2542, 2546), True, 'import matplotlib as mpl\n'), ((2554, 2611), 'matplotlib.colorbar.ColorbarBase', 'mpl.colorbar.ColorbarBase', (['cax'], {'cmap': 'cmap', 'norm': 'normalize'}), '(cax, cmap=cmap, norm=normalize)\n', (2579, 2611), True, 'import matplotlib as mpl\n'), ((3042, 3052), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3050, 3052), True, 'import matplotlib.pyplot as plt\n'), ((2094, 2195), 'matplotlib.lines.Line2D', 'mpl.lines.Line2D', (['[0]', '[0]'], {'color': '"""k"""', 'linestyle': '""""""', 'marker': '(2, 0, 45)', 'markersize': '(10)', 'label': '"""2D"""'}), "([0], [0], color='k', linestyle='', marker=(2, 0, 45),\n markersize=10, label='2D')\n", (2110, 2195), True, 'import matplotlib as mpl\n'), ((2204, 2305), 'matplotlib.lines.Line2D', 'mpl.lines.Line2D', (['[0]', '[0]'], {'color': '"""k"""', 'linestyle': '""""""', 'marker': '(3, 0, 45)', 'markersize': '(10)', 'label': '"""3D"""'}), "([0], [0], color='k', linestyle='', marker=(3, 0, 45),\n markersize=10, label='3D')\n", (2220, 2305), True, 'import matplotlib as mpl\n'), ((2314, 2415), 'matplotlib.lines.Line2D', 'mpl.lines.Line2D', (['[0]', '[0]'], {'color': '"""k"""', 'linestyle': '""""""', 
'marker': '(4, 0, 45)', 'markersize': '(10)', 'label': '"""4D"""'}), "([0], [0], color='k', linestyle='', marker=(4, 0, 45),\n markersize=10, label='4D')\n", (2330, 2415), True, 'import matplotlib as mpl\n'), ((2424, 2513), 'matplotlib.lines.Line2D', 'mpl.lines.Line2D', (['[0]', '[0]'], {'color': '"""k"""', 'linestyle': '"""--"""', 'label': '"""$\\\\dot{S}_{thry} = 8$"""'}), "([0], [0], color='k', linestyle='--', label=\n '$\\\\dot{S}_{thry} = 8$')\n", (2440, 2513), True, 'import matplotlib as mpl\n'), ((940, 968), 'os.path.join', 'os.path.join', (['datapath', 'file'], {}), '(datapath, file)\n', (952, 968), False, 'import os\n'), ((2941, 2955), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2953, 2955), False, 'from datetime import datetime\n')] |
#! /usr/bin/python3
# -*- coding: utf-8 -*-
"""
***
MakePSF
***
"""
__author__ = '<NAME>, <NAME>'
__copyright__ = 'Copyright 2020, nenupy'
__credits__ = ['<NAME>','<NAME>']
__maintainer__ = 'Julien'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'MakePSF'
]
import logging
log = logging.getLogger(__name__)
import numpy as np
import numexpr as ne
import MeerKAT
import astropy.units as u
from astropy.time import Time, TimeDelta
from astropy.coordinates import (
EarthLocation,
Angle,
AltAz,
ICRS,
Longitude,
FK5,
SkyCoord
)
from astropy.constants import c as lspeed
MeerKATarr=MeerKAT.MeerKATarray()
# ============================================================= #
# ---------------------------- lst ---------------------------- #
# ============================================================= #
def lst(time, kind):
    """ Local sidereal time at the MeerKAT site.

    :param time:
        Time
    :type time: :class:`~astropy.time.Time`
    :param kind:
        ``'fast'`` uses an approximate GMST formula; ``'mean'`` and
        ``'apparent'`` delegate to astropy (precession, and precession
        plus nutation, respectively).
    :type kind: str
    :returns: LST time
    :rtype: :class:`~astropy.coordinates.Longitude`
    """
    if kind.lower() != 'fast':
        # Accurate path: let astropy do the work at the site longitude
        site_lon = MeerKATarr.Loc.to_geodetic().lon
        return time.sidereal_time(kind, site_lon)
    # Fast approximation, see
    # http://www.roma1.infn.it/~frasca/snag/GeneralRules.pdf
    # Days elapsed since 2000 January 1, 12h UT (J2000)
    days_since_j2000 = time.jd - 2451545.
    # Greenwich mean sidereal time, wrapped to [0, 24) hours
    gmst_hours = (18.697374558 + 24.06570982441908 * days_since_j2000) % 24.
    # Shift to the local meridian
    local_hours = gmst_hours + MeerKATarr.Loc.lon.hour
    if np.isscalar(local_hours):
        if local_hours < 0:
            local_hours += 24
    else:
        local_hours[local_hours < 0] += 24.
    return Longitude(local_hours, 'hour')
# ============================================================= #
# ---------------------------- lha ---------------------------- #
# ============================================================= #
def lha(lst, skycoord):
    """ Local Hour Angle of an object in the observer's sky.

    :param lst:
        Local Sidereal Time, e.g. as returned by :func:`lst`.
    :type lst: :class:`~astropy.coordinates.Longitude`
    :param skycoord:
        Sky coordinates to convert to Local Hour Angles. Should be
        expressed in FK5 with the matching equinox (see :func:`toFK5`)
        for accurate results.
    :type skycoord: :class:`~astropy.coordinates.SkyCoord`
    :returns: LHA time
    :rtype: :class:`~astropy.coordinates.Angle`
    """
    if skycoord.equinox is None:
        log.warning(
            'Given skycoord for LHA computation does not '
            'have an equinox attribute, make sure the '
            'precession is taken into account.'
        )
    # HA = LST - RA, then wrap into [0, 360) degrees
    hour_angle = lst - skycoord.ra
    full_turn = Angle(360.000000 * u.deg)
    if hour_angle.isscalar:
        if hour_angle.deg < 0:
            hour_angle += full_turn
        elif hour_angle.deg > 360:
            hour_angle -= full_turn
    else:
        hour_angle[hour_angle.deg < 0] += full_turn
        hour_angle[hour_angle.deg > 360] -= full_turn
    return hour_angle
# ============================================================= #
# ============================================================= #
# ------------------------ wavelength ------------------------- #
# ============================================================= #
def wavelength(freq):
    """ Convert a radio frequency into a wavelength.

    :param freq:
        Frequency (assumed in MHz unless a
        :class:`~astropy.units.Quantity` is provided)
    :type freq: `float`, :class:`~numpy.ndarray` or
        :class:`~astropy.units.Quantity`
    :returns: Wavelength in meters
    :rtype: :class:`~astropy.units.Quantity`
    """
    # Bare numbers are interpreted as MHz
    if not isinstance(freq, u.Quantity):
        freq *= u.MHz
    freq_hz = freq.to(u.Hz)
    return (lspeed / freq_hz).to(u.m)
# ============================================================= #
# ------------------------- ho_coord -------------------------- #
# ============================================================= #
def ho_coord(alt, az, time):
    """ Build horizontal (alt-az) coordinates above the MeerKAT site.

    :param alt:
        Altitude in degrees
    :type alt: `float` or :class:`~astropy.units.Quantity`
    :param az:
        Azimuth in degrees
    :type az: `float` or :class:`~astropy.units.Quantity`
    :param time:
        Time of observation, as an :class:`~astropy.time.Time` or an
        ISO/ISOT string.
    :type time: str, :class:`~astropy.time.Time`
    :returns: :class:`~astropy.coordinates.AltAz` object
    :rtype: :class:`~astropy.coordinates.AltAz`

    :Example:
        >>> altaz = ho_coord(alt=45, az=180, time='2020-01-01 12:00:00')
    """
    # Normalize the inputs: strings to Time, bare numbers to degrees
    if not isinstance(time, Time):
        time = Time(time)
    if not isinstance(alt, u.Quantity):
        alt *= u.deg
    if not isinstance(az, u.Quantity):
        az *= u.deg
    return AltAz(
        alt=alt,
        az=az,
        obstime=time,
        location=MeerKATarr.Loc
    )
def ho_zenith(time):
    """ Horizontal coordinates of the local zenith above the array.

    :param time:
        Time(s) of observation, as an :class:`~astropy.time.Time`
        (scalar or array) or an ISO/ISOT string.
    :type time: `str`, :class:`~astropy.time.Time`
    :returns: :class:`~astropy.coordinates.AltAz` object
    :rtype: :class:`~astropy.coordinates.AltAz`

    :Example:
        >>> zen_altaz = ho_zenith(time='2020-01-01 12:00:00')
    """
    if not isinstance(time, Time):
        time = Time(time)
    # Zenith is alt=90, az=0; broadcast over array-valued times
    if time.isscalar:
        return ho_coord(az=0., alt=90., time=time)
    n = time.size
    return ho_coord(az=np.zeros(n), alt=np.full(n, 90.), time=time)
# ============================================================= #
# ------------------------- eq_zenith ------------------------- #
# ============================================================= #
def eq_zenith(time):
    """ Equatorial (ICRS) coordinates of the local zenith above the array.

    :param time:
        Time(s) of observation, as an :class:`~astropy.time.Time` or an
        ISO/ISOT string.
    :type time: `str`, :class:`~astropy.time.Time`
    :returns: :class:`~astropy.coordinates.ICRS` object
    :rtype: :class:`~astropy.coordinates.ICRS`

    :Example:
        >>> zen_radec = eq_zenith(time='2020-01-01 12:00:00')
    """
    # Zenith in alt-az, converted straight to ICRS
    return to_radec(ho_zenith(time=time))
def to_radec(altaz):
    """ Transform alt-az coordinates to the ICRS equatorial system.

    :param altaz:
        Horizontal coordinates
    :type altaz: :class:`~astropy.coordinates.AltAz`
    :returns: :class:`~astropy.coordinates.ICRS` object
    :rtype: :class:`~astropy.coordinates.ICRS`
    :raises TypeError: if ``altaz`` is not an AltAz instance
    """
    if isinstance(altaz, AltAz):
        return altaz.transform_to(ICRS)
    raise TypeError(
        'AltAz object expected.'
    )
def toFK5(skycoord, time):
    """ Convert ``skycoord`` to the FK5 system with equinox ``time``.

    :param skycoord:
        Sky coordinates to be converted to the FK5 system.
    :type skycoord: :class:`~astropy.coordinates.SkyCoord`
    :param time:
        Time that defines the equinox to be accounted for.
    :type time: :class:`~astropy.time.Time`
    :returns: FK5 sky coordinates
    :rtype: :class:`~astropy.coordinates.SkyCoord`
    """
    equinox_frame = FK5(equinox=time)
    return skycoord.transform_to(equinox_frame)
# ============================================================= #
# ------------------- Multiprocessing Image ------------------- #
# ============================================================= #
def _init(shared_real, shared_imag, l_grid, m_grid, u_coords, v_coords):
    """ Pool-worker initializer.

    Stashes the shared output buffers and the l/m/u/v arrays into this
    module's globals so that ``fill_per_block`` can reach them from each
    worker process.
    """
    global arr1, arr2, larr, marr, uarr, varr
    arr1 = shared_real
    arr2 = shared_imag
    larr = l_grid
    marr = m_grid
    uarr = u_coords
    varr = v_coords
def fill_per_block(args):
    """ Worker: fill one (x0:x1, y0:y1) block of the shared Fourier-kernel
    arrays with exp(2j*pi*(u*l + v*m)).

    Reads module globals set by ``_init``: arr1/arr2 (shared real/imag
    output buffers), larr/marr (l/m grids), uarr/varr (u/v coordinates).

    :param args: (x0, x1, y0, y1) block bounds, cast to int
    """
    x0, x1, y0, y1 = args.astype(int)
    # NumPy views over the shared ctypes buffers (written in place)
    tmp_r = np.ctypeslib.as_array(arr1)
    tmp_i = np.ctypeslib.as_array(arr2)
    na = np.newaxis
    lg = larr[na, x0:x1, y0:y1]
    mg = marr[na, x0:x1, y0:y1]
    pi = np.pi
    # numexpr resolves lg/mg/pi (locals) and uarr/varr (globals) by name,
    # so these variable names must not change
    expo = ne.evaluate('exp(2j*pi*(uarr*lg+varr*mg))')
    tmp_r[:, x0:x1, y0:y1] = expo.real
    tmp_i[:, x0:x1, y0:y1] = expo.imag
def mp_expo(npix, ncpus, lg, mg, u, v):
    """ Evaluate exp(2j*pi*(u*l + v*m)) over an (npix, npix) grid using a
    multiprocessing pool, one image block per worker.

    :param npix: image size in pixels (per axis)
    :param ncpus: number of workers; must be a perfect square
    :param lg, mg: direction-cosine grids
    :param u, v: uv coordinates; first axis indexes visibilities
    :returns: complex array of shape (u.shape[0], npix, npix)
    """
    # BUGFIX: Pool and sharedctypes were used without being imported
    # anywhere in this module (NameError at runtime).
    from multiprocessing import Pool, sharedctypes
    block_size = int(npix/np.sqrt(ncpus))
    # Shared (real, imag) buffers, written in place by the workers
    result_r = np.ctypeslib.as_ctypes(
        np.zeros((u.shape[0], npix, npix))
    )
    result_i = np.ctypeslib.as_ctypes(
        np.zeros_like(result_r)
    )
    shared_array_r = sharedctypes.RawArray(
        result_r._type_,
        result_r
    )
    shared_array_i = sharedctypes.RawArray(
        result_i._type_,
        result_i
    )
    # One (x0, x1, y0, y1) block per worker, in units of block_size
    n_windows = int(np.sqrt(ncpus))
    block_idxs = np.array([
        (i, i+1, j, j+1) for i in range(n_windows) for j in range(n_windows)
    ])*block_size
    pool = Pool(
        processes=ncpus,
        initializer=_init,
        initargs=(shared_array_r, shared_array_i, lg, mg, u, v)
    )
    res = pool.map(fill_per_block, (block_idxs))
    pool.close()
    result_r = np.ctypeslib.as_array(shared_array_r)
    result_i = np.ctypeslib.as_array(shared_array_i)
    del shared_array_r, shared_array_i
    return result_r + 1j * result_i
# ============================================================= #
# ============================================================= #
# ---------------------------- UVW ---------------------------- #
# ============================================================= #
class UVW(object):
    """ Compute the (u, v, w) coordinates of every baseline formed by a
    radio array, for a series of hour angles or times.

    Typical usage: instantiate with the antenna-position array, then call
    :meth:`compute` (explicit hour angle / declination, radians) or
    :meth:`compute2` (astropy phase center); results are exposed through
    :attr:`uvw` (meters) and :attr:`uvw_wave` (wavelengths).
    """
    def __init__(self, radioarray, freqs=None):
        # radioarray: antenna positions (transposed below so rows index
        # antennas); freqs: observing frequencies (MHz if unitless),
        # needed only by uvw_wave.
        self.bsl_xyz = None
        #self.times = times
        self.freqs = freqs
        self._radioarray = radioarray
        # One row per antenna after transposition
        self.antpos = self._radioarray.T #np.array([a.tolist() for a in self._radioarray])
        # RGF93 to ITRF97
        # See http://epsg.io/?q=itrf97 to find correct EPSG
        #t = Transformer.from_crs(
        #    crs_from='EPSG:2154', # RGF93
        #    crs_to='EPSG:4896'# ITRF2005
        #)
        #antpos[:, 0], antpos[:, 1], antpos[:, 2] = t.transform(
        #    xx=antpos[:, 0],
        #    yy=antpos[:, 1],
        #    zz=antpos[:, 2]
        #)
        m=self.antpos.shape[0]  # number of antennas
        # Pairwise position differences -> (n_ant, n_ant, 3) grid
        xyz = self.antpos[..., None]
        xyz = xyz[:, :, 0][:, None]
        # xyz = xyz - xyz.transpose(1, 0, 2)
        xyz = xyz.transpose(1, 0, 2) - xyz
        # Keep one baseline vector per antenna pair (lower triangle,
        # autocorrelations included)
        self.bsl = xyz[np.tril_indices(m)]
        self._ants = m
        return
    @property
    def freqs(self):
        # Observing frequencies as an astropy Quantity array (or None)
        return self._freqs
    @freqs.setter
    def freqs(self, f):
        if f is None:
            self._freqs = None
        else:
            # Bare numbers are interpreted as MHz
            if not isinstance(f, u.Quantity):
                f *= u.MHz
            # Normalize scalars to 1-element Quantity arrays
            if f.isscalar:
                f = np.array([f.value]) * u.MHz
            self._freqs = f
        return
    @property
    def uvw(self):
        """ UVW in meters.
        :getter: (times, baselines, UVW)
        :type: :class:`~numpy.ndarray`
        """
        if not hasattr(self, '_uvw'):
            raise Exception(
                'Run .compute() first.'
            )
        return self._uvw
    @property
    def uvw_wave(self):
        """ UVW in lambdas.
        :getter: (times, freqs, baselines, UVW)
        :type: :class:`~numpy.ndarray`
        """
        if not hasattr(self, '_uvw'):
            raise Exception(
                'Run .compute() first.'
            )
        if self.freqs is None:
            raise ValueError(
                'No frequency input, fill self.freqs.'
            )
        # Broadcast meters / wavelength over the frequency axis
        lamb = wavelength(self.freqs).value
        na = np.newaxis
        return self._uvw[:, na, :, :]/lamb[na, :, na, na]
    # --------------------------------------------------------- #
    # ------------------------ Methods ------------------------ #
    def compute(self,ha,dec):
        """ Compute UVW (in meters) for a fixed declination ``dec`` and
        one or several hour angles ``ha`` (both presumably in radians --
        np.sin/np.cos are applied directly; TODO confirm against callers).

        Fills the internal UVW array with shape
        (ha.size, n_baselines, 3) by applying the standard
        (hour angle, declination) -> (u, v, w) rotation matrix to each
        baseline vector.
        """
        # Transformations
        self._uvw = np.zeros(
            (
                ha.size,
                self.bsl.shape[0],
                3
            )
        )
        # Baseline vectors as columns: (3, n_baselines)
        xyz = np.array(self.bsl).T
        self._ha=ha
        if self._ha.size ==1:
            # Single pointing: one rotation matrix for all baselines
            sr = np.sin(ha)
            cr = np.cos(ha)
            sd = np.sin(dec)
            cd = np.cos(dec)
            rot_uvw = np.array([
                [ sr, cr, 0],
                [-sd*cr, sd*sr, cd],
                [ cd*cr, -cd*sr, sd]
            ])
            self._uvw[0, ...] = - np.dot(rot_uvw, xyz).T
        else:
            # One rotation matrix per hour angle
            for i in range(self._ha.size):
                sr = np.sin(ha[i])
                cr = np.cos(ha[i])
                sd = np.sin(dec)
                cd = np.cos(dec)
                rot_uvw = np.array([
                    [ sr, cr, 0],
                    [-sd*cr, sd*sr, cd],
                    [ cd*cr, -cd*sr, sd]
                ])
                self._uvw[i, ...] = - np.dot(rot_uvw, xyz).T
        return
    def compute2(self, phase_center=None):
        """ Compute UVW (in meters) toward ``phase_center`` (a SkyCoord;
        local zenith if None) for every entry of ``self.times``.

        Hour angles are derived from the apparent LST and the phase
        center precessed to the observation equinox; the same
        (hour angle, declination) -> (u, v, w) rotation as in
        :meth:`compute` is then applied per time step.

        NOTE(review): relies on ``self.times``, which ``__init__`` does
        NOT set (the assignment is commented out) -- the caller must set
        it beforehand; confirm against call sites.
        """
        # Phase center
        if phase_center is None:
            print('UVW phase centered at local zenith.')
            phase_center = eq_zenith(self.times)
        else:
            if not isinstance(phase_center, SkyCoord):
                raise TypeError(
                    'phase_center should be a SkyCoord object'
                )
            # Broadcast a scalar phase center over all time steps
            if phase_center.isscalar:
                ones = np.ones(self.times.size)
                ra_tab = ones * phase_center.ra
                dec_tab = ones * phase_center.dec
                phase_center = SkyCoord(ra_tab, dec_tab)
            else:
                if phase_center.size != self.times.size:
                    raise ValueError(
                        'Size of phase_center != times'
                    )
            print('UVW phase centered at RA={}, Dec={}'.format(
                phase_center.ra[0].deg,
                phase_center.dec[0].deg
                )
            )
        # Hour angles
        lstTime = lst(
            time=self.times,
            kind='apparent'
        )
        # Precess to the observation equinox before taking hour angles
        phase_center = toFK5(
            skycoord=phase_center,
            time=self.times
        )
        ha = lha(
            lst=lstTime,
            skycoord=phase_center
        )
        # Transformations
        self._uvw = np.zeros(
            (
                self.times.size,
                self.bsl.shape[0],
                3
            )
        )
        # Baseline vectors as columns: (3, n_baselines)
        xyz = np.array(self.bsl).T
        self._ha=ha
        for i in range(self.times.size):
            sr = np.sin(ha[i].rad)
            cr = np.cos(ha[i].rad)
            sd = np.sin(phase_center.dec[i].rad)
            cd = np.cos(phase_center.dec[i].rad)
            rot_uvw = np.array([
                [ sr, cr, 0],
                [-sd*cr, sd*sr, cd],
                [ cd*cr, -cd*sr, sd]
            ])
            self._uvw[i, ...] = - np.dot(rot_uvw, xyz).T
        return
# ============================================================= #
class Imager(UVW):
    """ Minimal PSF imager: grids a set of precomputed UVW points onto a
    regular grid and inverse-FFTs the sampling function to obtain the
    point spread function (see :meth:`make_psf`).
    """
    def __init__(self, crosslets, fov=60, tidx=None, ncpus=4):
        # crosslets: precomputed UVW coordinates, stored directly in
        # self._uvw; fov: field of view in degrees; ncpus must be a
        # perfect square (see ncpus.setter).
        # NOTE(review): UVW.__init__ is never called here, so baseline
        # attributes (self.bsl, ...) are not set -- confirm intended.
        self.skymodel = None
        self.srcnames = None
        self.fov = fov
        self.ncpus = ncpus
        self._uvw=crosslets
    # --------------------------------------------------------- #
    # --------------------- Getter/Setter --------------------- #
    @property
    def fov(self):
        # Field of view in degrees
        return self._fov
    @fov.setter
    def fov(self, f):
        # Direction-cosine limit corresponding to the field of view
        #lmmax = np.cos(np.radians(f))
        lmmax = np.cos(np.radians(90 - f/2))
        self.lmax = lmmax
        self.mmax = lmmax
        self._fov = f
        return
    @property
    def ncpus(self):
        return self._ncpus
    @ncpus.setter
    def ncpus(self, n):
        # Workers tile the image in a sqrt(n) x sqrt(n) grid of blocks
        if not np.sqrt(n).is_integer():
            raise ValueError(
                'Number of CPUs must be a sqrtable'
            )
        self._ncpus = n
        return
    @property
    def npix(self):
        return self._npix
    @npix.setter
    def npix(self, n):
        if not np.log2(n).is_integer():
            raise ValueError(
                'Number of pixels must be a power of 2'
            )
        self._npix = n
        return
    # --------------------------------------------------------- #
    # ------------------------ Methods ------------------------ #
    def make_psf(self, npix=None, freq=None):
        """ Make the PSF regarding the UV distribution
        :param npix:
            Size in pixels of the image
        :type npix: int, optional
        :param freq:
            Frequency in MHz
        :returns: (normalized PSF, sampling function), both 2D arrays
        """
        self.freq=freq
        if npix is None:
            # NOTE(review): requires self.npix to have been set beforehand
            npix = self.npix * 2
        uvw=self._uvw
        # Transform UVW to wavelength units at the requested frequency
        uvw = uvw[...] / wavelength(self.freq)
        # Prepare image parameters
        max_uv=np.max(np.abs(uvw[...,0:2])) # max uv to compute delta_u delta_v
        # Cell size on the sky set by the longest baseline (in degrees)
        cell_size_l = cell_size_m = np.rad2deg((1 / (2 * max_uv.value)))
        Nx = npix//2 #np.max([int(np.round(self.fov / cell_size_l)),npix//2])
        Ny = npix//2 #np.max([int(np.round(self.fov / cell_size_m)),npix//2])
        # Scale the uv values to pixel units for gridding
        uvwscaled=np.copy(uvw[...,0:2])
        uvwscaled[...,0]*=np.deg2rad(cell_size_l*Nx)
        uvwscaled[...,1]*=np.deg2rad(cell_size_m*Ny)
        uvw2=uvwscaled.reshape(-1, uvwscaled.shape[-1]) # ravel (Ntimes,Nbl,2) to (NtimesxNbl,2)
        print("(Time steps, baselines, 3)= (%d,%d,3) "%(uvw.shape[0],uvw.shape[1]))
        print("Total of %d visibilities"%(uvw2.shape[0]))
        tabulated_filter = AA_filter(5,63,"gaussian_sinc") # convolution kernel for convolutional gridding
        psf,samplingfunc = grid_ifft2(uvw2, Nx, Ny, tabulated_filter) # do the gridding (slow python)
        self.samplingfunc=samplingfunc
        self.psf = psf / psf.max()
        return self.psf,self.samplingfunc
class AA_filter:
    """
    Anti-aliasing filter used for convolutional gridding.

    Constructor arguments:
        filter_half_support --- half support (N) of the filter; the full
            support is N*2 + 1 taps
        filter_oversampling_factor --- number of sub-steps between grid
            steps (improves gridding/degridding accuracy)
        filter_type --- box (nearest-neighbour), sinc or gaussian_sinc
    """
    half_sup = 0
    oversample = 0
    full_sup_wo_padding = 0
    full_sup = 0
    no_taps = 0
    filter_taps = None
    def __init__(self, filter_half_support, filter_oversampling_factor, filter_type):
        self.half_sup = filter_half_support
        self.oversample = filter_oversampling_factor
        self.full_sup_wo_padding = filter_half_support * 2 + 1
        # Two extra taps of padding around the nominal support
        self.full_sup = self.full_sup_wo_padding + 2
        self.no_taps = self.full_sup + (self.full_sup - 1) * (filter_oversampling_factor - 1)
        # Tap abscissae, oversampled and centered on zero
        taps = np.arange(self.no_taps) / float(filter_oversampling_factor) - self.full_sup / 2
        if filter_type == "box":
            # Nearest-neighbour: 1 inside [-0.5, 0.5], 0 elsewhere
            inside = (taps >= -0.5) & (taps <= 0.5)
            self.filter_taps = np.where(inside, np.ones([len(taps)]), np.zeros([len(taps)]))
        elif filter_type == "sinc":
            self.filter_taps = np.sinc(taps)
        elif filter_type == "gaussian_sinc":
            alpha_1 = 1.55
            alpha_2 = 2.52
            # Tiny offset avoids 0/0 at the central tap
            shifted = taps + 0.00000000001
            self.filter_taps = (
                np.sin(np.pi / alpha_1 * shifted) / (np.pi * shifted)
                * np.exp(-(taps / alpha_2) ** 2)
            )
        else:
            raise ValueError("Expected one of 'box','sinc' or 'gaussian_sinc'")
def grid_ifft(vis, uvw, ref_lda, Nx, Ny, convolution_filter):
    """
    Convolutional gridder (continuum): resample visibilities onto a
    regular grid, then inverse-FFT to obtain the dirty image and PSF.

    Keyword arguments:
    vis --- visibilities as sampled by the interferometer,
            shape (n_row, n_chan, n_corr)
    uvw --- interferometer's scaled uvw coordinates, shape (n_row, 3)
            (Prerequisite: these uv points are already scaled by the
            similarity theorem, such that
            -N_x*Cell_l*0.5 <= theta_l <= N_x*Cell_l*0.5 and
            -N_y*Cell_m*0.5 <= theta_m <= N_y*Cell_m*0.5)
    ref_lda --- array of reference lambdas (size of vis channels)
    Nx,Ny --- size of image in pixels
    convolution_filter --- pre-instantiated AA_filter anti-aliasing
                           filter object

    Returns (dirty, psf) of shapes (n_corr, Ny, Nx) and (2*Ny, 2*Nx).
    """
    assert vis.shape[1] == ref_lda.shape[0], (vis.shape[1], ref_lda.shape[0])
    filter_index = \
        np.arange(-convolution_filter.half_sup,convolution_filter.half_sup+1)
    # one grid for the resampled visibilities per correlation
    # BUGFIX: `np.complex` was removed in NumPy 1.24 -> builtin `complex`
    measurement_regular = \
        np.zeros([vis.shape[2],Ny,Nx],dtype=complex)
    # for deconvolution the PSF should be 2x size of the image (see
    # Hogbom CLEAN for details), one grid for the sampling function:
    sampling_regular = \
        np.zeros([2*Ny,2*Nx],dtype=complex)
    # BUGFIX: `np.xrange` does not exist -> builtin `range`
    for r in range(uvw.shape[0]):
        for c in range(vis.shape[1]):
            scaled_uv = uvw[r,:] / ref_lda[c]
            disc_u = int(np.round(scaled_uv[0]))
            disc_v = int(np.round(scaled_uv[1]))
            # fractional offsets select the oversampled filter taps
            frac_u_offset = int((1 + convolution_filter.half_sup +
                                 (-scaled_uv[0] + disc_u)) *
                                convolution_filter.oversample)
            frac_v_offset = int((1 + convolution_filter.half_sup +
                                 (-scaled_uv[1] + disc_v)) *
                                convolution_filter.oversample)
            # PSF grid is twice the size, so uv coordinates are doubled
            disc_u_psf = int(np.round(scaled_uv[0]*2))
            disc_v_psf = int(np.round(scaled_uv[1]*2))
            frac_u_offset_psf = int((1 + convolution_filter.half_sup +
                                     (-scaled_uv[0]*2 + disc_u_psf)) *
                                    convolution_filter.oversample)
            frac_v_offset_psf = int((1 + convolution_filter.half_sup +
                                     (-scaled_uv[1]*2 + disc_v_psf)) *
                                    convolution_filter.oversample)
            # skip samples whose support would fall outside the grid
            if (disc_v + Ny // 2 + convolution_filter.half_sup >= Ny or
                disc_u + Nx // 2 + convolution_filter.half_sup >= Nx or
                disc_v + Ny // 2 - convolution_filter.half_sup < 0 or
                disc_u + Nx // 2 - convolution_filter.half_sup < 0):
                continue
            for conv_v in filter_index:
                v_tap = \
                    convolution_filter.filter_taps[conv_v *
                                                   convolution_filter.oversample
                                                   + frac_v_offset]
                v_tap_psf = \
                    convolution_filter.filter_taps[conv_v *
                                                   convolution_filter.oversample
                                                   + frac_v_offset_psf]
                grid_pos_v = disc_v + conv_v + Ny // 2
                grid_pos_v_psf = disc_v_psf + conv_v + Ny
                for conv_u in filter_index:
                    u_tap = \
                        convolution_filter.filter_taps[conv_u *
                                                       convolution_filter.oversample
                                                       + frac_u_offset]
                    u_tap_psf = \
                        convolution_filter.filter_taps[conv_u *
                                                       convolution_filter.oversample
                                                       + frac_u_offset_psf]
                    conv_weight = v_tap * u_tap
                    conv_weight_psf = v_tap_psf * u_tap_psf
                    grid_pos_u = disc_u + conv_u + Nx // 2
                    grid_pos_u_psf = disc_u_psf + conv_u + Nx
                    for p in range(vis.shape[2]):
                        measurement_regular[p, grid_pos_v, grid_pos_u] += \
                            vis[r, c, p] * conv_weight
                    # assuming the PSF is the same for different correlations:
                    sampling_regular[grid_pos_v_psf, grid_pos_u_psf] += \
                        (1+0.0j) * conv_weight_psf
    dirty = np.zeros(measurement_regular.shape, dtype=measurement_regular.dtype)
    psf = np.zeros(sampling_regular.shape, dtype=sampling_regular.dtype)
    for p in range(vis.shape[2]):
        dirty[p,:,:] = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(measurement_regular[p,:,:])))
    psf[:,:] = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(sampling_regular[:,:])))
    return dirty,psf
def grid_ifft2(uvw, Nx, Ny, convolution_filter):
    """
    Convolutional gridder (continuum) for the sampling function only:
    grid a unit weight at each uv point and inverse-FFT to get the PSF.

    Keyword arguments:
    uvw --- interferometer's scaled uvw coordinates, shape (n_row, >=2);
            entries expose a ``.value`` attribute (astropy quantities)
            (Prerequisite: these uv points are already scaled by the
            similarity theorem, such that
            -N_x*Cell_l*0.5 <= theta_l <= N_x*Cell_l*0.5 and
            -N_y*Cell_m*0.5 <= theta_m <= N_y*Cell_m*0.5)
    Nx,Ny --- size of image in pixels
    convolution_filter --- pre-instantiated AA_filter anti-aliasing
                           filter object

    Returns (psf, sampling_regular), both of shape (2*Ny, 2*Nx).
    (Cleanup: removed an unused function-local matplotlib import.)
    """
    filter_index = \
        np.arange(-convolution_filter.half_sup,convolution_filter.half_sup+1)
    # for deconvolution the PSF should be 2x size of the image (see
    # Hogbom CLEAN for details), one grid for the sampling function
    # BUGFIX: `np.complex` was removed in NumPy 1.24 -> builtin `complex`
    sampling_regular = \
        np.zeros([2*Ny,2*Nx],dtype=complex)
    for r in range(uvw.shape[0]):
        scaled_uv = uvw[r,:]
        if scaled_uv[0].value == 0 and scaled_uv[1].value == 0: # skipping null frequency
            continue
        # PSF grid is twice the image size, so uv coordinates are doubled
        disc_u_psf = int(np.round(scaled_uv[0].value*2))
        disc_v_psf = int(np.round(scaled_uv[1].value*2))
        frac_u_offset_psf = int((1 + convolution_filter.half_sup +
                                 (-scaled_uv[0].value*2 + disc_u_psf)) *
                                convolution_filter.oversample)
        frac_v_offset_psf = int((1 + convolution_filter.half_sup +
                                 (-scaled_uv[1].value*2 + disc_v_psf)) *
                                convolution_filter.oversample)
        for conv_v in filter_index:
            v_tap_psf = \
                convolution_filter.filter_taps[conv_v *
                                               convolution_filter.oversample
                                               + frac_v_offset_psf]
            grid_pos_v_psf = disc_v_psf + conv_v + Ny
            for conv_u in filter_index:
                u_tap_psf = \
                    convolution_filter.filter_taps[conv_u *
                                                   convolution_filter.oversample
                                                   + frac_u_offset_psf]
                conv_weight_psf = v_tap_psf * u_tap_psf
                grid_pos_u_psf = disc_u_psf + conv_u + Nx
                # assuming the PSF is the same for different correlations.
                # NOTE(review): negative grid positions pass this abs()
                # check and wrap around via negative indexing -- confirm
                # this aliasing is intended.
                if np.abs(grid_pos_v_psf) < sampling_regular.shape[0] and np.abs(grid_pos_u_psf) < sampling_regular.shape[1]:
                    sampling_regular[grid_pos_v_psf, grid_pos_u_psf] += \
                        (1+0.0j) * conv_weight_psf
    psf = np.zeros(sampling_regular.shape, dtype=sampling_regular.dtype)
    psf[:,:] = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(sampling_regular[:,:])))
    return psf,sampling_regular
def RandomPointing(N,Elevmin=0.,Obsmin=2):
    """Draw N random (declination, hour-angle start) pointings visible from MeerKAT.

    Parameters
    ----------
    N : int
        Number of random pointings to draw.
    Elevmin : float
        Minimum elevation (degrees) a source must reach to be considered.
    Obsmin : float
        Minimum observation length (hours) that must fit inside the
        visibility window.

    Returns
    -------
    (tabDECsources, tabHAstart) : tuple of ndarray
        Declinations (degrees) and starting hour angles for each pointing.
    """
    lat = MeerKATarr.Loc.lat.value  # observatory latitude in degrees
    deltaDec = 0.5  # declination grid step, in degrees
    deltaHA = 0.1   # hour-angle grid step, in hours
    tabDec = np.arange(-90., 90., deltaDec)
    tabHA = np.arange(-12, 12, deltaHA)
    tabtabDec, tabtabHA = np.meshgrid(tabDec, tabHA)
    # Elevation over the (HA, DEC) grid:
    # sin(el) = cos(HA)*cos(DEC)*cos(lat) + sin(DEC)*sin(lat)
    elev = np.degrees(np.arcsin(
        np.cos(np.radians(tabtabHA*15.))*np.cos(np.radians(tabtabDec))*np.cos(np.radians(lat))
        + np.sin(np.radians(tabtabDec))*np.sin(np.radians(lat))))
    # Keep only grid points above the minimum elevation; NaN out the rest.
    # (np.nan: the np.NaN alias was removed in NumPy 2.0.)
    elev2 = elev.copy()*np.nan
    mask = np.where(elev > Elevmin)
    elev2[mask] = elev[mask]
    Nelev = len(tabDec)
    tabHAmax = np.zeros(Nelev)
    tabHAmin = np.zeros(Nelev)
    # BUGFIX: tabwidth was referenced below without ever being defined,
    # raising NameError whenever the two-minima branch triggered.
    tabwidth = np.zeros(Nelev)
    for i in range(Nelev):
        if np.all(np.isnan(elev2[:, i])):
            # This declination never rises above Elevmin at this latitude.
            continue
        tmpmin = np.where(elev2[:, i] == np.nanmin(elev2[:, i]))
        tmpmax = np.where(elev2[:, i] == np.nanmax(elev2[:, i]))
        # NOTE(review): these store *grid indices* into tabHA, not hours;
        # downstream arithmetic mixes them with Obsmin (hours) -- confirm.
        tmpHAmax = tmpmin[0][0]
        tmpHAmin = tmpmax[0][0]
        tabHAmin[i] = tmpHAmin
        tabHAmax[i] = tmpHAmax
        if len(tmpmin[0]) == 2:
            tabwidth[i] = (tmpmin[0][1]-tmpmin[0][0])
    # presumably the [0:-61] trims declinations too close to the pole -- verify
    SelectHA = tabHA[tabHAmax.astype(int)][0:-61]
    wloc = np.where(np.abs(SelectHA) >= Obsmin)
    d = wloc[0][-1]
    DEC_upperlimits = tabDec[d]
    # Draw declinations uniformly in [-90, DEC_upperlimits].
    tabDECsources = np.random.rand(N)*(DEC_upperlimits+90)-90
    tabHAstart = np.zeros(N)
    for i in range(N):
        # BUGFIX: index tabHAmax/tabHAmin by the grid *index* of the nearest
        # declination. The old code used the declination value itself as an
        # index, which is negative for half the sky and silently wrapped
        # around the array.
        idec = np.abs(tabDec - tabDECsources[i]).argmin()
        tmpHAstart = np.random.rand()*(tabHAmax[idec]-Obsmin-tabHAmin[idec])+tabHAmin[idec]
        tabHAstart[i] = tmpHAstart
    return tabDECsources, tabHAstart
def compute(N,Npix,pixelscale,Obslength=2.,Timedelta=300,Elevmin=0,F=1420,split=False):
    """Simulate PSFs and sampling functions for N random MeerKAT pointings.

    For each pointing drawn by RandomPointing, computes the uv coverage and
    grids it into a PSF via the Imager helper. With split=False each pointing
    yields one PSF for the full observation; with split=True each pointing
    yields a list of per-time-step PSFs.

    Parameters:
        N          -- number of random pointings to simulate.
        Npix       -- image size in pixels.
        pixelscale -- pixel scale in arcseconds (FOV = pixelscale*Npix/3600 deg).
        Obslength  -- observation length in hours.
        Timedelta  -- integration time per uv sample, in seconds.
        Elevmin    -- minimum elevation (degrees) passed to RandomPointing.
        F          -- observing frequency (presumably MHz, 1420 = HI line -- TODO confirm).
        split      -- if True, return per-time-step PSF lists instead of one PSF per pointing.

    Returns:
        (tabPSFList, tabSamplingList, tabDEC, tabHAstart)
    """
    FOV=pixelscale*Npix/3600*1.0 # Desired Field of View of the image (in degrees) ===> gives size of pixel on the sky. Should be compatible with galaxy size range in degrees.
    tabDEC,tabHAstart=RandomPointing(N,Elevmin,Obsmin=Obslength)
    tabPSFList=[]
    tabSamplingList=[]
    Ndates=Obslength*3600./Timedelta #Timedelta in seconds
    uvw=UVW(MeerKATarr.Array,F) # setting times, array and frequency
    for icase in range(N):
        tmpdec=np.radians(tabDEC[icase])
        if split == False:
            # All hour angles for the full observation in one array (degrees*15 -> radians).
            tmptabHA=np.radians((np.arange(Ndates)*Timedelta*1./3600+tabHAstart[icase])*15.)
            uvw.compute(tmptabHA,tmpdec) # computing uv coverage from pointing
            tmpimg=Imager(uvw.uvw[...,0:2],fov=FOV) # preparing the imager
            tmpPSF,tmpSampling=tmpimg.make_psf(npix=Npix,freq=F) # gridding
            tabPSFList.append(tmpPSF)
            tabSamplingList.append(tmpSampling)
            del tmpimg
        else:
            # One PSF/sampling pair per time step, collected into sub-lists.
            tabPSF=[]
            tabSampling=[]
            for idate in range(int(Ndates)):
                tmptabHA=np.radians((idate*Timedelta*1./3600+tabHAstart[icase])*15.)
                uvw.compute(tmptabHA,tmpdec) # computing uv coverage from pointing
                tmpimg=Imager(uvw.uvw[...,0:2],fov=FOV) # preparing the imager
                tmpPSF,tmpSampling=tmpimg.make_psf(npix=Npix,freq=F) # gridding
                tabPSF.append(tmpPSF)
                tabSampling.append(tmpSampling)
            tabPSFList.append(tabPSF)
            tabSamplingList.append(tabSampling)
            #print(np.degrees(tmptabHA))
            #print(tmpdec)
            del tmpimg
    del uvw
    #Pointing_RA=5.4 # Right ascension in degrees. (15° = 1h of RA)
    #Pointing_DEC=-30.83 # Declination in degrees.
    #Pointing=SkyCoord(Pointing_RA,Pointing_DEC,unit='deg') # generating pointing object
    # Time settings
    #Obs_start='2020-10-05T20:00:00' # in Universal Time (UT)
    #Obs_end='2020-10-05T20:05:00'
    #tstart=Time(Obs_start,format='isot',scale='utc')
    #tend=Time(Obs_end,format='isot',scale='utc')
    # Time intervals and time integration
    #delta_t=300. # integration time per uv component (small will increase computing time for long observations)
    #tstep=TimeDelta(delta_t,format='sec') # using convenient time object
    #Ndates=round(((tend-tstart)/tstep).value) # number of time steps
    #timetab=Time([tstart+i*tstep for i in range(int(Ndates))])
    # Setting
    return tabPSFList,tabSamplingList,tabDEC,tabHAstart
# Script entry point. NOTE(review): `sys` and `main` are not imported/defined
# in the visible chunk -- presumably provided by the wildcard imports at the
# top of the file; confirm.
if __name__ == '__main__':
    sys.exit(main())
| [
"logging.getLogger",
"numpy.radians",
"numpy.xrange",
"numpy.sqrt",
"numpy.random.rand",
"numpy.array",
"numpy.sin",
"numpy.nanmin",
"numpy.arange",
"numpy.isscalar",
"astropy.coordinates.Angle",
"numpy.where",
"numpy.ctypeslib.as_array",
"numpy.exp",
"numpy.dot",
"numpy.nanmax",
"nu... | [((310, 337), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (327, 337), False, 'import logging\n'), ((641, 663), 'MeerKAT.MeerKATarray', 'MeerKAT.MeerKATarray', ([], {}), '()\n', (661, 663), False, 'import MeerKAT\n'), ((3222, 3242), 'astropy.coordinates.Angle', 'Angle', (['(360.0 * u.deg)'], {}), '(360.0 * u.deg)\n', (3227, 3242), False, 'from astropy.coordinates import EarthLocation, Angle, AltAz, ICRS, Longitude, FK5, SkyCoord\n'), ((5577, 5637), 'astropy.coordinates.AltAz', 'AltAz', ([], {'az': 'az', 'alt': 'alt', 'location': 'MeerKATarr.Loc', 'obstime': 'time'}), '(az=az, alt=alt, location=MeerKATarr.Loc, obstime=time)\n', (5582, 5637), False, 'from astropy.coordinates import EarthLocation, Angle, AltAz, ICRS, Longitude, FK5, SkyCoord\n'), ((9357, 9384), 'numpy.ctypeslib.as_array', 'np.ctypeslib.as_array', (['arr1'], {}), '(arr1)\n', (9378, 9384), True, 'import numpy as np\n'), ((9397, 9424), 'numpy.ctypeslib.as_array', 'np.ctypeslib.as_array', (['arr2'], {}), '(arr2)\n', (9418, 9424), True, 'import numpy as np\n'), ((9536, 9579), 'numexpr.evaluate', 'ne.evaluate', (['"""exp(2j*pi*(uarr*lg+varr*mg))"""'], {}), "('exp(2j*pi*(uarr*lg+varr*mg))')\n", (9547, 9579), True, 'import numexpr as ne\n'), ((10786, 10823), 'numpy.ctypeslib.as_array', 'np.ctypeslib.as_array', (['shared_array_r'], {}), '(shared_array_r)\n', (10807, 10823), True, 'import numpy as np\n'), ((10839, 10876), 'numpy.ctypeslib.as_array', 'np.ctypeslib.as_array', (['shared_array_i'], {}), '(shared_array_i)\n', (10860, 10876), True, 'import numpy as np\n'), ((27831, 27903), 'numpy.arange', 'np.arange', (['(-convolution_filter.half_sup)', '(convolution_filter.half_sup + 1)'], {}), '(-convolution_filter.half_sup, convolution_filter.half_sup + 1)\n', (27840, 27903), True, 'import numpy as np\n'), ((28000, 28050), 'numpy.zeros', 'np.zeros', (['[vis.shape[2], Ny, Nx]'], {'dtype': 'np.complex'}), '([vis.shape[2], Ny, Nx], dtype=np.complex)\n', (28008, 28050), True, 
'import numpy as np\n'), ((28219, 28263), 'numpy.zeros', 'np.zeros', (['[2 * Ny, 2 * Nx]'], {'dtype': 'np.complex'}), '([2 * Ny, 2 * Nx], dtype=np.complex)\n', (28227, 28263), True, 'import numpy as np\n'), ((28271, 28294), 'numpy.xrange', 'np.xrange', (['uvw.shape[0]'], {}), '(uvw.shape[0])\n', (28280, 28294), True, 'import numpy as np\n'), ((31513, 31581), 'numpy.zeros', 'np.zeros', (['measurement_regular.shape'], {'dtype': 'measurement_regular.dtype'}), '(measurement_regular.shape, dtype=measurement_regular.dtype)\n', (31521, 31581), True, 'import numpy as np\n'), ((31592, 31654), 'numpy.zeros', 'np.zeros', (['sampling_regular.shape'], {'dtype': 'sampling_regular.dtype'}), '(sampling_regular.shape, dtype=sampling_regular.dtype)\n', (31600, 31654), True, 'import numpy as np\n'), ((32572, 32644), 'numpy.arange', 'np.arange', (['(-convolution_filter.half_sup)', '(convolution_filter.half_sup + 1)'], {}), '(-convolution_filter.half_sup, convolution_filter.half_sup + 1)\n', (32581, 32644), True, 'import numpy as np\n'), ((32814, 32858), 'numpy.zeros', 'np.zeros', (['[2 * Ny, 2 * Nx]'], {'dtype': 'np.complex'}), '([2 * Ny, 2 * Nx], dtype=np.complex)\n', (32822, 32858), True, 'import numpy as np\n'), ((34713, 34775), 'numpy.zeros', 'np.zeros', (['sampling_regular.shape'], {'dtype': 'sampling_regular.dtype'}), '(sampling_regular.shape, dtype=sampling_regular.dtype)\n', (34721, 34775), True, 'import numpy as np\n'), ((35043, 35075), 'numpy.arange', 'np.arange', (['(-90.0)', '(90.0)', 'deltaDec'], {}), '(-90.0, 90.0, deltaDec)\n', (35052, 35075), True, 'import numpy as np\n'), ((35082, 35109), 'numpy.arange', 'np.arange', (['(-12)', '(12)', 'deltaHA'], {}), '(-12, 12, deltaHA)\n', (35091, 35109), True, 'import numpy as np\n'), ((35132, 35158), 'numpy.meshgrid', 'np.meshgrid', (['tabDec', 'tabHA'], {}), '(tabDec, tabHA)\n', (35143, 35158), True, 'import numpy as np\n'), ((35369, 35393), 'numpy.where', 'np.where', (['(elev > Elevmin)'], {}), '(elev > Elevmin)\n', (35377, 
35393), True, 'import numpy as np\n'), ((35457, 35472), 'numpy.zeros', 'np.zeros', (['Nelev'], {}), '(Nelev)\n', (35465, 35472), True, 'import numpy as np\n'), ((35486, 35501), 'numpy.zeros', 'np.zeros', (['Nelev'], {}), '(Nelev)\n', (35494, 35501), True, 'import numpy as np\n'), ((36187, 36198), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (36195, 36198), True, 'import numpy as np\n'), ((1711, 1727), 'numpy.isscalar', 'np.isscalar', (['lst'], {}), '(lst)\n', (1722, 1727), True, 'import numpy as np\n'), ((1843, 1865), 'astropy.coordinates.Longitude', 'Longitude', (['lst', '"""hour"""'], {}), "(lst, 'hour')\n", (1852, 1865), False, 'from astropy.coordinates import EarthLocation, Angle, AltAz, ICRS, Longitude, FK5, SkyCoord\n'), ((5555, 5565), 'astropy.time.Time', 'Time', (['time'], {}), '(time)\n', (5559, 5565), False, 'from astropy.time import Time, TimeDelta\n'), ((6307, 6317), 'astropy.time.Time', 'Time', (['time'], {}), '(time)\n', (6311, 6317), False, 'from astropy.time import Time, TimeDelta\n'), ((8668, 8685), 'astropy.coordinates.FK5', 'FK5', ([], {'equinox': 'time'}), '(equinox=time)\n', (8671, 8685), False, 'from astropy.coordinates import EarthLocation, Angle, AltAz, ICRS, Longitude, FK5, SkyCoord\n'), ((9821, 9855), 'numpy.zeros', 'np.zeros', (['(u.shape[0], npix, npix)'], {}), '((u.shape[0], npix, npix))\n', (9829, 9855), True, 'import numpy as np\n'), ((9909, 9932), 'numpy.zeros_like', 'np.zeros_like', (['result_r'], {}), '(result_r)\n', (9922, 9932), True, 'import numpy as np\n'), ((10175, 10189), 'numpy.sqrt', 'np.sqrt', (['ncpus'], {}), '(ncpus)\n', (10182, 10189), True, 'import numpy as np\n'), ((15550, 15591), 'numpy.zeros', 'np.zeros', (['(ha.size, self.bsl.shape[0], 3)'], {}), '((ha.size, self.bsl.shape[0], 3))\n', (15558, 15591), True, 'import numpy as np\n'), ((20128, 20177), 'numpy.zeros', 'np.zeros', (['(self.times.size, self.bsl.shape[0], 3)'], {}), '((self.times.size, self.bsl.shape[0], 3))\n', (20136, 20177), True, 'import numpy as 
np\n'), ((24326, 24360), 'numpy.rad2deg', 'np.rad2deg', (['(1 / (2 * max_uv.value))'], {}), '(1 / (2 * max_uv.value))\n', (24336, 24360), True, 'import numpy as np\n'), ((24700, 24722), 'numpy.copy', 'np.copy', (['uvw[..., 0:2]'], {}), '(uvw[..., 0:2])\n', (24707, 24722), True, 'import numpy as np\n'), ((24748, 24776), 'numpy.deg2rad', 'np.deg2rad', (['(cell_size_l * Nx)'], {}), '(cell_size_l * Nx)\n', (24758, 24776), True, 'import numpy as np\n'), ((24834, 24862), 'numpy.deg2rad', 'np.deg2rad', (['(cell_size_m * Ny)'], {}), '(cell_size_m * Ny)\n', (24844, 24862), True, 'import numpy as np\n'), ((28313, 28336), 'numpy.xrange', 'np.xrange', (['vis.shape[1]'], {}), '(vis.shape[1])\n', (28322, 28336), True, 'import numpy as np\n'), ((37045, 37070), 'numpy.radians', 'np.radians', (['tabDEC[icase]'], {}), '(tabDEC[icase])\n', (37055, 37070), True, 'import numpy as np\n'), ((9758, 9772), 'numpy.sqrt', 'np.sqrt', (['ncpus'], {}), '(ncpus)\n', (9765, 9772), True, 'import numpy as np\n'), ((12311, 12329), 'numpy.tril_indices', 'np.tril_indices', (['m'], {}), '(m)\n', (12326, 12329), True, 'import numpy as np\n'), ((15770, 15788), 'numpy.array', 'np.array', (['self.bsl'], {}), '(self.bsl)\n', (15778, 15788), True, 'import numpy as np\n'), ((16146, 16156), 'numpy.sin', 'np.sin', (['ha'], {}), '(ha)\n', (16152, 16156), True, 'import numpy as np\n'), ((16174, 16184), 'numpy.cos', 'np.cos', (['ha'], {}), '(ha)\n', (16180, 16184), True, 'import numpy as np\n'), ((16202, 16213), 'numpy.sin', 'np.sin', (['dec'], {}), '(dec)\n', (16208, 16213), True, 'import numpy as np\n'), ((16231, 16242), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (16237, 16242), True, 'import numpy as np\n'), ((16265, 16338), 'numpy.array', 'np.array', (['[[sr, cr, 0], [-sd * cr, sd * sr, cd], [cd * cr, -cd * sr, sd]]'], {}), '([[sr, cr, 0], [-sd * cr, sd * sr, cd], [cd * cr, -cd * sr, sd]])\n', (16273, 16338), True, 'import numpy as np\n'), ((20356, 20374), 'numpy.array', 'np.array', (['self.bsl'], {}), 
'(self.bsl)\n', (20364, 20374), True, 'import numpy as np\n'), ((20743, 20760), 'numpy.sin', 'np.sin', (['ha[i].rad'], {}), '(ha[i].rad)\n', (20749, 20760), True, 'import numpy as np\n'), ((20778, 20795), 'numpy.cos', 'np.cos', (['ha[i].rad'], {}), '(ha[i].rad)\n', (20784, 20795), True, 'import numpy as np\n'), ((20813, 20844), 'numpy.sin', 'np.sin', (['phase_center.dec[i].rad'], {}), '(phase_center.dec[i].rad)\n', (20819, 20844), True, 'import numpy as np\n'), ((20862, 20893), 'numpy.cos', 'np.cos', (['phase_center.dec[i].rad'], {}), '(phase_center.dec[i].rad)\n', (20868, 20893), True, 'import numpy as np\n'), ((20916, 20989), 'numpy.array', 'np.array', (['[[sr, cr, 0], [-sd * cr, sd * sr, cd], [cd * cr, -cd * sr, sd]]'], {}), '([[sr, cr, 0], [-sd * cr, sd * sr, cd], [cd * cr, -cd * sr, sd]])\n', (20924, 20989), True, 'import numpy as np\n'), ((22700, 22722), 'numpy.radians', 'np.radians', (['(90 - f / 2)'], {}), '(90 - f / 2)\n', (22710, 22722), True, 'import numpy as np\n'), ((24231, 24252), 'numpy.abs', 'np.abs', (['uvw[..., 0:2]'], {}), '(uvw[..., 0:2])\n', (24237, 24252), True, 'import numpy as np\n'), ((31833, 31873), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['sampling_regular[:, :]'], {}), '(sampling_regular[:, :])\n', (31849, 31873), True, 'import numpy as np\n'), ((33053, 33085), 'numpy.round', 'np.round', (['(scaled_uv[0].value * 2)'], {}), '(scaled_uv[0].value * 2)\n', (33061, 33085), True, 'import numpy as np\n'), ((33110, 33142), 'numpy.round', 'np.round', (['(scaled_uv[1].value * 2)'], {}), '(scaled_uv[1].value * 2)\n', (33118, 33142), True, 'import numpy as np\n'), ((34821, 34861), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['sampling_regular[:, :]'], {}), '(sampling_regular[:, :])\n', (34837, 34861), True, 'import numpy as np\n'), ((35548, 35569), 'numpy.isnan', 'np.isnan', (['elev2[:, i]'], {}), '(elev2[:, i])\n', (35556, 35569), True, 'import numpy as np\n'), ((36021, 36037), 'numpy.abs', 'np.abs', (['SelectHA'], {}), '(SelectHA)\n', (36027, 
36037), True, 'import numpy as np\n'), ((36129, 36146), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (36143, 36146), True, 'import numpy as np\n'), ((6487, 6506), 'numpy.zeros', 'np.zeros', (['time.size'], {}), '(time.size)\n', (6495, 6506), True, 'import numpy as np\n'), ((16539, 16552), 'numpy.sin', 'np.sin', (['ha[i]'], {}), '(ha[i])\n', (16545, 16552), True, 'import numpy as np\n'), ((16574, 16587), 'numpy.cos', 'np.cos', (['ha[i]'], {}), '(ha[i])\n', (16580, 16587), True, 'import numpy as np\n'), ((16609, 16620), 'numpy.sin', 'np.sin', (['dec'], {}), '(dec)\n', (16615, 16620), True, 'import numpy as np\n'), ((16642, 16653), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (16648, 16653), True, 'import numpy as np\n'), ((16680, 16753), 'numpy.array', 'np.array', (['[[sr, cr, 0], [-sd * cr, sd * sr, cd], [cd * cr, -cd * sr, sd]]'], {}), '([[sr, cr, 0], [-sd * cr, sd * sr, cd], [cd * cr, -cd * sr, sd]])\n', (16688, 16753), True, 'import numpy as np\n'), ((19224, 19248), 'numpy.ones', 'np.ones', (['self.times.size'], {}), '(self.times.size)\n', (19231, 19248), True, 'import numpy as np\n'), ((19378, 19403), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['ra_tab', 'dec_tab'], {}), '(ra_tab, dec_tab)\n', (19386, 19403), False, 'from astropy.coordinates import EarthLocation, Angle, AltAz, ICRS, Longitude, FK5, SkyCoord\n'), ((26374, 26397), 'numpy.arange', 'np.arange', (['self.no_taps'], {}), '(self.no_taps)\n', (26383, 26397), True, 'import numpy as np\n'), ((26708, 26721), 'numpy.sinc', 'np.sinc', (['taps'], {}), '(taps)\n', (26715, 26721), True, 'import numpy as np\n'), ((28409, 28431), 'numpy.round', 'np.round', (['scaled_uv[0]'], {}), '(scaled_uv[0])\n', (28417, 28431), True, 'import numpy as np\n'), ((28458, 28480), 'numpy.round', 'np.round', (['scaled_uv[1]'], {}), '(scaled_uv[1])\n', (28466, 28480), True, 'import numpy as np\n'), ((28893, 28919), 'numpy.round', 'np.round', (['(scaled_uv[0] * 2)'], {}), '(scaled_uv[0] * 2)\n', (28901, 28919), 
True, 'import numpy as np\n'), ((28948, 28974), 'numpy.round', 'np.round', (['(scaled_uv[1] * 2)'], {}), '(scaled_uv[1] * 2)\n', (28956, 28974), True, 'import numpy as np\n'), ((31742, 31788), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['measurement_regular[p, :, :]'], {}), '(measurement_regular[p, :, :])\n', (31758, 31788), True, 'import numpy as np\n'), ((35661, 35683), 'numpy.nanmin', 'np.nanmin', (['elev2[:, i]'], {}), '(elev2[:, i])\n', (35670, 35683), True, 'import numpy as np\n'), ((35722, 35744), 'numpy.nanmax', 'np.nanmax', (['elev2[:, i]'], {}), '(elev2[:, i])\n', (35731, 35744), True, 'import numpy as np\n'), ((36312, 36328), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (36326, 36328), True, 'import numpy as np\n'), ((37666, 37737), 'numpy.radians', 'np.radians', (['((idate * Timedelta * 1.0 / 3600 + tabHAstart[icase]) * 15.0)'], {}), '((idate * Timedelta * 1.0 / 3600 + tabHAstart[icase]) * 15.0)\n', (37676, 37737), True, 'import numpy as np\n'), ((6524, 6542), 'numpy.ones', 'np.ones', (['time.size'], {}), '(time.size)\n', (6531, 6542), True, 'import numpy as np\n'), ((12662, 12681), 'numpy.array', 'np.array', (['[f.value]'], {}), '([f.value])\n', (12670, 12681), True, 'import numpy as np\n'), ((16438, 16458), 'numpy.dot', 'np.dot', (['rot_uvw', 'xyz'], {}), '(rot_uvw, xyz)\n', (16444, 16458), True, 'import numpy as np\n'), ((21217, 21237), 'numpy.dot', 'np.dot', (['rot_uvw', 'xyz'], {}), '(rot_uvw, xyz)\n', (21223, 21237), True, 'import numpy as np\n'), ((22932, 22942), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (22939, 22942), True, 'import numpy as np\n'), ((23353, 23363), 'numpy.log2', 'np.log2', (['n'], {}), '(n)\n', (23360, 23363), True, 'import numpy as np\n'), ((36249, 36282), 'numpy.abs', 'np.abs', (['(tabDec - tabDECsources[i])'], {}), '(tabDec - tabDECsources[i])\n', (36255, 36282), True, 'import numpy as np\n'), ((16985, 17005), 'numpy.dot', 'np.dot', (['rot_uvw', 'xyz'], {}), '(rot_uvw, xyz)\n', (16991, 17005), True, 'import 
numpy as np\n'), ((26920, 26950), 'numpy.exp', 'np.exp', (['(-(taps / alpha_2) ** 2)'], {}), '(-(taps / alpha_2) ** 2)\n', (26926, 26950), True, 'import numpy as np\n'), ((34466, 34488), 'numpy.abs', 'np.abs', (['grid_pos_v_psf'], {}), '(grid_pos_v_psf)\n', (34472, 34488), True, 'import numpy as np\n'), ((34521, 34543), 'numpy.abs', 'np.abs', (['grid_pos_u_psf'], {}), '(grid_pos_u_psf)\n', (34527, 34543), True, 'import numpy as np\n'), ((35258, 35273), 'numpy.radians', 'np.radians', (['lat'], {}), '(lat)\n', (35268, 35273), True, 'import numpy as np\n'), ((35282, 35303), 'numpy.radians', 'np.radians', (['tabtabDec'], {}), '(tabtabDec)\n', (35292, 35303), True, 'import numpy as np\n'), ((35312, 35327), 'numpy.radians', 'np.radians', (['lat'], {}), '(lat)\n', (35322, 35327), True, 'import numpy as np\n'), ((26848, 26888), 'numpy.sin', 'np.sin', (['(np.pi / alpha_1 * (taps + 1e-11))'], {}), '(np.pi / alpha_1 * (taps + 1e-11))\n', (26854, 26888), True, 'import numpy as np\n'), ((35195, 35222), 'numpy.radians', 'np.radians', (['(tabtabHA * 15.0)'], {}), '(tabtabHA * 15.0)\n', (35205, 35222), True, 'import numpy as np\n'), ((35228, 35249), 'numpy.radians', 'np.radians', (['tabtabDec'], {}), '(tabtabDec)\n', (35238, 35249), True, 'import numpy as np\n'), ((37132, 37149), 'numpy.arange', 'np.arange', (['Ndates'], {}), '(Ndates)\n', (37141, 37149), True, 'import numpy as np\n')] |
# coding: utf-8
# In[14]:
import numpy as np
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier as KNC
# Load the iris dataset: 150 samples, 4 features, 3 classes.
iris = datasets.load_iris()
x = iris.data
y = iris.target
np.unique(y)  # the three class labels (0, 1, 2)
# Fixed seed so the shuffled train/test split is reproducible.
np.random.seed(123)
indices = np.random.permutation(len(x))
# Hold out the last 10 shuffled samples for testing.
iris_x_train = x[indices[:-10]]
iris_y_train = y[indices[:-10]]
iris_x_test = x[indices[-10:]]
iris_y_test = y[indices[-10:]]
# BUGFIX: previously a configured KNC(...) instance was created AFTER
# model.fit() and its result discarded, so those settings never took effect.
# Construct the classifier with the intended hyper-parameters up front.
model = KNC(algorithm='auto', leaf_size=30, metric='minkowski',
            metric_params=None, n_jobs=1, n_neighbors=5, p=2, weights='uniform')
model.fit(iris_x_train, iris_y_train)
out = model.predict(iris_x_test)
print("predicted:", out)
print("True      :",iris_y_test)
| [
"sklearn.datasets.load_iris",
"numpy.random.seed",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.unique"
] | [((143, 163), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (161, 163), False, 'from sklearn import datasets\n'), ((192, 204), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (201, 204), True, 'import numpy as np\n'), ((205, 224), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (219, 224), True, 'import numpy as np\n'), ((399, 404), 'sklearn.neighbors.KNeighborsClassifier', 'KNC', ([], {}), '()\n', (402, 404), True, 'from sklearn.neighbors import KNeighborsClassifier as KNC\n'), ((444, 572), 'sklearn.neighbors.KNeighborsClassifier', 'KNC', ([], {'algorithm': '"""auto"""', 'leaf_size': '(30)', 'metric': '"""minkowski"""', 'metric_params': 'None', 'n_jobs': '(1)', 'n_neighbors': '(5)', 'p': '(2)', 'weights': '"""uniform"""'}), "(algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None,\n n_jobs=1, n_neighbors=5, p=2, weights='uniform')\n", (447, 572), True, 'from sklearn.neighbors import KNeighborsClassifier as KNC\n')] |
#!python3
#from capy import *
from operators import *
from qChain import *
from utility import *
from demodulating_faraday_file import *
from shutil import copyfile
from simFunction import *
import numpy as np
import scipy.signal as sp
import time
import matplotlib.pyplot as plt
import git
import yaml
import h5py
from lmfit import minimize, Parameters
# Detuning sweep in Hz (-500 to 495, step 5); values match the HDF5
# dataset names 'det<value>' read back further down.
detuning = np.arange(-500,500,5);
# detuning = np.arange(230,500,5);
# detuning = np.arange(-50,50,2);
# for i in range(len(detuning)):
# det = detuning[i];
# Iarray = smallSim(detFreq = det, output = True, save = False);
# # Write to hdf5 file
# h5name = 'C:/Users/Boundsy/Documents/GitHub/Atomicpy/SmallSignals/detScan3.h5'
# with h5py.File(h5name, 'a') as hf:
# hf.create_dataset('det' + str(det), data=Iarray)
# print('Array saved to hdf5 in SmallSignals subfolder')
# print('sims complete')
# Iarray = smallSim(detFreq = det, output = True, save = False);
#######################################################################
# Plot the varying signals
#######################################################################
# Now import and plot to check;
# h5name = 'C:/Users/Boundsy/Documents/GitHub/Atomicpy/SmallSignals/detScan.h5'
# with h5py.File(h5name, 'r') as hf:
# datam50 = hf['det-50'][:]
# datam20 = hf['det-20'][:]
# datam2 = hf['det-2'][:]
# data10 = hf['det10'][:]
# data28 = hf['det28'][:]
# data49 = hf['det49'][:]
# plt.plot(xvals, datam50)
# plt.plot(xvals, datam20)
# plt.plot(xvals, datam2)
# plt.plot(xvals, data10)
# plt.plot(xvals, data28)
# plt.plot(xvals, data49)
# plt.grid()
# plt.legend(['-50Hz','-20Hz', '-2Hz', '10Hz', '28Hz', '49Hz'])
# plt.xlabel('time (s)')
# plt.ylabel('I(t) amplitude')
# plt.title('I(t) w/ Small Signal @ Different Detuning')
# plt.show()
# h5name = 'C:/Users/Boundsy/Documents/GitHub/Atomicpy/SmallSignals/detScan3.h5'
# with h5py.File(h5name, 'r') as hf:
# datam50 = hf['det-50'][:]
# datam30 = hf['det-30'][:]
# datam10 = hf['det-10'][:]
# data0 = hf['det0'][:]
# data10 = hf['det10'][:]
# data30 = hf['det30'][:]
# data48 = hf['det48'][:]
# xvals = np.linspace(0,0.01,len(datam50))
# plt.plot(xvals, datam50)
# plt.plot(xvals, datam30)
# plt.plot(xvals, datam10)
# plt.plot(xvals, data0)
# plt.plot(xvals, data10)
# plt.plot(xvals, data30)
# plt.plot(xvals, data48)
# plt.grid()
# plt.legend(['-50Hz','-30Hz', '-10Hz', '0Hz', '10Hz', '30Hz', '48Hz'], fontsize = '16')
# plt.xlabel('time (s)',fontsize = '20')
# plt.ylabel('I(t) amplitude', fontsize = '20')
# plt.title('I(t) w/ Small Signal @ Different Detuning', fontsize = '24')
# plt.tick_params(labelsize = '16')
# plt.show()
# h5name = 'C:/Users/Boundsy/Documents/GitHub/Atomicpy/SmallSignals/detScan2.h5'
# with h5py.File(h5name, 'r') as hf:
# data = hf['det0'][:]
# xvals = np.linspace(0,0.01,len(data))
# plt.plot(xvals, data)
# plt.grid()
# plt.xlabel('time (s)', fontsize = '20')
# plt.ylabel('I(t) amplitude', fontsize = '20')
# plt.title('I(t) w/ Small Signal @ Resonance', fontsize = '24')
# plt.tick_params(labelsize = '16')
# plt.show()
#######################################################################
# Plot the final point for each detuning
#######################################################################
# Read back the final I(t) sample of every simulated detuning and plot
# the final value as a function of detuning.
Ifin = np.zeros(len(detuning))
h5name = 'C:/Users/Boundsy/Documents/GitHub/Atomicpy/SmallSignals/detScan2.h5'
# Open the HDF5 file once instead of re-opening it on every iteration,
# and read only the last sample of each trace instead of loading the
# whole dataset into memory.
with h5py.File(h5name, 'r') as hf:
    for i in range(len(detuning)):
        dataName = 'det' + str(detuning[i])
        Ifin[i] = hf[dataName][-1]
plt.figure()
plt.plot(detuning, Ifin)
plt.grid()
plt.xlabel('Detuning (Hz)', fontsize = '20')
plt.ylabel('Final I(t) value @ 0.01s', fontsize = '20')
plt.title('Small Signal final value vs Detuning', fontsize = '24')
plt.tick_params(labelsize = '16')
plt.show() | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.tick_params",
"h5py.File",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((367, 390), 'numpy.arange', 'np.arange', (['(-500)', '(500)', '(5)'], {}), '(-500, 500, 5)\n', (376, 390), True, 'import numpy as np\n'), ((3555, 3567), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3565, 3567), True, 'import matplotlib.pyplot as plt\n'), ((3568, 3592), 'matplotlib.pyplot.plot', 'plt.plot', (['detuning', 'Ifin'], {}), '(detuning, Ifin)\n', (3576, 3592), True, 'import matplotlib.pyplot as plt\n'), ((3593, 3603), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3601, 3603), True, 'import matplotlib.pyplot as plt\n'), ((3604, 3646), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Detuning (Hz)"""'], {'fontsize': '"""20"""'}), "('Detuning (Hz)', fontsize='20')\n", (3614, 3646), True, 'import matplotlib.pyplot as plt\n'), ((3649, 3702), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Final I(t) value @ 0.01s"""'], {'fontsize': '"""20"""'}), "('Final I(t) value @ 0.01s', fontsize='20')\n", (3659, 3702), True, 'import matplotlib.pyplot as plt\n'), ((3705, 3769), 'matplotlib.pyplot.title', 'plt.title', (['"""Small Signal final value vs Detuning"""'], {'fontsize': '"""24"""'}), "('Small Signal final value vs Detuning', fontsize='24')\n", (3714, 3769), True, 'import matplotlib.pyplot as plt\n'), ((3772, 3803), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '"""16"""'}), "(labelsize='16')\n", (3787, 3803), True, 'import matplotlib.pyplot as plt\n'), ((3806, 3816), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3814, 3816), True, 'import matplotlib.pyplot as plt\n'), ((3479, 3501), 'h5py.File', 'h5py.File', (['h5name', '"""r"""'], {}), "(h5name, 'r')\n", (3488, 3501), False, 'import h5py\n')] |
import numpy as np
from keras.utils import Sequence
import xarray as xr
class DataGenerator(Sequence):
    """Keras Sequence yielding (geopotential, precipitation) batches from ERA5.

    Loads three geopotential levels (1000/800/500 hPa) and total
    precipitation for 2017 from local NetCDF files, min-max normalises
    everything to [-1, 1], aligns the two sources on their common
    timestamps, and serves random batches.
    """

    def __init__(self, partition='train', batch_size=32, n_channels=3, t_stride=10, shuffle=True, seed=1):
        self.partition = partition
        self.batch_size = batch_size
        self.n_channels = n_channels
        self.t_stride = t_stride
        self.shuffle = shuffle
        self.era5 = None
        self.prec = None

        # Load ERA5 geopotential levels
        era5_ds1 = xr.open_dataset("./datasets/GEOP1000_GAN_2017.nc")
        era5_ds2 = xr.open_dataset("./datasets/GEOP800_GAN_2017.nc")
        era5_ds3 = xr.open_dataset("./datasets/GEOP500_GAN_2017.nc")
        era5_times = era5_ds1.time[:].data

        # Load ERA5 total precipitation
        prec_ds = xr.open_dataset("./datasets/TP_GAN_2017.nc")
        prec_times = prec_ds.time[:].data

        # Find common dates and shuffle deterministically (seeded).
        times = np.intersect1d(era5_times, prec_times)
        np.random.seed(seed)
        np.random.shuffle(times)

        # Min-max normalise each geopotential level to [0, 1], then stack
        # the three levels as channels and rescale to [-1, 1].
        z500 = era5_ds3.Geopotential.sel(time=times[::self.t_stride])[:].data
        z500 = (z500 - z500.min()) / (z500.max() - z500.min())
        z800 = era5_ds2.Geopotential.sel(time=times[::self.t_stride])[:].data
        z800 = (z800 - z800.min()) / (z800.max() - z800.min())
        z1000 = era5_ds1.Geopotential.sel(time=times[::self.t_stride])[:].data
        z1000 = (z1000 - z1000.min()) / (z1000.max() - z1000.min())
        self.era5 = np.stack((z1000, z800, z500), axis=3)
        z1000, z800, z500 = None, None, None  # release the large intermediates
        self.era5 = (self.era5 * 2) - 1

        # Precipitation: convert m -> mm, clip at 30 mm, and build three
        # nonlinear rescalings of the same field as channels, each in [0, 1],
        # then rescale to [-1, 1].
        tp = prec_ds.tp.sel(time=times[::self.t_stride])[:].data * 1000
        tp = np.clip(tp, 0, 30)
        tp1 = np.log(1 + np.log(1 + tp))
        tp1 = np.clip(tp1, 0, 1)
        tp2 = np.log(1 + tp) / np.log(31)
        tp3 = tp / 30
        self.prec = np.stack((tp1, tp2, tp3), axis=3)
        tp, tp1, tp2, tp3 = None, None, None, None
        self.prec = (self.prec * 2) - 1

        # BUGFIX: the split boundaries previously used tp.shape[0], but tp
        # had just been set to None above -> AttributeError. Use the stacked
        # precipitation array instead (same sample count).
        n_samples = self.prec.shape[0]
        if self.partition == 'train':
            # first 70% of samples
            n1 = int(n_samples * .7)
            self.era5 = self.era5[:n1]
            self.prec = self.prec[:n1]
        elif self.partition == 'test':
            # samples between 70% and 90%
            n0 = int(n_samples * .7)
            n1 = int(n_samples * .9)
            self.era5 = self.era5[n0:n1]
            self.prec = self.prec[n0:n1]
        elif self.partition == 'val':
            # last 10% of samples
            n0 = int(n_samples * .9)
            self.era5 = self.era5[n0:]
            self.prec = self.prec[n0:]

    def __len__(self):
        'Denotes the number of batches per epoch'
        # BUGFIX: previously referenced self.list_IDs, which is never
        # defined anywhere in this class -> AttributeError. Derive the
        # batch count from the number of loaded samples.
        return int(np.floor(self.prec.shape[0] / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # NOTE: `index` is ignored; every call draws a fresh random batch.
        idx = np.random.randint(self.prec.shape[0], size=self.batch_size)
        return self.era5[idx], self.prec[idx]

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        pass
| [
"numpy.clip",
"numpy.intersect1d",
"numpy.log",
"numpy.stack",
"numpy.random.randint",
"numpy.random.seed",
"xarray.open_dataset",
"numpy.random.shuffle"
] | [((524, 574), 'xarray.open_dataset', 'xr.open_dataset', (['"""./datasets/GEOP1000_GAN_2017.nc"""'], {}), "('./datasets/GEOP1000_GAN_2017.nc')\n", (539, 574), True, 'import xarray as xr\n'), ((594, 643), 'xarray.open_dataset', 'xr.open_dataset', (['"""./datasets/GEOP800_GAN_2017.nc"""'], {}), "('./datasets/GEOP800_GAN_2017.nc')\n", (609, 643), True, 'import xarray as xr\n'), ((663, 712), 'xarray.open_dataset', 'xr.open_dataset', (['"""./datasets/GEOP500_GAN_2017.nc"""'], {}), "('./datasets/GEOP500_GAN_2017.nc')\n", (678, 712), True, 'import xarray as xr\n'), ((815, 859), 'xarray.open_dataset', 'xr.open_dataset', (['"""./datasets/TP_GAN_2017.nc"""'], {}), "('./datasets/TP_GAN_2017.nc')\n", (830, 859), True, 'import xarray as xr\n'), ((959, 997), 'numpy.intersect1d', 'np.intersect1d', (['era5_times', 'prec_times'], {}), '(era5_times, prec_times)\n', (973, 997), True, 'import numpy as np\n'), ((1006, 1026), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1020, 1026), True, 'import numpy as np\n'), ((1035, 1059), 'numpy.random.shuffle', 'np.random.shuffle', (['times'], {}), '(times)\n', (1052, 1059), True, 'import numpy as np\n'), ((1557, 1594), 'numpy.stack', 'np.stack', (['(z1000, z800, z500)'], {'axis': '(3)'}), '((z1000, z800, z500), axis=3)\n', (1565, 1594), True, 'import numpy as np\n'), ((1814, 1832), 'numpy.clip', 'np.clip', (['tp', '(0)', '(30)'], {}), '(tp, 0, 30)\n', (1821, 1832), True, 'import numpy as np\n'), ((1884, 1902), 'numpy.clip', 'np.clip', (['tp1', '(0)', '(1)'], {}), '(tp1, 0, 1)\n', (1891, 1902), True, 'import numpy as np\n'), ((1984, 2017), 'numpy.stack', 'np.stack', (['(tp1, tp2, tp3)'], {'axis': '(3)'}), '((tp1, tp2, tp3), axis=3)\n', (1992, 2017), True, 'import numpy as np\n'), ((2882, 2941), 'numpy.random.randint', 'np.random.randint', (['self.prec.shape[0]'], {'size': 'self.batch_size'}), '(self.prec.shape[0], size=self.batch_size)\n', (2899, 2941), True, 'import numpy as np\n'), ((1917, 1931), 'numpy.log', 'np.log', 
(['(1 + tp)'], {}), '(1 + tp)\n', (1923, 1931), True, 'import numpy as np\n'), ((1930, 1940), 'numpy.log', 'np.log', (['(31)'], {}), '(31)\n', (1936, 1940), True, 'import numpy as np\n'), ((1856, 1870), 'numpy.log', 'np.log', (['(1 + tp)'], {}), '(1 + tp)\n', (1862, 1870), True, 'import numpy as np\n')] |
from torchvision import datasets, models, transforms
import torch.nn.functional as F
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Function
import pdb
import math
import numpy as np
# ImageNet-pretrained checkpoint URLs for each ResNet depth (legacy
# s3.amazonaws.com/pytorch mirror), keyed by architecture name.
# Not referenced in the visible code; presumably used elsewhere to fetch
# backbones by name — TODO confirm.
model_urls = {
    'resnet18': 'https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth',
    'resnet34': 'https://s3.amazonaws.com/pytorch/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://s3.amazonaws.com/pytorch/models/resnet50-19c8e357.pth',
    'resnet101': 'https://s3.amazonaws.com/pytorch/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://s3.amazonaws.com/pytorch/models/resnet152-b121ed2d.pth',
}
def calc_coeff(iter_num, high=1.0, low=0.0, alpha=10.0, max_iter=10000.0):
    """Anneal a coefficient from ``low`` to ``high`` along a sigmoid schedule.

    Used to ramp up the gradient-reversal strength over training.

    Args:
        iter_num: current training iteration.
        high: asymptotic upper bound of the coefficient.
        low: starting / lower bound of the coefficient.
        alpha: steepness of the sigmoid ramp.
        max_iter: iteration count over which the ramp is scaled.

    Returns:
        float coefficient in ``[low, high]`` (``low`` at iter 0).
    """
    # np.float was removed in NumPy 1.24; the builtin float() is equivalent.
    return float(2.0 * (high - low) / (1.0 + np.exp(-alpha * iter_num / max_iter)) - (high - low) + low)
def grl_hook(coeff):
    """Return a backward hook that reverses the gradient and scales it by ``coeff``."""
    def _reverse(grad):
        # Clone so the autograd-stored gradient is not mutated in place.
        return grad.clone() * (-coeff)
    return _reverse
def init_weights(m):
    """Initialiser for ``Module.apply``: Kaiming-uniform for conv layers,
    N(1, 0.02) for batch-norm scales, Xavier-normal for linear layers."""
    kind = m.__class__.__name__
    if 'Conv2d' in kind or 'ConvTranspose2d' in kind:
        nn.init.kaiming_uniform_(m.weight)
    elif 'BatchNorm' in kind:
        nn.init.normal_(m.weight, 1.0, 0.02)
    elif 'Linear' in kind:
        nn.init.xavier_normal_(m.weight)
class RandomLayer(nn.Module):
    """Random multilinear fusion of several inputs (as used by CDAN).

    Each input is projected through a fixed gaussian matrix into
    ``output_dim`` dimensions; the projections are multiplied elementwise,
    with a ``output_dim**(1/k)`` normaliser to keep the product's scale stable.
    """
    def __init__(self, input_dim_list=[], output_dim=1024):
        super(RandomLayer, self).__init__()
        self.input_num = len(input_dim_list)
        self.output_dim = output_dim
        # One fixed (non-trainable) random projection per input.
        self.random_matrix = []
        for idx in range(self.input_num):
            self.random_matrix.append(torch.randn(input_dim_list[idx], output_dim))
    def forward(self, input_list):
        projected = []
        for idx in range(self.input_num):
            projected.append(input_list[idx].mm(self.random_matrix[idx]))
        fused = projected[0] / math.pow(float(self.output_dim), 1.0 / len(projected))
        for extra in projected[1:]:
            fused = fused * extra
        return fused
    def cuda(self):
        super(RandomLayer, self).cuda()
        # Buffers are plain tensors, so they must be moved by hand.
        self.random_matrix = [mat.cuda() for mat in self.random_matrix]
class AdversarialNetwork(nn.Module):
    """Domain discriminator: 3-layer MLP with ReLU/Dropout and sigmoid output.

    Gradient reversal is built in: while training, every forward call bumps
    an internal iteration counter and attaches a reversal hook whose strength
    follows the ``calc_coeff`` sigmoid schedule.
    """
    def __init__(self, in_feature, hidden_size):
        super(AdversarialNetwork, self).__init__()
        self.ad_layer1 = nn.Linear(in_feature, hidden_size)
        self.ad_layer2 = nn.Linear(hidden_size, hidden_size)
        self.ad_layer3 = nn.Linear(hidden_size, 1)
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.dropout1 = nn.Dropout(0.5)
        self.dropout2 = nn.Dropout(0.5)
        self.sigmoid = nn.Sigmoid()
        self.apply(init_weights)
        # State for the gradient-reversal coefficient schedule.
        self.iter_num = 0
        self.alpha = 10
        self.low = 0.0
        self.high = 1.0
        self.max_iter = 10000.0
    def forward(self, x):
        if self.training:
            self.iter_num += 1
        coeff = calc_coeff(self.iter_num, self.high, self.low, self.alpha, self.max_iter)
        # Multiply by 1.0 to get a non-leaf tensor that a hook can attach to.
        x = x * 1.0
        x.register_hook(grl_hook(coeff))
        hidden = self.dropout1(self.relu1(self.ad_layer1(x)))
        hidden = self.dropout2(self.relu2(self.ad_layer2(hidden)))
        return self.sigmoid(self.ad_layer3(hidden))
    def output_num(self):
        return 1
    def get_parameters(self):
        return [{"params":self.parameters(), "lr_mult":10, 'decay_mult':2}]
def loss_Entropy(input_):
    """Per-sample Shannon entropy of a batch of probability vectors.

    Args:
        input_: (batch, classes) tensor of probabilities.

    Returns:
        (batch,) tensor of entropies.
    """
    eps = 1e-5
    # eps keeps log() finite when a probability is exactly zero.
    elementwise = input_.mul(torch.log(input_ + eps)).neg()
    return elementwise.sum(dim=1)
def loss_CDAN(input_list, ad_net, entropy=None, coeff=None, random_layer=None):
    """Conditional adversarial (CDAN) domain loss.

    Args:
        input_list: ``[features, softmax_outputs]``; the batch is assumed to
            hold source samples in the first half and target samples in the
            second half — TODO confirm against callers.
        ad_net: domain discriminator producing P(domain = source) in (0, 1).
        entropy: optional per-sample prediction entropy for entropy-weighted
            CDAN+E; ``coeff`` is then the gradient-reversal strength applied
            to gradients flowing back through the entropy.
        coeff: see ``entropy``.
        random_layer: optional random-projection layer; when given, features
            and predictions are fused by random projection instead of the
            full outer product.

    Returns:
        Scalar BCE domain loss (entropy-weighted when ``entropy`` is given).
    """
    # Predictions are detached: only features carry adversarial gradients.
    softmax_output = input_list[1].detach()
    feature = input_list[0]
    if random_layer is None:
        # Outer product of predictions and features (multilinear conditioning).
        op_out = torch.bmm(softmax_output.unsqueeze(2), feature.unsqueeze(1))
        ad_out = ad_net(op_out.view(-1, softmax_output.size(1) * feature.size(1)))
    else:
        random_out = random_layer.forward([feature, softmax_output])
        ad_out = ad_net(random_out.view(-1, random_out.size(1)))
    batch_size = softmax_output.size(0) // 2
    # Domain labels: 1 for the source half, 0 for the target half.
    # NOTE(review): hard-codes .cuda(); fails on CPU-only runs.
    dc_target = torch.from_numpy(np.array([[1]] * batch_size + [[0]] * batch_size)).float().cuda()
    if entropy is not None:
        # Reverse gradients flowing into the entropy before using it as a weight.
        entropy.register_hook(grl_hook(coeff))
        # Certainty weight: low entropy -> weight near 2, high entropy -> near 1.
        entropy = 1.0+torch.exp(-entropy)
        source_mask = torch.ones_like(entropy)
        source_mask[feature.size(0)//2:] = 0
        source_weight = entropy*source_mask
        target_mask = torch.ones_like(entropy)
        target_mask[0:feature.size(0)//2] = 0
        target_weight = entropy*target_mask
        # Normalise each half's weights by its (detached) sum before combining.
        weight = source_weight / torch.sum(source_weight).detach().item() + \
                 target_weight / torch.sum(target_weight).detach().item()
        return torch.sum(weight.view(-1, 1) * nn.BCELoss(reduce=False)(ad_out, dc_target)) / torch.sum(weight).detach().item()
    else:
        return nn.BCELoss()(ad_out, dc_target)
def weights_init(m):
    """Initialiser for ``Module.apply``: N(0, 0.1) conv weights,
    Xavier-normal + zero-bias linears, N(1, 0.1) batch-norm scales."""
    kind = m.__class__.__name__
    if 'Conv' in kind:
        m.weight.data.normal_(0.0, 0.1)
    elif 'Linear' in kind:
        nn.init.xavier_normal_(m.weight)
        nn.init.zeros_(m.bias)
    elif 'BatchNorm' in kind:
        m.weight.data.normal_(1.0, 0.1)
        m.bias.data.fill_(0)
class GradReverse(Function):
    """Identity in the forward pass; multiplies the gradient by ``-lambd``
    on the way back (gradient-reversal layer)."""
    @staticmethod
    def forward(ctx, x, lambd):
        ctx.lambd = lambd
        # view_as returns x unchanged while keeping the autograd graph intact.
        return x.view_as(x)
    @staticmethod
    def backward(ctx, grad_output):
        # Reversed, scaled gradient for x; None for the lambd argument.
        return grad_output.neg() * ctx.lambd, None
def grad_reverse(x, lambd=1.0):
    """Apply gradient reversal of strength ``lambd`` to ``x``."""
    return GradReverse.apply(x, lambd)
class GradMulti(Function):
    """Identity in the forward pass; scales the gradient by ``lambd`` on the
    way back.

    Rewritten in the static-method ``autograd.Function`` style (mirroring
    ``GradReverse`` above): the original legacy form — instantiating the
    Function and defining ``forward``/``backward`` as instance methods —
    raises a RuntimeError on modern PyTorch, which dropped legacy-function
    support.
    """
    @staticmethod
    def forward(ctx, x, lambd):
        ctx.lambd = lambd
        return x.view_as(x)
    @staticmethod
    def backward(ctx, grad_output):
        # Scaled gradient for x; None for the lambd argument.
        return grad_output * ctx.lambd, None
def grad_multi(x, lambd=1.0):
    """Scale the gradient flowing through ``x`` by ``lambd`` (forward is identity)."""
    return GradMulti.apply(x, lambd)
def l2_norm(input):
    """Return ``input`` divided by its per-row L2 norm (over dim 1), same shape."""
    original_shape = input.size()
    # Squared norm per row; a tiny epsilon guards against division by zero.
    squared_sum = input.pow(2).sum(1).add_(1e-10)
    norms = squared_sum.sqrt()
    normalized = input.div(norms.view(-1, 1).expand_as(input))
    return normalized.view(original_shape)
def conv3x3(in_planes, out_planes, stride=1, sn=False):
    """3x3 convolution with padding 1 and bias; spectral-norm variant when ``sn``."""
    if sn:
        # SNConv2d is a spectrally-normalised conv defined elsewhere in the project.
        return SNConv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                        padding=1, bias=True)
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=True)
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution: no padding, no bias (pure channel projection)."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=1, stride=stride, padding=0, bias=False)
class L2Norm(nn.Module):
    """Channel-wise L2 normalisation with a learnable per-channel scale
    (SSD-style ``L2Norm``).

    Fixes relative to the original:
    - ``init.constant`` (long-deprecated alias) -> ``init.constant_``.
    - the norm is reduced with ``keepdim=True`` and the scale reshaped to
      ``(1, C, 1, ...)``, so the layer broadcasts over (N, C, ...) inputs;
      the original ``expand_as`` calls fail on 4-D feature maps.
    - the input tensor is no longer modified in place.
    """
    def __init__(self, n_channels, scale):
        super(L2Norm, self).__init__()
        self.n_channels = n_channels
        self.gamma = scale or None  # kept as in the original: 0 becomes None
        self.eps = 1e-10
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.reset_parameters()
    def reset_parameters(self):
        # Fill every channel scale with gamma.
        init.constant_(self.weight, self.gamma)
    def forward(self, x):
        # L2 norm over the channel dim, kept so it broadcasts against x.
        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        x = x / norm
        # Reshape the scale to (1, C, 1, 1, ...) for any trailing spatial dims.
        scale_shape = [1, self.n_channels] + [1] * (x.dim() - 2)
        return self.weight.view(scale_shape) * x
class AlexNetBase(nn.Module):
    """AlexNet backbone: conv features plus the FC trunk up to (but not
    including) the final 1000-way layer.

    ``forward(x, target=True)`` scales the gradient flowing back after the
    early conv stages by ``lamda`` via ``grad_multi`` — presumably to slow
    backbone adaptation on target-domain batches; TODO confirm against callers.

    Attribute names (conv1..pool3, classifier) define the state_dict keys,
    so they must not be renamed.
    """
    def __init__(self,pret=True):
        super(AlexNetBase, self).__init__()
        # torchvision AlexNet, optionally ImageNet-pretrained.
        model_alexnet = models.alexnet(pretrained=pret)
        #print(model_alexnet.features)
        # Unpack the feature stack into named modules so forward() can
        # interleave grad_multi calls between stages.
        self.conv1 = model_alexnet.features[0]
        self.relu1 = model_alexnet.features[1]
        self.pool1 = model_alexnet.features[2]
        self.conv2 = model_alexnet.features[3]
        self.relu2 = model_alexnet.features[4]
        self.pool2 = model_alexnet.features[5]
        self.conv3 = model_alexnet.features[6]
        self.relu3 = model_alexnet.features[7]
        self.conv4 = model_alexnet.features[8]
        self.relu4 = model_alexnet.features[9]
        self.conv5 = model_alexnet.features[10]
        self.relu5 = model_alexnet.features[11]
        self.pool3 = model_alexnet.features[12]
        # feature1/feature2 alias the same modules; not used by forward().
        self.feature1 = nn.Sequential(*list(model_alexnet.features._modules.values())[:6])
        self.feature2 = nn.Sequential(*list(model_alexnet.features._modules.values())[6:])
        #self.features = model_alexnet.features
        # Copy classifier layers 0..5, i.e. everything before the final FC.
        self.classifier = nn.Sequential()
        for i in range(6):
            self.classifier.add_module("classifier" + str(i), model_alexnet.classifier[i])
        self.__in_features = model_alexnet.classifier[6].in_features
    def forward(self, x, target=False, lamda=0.1):
        """Return 4096-d fc7 features for ``x``.

        Args:
            x: input image batch; the flatten below assumes 256*6*6 conv
               output (224x224 inputs) — TODO confirm.
            target: if True, down-scale gradients after conv stages 1-4.
            lamda: gradient multiplier used when ``target`` is True.
        """
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.pool1(x)
        if target:
            x = grad_multi(x,lamda)
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.pool2(x)
        if target:
            x = grad_multi(x, lamda)
        x = self.conv3(x)
        x = self.relu3(x)
        if target:
            x = grad_multi(x, lamda)
        x = self.conv4(x)
        x = self.relu4(x)
        if target:
            x = grad_multi(x, lamda)
        x = self.conv5(x)
        x = self.relu5(x)
        x = self.pool3(x)
        #x = self.feature1(x)
        #x = self.feature2(x)
        # NOTE(review): `feature` is assigned but never used.
        feature = x
        x = x.view(x.size(0), 256 * 6 * 6)
        #x = F.normalize(x)
        x = self.classifier(x)
        # x = F.relu(self.fc1(x))
        # x = F.relu(self.fc2(x))
        return x
    def output_num(self):
        # Width of the removed final FC's input (4096 for AlexNet).
        return self.__in_features
class AlexNetBase_selfsup(nn.Module):
    """AlexNet backbone identical to ``AlexNetBase`` but optionally warm-started
    from a self-supervised checkpoint.

    When ``path`` is given, the checkpoint's ``'net'`` state dict is loaded on
    top of the ImageNet-pretrained weights (after stripping the DataParallel
    ``module.`` prefix and dropping keys absent from torchvision's AlexNet).
    """
    def __init__(self,path=False):
        super(AlexNetBase_selfsup, self).__init__()
        if path:
            checkpoint = torch.load(path)
            pretrained_dict = checkpoint['net']
            # lemniscate = checkpoint['lemniscate']
            # best_acc = checkpoint['acc']
            # start_epoch = checkpoint['epoch']
            from collections import OrderedDict
            new_state_dict = OrderedDict()
            for k, v in pretrained_dict.items():
                name = k[7:] # remove `module.`
                new_state_dict[name] = v
            model_alexnet = models.alexnet(pretrained=True)
            model_dict = model_alexnet.state_dict()
            # # 1. filter out unnecessary keys
            new_state_dict = {k: v for k, v in new_state_dict.items() if k in model_dict}
            # # 2. overwrite entries in the existing state dict
            model_dict.update(new_state_dict)
            # # 3. load the new state dict
            model_alexnet.load_state_dict(model_dict)
        else:
            model_alexnet = models.alexnet(pretrained=True)
        #print(model_alexnet.features)
        # Unpack the feature stack into named modules so forward() can
        # interleave grad_multi calls between stages.
        self.conv1 = model_alexnet.features[0]
        self.relu1 = model_alexnet.features[1]
        self.pool1 = model_alexnet.features[2]
        self.conv2 = model_alexnet.features[3]
        self.relu2 = model_alexnet.features[4]
        self.pool2 = model_alexnet.features[5]
        self.conv3 = model_alexnet.features[6]
        self.relu3 = model_alexnet.features[7]
        self.conv4 = model_alexnet.features[8]
        self.relu4 = model_alexnet.features[9]
        self.conv5 = model_alexnet.features[10]
        self.relu5 = model_alexnet.features[11]
        self.pool3 = model_alexnet.features[12]
        # feature1/feature2 alias the same modules; not used by forward().
        self.feature1 = nn.Sequential(*list(model_alexnet.features._modules.values())[:6])
        self.feature2 = nn.Sequential(*list(model_alexnet.features._modules.values())[6:])
        #self.features = model_alexnet.features
        # Copy classifier layers 0..5, i.e. everything before the final FC.
        self.classifier = nn.Sequential()
        for i in range(6):
            self.classifier.add_module("classifier" + str(i), model_alexnet.classifier[i])
        self.__in_features = model_alexnet.classifier[6].in_features
    def forward(self, x, target=False, lamda=0.1):
        """Return 4096-d fc7 features; see ``AlexNetBase.forward``."""
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.pool1(x)
        if target:
            x = grad_multi(x,lamda)
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.pool2(x)
        if target:
            x = grad_multi(x, lamda)
        x = self.conv3(x)
        x = self.relu3(x)
        if target:
            x = grad_multi(x, lamda)
        x = self.conv4(x)
        x = self.relu4(x)
        if target:
            x = grad_multi(x, lamda)
        x = self.conv5(x)
        x = self.relu5(x)
        x = self.pool3(x)
        #x = self.feature1(x)
        #x = self.feature2(x)
        # NOTE(review): `feature` is assigned but never used.
        feature = x
        x = x.view(x.size(0), 256 * 6 * 6)
        #x = F.normalize(x)
        x = self.classifier(x)
        # x = F.relu(self.fc1(x))
        # x = F.relu(self.fc2(x))
        return x
    def output_num(self):
        # Width of the removed final FC's input (4096 for AlexNet).
        return self.__in_features
class VGGBase(nn.Module):
    """VGG backbone with the final classification layer removed.

    Returns the 4096-d penultimate FC activation. ``source``/``target`` and
    ``no_pool`` are accepted but unused in the visible forward path.
    """
    def __init__(self, option='vgg', pret=True, no_pool=False):
        super(VGGBase, self).__init__()
        self.dim = 2048
        self.no_pool = no_pool
        if option =='vgg_bn':
            # NOTE(review): 'vgg_bn' selects vgg11_bn, not vgg16_bn — confirm intent.
            vgg16=models.vgg11_bn(pretrained=pret)
        else:
            vgg16 = models.vgg16(pretrained=pret)
        # Drop the last FC so the output is the 4096-d penultimate feature.
        self.classifier = nn.Sequential(*list(vgg16.classifier._modules.values())[:-1])
        self.features = nn.Sequential(*list(vgg16.features._modules.values())[:])
        # Learnable scale parameter; unused in forward() as written.
        self.s = nn.Parameter(torch.FloatTensor([10]))
    def forward(self, x, source=True,target=False):
        x = self.features(x)
        # Flatten assumes 7x7x512 conv output (224x224 inputs) — TODO confirm.
        x = x.view(x.size(0), 7 * 7 * 512)
        x = self.classifier(x)
        return x
class Predictor(nn.Module):
    """Single linear classification head, optionally cosine-similarity based.

    With ``cosine=True`` the input is L2-normalised and the (bias-free)
    logits are divided by ``temp``; otherwise a plain biased linear layer
    is applied. ``reverse=True`` routes the input through gradient reversal.
    """
    def __init__(self, num_class=64, inc=4096, temp=0.1,
                 cosine=False,dropout=False):
        super(Predictor, self).__init__()
        if cosine:
            self.fc = nn.Linear(inc, num_class, bias=False)
        else:
            self.fc = nn.Linear(inc, num_class,bias=True)
        self.num_class = num_class
        self.temp = temp
        self.cosine = cosine
        self.dropout = dropout
    def forward(self, x, reverse=False,eta=0.1):
        if reverse:
            # Gradient reversal of strength eta (MME-style adversarial step).
            x = grad_reverse(x,eta)
        if self.dropout:
            x = F.dropout(x,training=self.training,p=0.1)
        if self.cosine:
            return self.fc(F.normalize(x)) / self.temp
        return self.fc(x)
    def weight_norm(self):
        """Row-normalise the classifier weights in place (cosine-head upkeep)."""
        w = self.fc.weight.data
        row_norms = w.norm(p=2, dim=1, keepdim=True)
        self.fc.weight.data = w.div(row_norms.expand_as(w))
class Predictor_deep(nn.Module):
    """Two-layer head: 512-d bottleneck, L2 normalisation, then logits divided
    by a hard-coded temperature of 0.05 (``temp`` is stored but unused)."""
    def __init__(self, num_class=64, inc=4096, temp=0.1,dropout=False):
        super(Predictor_deep, self).__init__()
        self.fc1 = nn.Linear(inc, 512)
        self.fc2 = nn.Linear(512, num_class)
        self.num_class = num_class
        self.temp = temp
        self.dropout = dropout
    def forward(self, x, reverse=False,eta=0.1):
        feat = self.fc1(x)
        if reverse:
            # Gradient reversal of strength eta on the bottleneck feature.
            feat = grad_reverse(feat, eta)
        feat = F.normalize(feat)
        if self.dropout:
            feat = F.dropout(feat,training=self.training,p=0.1)
        return self.fc2(feat) / 0.05
class Predictor_single(nn.Module):
    """Single linear head over a low-dimensional embedding; logits divided by
    a hard-coded temperature of 0.05 (``temp`` is stored but unused)."""
    def __init__(self, num_class=64, low_dim=4096, temp=0.1,dropout=False):
        super(Predictor_single, self).__init__()
        self.fc2 = nn.Linear(low_dim, num_class)
        self.num_class = num_class
        self.temp = temp
        self.dropout = dropout
    def forward(self, x, reverse=False,eta=0.1):
        if reverse:
            x = grad_reverse(x, eta)
        # x = F.normalize(x)
        if self.dropout:
            x = F.dropout(x,training=self.training, p=0.1)
        return self.fc2(x) / 0.05
class Predictor_deep_2(nn.Module):
    """Two-layer head (``inc`` -> ``low_dim`` -> ``num_class``) with L2
    normalisation and a hard-coded temperature of 0.05; can be warm-started
    from a checkpoint at ``path``.
    """
    def __init__(self, num_class=64, inc=4096, low_dim=512,temp=0.1,dropout=False, path=False):
        super(Predictor_deep_2, self).__init__()
        self.fc = nn.Linear(inc, low_dim)
        self.fc2 = nn.Linear(low_dim, num_class)
        self.num_class = num_class
        self.temp = temp
        self.dropout = dropout
        # NOTE(review): called on the module object itself, whose class name
        # matches no branch in weights_init — probably intended
        # self.apply(weights_init); confirm.
        weights_init(self)
        if path:
            import torch
            checkpoint = torch.load(path)
            pretrained_dict = checkpoint['net']
            # lemniscate = checkpoint['lemniscate']
            # best_acc = checkpoint['acc']
            # start_epoch = checkpoint['epoch']
            from collections import OrderedDict
            new_state_dict = OrderedDict()
            for k, v in pretrained_dict.items():
                name = k[7:] # remove `module.`
                new_state_dict[name] = v
            model_dict = self.state_dict()
            # 1. filter out unnecessary keys
            new_state_dict = {k: v for k, v in new_state_dict.items() if k in model_dict}
            # 2. overwrite entries in the existing state dict
            model_dict.update(new_state_dict)
            # 3. load the new state dict
            self.load_state_dict(model_dict)
    def forward(self, x, reverse=False,eta=0.1):
        """Return temperature-scaled logits over the normalised bottleneck."""
        x = self.fc(x)
        if reverse:
            x = grad_reverse(x, eta)
        x = F.normalize(x)
        if self.dropout:
            x = F.dropout(x,training=self.training,p=0.1)
        x_out = self.fc2(x)/0.05
        return x_out
    def forward_2(self, x, reverse=False,eta=0.1):
        """Like ``forward`` but also returns the unnormalised bottleneck feature."""
        x_feat = self.fc(x)
        if reverse:
            # NOTE(review): reversal is applied to the input x, which is not
            # used afterwards — likely meant x_feat; confirm.
            x = grad_reverse(x, eta)
        x_out = F.normalize(x_feat)
        if self.dropout:
            # NOTE(review): dropout result is assigned to x but never used
            # (fc2 consumes x_out) — dead code as written; confirm intent.
            x = F.dropout(x,training=self.training,p=0.1)
        x_out = self.fc2(x_out)/0.05
        return x_out, x_feat
class Predictor_deep_id(nn.Module):
    """Linear head over a low-dim embedding with a hard-coded temperature of
    0.05. ``inc``, ``path`` and ``normalize`` are accepted but unused in the
    visible code.
    """
    def __init__(self, num_class=64, inc=4096, low_dim=512,temp=0.1,dropout=False, path=False, normalize=False):
        super(Predictor_deep_id, self).__init__()
        self.fc2 = nn.Linear(low_dim, num_class)
        self.num_class = num_class
        self.temp = temp
        self.dropout = dropout
        self.normalize = normalize
        # NOTE(review): called on the module object itself, whose class name
        # matches no branch in weights_init — probably intended
        # self.apply(weights_init); confirm.
        weights_init(self)
    def forward(self, x, reverse=False,eta=0.1):
        """Return logits/0.05; optional gradient reversal and dropout."""
        if reverse:
            x = grad_reverse(x, eta)
        # if self.normalize:
        #     x = F.normalize(x)
        if self.dropout:
            x = F.dropout(x,training=self.training,p=0.1)
        x_out = self.fc2(x)/0.05
        return x_out
    def forward_2(self, x, reverse=False,eta=0.1):
        # NOTE(review): this class defines no self.fc, so calling forward_2
        # raises AttributeError — likely copied from Predictor_deep_2; confirm
        # whether this method is ever used.
        x_feat = self.fc(x)
        if reverse:
            x = grad_reverse(x, eta)
        x_out = F.normalize(x_feat)
        if self.dropout:
            # NOTE(review): dropout result is assigned to x but never used
            # (fc2 consumes x_out) — dead code as written.
            x = F.dropout(x,training=self.training,p=0.1)
        x_out = self.fc2(x_out)/0.05
        return x_out, x_feat
class Predictor_deep_id_2(nn.Module):
    """Linear head over a low-dim embedding producing plain (unscaled) logits.

    ``inc``, ``path``, ``temp`` and ``normalize`` are accepted for signature
    compatibility with the sibling predictors; only ``low_dim``,
    ``num_class`` and ``dropout`` affect the computation.
    """
    def __init__(self, num_class=64, inc=4096, low_dim=512,temp=0.1,dropout=False, path=False, normalize=False):
        super(Predictor_deep_id_2, self).__init__()
        self.fc2 = nn.Linear(low_dim, num_class)
        self.num_class = num_class
        self.temp = temp
        self.dropout = dropout
        self.normalize = normalize
        # NOTE(review): applied to the module object itself, whose class name
        # matches no weights_init branch — effectively a no-op as written.
        weights_init(self)
    def forward(self, x, reverse=False,eta=0.1):
        if reverse:
            x = grad_reverse(x, eta)
        # if self.normalize:
        #     x = F.normalize(x)
        if self.dropout:
            x = F.dropout(x,training=self.training,p=0.1)
        return self.fc2(x)
class Discriminator(nn.Module):
    """Binary domain discriminator: 512-wide 3-layer MLP with ReLU activations
    and a sigmoid output in (0, 1).

    ``forward`` applies gradient reversal by default (``reverse=True``) so the
    upstream feature extractor is trained adversarially against it.
    """
    def __init__(self, inc=4096):
        super(Discriminator, self).__init__()
        self.fc1_1 = nn.Linear(inc, 512)
        self.fc2_1 = nn.Linear(512, 512)
        self.fc3_1 = nn.Linear(512, 1)
    def forward(self, x, reverse=True, eta=1.0):
        if reverse:
            x = grad_reverse(x,eta)
        x = F.relu(self.fc1_1(x))
        x = F.relu(self.fc2_1(x))
        # torch.sigmoid replaces the deprecated F.sigmoid alias (same values).
        x_out = torch.sigmoid(self.fc3_1(x))
        return x_out
class Discriminator_2(nn.Module):
    """Smaller binary domain discriminator (256 -> 128 -> 1) with ReLU
    activations and a sigmoid output in (0, 1).

    ``forward`` applies gradient reversal by default (``reverse=True``) so the
    upstream feature extractor is trained adversarially against it.
    """
    def __init__(self, inc=4096):
        super(Discriminator_2, self).__init__()
        self.fc1_1 = nn.Linear(inc, 256)
        self.fc2_1 = nn.Linear(256, 128)
        self.fc3_1 = nn.Linear(128, 1)
    def forward(self, x, reverse=True, eta=1.0):
        if reverse:
            x = grad_reverse(x,eta)
        x = F.relu(self.fc1_1(x))
        x = F.relu(self.fc2_1(x))
        # torch.sigmoid replaces the deprecated F.sigmoid alias (same values).
        x_out = torch.sigmoid(self.fc3_1(x))
        return x_out
| [
"torch.mul",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.Sequential",
"torch.sqrt",
"torch.pow",
"torch.exp",
"torch.nn.init.xavier_normal_",
"numpy.array",
"torch.sum",
"torch.nn.Sigmoid",
"torch.nn.init.zeros_",
"numpy.exp",
"torch.randn",
"torch.ones_like",
"collections.OrderedDi... | [((3489, 3514), 'torch.sum', 'torch.sum', (['entropy'], {'dim': '(1)'}), '(entropy, dim=1)\n', (3498, 3514), False, 'import torch\n'), ((5913, 5932), 'torch.pow', 'torch.pow', (['input', '(2)'], {}), '(input, 2)\n', (5922, 5932), False, 'import torch\n'), ((5990, 6007), 'torch.sqrt', 'torch.sqrt', (['normp'], {}), '(normp)\n', (6000, 6007), False, 'import torch\n'), ((6591, 6680), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(1)', 'stride': 'stride', 'padding': '(0)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=1, stride=stride, padding=0,\n bias=False)\n', (6600, 6680), True, 'import torch.nn as nn\n'), ((1080, 1114), 'torch.nn.init.kaiming_uniform_', 'nn.init.kaiming_uniform_', (['m.weight'], {}), '(m.weight)\n', (1104, 1114), True, 'import torch.nn as nn\n'), ((2357, 2391), 'torch.nn.Linear', 'nn.Linear', (['in_feature', 'hidden_size'], {}), '(in_feature, hidden_size)\n', (2366, 2391), True, 'import torch.nn as nn\n'), ((2413, 2448), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (2422, 2448), True, 'import torch.nn as nn\n'), ((2470, 2495), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (2479, 2495), True, 'import torch.nn as nn\n'), ((2513, 2522), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2520, 2522), True, 'import torch.nn as nn\n'), ((2540, 2549), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2547, 2549), True, 'import torch.nn as nn\n'), ((2570, 2585), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2580, 2585), True, 'import torch.nn as nn\n'), ((2606, 2621), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2616, 2621), True, 'import torch.nn as nn\n'), ((2641, 2653), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2651, 2653), True, 'import torch.nn as nn\n'), ((3447, 3474), 'torch.log', 'torch.log', (['(input_ + epsilon)'], {}), '(input_ + 
epsilon)\n', (3456, 3474), False, 'import torch\n'), ((4305, 4329), 'torch.ones_like', 'torch.ones_like', (['entropy'], {}), '(entropy)\n', (4320, 4329), False, 'import torch\n'), ((4441, 4465), 'torch.ones_like', 'torch.ones_like', (['entropy'], {}), '(entropy)\n', (4456, 4465), False, 'import torch\n'), ((6256, 6344), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(True)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=True)\n', (6265, 6344), True, 'import torch.nn as nn\n'), ((7043, 7081), 'torch.nn.init.constant', 'init.constant', (['self.weight', 'self.gamma'], {}), '(self.weight, self.gamma)\n', (7056, 7081), True, 'import torch.nn.init as init\n'), ((7398, 7429), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': 'pret'}), '(pretrained=pret)\n', (7412, 7429), False, 'from torchvision import datasets, models, transforms\n'), ((8341, 8356), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (8354, 8356), True, 'import torch.nn as nn\n'), ((11540, 11555), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (11553, 11555), True, 'import torch.nn as nn\n'), ((14582, 14601), 'torch.nn.Linear', 'nn.Linear', (['inc', '(512)'], {}), '(inc, 512)\n', (14591, 14601), True, 'import torch.nn as nn\n'), ((14621, 14646), 'torch.nn.Linear', 'nn.Linear', (['(512)', 'num_class'], {}), '(512, num_class)\n', (14630, 14646), True, 'import torch.nn as nn\n'), ((14881, 14895), 'torch.nn.functional.normalize', 'F.normalize', (['x'], {}), '(x)\n', (14892, 14895), True, 'import torch.nn.functional as F\n'), ((15216, 15245), 'torch.nn.Linear', 'nn.Linear', (['low_dim', 'num_class'], {}), '(low_dim, num_class)\n', (15225, 15245), True, 'import torch.nn as nn\n'), ((15813, 15836), 'torch.nn.Linear', 'nn.Linear', (['inc', 'low_dim'], {}), '(inc, low_dim)\n', (15822, 15836), True, 'import torch.nn as nn\n'), ((15856, 15885), 'torch.nn.Linear', 
'nn.Linear', (['low_dim', 'num_class'], {}), '(low_dim, num_class)\n', (15865, 15885), True, 'import torch.nn as nn\n'), ((17027, 17041), 'torch.nn.functional.normalize', 'F.normalize', (['x'], {}), '(x)\n', (17038, 17041), True, 'import torch.nn.functional as F\n'), ((17334, 17353), 'torch.nn.functional.normalize', 'F.normalize', (['x_feat'], {}), '(x_feat)\n', (17345, 17353), True, 'import torch.nn.functional as F\n'), ((17723, 17752), 'torch.nn.Linear', 'nn.Linear', (['low_dim', 'num_class'], {}), '(low_dim, num_class)\n', (17732, 17752), True, 'import torch.nn as nn\n'), ((18368, 18387), 'torch.nn.functional.normalize', 'F.normalize', (['x_feat'], {}), '(x_feat)\n', (18379, 18387), True, 'import torch.nn.functional as F\n'), ((18762, 18791), 'torch.nn.Linear', 'nn.Linear', (['low_dim', 'num_class'], {}), '(low_dim, num_class)\n', (18771, 18791), True, 'import torch.nn as nn\n'), ((19383, 19402), 'torch.nn.Linear', 'nn.Linear', (['inc', '(512)'], {}), '(inc, 512)\n', (19392, 19402), True, 'import torch.nn as nn\n'), ((19424, 19443), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(512)'], {}), '(512, 512)\n', (19433, 19443), True, 'import torch.nn as nn\n'), ((19465, 19482), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(1)'], {}), '(512, 1)\n', (19474, 19482), True, 'import torch.nn as nn\n'), ((19857, 19876), 'torch.nn.Linear', 'nn.Linear', (['inc', '(256)'], {}), '(inc, 256)\n', (19866, 19876), True, 'import torch.nn as nn\n'), ((19898, 19917), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(128)'], {}), '(256, 128)\n', (19907, 19917), True, 'import torch.nn as nn\n'), ((19939, 19956), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(1)'], {}), '(128, 1)\n', (19948, 19956), True, 'import torch.nn as nn\n'), ((1199, 1235), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight', '(1.0)', '(0.02)'], {}), '(m.weight, 1.0, 0.02)\n', (1214, 1235), True, 'import torch.nn as nn\n'), ((1631, 1673), 'torch.randn', 'torch.randn', (['input_dim_list[i]', 'output_dim'], {}), 
'(input_dim_list[i], output_dim)\n', (1642, 1673), False, 'import torch\n'), ((1765, 1811), 'torch.mm', 'torch.mm', (['input_list[i]', 'self.random_matrix[i]'], {}), '(input_list[i], self.random_matrix[i])\n', (1773, 1811), False, 'import torch\n'), ((2007, 2039), 'torch.mul', 'torch.mul', (['return_tensor', 'single'], {}), '(return_tensor, single)\n', (2016, 2039), False, 'import torch\n'), ((4263, 4282), 'torch.exp', 'torch.exp', (['(-entropy)'], {}), '(-entropy)\n', (4272, 4282), False, 'import torch\n'), ((4860, 4872), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (4870, 4872), True, 'import torch.nn as nn\n'), ((5078, 5110), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['m.weight'], {}), '(m.weight)\n', (5100, 5110), True, 'import torch.nn as nn\n'), ((5119, 5141), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (5133, 5141), True, 'import torch.nn as nn\n'), ((5946, 5966), 'torch.sum', 'torch.sum', (['buffer', '(1)'], {}), '(buffer, 1)\n', (5955, 5966), False, 'import torch\n'), ((6939, 6968), 'torch.Tensor', 'torch.Tensor', (['self.n_channels'], {}), '(self.n_channels)\n', (6951, 6968), False, 'import torch\n'), ((9657, 9673), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (9667, 9673), False, 'import torch\n'), ((9942, 9955), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9953, 9955), False, 'from collections import OrderedDict\n'), ((10124, 10155), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (10138, 10155), False, 'from torchvision import datasets, models, transforms\n'), ((10596, 10627), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (10610, 10627), False, 'from torchvision import datasets, models, transforms\n'), ((12919, 12951), 'torchvision.models.vgg11_bn', 'models.vgg11_bn', ([], {'pretrained': 'pret'}), '(pretrained=pret)\n', (12934, 12951), False, 'from torchvision 
import datasets, models, transforms\n'), ((12986, 13015), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': 'pret'}), '(pretrained=pret)\n', (12998, 13015), False, 'from torchvision import datasets, models, transforms\n'), ((13216, 13239), 'torch.FloatTensor', 'torch.FloatTensor', (['[10]'], {}), '([10])\n', (13233, 13239), False, 'import torch\n'), ((13631, 13668), 'torch.nn.Linear', 'nn.Linear', (['inc', 'num_class'], {'bias': '(False)'}), '(inc, num_class, bias=False)\n', (13640, 13668), True, 'import torch.nn as nn\n'), ((13705, 13741), 'torch.nn.Linear', 'nn.Linear', (['inc', 'num_class'], {'bias': '(True)'}), '(inc, num_class, bias=True)\n', (13714, 13741), True, 'import torch.nn as nn\n'), ((14007, 14050), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training', 'p': '(0.1)'}), '(x, training=self.training, p=0.1)\n', (14016, 14050), True, 'import torch.nn.functional as F\n'), ((14163, 14177), 'torch.nn.functional.normalize', 'F.normalize', (['x'], {}), '(x)\n', (14174, 14177), True, 'import torch.nn.functional as F\n'), ((14937, 14980), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training', 'p': '(0.1)'}), '(x, training=self.training, p=0.1)\n', (14946, 14980), True, 'import torch.nn.functional as F\n'), ((15515, 15558), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training', 'p': '(0.1)'}), '(x, training=self.training, p=0.1)\n', (15524, 15558), True, 'import torch.nn.functional as F\n'), ((16072, 16088), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (16082, 16088), False, 'import torch\n'), ((16357, 16370), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (16368, 16370), False, 'from collections import OrderedDict\n'), ((17083, 17126), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training', 'p': '(0.1)'}), '(x, training=self.training, p=0.1)\n', (17092, 17126), True, 'import torch.nn.functional as F\n'), ((17396, 
17439), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training', 'p': '(0.1)'}), '(x, training=self.training, p=0.1)\n', (17405, 17439), True, 'import torch.nn.functional as F\n'), ((18117, 18160), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training', 'p': '(0.1)'}), '(x, training=self.training, p=0.1)\n', (18126, 18160), True, 'import torch.nn.functional as F\n'), ((18430, 18473), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training', 'p': '(0.1)'}), '(x, training=self.training, p=0.1)\n', (18439, 18473), True, 'import torch.nn.functional as F\n'), ((19156, 19199), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training', 'p': '(0.1)'}), '(x, training=self.training, p=0.1)\n', (19165, 19199), True, 'import torch.nn.functional as F\n'), ((1317, 1349), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['m.weight'], {}), '(m.weight)\n', (1339, 1349), True, 'import torch.nn as nn\n'), ((779, 815), 'numpy.exp', 'np.exp', (['(-alpha * iter_num / max_iter)'], {}), '(-alpha * iter_num / max_iter)\n', (785, 815), True, 'import numpy as np\n'), ((4100, 4149), 'numpy.array', 'np.array', (['([[1]] * batch_size + [[0]] * batch_size)'], {}), '([[1]] * batch_size + [[0]] * batch_size)\n', (4108, 4149), True, 'import numpy as np\n'), ((4754, 4778), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {'reduce': '(False)'}), '(reduce=False)\n', (4764, 4778), True, 'import torch.nn as nn\n'), ((4801, 4818), 'torch.sum', 'torch.sum', (['weight'], {}), '(weight)\n', (4810, 4818), False, 'import torch\n'), ((4589, 4613), 'torch.sum', 'torch.sum', (['source_weight'], {}), '(source_weight)\n', (4598, 4613), False, 'import torch\n'), ((4667, 4691), 'torch.sum', 'torch.sum', (['target_weight'], {}), '(target_weight)\n', (4676, 4691), False, 'import torch\n')] |
import matplotlib.pyplot as plt
import numpy as np
from sklearn.manifold import TSNE
import seaborn as sns
import pandas as pd
def plot_metric(metric_dict, label, averaged_plot=True, n=8):
    """Plot a {global_step: value} metric curve.

    When ``averaged_plot`` is true, the series is bucketed into windows of
    ``n`` consecutive points and drawn as mean +/- one standard deviation;
    otherwise the raw values are plotted.
    """
    steps_all = list(metric_dict.keys())
    values_all = list(metric_dict.values())
    if not averaged_plot:
        plt.plot(steps_all, np.array(values_all), label=label)
        return
    means, stds, steps = [], [], []
    for start in range(0, (len(values_all) // n) * n, n):
        # Values may be 0-d tensors or similar; coerce to float first.
        window = [float(v) for v in values_all[start: start + n]]
        means.append(np.mean(window))
        stds.append(np.std(window))
        # Label the window with the step at its midpoint.
        steps.append(steps_all[start + n // 2])
    means = np.array(means)
    stds = np.array(stds)
    plt.plot(steps, means, label=f'{label} averaged over {n} points')
    plt.fill_between(steps, means - stds,
                     means + stds,
                     alpha=0.3, label=f'{label} variance over {n} points')
def show_images_and_reconstructions(images, title, labels = None):
    """Plot a batch of images (monochrome or RGB) in a grid, save it to
    ``figures/<title>.png``, and show it.

    Accepts either a list of channel-first arrays or an (N, H, W, C) array
    (shape sniffing is inherited from the original and admittedly hacky).

    Fixes relative to the original:
    - ``plt.figure()`` replaces ``plt.Figure()``: the latter constructs a
      detached Figure that ``plt.subplot``/``plt.show`` never draw into and
      that ``plt.close`` never actually releases.
    - ``astype(float)`` replaces ``astype(np.float)``; ``np.float`` was
      removed in NumPy 1.24.
    """
    # Just a little hacky check to not make large modifications
    if isinstance(images, list):
        no_images = len(images)
        no_channels = images[0].shape[0]
    else:
        no_images = images.shape[0]
        no_channels = images.shape[-1]
    # Do the plotting on a pyplot-managed figure.
    fig = plt.figure()
    no_rows = np.ceil(np.sqrt(no_images))
    no_cols = np.ceil(no_images / no_rows)
    for img_idx in range(no_images):
        plt.subplot(int(no_rows), int(no_cols), int(img_idx + 1))
        if no_channels == 1:
            plt.imshow(images[img_idx, :, :, 0], cmap="binary")
        else:
            plt.imshow(images[img_idx, :, :, :].astype(float))
        plt.xticks([])
        plt.yticks([])
        if labels is not None:
            plt.title(f"Class is {str(int(labels[img_idx])).zfill(no_channels)}")
    plt.savefig(f'figures/{title}.png')
    # Show the thing ...
    plt.show()
    plt.close(fig)
def show_vae_generated_img(images, title):
    """Plot a batch of VAE-generated images (monochrome or RGB) in a grid,
    save it to ``figures/<title>.png``, and show it.

    Accepts either a list of channel-first arrays or an (N, H, W, C) array.

    Fixes relative to the original:
    - ``plt.figure()`` replaces ``plt.Figure()``: the latter constructs a
      detached Figure that ``plt.subplot``/``plt.show`` never draw into and
      that ``plt.close`` never actually releases.
    - ``astype(float)`` replaces ``astype(np.float)``; ``np.float`` was
      removed in NumPy 1.24.
    """
    # Just a little hacky check to not make large modifications
    if isinstance(images, list):
        no_images = len(images)
        no_channels = images[0].shape[0]
    else:
        no_images = images.shape[0]
        no_channels = images.shape[-1]
    # Do the plotting on a pyplot-managed figure.
    fig = plt.figure()
    no_rows = np.ceil(np.sqrt(no_images))
    no_cols = np.ceil(no_images / no_rows)
    for img_idx in range(no_images):
        plt.subplot(int(no_rows), int(no_cols), int(img_idx + 1))
        if no_channels == 1:
            plt.imshow(images[img_idx, :, :, 0], cmap="binary")
        else:
            plt.imshow(images[img_idx, :, :, :].astype(float))
        plt.xticks([])
        plt.yticks([])
    plt.savefig(f'figures/{title}.png')
    # Show the thing ...
    plt.show()
    plt.close(fig)
def plot_t_sne(latent_vectors_and_classes: tuple):
    """Scatter-plot a 2-D t-SNE embedding of latent vectors, coloured by class.

    Args:
        latent_vectors_and_classes: pair of (latent_vectors, class_labels).
    """
    vectors = latent_vectors_and_classes[0]
    labels = latent_vectors_and_classes[1]
    embedded = TSNE(perplexity=50, learning_rate=100).fit_transform(vectors)
    # Assemble the scatterplot's 'data table'.
    frame = pd.DataFrame(data={'x': embedded[:, 0],
                               'y': embedded[:, 1],
                               'classes': labels})
    sns.scatterplot(data=frame, x='x', y='y', hue='classes', palette='deep')
def main():
    """Smoke test: draw 6000 uniform values and plot them via plot_metric."""
    samples = np.random.uniform(0, 2, 6000)
    metric = {idx: value for idx, value in enumerate(samples)}
    plot_metric(metric, 'Test_plot')
if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.imshow",
"numpy.ceil",
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.Figure",
"matplotlib.pyplot.plot",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.yticks"... | [((1770, 1782), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (1780, 1782), True, 'import matplotlib.pyplot as plt\n'), ((1839, 1867), 'numpy.ceil', 'np.ceil', (['(no_images / no_rows)'], {}), '(no_images / no_rows)\n', (1846, 1867), True, 'import numpy as np\n'), ((2307, 2342), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""figures/{title}.png"""'], {}), "(f'figures/{title}.png')\n", (2318, 2342), True, 'import matplotlib.pyplot as plt\n'), ((2372, 2382), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2380, 2382), True, 'import matplotlib.pyplot as plt\n'), ((2387, 2401), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (2396, 2401), True, 'import matplotlib.pyplot as plt\n'), ((2931, 2943), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (2941, 2943), True, 'import matplotlib.pyplot as plt\n'), ((3000, 3028), 'numpy.ceil', 'np.ceil', (['(no_images / no_rows)'], {}), '(no_images / no_rows)\n', (3007, 3028), True, 'import numpy as np\n'), ((3356, 3391), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""figures/{title}.png"""'], {}), "(f'figures/{title}.png')\n", (3367, 3391), True, 'import matplotlib.pyplot as plt\n'), ((3421, 3431), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3429, 3431), True, 'import matplotlib.pyplot as plt\n'), ((3436, 3450), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3445, 3450), True, 'import matplotlib.pyplot as plt\n'), ((3865, 3885), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'd'}), '(data=d)\n', (3877, 3885), True, 'import pandas as pd\n'), ((3890, 3959), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'df', 'x': '"""x"""', 'y': '"""y"""', 'hue': '"""classes"""', 'palette': '"""deep"""'}), "(data=df, x='x', y='y', hue='classes', palette='deep')\n", (3905, 3959), True, 'import seaborn as sns\n'), ((3986, 4015), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2)', '(6000)'], {}), '(0, 2, 
6000)\n', (4003, 4015), True, 'import numpy as np\n'), ((882, 903), 'numpy.array', 'np.array', (['mean_values'], {}), '(mean_values)\n', (890, 903), True, 'import numpy as np\n'), ((925, 945), 'numpy.array', 'np.array', (['std_values'], {}), '(std_values)\n', (933, 945), True, 'import numpy as np\n'), ((954, 1025), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'mean_values'], {'label': 'f"""{label} averaged over {n} points"""'}), "(steps, mean_values, label=f'{label} averaged over {n} points')\n", (962, 1025), True, 'import matplotlib.pyplot as plt\n'), ((1034, 1167), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['steps', '(mean_values - std_values)', '(mean_values + std_values)'], {'alpha': '(0.3)', 'label': 'f"""{label} variance over {n} points"""'}), "(steps, mean_values - std_values, mean_values + std_values,\n alpha=0.3, label=f'{label} variance over {n} points')\n", (1050, 1167), True, 'import matplotlib.pyplot as plt\n'), ((1805, 1823), 'numpy.sqrt', 'np.sqrt', (['no_images'], {}), '(no_images)\n', (1812, 1823), True, 'import numpy as np\n'), ((2152, 2166), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2162, 2166), True, 'import matplotlib.pyplot as plt\n'), ((2175, 2189), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2185, 2189), True, 'import matplotlib.pyplot as plt\n'), ((2966, 2984), 'numpy.sqrt', 'np.sqrt', (['no_images'], {}), '(no_images)\n', (2973, 2984), True, 'import numpy as np\n'), ((3313, 3327), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (3323, 3327), True, 'import matplotlib.pyplot as plt\n'), ((3336, 3350), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (3346, 3350), True, 'import matplotlib.pyplot as plt\n'), ((333, 349), 'numpy.array', 'np.array', (['metric'], {}), '(metric)\n', (341, 349), True, 'import numpy as np\n'), ((2012, 2063), 'matplotlib.pyplot.imshow', 'plt.imshow', (['images[img_idx, :, :, 0]'], {'cmap': '"""binary"""'}), 
"(images[img_idx, :, :, 0], cmap='binary')\n", (2022, 2063), True, 'import matplotlib.pyplot as plt\n'), ((3173, 3224), 'matplotlib.pyplot.imshow', 'plt.imshow', (['images[img_idx, :, :, 0]'], {'cmap': '"""binary"""'}), "(images[img_idx, :, :, 0], cmap='binary')\n", (3183, 3224), True, 'import matplotlib.pyplot as plt\n'), ((3628, 3666), 'sklearn.manifold.TSNE', 'TSNE', ([], {'perplexity': '(50)', 'learning_rate': '(100)'}), '(perplexity=50, learning_rate=100)\n', (3632, 3666), False, 'from sklearn.manifold import TSNE\n')] |
from ai_ct_scans import phase_correlation
from skimage import draw
import pytest
import numpy as np
import mock
@pytest.fixture()
def circle():
    """A 100x100 float image with a filled circle of radius 25 centred at (50, 50)."""
    circle = np.zeros([100, 100], dtype=float)
    # skimage.draw.circle was deprecated in 0.17 and removed in 0.19;
    # draw.disk((row, col), radius) yields the same pixel coordinates.
    rows, cols = draw.disk((50, 50), 25)
    circle[rows, cols] = 1
    return circle
@pytest.fixture()
def shift_2d():
    """Fixed (row, column) shift applied by the 2D fixtures."""
    return (5, 18)
@pytest.fixture()
def shifted_circle(circle, shift_2d):
    """The circle fixture cyclically rolled by shift_2d along both axes."""
    return np.roll(circle, shift_2d, axis=(0, 1))
def test_align_via_phase_correlation_2d_recovers_shifted_circle(circle, shifted_circle):
    """Aligning the shifted circle to the original must recover it exactly."""
    recovered = phase_correlation.align_via_phase_correlation_2d(
        circle, shifted_circle
    )
    np.testing.assert_array_equal(circle, recovered)
@pytest.fixture()
def sphere():
    """A boolean 50^3 volume containing a sphere of radius 10 centred at (5, 5, 5)."""
    grid_size = 50
    radius = 10
    rows, cols, planes = np.mgrid[0:grid_size, 0:grid_size, 0:grid_size]
    squared_distance = (rows - 5) ** 2 + (cols - 5) ** 2 + (planes - 5) ** 2
    return squared_distance < radius ** 2
@pytest.fixture()
def shift_3d():
    """Fixed per-axis shift applied by the 3D fixtures."""
    return (5, 18, 7)
@pytest.fixture()
def shifted_sphere(sphere, shift_3d):
    """The sphere fixture cyclically rolled by shift_3d along all three axes."""
    return np.roll(sphere, shift_3d, axis=(0, 1, 2))
def test_shift_via_phase_correlation_nd_gets_expected_shift_in_2d(
    circle, shifted_circle, shift_2d
):
    """The recovered 2D shift matches the shift the fixture applied."""
    recovered = phase_correlation.shift_via_phase_correlation_nd(
        [circle, shifted_circle]
    )
    np.testing.assert_array_equal(recovered[1], list(shift_2d))
def test_shift_via_phase_correlation_nd_gets_expected_shift_in_3d(
    sphere, shifted_sphere, shift_3d
):
    """The recovered 3D shift matches the shift the fixture applied."""
    recovered = phase_correlation.shift_via_phase_correlation_nd(
        [sphere, shifted_sphere]
    )
    np.testing.assert_array_equal(recovered[1], list(shift_3d))
def test_shift_via_phase_correlation_nd_deals_with_irregular_image_shapes(
    circle, shifted_circle, shift_2d
):
    """Shift recovery still works when the second image is one pixel smaller."""
    n_rows, n_cols = shifted_circle.shape
    cropped = shifted_circle[: n_rows - 1, : n_cols - 1]
    recovered = phase_correlation.shift_via_phase_correlation_nd([circle, cropped])
    np.testing.assert_array_equal(recovered[1], list(shift_2d))
def test_shift_via_phase_correlation_nd_deals_with_negative_rolled_images(circle):
    """Negative shifts must be recovered as negative values, not wrapped."""
    negative_shift = (-16, -5)
    rolled = np.roll(circle, negative_shift, axis=(0, 1))
    recovered = phase_correlation.shift_via_phase_correlation_nd([circle, rolled])
    np.testing.assert_array_equal(recovered[1], np.array(negative_shift))
@pytest.fixture()
def patched_lmr(monkeypatch):
    """Swap the lmr image-processing helper for a MagicMock so calls can be tracked."""
    lmr_mock = mock.MagicMock()
    monkeypatch.setattr(
        phase_correlation.phase_correlation_image_processing, "lmr", lmr_mock
    )
def test_lmr_not_used_if_apply_lmr_false(patched_lmr, ims_nonlinear_features_2d):
    """apply_lmr=False must skip the lmr preprocessing step entirely."""
    ims, _, _, local_coords, region_widths = ims_nonlinear_features_2d
    phase_correlation.shifts_via_local_region(
        ims,
        local_coords=local_coords[0],
        region_widths=region_widths,
        apply_lmr=False,
    )
    phase_correlation.phase_correlation_image_processing.lmr.assert_not_called()
@pytest.fixture()
def patched_zero_crossings(monkeypatch):
    """Swap the zero_crossings helper for a MagicMock so calls can be tracked."""
    zero_crossings_mock = mock.MagicMock()
    monkeypatch.setattr(
        phase_correlation.phase_correlation_image_processing,
        "zero_crossings",
        zero_crossings_mock,
    )
def test_zero_crossings_not_used_if_apply_zero_crossings_false(
    patched_zero_crossings, ims_nonlinear_features_2d
):
    """apply_zero_crossings=False must skip the zero-crossings step entirely."""
    ims, _, _, local_coords, region_widths = ims_nonlinear_features_2d
    kwargs = dict(
        local_coords=local_coords[0],
        region_widths=region_widths,
        lmr_radius=5,
        apply_zero_crossings=False,
    )
    phase_correlation.shifts_via_local_region(ims, **kwargs)
    phase_correlation.phase_correlation_image_processing.zero_crossings.assert_not_called()
@pytest.fixture()
def ims_nonlinear_features_2d():
    """Two 2D images with two square features, where the second image's
    features are offset by different per-feature shifts (a non-rigid warp).

    Returns (images, per-feature offsets, per-feature start/stop bounds,
    local coordinates near each feature, region widths).
    """
    ims = [np.zeros([150, 100]) for _ in range(2)]
    feature_1_offsets = [3, 4]
    feature_2_offsets = [5, 6]
    feature_1_start_stop = [[10, 20], [10, 20]]
    feature_2_start_stop = [[100, 105], [60, 67]]

    def _stamp(image, start_stop, offsets):
        # Set the (possibly offset) rectangular feature region to 1.
        region = tuple(
            slice(lo + off, hi + off) for (lo, hi), off in zip(start_stop, offsets)
        )
        image[region] = 1

    _stamp(ims[0], feature_1_start_stop, [0, 0])
    _stamp(ims[1], feature_1_start_stop, feature_1_offsets)
    _stamp(ims[0], feature_2_start_stop, [0, 0])
    _stamp(ims[1], feature_2_start_stop, feature_2_offsets)

    feature_offsets = (feature_1_offsets, feature_2_offsets)
    feature_start_stops = (feature_1_start_stop, feature_2_start_stop)
    # A point 5 pixels inside each feature's top-left corner.
    local_coords = (
        np.vstack(
            [np.array(feature_1_start_stop)[:, 0], np.array(feature_2_start_stop)[:, 0]]
        )
        + 5
    )
    region_widths = [15, 15]
    return ims, feature_offsets, feature_start_stops, local_coords, region_widths
@pytest.fixture()
def shifts_via_local_region_2d(ims_nonlinear_features_2d):
    """Local-region shift estimates computed around each of the two 2D features."""
    ims, _, _, local_coords, region_widths = ims_nonlinear_features_2d
    per_feature = [
        phase_correlation.shifts_via_local_region(
            ims, local_coords=coords, region_widths=region_widths, lmr_radius=3
        )
        for coords in (local_coords[0], local_coords[1])
    ]
    return per_feature[0], per_feature[1]
def test_shifts_via_local_region_gets_correct_shifts_2d(
    shifts_via_local_region_2d, ims_nonlinear_features_2d
):
    """Each locally-estimated shift matches the offset applied to that feature."""
    _, expected_offsets, _, _, _ = ims_nonlinear_features_2d
    for shifts, expected in zip(shifts_via_local_region_2d, expected_offsets):
        np.testing.assert_array_equal(shifts[1], expected)
@pytest.fixture()
def ims_nonlinear_features_3d():
    """Two 3D volumes with two cuboid features, where the second volume's
    features are offset by different per-feature shifts (a non-rigid warp).

    Returns (volumes, per-feature offsets, per-feature start/stop bounds,
    local coordinates near each feature, region widths).
    """
    ims = [np.zeros([150, 100, 120]) for _ in range(2)]
    feature_1_offsets = [3, 4, 5]
    feature_2_offsets = [5, 6, 7]
    feature_1_start_stop = [[10, 20], [10, 20], [14, 22]]
    feature_2_start_stop = [[100, 105], [60, 67], [25, 44]]

    def _stamp(volume, start_stop, offsets):
        # Set the (possibly offset) cuboid feature region to 1.
        region = tuple(
            slice(lo + off, hi + off) for (lo, hi), off in zip(start_stop, offsets)
        )
        volume[region] = 1

    _stamp(ims[0], feature_1_start_stop, [0, 0, 0])
    _stamp(ims[1], feature_1_start_stop, feature_1_offsets)
    _stamp(ims[0], feature_2_start_stop, [0, 0, 0])
    _stamp(ims[1], feature_2_start_stop, feature_2_offsets)

    feature_offsets = (feature_1_offsets, feature_2_offsets)
    feature_start_stops = (feature_1_start_stop, feature_2_start_stop)
    # A point 5 voxels inside each feature's low corner.
    local_coords = (
        np.vstack(
            [np.array(feature_1_start_stop)[:, 0], np.array(feature_2_start_stop)[:, 0]]
        )
        + 5
    )
    region_widths = [15, 15]
    return ims, feature_offsets, feature_start_stops, local_coords, region_widths
@pytest.fixture()
def shifts_via_local_region_3d(ims_nonlinear_features_3d):
    """Local-region shift estimates computed around each of the two 3D features."""
    ims, _, _, local_coords, region_widths = ims_nonlinear_features_3d
    per_feature = [
        phase_correlation.shifts_via_local_region(
            ims, local_coords=coords, region_widths=region_widths, lmr_radius=3
        )
        for coords in (local_coords[0], local_coords[1])
    ]
    return per_feature[0], per_feature[1]
def test_shifts_via_local_region_gets_correct_shifts_3d(
    shifts_via_local_region_3d, ims_nonlinear_features_3d
):
    """Each locally-estimated shift matches the offset applied to that feature."""
    _, expected_offsets, _, _, _ = ims_nonlinear_features_3d
    for shifts, expected in zip(shifts_via_local_region_3d, expected_offsets):
        np.testing.assert_array_equal(shifts[1], expected)
@pytest.mark.parametrize("intensity", (0.001, 1000))
def test_shifts_via_local_region_on_extreme_image_intensities_with_noise_2d(
    ims_nonlinear_features_2d, intensity
):
    """Local shift recovery is robust to very dim/bright noisy images."""
    np.random.seed(532)
    ims, feature_offsets, _, local_coords, region_widths = ims_nonlinear_features_2d
    # Add up to 50% random background, then scale to the extreme intensity.
    noisy_ims = []
    for im in ims:
        background = 1 - 0.5 * np.random.rand(*im.shape)
        noisy_ims.append((im + background) * intensity)
    for coords, expected in zip(local_coords, feature_offsets):
        shifts = phase_correlation.shifts_via_local_region(
            noisy_ims,
            local_coords=coords,
            region_widths=region_widths,
            lmr_radius=3,
            zero_crossings_thresh="auto",
        )
        np.testing.assert_array_equal(shifts[1], expected)
@pytest.mark.parametrize("intensity", (0.001, 1000))
def test_shifts_via_local_region_on_extreme_image_intensities_with_noise_3d(
    ims_nonlinear_features_3d, intensity
):
    """Local shift recovery is robust to very dim/bright noisy volumes."""
    np.random.seed(532)
    ims, feature_offsets, _, local_coords, region_widths = ims_nonlinear_features_3d
    # Add up to 50% random background, then scale to the extreme intensity.
    noisy_ims = []
    for im in ims:
        background = 1 - 0.5 * np.random.rand(*im.shape)
        noisy_ims.append((im + background) * intensity)
    for coords, expected in zip(local_coords, feature_offsets):
        shifts = phase_correlation.shifts_via_local_region(
            noisy_ims,
            local_coords=coords,
            region_widths=region_widths,
            lmr_radius=3,
            zero_crossings_thresh="auto",
        )
        np.testing.assert_array_equal(shifts[1], expected)
@pytest.mark.parametrize("n_dims", (2, 3))
def test_shift_nd_fills_border_with_blanks(n_dims):
    """shift_nd must zero-fill the border uncovered by a positive shift."""
    shape = [15 for _ in range(n_dims)]
    shift = [3 for _ in range(n_dims)]
    arr = np.random.rand(*shape)
    shifted = phase_correlation.shift_nd(arr, shift)
    # NumPy requires a tuple (not a list) for multi-axis slice indexing;
    # indexing with a list of slices was deprecated and later removed.
    slices = tuple(np.s_[0:shift_element] for shift_element in shift)
    assert (shifted[slices] == 0).all()
def test_shift_nd_deals_with_zero_shifts_and_negatives():
    """Zero and negative shift components are handled correctly by shift_nd."""
    shift = [3, 0, -1]
    arr = np.random.rand(15, 15, 15)
    shifted = phase_correlation.shift_nd(arr, shift)
    # Borders uncovered by the non-zero components are blanked ...
    assert (shifted[:3, :, :] == 0).all()
    assert (shifted[:, :, -1:] == 0).all()
    # ... and the rest of the volume is the original data, displaced.
    np.testing.assert_array_equal(shifted[3:, :, :-1], arr[:-3, :, 1:])
| [
"ai_ct_scans.phase_correlation.shifts_via_local_region",
"skimage.draw.circle",
"ai_ct_scans.phase_correlation.shift_nd",
"numpy.roll",
"numpy.random.rand",
"ai_ct_scans.phase_correlation.phase_correlation_image_processing.lmr.assert_not_called",
"ai_ct_scans.phase_correlation.align_via_phase_correlatio... | [((115, 131), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (129, 131), False, 'import pytest\n'), ((282, 298), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (296, 298), False, 'import pytest\n'), ((335, 351), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (349, 351), False, 'import pytest\n'), ((726, 742), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (740, 742), False, 'import pytest\n'), ((975, 991), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (989, 991), False, 'import pytest\n'), ((1031, 1047), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1045, 1047), False, 'import pytest\n'), ((2360, 2376), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2374, 2376), False, 'import pytest\n'), ((2904, 2920), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2918, 2920), False, 'import pytest\n'), ((3595, 3611), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (3609, 3611), False, 'import pytest\n'), ((5116, 5132), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (5130, 5132), False, 'import pytest\n'), ((5982, 5998), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (5996, 5998), False, 'import pytest\n'), ((7918, 7934), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (7932, 7934), False, 'import pytest\n'), ((8784, 8835), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""intensity"""', '(0.001, 1000)'], {}), "('intensity', (0.001, 1000))\n", (8807, 8835), False, 'import pytest\n'), ((9781, 9832), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""intensity"""', '(0.001, 1000)'], {}), "('intensity', (0.001, 1000))\n", (9804, 9832), False, 'import pytest\n'), ((10778, 10819), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_dims"""', '(2, 3)'], {}), "('n_dims', (2, 3))\n", (10801, 10819), False, 'import pytest\n'), ((159, 192), 'numpy.zeros', 'np.zeros', (['[100, 100]'], {'dtype': 'float'}), '([100, 
100], dtype=float)\n', (167, 192), True, 'import numpy as np\n'), ((210, 233), 'skimage.draw.circle', 'draw.circle', (['(50)', '(50)', '(25)'], {}), '(50, 50, 25)\n', (221, 233), False, 'from skimage import draw\n'), ((404, 442), 'numpy.roll', 'np.roll', (['circle', 'shift_2d'], {'axis': '(0, 1)'}), '(circle, shift_2d, axis=(0, 1))\n', (411, 442), True, 'import numpy as np\n'), ((576, 648), 'ai_ct_scans.phase_correlation.align_via_phase_correlation_2d', 'phase_correlation.align_via_phase_correlation_2d', (['circle', 'shifted_circle'], {}), '(circle, shifted_circle)\n', (624, 648), False, 'from ai_ct_scans import phase_correlation\n'), ((667, 722), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['circle', 'unshifted_circle'], {}), '(circle, unshifted_circle)\n', (696, 722), True, 'import numpy as np\n'), ((1100, 1141), 'numpy.roll', 'np.roll', (['sphere', 'shift_3d'], {'axis': '(0, 1, 2)'}), '(sphere, shift_3d, axis=(0, 1, 2))\n', (1107, 1141), True, 'import numpy as np\n'), ((1283, 1357), 'ai_ct_scans.phase_correlation.shift_via_phase_correlation_nd', 'phase_correlation.shift_via_phase_correlation_nd', (['[circle, shifted_circle]'], {}), '([circle, shifted_circle])\n', (1331, 1357), False, 'from ai_ct_scans import phase_correlation\n'), ((1541, 1615), 'ai_ct_scans.phase_correlation.shift_via_phase_correlation_nd', 'phase_correlation.shift_via_phase_correlation_nd', (['[sphere, shifted_sphere]'], {}), '([sphere, shifted_sphere])\n', (1589, 1615), False, 'from ai_ct_scans import phase_correlation\n'), ((1920, 1995), 'ai_ct_scans.phase_correlation.shift_via_phase_correlation_nd', 'phase_correlation.shift_via_phase_correlation_nd', (['[circle, shifted_cropped]'], {}), '([circle, shifted_cropped])\n', (1968, 1995), False, 'from ai_ct_scans import phase_correlation\n'), ((2178, 2213), 'numpy.roll', 'np.roll', (['circle', 'shift'], {'axis': '(0, 1)'}), '(circle, shift, axis=(0, 1))\n', (2185, 2213), True, 'import numpy as np\n'), ((2227, 2294), 
'ai_ct_scans.phase_correlation.shift_via_phase_correlation_nd', 'phase_correlation.shift_via_phase_correlation_nd', (['[circle, shifted]'], {}), '([circle, shifted])\n', (2275, 2294), False, 'from ai_ct_scans import phase_correlation\n'), ((2683, 2809), 'ai_ct_scans.phase_correlation.shifts_via_local_region', 'phase_correlation.shifts_via_local_region', (['ims'], {'local_coords': 'local_coords[0]', 'region_widths': 'region_widths', 'apply_lmr': '(False)'}), '(ims, local_coords=local_coords[0],\n region_widths=region_widths, apply_lmr=False)\n', (2724, 2809), False, 'from ai_ct_scans import phase_correlation\n'), ((2824, 2900), 'ai_ct_scans.phase_correlation.phase_correlation_image_processing.lmr.assert_not_called', 'phase_correlation.phase_correlation_image_processing.lmr.assert_not_called', ([], {}), '()\n', (2898, 2900), False, 'from ai_ct_scans import phase_correlation\n'), ((3305, 3456), 'ai_ct_scans.phase_correlation.shifts_via_local_region', 'phase_correlation.shifts_via_local_region', (['ims'], {'local_coords': 'local_coords[0]', 'region_widths': 'region_widths', 'lmr_radius': '(5)', 'apply_zero_crossings': '(False)'}), '(ims, local_coords=local_coords[0],\n region_widths=region_widths, lmr_radius=5, apply_zero_crossings=False)\n', (3346, 3456), False, 'from ai_ct_scans import phase_correlation\n'), ((3504, 3596), 'ai_ct_scans.phase_correlation.phase_correlation_image_processing.zero_crossings.assert_not_called', 'phase_correlation.phase_correlation_image_processing.zero_crossings.assert_not_called', ([], {}), '(\n )\n', (3589, 3596), False, 'from ai_ct_scans import phase_correlation\n'), ((5279, 5402), 'ai_ct_scans.phase_correlation.shifts_via_local_region', 'phase_correlation.shifts_via_local_region', (['ims'], {'local_coords': 'local_coords[0]', 'region_widths': 'region_widths', 'lmr_radius': '(3)'}), '(ims, local_coords=local_coords[0],\n region_widths=region_widths, lmr_radius=3)\n', (5320, 5402), False, 'from ai_ct_scans import phase_correlation\n'), 
((5428, 5551), 'ai_ct_scans.phase_correlation.shifts_via_local_region', 'phase_correlation.shifts_via_local_region', (['ims'], {'local_coords': 'local_coords[1]', 'region_widths': 'region_widths', 'lmr_radius': '(3)'}), '(ims, local_coords=local_coords[1],\n region_widths=region_widths, lmr_radius=3)\n', (5469, 5551), False, 'from ai_ct_scans import phase_correlation\n'), ((5851, 5912), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['shifts_1[1]', 'feature_1_offsets'], {}), '(shifts_1[1], feature_1_offsets)\n', (5880, 5912), True, 'import numpy as np\n'), ((5917, 5978), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['shifts_2[1]', 'feature_2_offsets'], {}), '(shifts_2[1], feature_2_offsets)\n', (5946, 5978), True, 'import numpy as np\n'), ((8081, 8204), 'ai_ct_scans.phase_correlation.shifts_via_local_region', 'phase_correlation.shifts_via_local_region', (['ims'], {'local_coords': 'local_coords[0]', 'region_widths': 'region_widths', 'lmr_radius': '(3)'}), '(ims, local_coords=local_coords[0],\n region_widths=region_widths, lmr_radius=3)\n', (8122, 8204), False, 'from ai_ct_scans import phase_correlation\n'), ((8230, 8353), 'ai_ct_scans.phase_correlation.shifts_via_local_region', 'phase_correlation.shifts_via_local_region', (['ims'], {'local_coords': 'local_coords[1]', 'region_widths': 'region_widths', 'lmr_radius': '(3)'}), '(ims, local_coords=local_coords[1],\n region_widths=region_widths, lmr_radius=3)\n', (8271, 8353), False, 'from ai_ct_scans import phase_correlation\n'), ((8653, 8714), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['shifts_1[1]', 'feature_1_offsets'], {}), '(shifts_1[1], feature_1_offsets)\n', (8682, 8714), True, 'import numpy as np\n'), ((8719, 8780), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['shifts_2[1]', 'feature_2_offsets'], {}), '(shifts_2[1], feature_2_offsets)\n', (8748, 8780), True, 'import numpy as np\n'), ((8961, 8980), 
'numpy.random.seed', 'np.random.seed', (['(532)'], {}), '(532)\n', (8975, 8980), True, 'import numpy as np\n'), ((9237, 9390), 'ai_ct_scans.phase_correlation.shifts_via_local_region', 'phase_correlation.shifts_via_local_region', (['ims'], {'local_coords': 'local_coords[0]', 'region_widths': 'region_widths', 'lmr_radius': '(3)', 'zero_crossings_thresh': '"""auto"""'}), "(ims, local_coords=local_coords[0],\n region_widths=region_widths, lmr_radius=3, zero_crossings_thresh='auto')\n", (9278, 9390), False, 'from ai_ct_scans import phase_correlation\n'), ((9449, 9602), 'ai_ct_scans.phase_correlation.shifts_via_local_region', 'phase_correlation.shifts_via_local_region', (['ims'], {'local_coords': 'local_coords[1]', 'region_widths': 'region_widths', 'lmr_radius': '(3)', 'zero_crossings_thresh': '"""auto"""'}), "(ims, local_coords=local_coords[1],\n region_widths=region_widths, lmr_radius=3, zero_crossings_thresh='auto')\n", (9490, 9602), False, 'from ai_ct_scans import phase_correlation\n'), ((9650, 9711), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['shifts_1[1]', 'feature_1_offsets'], {}), '(shifts_1[1], feature_1_offsets)\n', (9679, 9711), True, 'import numpy as np\n'), ((9716, 9777), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['shifts_2[1]', 'feature_2_offsets'], {}), '(shifts_2[1], feature_2_offsets)\n', (9745, 9777), True, 'import numpy as np\n'), ((9958, 9977), 'numpy.random.seed', 'np.random.seed', (['(532)'], {}), '(532)\n', (9972, 9977), True, 'import numpy as np\n'), ((10234, 10387), 'ai_ct_scans.phase_correlation.shifts_via_local_region', 'phase_correlation.shifts_via_local_region', (['ims'], {'local_coords': 'local_coords[0]', 'region_widths': 'region_widths', 'lmr_radius': '(3)', 'zero_crossings_thresh': '"""auto"""'}), "(ims, local_coords=local_coords[0],\n region_widths=region_widths, lmr_radius=3, zero_crossings_thresh='auto')\n", (10275, 10387), False, 'from ai_ct_scans import phase_correlation\n'), 
((10446, 10599), 'ai_ct_scans.phase_correlation.shifts_via_local_region', 'phase_correlation.shifts_via_local_region', (['ims'], {'local_coords': 'local_coords[1]', 'region_widths': 'region_widths', 'lmr_radius': '(3)', 'zero_crossings_thresh': '"""auto"""'}), "(ims, local_coords=local_coords[1],\n region_widths=region_widths, lmr_radius=3, zero_crossings_thresh='auto')\n", (10487, 10599), False, 'from ai_ct_scans import phase_correlation\n'), ((10647, 10708), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['shifts_1[1]', 'feature_1_offsets'], {}), '(shifts_1[1], feature_1_offsets)\n', (10676, 10708), True, 'import numpy as np\n'), ((10713, 10774), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['shifts_2[1]', 'feature_2_offsets'], {}), '(shifts_2[1], feature_2_offsets)\n', (10742, 10774), True, 'import numpy as np\n'), ((10961, 10983), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (10975, 10983), True, 'import numpy as np\n'), ((10998, 11036), 'ai_ct_scans.phase_correlation.shift_nd', 'phase_correlation.shift_nd', (['arr', 'shift'], {}), '(arr, shift)\n', (11024, 11036), False, 'from ai_ct_scans import phase_correlation\n'), ((11270, 11292), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (11284, 11292), True, 'import numpy as np\n'), ((11307, 11345), 'ai_ct_scans.phase_correlation.shift_nd', 'phase_correlation.shift_nd', (['arr', 'shift'], {}), '(arr, shift)\n', (11333, 11345), False, 'from ai_ct_scans import phase_correlation\n'), ((11435, 11502), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['shifted[3:, :, :-1]', 'arr[:-3, :, 1:]'], {}), '(shifted[3:, :, :-1], arr[:-3, :, 1:])\n', (11464, 11502), True, 'import numpy as np\n'), ((2340, 2355), 'numpy.array', 'np.array', (['shift'], {}), '(shift)\n', (2348, 2355), True, 'import numpy as np\n'), ((2501, 2517), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2515, 2517), False, 'import 
mock\n'), ((3083, 3099), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (3097, 3099), False, 'import mock\n'), ((3656, 3676), 'numpy.zeros', 'np.zeros', (['[150, 100]'], {}), '([150, 100])\n', (3664, 3676), True, 'import numpy as np\n'), ((6043, 6068), 'numpy.zeros', 'np.zeros', (['[150, 100, 120]'], {}), '([150, 100, 120])\n', (6051, 6068), True, 'import numpy as np\n'), ((4897, 4927), 'numpy.array', 'np.array', (['feature_1_start_stop'], {}), '(feature_1_start_stop)\n', (4905, 4927), True, 'import numpy as np\n'), ((4935, 4965), 'numpy.array', 'np.array', (['feature_2_start_stop'], {}), '(feature_2_start_stop)\n', (4943, 4965), True, 'import numpy as np\n'), ((7699, 7729), 'numpy.array', 'np.array', (['feature_1_start_stop'], {}), '(feature_1_start_stop)\n', (7707, 7729), True, 'import numpy as np\n'), ((7737, 7767), 'numpy.array', 'np.array', (['feature_2_start_stop'], {}), '(feature_2_start_stop)\n', (7745, 7767), True, 'import numpy as np\n'), ((9166, 9191), 'numpy.random.rand', 'np.random.rand', (['*im.shape'], {}), '(*im.shape)\n', (9180, 9191), True, 'import numpy as np\n'), ((10163, 10188), 'numpy.random.rand', 'np.random.rand', (['*im.shape'], {}), '(*im.shape)\n', (10177, 10188), True, 'import numpy as np\n')] |
"""
A Farm is a fundamental agent in this simulation. It has a number of functions:
- controlling individual cages
- managing its own finances
- choosing whether to cooperate with other farms belonging to the same organisation
"""
from __future__ import annotations
import copy
import json
from collections import Counter, defaultdict
from typing import Dict, List, Optional, Tuple, cast
import numpy as np
from mypy_extensions import TypedDict
from slim import LoggableMixin, logger
from slim.simulation.cage import Cage
from slim.simulation.config import Config
from slim.JSONEncoders import CustomFarmEncoder
from slim.simulation.lice_population import GrossLiceDistrib, GenoDistrib, GenoDistribDict
from slim.types.QueueTypes import *
from slim.types.TreatmentTypes import Money, Treatment
# Type aliases shared by the farm/organisation code.
# Lice genotype distributions keyed by the date their egg batch hatches.
GenoDistribByHatchDate = Dict[dt.datetime, GenoDistrib]
# One genotype-by-hatch-date mapping per cage.
CageAllocation = List[GenoDistribByHatchDate]
# Loch data for one location: a northing coordinate plus monthly temperatures.
LocationTemps = TypedDict("LocationTemps", {"northing": int, "temperatures": List[float]})
class Farm(LoggableMixin):
    """
    Define a salmon farm containing salmon cages. Over time the salmon in the cages grow and are
    subjected to external infestation pressure from sea lice.

    A farm also schedules treatment and sampling events and exchanges commands
    and responses with its organisation through priority queues.
    """
    def __init__(self, name: int, cfg: Config, initial_lice_pop: Optional[GrossLiceDistrib] = None):
        """
        Create a farm.

        :param name: the id of the farm (used as an index into ``cfg.farms``)
        :param cfg: the farm configuration
        :param initial_lice_pop: if provided, overrides default generated lice population
        """
        super().__init__()
        self.cfg = cfg
        farm_cfg = cfg.farms[name]
        self.farm_cfg = farm_cfg
        self.name = name
        # Farm coordinates; loc_y is treated as the northing when interpolating
        # loch temperatures (see _initialise_temperatures).
        self.loc_x = farm_cfg.farm_location[0]
        self.loc_y = farm_cfg.farm_location[1]
        self.start_date = farm_cfg.farm_start
        # Treatment budget, initialised from the configured maximum.
        self.available_treatments = farm_cfg.max_num_treatments
        self.cages = [Cage(i, cfg, self, initial_lice_pop) for i in range(farm_cfg.n_cages)]  # pytype: disable=wrong-arg-types
        # Monthly mean temperatures interpolated at this farm's northing.
        self.year_temperatures = self._initialise_temperatures(cfg.loch_temperatures)
        # TODO: only for testing purposes
        self._preemptively_assign_treatments(self.farm_cfg.treatment_starts)
        # Queues for commands from / responses to the organisation, plus the
        # farm's own schedule of sampling events.
        self.command_queue: PriorityQueue[FarmCommand] = PriorityQueue()
        self.farm_to_org: PriorityQueue[FarmResponse] = PriorityQueue()
        self.__sampling_events: PriorityQueue[SamplingEvent] = PriorityQueue()
        self.generate_sampling_events()
def __str__(self):
"""
Get a human readable string representation of the farm.
:return: a description of the cage
"""
cages = ", ".join(str(a) for a in self.cages)
return f"id: {self.name}, Cages: {cages}"
def to_json_dict(self, **kwargs):
filtered_vars = vars(self).copy()
del filtered_vars["farm_cfg"]
del filtered_vars["cfg"]
del filtered_vars["logged_data"]
filtered_vars.update(kwargs)
return filtered_vars
def __repr__(self):
filtered_vars = self.to_json_dict()
return json.dumps(filtered_vars, cls=CustomFarmEncoder, indent=4)
def __eq__(self, other):
if not isinstance(other, Farm):
# don't attempt to compare against unrelated types
return NotImplemented
return self.name == other.name
@property
def num_fish(self):
"""Return the number of fish across all cages"""
return sum(cage.num_fish for cage in self.cages)
@property
def lice_population(self):
"""Return the overall lice population in a farm"""
return dict(sum([Counter(cage.lice_population.as_dict()) for cage in self.cages], Counter()))
@property
def lice_genomics(self):
"""Return the overall lice population indexed by geno distribution and stage."""
genomics = defaultdict(lambda: GenoDistrib())
for cage in self.cages:
for stage, value in cage.lice_population.geno_by_lifestage.as_dict().items():
genomics[stage] = genomics[stage] + value
return {k: v.to_json_dict() for k, v in genomics.items()}
def _initialise_temperatures(self, temperatures: np.ndarray) -> np.ndarray:
"""
Calculate the mean sea temperature at the northing coordinate of the farm at
month c_month interpolating data taken from
www.seatemperature.org
:param temperatures: the array of temperatures from January till december. The expected shape is :math:`(2, n)`.
:returns: the estimated temperature at this farm location.
"""
# Schema: 2 rows, first column = northing, remaining 12 columns: temperature starting from Jan
# We assume the first row has the highest northing
x_northing, x_temps = temperatures[0][0], temperatures[0][1:]
y_northing, y_temps = temperatures[1][0], temperatures[1][1:]
degs = (y_temps - x_temps) / abs(y_northing - x_northing)
Ndiff = self.loc_y - y_northing
return np.round(y_temps - Ndiff * degs, 1)
    def generate_treatment_event(self, treatment_type: Treatment, cur_date: dt.datetime
                                 ) -> TreatmentEvent:
        """
        Generate a new treatment event with the correct efficacy based on the given day
        and type.
        :param treatment_type: the type of treatment
        :param cur_date: the current date
        :returns: the treatment event
        """
        # Use the average loch temperature of the current month (index 0 = January).
        cur_month = cur_date.month
        ave_temp = self.year_temperatures[cur_month - 1]
        treatment_cfg = self.cfg.get_treatment(treatment_type)
        delay = treatment_cfg.effect_delay
        # NOTE(review): efficacy is obtained by calling `treatment_cfg.delay(ave_temp)`.
        # The attribute name suggests a delay curve rather than an efficacy curve —
        # confirm against the treatment config API that this is not a typo.
        efficacy = treatment_cfg.delay(ave_temp)
        application_period = treatment_cfg.application_period
        # The event spans [cur_date, cur_date + application_period]; the effect
        # itself only starts `delay` days after application begins.
        return TreatmentEvent(
            cur_date + dt.timedelta(days=delay),
            treatment_type, efficacy,
            cur_date,
            cur_date + dt.timedelta(days=application_period)
        )
def generate_sampling_events(self):
spacing = self.farm_cfg.sampling_spacing
start_date = self.farm_cfg.farm_start
end_date = self.cfg.end_date
for days in range(0, (end_date - start_date).days, spacing):
sampling_event = SamplingEvent(start_date + dt.timedelta(days=days))
self.__sampling_events.put(sampling_event)
def _preemptively_assign_treatments(self, treatment_dates: List[dt.datetime]):
"""
Assign a few treatment dates to cages.
NOTE: Mainly used for testing. May be deprecated when a proper strategy mechanism is in place
:param treatment_dates: the dates when to apply treatment
"""
for treatment_date in treatment_dates:
self.add_treatment(self.farm_cfg.treatment_type, treatment_date)
def add_treatment(self, treatment_type: Treatment, day: dt.datetime) -> bool:
"""
Ask to add a treatment. If a treatment was applied too early or if too many treatments
have been applied so far the request is rejected.
Note that if **at least** one cage is eligible for treatment and the conditions above
are still respected this method will still return True. Eligibility depends
on whether the cage has started already or is fallowing - but that may depend on the type
of chemical treatment applied. Furthermore, no treatment should be applied on cages that are already
undergoing a treatment of the same type. This usually means the actual treatment application period
plus a variable delay period. If no cages are available no treatment can be applied and the function returns
_False_.
:param treatment_type: the treatment type to apply
:param day: the day when to start applying the treatment
:returns: whether the treatment has been added to at least one cage or not.
"""
logger.debug("\t\tFarm {} requests treatment {}".format(self.name, str(treatment_type)))
if self.available_treatments <= 0:
return False
# TODO: no support for treatment combination. See #127
eligible_cages = [cage for cage in self.cages if not
(cage.start_date > day or cage.is_fallowing or cage.is_treated(day))]
if len(eligible_cages) == 0:
logger.debug("\t\tTreatment not scheduled as no cages were eligible")
return False
event = self.generate_treatment_event(treatment_type, day)
for cage in eligible_cages:
cage.treatment_events.put(event)
self.available_treatments -= 1
return True
def ask_for_treatment(self, cur_date: dt.datetime, can_defect=True):
"""
Ask the farm to perform treatment.
The farm will thus respond in the following way:
- choose whether to apply treatment or not (regardless of the actual cage eligibility).
- if yes, which treatment to apply (according to internal evaluations, e.g. increased lice resistance).
The farm is not obliged to tell the organisation whether treatment is being performed.
:param cur_date: the current date
:param can_defect: if True, the farm has a choice to not apply treatment
"""
logger.debug("Asking farm {} to treat".format(self.name))
# TODO: this is extremely simple.
p = [self.farm_cfg.defection_proba, 1 - self.farm_cfg.defection_proba]
want_to_treat = self.cfg.rng.choice([False, True], p=p) if can_defect else True
self.log("Outcome of the vote: %r", is_treating=want_to_treat)
if not want_to_treat:
logger.debug("\tFarm {} refuses to treat".format(self.name))
return
# TODO: implement a strategy to pick a treatment of choice
treatments = list(Treatment)
picked_treatment = treatments[0]
self.add_treatment(picked_treatment, cur_date)
    def update(self,
               cur_date: dt.datetime,
               ext_influx: int,
               ext_pressure_ratios: GenoDistribDict) -> Tuple[GenoDistribByHatchDate, Money]:
        """Update the status of the farm given the growth of fish and change
        in population of parasites. Also distribute the offspring across cages.
        :param cur_date: Current date
        :param ext_influx: the amount of lice that enter a cage
        :param ext_pressure_ratios: the ratio to use for the external pressure
        :returns: a pair of (dictionary of genotype distributions based on hatch date, cost of the update)
        """
        self.clear_log()
        if cur_date >= self.start_date:
            logger.debug("Updating farm {}".format(self.name))
        else:
            logger.debug("Updating farm {} (non-operational)".format(self.name))
        self.log("\tAdding %r new lice from the reservoir", new_reservoir_lice=ext_influx)
        self.log("\tReservoir lice genetic ratios: %s", new_reservoir_lice_ratios=ext_pressure_ratios)
        # Process queued commands and any due sampling reports first.
        self._handle_events(cur_date)
        # get number of lice from reservoir to be put in each cage
        pressures_per_cage = self.get_cage_pressures(ext_influx)
        total_cost = Money("0.00")
        # collate egg batches by hatch time
        eggs_by_hatch_date: GenoDistribByHatchDate = {}
        eggs_log = GenoDistrib()
        for cage in self.cages:
            # update the cage and collect the offspring info
            egg_distrib, hatch_date, cost = cage.update(cur_date,
                                                        pressures_per_cage[cage.id],
                                                        ext_pressure_ratios)
            # A falsy hatch_date means this cage produced no egg batch today.
            if hatch_date:
                # update the total offspring info
                if hatch_date in eggs_by_hatch_date:
                    eggs_by_hatch_date[hatch_date] += egg_distrib
                else:
                    eggs_by_hatch_date[hatch_date] = egg_distrib
                eggs_log += egg_distrib
            # Costs accrue regardless of whether eggs were produced.
            total_cost += cost
        # NOTE(review): "%d" is applied to self.name here, while __str__ formats the
        # name as free text — if farm names are strings this format spec looks wrong;
        # also this is the only self.log call mixing positional args with kwargs. Confirm.
        self.log("\t\tGenerated eggs by farm %d: %s", self.name, eggs=eggs_log)
        return eggs_by_hatch_date, total_cost
def get_cage_pressures(self, external_inflow: int) -> List[int]:
"""Get external pressure divided into cages
:param external_inflow: the total external pressure
:return: List of values of external pressure for each cage
"""
assert len(self.cages) >= 1, "Farm must have at least one cage."
assert external_inflow >= 0, "External pressure cannot be negative."
# assume equal chances for each cage
probs_per_cage = np.full(len(self.cages), 1/len(self.cages))
return list(self.cfg.rng.multinomial(external_inflow * len(self.cages),
probs_per_cage,
size=1)[0])
def get_farm_allocation(self, target_farm: Farm, eggs_by_hatch_date: GenoDistribByHatchDate) -> GenoDistribByHatchDate:
"""Return farm allocation of arrivals, that is a dictionary of genotype distributions based
on hatch date updated to take into account probability of making it to the target farm.
The probability accounts for interfarm water movement (currents) as well as lice egg survival.
:param target_farm: Farm the eggs are travelling to
:param eggs_by_hatch_date: Dictionary of genotype distributions based on hatch date
:return: Updated dictionary of genotype distributions based on hatch date
"""
# base the new survived arrival dictionary on the offspring one
farm_allocation = copy.deepcopy(eggs_by_hatch_date)
for hatch_date, geno_dict in farm_allocation.items():
for genotype, n in geno_dict.items():
# get the interfarm travel probability between the two farms
travel_prob = self.cfg.interfarm_probs[self.name][target_farm.name]
# calculate number of arrivals based on the probability and total
# number of offspring
# NOTE: This works only when the travel probabilities are very low.
# Otherwise there is possibility that total number of arrivals
# would be higher than total number of offspring.
arrivals = self.cfg.rng.poisson(travel_prob * n)
# update the arrival dict
farm_allocation[hatch_date][genotype] = arrivals
return farm_allocation
def get_cage_allocation(self, ncages: int, eggs_by_hatch_date: GenoDistribByHatchDate) -> CageAllocation:
"""Return allocation of eggs for given number of cages.
:param ncages: Number of bins to allocate to
:param eggs_by_hatch_date: Dictionary of genotype distributions based on hatch date
:return: List of dictionaries of genotype distributions based on hatch date per bin
"""
if ncages < 1:
raise Exception("Number of bins must be positive.")
# dummy implmentation - assumes equal probabilities
# for both intercage and interfarm travel
# TODO: complete with actual probabilities
# probs_per_farm = self.cfg.interfarm_probs[self.name]
probs_per_bin = np.full(ncages, 1 / ncages)
# preconstruct the data structure
hatch_list: CageAllocation = [{hatch_date: GenoDistrib() for hatch_date in eggs_by_hatch_date} for n in range(ncages)]
for hatch_date, geno_dict in eggs_by_hatch_date.items():
for genotype in geno_dict:
# generate the bin distribution of this genotype with
# this hatch date
genotype_per_bin = self.cfg.rng.multinomial(geno_dict[genotype],
probs_per_bin,
size=1)[0]
# update the info
for bin_ix, n in enumerate(genotype_per_bin):
hatch_list[bin_ix][hatch_date][genotype] = n
return hatch_list
    def disperse_offspring(self, eggs_by_hatch_date: GenoDistribByHatchDate, farms: List[Farm], cur_date: dt.datetime):
        """Allocate new offspring between the farms and cages.
        Assumes the lice can float freely across a given farm so that
        they are not bound to a single cage while not attached to a fish.
        NOTE: This method is not multiprocessing safe. (why?)
        :param eggs_by_hatch_date: Dictionary of genotype distributions based on hatch date
        :param farms: List of Farm objects
        :param cur_date: Current date of the simulation
        """
        logger.debug("\tDispersing total offspring Farm {}".format(self.name))
        # `farms` is expected to include this farm itself: self-dispersal is
        # handled by the same allocation path as inter-farm dispersal.
        for farm in farms:
            if farm.name == self.name:
                logger.debug("\t\tFarm {} (current):".format(farm.name))
            else:
                logger.debug("\t\tFarm {}:".format(farm.name))
            # allocate eggs to cages
            farm_arrivals = self.get_farm_allocation(farm, eggs_by_hatch_date)
            arrivals_per_cage = self.get_cage_allocation(len(farm.cages), farm_arrivals)
            total, by_cage, by_geno_cage = self.get_cage_arrivals_stats(arrivals_per_cage)
            logger.debug("\t\t\tTotal new eggs = {}".format(total))
            logger.debug("\t\t\tPer cage distribution = {}".format(by_cage))
            self.log("\t\t\tPer cage distribution (as geno) = %s", arrivals_per_cage=by_geno_cage)
            # get the arrival time of the egg batch at the allocated
            # destination
            travel_time = self.cfg.rng.poisson(self.cfg.interfarm_times[self.name][farm.name])
            arrival_date = cur_date + dt.timedelta(days=travel_time)
            # update the cages
            for cage in farm.cages:
                cage.update_arrivals(arrivals_per_cage[cage.id], arrival_date)
@staticmethod
def get_cage_arrivals_stats(cage_arrivals: CageAllocation) -> Tuple[int, List[int], List[GenoDistrib]]:
"""Get stats about the cage arrivals for logging
:param cage_arrivals: List of Dictionaries of genotype distributions based on hatch date.
:return: Tuple representing total number of arrivals, arrival, distribution and genotype distribution by cage
"""
# Basically ignore the hatch dates and sum up the batches
geno_by_cage = [cast(GenoDistrib,
GenoDistrib.batch_sum(list(hatch_dict.values())))
for hatch_dict in cage_arrivals]
gross_by_cage = [geno.gross for geno in geno_by_cage]
return sum(gross_by_cage), gross_by_cage, geno_by_cage
def get_profit(self, cur_date: dt.datetime) -> Money:
"""
Get the current mass of fish that can be resold.
:param cur_date: the current day
:returns: the total profit that can be earned from this farm at the current time
"""
mass_per_cage = [cage.average_fish_mass((cur_date - cage.start_date).days) / 1e3 for cage in self.cages]
return self.cfg.gain_per_kg * Money(sum(mass_per_cage))
    def _handle_events(self, cur_date: dt.datetime):
        """Drain all farm commands due by ``cur_date`` and emit due sample reports.

        Only ``SampleRequestCommand`` is currently understood: it schedules a
        new sampling event at the requested date.
        :param cur_date: the current date
        """
        def cts_command_queue(command):
            # Continuation invoked by pop_from_queue for each due command.
            if isinstance(command, SampleRequestCommand):
                self.__sampling_events.put(SamplingEvent(command.request_date))
        pop_from_queue(self.command_queue, cur_date, cts_command_queue)
        self._report_sample(cur_date)
    def _report_sample(self, cur_date):
        """Send a ``SamplingResponse`` to the organisation for every sampling
        event due by ``cur_date``.

        :param cur_date: the current date
        """
        def cts(_):
            # report the worst across cages
            rate = max(cage.aggregation_rate for cage in self.cages)
            self.farm_to_org.put(SamplingResponse(cur_date, rate))
        pop_from_queue(self.__sampling_events, cur_date, cts)
| [
"copy.deepcopy",
"slim.simulation.lice_population.GenoDistrib",
"json.dumps",
"slim.types.TreatmentTypes.Money",
"slim.simulation.cage.Cage",
"collections.Counter",
"mypy_extensions.TypedDict",
"numpy.full",
"slim.logger.debug",
"numpy.round"
] | [((917, 991), 'mypy_extensions.TypedDict', 'TypedDict', (['"""LocationTemps"""', "{'northing': int, 'temperatures': List[float]}"], {}), "('LocationTemps', {'northing': int, 'temperatures': List[float]})\n", (926, 991), False, 'from mypy_extensions import TypedDict\n'), ((3095, 3153), 'json.dumps', 'json.dumps', (['filtered_vars'], {'cls': 'CustomFarmEncoder', 'indent': '(4)'}), '(filtered_vars, cls=CustomFarmEncoder, indent=4)\n', (3105, 3153), False, 'import json\n'), ((5045, 5080), 'numpy.round', 'np.round', (['(y_temps - Ndiff * degs)', '(1)'], {}), '(y_temps - Ndiff * degs, 1)\n', (5053, 5080), True, 'import numpy as np\n'), ((11235, 11248), 'slim.types.TreatmentTypes.Money', 'Money', (['"""0.00"""'], {}), "('0.00')\n", (11240, 11248), False, 'from slim.types.TreatmentTypes import Money, Treatment\n'), ((11369, 11382), 'slim.simulation.lice_population.GenoDistrib', 'GenoDistrib', ([], {}), '()\n', (11380, 11382), False, 'from slim.simulation.lice_population import GrossLiceDistrib, GenoDistrib, GenoDistribDict\n'), ((13690, 13723), 'copy.deepcopy', 'copy.deepcopy', (['eggs_by_hatch_date'], {}), '(eggs_by_hatch_date)\n', (13703, 13723), False, 'import copy\n'), ((15329, 15356), 'numpy.full', 'np.full', (['ncages', '(1 / ncages)'], {}), '(ncages, 1 / ncages)\n', (15336, 15356), True, 'import numpy as np\n'), ((1891, 1927), 'slim.simulation.cage.Cage', 'Cage', (['i', 'cfg', 'self', 'initial_lice_pop'], {}), '(i, cfg, self, initial_lice_pop)\n', (1895, 1927), False, 'from slim.simulation.cage import Cage\n'), ((8371, 8440), 'slim.logger.debug', 'logger.debug', (['"""\t\tTreatment not scheduled as no cages were eligible"""'], {}), "('\\t\\tTreatment not scheduled as no cages were eligible')\n", (8383, 8440), False, 'from slim import LoggableMixin, logger\n'), ((3709, 3718), 'collections.Counter', 'Counter', ([], {}), '()\n', (3716, 3718), False, 'from collections import Counter, defaultdict\n'), ((3894, 3907), 'slim.simulation.lice_population.GenoDistrib', 
'GenoDistrib', ([], {}), '()\n', (3905, 3907), False, 'from slim.simulation.lice_population import GrossLiceDistrib, GenoDistrib, GenoDistribDict\n'), ((15451, 15464), 'slim.simulation.lice_population.GenoDistrib', 'GenoDistrib', ([], {}), '()\n', (15462, 15464), False, 'from slim.simulation.lice_population import GrossLiceDistrib, GenoDistrib, GenoDistribDict\n')] |
import matplotlib.pyplot as plt
import jieba
import xlrd
from wordcloud import WordCloud, STOPWORDS
from PIL import Image
import numpy as np
class wcd:
    """Build and display a word cloud from tag counts stored in an Excel sheet.

    Fixes over the previous version:
    - ``beTXT`` crashed with AttributeError because ``self.workBook`` was never
      assigned (the ``open_workbook`` call had been commented out); the workbook
      is now opened inside the method, with the path as a parameter.
    - File handles are now closed via context managers instead of being leaked
      through a ``global`` handle.
    - Hard-coded paths became keyword parameters with the old values as defaults.
    """

    def beTXT(self, xlsx_path='../data/TagSupplement2.xlsx', txt_path='wd.txt'):
        """Append every tag of the first sheet to a text file, repeated by its count.

        The sheet layout is assumed to be: column 1 = tag text, column 2 = count,
        with a header in row 0.

        :param xlsx_path: path of the source Excel workbook.
        :param txt_path: path of the text file to append to.
        :return: the path of the written text file.
        """
        work_book = xlrd.open_workbook(xlsx_path)
        all_sheet_names = work_book.sheet_names()
        print(all_sheet_names)
        print(all_sheet_names[0])
        sheet = work_book.sheet_by_index(0)  # sheet indices start at 0
        with open(txt_path, mode='a') as handle:
            for row in range(1, sheet.nrows):
                tag = sheet.cell(row, 1).value
                count = int(sheet.cell(row, 2).value)
                # Repeat the tag `count` times so WordCloud weights it accordingly.
                handle.write((tag + " ") * count)
        return txt_path

    def create(self, txt_path='../map/wd.txt',
               mask_path="u=1302885550,4025528368&fm=26&gp=0.png"):
        """Render and show a word cloud from the given text file.

        :param txt_path: text file with space-separated, count-weighted tags.
        :param mask_path: image whose non-white area shapes the cloud.
        """
        with open(txt_path, 'r') as f:
            txt = f.read()
        mask_pic_array = np.array(Image.open(mask_path))
        plt.figure(figsize=(16, 9))
        # Extend the default stop words with a few domain-specific ones.
        stopwords = set(STOPWORDS)
        stopwords.add("美国")
        stopwords.add("说")
        stopwords.add("没")
        stopwords.add("没有")
        wordcloud = WordCloud(font_path="simsun.ttf",
                              mask=mask_pic_array,
                              stopwords=stopwords,
                              collocations=False,
                              background_color="white").generate(txt)
        plt.imshow(wordcloud, interpolation='bilinear')
        plt.axis("off")
        # plt.savefig('口罩词云.jpg')
        plt.show()
if __name__ == '__main__':
    # Instantiate the class properly: the previous code passed the class
    # object itself as `self` (`x = wcd; x.create(x)`), which only worked by
    # accident and breaks as soon as instance state is introduced.
    wcd().create()
| [
"matplotlib.pyplot.imshow",
"PIL.Image.open",
"wordcloud.WordCloud",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show"
] | [((964, 1016), 'PIL.Image.open', 'Image.open', (['"""u=1302885550,4025528368&fm=26&gp=0.png"""'], {}), "('u=1302885550,4025528368&fm=26&gp=0.png')\n", (974, 1016), False, 'from PIL import Image\n'), ((1042, 1060), 'numpy.array', 'np.array', (['mask_pic'], {}), '(mask_pic)\n', (1050, 1060), True, 'import numpy as np\n'), ((1069, 1096), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (1079, 1096), True, 'import matplotlib.pyplot as plt\n'), ((1526, 1573), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud'], {'interpolation': '"""bilinear"""'}), "(wordcloud, interpolation='bilinear')\n", (1536, 1573), True, 'import matplotlib.pyplot as plt\n'), ((1582, 1597), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1590, 1597), True, 'import matplotlib.pyplot as plt\n'), ((1640, 1650), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1648, 1650), True, 'import matplotlib.pyplot as plt\n'), ((1262, 1387), 'wordcloud.WordCloud', 'WordCloud', ([], {'font_path': '"""simsun.ttf"""', 'mask': 'mask_pic_array', 'stopwords': 'stopwords', 'collocations': '(False)', 'background_color': '"""white"""'}), "(font_path='simsun.ttf', mask=mask_pic_array, stopwords=stopwords,\n collocations=False, background_color='white')\n", (1271, 1387), False, 'from wordcloud import WordCloud, STOPWORDS\n')] |
import numpy as np
from itertools import product
def clip_gradients(in_grads, clip=1):
    """Clamp gradient values element-wise to the interval [-clip, clip]."""
    lower, upper = -clip, clip
    return np.clip(in_grads, lower, upper)
def sigmoid(X):
    """Element-wise logistic sigmoid: 1 / (1 + exp(-X))."""
    exp_neg = np.exp(-X)
    return 1.0 / (1 + exp_neg)
def img2col(data, h_indices, w_indices, k_h, k_w):
    """Rearrange sliding-window patches of a batched image tensor into columns.

    Args:
        data: array of shape (batch, channels, H, W).
        h_indices: iterable of top-left row offsets of the patches.
        w_indices: iterable of top-left column offsets of the patches.
        k_h: patch height.
        k_w: patch width.
    Returns:
        Array of shape (batch, channels * k_h * k_w, len(h_indices) * len(w_indices)),
        with one flattened patch per trailing-axis column (row-major over
        (h_indices, w_indices)).
    """
    batch = data.shape[0]
    indices = list(product(h_indices, w_indices))
    # np.stack requires a real sequence: generator/map input was deprecated in
    # NumPy 1.16 and raises TypeError in recent releases, so materialise a list
    # instead of passing the map object directly.
    patches = [data[:, :, i:i + k_h, j:j + k_w].reshape(batch, -1)
               for (i, j) in indices]
    return np.stack(patches, axis=-1)
| [
"numpy.clip",
"numpy.exp",
"itertools.product"
] | [((99, 129), 'numpy.clip', 'np.clip', (['in_grads', '(-clip)', 'clip'], {}), '(in_grads, -clip, clip)\n', (106, 129), True, 'import numpy as np\n'), ((278, 307), 'itertools.product', 'product', (['h_indices', 'w_indices'], {}), '(h_indices, w_indices)\n', (285, 307), False, 'from itertools import product\n'), ((169, 179), 'numpy.exp', 'np.exp', (['(-X)'], {}), '(-X)\n', (175, 179), True, 'import numpy as np\n')] |
import tensorflow as tf
import os
import sys
import time
import numpy as np
from model.trainer import Trainer
from dataset.data_loader import KaldiMultiDataRandomQueue, KaldiMultiDataSeqQueue, DataOutOfRange
from model.common import l2_scaling
from model.loss import softmax
from model.loss import asoftmax, additive_margin_softmax, additive_angular_margin_softmax
from model.loss import semihard_triplet_loss, angular_triplet_loss, e2e_valid_loss
from misc.utils import substring_in_list, activation_summaries
from six.moves import range
class TrainerMultiInput(Trainer):
"""Trainer for multiple inputs.
The class supports multiple features as the inputs and multiple labels as the outputs.
Useful when we involving bottleneck features, linguistic features or other auxiliary features.
"""
    def __init__(self, params, model_dir, single_cpu=False):
        """
        Args:
            params: Parameters loaded from JSON.
            model_dir: The model directory.
            single_cpu: Run Tensorflow on one cpu. (default = False)
        """
        super(TrainerMultiInput, self).__init__(params, model_dir, single_cpu)
        # In this class, we need auxiliary features to do the feed-forward operation.
        # The auxiliary features are dictionary that contains multiple possible features.
        # When building the network, the auxiliary features are access by their names.
        # To support more features (inputs), please extend the list below.
        # Each dict maps feature name -> placeholder; build() fills them per mode.
        self.train_aux_features = {}
        self.valid_aux_features = {}
        self.pred_aux_features = {}
def entire_network(self, features, params, is_training, reuse_variables):
"""The definition of the entire network.
Args:
features: dict, features["features"] and features["aux_features"]
params: The parameters.
is_training: True if the network is for training.
reuse_variables: Share variables.
:return: The network output and the endpoints (for other usage).
"""
features, endpoints = self.network(features["features"], params, is_training, reuse_variables,
aux_features=features["aux_features"])
endpoints["output"] = features
# Add more components (post-processing) after the main network.
if "feature_norm" in params.dict and params.feature_norm:
assert "feature_scaling_factor" in params.dict, "If feature normalization is applied, scaling factor is necessary."
features = l2_scaling(features, params.feature_scaling_factor)
endpoints["output"] = features
return features, endpoints
    def build(self, mode, dim, loss_type=None, num_speakers=None, noupdate_var_list=None):
        """ Build a network.
        This class accept multiple network inputs so that we can use bottleneck features, linguistic features, etc,
        as the network inputs.
        Args:
            mode: `train`, `valid` or `predict`.
            dim: The dimension of the feature.
            loss_type: Which loss function do we use. Could be None when mode == predict
            num_speakers: The total number of speakers. Used in softmax-like network
            noupdate_var_list: In the fine-tuning, some variables are fixed. The list contains their names (or part of their names).
                               We use `noupdate` rather than `notrain` because some variables are not trainable, e.g.
                               the mean and var in the batchnorm layers.
        """
        assert(mode == "train" or mode == "valid" or mode == "predict")
        is_training = (mode == "train")
        reuse_variables = True if self.is_built else None
        # Create a new path for prediction, since the training may build a tower the support multi-GPUs
        if mode == "predict":
            self.pred_features = tf.placeholder(tf.float32, shape=[None, None, dim], name="pred_features")
            # We also need to initialize other features.
            # We need to specify the dim of the auxiliary features.
            assert "aux_feature_dim" in self.params.dict, "The dim of auxiliary features must be specified as a dict."
            for name in self.params.aux_feature_dim:
                self.pred_aux_features[name] = tf.placeholder(tf.float32,
                                                               shape=[None, None, self.params.aux_feature_dim[name]],
                                                               name="pred_" + name)
            pred_features = {"features": self.pred_features,
                             "aux_features": self.pred_aux_features}
            with tf.name_scope("predict") as scope:
                tf.logging.info("Extract embedding from node %s" % self.params.embedding_node)
                _, endpoints = self.entire_network(pred_features, self.params, is_training, reuse_variables)
                self.embeddings = endpoints[self.params.embedding_node]
                if self.saver is None:
                    self.saver = tf.train.Saver()
            # Prediction needs no loss/optimizer: stop here.
            return
        # global_step should be defined before loss function since some loss functions use this value to tune
        # some internal parameters.
        if self.global_step is None:
            self.global_step = tf.placeholder(tf.int32, name="global_step")
        self.params.dict["global_step"] = self.global_step
        # If new loss function is added, please modify the code.
        self.loss_type = loss_type
        if loss_type == "softmax":
            self.loss_network = softmax
        elif loss_type == "asoftmax":
            self.loss_network = asoftmax
        elif loss_type == "additive_margin_softmax":
            self.loss_network = additive_margin_softmax
        elif loss_type == "additive_angular_margin_softmax":
            self.loss_network = additive_angular_margin_softmax
        elif loss_type == "semihard_triplet_loss":
            self.loss_network = semihard_triplet_loss
        elif loss_type == "angular_triplet_loss":
            self.loss_network = angular_triplet_loss
        else:
            raise NotImplementedError("Not implement %s loss" % self.loss_type)
        if mode == "valid":
            tf.logging.info("Building valid network...")
            assert "aux_feature_dim" in self.params.dict, "The dim of auxiliary features must be specified as a dict."
            for name in self.params.aux_feature_dim:
                self.valid_aux_features[name] = tf.placeholder(tf.float32,
                                                                shape=[None, None, self.params.aux_feature_dim[name]],
                                                                name="valid_" + name)
            self.valid_features = tf.placeholder(tf.float32, shape=[None, None, dim], name="valid_features")
            valid_features = {"features": self.valid_features,
                              "aux_features": self.valid_aux_features}
            self.valid_labels = tf.placeholder(tf.int32, shape=[None, ], name="valid_labels")
            with tf.name_scope("valid") as scope:
                # We can adjust some parameters in the config when we do validation
                # TODO: I'm not sure whether it is necssary to change the margin for the valid set.
                # TODO: compare the performance!
                # Change the margin for the valid set.
                # The margins are zeroed/neutralised during validation and
                # restored right after the loss is built.
                if loss_type == "softmax":
                    pass
                elif loss_type == "asoftmax":
                    train_margin = self.params.asoftmax_m
                    self.params.asoftmax_m = 1
                elif loss_type == "additive_margin_softmax":
                    train_margin = self.params.amsoftmax_m
                    self.params.amsoftmax_m = 0
                elif loss_type == "additive_angular_margin_softmax":
                    train_margin = self.params.arcsoftmax_m
                    self.params.arcsoftmax_m = 0
                elif loss_type == "angular_triplet_loss":
                    # Switch loss to e2e_valid_loss
                    train_loss_network = self.loss_network
                    self.loss_network = e2e_valid_loss
                else:
                    pass
                features, endpoints = self.entire_network(valid_features, self.params, is_training, reuse_variables)
                valid_loss = self.loss_network(features, self.valid_labels, num_speakers, self.params, is_training, reuse_variables)
                # Change the margin back!!!
                if loss_type == "softmax":
                    pass
                elif loss_type == "asoftmax":
                    self.params.asoftmax_m = train_margin
                elif loss_type == "additive_margin_softmax":
                    self.params.amsoftmax_m = train_margin
                elif loss_type == "additive_angular_margin_softmax":
                    self.params.arcsoftmax_m = train_margin
                elif loss_type == "angular_triplet_loss":
                    self.loss_network = train_loss_network
                else:
                    pass
                # We can evaluate other stuff in the valid_ops. Just add the new values to the dict.
                # We may also need to check other values expect for the loss. Leave the task to other functions.
                # During validation, I compute the cosine EER for the final output of the network.
                self.embeddings = endpoints["output"]
                self.valid_ops["raw_valid_loss"] = valid_loss
                # Running mean of the valid loss across batches (tf.metrics).
                mean_valid_loss, mean_valid_loss_op = tf.metrics.mean(valid_loss)
                self.valid_ops["valid_loss"] = mean_valid_loss
                self.valid_ops["valid_loss_op"] = mean_valid_loss_op
                valid_loss_summary = tf.summary.scalar("loss", mean_valid_loss)
                self.valid_summary = tf.summary.merge([valid_loss_summary])
                if self.saver is None:
                    self.saver = tf.train.Saver(max_to_keep=self.params.keep_checkpoint_max)
                if self.valid_summary_writer is None:
                    self.valid_summary_writer = tf.summary.FileWriter(os.path.join(self.model, "eval"), self.sess.graph)
            return
        tf.logging.info("Building training network...")
        self.train_features = tf.placeholder(tf.float32, shape=[None, None, dim], name="train_features")
        assert "aux_feature_dim" in self.params.dict, "The dim of auxiliary features must be specified as a dict."
        for name in self.params.aux_feature_dim:
            self.train_aux_features[name] = tf.placeholder(tf.float32,
                                                            shape=[None, None, self.params.aux_feature_dim[name]],
                                                            name="train_" + name)
        train_features = {"features": self.train_features,
                          "aux_features": self.train_aux_features}
        self.train_labels = tf.placeholder(tf.int32, shape=[None, ], name="train_labels")
        self.learning_rate = tf.placeholder(tf.float32, name="learning_rate")
        if "optimizer" not in self.params.dict:
            # The default optimizer is sgd
            self.params.dict["optimizer"] = "sgd"
        if self.params.optimizer == "sgd":
            if "momentum" in self.params.dict:
                sys.exit("Using sgd as the optimizer and you should not specify the momentum.")
            tf.logging.info("***** Using SGD as the optimizer.")
            opt = tf.train.GradientDescentOptimizer(self.learning_rate, name="optimizer")
        elif self.params.optimizer == "momentum":
            # SGD with momentum
            # It is also possible to use other optimizers, e.g. Adam.
            tf.logging.info("***** Using Momentum as the optimizer.")
            opt = tf.train.MomentumOptimizer(self.learning_rate, self.params.momentum, use_nesterov=self.params.use_nesterov, name="optimizer")
        elif self.params.optimizer == "adam":
            tf.logging.info("***** Using Adam as the optimizer.")
            opt = tf.train.AdamOptimizer(self.learning_rate, name="optimizer")
        else:
            sys.exit("Optimizer %s is not supported." % self.params.optimizer)
        self.optimizer = opt
        # Use name_space here. Create multiple name_spaces if multi-gpus
        # There is a copy in `set_trainable_variables`
        with tf.name_scope("train") as scope:
            features, endpoints = self.entire_network(train_features, self.params, is_training, reuse_variables)
            loss = self.loss_network(features, self.train_labels, num_speakers, self.params, is_training, reuse_variables)
            regularization_loss = tf.losses.get_regularization_loss()
            total_loss = loss + regularization_loss
            # train_summary contains all the summeries we want to inspect.
            # Get the summaries define in the network and loss function.
            # The summeries in the network and loss function are about the network variables.
            self.train_summary = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
            self.train_summary.append(tf.summary.scalar("loss", loss))
            self.train_summary.append(tf.summary.scalar("regularization_loss", regularization_loss))
            # We may have other losses (i.e. penalty term in attention layer)
            penalty_loss = tf.get_collection("PENALTY")
            if len(penalty_loss) != 0:
                penalty_loss = tf.reduce_sum(penalty_loss)
                total_loss += penalty_loss
                self.train_summary.append(tf.summary.scalar("penalty_term", penalty_loss))
            self.total_loss = total_loss
            self.train_summary.append(tf.summary.scalar("total_loss", total_loss))
            self.train_summary.append(tf.summary.scalar("learning_rate", self.learning_rate))
            # The gradient ops is inside the scope to support multi-gpus
            if noupdate_var_list is not None:
                # Keep only the batchnorm update ops that are not frozen.
                old_batchnorm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)
                batchnorm_update_ops = []
                for op in old_batchnorm_update_ops:
                    if not substring_in_list(op.name, noupdate_var_list):
                        batchnorm_update_ops.append(op)
                        tf.logging.info("[Info] Update %s" % op.name)
                    else:
                        tf.logging.info("[Info] Op %s will not be executed" % op.name)
            else:
                batchnorm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)
            if noupdate_var_list is not None:
                # Restrict gradient computation to the non-frozen variables.
                variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
                train_var_list = []
                for v in variables:
                    if not substring_in_list(v.name, noupdate_var_list):
                        train_var_list.append(v)
                        tf.logging.info("[Info] Train %s" % v.name)
                    else:
                        tf.logging.info("[Info] Var %s will not be updated" % v.name)
                grads = opt.compute_gradients(total_loss, var_list=train_var_list)
            else:
                grads = opt.compute_gradients(total_loss)
            # Once the model has been built (even for a tower), we set the flag
            self.is_built = True
        if self.params.clip_gradient:
            grads, vars = zip(*grads)  # compute gradients of variables with respect to loss
            grads_clip, _ = tf.clip_by_global_norm(grads, self.params.clip_gradient_norm)  # l2 norm clipping
            # we follow the instruction in ge2e paper to scale the learning rate for w and b
            # Actually, I wonder that we can just simply set a large value for w (e.g. 20) and fix it.
            # NOTE(review): loss_type "ge2e" is never produced by the mapping above,
            # so this branch looks dead unless self.loss_type is set elsewhere — confirm.
            if self.loss_type == "ge2e":
                # The parameters w and b must be the last variables in the gradients
                grads_clip = grads_clip[:-2] + [0.01 * grad for grad in grads_clip[-2:]]
                # Simply check the position of w and b
                for var in vars[-2:]:
                    assert("w" in var.name or "b" in var.name)
            grads = zip(grads_clip, vars)
        # # The values and gradients are added to summeries
        # for grad, var in grads:
        #     if grad is not None:
        #         self.train_summary.append(tf.summary.histogram(var.op.name + '/gradients', grad))
        #         self.train_summary.append(tf.summary.scalar(var.op.name + '/gradients_norm', tf.norm(grad)))
        self.train_summary.append(activation_summaries(endpoints))
        for var in tf.trainable_variables():
            self.train_summary.append(tf.summary.histogram(var.op.name, var))
        self.train_summary = tf.summary.merge(self.train_summary)
        # Apply gradients only after the batchnorm statistics have been updated.
        with tf.control_dependencies(batchnorm_update_ops):
            self.train_op = opt.apply_gradients(grads)
        # We want to inspect other values during training?
        self.train_ops["loss"] = total_loss
        self.train_ops["raw_loss"] = loss
        # The model saver
        if self.saver is None:
            self.saver = tf.train.Saver(max_to_keep=self.params.keep_checkpoint_max)
        # The training summary writer
        if self.summary_writer is None:
            self.summary_writer = tf.summary.FileWriter(self.model, self.sess.graph)
        return
def train(self, data, spklist, learning_rate, aux_data=None):
"""Train the model.
Args:
data: The training data directory.
spklist: The spklist is a file map speaker name to the index.
learning_rate: The learning rate is passed by the main program. The main program can easily tune the
learning rate according to the validation accuracy or anything else.
aux_data: The auxiliary data directory.
"""
# initialize all variables
self.sess.run(tf.global_variables_initializer())
# curr_step is the real step the training at.
curr_step = 0
# Load the model if we have
if os.path.isfile(os.path.join(self.model, "checkpoint")):
curr_step = self.load()
# The data loader
data_loader = KaldiMultiDataRandomQueue(data, aux_data, spklist,
num_parallel=self.params.num_parallel_datasets,
max_qsize=self.params.max_queue_size,
num_speakers=self.params.num_speakers_per_batch,
num_segments=self.params.num_segments_per_speaker,
min_len=self.params.min_segment_len,
max_len=self.params.max_segment_len,
shuffle=True)
data_loader.start()
epoch = int(curr_step / self.params.num_steps_per_epoch)
for step in range(curr_step % self.params.num_steps_per_epoch, self.params.num_steps_per_epoch):
try:
features, labels = data_loader.fetch()
feed_dict = {self.train_features: features["features"],
self.train_labels: labels,
self.global_step: curr_step,
self.learning_rate: learning_rate}
for name in features:
if name == "features":
continue
feed_dict[self.train_aux_features[name]] = features[name]
if step % self.params.save_summary_steps == 0 or step % self.params.show_training_progress == 0:
train_ops = [self.train_ops, self.train_op]
if step % self.params.save_summary_steps == 0:
train_ops.append(self.train_summary)
start_time = time.time()
train_val = self.sess.run(train_ops, feed_dict=feed_dict)
end_time = time.time()
tf.logging.info(
"Epoch: [%2d] step: [%2d/%2d] time: %.4f s/step, raw loss: %f, total loss: %f"
% (epoch, step, self.params.num_steps_per_epoch, end_time - start_time,
train_val[0]["raw_loss"], train_val[0]["loss"]))
if step % self.params.save_summary_steps == 0:
self.summary_writer.add_summary(train_val[-1], curr_step)
else:
# Only compute optimizer.
_ = self.sess.run(self.train_op, feed_dict=feed_dict)
if step % self.params.save_checkpoints_steps == 0 and curr_step != 0:
self.save(curr_step)
curr_step += 1
except DataOutOfRange:
tf.logging.info("Finished reading features.")
break
data_loader.stop()
self.save(curr_step)
return
def train_tune_lr(self, data, spklist, tune_period=100, aux_data=None):
"""Tune the learning rate.
I think it is better to use sgd to test the learning rate.
According to: https://www.kdnuggets.com/2017/11/estimating-optimal-learning-rate-deep-neural-network.html
Args:
data: The training data directory.
spklist: The spklist is a file map speaker name to the index.
tune_period: How many steps per learning rate.
aux_data: The auxiliary data directory.
"""
# initialize all variables
self.sess.run(tf.global_variables_initializer())
data_loader = KaldiMultiDataRandomQueue(data, aux_data, spklist,
num_parallel=self.params.num_parallel_datasets,
max_qsize=self.params.max_queue_size,
num_speakers=self.params.num_speakers_per_batch,
num_segments=self.params.num_segments_per_speaker,
min_len=self.params.min_segment_len,
max_len=self.params.max_segment_len,
shuffle=True)
data_loader.start()
# The learning rate normally varies from 1e-5 to 1
# Some common values:
# 1. factor = 1.15
# tune_period = 100
# tune_times = 100
init_learning_rate = 1e-5
factor = 1.15
tune_times = 100
fp_lr = open(os.path.join(self.model, "learning_rate_tuning"), "w")
for step in range(tune_period * tune_times):
lr = init_learning_rate * (factor ** (step / tune_period))
features, labels = data_loader.fetch()
feed_dict = {self.train_features: features["features"],
self.train_labels: labels,
self.global_step: 0,
self.learning_rate: lr}
for name in features:
if name == "features":
continue
feed_dict[self.train_aux_features[name]] = features[name]
try:
if step % tune_period == 0:
train_ops = [self.train_ops, self.train_op, self.train_summary]
start_time = time.time()
train_val = self.sess.run(train_ops, feed_dict=feed_dict)
end_time = time.time()
tf.logging.info(
"Epoch: step: %2d time: %.4f s/step, lr: %f, raw loss: %f, total loss: %f" \
% (step, end_time - start_time, lr,
train_val[0]["raw_loss"], train_val[0]["loss"]))
fp_lr.write("%d %f %f\n" % (step, lr, train_val[0]["loss"]))
self.summary_writer.add_summary(train_val[-1], step)
else:
_ = self.sess.run(self.train_op, feed_dict=feed_dict)
except DataOutOfRange:
tf.logging.info("Finished reading features.")
break
data_loader.stop()
fp_lr.close()
return
def valid(self, data, spklist, batch_type="softmax", output_embeddings=False, aux_data=None):
"""Evaluate on the validation set
Args:
data: The training data directory.
spklist: The spklist is a file map speaker name to the index.
batch_type: `softmax` or `end2end`. The batch is `softmax-like` or `end2end-like`.
If the batch is `softmax-like`, each sample are from different speakers;
if the batch is `end2end-like`, the samples are from N speakers with M segments per speaker.
output_embeddings: Set True to output the corresponding embeddings and labels of the valid set.
If output_embeddings, an additional valid metric (e.g. EER) should be computed outside
the function.
aux_data: The auxiliary data directory.
:return: valid_loss, embeddings and labels (None if output_embeddings is False).
"""
# Initialization will reset all the variables in the graph.
# The local variables are also need to be initialized for metrics function.
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())
assert batch_type == "softmax" or batch_type == "end2end", "The batch_type can only be softmax or end2end"
curr_step = 0
# Load the model. The valid function can only be called after training (of course...)
if os.path.isfile(os.path.join(self.model, "checkpoint")):
curr_step = self.load()
else:
tf.logging.info("[Warning] Cannot find model in %s. Random initialization is used in validation." % self.model)
embeddings_val = None
labels_val = None
num_batches = 0
if output_embeddings:
# If we want to output embeddings, the features should be loaded in order
data_loader = KaldiMultiDataSeqQueue(data, aux_data, spklist,
num_parallel=1,
max_qsize=10,
batch_size=self.params.num_speakers_per_batch * self.params.num_segments_per_speaker,
min_len=self.params.min_segment_len,
max_len=self.params.max_segment_len,
shuffle=False)
data_loader.start()
# In this mode, the embeddings and labels will be saved and output. It needs more memory and takes longer
# to process these values.
while True:
try:
if num_batches % 100 == 0:
tf.logging.info("valid step: %d" % num_batches)
features, labels = data_loader.fetch()
feed_dict = {self.valid_features: features["features"],
self.valid_labels: labels,
self.global_step: curr_step}
for name in features:
if name == "features":
continue
feed_dict[self.valid_aux_features[name]] = features[name]
valid_emb_val, valid_labels_val = self.sess.run([self.embeddings, self.valid_labels], feed_dict=feed_dict)
# Save the embeddings and labels
if embeddings_val is None:
embeddings_val = valid_emb_val
labels_val = valid_labels_val
else:
embeddings_val = np.concatenate((embeddings_val, valid_emb_val), axis=0)
labels_val = np.concatenate((labels_val, valid_labels_val), axis=0)
num_batches += 1
except DataOutOfRange:
break
data_loader.stop()
if batch_type == "softmax":
data_loader = KaldiMultiDataSeqQueue(data, aux_data, spklist,
num_parallel=2,
max_qsize=10,
batch_size=self.params.num_speakers_per_batch * self.params.num_segments_per_speaker,
min_len=self.params.min_segment_len,
max_len=self.params.max_segment_len,
shuffle=True)
elif batch_type == "end2end":
data_loader = KaldiMultiDataRandomQueue(data, aux_data, spklist,
num_parallel=2,
max_qsize=10,
num_speakers=self.params.num_speakers_per_batch,
num_segments=self.params.num_segments_per_speaker,
min_len=self.params.min_segment_len,
max_len=self.params.max_segment_len,
shuffle=True)
else:
raise ValueError
data_loader.start()
for _ in range(self.params.valid_max_iterations):
try:
if num_batches % 100 == 0:
tf.logging.info("valid step: %d" % num_batches)
features, labels = data_loader.fetch()
feed_dict = {self.valid_features: features["features"],
self.valid_labels: labels,
self.global_step: curr_step}
for name in features:
if name == "features":
continue
feed_dict[self.valid_aux_features[name]] = features[name]
_ = self.sess.run(self.valid_ops["valid_loss_op"], feed_dict=feed_dict)
num_batches += 1
except DataOutOfRange:
break
data_loader.stop()
loss, summary = self.sess.run([self.valid_ops["valid_loss"], self.valid_summary])
# We only save the summary for the last batch.
self.valid_summary_writer.add_summary(summary, curr_step)
# The valid loss is averaged over all the batches.
tf.logging.info("[Validation %d batches] valid loss: %f" % (num_batches, loss))
# The output embeddings and labels can be used to compute EER or other metrics
return loss, embeddings_val, labels_val
def predict(self, features):
"""Output the embeddings
:return: A numpy array which is the embeddings
"""
if not self.is_loaded:
if os.path.isfile(os.path.join(self.model, "checkpoint")):
self.load()
else:
sys.exit("Cannot find model in %s" % self.model)
rank = len(features["features"].shape)
assert (rank == 2 or rank == 3)
# Expand the feature if the rank is 2
if rank == 2:
for name in features:
features[name] = np.expand_dims(features[name], axis=0)
feed_dict = {self.pred_features: features["features"]}
for name in features:
if name == "features":
continue
feed_dict[self.pred_aux_features[name]] = features[name]
# The shape of the features should be the same except for the last dimension.
assert(features["features"].shape[:-1] == features[name].shape[:-1])
embeddings = self.sess.run(self.embeddings, feed_dict=feed_dict)
if rank == 2:
embeddings = np.squeeze(embeddings, axis=0)
return embeddings
def set_trainable_variables(self, variable_list=None):
"""Set the variables which we want to optimize.
The optimizer will only optimize the variables which contain sub-string in the variable list.
Basically, this is copied from the training path in `build`.
The batchnorm statistics can always be updated?
Args:
variable_list: The model variable contains sub-string in the list will be optimized.
If None, all variables will be optimized.
"""
add_train_summary = []
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
trainable_variables = []
if variable_list is None:
tf.logging.info("[Info] Add all trainable variables to the optimizer.")
trainable_variables = None
else:
for v in variables:
if substring_in_list(v.name, variable_list):
trainable_variables.append(v)
tf.logging.info("[Info] Add %s to trainable list" % v.name)
with tf.name_scope("train") as scope:
grads = self.optimizer.compute_gradients(self.total_loss, var_list=trainable_variables)
if self.params.clip_gradient:
grads, vars = zip(*grads) # compute gradients of variables with respect to loss
grads_clip, _ = tf.clip_by_global_norm(grads, self.params.clip_gradient_norm) # l2 norm clipping
grads = zip(grads_clip, vars)
# The values and gradients are added to summeries
for grad, var in grads:
if grad is not None:
add_train_summary.append(tf.summary.histogram(var.op.name + '/gradients', grad))
add_train_summary.append(tf.summary.scalar(var.op.name + '/gradients_norm', tf.norm(grad)))
if variable_list is None:
trainable_variables = tf.trainable_variables()
for var in trainable_variables:
add_train_summary.append(tf.summary.histogram(var.op.name, var))
self.train_summary = tf.summary.merge([self.train_summary, tf.summary.merge(add_train_summary)])
batchnorm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)
with tf.control_dependencies(batchnorm_update_ops):
self.train_op = self.optimizer.apply_gradients(grads)
def get_finetune_model(self, excluded_list):
"""Start from a pre-trained model and other parameters are initialized using default initializer.
Actually, this function is only called at the first epoch of the fine-tuning, because in succeeded epochs,
we need to fully load the model rather than loading part of the graph.
The pre-trained model is saved in the model directory as index 0.
Backup the pre-trained model and save the new model (with random initialized parameters) as index 0 instead.
Args:
excluded_list: A list. Do NOT restore the parameters in the exclude_list. This is useful in fine-truning
an existing model. We load a part of the pre-trained model and leave the other part
randomly initialized.
Deprecated:
data: The training data directory.
spklist: The spklist is a file map speaker name to the index.
learning_rate: The learning rate is passed by the main program. The main program can easily tune the
learning rate according to the validation accuracy or anything else.
"""
# initialize all variables
self.sess.run(tf.global_variables_initializer())
# Load parts of the model
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
restore_variables = []
for v in variables:
if not substring_in_list(v.name, excluded_list):
restore_variables.append(v)
else:
tf.logging.info("[Info] Ignore %s when loading the checkpoint" % v.name)
finetune_saver = tf.train.Saver(var_list=restore_variables)
ckpt = tf.train.get_checkpoint_state(self.model)
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
finetune_saver.restore(self.sess, os.path.join(self.model, ckpt_name))
# Backup the old files
import glob, shutil
model_checkpoint_path = ckpt.model_checkpoint_path
for filename in glob.glob(model_checkpoint_path + "*"):
shutil.copyfile(filename, filename + '.bak')
# Save the new model. The new model is basically the same with the pre-trained one, while parameters
# NOT in the pre-trained model are random initialized.
# Set the step to 0.
self.save(0)
return
| [
"tensorflow.local_variables_initializer",
"tensorflow.reduce_sum",
"tensorflow.norm",
"model.common.l2_scaling",
"tensorflow.metrics.mean",
"tensorflow.control_dependencies",
"dataset.data_loader.KaldiMultiDataRandomQueue",
"sys.exit",
"tensorflow.clip_by_global_norm",
"tensorflow.placeholder",
... | [((10315, 10362), 'tensorflow.logging.info', 'tf.logging.info', (['"""Building training network..."""'], {}), "('Building training network...')\n", (10330, 10362), True, 'import tensorflow as tf\n'), ((10393, 10467), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, dim]', 'name': '"""train_features"""'}), "(tf.float32, shape=[None, None, dim], name='train_features')\n", (10407, 10467), True, 'import tensorflow as tf\n'), ((11054, 11113), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]', 'name': '"""train_labels"""'}), "(tf.int32, shape=[None], name='train_labels')\n", (11068, 11113), True, 'import tensorflow as tf\n'), ((11145, 11193), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""learning_rate"""'}), "(tf.float32, name='learning_rate')\n", (11159, 11193), True, 'import tensorflow as tf\n'), ((16755, 16779), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (16777, 16779), True, 'import tensorflow as tf\n'), ((16888, 16924), 'tensorflow.summary.merge', 'tf.summary.merge', (['self.train_summary'], {}), '(self.train_summary)\n', (16904, 16924), True, 'import tensorflow as tf\n'), ((18371, 18713), 'dataset.data_loader.KaldiMultiDataRandomQueue', 'KaldiMultiDataRandomQueue', (['data', 'aux_data', 'spklist'], {'num_parallel': 'self.params.num_parallel_datasets', 'max_qsize': 'self.params.max_queue_size', 'num_speakers': 'self.params.num_speakers_per_batch', 'num_segments': 'self.params.num_segments_per_speaker', 'min_len': 'self.params.min_segment_len', 'max_len': 'self.params.max_segment_len', 'shuffle': '(True)'}), '(data, aux_data, spklist, num_parallel=self.params\n .num_parallel_datasets, max_qsize=self.params.max_queue_size,\n num_speakers=self.params.num_speakers_per_batch, num_segments=self.\n params.num_segments_per_speaker, min_len=self.params.min_segment_len,\n max_len=self.params.max_segment_len, shuffle=True)\n', (18396, 18713), 
False, 'from dataset.data_loader import KaldiMultiDataRandomQueue, KaldiMultiDataSeqQueue, DataOutOfRange\n'), ((19145, 19233), 'six.moves.range', 'range', (['(curr_step % self.params.num_steps_per_epoch)', 'self.params.num_steps_per_epoch'], {}), '(curr_step % self.params.num_steps_per_epoch, self.params.\n num_steps_per_epoch)\n', (19150, 19233), False, 'from six.moves import range\n'), ((21839, 22181), 'dataset.data_loader.KaldiMultiDataRandomQueue', 'KaldiMultiDataRandomQueue', (['data', 'aux_data', 'spklist'], {'num_parallel': 'self.params.num_parallel_datasets', 'max_qsize': 'self.params.max_queue_size', 'num_speakers': 'self.params.num_speakers_per_batch', 'num_segments': 'self.params.num_segments_per_speaker', 'min_len': 'self.params.min_segment_len', 'max_len': 'self.params.max_segment_len', 'shuffle': '(True)'}), '(data, aux_data, spklist, num_parallel=self.params\n .num_parallel_datasets, max_qsize=self.params.max_queue_size,\n num_speakers=self.params.num_speakers_per_batch, num_segments=self.\n params.num_segments_per_speaker, min_len=self.params.min_segment_len,\n max_len=self.params.max_segment_len, shuffle=True)\n', (21864, 22181), False, 'from dataset.data_loader import KaldiMultiDataRandomQueue, KaldiMultiDataSeqQueue, DataOutOfRange\n'), ((22884, 22915), 'six.moves.range', 'range', (['(tune_period * tune_times)'], {}), '(tune_period * tune_times)\n', (22889, 22915), False, 'from six.moves import range\n'), ((29869, 29908), 'six.moves.range', 'range', (['self.params.valid_max_iterations'], {}), '(self.params.valid_max_iterations)\n', (29874, 29908), False, 'from six.moves import range\n'), ((30956, 31035), 'tensorflow.logging.info', 'tf.logging.info', (["('[Validation %d batches] valid loss: %f' % (num_batches, loss))"], {}), "('[Validation %d batches] valid loss: %f' % (num_batches, loss))\n", (30971, 31035), True, 'import tensorflow as tf\n'), ((32942, 32993), 'tensorflow.get_collection', 'tf.get_collection', 
(['tf.GraphKeys.TRAINABLE_VARIABLES'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES)\n', (32959, 32993), True, 'import tensorflow as tf\n'), ((34529, 34578), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS', 'scope'], {}), '(tf.GraphKeys.UPDATE_OPS, scope)\n', (34546, 34578), True, 'import tensorflow as tf\n'), ((36046, 36094), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (36063, 36094), True, 'import tensorflow as tf\n'), ((36391, 36433), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'restore_variables'}), '(var_list=restore_variables)\n', (36405, 36433), True, 'import tensorflow as tf\n'), ((36449, 36490), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['self.model'], {}), '(self.model)\n', (36478, 36490), True, 'import tensorflow as tf\n'), ((36511, 36555), 'os.path.basename', 'os.path.basename', (['ckpt.model_checkpoint_path'], {}), '(ckpt.model_checkpoint_path)\n', (36527, 36555), False, 'import os\n'), ((36778, 36816), 'glob.glob', 'glob.glob', (["(model_checkpoint_path + '*')"], {}), "(model_checkpoint_path + '*')\n", (36787, 36816), False, 'import glob, shutil\n'), ((2562, 2613), 'model.common.l2_scaling', 'l2_scaling', (['features', 'params.feature_scaling_factor'], {}), '(features, params.feature_scaling_factor)\n', (2572, 2613), False, 'from model.common import l2_scaling\n'), ((3921, 3994), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, dim]', 'name': '"""pred_features"""'}), "(tf.float32, shape=[None, None, dim], name='pred_features')\n", (3935, 3994), True, 'import tensorflow as tf\n'), ((5349, 5393), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'name': '"""global_step"""'}), "(tf.int32, name='global_step')\n", (5363, 5393), True, 'import tensorflow as tf\n'), ((6289, 6333), 'tensorflow.logging.info', 'tf.logging.info', (['"""Building valid 
network..."""'], {}), "('Building valid network...')\n", (6304, 6333), True, 'import tensorflow as tf\n'), ((6819, 6893), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, dim]', 'name': '"""valid_features"""'}), "(tf.float32, shape=[None, None, dim], name='valid_features')\n", (6833, 6893), True, 'import tensorflow as tf\n'), ((7061, 7120), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]', 'name': '"""valid_labels"""'}), "(tf.int32, shape=[None], name='valid_labels')\n", (7075, 7120), True, 'import tensorflow as tf\n'), ((10677, 10785), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, self.params.aux_feature_dim[name]]', 'name': "('train_' + name)"}), "(tf.float32, shape=[None, None, self.params.aux_feature_dim[\n name]], name='train_' + name)\n", (10691, 10785), True, 'import tensorflow as tf\n'), ((11535, 11587), 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Using SGD as the optimizer."""'], {}), "('***** Using SGD as the optimizer.')\n", (11550, 11587), True, 'import tensorflow as tf\n'), ((11606, 11677), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['self.learning_rate'], {'name': '"""optimizer"""'}), "(self.learning_rate, name='optimizer')\n", (11639, 11677), True, 'import tensorflow as tf\n'), ((12499, 12521), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (12512, 12521), True, 'import tensorflow as tf\n'), ((12802, 12837), 'tensorflow.losses.get_regularization_loss', 'tf.losses.get_regularization_loss', ([], {}), '()\n', (12835, 12837), True, 'import tensorflow as tf\n'), ((13166, 13214), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.SUMMARIES', 'scope'], {}), '(tf.GraphKeys.SUMMARIES, scope)\n', (13183, 13214), True, 'import tensorflow as tf\n'), ((13493, 13521), 'tensorflow.get_collection', 'tf.get_collection', (['"""PENALTY"""'], {}), "('PENALTY')\n", 
(13510, 13521), True, 'import tensorflow as tf\n'), ((15635, 15696), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads', 'self.params.clip_gradient_norm'], {}), '(grads, self.params.clip_gradient_norm)\n', (15657, 15696), True, 'import tensorflow as tf\n'), ((16703, 16734), 'misc.utils.activation_summaries', 'activation_summaries', (['endpoints'], {}), '(endpoints)\n', (16723, 16734), False, 'from misc.utils import substring_in_list, activation_summaries\n'), ((16939, 16984), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['batchnorm_update_ops'], {}), '(batchnorm_update_ops)\n', (16962, 16984), True, 'import tensorflow as tf\n'), ((17270, 17329), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': 'self.params.keep_checkpoint_max'}), '(max_to_keep=self.params.keep_checkpoint_max)\n', (17284, 17329), True, 'import tensorflow as tf\n'), ((17443, 17493), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.model', 'self.sess.graph'], {}), '(self.model, self.sess.graph)\n', (17464, 17493), True, 'import tensorflow as tf\n'), ((18070, 18103), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (18101, 18103), True, 'import tensorflow as tf\n'), ((18245, 18283), 'os.path.join', 'os.path.join', (['self.model', '"""checkpoint"""'], {}), "(self.model, 'checkpoint')\n", (18257, 18283), False, 'import os\n'), ((21781, 21814), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (21812, 21814), True, 'import tensorflow as tf\n'), ((22809, 22857), 'os.path.join', 'os.path.join', (['self.model', '"""learning_rate_tuning"""'], {}), "(self.model, 'learning_rate_tuning')\n", (22821, 22857), False, 'import os\n'), ((25635, 25668), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (25666, 25668), True, 'import tensorflow as tf\n'), ((25692, 25724), 'tensorflow.local_variables_initializer', 
'tf.local_variables_initializer', ([], {}), '()\n', (25722, 25724), True, 'import tensorflow as tf\n'), ((25984, 26022), 'os.path.join', 'os.path.join', (['self.model', '"""checkpoint"""'], {}), "(self.model, 'checkpoint')\n", (25996, 26022), False, 'import os\n'), ((26087, 26208), 'tensorflow.logging.info', 'tf.logging.info', (["('[Warning] Cannot find model in %s. Random initialization is used in validation.'\n % self.model)"], {}), "(\n '[Warning] Cannot find model in %s. Random initialization is used in validation.'\n % self.model)\n", (26102, 26208), True, 'import tensorflow as tf\n'), ((26423, 26690), 'dataset.data_loader.KaldiMultiDataSeqQueue', 'KaldiMultiDataSeqQueue', (['data', 'aux_data', 'spklist'], {'num_parallel': '(1)', 'max_qsize': '(10)', 'batch_size': '(self.params.num_speakers_per_batch * self.params.num_segments_per_speaker)', 'min_len': 'self.params.min_segment_len', 'max_len': 'self.params.max_segment_len', 'shuffle': '(False)'}), '(data, aux_data, spklist, num_parallel=1, max_qsize=\n 10, batch_size=self.params.num_speakers_per_batch * self.params.\n num_segments_per_speaker, min_len=self.params.min_segment_len, max_len=\n self.params.max_segment_len, shuffle=False)\n', (26445, 26690), False, 'from dataset.data_loader import KaldiMultiDataRandomQueue, KaldiMultiDataSeqQueue, DataOutOfRange\n'), ((28537, 28803), 'dataset.data_loader.KaldiMultiDataSeqQueue', 'KaldiMultiDataSeqQueue', (['data', 'aux_data', 'spklist'], {'num_parallel': '(2)', 'max_qsize': '(10)', 'batch_size': '(self.params.num_speakers_per_batch * self.params.num_segments_per_speaker)', 'min_len': 'self.params.min_segment_len', 'max_len': 'self.params.max_segment_len', 'shuffle': '(True)'}), '(data, aux_data, spklist, num_parallel=2, max_qsize=\n 10, batch_size=self.params.num_speakers_per_batch * self.params.\n num_segments_per_speaker, min_len=self.params.min_segment_len, max_len=\n self.params.max_segment_len, shuffle=True)\n', (28559, 28803), False, 'from dataset.data_loader 
import KaldiMultiDataRandomQueue, KaldiMultiDataSeqQueue, DataOutOfRange\n'), ((32297, 32327), 'numpy.squeeze', 'np.squeeze', (['embeddings'], {'axis': '(0)'}), '(embeddings, axis=0)\n', (32307, 32327), True, 'import numpy as np\n'), ((33073, 33144), 'tensorflow.logging.info', 'tf.logging.info', (['"""[Info] Add all trainable variables to the optimizer."""'], {}), "('[Info] Add all trainable variables to the optimizer.')\n", (33088, 33144), True, 'import tensorflow as tf\n'), ((33435, 33457), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (33448, 33457), True, 'import tensorflow as tf\n'), ((33728, 33789), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads', 'self.params.clip_gradient_norm'], {}), '(grads, self.params.clip_gradient_norm)\n', (33750, 33789), True, 'import tensorflow as tf\n'), ((34250, 34274), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (34272, 34274), True, 'import tensorflow as tf\n'), ((34592, 34637), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['batchnorm_update_ops'], {}), '(batchnorm_update_ops)\n', (34615, 34637), True, 'import tensorflow as tf\n'), ((35956, 35989), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (35987, 35989), True, 'import tensorflow as tf\n'), ((36598, 36633), 'os.path.join', 'os.path.join', (['self.model', 'ckpt_name'], {}), '(self.model, ckpt_name)\n', (36610, 36633), False, 'import os\n'), ((36830, 36874), 'shutil.copyfile', 'shutil.copyfile', (['filename', "(filename + '.bak')"], {}), "(filename, filename + '.bak')\n", (36845, 36874), False, 'import glob, shutil\n'), ((4340, 4447), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, self.params.aux_feature_dim[name]]', 'name': "('pred_' + name)"}), "(tf.float32, shape=[None, None, self.params.aux_feature_dim[\n name]], name='pred_' + name)\n", (4354, 4447), True, 'import tensorflow as 
tf\n'), ((4715, 4739), 'tensorflow.name_scope', 'tf.name_scope', (['"""predict"""'], {}), "('predict')\n", (4728, 4739), True, 'import tensorflow as tf\n'), ((4766, 4844), 'tensorflow.logging.info', 'tf.logging.info', (["('Extract embedding from node %s' % self.params.embedding_node)"], {}), "('Extract embedding from node %s' % self.params.embedding_node)\n", (4781, 4844), True, 'import tensorflow as tf\n'), ((6555, 6663), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, self.params.aux_feature_dim[name]]', 'name': "('valid_' + name)"}), "(tf.float32, shape=[None, None, self.params.aux_feature_dim[\n name]], name='valid_' + name)\n", (6569, 6663), True, 'import tensorflow as tf\n'), ((7141, 7163), 'tensorflow.name_scope', 'tf.name_scope', (['"""valid"""'], {}), "('valid')\n", (7154, 7163), True, 'import tensorflow as tf\n'), ((9664, 9691), 'tensorflow.metrics.mean', 'tf.metrics.mean', (['valid_loss'], {}), '(valid_loss)\n', (9679, 9691), True, 'import tensorflow as tf\n'), ((9861, 9903), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'mean_valid_loss'], {}), "('loss', mean_valid_loss)\n", (9878, 9903), True, 'import tensorflow as tf\n'), ((9941, 9979), 'tensorflow.summary.merge', 'tf.summary.merge', (['[valid_loss_summary]'], {}), '([valid_loss_summary])\n', (9957, 9979), True, 'import tensorflow as tf\n'), ((11443, 11522), 'sys.exit', 'sys.exit', (['"""Using sgd as the optimizer and you should not specify the momentum."""'], {}), "('Using sgd as the optimizer and you should not specify the momentum.')\n", (11451, 11522), False, 'import sys\n'), ((11842, 11899), 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Using Momentum as the optimizer."""'], {}), "('***** Using Momentum as the optimizer.')\n", (11857, 11899), True, 'import tensorflow as tf\n'), ((11918, 12047), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['self.learning_rate', 'self.params.momentum'], {'use_nesterov': 
'self.params.use_nesterov', 'name': '"""optimizer"""'}), "(self.learning_rate, self.params.momentum,\n use_nesterov=self.params.use_nesterov, name='optimizer')\n", (11944, 12047), True, 'import tensorflow as tf\n'), ((13253, 13284), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (13270, 13284), True, 'import tensorflow as tf\n'), ((13324, 13385), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""regularization_loss"""', 'regularization_loss'], {}), "('regularization_loss', regularization_loss)\n", (13341, 13385), True, 'import tensorflow as tf\n'), ((13592, 13619), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['penalty_loss'], {}), '(penalty_loss)\n', (13605, 13619), True, 'import tensorflow as tf\n'), ((13834, 13877), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""total_loss"""', 'total_loss'], {}), "('total_loss', total_loss)\n", (13851, 13877), True, 'import tensorflow as tf\n'), ((13917, 13971), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'self.learning_rate'], {}), "('learning_rate', self.learning_rate)\n", (13934, 13971), True, 'import tensorflow as tf\n'), ((14136, 14185), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS', 'scope'], {}), '(tf.GraphKeys.UPDATE_OPS, scope)\n', (14153, 14185), True, 'import tensorflow as tf\n'), ((14650, 14699), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS', 'scope'], {}), '(tf.GraphKeys.UPDATE_OPS, scope)\n', (14667, 14699), True, 'import tensorflow as tf\n'), ((14775, 14826), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES)\n', (14792, 14826), True, 'import tensorflow as tf\n'), ((16819, 16857), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['var.op.name', 'var'], {}), '(var.op.name, var)\n', (16839, 16857), True, 'import tensorflow as tf\n'), ((29147, 29428), 
'dataset.data_loader.KaldiMultiDataRandomQueue', 'KaldiMultiDataRandomQueue', (['data', 'aux_data', 'spklist'], {'num_parallel': '(2)', 'max_qsize': '(10)', 'num_speakers': 'self.params.num_speakers_per_batch', 'num_segments': 'self.params.num_segments_per_speaker', 'min_len': 'self.params.min_segment_len', 'max_len': 'self.params.max_segment_len', 'shuffle': '(True)'}), '(data, aux_data, spklist, num_parallel=2,\n max_qsize=10, num_speakers=self.params.num_speakers_per_batch,\n num_segments=self.params.num_segments_per_speaker, min_len=self.params.\n min_segment_len, max_len=self.params.max_segment_len, shuffle=True)\n', (29172, 29428), False, 'from dataset.data_loader import KaldiMultiDataRandomQueue, KaldiMultiDataSeqQueue, DataOutOfRange\n'), ((31368, 31406), 'os.path.join', 'os.path.join', (['self.model', '"""checkpoint"""'], {}), "(self.model, 'checkpoint')\n", (31380, 31406), False, 'import os\n'), ((31471, 31519), 'sys.exit', 'sys.exit', (["('Cannot find model in %s' % self.model)"], {}), "('Cannot find model in %s' % self.model)\n", (31479, 31519), False, 'import sys\n'), ((31743, 31781), 'numpy.expand_dims', 'np.expand_dims', (['features[name]'], {'axis': '(0)'}), '(features[name], axis=0)\n', (31757, 31781), True, 'import numpy as np\n'), ((33249, 33289), 'misc.utils.substring_in_list', 'substring_in_list', (['v.name', 'variable_list'], {}), '(v.name, variable_list)\n', (33266, 33289), False, 'from misc.utils import substring_in_list, activation_summaries\n'), ((34352, 34390), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['var.op.name', 'var'], {}), '(var.op.name, var)\n', (34372, 34390), True, 'import tensorflow as tf\n'), ((34459, 34494), 'tensorflow.summary.merge', 'tf.summary.merge', (['add_train_summary'], {}), '(add_train_summary)\n', (34475, 34494), True, 'import tensorflow as tf\n'), ((36173, 36213), 'misc.utils.substring_in_list', 'substring_in_list', (['v.name', 'excluded_list'], {}), '(v.name, excluded_list)\n', (36190, 36213), 
False, 'from misc.utils import substring_in_list, activation_summaries\n'), ((36293, 36365), 'tensorflow.logging.info', 'tf.logging.info', (["('[Info] Ignore %s when loading the checkpoint' % v.name)"], {}), "('[Info] Ignore %s when loading the checkpoint' % v.name)\n", (36308, 36365), True, 'import tensorflow as tf\n'), ((5098, 5114), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5112, 5114), True, 'import tensorflow as tf\n'), ((10052, 10111), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': 'self.params.keep_checkpoint_max'}), '(max_to_keep=self.params.keep_checkpoint_max)\n', (10066, 10111), True, 'import tensorflow as tf\n'), ((12102, 12155), 'tensorflow.logging.info', 'tf.logging.info', (['"""***** Using Adam as the optimizer."""'], {}), "('***** Using Adam as the optimizer.')\n", (12117, 12155), True, 'import tensorflow as tf\n'), ((12174, 12234), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate'], {'name': '"""optimizer"""'}), "(self.learning_rate, name='optimizer')\n", (12196, 12234), True, 'import tensorflow as tf\n'), ((12261, 12327), 'sys.exit', 'sys.exit', (["('Optimizer %s is not supported.' % self.params.optimizer)"], {}), "('Optimizer %s is not supported.' 
% self.params.optimizer)\n", (12269, 12327), False, 'import sys\n'), ((13705, 13752), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""penalty_term"""', 'penalty_loss'], {}), "('penalty_term', penalty_loss)\n", (13722, 13752), True, 'import tensorflow as tf\n'), ((20083, 20094), 'time.time', 'time.time', ([], {}), '()\n', (20092, 20094), False, 'import time\n'), ((20204, 20215), 'time.time', 'time.time', ([], {}), '()\n', (20213, 20215), False, 'import time\n'), ((20236, 20465), 'tensorflow.logging.info', 'tf.logging.info', (["('Epoch: [%2d] step: [%2d/%2d] time: %.4f s/step, raw loss: %f, total loss: %f'\n % (epoch, step, self.params.num_steps_per_epoch, end_time - start_time,\n train_val[0]['raw_loss'], train_val[0]['loss']))"], {}), "(\n 'Epoch: [%2d] step: [%2d/%2d] time: %.4f s/step, raw loss: %f, total loss: %f'\n % (epoch, step, self.params.num_steps_per_epoch, end_time - start_time,\n train_val[0]['raw_loss'], train_val[0]['loss']))\n", (20251, 20465), True, 'import tensorflow as tf\n'), ((21029, 21074), 'tensorflow.logging.info', 'tf.logging.info', (['"""Finished reading features."""'], {}), "('Finished reading features.')\n", (21044, 21074), True, 'import tensorflow as tf\n'), ((23609, 23620), 'time.time', 'time.time', ([], {}), '()\n', (23618, 23620), False, 'import time\n'), ((23731, 23742), 'time.time', 'time.time', ([], {}), '()\n', (23740, 23742), False, 'import time\n'), ((23763, 23952), 'tensorflow.logging.info', 'tf.logging.info', (["('Epoch: step: %2d time: %.4f s/step, lr: %f, raw loss: %f, total loss: %f' %\n (step, end_time - start_time, lr, train_val[0]['raw_loss'], train_val[0\n ]['loss']))"], {}), "(\n 'Epoch: step: %2d time: %.4f s/step, lr: %f, raw loss: %f, total loss: %f'\n % (step, end_time - start_time, lr, train_val[0]['raw_loss'],\n train_val[0]['loss']))\n", (23778, 23952), True, 'import tensorflow as tf\n'), ((24318, 24363), 'tensorflow.logging.info', 'tf.logging.info', (['"""Finished reading features."""'], {}), 
"('Finished reading features.')\n", (24333, 24363), True, 'import tensorflow as tf\n'), ((29990, 30037), 'tensorflow.logging.info', 'tf.logging.info', (["('valid step: %d' % num_batches)"], {}), "('valid step: %d' % num_batches)\n", (30005, 30037), True, 'import tensorflow as tf\n'), ((33361, 33420), 'tensorflow.logging.info', 'tf.logging.info', (["('[Info] Add %s to trainable list' % v.name)"], {}), "('[Info] Add %s to trainable list' % v.name)\n", (33376, 33420), True, 'import tensorflow as tf\n'), ((34017, 34071), 'tensorflow.summary.histogram', 'tf.summary.histogram', (["(var.op.name + '/gradients')", 'grad'], {}), "(var.op.name + '/gradients', grad)\n", (34037, 34071), True, 'import tensorflow as tf\n'), ((10236, 10268), 'os.path.join', 'os.path.join', (['self.model', '"""eval"""'], {}), "(self.model, 'eval')\n", (10248, 10268), False, 'import os\n'), ((14307, 14352), 'misc.utils.substring_in_list', 'substring_in_list', (['op.name', 'noupdate_var_list'], {}), '(op.name, noupdate_var_list)\n', (14324, 14352), False, 'from misc.utils import substring_in_list, activation_summaries\n'), ((14434, 14479), 'tensorflow.logging.info', 'tf.logging.info', (["('[Info] Update %s' % op.name)"], {}), "('[Info] Update %s' % op.name)\n", (14449, 14479), True, 'import tensorflow as tf\n'), ((14530, 14592), 'tensorflow.logging.info', 'tf.logging.info', (["('[Info] Op %s will not be executed' % op.name)"], {}), "('[Info] Op %s will not be executed' % op.name)\n", (14545, 14592), True, 'import tensorflow as tf\n'), ((14927, 14971), 'misc.utils.substring_in_list', 'substring_in_list', (['v.name', 'noupdate_var_list'], {}), '(v.name, noupdate_var_list)\n', (14944, 14971), False, 'from misc.utils import substring_in_list, activation_summaries\n'), ((15046, 15089), 'tensorflow.logging.info', 'tf.logging.info', (["('[Info] Train %s' % v.name)"], {}), "('[Info] Train %s' % v.name)\n", (15061, 15089), True, 'import tensorflow as tf\n'), ((15140, 15201), 'tensorflow.logging.info', 
'tf.logging.info', (["('[Info] Var %s will not be updated' % v.name)"], {}), "('[Info] Var %s will not be updated' % v.name)\n", (15155, 15201), True, 'import tensorflow as tf\n'), ((27276, 27323), 'tensorflow.logging.info', 'tf.logging.info', (["('valid step: %d' % num_batches)"], {}), "('valid step: %d' % num_batches)\n", (27291, 27323), True, 'import tensorflow as tf\n'), ((28193, 28248), 'numpy.concatenate', 'np.concatenate', (['(embeddings_val, valid_emb_val)'], {'axis': '(0)'}), '((embeddings_val, valid_emb_val), axis=0)\n', (28207, 28248), True, 'import numpy as np\n'), ((28286, 28340), 'numpy.concatenate', 'np.concatenate', (['(labels_val, valid_labels_val)'], {'axis': '(0)'}), '((labels_val, valid_labels_val), axis=0)\n', (28300, 28340), True, 'import numpy as np\n'), ((34165, 34178), 'tensorflow.norm', 'tf.norm', (['grad'], {}), '(grad)\n', (34172, 34178), True, 'import tensorflow as tf\n')] |
"""Utility functions for the qp package"""
import numpy as np
from scipy import stats as sps
from scipy.interpolate import interp1d
import sys
epsilon = sys.float_info.epsilon
infty = sys.float_info.max * epsilon
lims = (epsilon, 1.)

# Codes returned by get_eval_case() describing how (x, row) inputs are shaped
CASE_PRODUCT = 0
CASE_FACTOR = 1
CASE_2D = 2
CASE_FLAT = 3


def safelog(arr, threshold=epsilon):
    """
    Take the natural logarithm of an array that may contain non-positive values.

    Parameters
    ----------
    arr : numpy.ndarray, float
        Values to be logged
    threshold : float
        Small, positive value substituted for zeros and negative numbers

    Returns
    -------
    logged : numpy.ndarray
        Logarithms, with the threshold approximation in place of
        zeros and negative numbers
    """
    # Clipping first guarantees every input to np.log is strictly positive
    clipped = np.array(arr).clip(threshold, np.inf)
    return np.log(clipped)
_ = """
def normalize_quantiles(in_data, threshold=epsilon, vb=False):
Evaluates PDF from quantiles including endpoints from linear extrapolation
Parameters
----------
in_data: tuple, numpy.ndarray, float
tuple of CDF values iy corresponding to quantiles and the points x at
which those CDF values are achieved
threshold: float, optional
optional minimum threshold for PDF
vb: boolean, optional
be careful and print progress to stdout?
Returns
-------
out_data: tuple, ndarray, float
tuple of values x at which CDF is achieved, including extrema, and
normalized PDF values y at x
(iy, x) = in_data
(xs, ys) = evaluate_quantiles((iy, x), vb=vb)
# xs = xs[1:-1]
# ys = ys[1:-1]
x_min = xs[0] - 2 * iy[0] / ys[0]
x_max = xs[-1] + 2 * (1. - iy[-1]) / ys[-1]
xs = sandwich(xs, (x_min, x_max))
ys = sandwich(ys, (threshold, threshold))
out_data = (xs, ys)
return out_data
"""
def edge_to_center(edges):
    """Return the midpoints of a set of bins given the bin edges"""
    return 0.5 * (edges[:-1] + edges[1:])
def bin_widths(edges):
    """Return the widths of a set of bins given the bin edges"""
    # np.diff is exactly edges[1:] - edges[:-1]
    return np.diff(edges)
def get_bin_indices(bins, x):
    """Return the bin indices for a set of values

    Parameters
    ----------
    bins : array_like (N+1)
        The bin edges
    x : array_like
        The values to locate in the binning

    Returns
    -------
    idx : array_like
        Bin index for each value, clipped to [0, N-1]
    mask : array_like, bool
        True where the value actually falls inside the binning

    Notes
    -----
    If the bins are equal width this uses arithmetic,
    if the bins are not equal width this uses a binary search.
    """
    widths = bin_widths(bins)
    n_bins = np.size(bins) - 1
    if np.allclose(widths, widths[0]):
        # Equal-width bins: direct arithmetic, O(1) per value
        idx = np.atleast_1d(np.floor((x - bins[0]) / widths[0]).astype(int))
    else:
        # Unequal bins: binary search; searchsorted gives the right edge, so shift by one
        idx = np.atleast_1d(np.searchsorted(bins, x, side='left') - 1)
    mask = (idx >= 0) * (idx < bins.size - 1)
    # Zero the out-of-range indices; ~mask is the idiomatic boolean inverse
    # (the original 1-mask mixed int and bool arithmetic)
    np.putmask(idx, ~mask, 0)
    xshape = np.shape(x)
    return idx.reshape(xshape).clip(0, n_bins - 1), mask.reshape(xshape)
def normalize_interp1d(xvals, yvals):
    """
    Normalize a set of 1D interpolators

    Parameters
    ----------
    xvals : array-like
        X-values used for the interpolation
    yvals : array-like
        Y-values used for the interpolation

    Returns
    -------
    ynorm : array-like
        Normalized y-vals
    """
    # Per-row Riemann-style sum: sum_i y[i+1] * (x[i+1] - x[i]); the expression
    # is kept in its original expanded form so floating-point results match
    integrals = np.sum(xvals[:,1:]*yvals[:,1:] - xvals[:,:-1]*yvals[:,1:], axis=1)
    # Divide each row by its own integral
    return yvals / integrals[:, np.newaxis]
def build_kdes(samples, **kwargs):
    """
    Build a set of Gaussian Kernel Density Estimates

    Parameters
    ----------
    samples : array-like
        Sample values, one row per PDF

    Keywords
    --------
    Passed to the `scipy.stats.gaussian_kde` constructor

    Returns
    -------
    kdes : list of `scipy.stats.gaussian_kde` objects
    """
    kdes = []
    for sample_row in samples:
        kdes.append(sps.gaussian_kde(sample_row, **kwargs))
    return kdes
def evaluate_kdes(xvals, kdes):
    """
    Evaluate a set of kdes

    Parameters
    ----------
    xvals : array_like
        X-values at which to evaluate
    kdes : list of `sps.gaussian_kde`
        The kernel density estimates

    Returns
    -------
    yvals : array_like
        The kdes evaluated at xvals, one row per kde
    """
    rows = []
    for kde in kdes:
        rows.append(kde(xvals))
    return np.vstack(rows)
def get_eval_case(x, row):
    """ Figure out which of the various input formats scipy.stats has passed us

    Parameters
    ----------
    x : array_like
        Pdf x-vals
    row : array_like
        Pdf row indices

    Returns
    -------
    case : `int`
        The case code
    xx : array_like
        The x-values properly shaped
    rr : array_like
        The y-values, properly shaped

    Notes
    -----
    The cases are:

    CASE_FLAT : x, row have shapes (n) , (n) and do not factor
    CASE_FACTOR : x, row can be factors to shapes (1, nx) and (npdf, 1)
    CASE_PRODUCT : x, row have shapes (1, nx) and (npdf, 1)
    CASE_2D : x, row have shapes (npdf, nx) and (npdf, nx)
    """
    nd_x = np.ndim(x)
    nd_row = np.ndim(row)
    #if nd_x > 2 or nd_row > 2: #pragma: no cover
    #    raise ValueError("Too many dimensions: x(%s), row(%s)" % (np.shape(x), np.shape(row)))
    # 2-d x paired with non-1-d row: already in the (npdf, nx) layout
    if nd_x >= 2 and nd_row != 1:
        return CASE_2D, x, row
    # 2-d x with a flat row is ambiguous and rejected outright
    if nd_x >= 2 and nd_row == 1: #pragma: no cover
        raise ValueError("Dimension mismatch: x(%s), row(%s)" % (np.shape(x), np.shape(row)))
    # row already shaped (npdf, 1): a flat x is shared across all pdfs
    if nd_row >= 2:
        return CASE_PRODUCT, x, row
    # Scalars cannot be factored any further
    if np.size(x) == 1 or np.size(row) == 1:
        return CASE_FLAT, x, row
    # Both flat: try to factor into unique x-values times unique row indices
    xx = np.unique(x)
    rr = np.unique(row)
    # Keep the original (possibly unsorted) arrays when nothing was duplicated
    if np.size(xx) == np.size(x):
        xx = x
    if np.size(rr) == np.size(row):
        rr = row
    # If the unique sizes do not multiply out to the input size, the pairing
    # is irregular and must be evaluated element-by-element
    if np.size(xx) * np.size(rr) != np.size(x):
        return CASE_FLAT, x, row
    # Factored: return unique x plus rows reshaped to (npdf, 1)
    return CASE_FACTOR, xx, np.expand_dims(rr, -1)
def evaluate_hist_x_multi_y_flat(x, row, bins, vals, derivs=None): #pragma: no cover
    """
    Evaluate a set of values from histograms

    Parameters
    ----------
    x : array_like (n)
        X values to interpolate at
    row : array_like (n)
        Which rows to interpolate at
    bins : array_like (N+1)
        'x' bin edges
    vals : array_like (npdf, N)
        'y' bin contents
    derivs : array_like (npdf, N), optional
        'y' bin slopes; when given, a linear in-bin correction is applied

    Returns
    -------
    out : array_like (n)
        The histogram values
    """
    assert np.ndim(x) < 2 and np.ndim(row) < 2
    idx, mask = get_bin_indices(bins, x)
    # In-bin offsets are only meaningful when slopes were supplied
    if derivs is None:
        deltas = np.zeros(idx.shape)
    else:
        deltas = x - bins[idx]

    def _one_value(bin_idx, ok, pdf_idx, delta):
        # Entries falling outside the binning evaluate to zero
        if derivs is None:
            return np.where(ok, vals[pdf_idx, bin_idx], 0)
        return np.where(ok, vals[pdf_idx, bin_idx] + delta * derivs[pdf_idx, bin_idx], 0)

    return np.vectorize(_one_value)(idx, mask, row, deltas)
def evaluate_hist_x_multi_y_product(x, row, bins, vals, derivs=None): #pragma: no cover
    """
    Evaluate a set of values from histograms

    Parameters
    ----------
    x : array_like (npts)
        X values to interpolate at, shared by all requested pdfs
    row : array_like (npdf, 1)
        Which rows to interpolate at
    bins : array_like (N+1)
        'x' bin edges
    vals : array_like (npdf, N)
        'y' bin contents
    derivs : array_like (npdf, N), optional
        'y' bin slopes; when given, a linear in-bin correction is applied

    Returns
    -------
    out : array_like (npdf, npts)
        The histogram values
    """
    idx, mask0 = get_bin_indices(bins, x)
    # Broadcast the validity mask across every requested pdf row
    mask = np.ones(row.shape) * mask0
    pdf_rows = np.squeeze(row)
    picked = vals[:, idx][pdf_rows]
    if derivs is None:
        return np.where(mask, picked, 0)
    deltas = x - bins[idx]
    return np.where(mask, picked + deltas * derivs[:, idx][pdf_rows], 0)
def evaluate_hist_x_multi_y_2d(x, row, bins, vals, derivs=None): #pragma: no cover
    """
    Evaluate a set of values from histograms

    Parameters
    ----------
    x : array_like (npdf, npts)
        X values to interpolate at
    row : array_like (npdf, 1)
        Which rows to interpolate at
    bins : array_like (N+1)
        'x' bin edges
    vals : array_like (npdf, N)
        'y' bin contents
    derivs : array_like (npdf, N), optional
        'y' bin slopes; when given, a linear in-bin correction is applied

    Returns
    -------
    out : array_like (npdf, npts)
        The histogram values
    """
    assert np.ndim(x) >= 2 and np.ndim(row) >= 2
    idx, mask = get_bin_indices(bins, x)
    # In-bin offsets are only meaningful when slopes were supplied
    if derivs is None:
        deltas = np.zeros(idx.shape)
    else:
        deltas = x - bins[idx]

    def _one_value(bin_idx, ok, pdf_idx, delta):
        # Entries falling outside the binning evaluate to zero
        if derivs is None:
            return np.where(ok, vals[pdf_idx, bin_idx], 0)
        return np.where(ok, vals[pdf_idx, bin_idx] + delta * derivs[pdf_idx, bin_idx], 0)

    return np.vectorize(_one_value)(idx, mask, row, deltas)
def evaluate_hist_x_multi_y(x, row, bins, vals, derivs=None):
    """
    Evaluate a set of values from histograms

    Parameters
    ----------
    x : array_like
        X values to interpolate at
    row : array_like
        Which rows to interpolate at
    bins : array_like (N+1)
        'x' bin edges
    vals : array_like (npdf, N)
        'y' bin contents

    Returns
    -------
    out : array_like
        The histogram values

    Notes
    -----
    Dispatches to the flat / product / 2d implementation depending on how
    `x` and `row` are shaped (see `get_eval_case`).
    """
    case_idx, xx, rr = get_eval_case(x, row)
    if case_idx == CASE_2D:
        return evaluate_hist_x_multi_y_2d(xx, rr, bins, vals, derivs)
    if case_idx in (CASE_PRODUCT, CASE_FACTOR):
        return evaluate_hist_x_multi_y_product(xx, rr, bins, vals, derivs)
    return evaluate_hist_x_multi_y_flat(xx, rr, bins, vals, derivs)
def evaluate_hist_multi_x_multi_y_flat(x, row, bins, vals, derivs=None): #pragma: no cover
    """
    Evaluate a set of values from histograms with per-pdf binnings

    Parameters
    ----------
    x : array_like (n)
        X values to interpolate at
    row : array_like (n)
        Which rows to interpolate at
    bins : array_like (npdf, N+1)
        'x' bin edges, one binning per pdf
    vals : array_like (npdf, N)
        'y' bin contents
    derivs : array_like (npdf, N), optional
        'y' bin slopes; when given, a linear in-bin correction is applied

    Returns
    -------
    out : array_like (n)
        The histogram values
    """
    def _one_value(xv, rv):
        # Each pdf row has its own binning
        row_bins = bins[rv]
        idx, ok = get_bin_indices(row_bins, xv)
        delta = xv - row_bins[idx]
        if derivs is None:
            return np.where(ok, vals[rv, idx], 0)
        return np.where(ok, vals[rv, idx] + delta * derivs[rv, idx], 0)

    return np.vectorize(_one_value)(x, row)
def evaluate_hist_multi_x_multi_y_product(x, row, bins, vals, derivs=None): #pragma: no cover
    """
    Evaluate a set of values from histograms with per-pdf binnings

    Parameters
    ----------
    x : array_like (npts)
        X values to interpolate at, shared by all requested pdfs
    row : array_like (npdf, 1)
        Which rows to interpolate at
    bins : array_like (npdf, N+1)
        'x' bin edges, one binning per pdf
    vals : array_like (npdf, N)
        'y' bin contents
    derivs : array_like (npdf, N), optional
        'y' bin slopes used for a linear in-bin correction

    Returns
    -------
    out : array_like (npdf, npts)
        The histogram values
    """
    def evaluate_row(rv):
        # rv arrives with a length-1 core dimension (see the signature below),
        # so bins[rv] is 2-d and must be flattened back to a 1-d edge array
        bins_flat = bins[rv].flatten()
        idx, mask = get_bin_indices(bins_flat, x)
        delta = x - bins_flat[idx]
        if derivs is None:
            return np.where(mask, np.squeeze(vals[rv])[idx], 0).flatten()
        # NOTE(review): unlike the branch above (and the _2d variant) this
        # branch does not call .flatten() on the result -- confirm intended
        return np.where(mask, np.squeeze(vals[rv])[idx] + delta* np.squeeze(derivs[rv])[idx], 0)
    # Vectorize over pdf rows; each call yields the npts histogram values
    vv = np.vectorize(evaluate_row, signature="(1)->(%i)" % (x.size))
    return vv(row)
def evaluate_hist_multi_x_multi_y_2d(x, row, bins, vals, derivs=None): #pragma: no cover
    """
    Evaluate a set of values from histograms with per-pdf binnings

    Parameters
    ----------
    x : array_like (npdf, npts)
        X values to interpolate at, one set per pdf
    row : array_like (npdf, 1)
        Which rows to interpolate at
    bins : array_like (npdf, N+1)
        'x' bin edges, one binning per pdf
    vals : array_like (npdf, N)
        'y' bin contents
    derivs : array_like (npdf, N), optional
        'y' bin slopes used for a linear in-bin correction

    Returns
    -------
    out : array_like (npdf, npts)
        The histogram values
    """
    nx = np.shape(x)[-1]
    def evaluate_row(rv, xv):
        # rv arrives with a length-1 core dimension, hence the flatten calls
        flat_bins = bins[rv].flatten()
        idx, mask = get_bin_indices(flat_bins, xv)
        delta = xv - flat_bins[idx]
        if derivs is None:
            return np.where(mask, np.squeeze(vals[rv])[idx], 0).flatten()
        return np.where(mask, np.squeeze(vals[rv])[idx] + delta*np.squeeze(derivs[rv])[idx], 0).flatten()
    # Vectorize over (row, x-row) pairs; each call maps nx inputs to nx outputs
    vv = np.vectorize(evaluate_row, signature="(1),(%i)->(%i)" % (nx, nx))
    return vv(row, x)
def evaluate_hist_multi_x_multi_y(x, row, bins, vals, derivs=None):
    """
    Evaluate a set of values from histograms with per-pdf binnings

    Parameters
    ----------
    x : array_like
        X values to interpolate at
    row : array_like
        Which rows to interpolate at
    bins : array_like (npdf, N+1)
        'x' bin edges
    vals : array_like (npdf, N)
        'y' bin contents

    Returns
    -------
    out : array_like
        The histogram values

    Notes
    -----
    Dispatches to the flat / product / 2d implementation depending on how
    `x` and `row` are shaped (see `get_eval_case`).
    """
    case_idx, xx, rr = get_eval_case(x, row)
    if case_idx == CASE_2D:
        return evaluate_hist_multi_x_multi_y_2d(xx, rr, bins, vals, derivs)
    if case_idx in (CASE_PRODUCT, CASE_FACTOR):
        return evaluate_hist_multi_x_multi_y_product(xx, rr, bins, vals, derivs)
    return evaluate_hist_multi_x_multi_y_flat(xx, rr, bins, vals, derivs)
def interpolate_x_multi_y_flat(x, row, xvals, yvals, **kwargs):
    """
    Interpolate a set of values

    Parameters
    ----------
    x : array_like (n)
        X values to interpolate at
    row : array_like (n)
        Which rows to interpolate at
    xvals : array_like (npts)
        X-values used for the interpolation, shared by all pdfs
    yvals : array_like (npdf, npts)
        Y-values used for the interpolation

    Returns
    -------
    vals : array_like (n)
        The interpolated values
    """
    def _interp_one(xv, rv):
        # Build the per-row interpolator and evaluate it at a single point
        return interp1d(xvals, yvals[rv], **kwargs)(xv)

    return np.vectorize(_interp_one)(x, row)
def interpolate_x_multi_y_product(x, row, xvals, yvals, **kwargs):
    """
    Interpolate a set of values

    Parameters
    ----------
    x : array_like (n)
        X values to interpolate at, shared by all requested pdfs
    row : array_like (npdf, 1)
        Which rows to interpolate at
    xvals : array_like (npts)
        X-values used for the interpolation
    yvals : array_like (npdf, npts)
        Y-values used for the interpolation

    Returns
    -------
    vals : array_like (npdf, n)
        The interpolated values
    """
    # One interpolator over the selected rows; interp1d handles the 2-d yvals
    pdf_rows = np.squeeze(row)
    interpolator = interp1d(xvals, yvals[pdf_rows], **kwargs)
    return interpolator(x)
def interpolate_x_multi_y_2d(x, row, xvals, yvals, **kwargs):
    """
    Interpolate a set of values

    Parameters
    ----------
    x : array_like (npdf, n)
        X values to interpolate at, one set per pdf
    row : array_like (npdf, 1)
        Which rows to interpolate at
    xvals : array_like (npts)
        X-values used for the interpolation, shared by all pdfs
    yvals : array_like (npdf, npts)
        Y-values used for the interpolation

    Returns
    -------
    vals : array_like (npdf, n)
        The interpolated values
    """
    nx = np.shape(x)[-1]
    def evaluate_row(rv, xv):
        # NOTE(review): rv keeps its length-1 core dimension here (row is not
        # squeezed, unlike the multi_x variants below), so yvals[rv] is 2-d
        # and the per-call result has shape (1, n) against the declared "(n)"
        # output core -- verify this path against its callers
        return interp1d(xvals, yvals[rv], **kwargs)(xv)
    vv = np.vectorize(evaluate_row, signature="(1),(%i)->(%i)" % (nx, nx))
    return vv(row, x)
def interpolate_x_multi_y(x, row, xvals, yvals, **kwargs):
    """
    Interpolate a set of values

    Parameters
    ----------
    x : array_like (npdf, n)
        X values to interpolate at
    row : array_like (npdf, 1)
        Which rows to interpolate at
    xvals : array_like (npts)
        X-values used for the interpolation
    yvals : array_like (npdf, npts)
        Y-values used for the interpolation

    Returns
    -------
    vals : array_like
        The interpolated values

    Notes
    -----
    Dispatches to the flat / product / 2d implementation depending on how
    `x` and `row` are shaped (see `get_eval_case`).
    """
    case_idx, xx, rr = get_eval_case(x, row)
    if case_idx == CASE_2D:
        return interpolate_x_multi_y_2d(xx, rr, xvals, yvals, **kwargs)
    if case_idx in (CASE_PRODUCT, CASE_FACTOR):
        return interpolate_x_multi_y_product(xx, rr, xvals, yvals, **kwargs)
    return interpolate_x_multi_y_flat(xx, rr, xvals, yvals, **kwargs)
def interpolate_multi_x_multi_y_flat(x, row, xvals, yvals, **kwargs):
    """
    Interpolate a set of values with per-pdf x grids

    Parameters
    ----------
    x : array_like (n)
        X values to interpolate at
    row : array_like (n)
        Which rows to interpolate at
    xvals : array_like (npdf, npts)
        X-values used for the interpolation, one grid per pdf
    yvals : array_like (npdf, npts)
        Y-values used for the interpolation

    Returns
    -------
    vals : array_like (n)
        The interpolated values
    """
    def _interp_one(xv, rv):
        # Each pdf row has its own x grid and y values
        return interp1d(xvals[rv], yvals[rv], **kwargs)(xv)

    return np.vectorize(_interp_one)(x, row)
def interpolate_multi_x_multi_y_product(x, row, xvals, yvals, **kwargs):
    """
    Interpolate a set of values with per-pdf x grids

    Parameters
    ----------
    x : array_like (n)
        X values to interpolate at, shared by all requested pdfs
    row : array_like (npdf, 1)
        Which rows to interpolate at
    xvals : array_like (npdf, npts)
        X-values used for the interpolation, one grid per pdf
    yvals : array_like (npdf, npts)
        Y-values used for the interpolation

    Returns
    -------
    vals : array_like (npdf, n)
        The interpolated values
    """
    pdf_rows = np.squeeze(row)
    npts = np.shape(x)[-1]

    def _interp_row(rv):
        # One interpolator per pdf row, all evaluated on the shared x grid
        return interp1d(xvals[rv], yvals[rv], **kwargs)(x)

    vectorized = np.vectorize(_interp_row, signature="()->(%i)" % (npts))
    return vectorized(pdf_rows)
def interpolate_multi_x_multi_y_2d(x, row, xvals, yvals, **kwargs):
    """
    Interpolate a set of values with per-pdf x grids

    Parameters
    ----------
    x : array_like (npdf, n)
        X values to interpolate at, one set per pdf
    row : array_like (npdf, 1)
        Which rows to interpolate at
    xvals : array_like (npdf, npts)
        X-values used for the interpolation, one grid per pdf
    yvals : array_like (npdf, npts)
        Y-values used for the interpolation

    Returns
    -------
    vals : array_like (npdf, n)
        The interpolated values
    """
    npts = np.shape(x)[-1]

    def _interp_row(rv, xv):
        # rv is a scalar row index here since row is squeezed below
        return interp1d(xvals[rv], yvals[rv], **kwargs)(xv)

    vectorized = np.vectorize(_interp_row, signature="(),(%i)->(%i)" % (npts, npts))
    return vectorized(np.squeeze(row), x)
def interpolate_multi_x_multi_y(x, row, xvals, yvals, **kwargs):
    """
    Interpolate a set of values with per-pdf x grids

    Parameters
    ----------
    x : array_like (npdf, n)
        X values to interpolate at
    row : array_like (npdf, 1)
        Which rows to interpolate at
    xvals : array_like (npdf, npts)
        X-values used for the interpolation
    yvals : array_like (npdf, npts)
        Y-values used for the interpolation

    Returns
    -------
    vals : array_like
        The interpolated values

    Notes
    -----
    Dispatches to the flat / product / 2d implementation depending on how
    `x` and `row` are shaped (see `get_eval_case`).
    """
    case_idx, xx, rr = get_eval_case(x, row)
    if case_idx == CASE_2D:
        return interpolate_multi_x_multi_y_2d(xx, rr, xvals, yvals, **kwargs)
    if case_idx in (CASE_PRODUCT, CASE_FACTOR):
        return interpolate_multi_x_multi_y_product(xx, rr, xvals, yvals, **kwargs)
    return interpolate_multi_x_multi_y_flat(xx, rr, xvals, yvals, **kwargs)
def interpolate_multi_x_y_flat(x, row, xvals, yvals, **kwargs):
    """
    Interpolate a set of values with per-pdf x grids and shared y values

    Parameters
    ----------
    x : array_like (n)
        X values to interpolate at
    row : array_like (n)
        Which rows to interpolate at
    xvals : array_like (npdf, npts)
        X-values used for the interpolation, one grid per pdf
    yvals : array_like
        Y-values used for the interpolation, shared across pdfs
        (length must match npts for interp1d)

    Returns
    -------
    vals : array_like (n)
        The interpolated values
    """
    def _interp_one(xv, rv):
        # Per-row x grid against the shared y values
        return interp1d(xvals[rv], yvals, **kwargs)(xv)

    return np.vectorize(_interp_one)(x, row)
def interpolate_multi_x_y_product(x, row, xvals, yvals, **kwargs):
    """
    Interpolate a set of values with per-pdf x grids and shared y values

    Parameters
    ----------
    x : array_like (n)
        X values to interpolate at, shared by all requested pdfs
    row : array_like (npdf, 1)
        Which rows to interpolate at
    xvals : array_like (npdf, npts)
        X-values used for the interpolation, one grid per pdf
    yvals : array_like
        Y-values used for the interpolation, shared across pdfs
        (length must match npts for interp1d)

    Returns
    -------
    vals : array_like (npdf, n)
        The interpolated values
    """
    pdf_rows = np.squeeze(row)
    npts = np.shape(x)[-1]

    def _interp_row(rv):
        # One interpolator per pdf row, evaluated on the shared x grid
        return interp1d(xvals[rv], yvals, **kwargs)(x)

    vectorized = np.vectorize(_interp_row, signature="()->(%i)" % (npts))
    return vectorized(pdf_rows)
def interpolate_multi_x_y_2d(x, row, xvals, yvals, **kwargs):
    """
    Interpolate a set of values with per-pdf x grids and shared y values

    Parameters
    ----------
    x : array_like (npdf, n)
        X values to interpolate at, one set per pdf
    row : array_like (npdf, 1)
        Which rows to interpolate at
    xvals : array_like (npdf, npts)
        X-values used for the interpolation, one grid per pdf
    yvals : array_like
        Y-values used for the interpolation, shared across pdfs
        (length must match npts for interp1d)

    Returns
    -------
    vals : array_like (npdf, n)
        The interpolated values
    """
    npts = np.shape(x)[-1]

    def _interp_row(rv, xv):
        # rv is a scalar row index here since row is squeezed below
        return interp1d(xvals[rv], yvals, **kwargs)(xv)

    vectorized = np.vectorize(_interp_row, signature="(),(%i)->(%i)" % (npts, npts))
    return vectorized(np.squeeze(row), x)
def interpolate_multi_x_y(x, row, xvals, yvals, **kwargs):
    """Interpolate a set of values, dispatching on the evaluation case.

    Parameters
    ----------
    x : array_like (npdf, n)
        X values to interpolate at
    row : array_like (npdf, 1)
        Which rows to interpolate at
    xvals : array_like (npdf, npts)
        X-values used for the interpolation
    yvals : array_like (npts)
        Y-values used for the interpolation

    Returns
    -------
    vals : array_like
        The interpolated values
    """
    case_idx, xx, rr = get_eval_case(x, row)
    # Select the specialised implementation matching the detected case.
    if case_idx in (CASE_PRODUCT, CASE_FACTOR):
        impl = interpolate_multi_x_y_product
    elif case_idx == CASE_2D:
        impl = interpolate_multi_x_y_2d
    else:
        impl = interpolate_multi_x_y_flat
    return impl(xx, rr, xvals, yvals, **kwargs)
def profile(x_data, y_data, x_bins, std=True):
    """Make a 'profile' plot

    Parameters
    ----------
    x_data : array_like (n)
        The x-values
    y_data : array_like (n)
        The y-values
    x_bins : array_like (nbins+1)
        The values of the bin edges
    std : bool
        If true, return the standard deviations, if false return the errors on the means

    Returns
    -------
    vals : array_like (nbins)
        The means
    errs : array_like (nbins)
        The standard deviations or errors on the means
    """
    idx, mask = get_bin_indices(x_bins, x_data)
    nbins = x_bins.size - 1
    count = np.zeros(nbins)
    vals = np.zeros(nbins)
    errs = np.zeros(nbins)
    for i in range(nbins):
        mask_col = mask * (idx == i)
        # Compute the bin population once (was computed twice per bin).
        n_in_bin = mask_col.sum()
        count[i] = n_in_bin
        if n_in_bin == 0:  #pragma: no cover
            # Empty bin: no mean or spread can be defined.
            vals[i] = np.nan
            errs[i] = np.nan
            continue
        masked_vals = y_data[mask_col]
        vals[i] = masked_vals.mean()
        errs[i] = masked_vals.std()
    if not std:
        # Convert standard deviation to the standard error of the mean.
        errs /= np.sqrt(count)
    return vals, errs
def reshape_to_pdf_size(vals, split_dim):
    """Reshape an array to match the number of PDFs in a distribution

    Parameters
    ----------
    vals : array
        The input array
    split_dim : int
        The dimension at which to split between pdf indices and per_pdf indices

    Returns
    -------
    out : array
        The reshaped array
    """
    in_shape = np.shape(vals)
    # np.prod replaces np.product, which was deprecated and removed in
    # NumPy 2.0.  int() matches the original .astype(int) behavior.
    npdf = int(np.prod(in_shape[:split_dim]))
    per_pdf = in_shape[split_dim:]
    out_shape = np.hstack([npdf, per_pdf])
    return vals.reshape(out_shape)
def reshape_to_pdf_shape(vals, pdf_shape, per_pdf):
    """Reshape an array to match the shape of PDFs in a distribution

    Parameters
    ----------
    vals : array
        The input array
    pdf_shape : int
        The shape for the pdfs
    per_pdf : int or array_like
        The shape per pdf

    Returns
    -------
    out : array
        The reshaped array
    """
    # Concatenate the pdf dimensions with the per-pdf dimensions.
    target_shape = np.hstack([pdf_shape, per_pdf])
    return vals.reshape(target_shape)
| [
"numpy.product",
"numpy.sqrt",
"numpy.hstack",
"scipy.interpolate.interp1d",
"numpy.array",
"scipy.stats.gaussian_kde",
"numpy.where",
"numpy.searchsorted",
"numpy.putmask",
"numpy.ndim",
"numpy.allclose",
"numpy.ones",
"numpy.size",
"numpy.floor",
"numpy.squeeze",
"numpy.shape",
"nu... | [((2326, 2356), 'numpy.allclose', 'np.allclose', (['widths', 'widths[0]'], {}), '(widths, widths[0])\n', (2337, 2356), True, 'import numpy as np\n'), ((2558, 2586), 'numpy.putmask', 'np.putmask', (['idx', '(1 - mask)', '(0)'], {}), '(idx, 1 - mask, 0)\n', (2568, 2586), True, 'import numpy as np\n'), ((2598, 2609), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (2606, 2609), True, 'import numpy as np\n'), ((3237, 3311), 'numpy.sum', 'np.sum', (['(xvals[:, 1:] * yvals[:, 1:] - xvals[:, :-1] * yvals[:, 1:])'], {'axis': '(1)'}), '(xvals[:, 1:] * yvals[:, 1:] - xvals[:, :-1] * yvals[:, 1:], axis=1)\n', (3243, 3311), True, 'import numpy as np\n'), ((4875, 4885), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (4882, 4885), True, 'import numpy as np\n'), ((4899, 4911), 'numpy.ndim', 'np.ndim', (['row'], {}), '(row)\n', (4906, 4911), True, 'import numpy as np\n'), ((5414, 5426), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (5423, 5426), True, 'import numpy as np\n'), ((5436, 5450), 'numpy.unique', 'np.unique', (['row'], {}), '(row)\n', (5445, 5450), True, 'import numpy as np\n'), ((6571, 6597), 'numpy.vectorize', 'np.vectorize', (['evaluate_row'], {}), '(evaluate_row)\n', (6583, 6597), True, 'import numpy as np\n'), ((8396, 8422), 'numpy.vectorize', 'np.vectorize', (['evaluate_row'], {}), '(evaluate_row)\n', (8408, 8422), True, 'import numpy as np\n'), ((10172, 10198), 'numpy.vectorize', 'np.vectorize', (['evaluate_row'], {}), '(evaluate_row)\n', (10184, 10198), True, 'import numpy as np\n'), ((11097, 11155), 'numpy.vectorize', 'np.vectorize', (['evaluate_row'], {'signature': "('(1)->(%i)' % x.size)"}), "(evaluate_row, signature='(1)->(%i)' % x.size)\n", (11109, 11155), True, 'import numpy as np\n'), ((12094, 12159), 'numpy.vectorize', 'np.vectorize', (['evaluate_row'], {'signature': "('(1),(%i)->(%i)' % (nx, nx))"}), "(evaluate_row, signature='(1),(%i)->(%i)' % (nx, nx))\n", (12106, 12159), True, 'import numpy as np\n'), ((13594, 13618), 'numpy.vectorize', 
'np.vectorize', (['single_row'], {}), '(single_row)\n', (13606, 13618), True, 'import numpy as np\n'), ((14165, 14180), 'numpy.squeeze', 'np.squeeze', (['row'], {}), '(row)\n', (14175, 14180), True, 'import numpy as np\n'), ((14868, 14933), 'numpy.vectorize', 'np.vectorize', (['evaluate_row'], {'signature': "('(1),(%i)->(%i)' % (nx, nx))"}), "(evaluate_row, signature='(1),(%i)->(%i)' % (nx, nx))\n", (14880, 14933), True, 'import numpy as np\n'), ((16414, 16438), 'numpy.vectorize', 'np.vectorize', (['single_row'], {}), '(single_row)\n', (16426, 16438), True, 'import numpy as np\n'), ((16997, 17012), 'numpy.squeeze', 'np.squeeze', (['row'], {}), '(row)\n', (17007, 17012), True, 'import numpy as np\n'), ((17130, 17181), 'numpy.vectorize', 'np.vectorize', (['single_row'], {'signature': "('()->(%i)' % nx)"}), "(single_row, signature='()->(%i)' % nx)\n", (17142, 17181), True, 'import numpy as np\n'), ((17854, 17918), 'numpy.vectorize', 'np.vectorize', (['evaluate_row'], {'signature': "('(),(%i)->(%i)' % (nx, nx))"}), "(evaluate_row, signature='(),(%i)->(%i)' % (nx, nx))\n", (17866, 17918), True, 'import numpy as np\n'), ((19425, 19449), 'numpy.vectorize', 'np.vectorize', (['single_row'], {}), '(single_row)\n', (19437, 19449), True, 'import numpy as np\n'), ((19996, 20011), 'numpy.squeeze', 'np.squeeze', (['row'], {}), '(row)\n', (20006, 20011), True, 'import numpy as np\n'), ((20125, 20176), 'numpy.vectorize', 'np.vectorize', (['single_row'], {'signature': "('()->(%i)' % nx)"}), "(single_row, signature='()->(%i)' % nx)\n", (20137, 20176), True, 'import numpy as np\n'), ((20833, 20897), 'numpy.vectorize', 'np.vectorize', (['evaluate_row'], {'signature': "('(),(%i)->(%i)' % (nx, nx))"}), "(evaluate_row, signature='(),(%i)->(%i)' % (nx, nx))\n", (20845, 20897), True, 'import numpy as np\n'), ((22380, 22405), 'numpy.zeros', 'np.zeros', (['(x_bins.size - 1)'], {}), '(x_bins.size - 1)\n', (22388, 22405), True, 'import numpy as np\n'), ((22415, 22440), 'numpy.zeros', 
'np.zeros', (['(x_bins.size - 1)'], {}), '(x_bins.size - 1)\n', (22423, 22440), True, 'import numpy as np\n'), ((22450, 22475), 'numpy.zeros', 'np.zeros', (['(x_bins.size - 1)'], {}), '(x_bins.size - 1)\n', (22458, 22475), True, 'import numpy as np\n'), ((23265, 23279), 'numpy.shape', 'np.shape', (['vals'], {}), '(vals)\n', (23273, 23279), True, 'import numpy as np\n'), ((23387, 23413), 'numpy.hstack', 'np.hstack', (['[npdf, per_pdf]'], {}), '([npdf, per_pdf])\n', (23396, 23413), True, 'import numpy as np\n'), ((23844, 23875), 'numpy.hstack', 'np.hstack', (['[pdf_shape, per_pdf]'], {}), '([pdf_shape, per_pdf])\n', (23853, 23875), True, 'import numpy as np\n'), ((2301, 2314), 'numpy.size', 'np.size', (['bins'], {}), '(bins)\n', (2308, 2314), True, 'import numpy as np\n'), ((3714, 3745), 'scipy.stats.gaussian_kde', 'sps.gaussian_kde', (['row'], {}), '(row, **kwargs)\n', (3730, 3745), True, 'from scipy import stats as sps\n'), ((5458, 5469), 'numpy.size', 'np.size', (['xx'], {}), '(xx)\n', (5465, 5469), True, 'import numpy as np\n'), ((5473, 5483), 'numpy.size', 'np.size', (['x'], {}), '(x)\n', (5480, 5483), True, 'import numpy as np\n'), ((5507, 5518), 'numpy.size', 'np.size', (['rr'], {}), '(rr)\n', (5514, 5518), True, 'import numpy as np\n'), ((5522, 5534), 'numpy.size', 'np.size', (['row'], {}), '(row)\n', (5529, 5534), True, 'import numpy as np\n'), ((5589, 5599), 'numpy.size', 'np.size', (['x'], {}), '(x)\n', (5596, 5599), True, 'import numpy as np\n'), ((5662, 5684), 'numpy.expand_dims', 'np.expand_dims', (['rr', '(-1)'], {}), '(rr, -1)\n', (5676, 5684), True, 'import numpy as np\n'), ((6299, 6318), 'numpy.zeros', 'np.zeros', (['idx.shape'], {}), '(idx.shape)\n', (6307, 6318), True, 'import numpy as np\n'), ((6502, 6563), 'numpy.where', 'np.where', (['maskv', '(vals[rv, idxv] + delta * derivs[rv, idxv])', '(0)'], {}), '(maskv, vals[rv, idxv] + delta * derivs[rv, idxv], 0)\n', (6510, 6563), True, 'import numpy as np\n'), ((7245, 7263), 'numpy.ones', 'np.ones', 
(['row.shape'], {}), '(row.shape)\n', (7252, 7263), True, 'import numpy as np\n'), ((8123, 8142), 'numpy.zeros', 'np.zeros', (['idx.shape'], {}), '(idx.shape)\n', (8131, 8142), True, 'import numpy as np\n'), ((8327, 8388), 'numpy.where', 'np.where', (['maskv', '(vals[rv, idxv] + delta * derivs[rv, idxv])', '(0)'], {}), '(maskv, vals[rv, idxv] + delta * derivs[rv, idxv], 0)\n', (8335, 8388), True, 'import numpy as np\n'), ((10106, 10164), 'numpy.where', 'np.where', (['mask', '(vals[rv, idx] + delta * derivs[rv, idx])', '(0)'], {}), '(mask, vals[rv, idx] + delta * derivs[rv, idx], 0)\n', (10114, 10164), True, 'import numpy as np\n'), ((11706, 11717), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (11714, 11717), True, 'import numpy as np\n'), ((14192, 14228), 'scipy.interpolate.interp1d', 'interp1d', (['xvals', 'yvals[rr]'], {}), '(xvals, yvals[rr], **kwargs)\n', (14200, 14228), False, 'from scipy.interpolate import interp1d\n'), ((14757, 14768), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (14765, 14768), True, 'import numpy as np\n'), ((17022, 17033), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (17030, 17033), True, 'import numpy as np\n'), ((17739, 17750), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (17747, 17750), True, 'import numpy as np\n'), ((17933, 17948), 'numpy.squeeze', 'np.squeeze', (['row'], {}), '(row)\n', (17943, 17948), True, 'import numpy as np\n'), ((20021, 20032), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (20029, 20032), True, 'import numpy as np\n'), ((20722, 20733), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (20730, 20733), True, 'import numpy as np\n'), ((20912, 20927), 'numpy.squeeze', 'np.squeeze', (['row'], {}), '(row)\n', (20922, 20927), True, 'import numpy as np\n'), ((22854, 22868), 'numpy.sqrt', 'np.sqrt', (['count'], {}), '(count)\n', (22861, 22868), True, 'import numpy as np\n'), ((5334, 5344), 'numpy.size', 'np.size', (['x'], {}), '(x)\n', (5341, 5344), True, 'import numpy as np\n'), ((5353, 5365), 
'numpy.size', 'np.size', (['row'], {}), '(row)\n', (5360, 5365), True, 'import numpy as np\n'), ((5560, 5571), 'numpy.size', 'np.size', (['xx'], {}), '(xx)\n', (5567, 5571), True, 'import numpy as np\n'), ((5574, 5585), 'numpy.size', 'np.size', (['rr'], {}), '(rr)\n', (5581, 5585), True, 'import numpy as np\n'), ((6182, 6192), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (6189, 6192), True, 'import numpy as np\n'), ((6201, 6213), 'numpy.ndim', 'np.ndim', (['row'], {}), '(row)\n', (6208, 6213), True, 'import numpy as np\n'), ((6452, 6486), 'numpy.where', 'np.where', (['maskv', 'vals[rv, idxv]', '(0)'], {}), '(maskv, vals[rv, idxv], 0)\n', (6460, 6486), True, 'import numpy as np\n'), ((8004, 8014), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (8011, 8014), True, 'import numpy as np\n'), ((8024, 8036), 'numpy.ndim', 'np.ndim', (['row'], {}), '(row)\n', (8031, 8036), True, 'import numpy as np\n'), ((8277, 8311), 'numpy.where', 'np.where', (['maskv', 'vals[rv, idxv]', '(0)'], {}), '(maskv, vals[rv, idxv], 0)\n', (8285, 8311), True, 'import numpy as np\n'), ((10058, 10090), 'numpy.where', 'np.where', (['mask', 'vals[rv, idx]', '(0)'], {}), '(mask, vals[rv, idx], 0)\n', (10066, 10090), True, 'import numpy as np\n'), ((13544, 13580), 'scipy.interpolate.interp1d', 'interp1d', (['xvals', 'yvals[rv]'], {}), '(xvals, yvals[rv], **kwargs)\n', (13552, 13580), False, 'from scipy.interpolate import interp1d\n'), ((14818, 14854), 'scipy.interpolate.interp1d', 'interp1d', (['xvals', 'yvals[rv]'], {}), '(xvals, yvals[rv], **kwargs)\n', (14826, 14854), False, 'from scipy.interpolate import interp1d\n'), ((16360, 16400), 'scipy.interpolate.interp1d', 'interp1d', (['xvals[rv]', 'yvals[rv]'], {}), '(xvals[rv], yvals[rv], **kwargs)\n', (16368, 16400), False, 'from scipy.interpolate import interp1d\n'), ((17077, 17117), 'scipy.interpolate.interp1d', 'interp1d', (['xvals[rv]', 'yvals[rv]'], {}), '(xvals[rv], yvals[rv], **kwargs)\n', (17085, 17117), False, 'from scipy.interpolate import 
interp1d\n'), ((17800, 17840), 'scipy.interpolate.interp1d', 'interp1d', (['xvals[rv]', 'yvals[rv]'], {}), '(xvals[rv], yvals[rv], **kwargs)\n', (17808, 17840), False, 'from scipy.interpolate import interp1d\n'), ((19375, 19411), 'scipy.interpolate.interp1d', 'interp1d', (['xvals[rv]', 'yvals'], {}), '(xvals[rv], yvals, **kwargs)\n', (19383, 19411), False, 'from scipy.interpolate import interp1d\n'), ((20076, 20112), 'scipy.interpolate.interp1d', 'interp1d', (['xvals[rv]', 'yvals'], {}), '(xvals[rv], yvals, **kwargs)\n', (20084, 20112), False, 'from scipy.interpolate import interp1d\n'), ((20783, 20819), 'scipy.interpolate.interp1d', 'interp1d', (['xvals[rv]', 'yvals'], {}), '(xvals[rv], yvals, **kwargs)\n', (20791, 20819), False, 'from scipy.interpolate import interp1d\n'), ((23291, 23323), 'numpy.product', 'np.product', (['in_shape[:split_dim]'], {}), '(in_shape[:split_dim])\n', (23301, 23323), True, 'import numpy as np\n'), ((756, 769), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (764, 769), True, 'import numpy as np\n'), ((2469, 2506), 'numpy.searchsorted', 'np.searchsorted', (['bins', 'x'], {'side': '"""left"""'}), "(bins, x, side='left')\n", (2484, 2506), True, 'import numpy as np\n'), ((7337, 7352), 'numpy.squeeze', 'np.squeeze', (['row'], {}), '(row)\n', (7347, 7352), True, 'import numpy as np\n'), ((7423, 7438), 'numpy.squeeze', 'np.squeeze', (['row'], {}), '(row)\n', (7433, 7438), True, 'import numpy as np\n'), ((2386, 2421), 'numpy.floor', 'np.floor', (['((x - bins[0]) / widths[0])'], {}), '((x - bins[0]) / widths[0])\n', (2394, 2421), True, 'import numpy as np\n'), ((5242, 5253), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (5250, 5253), True, 'import numpy as np\n'), ((5255, 5268), 'numpy.shape', 'np.shape', (['row'], {}), '(row)\n', (5263, 5268), True, 'import numpy as np\n'), ((7463, 7478), 'numpy.squeeze', 'np.squeeze', (['row'], {}), '(row)\n', (7473, 7478), True, 'import numpy as np\n'), ((11021, 11041), 'numpy.squeeze', 'np.squeeze', 
(['vals[rv]'], {}), '(vals[rv])\n', (11031, 11041), True, 'import numpy as np\n'), ((11056, 11078), 'numpy.squeeze', 'np.squeeze', (['derivs[rv]'], {}), '(derivs[rv])\n', (11066, 11078), True, 'import numpy as np\n'), ((10951, 10971), 'numpy.squeeze', 'np.squeeze', (['vals[rv]'], {}), '(vals[rv])\n', (10961, 10971), True, 'import numpy as np\n'), ((11939, 11959), 'numpy.squeeze', 'np.squeeze', (['vals[rv]'], {}), '(vals[rv])\n', (11949, 11959), True, 'import numpy as np\n'), ((12009, 12029), 'numpy.squeeze', 'np.squeeze', (['vals[rv]'], {}), '(vals[rv])\n', (12019, 12029), True, 'import numpy as np\n'), ((12043, 12065), 'numpy.squeeze', 'np.squeeze', (['derivs[rv]'], {}), '(derivs[rv])\n', (12053, 12065), True, 'import numpy as np\n')] |
import vtk
import os.path
import numpy as np
import nibabel as nib
from six import iteritems
from vtk.util import numpy_support as ns
from nibabel.streamlines.tck import TckFile as tck
def read_vtk(filename):
    """Load a VTK polydata file and convert it to tracts.

    Filenames ending in 'xml' or 'vtp' use the XML reader; every other
    extension falls back to the legacy VTK polydata reader.
    """
    if filename.endswith(('xml', 'vtp')):
        reader = vtk.vtkXMLPolyDataReader()
    else:
        reader = vtk.vtkPolyDataReader()
    reader.SetFileName(filename)
    reader.Update()
    return vtkpolydata_to_tracts(reader.GetOutput())
def tck2vtk(path_tck):
    """Convert a MRtrix .tck streamline file to a .vtk file alongside it.

    Returns the path of the written .vtk file.
    """
    streamlines, _header = read_tck(path_tck)
    base, _ext = os.path.splitext(path_tck)
    path_vtk = "{}.vtk".format(base)
    save_vtk(path_vtk, streamlines)
    return path_vtk
def read_tck(filename):
    """Read a MRtrix .tck file.

    Returns
    -------
    streamlines : list of arrays
        One (n_points, 3) vertex array per streamline
    header : dict
        The parsed file header
    """
    header = read_mrtrix_header(filename)
    vertices, line_starts, line_ends = read_mrtrix_streamlines(filename, header)
    # Slice the flat vertex array into one array per streamline.
    streamlines = [vertices[start:end, :] for start, end in zip(line_starts, line_ends)]
    return streamlines, header
def read_mrtrix_header(in_file):
    """Parse the ASCII header of a MRtrix .tck file.

    Reads "key: value" lines until the END marker.  Two entries are then
    converted: "count" (number of streamlines) becomes an int, and
    "offset" (byte offset of the binary vertex data) is derived from the
    "file" field, which has the form ". <offset>".

    Parameters
    ----------
    in_file : str
        Path to the .tck file

    Returns
    -------
    header : dict
        Header key/value pairs plus the derived "count" and "offset"
    """
    header = {}
    # "with" guarantees the handle is closed even if parsing raises
    # (the original leaked the handle on any exception before close()).
    with open(in_file, "rb") as fileobj:
        for line in fileobj:
            line = line.decode()
            if line == "END\n":
                # Reached the end of the header.
                break
            if ": " in line:
                line = line.replace("\n", "")
                line = line.replace("'", "")
                fields = line.split(": ")
                # Keep original semantics: only the first ": "-separated
                # field after the key is kept as the value.
                header[fields[0]] = fields[1]
    header["count"] = int(header["count"].replace("\n", ""))
    header["offset"] = int(header["file"].replace(".", ""))
    return header
def read_mrtrix_streamlines(in_file, header):
    """Read the binary vertex data of a MRtrix .tck file.

    Parameters
    ----------
    in_file : str
        Path to the .tck file
    header : dict
        Parsed header (see read_mrtrix_header); must contain the
        "offset", "count" and "datatype" keys.

    Returns
    -------
    vtx : numpy array (n, 3)
        All vertices; streamlines are delimited by all-NaN rows
    line_starts : numpy array
        Index of the first vertex of each streamline
    line_ends : numpy array
        Index one before each streamline's NaN delimiter row
    """
    byte_offset = header["offset"]
    stream_count = header["count"]
    datatype = header["datatype"]
    # Bytes per float component: Float32 -> 4, Float64 -> 8.
    dt = 4
    if datatype.startswith( 'Float64' ):
        dt = 8
    elif not datatype.startswith( 'Float32' ):
        print('Unsupported datatype: ' + datatype)
        return
    #tck format stores three floats (x/y/z) for each vertex
    num_triplets = (os.path.getsize(in_file) - byte_offset) // (dt * 3)
    # Build the numpy dtype string, honouring the file's endianness.
    dt = 'f' + str(dt)
    if datatype.endswith( 'LE' ):
        dt = '<'+dt
    if datatype.endswith( 'BE' ):
        dt = '>'+dt
    vtx = np.fromfile(in_file, dtype=dt, count=(num_triplets*3), offset=byte_offset)
    vtx = np.reshape(vtx, (-1,3))
    #make sure last streamline delimited...
    if not np.isnan(vtx[-2,1]):
        vtx[-1,:] = np.nan
    # A streamline ends wherever an entire row is NaN.
    line_ends, = np.where(np.all(np.isnan(vtx), axis=1))
    if stream_count != line_ends.size:
        print('expected {} streamlines, found {}'.format(stream_count, line_ends.size))
    line_starts = line_ends + 0
    line_starts[1:line_ends.size] = line_ends[0:line_ends.size-1]
    #the first line starts with the first vertex (index 0), so preceding NaN at -1
    line_starts[0] = -1
    #first vertex of line is the one after a NaN
    line_starts = line_starts + 1
    #last vertex of line is the one before NaN
    line_ends = line_ends - 1
    # NOTE(review): read_tck slices vertices[start:end], which excludes the
    # vertex at index `end`; combined with the -1 above this drops the row
    # just before the NaN delimiter — confirm this is the intended behavior.
    return vtx, line_starts, line_ends
def save_vtk(filename, tracts, lines_indices=None):
    """Save a list of tracts as a VTK polydata file.

    Parameters
    ----------
    filename : str
        Output path; '.xml'/'.vtp' selects the XML writer, anything else
        the legacy binary VTK writer.
    tracts : sequence of arrays
        One (n_points, 3) array of coordinates per tract
    lines_indices : sequence of arrays, optional
        Point indices for each line; defaults to consecutive numbering
        that matches the concatenation order of `tracts`.
    """
    lengths = [len(p) for p in tracts]
    # Offset of each tract's first point inside the concatenated array.
    line_starts = ns.numpy.r_[0, ns.numpy.cumsum(lengths)]
    if lines_indices is None:
        lines_indices = [ns.numpy.arange(length) + line_start for length, line_start in zip(lengths, line_starts)]
    # VTK cell-array layout: [n0, i0_0, ..., i0_{n0-1}, n1, i1_0, ...].
    ids = ns.numpy.hstack([ns.numpy.r_[c[0], c[1]] for c in zip(lengths, lines_indices)])
    vtk_ids = ns.numpy_to_vtkIdTypeArray(ids.astype('int64'), deep=True)
    cell_array = vtk.vtkCellArray()
    cell_array.SetCells(len(tracts), vtk_ids)
    # Cast points to the numpy dtype that maps to VTK_DOUBLE.
    points = ns.numpy.vstack(tracts).astype(ns.get_vtk_to_numpy_typemap()[vtk.VTK_DOUBLE])
    points_array = ns.numpy_to_vtk(points, deep=True)
    poly_data = vtk.vtkPolyData()
    vtk_points = vtk.vtkPoints()
    vtk_points.SetData(points_array)
    poly_data.SetPoints(vtk_points)
    poly_data.SetLines(cell_array)
    poly_data.BuildCells()
    if filename.endswith('.xml') or filename.endswith('.vtp'):
        writer = vtk.vtkXMLPolyDataWriter()
        writer.SetDataModeToBinary()
    else:
        writer = vtk.vtkPolyDataWriter()
        writer.SetFileTypeToBinary()
    writer.SetFileName(filename)
    # VTK 6+ replaced SetInput with SetInputData.
    if hasattr(vtk, 'VTK_MAJOR_VERSION') and vtk.VTK_MAJOR_VERSION > 5:
        writer.SetInputData(poly_data)
    else:
        writer.SetInput(poly_data)
    writer.Write()
def save_nii(fname, data, affine):
    """Write *data* (cast to int16) to *fname* as a NIfTI image.

    Parameters
    ----------
    fname : str
        Output file path
    data : array
        Image data; cast to int16 before saving
    affine : array
        The affine transform stored in the NIfTI header
    """
    nifti_image = nib.Nifti1Image(data.astype(np.int16), affine)
    nib.save(nifti_image, fname)
def vtkpolydata_to_tracts(polydata):
    """
    VTK polylines loading

    Converts a vtkPolyData object into a list of tracts plus the
    associated per-point data arrays.

    :param polydata: vtk file polydata
    :return: tractogram, associated data
    """
    # Flatten the polydata into plain numpy arrays plus the line count.
    result = {'lines': ns.vtk_to_numpy(polydata.GetLines().GetData()),
              'points': ns.vtk_to_numpy(polydata.GetPoints().GetData()), 'numberOfLines': polydata.GetNumberOfLines()}
    data = {}
    # Record the names of the active scalar/vector/tensor arrays, if set.
    if polydata.GetPointData().GetScalars():
        data['ActiveScalars'] = polydata.GetPointData().GetScalars().GetName()
    if polydata.GetPointData().GetVectors():
        data['ActiveVectors'] = polydata.GetPointData().GetVectors().GetName()
    if polydata.GetPointData().GetTensors():
        data['ActiveTensors'] = polydata.GetPointData().GetTensors().GetName()
    # Copy every per-point data array, promoting 1-D arrays to column vectors.
    for i in range(polydata.GetPointData().GetNumberOfArrays()):
        array = polydata.GetPointData().GetArray(i)
        np_array = ns.vtk_to_numpy(array)
        if np_array.ndim == 1:
            np_array = np_array.reshape(len(np_array), 1)
        data[polydata.GetPointData().GetArrayName(i)] = np_array
    result['pointData'] = data
    # Delegate the actual splitting into per-tract arrays.
    tracts, data = vtkpolydata_dictionary_to_tracts_and_data(result)
    return tracts, data
def vtkpolydata_dictionary_to_tracts_and_data(dictionary):
    """
    VTK polydata management

    Splits the flattened polydata dictionary into a list of tracts (one
    (n_points, 3) coordinate array per line) and a dictionary of the
    per-point data arrays split the same way.

    :param dictionary: polydata dictionary; must contain 'lines'
        (flat VTK cell layout [n0, i0_0, ..., n1, i1_0, ...]), 'points'
        and 'numberOfLines'; may contain 'pointData', a dict of numpy
        arrays indexed per point
    :return: tractogram, associated data
    :raises ValueError: if a required key is missing
    """
    dictionary_keys = {'lines', 'points', 'numberOfLines'}
    if not dictionary_keys.issubset(dictionary):
        raise ValueError("Dictionary must have the keys lines and points" + repr(dictionary))
    tract_data = {}
    tracts = []
    lines = np.asarray(dictionary['lines']).squeeze()
    points = dictionary['points']
    actual_line_index = 0
    number_of_tracts = dictionary['numberOfLines']
    original_lines = []
    for _ in range(number_of_tracts):
        # lines[actual_line_index] is this cell's vertex count, followed
        # by that many point indices.
        point_count = lines[actual_line_index]
        indices = np.array(
            lines[actual_line_index + 1:actual_line_index + point_count + 1], copy=True)
        tracts.append(points[indices])
        original_lines.append(indices)
        actual_line_index += point_count + 1
    if 'pointData' in dictionary:
        # dict.items() replaces six.iteritems (identical on Python 3).
        point_data_keys = [
            key for key, value in dictionary['pointData'].items()
            if isinstance(value, np.ndarray)
        ]
        for k in point_data_keys:
            array_data = dictionary['pointData'][k]
            if k not in tract_data:
                tract_data[k] = [array_data[f] for f in original_lines]
            else:
                # Dict keys are unique, so this branch cannot run within a
                # single call; the original's no-op np.vstack (its result
                # was discarded) has been removed.
                tract_data[k].extend([array_data[f] for f in original_lines[-number_of_tracts:]])
    return tracts, tract_data
| [
"numpy.fromfile",
"vtk.vtkXMLPolyDataWriter",
"vtk.vtkCellArray",
"vtk.vtkPoints",
"numpy.array",
"vtk.vtkPolyDataReader",
"vtk.util.numpy_support.numpy_to_vtk",
"vtk.vtkPolyDataWriter",
"numpy.reshape",
"vtk.util.numpy_support.numpy.cumsum",
"numpy.asarray",
"numpy.vstack",
"vtk.vtkXMLPolyD... | [((2394, 2468), 'numpy.fromfile', 'np.fromfile', (['in_file'], {'dtype': 'dt', 'count': '(num_triplets * 3)', 'offset': 'byte_offset'}), '(in_file, dtype=dt, count=num_triplets * 3, offset=byte_offset)\n', (2405, 2468), True, 'import numpy as np\n'), ((2479, 2503), 'numpy.reshape', 'np.reshape', (['vtx', '(-1, 3)'], {}), '(vtx, (-1, 3))\n', (2489, 2503), True, 'import numpy as np\n'), ((3674, 3692), 'vtk.vtkCellArray', 'vtk.vtkCellArray', ([], {}), '()\n', (3690, 3692), False, 'import vtk\n'), ((3849, 3883), 'vtk.util.numpy_support.numpy_to_vtk', 'ns.numpy_to_vtk', (['points'], {'deep': '(True)'}), '(points, deep=True)\n', (3864, 3883), True, 'from vtk.util import numpy_support as ns\n'), ((3901, 3918), 'vtk.vtkPolyData', 'vtk.vtkPolyData', ([], {}), '()\n', (3916, 3918), False, 'import vtk\n'), ((3936, 3951), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (3949, 3951), False, 'import vtk\n'), ((4628, 4648), 'nibabel.save', 'nib.save', (['img', 'fname'], {}), '(img, fname)\n', (4636, 4648), True, 'import nibabel as nib\n'), ((298, 324), 'vtk.vtkXMLPolyDataReader', 'vtk.vtkXMLPolyDataReader', ([], {}), '()\n', (322, 324), False, 'import vtk\n'), ((361, 384), 'vtk.vtkPolyDataReader', 'vtk.vtkPolyDataReader', ([], {}), '()\n', (382, 384), False, 'import vtk\n'), ((2558, 2578), 'numpy.isnan', 'np.isnan', (['vtx[-2, 1]'], {}), '(vtx[-2, 1])\n', (2566, 2578), True, 'import numpy as np\n'), ((4169, 4195), 'vtk.vtkXMLPolyDataWriter', 'vtk.vtkXMLPolyDataWriter', ([], {}), '()\n', (4193, 4195), False, 'import vtk\n'), ((4260, 4283), 'vtk.vtkPolyDataWriter', 'vtk.vtkPolyDataWriter', ([], {}), '()\n', (4281, 4283), False, 'import vtk\n'), ((5524, 5546), 'vtk.util.numpy_support.vtk_to_numpy', 'ns.vtk_to_numpy', (['array'], {}), '(array)\n', (5539, 5546), True, 'from vtk.util import numpy_support as ns\n'), ((2639, 2652), 'numpy.isnan', 'np.isnan', (['vtx'], {}), '(vtx)\n', (2647, 2652), True, 'import numpy as np\n'), ((3321, 3345), 
'vtk.util.numpy_support.numpy.cumsum', 'ns.numpy.cumsum', (['lengths'], {}), '(lengths)\n', (3336, 3345), True, 'from vtk.util import numpy_support as ns\n'), ((3752, 3775), 'vtk.util.numpy_support.numpy.vstack', 'ns.numpy.vstack', (['tracts'], {}), '(tracts)\n', (3767, 3775), True, 'from vtk.util import numpy_support as ns\n'), ((3783, 3812), 'vtk.util.numpy_support.get_vtk_to_numpy_typemap', 'ns.get_vtk_to_numpy_typemap', ([], {}), '()\n', (3810, 3812), True, 'from vtk.util import numpy_support as ns\n'), ((6268, 6299), 'numpy.asarray', 'np.asarray', (["dictionary['lines']"], {}), "(dictionary['lines'])\n", (6278, 6299), True, 'import numpy as np\n'), ((6636, 6739), 'numpy.array', 'np.array', (['lines[actual_line_index + 1:actual_line_index + lines[actual_line_index] + 1]'], {'copy': '(True)'}), '(lines[actual_line_index + 1:actual_line_index + lines[\n actual_line_index] + 1], copy=True)\n', (6644, 6739), True, 'import numpy as np\n'), ((3402, 3425), 'vtk.util.numpy_support.numpy.arange', 'ns.numpy.arange', (['length'], {}), '(length)\n', (3417, 3425), True, 'from vtk.util import numpy_support as ns\n'), ((6872, 6906), 'six.iteritems', 'iteritems', (["dictionary['pointData']"], {}), "(dictionary['pointData'])\n", (6881, 6906), False, 'from six import iteritems\n'), ((7170, 7194), 'numpy.vstack', 'np.vstack', (['tract_data[k]'], {}), '(tract_data[k])\n', (7179, 7194), True, 'import numpy as np\n')] |
# Real-time "bunny nose" overlay: detect faces from the webcam with dlib,
# measure the nose from facial landmarks, and paste a scaled bunny image
# over it on every frame.
import cv2
import numpy as np
import dlib
from math import hypot
# Loading Camera and Nose image and Creating mask
cap = cv2.VideoCapture(0)
nose_image = cv2.imread("bunny.png")
# Grab one frame up front so the mask can be sized to the camera resolution.
_, frame = cap.read()
rows, cols, _ = frame.shape
nose_mask = np.zeros((rows, cols), np.uint8)
# Loading Face detector
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# Main loop: process frames until ESC is pressed.
while True:
    _, frame = cap.read()
    nose_mask.fill(0)
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(frame)
    for face in faces:
        landmarks = predictor(gray_frame, face)
        # Nose coordinates
        # dlib's 68-point model: indices 29-35 lie on the nose.
        # NOTE(review): top_nose (and bottom_right below) are computed but
        # never used.
        top_nose = (landmarks.part(29).x, landmarks.part(29).y)
        center_nose = (landmarks.part(30).x, landmarks.part(30).y)
        left_nose = (landmarks.part(31).x, landmarks.part(31).y)
        right_nose = (landmarks.part(35).x, landmarks.part(35).y)
        # Overlay is ~1.7x the detected nostril-to-nostril distance.
        nose_width = int(hypot(left_nose[0] - right_nose[0],
                           left_nose[1] - right_nose[1]) * 1.7)
        nose_height = int(nose_width * 0.77)
        # New nose position
        top_left = (int(center_nose[0] - nose_width / 2),
                              int(center_nose[1] - nose_height / 2))
        bottom_right = (int(center_nose[0] + nose_width / 2),
                       int(center_nose[1] + nose_height / 2))
        # Adding the new nose
        nose_bunny = cv2.resize(nose_image, (nose_width, nose_height))
        nose_bunny_gray = cv2.cvtColor(nose_bunny, cv2.COLOR_BGR2GRAY)
        # Inverse binary threshold: pixels darker than 25 (the overlay's
        # background) become 255 in the mask.
        _, nose_mask = cv2.threshold(nose_bunny_gray, 25, 255, cv2.THRESH_BINARY_INV)
        nose_area = frame[top_left[1]: top_left[1] + nose_height,
                    top_left[0]: top_left[0] + nose_width]
        # Keep only the background of the nose region, then add the bunny.
        nose_area_no_nose = cv2.bitwise_and(nose_area, nose_area, mask=nose_mask)
        final_nose = cv2.add(nose_area_no_nose, nose_bunny)
        frame[top_left[1]: top_left[1] + nose_height,
        top_left[0]: top_left[0] + nose_width] = final_nose
        cv2.imshow("Nose area", nose_area)
        cv2.imshow("Nose bunny", nose_bunny)
        cv2.imshow("final nose", final_nose)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1)
    # ESC (keycode 27) exits the loop.
    if key == 27:
        break
| [
"cv2.threshold",
"cv2.bitwise_and",
"dlib.shape_predictor",
"cv2.imshow",
"dlib.get_frontal_face_detector",
"numpy.zeros",
"cv2.waitKey",
"cv2.VideoCapture",
"cv2.cvtColor",
"math.hypot",
"cv2.resize",
"cv2.imread",
"cv2.add"
] | [((122, 141), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (138, 141), False, 'import cv2\n'), ((155, 178), 'cv2.imread', 'cv2.imread', (['"""bunny.png"""'], {}), "('bunny.png')\n", (165, 178), False, 'import cv2\n'), ((241, 273), 'numpy.zeros', 'np.zeros', (['(rows, cols)', 'np.uint8'], {}), '((rows, cols), np.uint8)\n', (249, 273), True, 'import numpy as np\n'), ((310, 342), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (340, 342), False, 'import dlib\n'), ((355, 416), 'dlib.shape_predictor', 'dlib.shape_predictor', (['"""shape_predictor_68_face_landmarks.dat"""'], {}), "('shape_predictor_68_face_landmarks.dat')\n", (375, 416), False, 'import dlib\n'), ((495, 534), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (507, 534), False, 'import cv2\n'), ((2172, 2198), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (2182, 2198), False, 'import cv2\n'), ((2212, 2226), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2223, 2226), False, 'import cv2\n'), ((1429, 1478), 'cv2.resize', 'cv2.resize', (['nose_image', '(nose_width, nose_height)'], {}), '(nose_image, (nose_width, nose_height))\n', (1439, 1478), False, 'import cv2\n'), ((1505, 1549), 'cv2.cvtColor', 'cv2.cvtColor', (['nose_bunny', 'cv2.COLOR_BGR2GRAY'], {}), '(nose_bunny, cv2.COLOR_BGR2GRAY)\n', (1517, 1549), False, 'import cv2\n'), ((1573, 1635), 'cv2.threshold', 'cv2.threshold', (['nose_bunny_gray', '(25)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(nose_bunny_gray, 25, 255, cv2.THRESH_BINARY_INV)\n', (1586, 1635), False, 'import cv2\n'), ((1790, 1843), 'cv2.bitwise_and', 'cv2.bitwise_and', (['nose_area', 'nose_area'], {'mask': 'nose_mask'}), '(nose_area, nose_area, mask=nose_mask)\n', (1805, 1843), False, 'import cv2\n'), ((1865, 1903), 'cv2.add', 'cv2.add', (['nose_area_no_nose', 'nose_bunny'], {}), '(nose_area_no_nose, nose_bunny)\n', (1872, 1903), 
False, 'import cv2\n'), ((2040, 2074), 'cv2.imshow', 'cv2.imshow', (['"""Nose area"""', 'nose_area'], {}), "('Nose area', nose_area)\n", (2050, 2074), False, 'import cv2\n'), ((2083, 2119), 'cv2.imshow', 'cv2.imshow', (['"""Nose bunny"""', 'nose_bunny'], {}), "('Nose bunny', nose_bunny)\n", (2093, 2119), False, 'import cv2\n'), ((2128, 2164), 'cv2.imshow', 'cv2.imshow', (['"""final nose"""', 'final_nose'], {}), "('final nose', final_nose)\n", (2138, 2164), False, 'import cv2\n'), ((951, 1016), 'math.hypot', 'hypot', (['(left_nose[0] - right_nose[0])', '(left_nose[1] - right_nose[1])'], {}), '(left_nose[0] - right_nose[0], left_nose[1] - right_nose[1])\n', (956, 1016), False, 'from math import hypot\n')] |
import boto3
import cv2
import greengrasssdk
import json
import logging
import mxnet as mx
import numpy as np
import requests
import sys
import tarfile
import time
from collections import namedtuple
from datetime import datetime
from picamera import PiCamera
from sense_hat import SenseHat
#########################################################################################
# Default Path to download ML Model that has been created for you to use,
# These can be commented out if you build your own model and paste that information below
ML_BUCKET_NAME = "reinvent2018-recycle-arm-us-east-1"
ML_OBJECT_NAME = "2020/ml-models/model.tar.gz"
# If you have created your own ML model using the Sagemaker notebook provided,
# the last section will print two lines that can be pasted over the following two lines
#ML_BUCKET_NAME = "sagemaker-us-east-1-0123456789"
#ML_OBJECT_NAME = "smart-recycle-kit/output/<model_directory>/output/model.tar.gz"
# S3 Bucket Name to save images taken
# If a S3 Bucket is not specified below, captured images will not be copied to S3
# For example, you can use the Sagemaker Bucket you pasted from the notebook
BUCKET_NAME = ""
#########################################################################################
# LOCAL_RESOURCE_DIR is where images taken by camera will be saved
LOCAL_RESOURCE_DIR = "/tmp"
# LOCAL_MODEL_DIR is where the ML Model has been saved
LOCAL_MODEL_DIR = "/tmp"
ML_MODEL_FILE = LOCAL_MODEL_DIR + "/" + "model.tar.gz"
# MQTT Topic to send messages to IoT Core
iot_core_topic = 'recycle/info'
#Categories that will be returned by the ML Model
CATEGORIES = ['Compost', 'Landfill', 'Recycling']
# Setup logging to stdout
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# Creating a greengrass core sdk client
gg_client = greengrasssdk.client("iot-data")
# Creating s3 client to download ML Model and to Upload images
s3 = boto3.client('s3')
# RGB color tuples used to paint the 8x8 SenseHAT LED matrix.
B = (0, 0, 0)
G = (0, 255, 0)
Bl = (0, 0, 255)
R = (255, 0, 0)
O = (255, 103, 0)
# Each icon below is a flat list of 64 pixels (8 rows x 8 columns),
# passed directly to sense.set_pixels().
# Orange "?" shown while waiting for the joystick:
question_mark = [
B, B, B, O, O, B, B, B,
B, B, O, B, B, O, B, B,
B, B, B, B, B, O, B, B,
B, B, B, B, O, B, B, B,
B, B, B, O, B, B, B, B,
B, B, B, O, B, B, B, B,
B, B, B, B, B, B, B, B,
B, B, B, O, B, B, B, B
]
# Green "C" for the Compost category:
Compost = [
B, G, G, G, G, G, G, B,
B, G, G, G, G, G, G, B,
B, G, G, B, B, B, B, B,
B, G, G, B, B, B, B, B,
B, G, G, B, B, B, B, B,
B, G, G, B, B, B, B, B,
B, G, G, G, G, G, G, B,
B, G, G, G, G, G, G, B
]
# Red "L" for the Landfill category:
Landfill = [
B, R, R, B, B, B, B, B,
B, R, R, B, B, B, B, B,
B, R, R, B, B, B, B, B,
B, R, R, B, B, B, B, B,
B, R, R, B, B, B, B, B,
B, R, R, B, B, B, B, B,
B, R, R, R, R, R, R, B,
B, R, R, R, R, R, R, B
]
# Blue "R" for the Recycling category:
Recycling = [
B, Bl, Bl, Bl, Bl, Bl, B, B,
B, Bl, Bl, Bl, Bl, Bl, Bl, B,
B, Bl, Bl, B, B, Bl, Bl, B,
B, Bl, Bl, Bl, Bl, Bl, Bl, B,
B, Bl, Bl, Bl, Bl, Bl, B, B,
B, Bl, Bl, B, Bl, Bl, B, B,
B, Bl, Bl, B, B, Bl, Bl, B,
B, Bl, Bl, B, B, Bl, Bl, B
]
# Configure the PiCamera to take square images. Other
# resolutions will be scaled to a square when fed into
# the image-classification model which can result
# in image distortion.
camera = PiCamera(resolution=(400,400))
# Initialize SenseHat
print ("*** Initializing SenseHAT")
sense = SenseHat()
def loadModel(modelname):
    """Load an MXNet image-classification checkpoint and bind it for inference.

    The checkpoint files are expected under LOCAL_MODEL_DIR; epoch 0 is loaded.
    Returns a bound mx.mod.Module ready for single-image (1, 3, 224, 224) input.
    """
    start = time.time()
    checkpoint_prefix = LOCAL_MODEL_DIR + "/" + modelname
    sym, arg_params, aux_params = mx.model.load_checkpoint(checkpoint_prefix, 0)
    elapsed_ms = 1000 * (time.time() - start)
    print("*** Loaded in {} milliseconds".format(elapsed_ms))
    # Dummy label required by the module interface at inference time.
    arg_params['prob_label'] = mx.nd.array([0])
    module = mx.mod.Module(symbol=sym)
    module.bind(for_training=False, data_shapes=[('data', (1, 3, 224, 224))])
    module.set_params(arg_params, aux_params)
    return module
def prepareNDArray(filename):
    """Read an image file and convert it to a (1, 3, 224, 224) MXNet NDArray.

    OpenCV loads BGR; the model expects RGB in channel-first (CHW) layout
    with a leading batch dimension.
    """
    bgr = cv2.imread(filename)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    resized = cv2.resize(rgb, (224, 224))
    # (H, W, C) -> (C, H, W), equivalent to the double swapaxes idiom.
    chw = np.transpose(resized, (2, 0, 1))
    batched = chw[np.newaxis, :]
    return mx.nd.array(batched)
def predict(filename, model, categories, n):
    """Classify the image at *filename* and return the top-n predictions.

    Returns a list of [probability, category] pairs, highest probability first.
    """
    Batch = namedtuple('Batch', ['data'])
    data = prepareNDArray(filename)
    start = time.time()
    model.forward(Batch([data]))
    elapsed_ms = 1000 * (time.time() - start)
    print("*** Predicted in {} millsecond".format(elapsed_ms))
    probs = np.squeeze(model.get_outputs()[0].asnumpy())
    # Indices sorted by descending probability.
    ranked = np.argsort(probs)[::-1]
    return [[probs[i], categories[i]] for i in ranked[0:n]]
def init(modelname):
    """Fetch the model archive from S3, unpack it, and load the network.

    Returns (model, categories) where model is a bound MXNet module and
    categories is the ordered list of class labels it predicts.
    """
    s3.download_file(ML_BUCKET_NAME, ML_OBJECT_NAME, ML_MODEL_FILE)
    # NOTE: extractall on a tar from our own bucket; contents are trusted.
    archive = tarfile.open(ML_MODEL_FILE)
    archive.extractall(LOCAL_MODEL_DIR)
    return loadModel(modelname), ['Compost', 'Landfill', 'Recycling']
def capture_and_save_image_as(filename):
    # Capture a single JPEG frame from the PiCamera to the given path.
    camera.capture(filename, format='jpeg')
def create_image_filename():
    """Return a unique image path in LOCAL_RESOURCE_DIR, named by the
    current epoch time in milliseconds."""
    millis = int(time.time() * 1000)
    return "{}/{}.jpg".format(LOCAL_RESOURCE_DIR, millis)
def push_to_s3(filename, folder, classify):
    """Upload a captured image to S3 and announce it on the IoT topic.

    The object key is <folder>/<year>-<month>-<day>-<classify>.jpg.
    Returns the S3 key on success, or None on failure (errors are printed,
    not raised, so the main loop keeps running).
    """
    try:
        # 'with' guarantees the file handle is closed (the original leaked it).
        with open(filename, 'rb') as img:
            now = datetime.now()
            key = str(folder) + "/{}-{}-{}-{}.jpg".format(
                now.year, now.month, now.day,
                classify)
            response = s3.put_object(ACL='private',
                                     Body=img,
                                     Bucket=BUCKET_NAME,
                                     Key=key,
                                     ContentType= 'image/jpg')
        print ('*** Image copied to S3: {}/{}'.format(BUCKET_NAME, key))
        gg_client.publish(topic=iot_core_topic, payload=json.dumps({'message': 'Image sent to S3: {}/{}'.format(BUCKET_NAME, key)}))
        return key
    except Exception as e:
        # Best-effort upload: log and continue rather than crash the loop.
        msg = "Pushing to S3 failed: " + str(e)
        print (msg)
# Initialize The Image Classification MXNET model
ic,c = init("image-classification")
# Map category names to their LED icon grids.  An explicit lookup replaces
# the original eval() on the model's output string, which would execute
# arbitrary code if a category name were ever attacker-controlled.
CATEGORY_ICONS = {'Compost': Compost, 'Landfill': Landfill, 'Recycling': Recycling}
while True:
    # Idle state: show the question mark and wait for the joystick.
    sense.set_pixels(question_mark)
    print ("*** Waiting for Joystick Event")
    sense.stick.wait_for_event(emptybuffer=True)
    image_filename = create_image_filename()
    capture_and_save_image_as(image_filename)
    print ("*** Picture Taken: {}".format(image_filename))
    print ("*** Image Classification")
    predicted_result = predict(image_filename,ic,c,1)
    # predict() returns [[probability, category], ...], best first.
    confidence = predicted_result[0][0]
    label = predicted_result[0][1]
    print ("*** Classified image as {} with a confidence of {}".format(label, confidence))
    gg_client.publish(
        topic=iot_core_topic,
        payload=json.dumps({'message':'Classified image as {} with a confidence of {}'.format(label, str(confidence))})
    )
    sense.set_pixels(CATEGORY_ICONS[label])
    if (BUCKET_NAME != ""):
        # Copy image file to s3 with classification and confidence value
        folder = str("smart-recycle-kit/2020/known/") + str(label)
        classified = str(label) + "-" + str(confidence)
        push_to_s3(image_filename, folder, classified)
    time.sleep(2)
# This is a dummy handler and will not be invoked
# Instead the code above will be executed in an infinite loop
def function_handler(event, context):
    """No-op Greengrass Lambda entry point; the module-level loop does the work."""
    return
"logging.getLogger",
"sense_hat.SenseHat",
"tarfile.open",
"boto3.client",
"greengrasssdk.client",
"time.sleep",
"numpy.argsort",
"mxnet.mod.Module",
"mxnet.nd.array",
"collections.namedtuple",
"picamera.PiCamera",
"numpy.squeeze",
"cv2.cvtColor",
"cv2.resize",
"mxnet.model.load_checkpoi... | [((1702, 1729), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1719, 1729), False, 'import logging\n'), ((1730, 1789), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (1749, 1789), False, 'import logging\n'), ((1843, 1875), 'greengrasssdk.client', 'greengrasssdk.client', (['"""iot-data"""'], {}), "('iot-data')\n", (1863, 1875), False, 'import greengrasssdk\n'), ((1945, 1963), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (1957, 1963), False, 'import boto3\n'), ((3111, 3142), 'picamera.PiCamera', 'PiCamera', ([], {'resolution': '(400, 400)'}), '(resolution=(400, 400))\n', (3119, 3142), False, 'from picamera import PiCamera\n'), ((3209, 3219), 'sense_hat.SenseHat', 'SenseHat', ([], {}), '()\n', (3217, 3219), False, 'from sense_hat import SenseHat\n'), ((3262, 3273), 'time.time', 'time.time', ([], {}), '()\n', (3271, 3273), False, 'import time\n'), ((3366, 3404), 'mxnet.model.load_checkpoint', 'mx.model.load_checkpoint', (['modelname', '(0)'], {}), '(modelname, 0)\n', (3390, 3404), True, 'import mxnet as mx\n'), ((3418, 3429), 'time.time', 'time.time', ([], {}), '()\n', (3427, 3429), False, 'import time\n'), ((3547, 3563), 'mxnet.nd.array', 'mx.nd.array', (['[0]'], {}), '([0])\n', (3558, 3563), True, 'import mxnet as mx\n'), ((3578, 3603), 'mxnet.mod.Module', 'mx.mod.Module', ([], {'symbol': 'sym'}), '(symbol=sym)\n', (3591, 3603), True, 'import mxnet as mx\n'), ((3792, 3812), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (3802, 3812), False, 'import cv2\n'), ((3827, 3863), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (3839, 3863), False, 'import cv2\n'), ((3878, 3905), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (3888, 3905), False, 'import cv2\n'), ((3921, 3943), 'numpy.swapaxes', 
'np.swapaxes', (['img', '(0)', '(2)'], {}), '(img, 0, 2)\n', (3932, 3943), True, 'import numpy as np\n'), ((3958, 3980), 'numpy.swapaxes', 'np.swapaxes', (['img', '(1)', '(2)'], {}), '(img, 1, 2)\n', (3969, 3980), True, 'import numpy as np\n'), ((4029, 4045), 'mxnet.nd.array', 'mx.nd.array', (['img'], {}), '(img)\n', (4040, 4045), True, 'import mxnet as mx\n'), ((4156, 4185), 'collections.namedtuple', 'namedtuple', (['"""Batch"""', "['data']"], {}), "('Batch', ['data'])\n", (4166, 4185), False, 'from collections import namedtuple\n'), ((4199, 4210), 'time.time', 'time.time', ([], {}), '()\n', (4208, 4210), False, 'import time\n'), ((4262, 4273), 'time.time', 'time.time', ([], {}), '()\n', (4271, 4273), False, 'import time\n'), ((4420, 4436), 'numpy.squeeze', 'np.squeeze', (['prob'], {}), '(prob)\n', (4430, 4436), True, 'import numpy as np\n'), ((4731, 4758), 'tarfile.open', 'tarfile.open', (['ML_MODEL_FILE'], {}), '(ML_MODEL_FILE)\n', (4743, 4758), False, 'import tarfile\n'), ((7305, 7318), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7315, 7318), False, 'import time\n'), ((4463, 4479), 'numpy.argsort', 'np.argsort', (['prob'], {}), '(prob)\n', (4473, 4479), True, 'import numpy as np\n'), ((5306, 5320), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5318, 5320), False, 'from datetime import datetime\n'), ((5077, 5088), 'time.time', 'time.time', ([], {}), '()\n', (5086, 5088), False, 'import time\n')] |
import numpy as np
import math
'''
Using the condensed distance matrix format as used in scipy pdist
Only a triangular matrix is kept in a vector, here we take the upper triangular matrix
Vector = [dist(0,1), dist(0,2), ..., dist(0,n), dist(1,2) ...]
k-th combination from (n C 2), where n is matrix dimension, tells the row and column associated with Vector[k]
'''
def kMedoids(D, dim, k, tmax=100):
    """Cluster dim points into k clusters with the k-medoids algorithm.

    D is a condensed (upper-triangular) distance vector as produced by
    scipy's pdist; dim is the number of points it encodes; tmax bounds the
    number of assign/update iterations.

    Returns (M, C): M is a sorted array of k medoid point indices and C maps
    each cluster index kappa in [0, k) to an array of its member point indices.

    This replaces a broken draft (Python-2 xrange, a set-comprehension of
    lists that raised TypeError, reads of an unpopulated cluster dict, and
    medoid sampling with replacement).
    """
    def _dist(a, b):
        # Distance lookup in the condensed vector; diagonal entries are 0.
        if a == b:
            return 0.0
        if a < b:
            a, b = b, a
        return D[dim * b - b * (b + 1) // 2 + a - 1 - b]

    def _assign(medoids):
        # Assign every point to its nearest medoid (ties go to the lowest kappa).
        clusters = {kappa: [] for kappa in range(k)}
        for p in range(dim):
            dists = [_dist(p, medoids[kappa]) for kappa in range(k)]
            clusters[int(np.argmin(dists))].append(p)
        return clusters

    # Distinct random initial medoids; sampling without replacement avoids
    # degenerate duplicate medoids.
    M = np.sort(np.random.choice(dim, k, replace=False))
    for _ in range(tmax):
        C = _assign(M)
        Mnew = np.copy(M)
        for kappa in range(k):
            members = C[kappa]
            if not members:
                continue  # empty cluster: keep the previous medoid
            # The new medoid minimizes total distance to the cluster members.
            costs = [sum(_dist(p, q) for q in members) for p in members]
            Mnew[kappa] = members[int(np.argmin(costs))]
        Mnew = np.sort(Mnew)
        if np.array_equal(M, Mnew):
            break
        M = Mnew
    # Final cluster memberships for the converged medoids.
    C = {kappa: np.array(members) for kappa, members in _assign(M).items()}
    return M, C
def condensed_index_to_row_col(index, dimension):
    """Map a condensed-vector index back to (row, col) in the square matrix.

    Closed-form inversion of the upper-triangular indexing scheme
    (see http://stackoverflow.com/a/14839010); always returns row < col.
    """
    b = 1 - 2 * dimension
    row = math.floor((-b - math.sqrt(b * b - 8 * index)) / 2)
    col = index + row * (b + row + 2) / 2 + 1
    return int(row), int(col)
def square_to_condensed(i, j, n):
    """Map square-matrix indices (i, j) to the condensed (upper-triangular)
    vector index for an n x n distance matrix.

    Symmetric in i and j; raises AssertionError for diagonal elements, which
    have no condensed entry.  Based on http://stackoverflow.com/a/36867493.
    """
    assert i != j, "no diagonal elements in condensed matrix"
    if i < j:
        i, j = j, i
    # Integer division keeps the result an int: the original '/' produced a
    # float under Python 3, which is invalid as an array index.
    return n*j - j*(j+1)//2 + i - 1 - j
"numpy.copy",
"numpy.mean",
"numpy.amin",
"numpy.random.choice",
"numpy.where",
"numpy.sort",
"math.sqrt",
"numpy.array_equal",
"numpy.argmin"
] | [((636, 646), 'numpy.copy', 'np.copy', (['M'], {}), '(M)\n', (643, 646), True, 'import numpy as np\n'), ((547, 571), 'numpy.random.choice', 'np.random.choice', (['dim', 'k'], {}), '(dim, k)\n', (563, 571), True, 'import numpy as np\n'), ((1561, 1574), 'numpy.sort', 'np.sort', (['Mnew'], {}), '(Mnew)\n', (1568, 1574), True, 'import numpy as np\n'), ((1618, 1641), 'numpy.array_equal', 'np.array_equal', (['M', 'Mnew'], {}), '(M, Mnew)\n', (1632, 1641), True, 'import numpy as np\n'), ((1673, 1686), 'numpy.copy', 'np.copy', (['Mnew'], {}), '(Mnew)\n', (1680, 1686), True, 'import numpy as np\n'), ((1755, 1781), 'numpy.argmin', 'np.argmin', (['D[:, M]'], {'axis': '(1)'}), '(D[:, M], axis=1)\n', (1764, 1781), True, 'import numpy as np\n'), ((1046, 1077), 'numpy.amin', 'np.amin', (['D[from_index:to_index]'], {}), '(D[from_index:to_index])\n', (1053, 1077), True, 'import numpy as np\n'), ((1444, 1468), 'numpy.mean', 'np.mean', (['D[dist_indices]'], {}), '(D[dist_indices])\n', (1451, 1468), True, 'import numpy as np\n'), ((1485, 1500), 'numpy.argmin', 'np.argmin', (['mean'], {}), '(mean)\n', (1494, 1500), True, 'import numpy as np\n'), ((1835, 1855), 'numpy.where', 'np.where', (['(J == kappa)'], {}), '(J == kappa)\n', (1843, 1855), True, 'import numpy as np\n'), ((2040, 2069), 'math.sqrt', 'math.sqrt', (['(b ** 2 - 8 * index)'], {}), '(b ** 2 - 8 * index)\n', (2049, 2069), False, 'import math\n')] |
import torch
import torch.nn.functional as F
import time
import os
import cv2
import numpy as np
from sklearn.neighbors import KDTree
from random import sample
from lib.utils import save_session, AverageMeter
from lib.ransac_voting_gpu_layer.ransac_voting_gpu import ransac_voting_layer_v3
from lib.ransac_voting_gpu_layer.ransac_voting_gpu import estimate_voting_distribution_with_mean
from lib.regressor.regressor import load_wrapper, get_2d_ctypes
from src.evaluate import read_diameter
import pdb
# True when a CUDA device is available; gates the .cuda() batch transfers below.
cuda = torch.cuda.is_available()
class CoreTrainer(object):
def __init__(self, model, optimizer, train_loader, test_loader, args):
super(CoreTrainer, self).__init__()
self.model = model
self.train_loader = train_loader
self.test_loader = test_loader
self.optimizer = optimizer
self.args = args
    def train(self, epoch):
        """Run one training epoch over self.train_loader.

        Moves each batch to the GPU when available, computes the four task
        losses (symmetry correspondence, mask, 2D keypoints, graph), combines
        them with the lambda_* weights from args, steps the optimizer, and
        prints running averages per batch.
        """
        self.model.train()
        # Running averages for timing and each loss term.
        time_record = AverageMeter()
        sym_cor_loss_record = AverageMeter()
        mask_loss_record = AverageMeter()
        pts2d_loss_record = AverageMeter()
        graph_loss_record = AverageMeter()
        total_loss_record = AverageMeter()
        for i_batch, batch in enumerate(self.train_loader):
            start_time = time.time()
            if cuda:
                batch['image'] = batch['image'].cuda()
                batch['sym_cor'] = batch['sym_cor'].cuda()
                batch['mask'] = batch['mask'].cuda()
                batch['pts2d_map'] = batch['pts2d_map'].cuda()
                batch['graph'] = batch['graph'].cuda()
            # The model returns both predictions and per-task losses.
            sym_cor_pred, mask_pred, pts2d_map_pred, graph_pred, sym_cor_loss, mask_loss, pts2d_loss, graph_loss = \
                    self.model(batch['image'], batch['sym_cor'], batch['mask'], batch['pts2d_map'], batch['graph'])
            # losses: move to the same device
            sym_cor_loss = sym_cor_loss.mean()
            mask_loss = mask_loss.mean()
            pts2d_loss = pts2d_loss.mean()
            graph_loss = graph_loss.mean()
            # Weighted sum of the four task losses.
            current_loss = self.args.lambda_sym_cor * sym_cor_loss + \
                           self.args.lambda_mask * mask_loss + \
                           self.args.lambda_pts2d * pts2d_loss + \
                           self.args.lambda_graph * graph_loss
            # Step optimizer
            self.optimizer.zero_grad()
            current_loss.backward()
            self.optimizer.step()
            # print information during training
            time_record.update(time.time() - start_time)
            sym_cor_loss_record.update(sym_cor_loss.detach().cpu().numpy(), len(batch['image']))
            mask_loss_record.update(mask_loss.detach().cpu().numpy(), len(batch['image']))
            pts2d_loss_record.update(pts2d_loss.detach().cpu().numpy(), len(batch['image']))
            graph_loss_record.update(graph_loss.detach().cpu().numpy(), len(batch['image']))
            total_loss_record.update(current_loss.detach().cpu().numpy(), len(batch['image']))
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time: {time.val:.3f} ({time.avg:.3f})\t'
                  'Sym: {sym.val:.4f} ({sym.avg:.4f})\t'
                  'Mask: {mask.val:.4f} ({mask.avg:.4f})\t'
                  'Pts: {pts.val:.4f} ({pts.avg:.4f})\t'
                  'Graph: {graph.val:.4f} ({graph.avg:.4f})\t'
                  'Total: {total.val:.4f} ({total.avg:.4f})'.format(epoch, i_batch, len(self.train_loader),
                                                                    time=time_record, sym=sym_cor_loss_record,
                                                                    mask=mask_loss_record, pts=pts2d_loss_record,
                                                                    graph=graph_loss_record, total=total_loss_record))
def visualize_symmetry(self, sym_cor_pred, mask_pred, sym_cor, mask, image, epoch, i_batch):
img_dir = os.path.join(self.args.save_dir, 'image', str(self.args.lr))
if not os.path.exists(img_dir):
os.makedirs(img_dir)
# visualize prediction
image_pred = image.copy()
mask_pred = mask_pred.detach().cpu().numpy()[0]
sym_cor_pred = sym_cor_pred.detach().cpu().numpy()
ys, xs = np.nonzero(mask_pred)
for i_pt in sample([i for i in range(len(ys))], min(100, len(ys))):
y = int(round(ys[i_pt]))
x = int(round(xs[i_pt]))
x_cor, y_cor = sym_cor_pred[:, y, x]
x_cor = int(round(x + x_cor))
y_cor = int(round(y + y_cor))
image_pred = cv2.line(image_pred, (x, y), (x_cor, y_cor), (0, 0, 255), 1)
img_pred_name = os.path.join(img_dir, '{}_{}_sym.jpg'.format(epoch, i_batch))
cv2.imwrite(img_pred_name, image_pred)
# visualize ground truth
image_gt = image.copy()
mask = mask.detach().cpu().numpy()[0]
sym_cor = sym_cor.detach().cpu().numpy()
ys, xs = np.nonzero(mask)
for i_pt in sample([i for i in range(len(ys))], min(100, len(ys))):
y = int(round(ys[i_pt]))
x = int(round(xs[i_pt]))
x_cor, y_cor = sym_cor[:, y, x]
x_cor = int(round(x + x_cor))
y_cor = int(round(y + y_cor))
image_gt = cv2.line(image_gt, (x, y), (x_cor, y_cor), (0, 0, 255), 1)
img_gt_name = os.path.join(img_dir, '{}_{}_sym_gt.jpg'.format(epoch, i_batch))
cv2.imwrite(img_gt_name, image_gt)
def visualize_mask(self, mask_pred, mask, epoch, i_batch):
mask_pred = mask_pred.detach().cpu().numpy()[0]
mask = np.uint8(mask.detach().cpu().numpy()[0])
image = np.zeros((mask.shape[0], mask.shape[1], 3), dtype=np.uint8)
# red: prediction
image[mask_pred == 1.] += np.array([0, 0, 128], dtype=np.uint8)
# blue: gt
image[mask != 0] += np.array([128, 0, 0], dtype=np.uint8)
img_dir = os.path.join(self.args.save_dir, 'image', str(self.args.lr))
if not os.path.exists(img_dir):
os.makedirs(img_dir)
img_name = os.path.join(img_dir, '{}_{}_mask.jpg'.format(epoch, i_batch))
cv2.imwrite(img_name, image)
def visualize_keypoints(self, pts2d_map_pred, pts2d, mask_pred, image, epoch, i_batch):
img_dir = os.path.join(self.args.save_dir, 'image', str(self.args.lr))
if not os.path.exists(img_dir):
os.makedirs(img_dir)
# vote keypoints
pts2d_pred, _ = self.vote_keypoints(pts2d_map_pred, mask_pred)
pts2d_pred = pts2d_pred.detach().cpu().numpy()[0]
# draw predication
image_pred = image.copy()
for i in range(pts2d_pred.shape[0]):
x, y = pts2d_pred[i]
x = int(round(x))
y = int(round(y))
# radius=2, color=red, thickness=filled
image_pred = cv2.circle(image_pred, (x, y), 2, (0, 0, 255), thickness=-1)
img_pred_name = os.path.join(img_dir, '{}_{}_pts.jpg'.format(epoch, i_batch))
cv2.imwrite(img_pred_name, image_pred)
# draw ground truth
pts2d = pts2d.detach().cpu().numpy()
image_gt = image.copy()
for i in range(pts2d.shape[0]):
x, y = pts2d[i]
x = int(round(x))
y = int(round(y))
# radius=2, color=white, thickness=filled
image_gt = cv2.circle(image_gt, (x, y), 2, (255, 255, 255), thickness=-1)
img_gt_name = os.path.join(img_dir, '{}_{}_pts_gt.jpg'.format(epoch, i_batch))
cv2.imwrite(img_gt_name, image_gt)
def visualize_votes(self, map_pred, map_gt, mask_gt, epoch, i_batch):
img_dir = os.path.join(self.args.save_dir, 'image', str(self.args.lr))
if not os.path.exists(img_dir):
os.makedirs(img_dir)
map_pred = map_pred.detach().cpu().numpy()
map_gt = map_gt.detach().cpu().numpy()
mask = np.uint8(mask_gt.detach().cpu().numpy()[0])
image = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.uint8)
ys, xs = np.nonzero(mask)
# visualize pred
images_pred = [image.copy() for _ in range(map_pred.shape[0] // 2)]
for i_pt in range(len(ys)):
for i_keypt in range(map_pred.shape[0] // 2):
y = ys[i_pt]
x = xs[i_pt]
map_x = map_pred[i_keypt * 2, y, x]
map_y = map_pred[i_keypt * 2 + 1, y, x]
if map_x == 0:
continue
angle = np.arctan(np.abs(map_y) / np.abs(map_x)) / (np.pi / 2) * 90
if map_x < 0 and map_y > 0:
angle = 180 - angle
if map_x < 0 and map_y < 0:
angle = 180 + angle
if map_x >= 0 and map_y < 0:
angle = 360 - angle
images_pred[i_keypt][y, x] = int(round(angle / 360 * 255))
images_pred = [cv2.applyColorMap(im_gray, cv2.COLORMAP_HSV) for im_gray in images_pred]
for i, im in enumerate(images_pred):
im[mask == 0] = (0, 0, 0)
img_pred_name = os.path.join(img_dir, '{}_{}_vote_kp_{}_pred.jpg'.format(epoch, i_batch, i))
cv2.imwrite(img_pred_name, im)
# visualize gt
images_gt = [image.copy() for _ in range(map_gt.shape[0] // 2)]
for i_pt in range(len(ys)):
for i_keypt in range(map_gt.shape[0] // 2):
y = ys[i_pt]
x = xs[i_pt]
map_x = map_gt[i_keypt * 2, y, x]
map_y = map_gt[i_keypt * 2 + 1, y, x]
if map_x == 0:
continue
angle = np.arctan(np.abs(map_y) / np.abs(map_x)) / (np.pi / 2) * 90
if map_x < 0 and map_y > 0:
angle = 180 - angle
if map_x < 0 and map_y < 0:
angle = 180 + angle
if map_x >= 0 and map_y < 0:
angle = 360 - angle
images_gt[i_keypt][y, x] = int(round(angle / 360 * 255))
images_gt = [cv2.applyColorMap(im_gray, cv2.COLORMAP_HSV) for im_gray in images_gt]
for i, im in enumerate(images_gt):
im[mask == 0] = (0, 0, 0)
img_gt_name = os.path.join(img_dir, '{}_{}_vote_kp_{}_gt.jpg'.format(epoch, i_batch, i))
cv2.imwrite(img_gt_name, im)
    def visualize_graph(self, graph_pred, graph_gt, pts2d_gt, mask_pred, mask_gt, image, epoch, i_batch):
        """Save graph-edge visualizations for prediction (red) and ground truth
        (green).

        For each keypoint pair, the edge vector is taken as the mean of the
        dense edge map over foreground pixels and drawn from the start
        keypoint's ground-truth location.
        NOTE(review): end_idx only enumerates edges; the drawn segment ends at
        start + mean edge vector, not at the end keypoint — confirm intended.
        """
        img_dir = os.path.join(self.args.save_dir, 'image', str(self.args.lr))
        if not os.path.exists(img_dir):
            os.makedirs(img_dir)
        image_gt = image.copy()
        image_pred = image.copy()
        # Reshape flat edge channels to (n_edges, 2, H, W).
        graph_pred = graph_pred.detach().cpu().numpy()
        graph_pred = graph_pred.reshape((-1, 2, image.shape[0], image.shape[1]))
        graph_gt = graph_gt.detach().cpu().numpy()
        graph_gt = graph_gt.reshape((-1, 2, image.shape[0], image.shape[1]))
        pts2d_gt = pts2d_gt.numpy()
        mask_pred = mask_pred.detach().cpu().numpy()[0]
        mask_gt = mask_gt.detach().cpu().numpy()[0]
        num_pts = pts2d_gt.shape[0]
        # Edges are enumerated in upper-triangular (start < end) order.
        i_edge = 0
        for start_idx in range(0, num_pts - 1):
            for end_idx in range(start_idx + 1, num_pts):
                # pred, red
                start = np.int16(np.round(pts2d_gt[start_idx]))
                edge_x = graph_pred[i_edge, 0][mask_pred == 1.].mean()
                edge_y = graph_pred[i_edge, 1][mask_pred == 1.].mean()
                edge = np.array([edge_x, edge_y])
                end = np.int16(np.round(pts2d_gt[start_idx] + edge))
                image_pred = cv2.line(image_pred, tuple(start), tuple(end), (0, 0, 255), 1)
                # gt, green
                start = np.int16(np.round(pts2d_gt[start_idx]))
                edge_x = graph_gt[i_edge, 0][mask_gt == 1.].mean()
                edge_y = graph_gt[i_edge, 1][mask_gt == 1.].mean()
                edge = np.array([edge_x, edge_y])
                end = np.int16(np.round(pts2d_gt[start_idx] + edge))
                image_gt = cv2.line(image_gt, tuple(start), tuple(end), (0, 255, 0), 1)
                i_edge += 1
        img_gt_name = os.path.join(img_dir, '{}_{}_gt_graph.jpg'.format(epoch, i_batch))
        cv2.imwrite(img_gt_name, image_gt)
        img_pred_name = os.path.join(img_dir, '{}_{}_pred_graph.jpg'.format(epoch, i_batch))
        cv2.imwrite(img_pred_name, image_pred)
    def test(self, epoch):
        """Evaluate the model on self.test_loader and return the average
        combined loss.

        Also dumps qualitative visualizations (symmetry, mask, votes,
        keypoints, graph) for the first three batches.
        """
        print('Testing...')
        self.model.eval()
        loss_record = AverageMeter()
        data_loader = self.test_loader
        with torch.no_grad():
            for i_batch, batch in enumerate(data_loader):
                if cuda:
                    batch['image'] = batch['image'].cuda()
                    batch['sym_cor'] = batch['sym_cor'].cuda()
                    batch['mask'] = batch['mask'].cuda()
                    batch['pts2d_map'] = batch['pts2d_map'].cuda()
                    batch['graph'] = batch['graph'].cuda()
                sym_cor_pred, mask_pred, pts2d_map_pred, graph_pred, sym_cor_loss, mask_loss, pts2d_loss, graph_loss = \
                    self.model(batch['image'], batch['sym_cor'], batch['mask'], batch['pts2d_map'], batch['graph'])
                # Binarize the predicted mask at 0.5.
                mask_pred[mask_pred > 0.5] = 1.
                mask_pred[mask_pred <= 0.5] = 0.
                # losses: move to the same device
                sym_cor_loss = sym_cor_loss.mean()
                mask_loss = mask_loss.mean()
                pts2d_loss = pts2d_loss.mean()
                graph_loss = graph_loss.mean()
                current_loss = self.args.lambda_sym_cor * sym_cor_loss + \
                               self.args.lambda_mask * mask_loss + \
                               self.args.lambda_pts2d * pts2d_loss + \
                               self.args.lambda_graph * graph_loss
                if i_batch < 3:
                    # some visualizations
                    image = cv2.imread(batch['image_name'][0])
                    self.visualize_symmetry(sym_cor_pred[0],
                                            mask_pred[0],
                                            batch['sym_cor'][0],
                                            batch['mask'][0],
                                            image,
                                            epoch,
                                            i_batch)
                    self.visualize_mask(mask_pred[0],
                                        batch['mask'][0],
                                        epoch,
                                        i_batch)
                    self.visualize_votes(pts2d_map_pred[0],
                                         batch['pts2d_map'][0],
                                         batch['mask'][0],
                                         epoch,
                                         i_batch)
                    try:
                        self.visualize_keypoints(pts2d_map_pred[:1],
                                                 batch['pts2d'][0],
                                                 batch['mask'][:1],
                                                 image,
                                                 epoch,
                                                 i_batch)
                    except:
                        # we may not be able to vote keypoints at early stages
                        pass
                    self.visualize_graph(graph_pred[0],
                                         batch['graph'][0],
                                         batch['pts2d'][0],
                                         mask_pred[0],
                                         batch['mask'][0],
                                         image,
                                         epoch,
                                         i_batch)
                loss_record.update(current_loss.detach().cpu().numpy(), len(batch['image']))
        print('Loss: {:.4f}'.format(loss_record.avg))
        return loss_record.avg
    def vote_keypoints(self, pts2d_map, mask):
        """Vote 2D keypoint locations from a dense prediction map via RANSAC.

        Returns (mean, var): the voted keypoint coordinates and their
        estimated covariance, as produced by the pvnet voting layers.
        """
        mask = mask[:, 0] # remove dummy dimension
        mask = (mask > 0.5).long() # convert to binary and int64 to comply with pvnet interface
        # (B, 2K, H, W) -> (B, H, W, K, 2): one (x, y) vote per keypoint per pixel.
        pts2d_map = pts2d_map.permute((0, 2, 3, 1))
        bs, h, w, num_keypts_2 = pts2d_map.shape
        pts2d_map = pts2d_map.view((bs, h, w, num_keypts_2 // 2, 2))
        mean = ransac_voting_layer_v3(mask, pts2d_map, 512, inlier_thresh=0.99)
        mean, var = estimate_voting_distribution_with_mean(mask, pts2d_map, mean)
        return mean, var
def flatten_sym_cor(self, sym_cor, mask):
ys, xs = np.nonzero(mask)
flat = np.zeros((ys.shape[0], 2, 2), dtype=np.float32)
for i_pt in range(len(ys)):
y = ys[i_pt]
x = xs[i_pt]
x_cor, y_cor = sym_cor[:, y, x]
flat[i_pt, 0] = [x, y]
flat[i_pt, 1] = [x + x_cor, y + y_cor]
return flat
    def filter_symmetry(self, vecs_pred, sigma=0.01, min_count=100, n_neighbors=100):
        """Cluster per-pixel symmetry-plane vectors into a few dominant
        hypotheses and weight them by support.

        Returns (qs1_cross_qs2, symmetry_weight): unit mean vectors of the
        surviving clusters and their relative sizes (normalized to max 1).
        Returns empty arrays when fewer than min_count vectors are given.
        """
        # Chen: I have to set min_count >= neighbors here.
        # Otherwise kdtree will complain "k must be less than or equal to the number of training points"
        if len(vecs_pred) < min_count:
            qs1_cross_qs2 = np.zeros((0, 3), dtype=np.float32)
            symmetry_weight = np.zeros((0,), dtype=np.float32)
            return qs1_cross_qs2, symmetry_weight
        # Normalize by the (x, y) magnitude before clustering.
        vecs_pred /= np.sqrt(np.sum(vecs_pred[:, :2]**2, axis=1)).reshape((-1, 1))
        kdt = KDTree(vecs_pred, leaf_size=40, metric='euclidean') # following matlab default values
        dis, _ = kdt.query(vecs_pred, k=n_neighbors)
        # Low saliency = densely supported vector; process those first.
        saliency = np.mean(dis * dis, axis=1, dtype=np.float32)
        order = np.argsort(saliency)
        # seeds[0]: representative vector index per cluster; seeds[1]: cluster size.
        seeds = np.zeros((2, order.shape[0]), dtype=np.uint32)
        seeds[0][0] = order[0]
        seeds[1][0] = 1
        seeds_size = 1
        # flags[i]: cluster id assigned to vector i.
        flags = np.zeros((order.shape[0],), dtype=np.uint32)
        flags[order[0]] = 0
        for i in range(1, order.shape[0]):
            vec = vecs_pred[order[i]]
            candidates = vecs_pred[seeds[0]]
            dif = candidates - vec
            norm = np.linalg.norm(dif, axis=1)
            closest_seed_i = norm.argmin()
            min_dis = norm[closest_seed_i]
            if min_dis < sigma:
                # Join the nearest existing cluster.
                flags[order[i]] = closest_seed_i
                seeds[1][closest_seed_i] = seeds[1][closest_seed_i] + 1
            else:
                # Start a new cluster seeded by this vector.
                seeds[0, seeds_size] = order[i]
                seeds[1, seeds_size] = 1
                flags[order[i]] = seeds_size
                seeds_size += 1
        seeds = seeds[:, :seeds_size]
        # Keep clusters with more than a third of the largest cluster's support.
        valid_is = np.argwhere(seeds[1] > (np.max(seeds[1]) / 3)).transpose()[0]
        seeds = seeds[:, valid_is]
        n_symmetry = seeds.shape[1]
        qs1_cross_qs2 = np.zeros((n_symmetry, 3), dtype=np.float32)
        for i in range(n_symmetry):
            row_is = np.argwhere(flags == valid_is[i]).transpose()[0]
            qs1_cross_qs2[i] = np.mean(vecs_pred[row_is], axis=0)
            qs1_cross_qs2[i] /= np.linalg.norm(qs1_cross_qs2[i])
        symmetry_weight = np.float32(seeds[1])
        symmetry_weight /= np.max(symmetry_weight)
        return qs1_cross_qs2, symmetry_weight
    def fill_intermediate_predictions(self, regressor, predictions, K_inv, pts3d, pts2d_pred_loc, pts2d_pred_var, graph_pred, sym_cor_pred, mask_pred, normal_gt):
        """Copy all intermediate network outputs into the C regressor's
        prediction container via its ctypes setters.

        Loads: 3D keypoints, normalized 2D keypoint predictions with inverse
        square-root covariances, mean edge vectors with their inverse
        square-root covariances, the ground-truth symmetry normal, and
        filtered symmetry-plane hypotheses with weights.
        """
        # load intermediate representations to regressor
        n_keypts = self.args.num_keypoints
        n_edges = n_keypts * (n_keypts - 1) // 2
        # point3D_gt
        regressor.set_point3D_gt(predictions, get_2d_ctypes(pts3d), n_keypts)
        # point2D_pred: homogeneous pixel coords mapped to normalized camera coords.
        point2D_pred = np.matrix(np.ones((3, n_keypts), dtype=np.float32))
        point2D_pred[:2] = pts2d_pred_loc.transpose()
        point2D_pred = np.array((K_inv * point2D_pred)[:2]).transpose()
        regressor.set_point2D_pred(predictions,
                                   get_2d_ctypes(point2D_pred),
                                   n_keypts)
        # point_inv_half_var
        point_inv_half_var = np.zeros((n_keypts, 2, 2), dtype=np.float32)
        for i in range(n_keypts): # compute cov^{-1/2}
            cov = np.matrix(pts2d_pred_var[i])
            cov = (cov + cov.transpose()) / 2 # ensure the covariance matrix is symmetric
            v, u = np.linalg.eig(cov)
            v = np.matrix(np.diag(1. / np.sqrt(v)))
            point_inv_half_var[i] = u * v * u.transpose()
        point_inv_half_var = point_inv_half_var.reshape((n_keypts, 4))
        regressor.set_point_inv_half_var(predictions,
                                         get_2d_ctypes(point_inv_half_var),
                                         n_keypts)
        # normal_gt
        regressor.set_normal_gt(predictions, normal_gt.ctypes)
        # vec_pred and edge_inv_half_var
        graph_pred = graph_pred.reshape((n_edges, 2, graph_pred.shape[1], graph_pred.shape[2]))
        vec_pred = np.zeros((n_edges, 2), dtype=np.float32)
        edge_inv_half_var = np.zeros((n_edges, 2, 2), dtype=np.float32)
        for i in range(n_edges):
            # Mean edge vector over foreground pixels, plus its cov^{-1/2}.
            xs = graph_pred[i, 0][mask_pred == 1.]
            ys = graph_pred[i, 1][mask_pred == 1.]
            vec_pred[i] = [xs.mean(), ys.mean()]
            try:
                cov = np.cov(xs, ys)
                cov = (cov + cov.transpose()) / 2 # ensure the covariance matrix is symmetric
                v, u = np.linalg.eig(cov)
                v = np.matrix(np.diag(1. / np.sqrt(v)))
                edge_inv_half_var[i] = u * v * u.transpose()
            except:
                # Degenerate covariance (e.g. too few pixels): fall back to identity.
                edge_inv_half_var[i] = np.eye(2)
        vec_pred = np.array(K_inv[:2, :2] * np.matrix(vec_pred).transpose()).transpose()
        edge_inv_half_var = edge_inv_half_var.reshape((n_edges, 4))
        regressor.set_vec_pred(predictions,
                               get_2d_ctypes(vec_pred),
                               n_edges)
        regressor.set_edge_inv_half_var(predictions,
                                        get_2d_ctypes(edge_inv_half_var),
                                        n_edges)
        # qs1_cross_qs2 and symmetry weight
        sym_cor_pred = self.flatten_sym_cor(sym_cor_pred, mask_pred)
        qs1_cross_qs2_all = np.zeros((sym_cor_pred.shape[0], 3), dtype=np.float32)
        for i in range(sym_cor_pred.shape[0]):
            # Cross product of the normalized homogeneous pair gives the
            # symmetry-plane constraint vector for this pixel.
            qs1 = np.ones((3,), dtype=np.float32)
            qs2 = np.ones((3,), dtype=np.float32)
            qs1[:2] = sym_cor_pred[i][0]
            qs2[:2] = sym_cor_pred[i][1]
            qs1 = np.array(K_inv * np.matrix(qs1).transpose()).transpose()[0]
            qs2 = np.array(K_inv * np.matrix(qs2).transpose()).transpose()[0]
            qs1_cross_qs2_all[i] = np.cross(qs1, qs2)
        qs1_cross_qs2_filtered, symmetry_weight = self.filter_symmetry(qs1_cross_qs2_all)
        n_symmetry = qs1_cross_qs2_filtered.shape[0]
        regressor.set_qs1_cross_qs2(predictions,
                                    get_2d_ctypes(qs1_cross_qs2_filtered),
                                    n_symmetry)
        regressor.set_symmetry_weight(predictions,
                                      symmetry_weight.ctypes,
                                      n_symmetry)
    def regress_pose(self, regressor, predictions, pr_para, pi_para, K_inv, pts3d, pts2d_pred_loc, pts2d_pred_var, graph_pred, sym_cor_pred, mask_pred, normal_gt):
        """Estimate the 6D pose from intermediate predictions via the C regressor.

        Returns (R_final, t_final, R_init, t_init).  When the mask is empty
        (object not detected) identity rotation and zero translation are
        returned for both the initial and refined pose.
        """
        if mask_pred.sum() == 0:
            # object is not detected
            R = np.eye(3, dtype=np.float32)
            t = np.zeros((3, 1), dtype=np.float32)
            return R, t, R, t
        self.fill_intermediate_predictions(regressor,
                                           predictions,
                                           K_inv,
                                           pts3d,
                                           pts2d_pred_loc,
                                           pts2d_pred_var,
                                           graph_pred,
                                           sym_cor_pred,
                                           mask_pred,
                                           normal_gt)
        # initialize pose
        predictions = regressor.initialize_pose(predictions, pi_para)
        # Pose container layout: row 0 is t, rows 1-3 are R transposed.
        pose_init = np.zeros((4, 3), dtype=np.float32)
        regressor.get_pose(predictions, get_2d_ctypes(pose_init))
        R_init = pose_init[1:].transpose()
        t_init = pose_init[0].reshape((3, 1))
        # refine pose
        predictions = regressor.refine_pose(predictions, pr_para)
        pose_final = np.zeros((4, 3), dtype=np.float32)
        regressor.get_pose(predictions, get_2d_ctypes(pose_final))
        R_final = pose_final[1:].transpose()
        t_final = pose_final[0].reshape((3, 1))
        return R_final, t_final, R_init, t_init
    def search_para(self, regressor, predictions_para, poses_para, K_inv, normal_gt, diameter, val_set):
        """Search pose-initialization and pose-refinement hyperparameters on a
        validation set of cached predictions.

        Fills the regressor's parameter-search containers with every valid
        (detected) validation example plus its ground-truth pose, then runs
        the C-side parameter searches.  Returns (pr_para, pi_para).
        """
        para_id = 0
        for data_id in range(len(val_set['pts3d'])):
            if val_set['mask_pred'][data_id].sum() == 0 or \
                    np.sum(val_set['pts2d_pred_loc'][data_id]) == 0:
                # object not detected
                continue
            predictions = regressor.get_prediction_container(predictions_para, para_id)
            # fill intermediate predictions
            self.fill_intermediate_predictions(regressor,
                                               predictions,
                                               K_inv,
                                               val_set['pts3d'][data_id],
                                               val_set['pts2d_pred_loc'][data_id],
                                               val_set['pts2d_pred_var'][data_id],
                                               val_set['graph_pred'][data_id],
                                               val_set['sym_cor_pred'][data_id],
                                               val_set['mask_pred'][data_id],
                                               normal_gt)
            # fill ground-truth poses
            pose_gt = np.zeros((4, 3), dtype=np.float32)
            tvec = val_set['t_gt'][data_id]
            r = val_set['R_gt'][data_id]
            pose_gt[0] = tvec.transpose()[0]
            pose_gt[1:] = r.transpose()
            regressor.set_pose_gt(poses_para, para_id, get_2d_ctypes(pose_gt))
            # increment number of valid examples in the val set
            para_id += 1
        # search parameter
        # para_id is datasize for parameter search
        pi_para = regressor.search_pose_initial(predictions_para, poses_para, para_id, diameter)
        pr_para = regressor.search_pose_refine(predictions_para, poses_para, para_id, diameter)
        return pr_para, pi_para
    def generate_data(self, val_loader, val_size=50):
        """Predict poses for the whole test set and save them to disk.

        The first `val_size` examples of `val_loader` are used to search the
        hyper-parameters of the pose initialization/refinement sub-modules;
        the regressor then predicts a pose for every test-loader example.
        Ground-truth and predicted rotations/translations are written to
        output/<dataset>/test_set_<object_name>.npy.

        Args:
            val_loader: loader providing validation batches for the
                hyper-parameter search.
            val_size: number of validation examples used for the search.
        """
        self.model.eval()
        camera_intrinsic = self.test_loader.dataset.camera_intrinsic
        n_examples = len(self.test_loader.dataset)
        # per-example ground-truth / predicted / initial poses for the test set
        test_set = {
            'object_name': [],
            'local_idx': [],
            'R_gt': np.zeros((n_examples, 3, 3), dtype=np.float32),
            't_gt': np.zeros((n_examples, 3, 1), dtype=np.float32),
            'R_pred': np.zeros((n_examples, 3, 3), dtype=np.float32),
            't_pred': np.zeros((n_examples, 3, 1), dtype=np.float32),
            'R_init': np.zeros((n_examples, 3, 3), dtype=np.float32),
            't_init': np.zeros((n_examples, 3, 1), dtype=np.float32)
        }
        # buffers for the first `val_size` validation examples (parameter search)
        val_set = {
            'pts3d' : [],
            'pts2d_pred_loc' : [],
            'pts2d_pred_var' : [],
            'graph_pred' : [],
            'sym_cor_pred' : [],
            'mask_pred' : [],
            'R_gt' : [],
            't_gt' : []
        }
        # camera intrinsic matrix built from focal lengths and principal point
        K = np.matrix([[camera_intrinsic['fu'], 0, camera_intrinsic['uc']],
                       [0, camera_intrinsic['fv'], camera_intrinsic['vc']],
                       [0, 0, 1]], dtype=np.float32)
        K_inv = np.linalg.inv(K)
        regressor = load_wrapper()
        # intermediate predictions in the test set
        predictions = regressor.new_container()
        # intermediate predictions in the val set
        predictions_para = regressor.new_container_para()
        # ground-truth poses in the val set
        poses_para = regressor.new_container_pose()
        with torch.no_grad():
            # search parameters
            keep_searching = True
            for i_batch, batch in enumerate(val_loader):
                if not keep_searching:
                    break
                base_idx = self.args.batch_size * i_batch
                if cuda:
                    batch['image'] = batch['image'].cuda()
                    batch['sym_cor'] = batch['sym_cor'].cuda()
                    batch['mask'] = batch['mask'].cuda()
                    batch['pts2d_map'] = batch['pts2d_map'].cuda()
                    batch['graph'] = batch['graph'].cuda()
                sym_cor_pred, mask_pred, pts2d_map_pred, graph_pred, sym_cor_loss, mask_loss, pts2d_loss, graph_loss = \
                    self.model(batch['image'], batch['sym_cor'], batch['mask'], batch['pts2d_map'], batch['graph'])
                # binarize the predicted mask at 0.5
                mask_pred[mask_pred > 0.5] = 1.
                mask_pred[mask_pred <= 0.5] = 0.
                pts2d_pred_loc, pts2d_pred_var = self.vote_keypoints(pts2d_map_pred, mask_pred)
                mask_pred = mask_pred.detach().cpu().numpy()
                for i in range(batch['image'].shape[0]):
                    R = batch['R'].numpy()
                    t = batch['t'].numpy()
                    if (base_idx + i) < val_size:
                        # save data for parameter search
                        val_set['pts3d'].append(batch['pts3d'][i].numpy())
                        val_set['pts2d_pred_loc'].append(pts2d_pred_loc[i].detach().cpu().numpy())
                        val_set['pts2d_pred_var'].append(pts2d_pred_var[i].detach().cpu().numpy())
                        val_set['graph_pred'].append(graph_pred[i].detach().cpu().numpy())
                        val_set['sym_cor_pred'].append(sym_cor_pred[i].detach().cpu().numpy())
                        val_set['mask_pred'].append(mask_pred[i][0])
                        val_set['R_gt'].append(R[i])
                        val_set['t_gt'].append(t[i])
                    elif (base_idx + i) == val_size:
                        # search hyper-parameters of both initialization and refinement sub-modules
                        # NOTE(review): assumes val_loader yields more than val_size
                        # examples; otherwise pr_para/pi_para are never bound and
                        # the prediction loop below raises NameError -- confirm.
                        pr_para, pi_para = self.search_para(regressor,
                                                            predictions_para,
                                                            poses_para,
                                                            K_inv,
                                                            batch['normal'][i].numpy(),
                                                            read_diameter(self.args.object_name),
                                                            val_set)
                        keep_searching = False
                        break
            # prediction
            for i_batch, batch in enumerate(self.test_loader):
                base_idx = self.args.batch_size * i_batch
                if cuda:
                    batch['image'] = batch['image'].cuda()
                    batch['sym_cor'] = batch['sym_cor'].cuda()
                    batch['mask'] = batch['mask'].cuda()
                    batch['pts2d_map'] = batch['pts2d_map'].cuda()
                    batch['graph'] = batch['graph'].cuda()
                sym_cor_pred, mask_pred, pts2d_map_pred, graph_pred, sym_cor_loss, mask_loss, pts2d_loss, graph_loss = \
                    self.model(batch['image'], batch['sym_cor'], batch['mask'], batch['pts2d_map'], batch['graph'])
                # binarize the predicted mask at 0.5
                mask_pred[mask_pred > 0.5] = 1.
                mask_pred[mask_pred <= 0.5] = 0.
                pts2d_pred_loc, pts2d_pred_var = self.vote_keypoints(pts2d_map_pred, mask_pred)
                mask_pred = mask_pred.detach().cpu().numpy()
                for i in range(batch['image'].shape[0]):
                    R = batch['R'].numpy()
                    t = batch['t'].numpy()
                    # regress pose: test set starts from the `val_size`^{th} example
                    # save ground-truth information
                    # NOTE(review): the slice [i:] is appended on every iteration
                    # of i, so names/indices are duplicated -- confirm intended.
                    test_set['object_name'] += batch['object_name'][i:]
                    test_set['local_idx'] += batch['local_idx'].numpy()[i:].tolist()
                    test_set['R_gt'][base_idx + i] = R[i]
                    test_set['t_gt'][base_idx + i] = t[i]
                    # save predicted information
                    R_pred, t_pred, R_init, t_init = self.regress_pose(regressor,
                                                                       predictions,
                                                                       pr_para,
                                                                       pi_para,
                                                                       K_inv,
                                                                       batch['pts3d'][i].numpy(),
                                                                       pts2d_pred_loc[i].detach().cpu().numpy(),
                                                                       pts2d_pred_var[i].detach().cpu().numpy(),
                                                                       graph_pred[i].detach().cpu().numpy(),
                                                                       sym_cor_pred[i].detach().cpu().numpy(),
                                                                       mask_pred[i][0],
                                                                       batch['normal'][i].numpy())
                    test_set['R_pred'][base_idx + i] = R_pred
                    test_set['t_pred'][base_idx + i] = t_pred
                    test_set['R_init'][base_idx + i] = R_init
                    test_set['t_init'][base_idx + i] = t_init
        # persist all poses for offline evaluation
        os.makedirs('output/{}'.format(self.args.dataset), exist_ok=True)
        np.save('output/{}/test_set_{}.npy'.format(self.args.dataset, self.args.object_name), test_set)
        print('saved')
        # free the C-side containers allocated by the wrapper
        regressor.delete_container(predictions, predictions_para, poses_para, pr_para, pi_para)
def save_model(self, epoch):
ckpt_dir = os.path.join(self.args.save_dir, 'checkpoints')
note = str(self.args.lr)
save_session(self.model, self.optimizer, ckpt_dir, note, epoch)
| [
"numpy.sqrt",
"lib.regressor.regressor.load_wrapper",
"numpy.argsort",
"numpy.array",
"torch.cuda.is_available",
"numpy.linalg.norm",
"numpy.cov",
"lib.ransac_voting_gpu_layer.ransac_voting_gpu.ransac_voting_layer_v3",
"numpy.mean",
"os.path.exists",
"numpy.cross",
"cv2.line",
"sklearn.neigh... | [((509, 534), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (532, 534), False, 'import torch\n'), ((927, 941), 'lib.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (939, 941), False, 'from lib.utils import save_session, AverageMeter\n'), ((972, 986), 'lib.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (984, 986), False, 'from lib.utils import save_session, AverageMeter\n'), ((1014, 1028), 'lib.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1026, 1028), False, 'from lib.utils import save_session, AverageMeter\n'), ((1057, 1071), 'lib.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1069, 1071), False, 'from lib.utils import save_session, AverageMeter\n'), ((1100, 1114), 'lib.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1112, 1114), False, 'from lib.utils import save_session, AverageMeter\n'), ((1143, 1157), 'lib.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1155, 1157), False, 'from lib.utils import save_session, AverageMeter\n'), ((4235, 4256), 'numpy.nonzero', 'np.nonzero', (['mask_pred'], {}), '(mask_pred)\n', (4245, 4256), True, 'import numpy as np\n'), ((4720, 4758), 'cv2.imwrite', 'cv2.imwrite', (['img_pred_name', 'image_pred'], {}), '(img_pred_name, image_pred)\n', (4731, 4758), False, 'import cv2\n'), ((4936, 4952), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (4946, 4952), True, 'import numpy as np\n'), ((5408, 5442), 'cv2.imwrite', 'cv2.imwrite', (['img_gt_name', 'image_gt'], {}), '(img_gt_name, image_gt)\n', (5419, 5442), False, 'import cv2\n'), ((5635, 5694), 'numpy.zeros', 'np.zeros', (['(mask.shape[0], mask.shape[1], 3)'], {'dtype': 'np.uint8'}), '((mask.shape[0], mask.shape[1], 3), dtype=np.uint8)\n', (5643, 5694), True, 'import numpy as np\n'), ((5755, 5792), 'numpy.array', 'np.array', (['[0, 0, 128]'], {'dtype': 'np.uint8'}), '([0, 0, 128], dtype=np.uint8)\n', (5763, 5792), True, 'import numpy as np\n'), ((5840, 5877), 'numpy.array', 
'np.array', (['[128, 0, 0]'], {'dtype': 'np.uint8'}), '([128, 0, 0], dtype=np.uint8)\n', (5848, 5877), True, 'import numpy as np\n'), ((6120, 6148), 'cv2.imwrite', 'cv2.imwrite', (['img_name', 'image'], {}), '(img_name, image)\n', (6131, 6148), False, 'import cv2\n'), ((6979, 7017), 'cv2.imwrite', 'cv2.imwrite', (['img_pred_name', 'image_pred'], {}), '(img_pred_name, image_pred)\n', (6990, 7017), False, 'import cv2\n'), ((7486, 7520), 'cv2.imwrite', 'cv2.imwrite', (['img_gt_name', 'image_gt'], {}), '(img_gt_name, image_gt)\n', (7497, 7520), False, 'import cv2\n'), ((7921, 7977), 'numpy.zeros', 'np.zeros', (['(mask.shape[0], mask.shape[1])'], {'dtype': 'np.uint8'}), '((mask.shape[0], mask.shape[1]), dtype=np.uint8)\n', (7929, 7977), True, 'import numpy as np\n'), ((7995, 8011), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (8005, 8011), True, 'import numpy as np\n'), ((12203, 12237), 'cv2.imwrite', 'cv2.imwrite', (['img_gt_name', 'image_gt'], {}), '(img_gt_name, image_gt)\n', (12214, 12237), False, 'import cv2\n'), ((12339, 12377), 'cv2.imwrite', 'cv2.imwrite', (['img_pred_name', 'image_pred'], {}), '(img_pred_name, image_pred)\n', (12350, 12377), False, 'import cv2\n'), ((12482, 12496), 'lib.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (12494, 12496), False, 'from lib.utils import save_session, AverageMeter\n'), ((16373, 16437), 'lib.ransac_voting_gpu_layer.ransac_voting_gpu.ransac_voting_layer_v3', 'ransac_voting_layer_v3', (['mask', 'pts2d_map', '(512)'], {'inlier_thresh': '(0.99)'}), '(mask, pts2d_map, 512, inlier_thresh=0.99)\n', (16395, 16437), False, 'from lib.ransac_voting_gpu_layer.ransac_voting_gpu import ransac_voting_layer_v3\n'), ((16458, 16519), 'lib.ransac_voting_gpu_layer.ransac_voting_gpu.estimate_voting_distribution_with_mean', 'estimate_voting_distribution_with_mean', (['mask', 'pts2d_map', 'mean'], {}), '(mask, pts2d_map, mean)\n', (16496, 16519), False, 'from lib.ransac_voting_gpu_layer.ransac_voting_gpu import 
estimate_voting_distribution_with_mean\n'), ((16609, 16625), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (16619, 16625), True, 'import numpy as np\n'), ((16641, 16688), 'numpy.zeros', 'np.zeros', (['(ys.shape[0], 2, 2)'], {'dtype': 'np.float32'}), '((ys.shape[0], 2, 2), dtype=np.float32)\n', (16649, 16688), True, 'import numpy as np\n'), ((17494, 17545), 'sklearn.neighbors.KDTree', 'KDTree', (['vecs_pred'], {'leaf_size': '(40)', 'metric': '"""euclidean"""'}), "(vecs_pred, leaf_size=40, metric='euclidean')\n", (17500, 17545), False, 'from sklearn.neighbors import KDTree\n'), ((17652, 17696), 'numpy.mean', 'np.mean', (['(dis * dis)'], {'axis': '(1)', 'dtype': 'np.float32'}), '(dis * dis, axis=1, dtype=np.float32)\n', (17659, 17696), True, 'import numpy as np\n'), ((17713, 17733), 'numpy.argsort', 'np.argsort', (['saliency'], {}), '(saliency)\n', (17723, 17733), True, 'import numpy as np\n'), ((17750, 17796), 'numpy.zeros', 'np.zeros', (['(2, order.shape[0])'], {'dtype': 'np.uint32'}), '((2, order.shape[0]), dtype=np.uint32)\n', (17758, 17796), True, 'import numpy as np\n'), ((17891, 17935), 'numpy.zeros', 'np.zeros', (['(order.shape[0],)'], {'dtype': 'np.uint32'}), '((order.shape[0],), dtype=np.uint32)\n', (17899, 17935), True, 'import numpy as np\n'), ((18809, 18852), 'numpy.zeros', 'np.zeros', (['(n_symmetry, 3)'], {'dtype': 'np.float32'}), '((n_symmetry, 3), dtype=np.float32)\n', (18817, 18852), True, 'import numpy as np\n'), ((19128, 19148), 'numpy.float32', 'np.float32', (['seeds[1]'], {}), '(seeds[1])\n', (19138, 19148), True, 'import numpy as np\n'), ((19176, 19199), 'numpy.max', 'np.max', (['symmetry_weight'], {}), '(symmetry_weight)\n', (19182, 19199), True, 'import numpy as np\n'), ((20097, 20141), 'numpy.zeros', 'np.zeros', (['(n_keypts, 2, 2)'], {'dtype': 'np.float32'}), '((n_keypts, 2, 2), dtype=np.float32)\n', (20105, 20141), True, 'import numpy as np\n'), ((20973, 21013), 'numpy.zeros', 'np.zeros', (['(n_edges, 2)'], {'dtype': 
'np.float32'}), '((n_edges, 2), dtype=np.float32)\n', (20981, 21013), True, 'import numpy as np\n'), ((21042, 21085), 'numpy.zeros', 'np.zeros', (['(n_edges, 2, 2)'], {'dtype': 'np.float32'}), '((n_edges, 2, 2), dtype=np.float32)\n', (21050, 21085), True, 'import numpy as np\n'), ((22261, 22315), 'numpy.zeros', 'np.zeros', (['(sym_cor_pred.shape[0], 3)'], {'dtype': 'np.float32'}), '((sym_cor_pred.shape[0], 3), dtype=np.float32)\n', (22269, 22315), True, 'import numpy as np\n'), ((24259, 24293), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {'dtype': 'np.float32'}), '((4, 3), dtype=np.float32)\n', (24267, 24293), True, 'import numpy as np\n'), ((24558, 24592), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {'dtype': 'np.float32'}), '((4, 3), dtype=np.float32)\n', (24566, 24592), True, 'import numpy as np\n'), ((27855, 28010), 'numpy.matrix', 'np.matrix', (["[[camera_intrinsic['fu'], 0, camera_intrinsic['uc']], [0, camera_intrinsic[\n 'fv'], camera_intrinsic['vc']], [0, 0, 1]]"], {'dtype': 'np.float32'}), "([[camera_intrinsic['fu'], 0, camera_intrinsic['uc']], [0,\n camera_intrinsic['fv'], camera_intrinsic['vc']], [0, 0, 1]], dtype=np.\n float32)\n", (27864, 28010), True, 'import numpy as np\n'), ((28064, 28080), 'numpy.linalg.inv', 'np.linalg.inv', (['K'], {}), '(K)\n', (28077, 28080), True, 'import numpy as np\n'), ((28101, 28115), 'lib.regressor.regressor.load_wrapper', 'load_wrapper', ([], {}), '()\n', (28113, 28115), False, 'from lib.regressor.regressor import load_wrapper, get_2d_ctypes\n'), ((34522, 34569), 'os.path.join', 'os.path.join', (['self.args.save_dir', '"""checkpoints"""'], {}), "(self.args.save_dir, 'checkpoints')\n", (34534, 34569), False, 'import os\n'), ((34611, 34674), 'lib.utils.save_session', 'save_session', (['self.model', 'self.optimizer', 'ckpt_dir', 'note', 'epoch'], {}), '(self.model, self.optimizer, ckpt_dir, note, epoch)\n', (34623, 34674), False, 'from lib.utils import save_session, AverageMeter\n'), ((1244, 1255), 'time.time', 'time.time', ([], 
{}), '()\n', (1253, 1255), False, 'import time\n'), ((3980, 4003), 'os.path.exists', 'os.path.exists', (['img_dir'], {}), '(img_dir)\n', (3994, 4003), False, 'import os\n'), ((4017, 4037), 'os.makedirs', 'os.makedirs', (['img_dir'], {}), '(img_dir)\n', (4028, 4037), False, 'import os\n'), ((4565, 4625), 'cv2.line', 'cv2.line', (['image_pred', '(x, y)', '(x_cor, y_cor)', '(0, 0, 255)', '(1)'], {}), '(image_pred, (x, y), (x_cor, y_cor), (0, 0, 255), 1)\n', (4573, 4625), False, 'import cv2\n'), ((5254, 5312), 'cv2.line', 'cv2.line', (['image_gt', '(x, y)', '(x_cor, y_cor)', '(0, 0, 255)', '(1)'], {}), '(image_gt, (x, y), (x_cor, y_cor), (0, 0, 255), 1)\n', (5262, 5312), False, 'import cv2\n'), ((5972, 5995), 'os.path.exists', 'os.path.exists', (['img_dir'], {}), '(img_dir)\n', (5986, 5995), False, 'import os\n'), ((6009, 6029), 'os.makedirs', 'os.makedirs', (['img_dir'], {}), '(img_dir)\n', (6020, 6029), False, 'import os\n'), ((6336, 6359), 'os.path.exists', 'os.path.exists', (['img_dir'], {}), '(img_dir)\n', (6350, 6359), False, 'import os\n'), ((6373, 6393), 'os.makedirs', 'os.makedirs', (['img_dir'], {}), '(img_dir)\n', (6384, 6393), False, 'import os\n'), ((6824, 6884), 'cv2.circle', 'cv2.circle', (['image_pred', '(x, y)', '(2)', '(0, 0, 255)'], {'thickness': '(-1)'}), '(image_pred, (x, y), 2, (0, 0, 255), thickness=-1)\n', (6834, 6884), False, 'import cv2\n'), ((7328, 7390), 'cv2.circle', 'cv2.circle', (['image_gt', '(x, y)', '(2)', '(255, 255, 255)'], {'thickness': '(-1)'}), '(image_gt, (x, y), 2, (255, 255, 255), thickness=-1)\n', (7338, 7390), False, 'import cv2\n'), ((7690, 7713), 'os.path.exists', 'os.path.exists', (['img_dir'], {}), '(img_dir)\n', (7704, 7713), False, 'import os\n'), ((7727, 7747), 'os.makedirs', 'os.makedirs', (['img_dir'], {}), '(img_dir)\n', (7738, 7747), False, 'import os\n'), ((8868, 8912), 'cv2.applyColorMap', 'cv2.applyColorMap', (['im_gray', 'cv2.COLORMAP_HSV'], {}), '(im_gray, cv2.COLORMAP_HSV)\n', (8885, 8912), False, 'import 
cv2\n'), ((9141, 9171), 'cv2.imwrite', 'cv2.imwrite', (['img_pred_name', 'im'], {}), '(img_pred_name, im)\n', (9152, 9171), False, 'import cv2\n'), ((10012, 10056), 'cv2.applyColorMap', 'cv2.applyColorMap', (['im_gray', 'cv2.COLORMAP_HSV'], {}), '(im_gray, cv2.COLORMAP_HSV)\n', (10029, 10056), False, 'import cv2\n'), ((10277, 10305), 'cv2.imwrite', 'cv2.imwrite', (['img_gt_name', 'im'], {}), '(img_gt_name, im)\n', (10288, 10305), False, 'import cv2\n'), ((10507, 10530), 'os.path.exists', 'os.path.exists', (['img_dir'], {}), '(img_dir)\n', (10521, 10530), False, 'import os\n'), ((10544, 10564), 'os.makedirs', 'os.makedirs', (['img_dir'], {}), '(img_dir)\n', (10555, 10564), False, 'import os\n'), ((12549, 12564), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12562, 12564), False, 'import torch\n'), ((17249, 17283), 'numpy.zeros', 'np.zeros', (['(0, 3)'], {'dtype': 'np.float32'}), '((0, 3), dtype=np.float32)\n', (17257, 17283), True, 'import numpy as np\n'), ((17314, 17346), 'numpy.zeros', 'np.zeros', (['(0,)'], {'dtype': 'np.float32'}), '((0,), dtype=np.float32)\n', (17322, 17346), True, 'import numpy as np\n'), ((18144, 18171), 'numpy.linalg.norm', 'np.linalg.norm', (['dif'], {'axis': '(1)'}), '(dif, axis=1)\n', (18158, 18171), True, 'import numpy as np\n'), ((19002, 19036), 'numpy.mean', 'np.mean', (['vecs_pred[row_is]'], {'axis': '(0)'}), '(vecs_pred[row_is], axis=0)\n', (19009, 19036), True, 'import numpy as np\n'), ((19069, 19101), 'numpy.linalg.norm', 'np.linalg.norm', (['qs1_cross_qs2[i]'], {}), '(qs1_cross_qs2[i])\n', (19083, 19101), True, 'import numpy as np\n'), ((19626, 19646), 'lib.regressor.regressor.get_2d_ctypes', 'get_2d_ctypes', (['pts3d'], {}), '(pts3d)\n', (19639, 19646), False, 'from lib.regressor.regressor import load_wrapper, get_2d_ctypes\n'), ((19714, 19754), 'numpy.ones', 'np.ones', (['(3, n_keypts)'], {'dtype': 'np.float32'}), '((3, n_keypts), dtype=np.float32)\n', (19721, 19754), True, 'import numpy as np\n'), ((19965, 19992), 
'lib.regressor.regressor.get_2d_ctypes', 'get_2d_ctypes', (['point2D_pred'], {}), '(point2D_pred)\n', (19978, 19992), False, 'from lib.regressor.regressor import load_wrapper, get_2d_ctypes\n'), ((20215, 20243), 'numpy.matrix', 'np.matrix', (['pts2d_pred_var[i]'], {}), '(pts2d_pred_var[i])\n', (20224, 20243), True, 'import numpy as np\n'), ((20353, 20371), 'numpy.linalg.eig', 'np.linalg.eig', (['cov'], {}), '(cov)\n', (20366, 20371), True, 'import numpy as np\n'), ((20648, 20681), 'lib.regressor.regressor.get_2d_ctypes', 'get_2d_ctypes', (['point_inv_half_var'], {}), '(point_inv_half_var)\n', (20661, 20681), False, 'from lib.regressor.regressor import load_wrapper, get_2d_ctypes\n'), ((21878, 21901), 'lib.regressor.regressor.get_2d_ctypes', 'get_2d_ctypes', (['vec_pred'], {}), '(vec_pred)\n', (21891, 21901), False, 'from lib.regressor.regressor import load_wrapper, get_2d_ctypes\n'), ((22036, 22068), 'lib.regressor.regressor.get_2d_ctypes', 'get_2d_ctypes', (['edge_inv_half_var'], {}), '(edge_inv_half_var)\n', (22049, 22068), False, 'from lib.regressor.regressor import load_wrapper, get_2d_ctypes\n'), ((22381, 22412), 'numpy.ones', 'np.ones', (['(3,)'], {'dtype': 'np.float32'}), '((3,), dtype=np.float32)\n', (22388, 22412), True, 'import numpy as np\n'), ((22431, 22462), 'numpy.ones', 'np.ones', (['(3,)'], {'dtype': 'np.float32'}), '((3,), dtype=np.float32)\n', (22438, 22462), True, 'import numpy as np\n'), ((22736, 22754), 'numpy.cross', 'np.cross', (['qs1', 'qs2'], {}), '(qs1, qs2)\n', (22744, 22754), True, 'import numpy as np\n'), ((22983, 23020), 'lib.regressor.regressor.get_2d_ctypes', 'get_2d_ctypes', (['qs1_cross_qs2_filtered'], {}), '(qs1_cross_qs2_filtered)\n', (22996, 23020), False, 'from lib.regressor.regressor import load_wrapper, get_2d_ctypes\n'), ((23486, 23513), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float32'}), '(3, dtype=np.float32)\n', (23492, 23513), True, 'import numpy as np\n'), ((23530, 23564), 'numpy.zeros', 'np.zeros', (['(3, 1)'], 
{'dtype': 'np.float32'}), '((3, 1), dtype=np.float32)\n', (23538, 23564), True, 'import numpy as np\n'), ((24334, 24358), 'lib.regressor.regressor.get_2d_ctypes', 'get_2d_ctypes', (['pose_init'], {}), '(pose_init)\n', (24347, 24358), False, 'from lib.regressor.regressor import load_wrapper, get_2d_ctypes\n'), ((24633, 24658), 'lib.regressor.regressor.get_2d_ctypes', 'get_2d_ctypes', (['pose_final'], {}), '(pose_final)\n', (24646, 24658), False, 'from lib.regressor.regressor import load_wrapper, get_2d_ctypes\n'), ((26073, 26107), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {'dtype': 'np.float32'}), '((4, 3), dtype=np.float32)\n', (26081, 26107), True, 'import numpy as np\n'), ((27063, 27109), 'numpy.zeros', 'np.zeros', (['(n_examples, 3, 3)'], {'dtype': 'np.float32'}), '((n_examples, 3, 3), dtype=np.float32)\n', (27071, 27109), True, 'import numpy as np\n'), ((27135, 27181), 'numpy.zeros', 'np.zeros', (['(n_examples, 3, 1)'], {'dtype': 'np.float32'}), '((n_examples, 3, 1), dtype=np.float32)\n', (27143, 27181), True, 'import numpy as np\n'), ((27209, 27255), 'numpy.zeros', 'np.zeros', (['(n_examples, 3, 3)'], {'dtype': 'np.float32'}), '((n_examples, 3, 3), dtype=np.float32)\n', (27217, 27255), True, 'import numpy as np\n'), ((27283, 27329), 'numpy.zeros', 'np.zeros', (['(n_examples, 3, 1)'], {'dtype': 'np.float32'}), '((n_examples, 3, 1), dtype=np.float32)\n', (27291, 27329), True, 'import numpy as np\n'), ((27357, 27403), 'numpy.zeros', 'np.zeros', (['(n_examples, 3, 3)'], {'dtype': 'np.float32'}), '((n_examples, 3, 3), dtype=np.float32)\n', (27365, 27403), True, 'import numpy as np\n'), ((27431, 27477), 'numpy.zeros', 'np.zeros', (['(n_examples, 3, 1)'], {'dtype': 'np.float32'}), '((n_examples, 3, 1), dtype=np.float32)\n', (27439, 27477), True, 'import numpy as np\n'), ((28432, 28447), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (28445, 28447), False, 'import torch\n'), ((11457, 11483), 'numpy.array', 'np.array', (['[edge_x, edge_y]'], {}), '([edge_x, 
edge_y])\n', (11465, 11483), True, 'import numpy as np\n'), ((11894, 11920), 'numpy.array', 'np.array', (['[edge_x, edge_y]'], {}), '([edge_x, edge_y])\n', (11902, 11920), True, 'import numpy as np\n'), ((19833, 19869), 'numpy.array', 'np.array', (['(K_inv * point2D_pred)[:2]'], {}), '((K_inv * point2D_pred)[:2])\n', (19841, 19869), True, 'import numpy as np\n'), ((21309, 21323), 'numpy.cov', 'np.cov', (['xs', 'ys'], {}), '(xs, ys)\n', (21315, 21323), True, 'import numpy as np\n'), ((21441, 21459), 'numpy.linalg.eig', 'np.linalg.eig', (['cov'], {}), '(cov)\n', (21454, 21459), True, 'import numpy as np\n'), ((26333, 26355), 'lib.regressor.regressor.get_2d_ctypes', 'get_2d_ctypes', (['pose_gt'], {}), '(pose_gt)\n', (26346, 26355), False, 'from lib.regressor.regressor import load_wrapper, get_2d_ctypes\n'), ((2500, 2511), 'time.time', 'time.time', ([], {}), '()\n', (2509, 2511), False, 'import time\n'), ((11261, 11290), 'numpy.round', 'np.round', (['pts2d_gt[start_idx]'], {}), '(pts2d_gt[start_idx])\n', (11269, 11290), True, 'import numpy as np\n'), ((11515, 11551), 'numpy.round', 'np.round', (['(pts2d_gt[start_idx] + edge)'], {}), '(pts2d_gt[start_idx] + edge)\n', (11523, 11551), True, 'import numpy as np\n'), ((11706, 11735), 'numpy.round', 'np.round', (['pts2d_gt[start_idx]'], {}), '(pts2d_gt[start_idx])\n', (11714, 11735), True, 'import numpy as np\n'), ((11952, 11988), 'numpy.round', 'np.round', (['(pts2d_gt[start_idx] + edge)'], {}), '(pts2d_gt[start_idx] + edge)\n', (11960, 11988), True, 'import numpy as np\n'), ((13917, 13951), 'cv2.imread', 'cv2.imread', (["batch['image_name'][0]"], {}), "(batch['image_name'][0])\n", (13927, 13951), False, 'import cv2\n'), ((17426, 17463), 'numpy.sum', 'np.sum', (['(vecs_pred[:, :2] ** 2)'], {'axis': '(1)'}), '(vecs_pred[:, :2] ** 2, axis=1)\n', (17432, 17463), True, 'import numpy as np\n'), ((21636, 21645), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (21642, 21645), True, 'import numpy as np\n'), ((25061, 25103), 
'numpy.sum', 'np.sum', (["val_set['pts2d_pred_loc'][data_id]"], {}), "(val_set['pts2d_pred_loc'][data_id])\n", (25067, 25103), True, 'import numpy as np\n'), ((18922, 18955), 'numpy.argwhere', 'np.argwhere', (['(flags == valid_is[i])'], {}), '(flags == valid_is[i])\n', (18933, 18955), True, 'import numpy as np\n'), ((20411, 20421), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (20418, 20421), True, 'import numpy as np\n'), ((21503, 21513), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (21510, 21513), True, 'import numpy as np\n'), ((8467, 8480), 'numpy.abs', 'np.abs', (['map_y'], {}), '(map_y)\n', (8473, 8480), True, 'import numpy as np\n'), ((8483, 8496), 'numpy.abs', 'np.abs', (['map_x'], {}), '(map_x)\n', (8489, 8496), True, 'import numpy as np\n'), ((9615, 9628), 'numpy.abs', 'np.abs', (['map_y'], {}), '(map_y)\n', (9621, 9628), True, 'import numpy as np\n'), ((9631, 9644), 'numpy.abs', 'np.abs', (['map_x'], {}), '(map_x)\n', (9637, 9644), True, 'import numpy as np\n'), ((18676, 18692), 'numpy.max', 'np.max', (['seeds[1]'], {}), '(seeds[1])\n', (18682, 18692), True, 'import numpy as np\n'), ((21690, 21709), 'numpy.matrix', 'np.matrix', (['vec_pred'], {}), '(vec_pred)\n', (21699, 21709), True, 'import numpy as np\n'), ((31020, 31056), 'src.evaluate.read_diameter', 'read_diameter', (['self.args.object_name'], {}), '(self.args.object_name)\n', (31033, 31056), False, 'from src.evaluate import read_diameter\n'), ((22580, 22594), 'numpy.matrix', 'np.matrix', (['qs1'], {}), '(qs1)\n', (22589, 22594), True, 'import numpy as np\n'), ((22658, 22672), 'numpy.matrix', 'np.matrix', (['qs2'], {}), '(qs2)\n', (22667, 22672), True, 'import numpy as np\n')] |
import numpy as np
import win32gui, win32ui, win32con, win32api
def WindowDraw(self, rect):
    """Draw a rectangle onto the tracked window.

    Does nothing when no window handle has been acquired yet.
    `rect` is passed straight to the GDI Rectangle call (left, top,
    right, bottom in window coordinates).
    """
    if self.hwnd is None:
        # No valid window handle -- nothing to draw on.
        return
    window_dc = win32gui.GetWindowDC(self.hwnd)
    dc_obj = win32ui.CreateDCFromHandle(window_dc)
    dc_obj.Rectangle(rect)
    # Release GDI resources so handles are not leaked.
    dc_obj.DeleteDC()
    win32gui.ReleaseDC(self.hwnd, window_dc)
#Original : https://github.com/Sentdex/pygta5/blob/master/grabscreen.py
def grab_screen(region=None):
    """Capture the screen (or a sub-region of it) as a numpy array.

    Args:
        region: optional (left, top, right, bottom) pixel box, inclusive on
            both edges; when omitted/empty the whole virtual desktop
            (all monitors) is captured.

    Returns:
        uint8 array of shape (height, width, 4) -- BGRA pixel data as
        returned by GetBitmapBits.
    """
    desktop = win32gui.GetDesktopWindow()
    if region:
        left, top, right, bottom = region
        width = right - left + 1
        height = bottom - top + 1
    else:
        # Span the whole virtual desktop.
        width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
        height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
        left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
        top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)
    window_dc = win32gui.GetWindowDC(desktop)
    src_dc = win32ui.CreateDCFromHandle(window_dc)
    mem_dc = src_dc.CreateCompatibleDC()
    bitmap = win32ui.CreateBitmap()
    bitmap.CreateCompatibleBitmap(src_dc, width, height)
    mem_dc.SelectObject(bitmap)
    # Blit the selected screen area into the in-memory bitmap.
    mem_dc.BitBlt((0, 0), (width, height), src_dc, (left, top), win32con.SRCCOPY)
    raw_bits = bitmap.GetBitmapBits(True)
    img = np.frombuffer(raw_bits, dtype='uint8')
    img.shape = (height, width, 4)
    # Free GDI resources before returning.
    src_dc.DeleteDC()
    mem_dc.DeleteDC()
    win32gui.ReleaseDC(desktop, window_dc)
    win32gui.DeleteObject(bitmap.GetHandle())
    return img
| [
"win32gui.GetWindowDC",
"win32gui.GetDesktopWindow",
"win32api.GetSystemMetrics",
"win32ui.CreateDCFromHandle",
"win32gui.ReleaseDC",
"numpy.frombuffer",
"win32ui.CreateBitmap"
] | [((315, 346), 'win32gui.GetWindowDC', 'win32gui.GetWindowDC', (['self.hwnd'], {}), '(self.hwnd)\n', (335, 346), False, 'import win32gui, win32ui, win32con, win32api\n'), ((363, 394), 'win32ui.CreateDCFromHandle', 'win32ui.CreateDCFromHandle', (['wDC'], {}), '(wDC)\n', (389, 394), False, 'import win32gui, win32ui, win32con, win32api\n'), ((590, 624), 'win32gui.ReleaseDC', 'win32gui.ReleaseDC', (['self.hwnd', 'wDC'], {}), '(self.hwnd, wDC)\n', (608, 624), False, 'import win32gui, win32ui, win32con, win32api\n'), ((741, 768), 'win32gui.GetDesktopWindow', 'win32gui.GetDesktopWindow', ([], {}), '()\n', (766, 768), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1196, 1222), 'win32gui.GetWindowDC', 'win32gui.GetWindowDC', (['hwin'], {}), '(hwin)\n', (1216, 1222), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1235, 1269), 'win32ui.CreateDCFromHandle', 'win32ui.CreateDCFromHandle', (['hwindc'], {}), '(hwindc)\n', (1261, 1269), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1319, 1341), 'win32ui.CreateBitmap', 'win32ui.CreateBitmap', ([], {}), '()\n', (1339, 1341), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1564, 1609), 'numpy.frombuffer', 'np.frombuffer', (['signedIntsArray'], {'dtype': '"""uint8"""'}), "(signedIntsArray, dtype='uint8')\n", (1577, 1609), True, 'import numpy as np\n'), ((1693, 1725), 'win32gui.ReleaseDC', 'win32gui.ReleaseDC', (['hwin', 'hwindc'], {}), '(hwin, hwindc)\n', (1711, 1725), False, 'import win32gui, win32ui, win32con, win32api\n'), ((918, 972), 'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['win32con.SM_CXVIRTUALSCREEN'], {}), '(win32con.SM_CXVIRTUALSCREEN)\n', (943, 972), False, 'import win32gui, win32ui, win32con, win32api\n'), ((990, 1044), 'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['win32con.SM_CYVIRTUALSCREEN'], {}), '(win32con.SM_CYVIRTUALSCREEN)\n', (1015, 1044), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1060, 1113), 
'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['win32con.SM_XVIRTUALSCREEN'], {}), '(win32con.SM_XVIRTUALSCREEN)\n', (1085, 1113), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1128, 1181), 'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['win32con.SM_YVIRTUALSCREEN'], {}), '(win32con.SM_YVIRTUALSCREEN)\n', (1153, 1181), False, 'import win32gui, win32ui, win32con, win32api\n')] |
import cv2
import numpy
def Process(raw):
    """Detect the glyph contour closest to the horizontal image center.

    Resizes the frame to 1280x720, builds a closed edge map, keeps contours
    that are large enough, roughly quadrilateral (4-6 approximated vertices)
    and below the top quarter of the frame, and highlights the candidate
    whose center is nearest the vertical midline.  Shows the annotated frame
    and edge map until 'q' is pressed.

    Args:
        raw: BGR image (numpy array) as returned by cv2.imread.
    """
    raw = cv2.resize(raw, (1280, 720))
    height, width = raw.shape[:2]
    # Edge map: grey -> blur -> edge-preserving smoothing -> Canny -> close gaps
    grey = cv2.cvtColor(raw, cv2.COLOR_BGR2GRAY)
    grey = cv2.GaussianBlur(grey, (3, 3), 1)
    grey = cv2.bilateralFilter(grey, 11, 17, 17)
    edges = cv2.Canny(grey, 20, 50)
    structure = cv2.getStructuringElement(cv2.MORPH_RECT, (45, 45))
    edges = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, structure)
    # findContours returns (image, contours, hierarchy) in OpenCV 3 but
    # (contours, hierarchy) in OpenCV 2/4; [-2] selects contours in all cases.
    cnts = cv2.findContours(edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
    # Keep only the ten largest contours.
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:10]
    chosenRect = None
    chosenDistance = 10000
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        # visualize the convex hull of every candidate
        hullPoints = cv2.convexHull(c)
        cv2.polylines(raw, hullPoints, True, (0, 255, 255), 4)
        rect = cv2.boundingRect(c)
        x, y, w, h = rect
        # large enough, roughly quadrilateral, and not in the top quarter
        if cv2.contourArea(c) >= 4000 and 4 <= len(approx) <= 6 and y > (height / 4):
            cv2.drawContours(raw, [approx], -1, (255, 255, 0), 3)
            center = rect[0] + (rect[2] / 2)
            distance = abs(center - (width / 2))
            cv2.circle(raw, (int(center), int(rect[1] + (rect[3] / 2))), 4, (0, 255, 255), 3)
            cv2.putText(raw, str(distance), (int(center), int(rect[1] + (rect[3] / 2))), 1, 1.5,
                        (255, 255, 255), 2)
            print("Distance: " + str(distance) + " vs " + str(chosenDistance))
            if distance < chosenDistance:
                print("CHOSEN!")
                chosenRect = rect
                chosenDistance = distance
    # Guard against no contour passing the filters: previously the
    # unconditional unpack of chosenRect raised TypeError on None.
    if chosenRect is not None:
        (x, y, w, h) = chosenRect
        result = int(x + (w / 2))
        cv2.rectangle(raw, (x, y), (x + w, y + h), (0, 255, 0), 5)
        cv2.putText(raw, "Chosen", (x + 10, y + int(h / 2) - 40), cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 255, 0), 2)
        cv2.putText(raw, "RESULT: " + str(result), (result, 30), 0, 1, (0, 255, 0), 1)
        cv2.line(raw, (result, 0), (result, 1000), (0, 255, 0), 3)
    cv2.imshow("Raw", raw)
    cv2.imshow("Edges", edges)
    while True:
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
import glob
# Run glyph detection on every sample image in the glyphs folder.
for image_path in glob.glob("./images/glyphs/*.jpg"):
    Process(cv2.imread(image_path))
| [
"cv2.rectangle",
"cv2.imshow",
"cv2.approxPolyDP",
"cv2.arcLength",
"cv2.line",
"cv2.contourArea",
"cv2.waitKey",
"glob.glob",
"cv2.drawContours",
"cv2.polylines",
"cv2.morphologyEx",
"cv2.cvtColor",
"cv2.resize",
"cv2.Canny",
"cv2.imread",
"cv2.GaussianBlur",
"cv2.convexHull",
"cv... | [((2585, 2619), 'glob.glob', 'glob.glob', (['"""./images/glyphs/*.jpg"""'], {}), "('./images/glyphs/*.jpg')\n", (2594, 2619), False, 'import glob\n'), ((54, 82), 'cv2.resize', 'cv2.resize', (['raw', '(1280, 720)'], {}), '(raw, (1280, 720))\n', (64, 82), False, 'import cv2\n'), ((128, 165), 'cv2.cvtColor', 'cv2.cvtColor', (['raw', 'cv2.COLOR_BGR2GRAY'], {}), '(raw, cv2.COLOR_BGR2GRAY)\n', (140, 165), False, 'import cv2\n'), ((177, 210), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['grey', '(3, 3)', '(1)'], {}), '(grey, (3, 3), 1)\n', (193, 210), False, 'import cv2\n'), ((222, 259), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['grey', '(11)', '(17)', '(17)'], {}), '(grey, 11, 17, 17)\n', (241, 259), False, 'import cv2\n'), ((272, 295), 'cv2.Canny', 'cv2.Canny', (['grey', '(20)', '(50)'], {}), '(grey, 20, 50)\n', (281, 295), False, 'import cv2\n'), ((312, 363), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(45, 45)'], {}), '(cv2.MORPH_RECT, (45, 45))\n', (337, 363), False, 'import cv2\n'), ((376, 427), 'cv2.morphologyEx', 'cv2.morphologyEx', (['edges', 'cv2.MORPH_CLOSE', 'structure'], {}), '(edges, cv2.MORPH_CLOSE, structure)\n', (392, 427), False, 'import cv2\n'), ((623, 664), 'numpy.zeros', 'numpy.zeros', (['(height, width)', 'numpy.uint8'], {}), '((height, width), numpy.uint8)\n', (634, 664), False, 'import numpy\n'), ((2100, 2158), 'cv2.rectangle', 'cv2.rectangle', (['raw', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(5)'], {}), '(raw, (x, y), (x + w, y + h), (0, 255, 0), 5)\n', (2113, 2158), False, 'import cv2\n'), ((2356, 2414), 'cv2.line', 'cv2.line', (['raw', '(result, 0)', '(result, 1000)', '(0, 255, 0)', '(3)'], {}), '(raw, (result, 0), (result, 1000), (0, 255, 0), 3)\n', (2364, 2414), False, 'import cv2\n'), ((2420, 2442), 'cv2.imshow', 'cv2.imshow', (['"""Raw"""', 'raw'], {}), "('Raw', raw)\n", (2430, 2442), False, 'import cv2\n'), ((2448, 2474), 'cv2.imshow', 'cv2.imshow', (['"""Edges"""', 'edges'], {}), 
"('Edges', edges)\n", (2458, 2474), False, 'import cv2\n'), ((785, 807), 'cv2.arcLength', 'cv2.arcLength', (['c', '(True)'], {}), '(c, True)\n', (798, 807), False, 'import cv2\n'), ((825, 863), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['c', '(0.02 * peri)', '(True)'], {}), '(c, 0.02 * peri, True)\n', (841, 863), False, 'import cv2\n'), ((1000, 1017), 'cv2.convexHull', 'cv2.convexHull', (['c'], {}), '(c)\n', (1014, 1017), False, 'import cv2\n'), ((1026, 1080), 'cv2.polylines', 'cv2.polylines', (['raw', 'hullPoints', '(True)', '(0, 255, 255)', '(4)'], {}), '(raw, hullPoints, True, (0, 255, 255), 4)\n', (1039, 1080), False, 'import cv2\n'), ((1096, 1115), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (1112, 1115), False, 'import cv2\n'), ((2633, 2649), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (2643, 2649), False, 'import cv2\n'), ((1154, 1172), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (1169, 1172), False, 'import cv2\n'), ((2505, 2519), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2516, 2519), False, 'import cv2\n'), ((1294, 1347), 'cv2.drawContours', 'cv2.drawContours', (['raw', '[approx]', '(-1)', '(255, 255, 0)', '(3)'], {}), '(raw, [approx], -1, (255, 255, 0), 3)\n', (1310, 1347), False, 'import cv2\n')] |
### SCIPY Y MATPLOTLIB
import numpy as np
#from scipy.special import jn
# ejemplo con funcion Bessel (foto de Rosalind Franklin)
#x = np.linspace(xmin, xmax, npts)
#layers = np.array([jn(i, x)**2 for i in range(nlayers)])
#maxi = [(np.diff(np.sign(np.diff(layers[i,:]))) < 0).nonzero()[0] + 1
# for i in range(nlayers)]
### EJEMPLO DEL MODELO EPIDEMIOLOGICO DE SIR
#from scipy.integrate import odeint
#import matplotlib.pyplot as plt
#SI: Enfermedades víricas que causan infección vitalicia, como el VIH.
#SIS: Enfermedades que no confieren inmunidad tras la infección
#SIR: Enfermedades víricas en las que una vez infectado, se obtiene inmunidad vitalicia
#def deriv(y, t, N, beta, gamma):
#S, I, R = y
#dSdt = -beta * S * I / N
#dIdt = beta * S * I / N - gamma * I
#dRdt = gamma * I
#return dSdt, dIdt, dRdt
# Initial conditions vector
#y0 = S0, I0, R0
# Integrate the SIR equations over the time grid, t.
#ret = odeint(deriv, y0, t, args=(N, beta, gamma))
#S, I, R = ret.T
### Grafica interactiva
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, RadioButtons # Interactivo
def dySIS(y, t, lamda, mu):
    """Right-hand side of the SI/SIS ODE: di/dt = lamda*i*(1-i) - mu*i.

    y     : current infected proportion i(t)
    t     : time (unused; required by scipy.integrate.odeint's signature)
    lamda : daily contact rate
    mu    : daily cure rate (0 gives the plain SI model)
    """
    return lamda * y * (1 - y) - mu * y
def dySIR(y, t, lamda, mu):
    """Right-hand side of the SIR ODE system.

    y     : pair (i, s) of infected and susceptible proportions
    t     : time (unused; required by scipy.integrate.odeint's signature)
    lamda : daily contact rate
    mu    : daily cure rate

    Returns np.array([di/dt, ds/dt]); recovered follows as r = 1 - i - s.
    """
    infected, susceptible = y
    new_infections = lamda * susceptible * infected
    return np.array([new_infections - mu * infected, -new_infections])
# parameter values
number = 1e5 # total number of people (kept for reference; not used below)
lamda = 0.2 # Daily contact rate, the average number of susceptible persons who are effectively in contact with the sick each day
sigma = 2.5 # Number of contacts during infectious period
mu = lamda/sigma # Daily cure rate, the ratio of the number of patients cured each day to the total number of patients
tEnd = 200 # Forecast date length
t = np.arange(0.0,tEnd,1) # daily time grid: (start,stop,step)
i0 = 1e-4 # Initial value of the proportion of patients
s0 = 1-i0 # Initial value of the proportion of susceptible persons
Y0 = (i0, s0) # Initial value of the differential equation system
# integrate each model over the time grid
ySI = odeint(dySIS, i0, t, args=(lamda,0)) # SI model
ySIS = odeint(dySIS, i0, t, args=(lamda,mu)) # SIS model
ySIR = odeint(dySIR, Y0, t, args=(lamda,mu)) # SIR model
# Plotting
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.25, bottom=0.4) # leave room below for the sliders
plt.title("Comparison among SI, SIS and SIR models")
plt.xlabel('time')
plt.axis([0, tEnd, -0.1, 1.1])
# keep references to the line artists so the slider callback can update them
si_plt, = plt.plot(t, ySI,':g', label='i(t)-SI')
sis_plt, = plt.plot(t, ySIS,'--g', label='i(t)-SIS')
sir_i_plt, = plt.plot(t, ySIR[:,0],'-r', label='i(t)-SIR')
sir_s_plt, = plt.plot(t, ySIR[:,1],'-b', label='s(t)-SIR')
sir_r_plt, = plt.plot(t, 1-ySIR[:,0]-ySIR[:,1],'-m', label='r(t)-SIR')
plt.legend(loc='best') # let matplotlib pick the best legend location
# Add the interactive slider bars
axcolor = 'lightgoldenrodyellow'
# Create the axes areas that will hold each slider
axlambda = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
axsigma = plt.axes([0.25, 0.18, 0.65, 0.03], facecolor=axcolor)
axi0 = plt.axes([0.25, 0.26, 0.65, 0.03], facecolor=axcolor)
# Create the slider widgets themselves
slambda = Slider(axlambda, 'Daily contact rate', 0.1, 1,
                valinit=lamda, color="green")
ssigma = Slider(axsigma, 'Contacts during\ninfectious period', 0.1, 10,
                valinit=sigma)
si0 = Slider(axi0, 'Initial proportion\nof patients', 1e-4, 5e-1,
                valinit=i0, color="orange")
plt.show()
def update(val, ):
    """Slider callback: re-solve the three models with the current slider
    values and redraw the existing curves in place.

    val: the new slider value (unused; all sliders are re-read directly).
    """
    lamda = slambda.val
    sigma = ssigma.val
    i0 = si0.val
    mu = lamda / sigma
    s0 = 1 - i0
    Y0 = (i0, s0)
    ySI = odeint(dySIS, i0, t, args=(lamda, 0))  # SI model
    ySIS = odeint(dySIS, i0, t, args=(lamda, mu))  # SIS model
    ySIR = odeint(dySIR, Y0, t, args=(lamda, mu))  # SIR model
    # update the y-data of the existing line artists instead of re-plotting
    si_plt.set_ydata(ySI)
    sis_plt.set_ydata(ySIS)
    sir_i_plt.set_ydata(ySIR[:, 0])
    sir_s_plt.set_ydata(ySIR[:, 1])
    sir_r_plt.set_ydata(1 - ySIR[:, 0] - ySIR[:, 1])
    fig.canvas.draw_idle()
    # NOTE(review): plt.show() inside a widget callback is typically a no-op
    # once the figure is already displayed — confirm it is intended here
    plt.show()
### register the update callback on each slider
slambda.on_changed(update)
ssigma.on_changed(update)
si0.on_changed(update)
### radio buttons to display a single model type at a time
rax = plt.axes([0.025, 0.5, 0.15, 0.15], facecolor=axcolor)
radio = RadioButtons(rax, ('SI', 'SIS', 'SIR'), active=0)
# map each radio label to the line artists that belong to that model
lines = {'SI':[si_plt], 'SIS':[sis_plt],
         'SIR':[sir_i_plt, sir_s_plt, sir_r_plt]}
def select_model(label):
    """RadioButtons callback: make only the selected model's lines visible.

    label: the clicked radio label, one of 'SI', 'SIS', 'SIR' (a key of `lines`).
    """
    # the selected model's lines become fully opaque
    for line_m in lines[label]:
        line_m.set_alpha(1)
    # every other model's lines become fully transparent (hidden)
    for others in set(lines.keys()) - set([label]):
        for line_m in lines[others]:
            line_m.set_alpha(0)
    fig.canvas.draw_idle()
# show only whichever model the user clicks in the radio-button panel
radio.on_clicked(select_model)
plt.show() | [
"numpy.arange",
"scipy.integrate.odeint",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"numpy.array",
"matplotlib.pyplot.axes",
"matplotlib.widgets.RadioButtons",
"matplotlib.pyplot.title",
"matplotlib.widgets.Slider",
"matplotlib.... | [((1892, 1915), 'numpy.arange', 'np.arange', (['(0.0)', 'tEnd', '(1)'], {}), '(0.0, tEnd, 1)\n', (1901, 1915), True, 'import numpy as np\n'), ((2135, 2172), 'scipy.integrate.odeint', 'odeint', (['dySIS', 'i0', 't'], {'args': '(lamda, 0)'}), '(dySIS, i0, t, args=(lamda, 0))\n', (2141, 2172), False, 'from scipy.integrate import odeint\n'), ((2191, 2229), 'scipy.integrate.odeint', 'odeint', (['dySIS', 'i0', 't'], {'args': '(lamda, mu)'}), '(dySIS, i0, t, args=(lamda, mu))\n', (2197, 2229), False, 'from scipy.integrate import odeint\n'), ((2249, 2287), 'scipy.integrate.odeint', 'odeint', (['dySIR', 'Y0', 't'], {'args': '(lamda, mu)'}), '(dySIR, Y0, t, args=(lamda, mu))\n', (2255, 2287), False, 'from scipy.integrate import odeint\n'), ((2323, 2337), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2335, 2337), True, 'import matplotlib.pyplot as plt\n'), ((2339, 2381), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.25)', 'bottom': '(0.4)'}), '(left=0.25, bottom=0.4)\n', (2358, 2381), True, 'import matplotlib.pyplot as plt\n'), ((2385, 2437), 'matplotlib.pyplot.title', 'plt.title', (['"""Comparison among SI, SIS and SIR models"""'], {}), "('Comparison among SI, SIS and SIR models')\n", (2394, 2437), True, 'import matplotlib.pyplot as plt\n'), ((2439, 2457), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (2449, 2457), True, 'import matplotlib.pyplot as plt\n'), ((2459, 2489), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, tEnd, -0.1, 1.1]'], {}), '([0, tEnd, -0.1, 1.1])\n', (2467, 2489), True, 'import matplotlib.pyplot as plt\n'), ((2503, 2542), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'ySI', '""":g"""'], {'label': '"""i(t)-SI"""'}), "(t, ySI, ':g', label='i(t)-SI')\n", (2511, 2542), True, 'import matplotlib.pyplot as plt\n'), ((2556, 2598), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'ySIS', '"""--g"""'], {'label': '"""i(t)-SIS"""'}), "(t, ySIS, '--g', label='i(t)-SIS')\n", 
(2564, 2598), True, 'import matplotlib.pyplot as plt\n'), ((2614, 2661), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'ySIR[:, 0]', '"""-r"""'], {'label': '"""i(t)-SIR"""'}), "(t, ySIR[:, 0], '-r', label='i(t)-SIR')\n", (2622, 2661), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2721), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'ySIR[:, 1]', '"""-b"""'], {'label': '"""s(t)-SIR"""'}), "(t, ySIR[:, 1], '-b', label='s(t)-SIR')\n", (2682, 2721), True, 'import matplotlib.pyplot as plt\n'), ((2734, 2798), 'matplotlib.pyplot.plot', 'plt.plot', (['t', '(1 - ySIR[:, 0] - ySIR[:, 1])', '"""-m"""'], {'label': '"""r(t)-SIR"""'}), "(t, 1 - ySIR[:, 0] - ySIR[:, 1], '-m', label='r(t)-SIR')\n", (2742, 2798), True, 'import matplotlib.pyplot as plt\n'), ((2795, 2817), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2805, 2817), True, 'import matplotlib.pyplot as plt\n'), ((2963, 3015), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.1, 0.65, 0.03]'], {'facecolor': 'axcolor'}), '([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)\n', (2971, 3015), True, 'import matplotlib.pyplot as plt\n'), ((3027, 3080), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.18, 0.65, 0.03]'], {'facecolor': 'axcolor'}), '([0.25, 0.18, 0.65, 0.03], facecolor=axcolor)\n', (3035, 3080), True, 'import matplotlib.pyplot as plt\n'), ((3089, 3142), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.26, 0.65, 0.03]'], {'facecolor': 'axcolor'}), '([0.25, 0.26, 0.65, 0.03], facecolor=axcolor)\n', (3097, 3142), True, 'import matplotlib.pyplot as plt\n'), ((3182, 3258), 'matplotlib.widgets.Slider', 'Slider', (['axlambda', '"""Daily contact rate"""', '(0.1)', '(1)'], {'valinit': 'lamda', 'color': '"""green"""'}), "(axlambda, 'Daily contact rate', 0.1, 1, valinit=lamda, color='green')\n", (3188, 3258), False, 'from matplotlib.widgets import Slider, RadioButtons\n'), ((3285, 3370), 'matplotlib.widgets.Slider', 'Slider', (['axsigma', '"""Contacts during\ninfectious period"""', 
'(0.1)', '(10)'], {'valinit': 'sigma'}), '(axsigma, """Contacts during\ninfectious period""", 0.1, 10, valinit=sigma\n )\n', (3291, 3370), False, 'from matplotlib.widgets import Slider, RadioButtons\n'), ((3387, 3482), 'matplotlib.widgets.Slider', 'Slider', (['axi0', '"""Initial proportion\nof patients"""', '(0.0001)', '(0.5)'], {'valinit': 'i0', 'color': '"""orange"""'}), '(axi0, """Initial proportion\nof patients""", 0.0001, 0.5, valinit=i0,\n color=\'orange\')\n', (3393, 3482), False, 'from matplotlib.widgets import Slider, RadioButtons\n'), ((3493, 3503), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3501, 3503), True, 'import matplotlib.pyplot as plt\n'), ((4248, 4301), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.025, 0.5, 0.15, 0.15]'], {'facecolor': 'axcolor'}), '([0.025, 0.5, 0.15, 0.15], facecolor=axcolor)\n', (4256, 4301), True, 'import matplotlib.pyplot as plt\n'), ((4311, 4360), 'matplotlib.widgets.RadioButtons', 'RadioButtons', (['rax', "('SI', 'SIS', 'SIR')"], {'active': '(0)'}), "(rax, ('SI', 'SIS', 'SIR'), active=0)\n", (4323, 4360), False, 'from matplotlib.widgets import Slider, RadioButtons\n'), ((4855, 4865), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4863, 4865), True, 'import matplotlib.pyplot as plt\n'), ((1452, 1476), 'numpy.array', 'np.array', (['[di_dt, ds_dt]'], {}), '([di_dt, ds_dt])\n', (1460, 1476), True, 'import numpy as np\n'), ((3681, 3718), 'scipy.integrate.odeint', 'odeint', (['dySIS', 'i0', 't'], {'args': '(lamda, 0)'}), '(dySIS, i0, t, args=(lamda, 0))\n', (3687, 3718), False, 'from scipy.integrate import odeint\n'), ((3743, 3781), 'scipy.integrate.odeint', 'odeint', (['dySIS', 'i0', 't'], {'args': '(lamda, mu)'}), '(dySIS, i0, t, args=(lamda, mu))\n', (3749, 3781), False, 'from scipy.integrate import odeint\n'), ((3807, 3845), 'scipy.integrate.odeint', 'odeint', (['dySIR', 'Y0', 't'], {'args': '(lamda, mu)'}), '(dySIR, Y0, t, args=(lamda, mu))\n', (3813, 3845), False, 'from scipy.integrate import 
odeint\n'), ((4076, 4086), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4084, 4086), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 15 20:59:32 2020
@author: 王少泽
"""
import random
import numpy as np
from sklearn import metrics
import pandas as pd
import matplotlib.pyplot as plt
class kmeans_input_weight:
    """K-means helpers supporting per-feature weights supplied by the caller."""

    def __check_params(self, data, k, weights, max_iter, tol):
        """Validate clustering hyper-parameters; raise ValueError on bad input.

        :data: (n_samples, n_features) array
        :k: number of clusters, must be in (0, n_samples]
        :weights: per-feature weights; size must equal n_features
        :max_iter: positive iteration limit
        :tol: non-negative convergence tolerance
        """
        if k <= 0 or k > data.shape[0]:
            raise ValueError("k must be > 0 and <= {}, got {}".format(data.shape[0], k))
        if weights.size != data.shape[1]:
            # bug fix: the expected length is the feature count data.shape[1],
            # not the sample count data.shape[0]
            raise ValueError("weights length expected {}, got {}".format(data.shape[1], weights.size))
        if max_iter <= 0:
            raise ValueError("max_iter must be > 0, got {}".format(max_iter))
        if tol < 0.0:
            raise ValueError("tol must be >= 0.0, got {}".format(tol))

    def sqrsum(self, x):
        """Return the sum of squared elements of x."""
        return np.sum(x * x)

    # evaluation metrics, used for testing
    def get_marks(self, data, true_labels, predicted_labels):
        """Print clustering evaluation scores.  Several of the metrics need
        the dataset's ground-truth labels; see readme.txt.

        :data: the data that was clustered
        :true_labels: ground-truth class labels
        :predicted_labels: labels predicted by the model
        """
        print(30 * '*', "model performance", 30 * '*')
        print("Homogeneity Score (均一性): ", metrics.homogeneity_score(true_labels, predicted_labels))
        print("Completeness Score (完整性): ", metrics.completeness_score(true_labels, predicted_labels))
        print("V-Measure Score (V量): ", metrics.v_measure_score(true_labels, predicted_labels))
        print("Adjusted Rand Score (调整后兰德指数): ", metrics.adjusted_rand_score(true_labels, predicted_labels))
        print("Adjusted Mutual Info Score(调整后的共同信息): ", metrics.adjusted_mutual_info_score(true_labels, predicted_labels))
        print("Calinski Harabasz Score: (方差比指数) ", metrics.calinski_harabasz_score(data, predicted_labels))
        print("Silhouette Score (轮廓分数): ", metrics.silhouette_score(data, predicted_labels))

    def plus_plus(self, ds, k, random_state=42):
        """
        Create cluster centroids using the k-means++ algorithm.

        Parameters
        ----------
        ds : numpy array
            The dataset to be used for centroid initialization.
        k : int
            The desired number of clusters for which centroids are required.

        Returns
        -------
        centroids : numpy array
            Collection of k centroids (rows of ds) as a numpy array.

        Adapted from: https://www.kdnuggets.com/2020/06/centroid-initialization-k-means-clustering.html
        """
        np.random.seed(random_state)
        # bug fix: the original drew the first index with the *unseeded*
        # stdlib random.randint(0, ds.shape[0]) — non-reproducible despite
        # random_state, and inclusive of ds.shape[0] (possible IndexError).
        # np.random.randint uses a half-open interval and the seeded stream.
        randidx = np.random.randint(ds.shape[0])
        centroids = [ds[randidx]]
        for _ in range(1, k):
            # squared distance from every point to its nearest chosen centroid
            dist_sq = np.array([min([np.inner(c - x, c - x) for c in centroids]) for x in ds])
            probs = dist_sq / dist_sq.sum()
            cumulative_probs = probs.cumsum()
            r = np.random.rand()
            # first index whose cumulative probability strictly exceeds r —
            # equivalent to the original linear scan, but always well-defined
            # even if float round-off leaves the last cumulative value < 1
            i = min(int(np.searchsorted(cumulative_probs, r, side='right')),
                    len(ds) - 1)
            centroids.append(ds[i])
        return np.array(centroids)
"sklearn.metrics.homogeneity_score",
"sklearn.metrics.calinski_harabasz_score",
"numpy.random.rand",
"sklearn.metrics.adjusted_mutual_info_score",
"sklearn.metrics.adjusted_rand_score",
"numpy.inner",
"sklearn.metrics.completeness_score",
"numpy.sum",
"numpy.array",
"sklearn.metrics.v_measure_scor... | [((837, 850), 'numpy.sum', 'np.sum', (['(x * x)'], {}), '(x * x)\n', (843, 850), True, 'import numpy as np\n'), ((2540, 2568), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (2554, 2568), True, 'import numpy as np\n'), ((2586, 2616), 'random.randint', 'random.randint', (['(0)', 'ds.shape[0]'], {}), '(0, ds.shape[0])\n', (2600, 2616), False, 'import random\n'), ((3109, 3128), 'numpy.array', 'np.array', (['centroids'], {}), '(centroids)\n', (3117, 3128), True, 'import numpy as np\n'), ((1196, 1252), 'sklearn.metrics.homogeneity_score', 'metrics.homogeneity_score', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (1221, 1252), False, 'from sklearn import metrics\n'), ((1306, 1363), 'sklearn.metrics.completeness_score', 'metrics.completeness_score', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (1332, 1363), False, 'from sklearn import metrics\n'), ((1412, 1466), 'sklearn.metrics.v_measure_score', 'metrics.v_measure_score', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (1435, 1466), False, 'from sklearn import metrics\n'), ((1536, 1594), 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (1563, 1594), False, 'from sklearn import metrics\n'), ((1655, 1720), 'sklearn.metrics.adjusted_mutual_info_score', 'metrics.adjusted_mutual_info_score', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (1689, 1720), False, 'from sklearn import metrics\n'), ((1769, 1824), 'sklearn.metrics.calinski_harabasz_score', 'metrics.calinski_harabasz_score', (['data', 'predicted_labels'], {}), '(data, predicted_labels)\n', (1800, 1824), False, 'from sklearn import metrics\n'), ((1877, 1925), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['data', 'predicted_labels'], {}), '(data, 
predicted_labels)\n', (1901, 1925), False, 'from sklearn import metrics\n'), ((2882, 2898), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2896, 2898), True, 'import numpy as np\n'), ((2722, 2744), 'numpy.inner', 'np.inner', (['(c - x)', '(c - x)'], {}), '(c - x, c - x)\n', (2730, 2744), True, 'import numpy as np\n')] |
import numpy as np
def blndsco_to_exprsco(blndsco):
  """Convert a blended score (rate, nsamps, score) into an expressive score.

  Returns (rate, nsamps, exprsco) where exprsco is a (len(score), 4, 3)
  uint8 array: for the first three voices of each frame, channel 0 holds
  the note and channel 1 holds a fixed velocity (15 for voices 0-1, 0 for
  voice 2); everything else stays zero.
  """
  rate, nsamps, score = blndsco
  exprsco = np.zeros((len(score), 4, 3), dtype=np.uint8)
  for frame_idx, frame in enumerate(score):
    for voice_idx, note in enumerate(frame[:3]):
      exprsco[frame_idx, voice_idx, 0] = note
      # third voice (index 2) gets velocity 0, the others 15
      exprsco[frame_idx, voice_idx, 1] = 15 if voice_idx != 2 else 0
  return (rate, nsamps, exprsco)
"numpy.zeros"
] | [((124, 167), 'numpy.zeros', 'np.zeros', (['(score_len, 4, 3)'], {'dtype': 'np.uint8'}), '((score_len, 4, 3), dtype=np.uint8)\n', (132, 167), True, 'import numpy as np\n')] |
import os
import math
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import rc
import numpy as np
import csv
# Parse the CSV: each row starts with a name, followed by millisecond
# timestamps; store per-name visit times relative to the row's first
# timestamp, converted to seconds.
with open('2DormCorr-Limited.csv', 'r') as csvfile:
    fileReader = csv.reader(csvfile, delimiter=',')
    nameToVisitTimeMap = {}
    currentName = ""
    startValue = 0
    for row in fileReader:
        for num, word in enumerate(row):
            if num == 0:
                # first column is the name; start a fresh series for it
                nameToVisitTimeMap[word] = []
                currentName = word
                startValue = 0
            else:
                # bug fix: `long` exists only in Python 2 (NameError on
                # Python 3); `int` is unbounded in Python 3
                value = int(word) / 1000.0
                if startValue == 0:
                    startValue = value
                nameToVisitTimeMap[currentName].append(value - startValue)

# Plot each person's visit index against the elapsed time of that visit
fig = plt.figure(num=None, figsize=(50, 12), dpi=80)
for name, times in nameToVisitTimeMap.items():
    plt.plot(times, range(len(times)))
plt.xlim(0, 2100)
plt.yticks(np.arange(0, 8, 1))
plt.xticks(np.arange(0, 2100, 100))
plt.show()
# plt.savefig("TrustProb-Scenario{0}".format(scenarioNumber), pad_inches=0)
# plt.close() | [
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xlim",
"csv.reader",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((233, 267), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (243, 267), False, 'import csv\n'), ((754, 800), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(50, 12)', 'dpi': '(80)'}), '(num=None, figsize=(50, 12), dpi=80)\n', (764, 800), True, 'import matplotlib.pyplot as plt\n'), ((942, 959), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(2100)'], {}), '(0, 2100)\n', (950, 959), True, 'import matplotlib.pyplot as plt\n'), ((1948, 1958), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1956, 1958), True, 'import matplotlib.pyplot as plt\n'), ((974, 992), 'numpy.arange', 'np.arange', (['(0)', '(8)', '(1)'], {}), '(0, 8, 1)\n', (983, 992), True, 'import numpy as np\n'), ((1009, 1032), 'numpy.arange', 'np.arange', (['(0)', '(2100)', '(100)'], {}), '(0, 2100, 100)\n', (1018, 1032), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from linear_classification.basic_logistic_unit import sigmoid
sns.set()
def cross_entropy(y, y_hat):
    """Binary cross-entropy between labels `y` (0/1) and predictions `y_hat`.

    Vectorised replacement for the original per-sample Python loop: each
    sample contributes -log(y_hat[i]) where y[i] == 1 and -log(1 - y_hat[i])
    otherwise — exactly as before, but at NumPy speed.
    """
    y = np.asarray(y)
    y_hat = np.asarray(y_hat, dtype=float)
    pos = y == 1
    # only the required logarithms are evaluated, so e.g. y_hat == 1 for a
    # positive sample never triggers a log(0) warning from the other branch
    return -(np.sum(np.log(y_hat[pos])) + np.sum(np.log(1 - y_hat[~pos])))
if __name__ == '__main__':
    number_of_samples = 100
    dimensions = 2
    # two Gaussian clouds in 2-D
    X = np.random.randn(number_of_samples, dimensions)
    # Center the first 50 points at (-2,-2)
    X[:50,:] = X[:50,:] - 2*np.ones((50,dimensions))
    # Center the second 50 points at (2,2)
    X[50:,:] = X[50:,:] + 2*np.ones((50,dimensions))
    # labels: first half class 0, second half class 1
    y = np.array([0]*50 + [1]*50)
    # prepend a bias column of ones
    X = np.concatenate((np.ones((number_of_samples, 1)), X), axis=1 )
    # random weights: expected to give a poor (large) cross entropy
    W = np.random.randn(dimensions+1)
    z = X.dot(W)
    y_hat = sigmoid(z)
    print("Cross Entropy:", cross_entropy(y, y_hat))
    # hand-picked separating weights (bias 0, w = (4, 4)) for this symmetric data
    w_closed_form = np.array([0, 4, 4])
    y_hat_closed_form = sigmoid(X.dot(w_closed_form))
    print("Cross Entropy (closed form):", cross_entropy(y, y_hat_closed_form))
    plt.figure()
    plt.scatter(X[:,1], X[:,2], c=y, s=100, alpha=0.5)
    # the line x2 = -x1 separates the two clouds
    x_axis = np.linspace(-6, 6, 100)
    y_axis = -x_axis
    plt.plot(x_axis, y_axis, '--r')
plt.show() | [
"seaborn.set",
"numpy.ones",
"matplotlib.pyplot.plot",
"numpy.log",
"linear_classification.basic_logistic_unit.sigmoid",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"numpy.random.randn",
"matplotlib.pyplot.show"
] | [((136, 145), 'seaborn.set', 'sns.set', ([], {}), '()\n', (143, 145), True, 'import seaborn as sns\n'), ((416, 462), 'numpy.random.randn', 'np.random.randn', (['number_of_samples', 'dimensions'], {}), '(number_of_samples, dimensions)\n', (431, 462), True, 'import numpy as np\n'), ((667, 696), 'numpy.array', 'np.array', (['([0] * 50 + [1] * 50)'], {}), '([0] * 50 + [1] * 50)\n', (675, 696), True, 'import numpy as np\n'), ((773, 804), 'numpy.random.randn', 'np.random.randn', (['(dimensions + 1)'], {}), '(dimensions + 1)\n', (788, 804), True, 'import numpy as np\n'), ((833, 843), 'linear_classification.basic_logistic_unit.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (840, 843), False, 'from linear_classification.basic_logistic_unit import sigmoid\n'), ((918, 937), 'numpy.array', 'np.array', (['[0, 4, 4]'], {}), '([0, 4, 4])\n', (926, 937), True, 'import numpy as np\n'), ((1076, 1088), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1086, 1088), True, 'import matplotlib.pyplot as plt\n'), ((1093, 1145), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 1]', 'X[:, 2]'], {'c': 'y', 's': '(100)', 'alpha': '(0.5)'}), '(X[:, 1], X[:, 2], c=y, s=100, alpha=0.5)\n', (1104, 1145), True, 'import matplotlib.pyplot as plt\n'), ((1157, 1180), 'numpy.linspace', 'np.linspace', (['(-6)', '(6)', '(100)'], {}), '(-6, 6, 100)\n', (1168, 1180), True, 'import numpy as np\n'), ((1206, 1237), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'y_axis', '"""--r"""'], {}), "(x_axis, y_axis, '--r')\n", (1214, 1237), True, 'import matplotlib.pyplot as plt\n'), ((1242, 1252), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1250, 1252), True, 'import matplotlib.pyplot as plt\n'), ((253, 269), 'numpy.log', 'np.log', (['y_hat[i]'], {}), '(y_hat[i])\n', (259, 269), True, 'import numpy as np\n'), ((301, 321), 'numpy.log', 'np.log', (['(1 - y_hat[i])'], {}), '(1 - y_hat[i])\n', (307, 321), True, 'import numpy as np\n'), ((536, 561), 'numpy.ones', 'np.ones', (['(50, 
dimensions)'], {}), '((50, dimensions))\n', (543, 561), True, 'import numpy as np\n'), ((633, 658), 'numpy.ones', 'np.ones', (['(50, dimensions)'], {}), '((50, dimensions))\n', (640, 658), True, 'import numpy as np\n'), ((718, 749), 'numpy.ones', 'np.ones', (['(number_of_samples, 1)'], {}), '((number_of_samples, 1))\n', (725, 749), True, 'import numpy as np\n')] |
"""save matched images in chosen directory"""
# Python libs
from collections import defaultdict
import os
# external libs
import cv2
import numpy as np
# internal libs
from Show_Images_Differences.add_text_to_image.add_text_to_image import add_text_to_image, is_bigger_than
from Show_Images_Differences.compute_image_differences import compute_image_differences
from Show_Images_Differences.config.logger import Logger, write_in_log
# same module
from Show_Images_Differences.modes.utils import (
check_type_width,
resize_all
)
def save(width, similar_list, by_ratio, show_differences, _argv, script_run_date):
    """Save every matched source/target pair as one combined image.

    width           : target width passed to the resizer (validated up front)
    similar_list    : iterable of matched pairs; None entries are skipped
    by_ratio        : forwarded to compute_image_differences
    show_differences: forwarded to compute_image_differences
    _argv           : CLI args; _argv[4], when present, is the output path
    script_run_date : timestamp used by the failure log

    Returns a defaultdict counting "saved matches" / "not saved matches".
    """
    # optional 5th CLI argument is the output directory (or file) path
    output_path = _argv[4] if len(_argv) >= 5 else None

    check_type_width(width)  # fail fast on an invalid width

    # Process all images, save each sequence in the chosen directory
    # https://stackoverflow.com/a/1602964/12490791
    saving_counter = defaultdict(int)
    for similar_pair in similar_list:
        if similar_pair is not None:  # PEP 8 idiom (was: `not ... is None`)
            images = compute_image_differences(
                similar_pair, by_ratio, show_differences)
            saved = save_images_as_one(
                images,
                output_path,
                width,
                script_run_date
            )
            saving_counter["saved matches" if saved else "not saved matches"] += 1
    return saving_counter
def save_images_as_one(images, output_path, width, script_run_date):
    """save source and target images with images showing differences in one image

    images: dict with keys "Source name", "Source", "Target", "Difference RGB",
    "Difference Structure", "Thresh".  Returns True if cv2.imwrite succeeded.

    NOTE(review): output_path may be None when no CLI path was given (see
    save()); os.path.splitext(None) would raise TypeError — confirm callers
    always provide a path.
    """
    # Resize to default value or custom
    images = resize_all(images, width)
    # Images to display
    source_name = images["Source name"]
    source = images["Source"]
    target = images["Target"]
    diff_BGR = images["Difference RGB"]
    diff = images["Difference Structure"]
    thresh = images["Thresh"]
    # All images have to be RGB, changing grayscale back to RGB
    diff = cv2.cvtColor(diff, cv2.COLOR_GRAY2RGB)
    thresh = cv2.cvtColor(thresh, cv2.COLOR_GRAY2RGB)
    # check if canvas is too small to add text (labels only above 100px)
    if is_bigger_than(100, source):
        source = add_text_to_image(source, "Source")
        target = add_text_to_image(target, "Target")
        diff_BGR = add_text_to_image(diff_BGR, "Difference RGB")
        diff = add_text_to_image(diff, "Difference Structure")
        thresh = add_text_to_image(thresh, "Thresh")
    # Combining all images into one NOTE: please remember that that dictionary is not ordered
    numpy_horizontal_concat = np.concatenate(
        [source, target, diff_BGR, diff, thresh], axis=1)
    # Check if chosen location is file like (has an extension)
    ext_file = os.path.splitext(output_path)[1]
    # Define output path: a bare directory gets the source file's name appended
    if not ext_file:
        output_path = os.path.join(output_path, source_name)
    # Check if file already exists, if so, add new one with name incremented by one
    if os.path.exists(output_path):
        output_path = next_path(output_path)
    # Save image into chosen location
    writeStatus = cv2.imwrite(output_path, numpy_horizontal_concat)
    # User notification where to search saved image: https://stackoverflow.com/a/51809038/12490791
    if writeStatus is True:
        print(f"Saved reference:\n {source_name}\n {output_path}")
        saved = True
    else:
        print(f"Not saved:\n {source_name}")
        saved = False
    save_log = Logger().load_saving_bool()
    if save_log:
        # NOTE(review): the "[UNSAVED]" tag is written whenever logging is
        # enabled, even for successful saves — confirm that is intended
        write_in_log("[UNSAVED]", output_path, script_run_date)
    return saved
def next_path(path_pattern):  # https://stackoverflow.com/a/47087513/12490791
    """
    Find the next free path in a sequentially numbered family of files.

    For 'dir/file.txt' the candidates are dir/file-00001.txt,
    dir/file-00002.txt, ...; the first one that does not exist yet is
    returned.  Uses O(log n) `exists` checks, where n is the number of
    files already present in the sequence.
    """
    directory = os.path.dirname(path_pattern)
    # split only on the first dot so 'archive.tar.gz' keeps 'tar.gz'
    base_name, extension = os.path.basename(path_pattern).split('.', 1)

    def candidate(idx):
        # dir/name-00001.ext
        return f"{directory}/{base_name}-{str(idx).zfill(5)}.{extension}"

    # Exponential search: double the index until an unused slot appears
    bound = 1
    while os.path.exists(candidate(bound)):
        bound *= 2

    # The answer lies in (bound // 2, bound]; binary-search for it
    low, high = bound // 2, bound
    while low + 1 < high:
        mid = (low + high) // 2
        if os.path.exists(candidate(mid)):
            low = mid
        else:
            high = mid

    # normalise separators so the returned path string is consistent
    return candidate(high).replace("\\", "/")
def format_path(temp_dir, temp_name, index, temp_ext):
    """Build 'example_dir_path/file-00001.ext' from its parts."""
    padded_index = str(index).zfill(5)
    return "{}/{}-{}.{}".format(temp_dir, temp_name, padded_index, temp_ext)
| [
"os.path.exists",
"cv2.imwrite",
"Show_Images_Differences.add_text_to_image.add_text_to_image.add_text_to_image",
"Show_Images_Differences.modes.utils.check_type_width",
"os.path.splitext",
"os.path.join",
"os.path.dirname",
"collections.defaultdict",
"os.path.basename",
"cv2.cvtColor",
"numpy.c... | [((774, 797), 'Show_Images_Differences.modes.utils.check_type_width', 'check_type_width', (['width'], {}), '(width)\n', (790, 797), False, 'from Show_Images_Differences.modes.utils import check_type_width, resize_all\n'), ((949, 965), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (960, 965), False, 'from collections import defaultdict\n'), ((1701, 1726), 'Show_Images_Differences.modes.utils.resize_all', 'resize_all', (['images', 'width'], {}), '(images, width)\n', (1711, 1726), False, 'from Show_Images_Differences.modes.utils import check_type_width, resize_all\n'), ((2040, 2078), 'cv2.cvtColor', 'cv2.cvtColor', (['diff', 'cv2.COLOR_GRAY2RGB'], {}), '(diff, cv2.COLOR_GRAY2RGB)\n', (2052, 2078), False, 'import cv2\n'), ((2092, 2132), 'cv2.cvtColor', 'cv2.cvtColor', (['thresh', 'cv2.COLOR_GRAY2RGB'], {}), '(thresh, cv2.COLOR_GRAY2RGB)\n', (2104, 2132), False, 'import cv2\n'), ((2188, 2215), 'Show_Images_Differences.add_text_to_image.add_text_to_image.is_bigger_than', 'is_bigger_than', (['(100)', 'source'], {}), '(100, source)\n', (2202, 2215), False, 'from Show_Images_Differences.add_text_to_image.add_text_to_image import add_text_to_image, is_bigger_than\n'), ((2630, 2694), 'numpy.concatenate', 'np.concatenate', (['[source, target, diff_BGR, diff, thresh]'], {'axis': '(1)'}), '([source, target, diff_BGR, diff, thresh], axis=1)\n', (2644, 2694), True, 'import numpy as np\n'), ((2997, 3024), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (3011, 3024), False, 'import os\n'), ((3128, 3177), 'cv2.imwrite', 'cv2.imwrite', (['output_path', 'numpy_horizontal_concat'], {}), '(output_path, numpy_horizontal_concat)\n', (3139, 3177), False, 'import cv2\n'), ((3986, 4015), 'os.path.dirname', 'os.path.dirname', (['path_pattern'], {}), '(path_pattern)\n', (4001, 4015), False, 'import os\n'), ((4037, 4067), 'os.path.basename', 'os.path.basename', (['path_pattern'], {}), '(path_pattern)\n', (4053, 4067), False, 
'import os\n'), ((2235, 2270), 'Show_Images_Differences.add_text_to_image.add_text_to_image.add_text_to_image', 'add_text_to_image', (['source', '"""Source"""'], {}), "(source, 'Source')\n", (2252, 2270), False, 'from Show_Images_Differences.add_text_to_image.add_text_to_image import add_text_to_image, is_bigger_than\n'), ((2288, 2323), 'Show_Images_Differences.add_text_to_image.add_text_to_image.add_text_to_image', 'add_text_to_image', (['target', '"""Target"""'], {}), "(target, 'Target')\n", (2305, 2323), False, 'from Show_Images_Differences.add_text_to_image.add_text_to_image import add_text_to_image, is_bigger_than\n'), ((2343, 2388), 'Show_Images_Differences.add_text_to_image.add_text_to_image.add_text_to_image', 'add_text_to_image', (['diff_BGR', '"""Difference RGB"""'], {}), "(diff_BGR, 'Difference RGB')\n", (2360, 2388), False, 'from Show_Images_Differences.add_text_to_image.add_text_to_image import add_text_to_image, is_bigger_than\n'), ((2404, 2451), 'Show_Images_Differences.add_text_to_image.add_text_to_image.add_text_to_image', 'add_text_to_image', (['diff', '"""Difference Structure"""'], {}), "(diff, 'Difference Structure')\n", (2421, 2451), False, 'from Show_Images_Differences.add_text_to_image.add_text_to_image import add_text_to_image, is_bigger_than\n'), ((2469, 2504), 'Show_Images_Differences.add_text_to_image.add_text_to_image.add_text_to_image', 'add_text_to_image', (['thresh', '"""Thresh"""'], {}), "(thresh, 'Thresh')\n", (2486, 2504), False, 'from Show_Images_Differences.add_text_to_image.add_text_to_image import add_text_to_image, is_bigger_than\n'), ((2764, 2793), 'os.path.splitext', 'os.path.splitext', (['output_path'], {}), '(output_path)\n', (2780, 2793), False, 'import os\n'), ((2866, 2904), 'os.path.join', 'os.path.join', (['output_path', 'source_name'], {}), '(output_path, source_name)\n', (2878, 2904), False, 'import os\n'), ((1065, 1132), 'Show_Images_Differences.compute_image_differences.compute_image_differences', 
'compute_image_differences', (['similar_pair', 'by_ratio', 'show_differences'], {}), '(similar_pair, by_ratio, show_differences)\n', (1090, 1132), False, 'from Show_Images_Differences.compute_image_differences import compute_image_differences\n'), ((3558, 3613), 'Show_Images_Differences.config.logger.write_in_log', 'write_in_log', (['"""[UNSAVED]"""', 'output_path', 'script_run_date'], {}), "('[UNSAVED]', output_path, script_run_date)\n", (3570, 3613), False, 'from Show_Images_Differences.config.logger import Logger, write_in_log\n'), ((3497, 3505), 'Show_Images_Differences.config.logger.Logger', 'Logger', ([], {}), '()\n', (3503, 3505), False, 'from Show_Images_Differences.config.logger import Logger, write_in_log\n')] |
import numpy as np
def count_overlaps(*grids):
    """For each grid, count the cells crossed by at least two lines."""
    totals = []
    for single_grid in grids:
        totals.append(np.sum(single_grid >= 2))
    return tuple(totals)
def solve(x):
    """Count grid cells covered by >= 2 line segments.

    Looks like Advent of Code 2021 day 5: `x` is an iterable of
    "x1,y1 -> x2,y2" strings; returns counts for (axis-aligned only,
    axis-aligned + diagonal) lines.
    """
    # (n_lines, 2 endpoints, 2 axes) integer array of segment endpoints.
    coords = np.array([[coord.split(',') for coord in line.split(' -> ')] for line in x], dtype=int)
    # End-minus-start vector per line, and the per-axis step sign (+1/-1).
    deltas = coords[:,1] - coords[:,0]
    signs = np.where(deltas >= 0, 1, -1)
    # Layer 0 accumulates axis-aligned lines, layer 1 the diagonal ones;
    # grid is sized to the largest coordinate seen on either axis.
    grid = np.zeros((2, *np.amax(coords, axis=(0,2))+1), dtype=int)
    for c, d, s in zip(coords, deltas, signs):
        # np.all(d != 0) is 1 only for diagonal lines; the tuple of aranges
        # fancy-indexes every cell the segment passes through.
        grid[int(np.all(d!=0))][tuple(c[0,i]+np.arange(0,d[i]+s[i],s[i]) for i in range(2))] += 1
    return count_overlaps(grid[0], grid[0]+grid[1]) | [
"numpy.where",
"numpy.sum",
"numpy.all",
"numpy.amax",
"numpy.arange"
] | [((270, 298), 'numpy.where', 'np.where', (['(deltas >= 0)', '(1)', '(-1)'], {}), '(deltas >= 0, 1, -1)\n', (278, 298), True, 'import numpy as np\n'), ((66, 83), 'numpy.sum', 'np.sum', (['(grid >= 2)'], {}), '(grid >= 2)\n', (72, 83), True, 'import numpy as np\n'), ((329, 357), 'numpy.amax', 'np.amax', (['coords'], {'axis': '(0, 2)'}), '(coords, axis=(0, 2))\n', (336, 357), True, 'import numpy as np\n'), ((436, 450), 'numpy.all', 'np.all', (['(d != 0)'], {}), '(d != 0)\n', (442, 450), True, 'import numpy as np\n'), ((464, 495), 'numpy.arange', 'np.arange', (['(0)', '(d[i] + s[i])', 's[i]'], {}), '(0, d[i] + s[i], s[i])\n', (473, 495), True, 'import numpy as np\n')] |
import numpy as np
import random
import sys
import keras_rnn_preprocessing as kpp
from keras import callbacks, optimizers
from keras.models import Sequential
from keras.layers import Dense, LSTM, Embedding, GRU
#global constants
seq_size=40  # character window fed to the LSTM
sample_len=1000  # characters generated per epoch-end sample
char_level=True  # NOTE(review): not used in the visible code -- presumably consumed by kpp
verbose=1
batch_size=512
epochs=50
steps_per_epoch = None
step = 3  # stride between training windows
nietzsche = "./nietzsche/*.txt"
shakespeare = "./shakespeare/*.txt"  # NOTE(review): unused here; nietzsche corpus is loaded below
#load data: full corpus text, character vocabulary, lookup tables and
# one-hot encoded training windows (x) / next-char targets (y)
combined_plays, chars, vocabulary, char_indices, indices_char, x, y = kpp.get_data(seq_size, step, nietzsche)
#create model: 3 stacked LSTMs over one-hot char input, softmax over vocabulary
model = Sequential()
#model.add(Embedding(vocabulary, 32))
model.add(LSTM(256, input_shape=(seq_size,vocabulary), return_sequences=True))
model.add(LSTM(256, return_sequences=True))
model.add(LSTM(256))
model.add(Dense(vocabulary, activation = 'softmax'))
print(model.summary())
adam = optimizers.Adam()
model.compile(loss = 'categorical_crossentropy', optimizer = adam, metrics = ['categorical_accuracy'])
def sample(preds, temperature=1.0):
    """Draw one index from the probability vector `preds`.

    The distribution is re-weighted by `temperature`: values < 1 sharpen it
    (greedier draws), values > 1 flatten it (more random draws).
    """
    logits = np.log(np.asarray(preds).astype('float64')) / temperature
    weights = np.exp(logits)
    distribution = weights / np.sum(weights)
    one_hot_draw = np.random.multinomial(1, distribution, 1)
    return np.argmax(one_hot_draw)
def generate_sample(epoch, logs):
    """Epoch-end callback: print `sample_len` characters generated by the model.

    Seeds generation with a random `seq_size`-char window of the corpus, then
    repeatedly one-hot encodes the rolling window, predicts the next-char
    distribution and samples from it (temperature 1.0).
    """
    print()
    print('----- Generating text after Epoch: %d' % epoch)
    # Random seed window fully inside the corpus.
    start_index = random.randint(0, len(combined_plays) - seq_size - 1)
    generated = ''
    sentence = combined_plays[start_index: start_index + seq_size]
    generated += sentence
    print('----- Generating with seed: "' + sentence + '"')
    print()
    print("-"*40 + 'Generated sequence' + '-'*40)
    sys.stdout.write(generated)
    for i in range(sample_len):
        # One-hot encode the current window: (1 batch, seq_size, vocabulary).
        x_pred = np.zeros((1, seq_size, len(chars)))
        for t, char in enumerate(sentence):
            x_pred[0, t, char_indices[char]] = 1.
        preds = model.predict(x_pred, verbose=0)[0]
        next_index = sample(preds)
        next_char = indices_char[next_index]
        generated += next_char
        # Slide the window one character forward.
        sentence = sentence[1:] + next_char
        sys.stdout.write(next_char)
        sys.stdout.flush()
    print()
    print("-"*40 + 'Terminate sequence' + '-'*40)
    print()
#create callbacks
# NOTE(review): 'nieztsche' below is a misspelling of 'nietzsche'; left as-is
# because existing checkpoints on disk already use this name.
path = 'saved_models/nieztsche_{epoch:02d}.h5'
checkpoint = callbacks.ModelCheckpoint(path, verbose=verbose)
# Halve the learning rate when val_loss plateaus for 2 epochs.
reduceLR = callbacks.ReduceLROnPlateau(monitor='val_loss', patience=2, verbose=1, factor=.5)
print_sample = callbacks.LambdaCallback(on_epoch_end=generate_sample)
#fit model (20% of the data held out for validation)
model.fit(x,y,epochs=epochs,verbose=verbose, batch_size = batch_size, steps_per_epoch=steps_per_epoch,
          validation_split=0.2, callbacks=[print_sample, checkpoint, reduceLR]) | [
"keras.optimizers.Adam",
"keras_rnn_preprocessing.get_data",
"keras.callbacks.ModelCheckpoint",
"keras.callbacks.ReduceLROnPlateau",
"numpy.log",
"numpy.asarray",
"numpy.argmax",
"keras.models.Sequential",
"numpy.exp",
"keras.layers.LSTM",
"numpy.random.multinomial",
"numpy.sum",
"keras.call... | [((492, 531), 'keras_rnn_preprocessing.get_data', 'kpp.get_data', (['seq_size', 'step', 'nietzsche'], {}), '(seq_size, step, nietzsche)\n', (504, 531), True, 'import keras_rnn_preprocessing as kpp\n'), ((555, 567), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (565, 567), False, 'from keras.models import Sequential\n'), ((836, 853), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {}), '()\n', (851, 853), False, 'from keras import callbacks, optimizers\n'), ((2282, 2330), 'keras.callbacks.ModelCheckpoint', 'callbacks.ModelCheckpoint', (['path'], {'verbose': 'verbose'}), '(path, verbose=verbose)\n', (2307, 2330), False, 'from keras import callbacks, optimizers\n'), ((2342, 2428), 'keras.callbacks.ReduceLROnPlateau', 'callbacks.ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'patience': '(2)', 'verbose': '(1)', 'factor': '(0.5)'}), "(monitor='val_loss', patience=2, verbose=1,\n factor=0.5)\n", (2369, 2428), False, 'from keras import callbacks, optimizers\n'), ((2439, 2493), 'keras.callbacks.LambdaCallback', 'callbacks.LambdaCallback', ([], {'on_epoch_end': 'generate_sample'}), '(on_epoch_end=generate_sample)\n', (2463, 2493), False, 'from keras import callbacks, optimizers\n'), ((617, 685), 'keras.layers.LSTM', 'LSTM', (['(256)'], {'input_shape': '(seq_size, vocabulary)', 'return_sequences': '(True)'}), '(256, input_shape=(seq_size, vocabulary), return_sequences=True)\n', (621, 685), False, 'from keras.layers import Dense, LSTM, Embedding, GRU\n'), ((696, 728), 'keras.layers.LSTM', 'LSTM', (['(256)'], {'return_sequences': '(True)'}), '(256, return_sequences=True)\n', (700, 728), False, 'from keras.layers import Dense, LSTM, Embedding, GRU\n'), ((740, 749), 'keras.layers.LSTM', 'LSTM', (['(256)'], {}), '(256)\n', (744, 749), False, 'from keras.layers import Dense, LSTM, Embedding, GRU\n'), ((761, 800), 'keras.layers.Dense', 'Dense', (['vocabulary'], {'activation': '"""softmax"""'}), "(vocabulary, activation='softmax')\n", (766, 
800), False, 'from keras.layers import Dense, LSTM, Embedding, GRU\n'), ((1098, 1111), 'numpy.exp', 'np.exp', (['preds'], {}), '(preds)\n', (1104, 1111), True, 'import numpy as np\n'), ((1167, 1201), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'preds', '(1)'], {}), '(1, preds, 1)\n', (1188, 1201), True, 'import numpy as np\n'), ((1213, 1230), 'numpy.argmax', 'np.argmax', (['probas'], {}), '(probas)\n', (1222, 1230), True, 'import numpy as np\n'), ((1649, 1676), 'sys.stdout.write', 'sys.stdout.write', (['generated'], {}), '(generated)\n', (1665, 1676), False, 'import sys\n'), ((1054, 1067), 'numpy.log', 'np.log', (['preds'], {}), '(preds)\n', (1060, 1067), True, 'import numpy as np\n'), ((1136, 1153), 'numpy.sum', 'np.sum', (['exp_preds'], {}), '(exp_preds)\n', (1142, 1153), True, 'import numpy as np\n'), ((2074, 2101), 'sys.stdout.write', 'sys.stdout.write', (['next_char'], {}), '(next_char)\n', (2090, 2101), False, 'import sys\n'), ((2110, 2128), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2126, 2128), False, 'import sys\n'), ((1006, 1023), 'numpy.asarray', 'np.asarray', (['preds'], {}), '(preds)\n', (1016, 1023), True, 'import numpy as np\n')] |
import numpy as np
import plotly.express as px
import pandas as pd
# Load the daily COVID-19 observations and coerce count columns to int.
data = pd.read_csv('/opt/irisapp/src/dash/pages/page2/covid_19_data.csv', index_col='SNo')
data['ObservationDate'] = pd.to_datetime(data['ObservationDate'])
data['Confirmed'] = data['Confirmed'].astype('int')
data['Deaths'] = data['Deaths'].astype('int')
data['Recovered'] = data['Recovered'].astype('int')
# Normalise country names to the spellings plotly's "country names"
# locationmode recognises.
data.loc[(data['Country/Region'] == ' Azerbaijan'), 'Country/Region'] = 'Azerbaijan'
data.loc[(data['Country/Region'] == 'US'), 'Country/Region'] = 'United States'
data.loc[(data['Country/Region'] == "('St. Martin',)"), 'Country/Region'] = 'St Martin'
data.loc[(data['Country/Region'] == "UK"), 'Country/Region'] = 'United Kingdom'
data.loc[(data['Country/Region'] == "Bahamas, The"), 'Country/Region'] = 'Bahamas'
# Sum provinces into one row per (country, date); dates become strings so
# they can serve as discrete animation frames.
covid_data = data.groupby(['Country/Region', 'ObservationDate']).sum().reset_index()
covid_data = covid_data.sort_values(['ObservationDate'])
covid_data['ObservationDate'] = covid_data['ObservationDate'].astype('str')
def getFigure():
    """Build an animated world choropleth of COVID-19 confirmed cases.

    Colour encodes log10 of the confirmed count (midpoint at 10^3); one
    animation frame per observation date.
    """
    choropleth_kwargs = dict(
        locations="Country/Region",
        color=np.log10(covid_data["Confirmed"]),
        hover_name="Country/Region",
        hover_data=["Confirmed", 'Deaths', 'Recovered'],
        locationmode="country names",
        animation_frame='ObservationDate',
        color_continuous_midpoint=3,
        color_continuous_scale=px.colors.sequential.thermal,
    )
    fig = px.choropleth(covid_data, **choropleth_kwargs)
    fig.update_layout(margin=dict(l=20, r=0, b=0, t=70, pad=0), paper_bgcolor="white", height=700,
                      title_text='Number of daily COVID-19 cases worldwide', font_size=18)
    return fig
| [
"pandas.to_datetime",
"numpy.log10",
"pandas.read_csv"
] | [((75, 162), 'pandas.read_csv', 'pd.read_csv', (['"""/opt/irisapp/src/dash/pages/page2/covid_19_data.csv"""'], {'index_col': '"""SNo"""'}), "('/opt/irisapp/src/dash/pages/page2/covid_19_data.csv',\n index_col='SNo')\n", (86, 162), True, 'import pandas as pd\n'), ((186, 225), 'pandas.to_datetime', 'pd.to_datetime', (["data['ObservationDate']"], {}), "(data['ObservationDate'])\n", (200, 225), True, 'import pandas as pd\n'), ((1122, 1155), 'numpy.log10', 'np.log10', (["covid_data['Confirmed']"], {}), "(covid_data['Confirmed'])\n", (1130, 1155), True, 'import numpy as np\n')] |
import os
import sys
from collections import OrderedDict as ODict
from math import floor
from copy import copy, deepcopy
import shutil
import errno
import matplotlib
import numpy as np
from cached_property import cached_property
from scipy import stats
from experiment_resampler import ExperimentResampler
from plotting.experiment_plotter import ExperimentPlotter
from signal_processing.resampled_matrix import ResampledMatrix
#sys.path.append('/home/crousse/code/pyphys/pyphys')
# noinspection PyUnresolvedReferences
#from pyphys import PxpParser as Parser
from signal_processing.signal_processing import low_pass, count_points_between_values
from signal_processing import mat_utils
from utils.utils import dprint, shell_hilite
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import FloatVector
r_stats = importr("stats")
class Experiment(object):
"""
.. warning:: clockwise is the first direction regardless of the real direction
"""
    def __init__(self, path, ext='png', channel='A', cell_type='', layer=''):
        """
        Load one .pxp experiment, clean its waves and set up helper objects.

        :param path: path to the experiment .pxp file
        :param string ext: File extension for the figures
        :param string channel: The igor recording channel
            (NOTE(review): not stored or used in this constructor)
        :param string cell_type: e.g. pyramid, ct, cc ...
        :param string layer: cortical layer
        """
        self.path = path
        # Experiment name = file basename without extension.
        self.name = os.path.splitext(os.path.basename(self.path))[0]
        self.parent_dir = os.path.dirname(path)
        self.dir = os.path.join(self.parent_dir, self.name)
        self.create_dir()
        self.ext = ext
        self.cell_type = cell_type # TODO: use metadata ?
        self.layer = layer
        self.exp_id, self.data = self.get_data()
        # Raw traces of the trials kept after filtering (see keep_ids).
        self.raw_data = [self.data[name] for name in self.get_raw_names()]
        self.fix_nans(self.raw_data)
        # Spike-clipped versions of the same trials.
        self.raw_clipped_baselined = [self.data[name] for name in self.get_raw_clipped_baselined_names()]
        self.re_baseline() # WARNING: baselined on pseudo minimum not mean hence re-baseline on mean
        self.fix_nans(self.raw_clipped_baselined)
        self.raw_clipped_data = self.get_raw_clipped_data()
        self.raw_clipped_baselined_avg = mat_utils.avg_waves(self.raw_clipped_baselined)
        self.bsl_spiking_freq = self.get_baseline_spiking()
        self.resampler = ExperimentResampler(self)
        self.plotter = ExperimentPlotter(self)
        self.write_tables()
    def get_pooled_vms_bsl_cw_ccw(self):
        """
        Pool the membrane-potential samples of every trial into three flat
        arrays: baseline, clockwise half-cycles and counter-clockwise
        half-cycles.

        :return: (bsls, cws, ccws) as float64 numpy arrays
        """
        bsls = []
        cws = []
        ccws = []
        for w in self.raw_clipped_baselined:
            bsl = self.extract_bsl(w)
            bsls.extend(bsl)
            # One entry per stimulation cycle; first half of each cycle is
            # clockwise, second half counter-clockwise.
            cycles = mat_utils.cutAndGetMultiple(self.cmd, w)
            for c in cycles:
                mid = int(len(c)/2)
                cw = c[:mid]
                cws.extend(cw)
                ccw = c[mid:]
                ccws.extend(ccw)
        bsls = np.array(bsls, dtype=np.float64)
        cws = np.array(cws, dtype=np.float64)
        ccws = np.array(ccws, dtype=np.float64)
        return bsls, cws, ccws
def re_baseline(self):
"""
Fixes baseline of raw_clipped_baseline to baseline at 0 not at minimum (that was used for polar plots)
:return:
"""
for i in range(len(self.raw_clipped_baselined)):
bsl = self.extract_bsl(self.raw_clipped_baselined[i])
offset = bsl.mean()
self.raw_clipped_baselined[i] = self.raw_clipped_baselined[i] - offset
def create_dir(self):
try:
os.mkdir(self.dir)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(self.dir):
pass
else:
raise
    def move_to_folder(self):
        """Move the .pxp file into the experiment's own output directory."""
        # exp_files = [os.path.join(self.parent_dir, f) for f in os.listdir(self.parent_dir) if self.name in f]
        # exp_files = [f for f in exp_files if os.path.isfile(f)]
        shutil.move(self.path, os.path.join(self.dir, self.name+'.pxp'))
def fix_nans(self, waves_list, warning_threshold=10, max_nans=35):
"""
Removes up to max_nans NaNs at the end of the last wave in waves_list.
If the number of removed NaNs exceed warning_threshold, a Warning is issued.
If the number would exceed max_nans, an exception is raised.
Threshold set rather high because in case a spike occurs at the end and the last points are NaN
then the spike interpolation will give NaN values.
.. warning:
Modifies in place
:param list waves_list:
:param int warning_threshold:
:param int max_nans:
:return:
"""
nans_indices = np.where(np.isnan(waves_list[-1]))[0]
n_nans = nans_indices.size
if n_nans > max_nans:
raise ValueError("Wave has too many NaNs, {} found ({})".format(n_nans, nans_indices))
elif n_nans > warning_threshold:
print("{}: Wave has too many NaNs, {} found ({})".format(
shell_hilite('WARNING: ', 'red', True),
n_nans,
nans_indices)
)
expected_nans = np.arange(waves_list[-1].size - n_nans, waves_list[-1].size, 1)
if not np.array_equal(nans_indices, expected_nans):
raise ValueError("The NaNs are expected to be located at the end, indices found: {}, on wave of {} points"
.format(nans_indices, waves_list[-1].size))
else:
if n_nans > 0:
waves_list[-1][nans_indices] = waves_list[-1][nans_indices[0] - 1]
    @property
    def sampling(self):
        """Sampling interval read from the Igor experiment variables (seconds per point, presumably -- see point_to_ms)."""
        return self.data['vars']['samplingInterval'][0]
def point_to_ms(self, p):
"""
Convert a point information to time (in ms)
:param int p: The point to convert
:return: The value in milliseconds
"""
return p * self.sampling * 1000
    @property
    def cmd(self):
        """The stimulation command wave (sinusoidal -- see cutAndAvgSine usage)."""
        return self.data['cpgCommand']

    @property
    def velocity(self):
        """Velocity wave of the stimulus, used for the polar plots."""
        return self.data['polarVelocity']

    @property
    def acceleration(self):
        """Acceleration wave of the stimulus, used for the polar plots."""
        return self.data['polarAcceleration']
    def get_data(self):
        """
        Parse the .pxp file and return the data folder of this experiment.

        NOTE(review): `Parser` (pyphys PxpParser) is commented out in the
        imports at the top of the file -- confirm it is provided elsewhere,
        otherwise this raises NameError. `_get_id` is defined outside the
        visible portion of this file.

        :return: (exp_id, data) -- the experiment identifier and its wave dict
        :raises ValueError: if the resolved id does not appear in self.path
        """
        parser = Parser(self.path)
        protocols = list(parser.data.keys())[2:] # Discard the 2 first folders
        # exp_id = self.__prompt_id(protocols) # TODO: make optional if used from batch or not
        exp_id = self._get_id(protocols)
        if exp_id not in self.path:
            raise ValueError("Name mismatch between experiment {} and path {}".format(exp_id, self.path))
        data = parser.data[exp_id]
        return exp_id, data
def _get_waves_names(self, match_str, good_ids=None):
"""
Gets the list of names of the waves of a certain type in the current experiment
Assumes channel A or B only in recording (-2 stripping)
"""
names = [name for name in list(self.data.keys()) if name.startswith(match_str) and name.endswith('0')] # FIXME: use channel here
# Python doesn't do natural sorting of non 0 padded strings
waves_ids = [int(name[len(match_str):-2]) for name in names]
if good_ids is not None:
waves_names = [name for rid, name in sorted(zip(waves_ids, names)) if rid in good_ids]
else:
waves_names = [name for rid, name in sorted(zip(waves_ids, names))]
return waves_names
    def get_raw_names(self): # TODO: cached_property
        """
        Return the list of raw data waves in the current experiment,
        restricted to the trials kept after filtering (keep_ids).
        """
        return self._get_waves_names('CombRaw', good_ids=self.keep_ids)

    def get_raw_clipped_baselined_names(self):
        """
        Return the list of raw data waves with spikes clipped in the current experiment
        """
        return self._get_waves_names('CombFS', good_ids=self.keep_ids)

    def get_rasters_names(self):
        """
        Returns a sorted list of names of the form occurrenceCombFSxxx of the spike times waves
        """
        return self._get_waves_names('occurrenceCombFS', good_ids=self.keep_ids)
def get_raw_clipped_data(self): # TODO: Test that minima of strides are the same
raw_clipped_data = []
for raw_wave, raw_clipped_baselined_wave in zip(self.raw_data, self.raw_clipped_baselined):
diff = raw_clipped_baselined_wave.min() - raw_wave.min()
raw_clipped_data.append(raw_clipped_baselined_wave - diff)
return raw_clipped_data
    def get_clipped_cycles(self):
        """
        .. warning::
            Will only work with 2 cycles (cutInHalf)

        :return columns: t1bsl1+t1cycle1, t1bsl2+t1cycle2, t2bsl1+t2cycle1, t2bsl2+t2cycle2
        :rtype: list
        """
        columns = []
        for trial in self.raw_clipped_data:
            bsl = self.extract_bsl(trial)
            bsls = mat_utils.cutInHalf(bsl) # WARNING: works only with 2 cycles
            cycles = mat_utils.cutAndGetMultiple(self.cmd, trial) # WARNING: works only with 2 cycles
            # Pair baseline half i with cycle i so each column is one
            # continuous baseline+cycle stretch.
            for segment_id in range(self.n_segments):
                column = np.hstack((bsls[segment_id], cycles[segment_id]))
                columns.append(column)
        return columns

    def cut_and_average(self, wave):
        """Cut `wave` into stimulation cycles (from self.cmd) and average them."""
        return mat_utils.cutAndAvgSine(self.cmd, wave)
    @property
    def baseline_end(self):
        """
        Index of the last baseline point
        :return:
        """
        return self.recording_start - 1

    @property
    def baseline_length(self):
        """Number of points in the baseline period."""
        return self.baseline_end + 1

    @property
    def baseline_duration(self):
        """
        Baseline duration in seconds
        :return:
        """
        return self.baseline_length * self.sampling

    @property
    def bsl_plot_segment_length(self):
        """
        :return: The width of a single baseline segment in points
        """
        # int(floor(...)) rather than // : baseline_length may be a float
        # coming from the Igor variables.
        return int(floor((self.baseline_length / self.n_segments)))

    @property
    def bsl_plot_segment_duration(self):
        """
        typically half of baseline duration (because plot segments)
        :return:
        """
        return self.bsl_plot_segment_length * self.sampling
def extract_bsl(self, wave):
"""
Extract the portion of wave that corresponds to the first baseline
"""
return deepcopy(wave[:self.baseline_length])
    def extract_baseline_plot_segment(self, wave):
        """
        averaged 2 halves of baseline
        :param wave:
        :return:
        """
        return mat_utils.cut_and_avg_halves(self.extract_bsl(wave))

    @cached_property
    def segment_length(self):
        """Number of points in one (averaged) stimulation cycle of the command."""
        cmd_segment = mat_utils.cutAndAvgSine(self.cmd, self.cmd)
        n_pnts_segment = cmd_segment.shape[0]
        return n_pnts_segment

    @cached_property
    def segment_duration(self):
        """
        The duration in time of a segment (e.g. a single clockwise cycle)
        :return:
        """
        segment_duration = self.segment_length * self.sampling
        return segment_duration

    @property
    def half_segment_duration(self):
        """
        Return the duration of a half display segment (i.e. a single clockwise or counter_clockwise ramp) in seconds
        :return:
        """
        mid = self.segment_duration / 2.
        return mid

    def get_command_plot_baselines(self): # TODO: merge with method below
        """Baseline portions (halved and averaged) of command, velocity and acceleration."""
        cmd_bsl = self.extract_baseline_plot_segment(self.cmd)
        vel_bsl = self.extract_baseline_plot_segment(self.velocity)
        acc_bsl = self.extract_baseline_plot_segment(self.acceleration)
        return cmd_bsl, vel_bsl, acc_bsl

    def get_command_plot_segments(self):
        """Cycle-averaged command, velocity and acceleration segments."""
        cmd_segment = self.cut_and_average(self.cmd) # WARNING: not correct if amplitude of sine decreases or increases
        vel_segment = self.cut_and_average(self.velocity)
        acc_segment = self.cut_and_average(self.acceleration)
        return cmd_segment, vel_segment, acc_segment

    @property
    def data_plot_segment_half(self):
        """Midpoint index of the averaged data segment (clockwise/counter-clockwise split)."""
        mid = int(self.data_plot_segment.size / 2)
        return mid

    @cached_property
    def n_segments(self):
        """
        Number of cycles cut from trial
        :return:
        """
        return len(mat_utils.cutAndGetMultiple(self.cmd, self.cmd))

    @cached_property
    def data_plot_segment(self):
        """
        clipped, baselined, averaged (and cut in segments)
        :return:
        """
        return self.cut_and_average(self.raw_clipped_baselined_avg)

    @cached_property
    def bsl_clipped_baselined_mean(self):
        """Baseline of the averaged clipped trace, halved and averaged."""
        bsl_mean = self.extract_bsl(self.raw_clipped_baselined_avg)
        bsl_mean = mat_utils.cut_and_avg_halves(bsl_mean) # WARNING: n segments should not be halves but self.n_segments
        return bsl_mean

    @cached_property
    def clock_wise_clipped_baselined_mean(self):
        """First half of the averaged data segment (clockwise ramp)."""
        return deepcopy(self.data_plot_segment[:self.data_plot_segment_half])

    @cached_property
    def c_clock_wise_clipped_baselined_mean(self):
        """Second half of the averaged data segment (counter-clockwise ramp)."""
        return deepcopy(self.data_plot_segment[self.data_plot_segment_half:])

    def _get_trend(self, wave):
        """Slow trend of `wave` via a 5001-point low-pass filter."""
        return low_pass(wave, 5001)

    @cached_property
    def bsl_trend(self): # TODO: use
        return self._get_trend(self.bsl_clipped_baselined_mean)

    @cached_property
    def clock_wise_trend(self):
        return self._get_trend(self.clock_wise_clipped_baselined_mean)

    @cached_property
    def c_clock_wise_trend(self):
        return self._get_trend(self.c_clock_wise_clipped_baselined_mean)

    def get_peaks_indices(self):
        """Indices (in points) of the sine peaks of the command wave."""
        return np.array(mat_utils.findSinePeaks(self.cmd))

    def _get_segment_raster(self, raster, segment_start_p, segment_end_p):
        """
        Return the part of the raster that falls between start and end points
        Raster is scaled in ms

        :param raster:
        :param int segment_start_p:
        :param int segment_end_p:
        :return:
        """
        segment_start_t = self.point_to_ms(segment_start_p) # rasters from Neuromatic are in ms
        segment_end_t = self.point_to_ms(segment_end_p)
        # Half-open interval [start, end) so adjacent segments do not share spikes.
        segment_raster = raster[np.logical_and(raster >= segment_start_t, raster < segment_end_t)]
        return segment_raster.copy()
    def get_rasters(self):
        """
        Return the rasters as a list of raster segments (t1s1, t1s2, t1s3, t1s4t2s1, t2s2, t2s3...)
        The rasters are values in seconds of spikes occurences in absolute since sweep start.
        The start time of each segment within the sweep is stored in rasters_start_ts (in seconds)

        .. glossary::
            t: trial
            s: segment

        :return: bsl_rasters, rasters, rasters_start_ts
        """
        peaks_pos = self.get_peaks_indices() # In points
        # Every other peak is a negative one; cycles run between consecutive
        # negative peaks.
        negative_peaks = peaks_pos[::2]
        bsl_rasters = []
        rasters = []
        rasters_start_ts = []
        for name in self.get_rasters_names():
            raster = self.data[name]
            for i in range(self.n_segments):
                # Baseline split into n_segments consecutive chunks so it can
                # be compared segment-by-segment with the cycles.
                bsl_start_p = i * self.bsl_plot_segment_length
                bsl_end_p = bsl_start_p + self.bsl_plot_segment_length
                bls_segt_raster = self._get_segment_raster(raster, bsl_start_p, bsl_end_p)
                bls_segt_raster = np.array(bls_segt_raster) / 1000. # NeuroMatic rasters in ms
                bsl_rasters.append(bls_segt_raster)
                start_p = negative_peaks[i]
                end_p = negative_peaks[i + 1]
                segt_raster = self._get_segment_raster(raster, start_p, end_p)
                segt_raster = np.array(segt_raster) / 1000. # NeuroMatic rasters in ms
                rasters.append(segt_raster)
                rasters_start_ts.append(start_p * self.sampling)
        return bsl_rasters, rasters, rasters_start_ts
    def get_spiking_freq_lists_per_trial(self):
        """
        Non stacked rasters (list of spiking frequencies) per trial

        :return: ODict mapping condition name -> list of frequencies (Hz),
            one entry per trial segment
        """
        bsl_rasters, rasters, rasters_starts = self.get_rasters()
        bsl_freqs = []
        clock_wise_freqs = []
        c_clock_wise_freqs = []
        first_quarter_freqs = []
        second_quarter_freqs = []
        third_quarter_freqs = []
        fourth_quarter_freqs = []
        # Using bsl_plot_segment_duration since bsl_rasters split to compare with clockwise/counterclock
        # BASELINE
        for r in bsl_rasters:
            bsl_freqs.append(r.size / self.bsl_plot_segment_duration)
        # CW CCW: spikes before/after the half-cycle midpoint
        mid = self.half_segment_duration
        for s, raster in zip(rasters_starts, rasters):
            r = raster - s # tODO: check if needs np.array
            n_spikes_clock_wise = r[r < mid].size
            clock_wise_freqs.append(n_spikes_clock_wise / mid)
            n_spikes_c_clock_wise = r[r >= mid].size
            c_clock_wise_freqs.append(n_spikes_c_clock_wise / mid)
        # CONTRA IPSI: each half-cycle split again into two quarters
        quarter_duration = mid / 2
        for s, raster in zip(rasters_starts, rasters):
            r = raster - s # tODO: check if needs np.array
            first_quarter_freqs.append(r[r < quarter_duration].size / quarter_duration)
            fourth_quarter_freqs.append(r[r >= (quarter_duration*3)].size /quarter_duration)
            second_quarter_n_spikes = r[np.logical_and(r >= quarter_duration, r < mid)].size
            second_quarter_freqs.append(second_quarter_n_spikes / quarter_duration)
            third_quarter_n_spikes = r[np.logical_and(r >= mid, r < (quarter_duration*3))].size
            third_quarter_freqs.append(third_quarter_n_spikes / quarter_duration)
        table = ODict()
        table['bsl'] = bsl_freqs
        table['clockWise'] = clock_wise_freqs
        table['cClockWise'] = c_clock_wise_freqs
        table['cw_contra'] = first_quarter_freqs
        table['ccw_contra'] = fourth_quarter_freqs
        table['cw_ipsi'] = second_quarter_freqs
        table['ccw_ipsi'] = third_quarter_freqs
        return table

    def get_spiking_frequencies(self):
        """
        Uses stacked version of rasters to produce only 3 integers necessary to compute global osi/dsi

        :return tuple(int): baseline spiking frequency, clockwise and counter_clockwise
        """
        bsl_rasters, rasters, raster_starts = self.get_rasters() # Keep absolute values for mid
        bsl_raster = self._concatenate_rasters(bsl_rasters)
        bsl_freq = bsl_raster.size / self.bsl_plot_segment_duration
        mid = self.half_segment_duration
        # Re-reference each segment raster to its own start before stacking.
        raster = self._concatenate_rasters([r - s for r, s in zip(rasters, raster_starts)])
        c_wise_freq = raster[raster < mid].size / mid
        c_c_wise_freq = raster[raster >= mid].size / mid
        return bsl_freq, c_wise_freq, c_c_wise_freq
def _concatenate_rasters(self, rasters):
"""
Concatenates the list of rasters
:param rasters:
:return:
"""
raster = np.zeros(0) # empty array
for rstr in rasters:
raster = np.hstack((raster, deepcopy(rstr))) # TODO: check if flattent would not work
return raster
    @cached_property
    def keep_ids(self):
        """
        1-based ids (Neuromatic indexes from 1) of the trials to keep:
        all raw-wave ids minus those flagged in the optional 'ind' wave.
        """
        try:
            remove_ids = self.data['ind']
        except KeyError:
            # Missing 'ind' wave is treated as "no trial rejected".
            print("{} Experiment {} 'ind' wave missing, assuming keep all.".format(
                shell_hilite("WARNING:", 'yellow', True),
                shell_hilite("{}".format(self.exp_id), 'magenta')
            ))
            remove_ids = []
        all_ids = list(range(len(self._get_waves_names('CombRaw')))) # Number of raw waves before filtering
        good_ids = [_id for _id in all_ids if _id not in remove_ids]
        good_ids = np.array(good_ids, dtype=np.uint16) + 1 # Neuromatic indexes from 1
        return good_ids
    @cached_property
    def clipped_avgs_per_trial_table(self):
        """
        .. csv-table:: table
            :delim: space

            bsl_trial_0_part1   c_wise_trial_0_part1    c_c_wise_trial_0_part1
            bsl_trial_0_part2   c_wise_trial_0_part2    c_c_wise_trial_0_part2
            bsl_trial_1_part1   c_wise_trial_1_part1    c_c_wise_trial_1_part1
            bsl_trial_1_part2   c_wise_trial_1_part2    c_c_wise_trial_1_part2

        :return ODict: table
        """
        avgs_c_wise, avgs_c_c_wise = self.extract_clipped_avgs()
        table = ODict()
        table['bsl'] = self.extract_clipped_avgs_bsl()
        table['clockWise'] = avgs_c_wise
        table['cClockWise'] = avgs_c_c_wise
        table['cw_contra'] = self.extract_cw_contra_clipped_avgs()
        table['ccw_contra'] = self.extract_ccw_contra_clipped_avgs()
        table['cw_ipsi'] = self.extract_cw_ipsi_clipped_avgs()
        table['ccw_ipsi'] = self.extract_ccw_ipsi_clipped_avgs()
        return table

    def extract_clipped_avgs_bsl(self):
        """
        To be used by compund method clipped_avgs_per_trial_table

        :return: The list of means of each baseline for each trial (2 baseline halves to have same dimension as clockwise/counterclockwise
        """
        avgs_bsl = []
        for trial in self.raw_clipped_data:
            bsl = self.extract_bsl(trial)
            halves = mat_utils.cutInHalf(bsl)
            for half in halves:
                avgs_bsl.append(half.mean())
        return avgs_bsl

    def extract_clipped_avgs(self):
        """
        To be used by clipped_avgs_per_trial_table

        :return: list of pairs of clockwise/counter_clockwise averages per trial
        """
        avgs_c_wise = []
        avgs_c_c_wise = []
        for trial in self.raw_clipped_data:
            segments = mat_utils.cutAndGetMultiple(self.cmd, trial)
            for segment in segments:
                avgs_c_wise.append(segment[:self.data_plot_segment_half].mean())
                avgs_c_c_wise.append(segment[self.data_plot_segment_half:].mean())
        return avgs_c_wise, avgs_c_c_wise

    def extract_cw_contra_clipped_avgs(self):
        """Mean of the clockwise/contra quarter of every cycle of every trial."""
        avgs = []
        for trial in self.raw_clipped_data:
            segments = mat_utils.cutAndGetMultiple(self.cmd, trial)
            for segment in segments:
                avgs.append(self.extract_cw_contra_from_plot_segment(segment).mean())
        return avgs

    def extract_ccw_contra_clipped_avgs(self):
        """Mean of the counter-clockwise/contra quarter of every cycle of every trial."""
        avgs = []
        for trial in self.raw_clipped_data:
            segments = mat_utils.cutAndGetMultiple(self.cmd, trial)
            for segment in segments:
                avgs.append(self.extract_ccw_contra_from_plot_segment(segment).mean())
        return avgs

    def extract_cw_ipsi_clipped_avgs(self):
        """Mean of the clockwise/ipsi quarter of every cycle of every trial."""
        avgs = []
        for trial in self.raw_clipped_data:
            segments = mat_utils.cutAndGetMultiple(self.cmd, trial)
            for segment in segments:
                avgs.append(self.extract_cw_ipsi_from_plot_segment(segment).mean())
        return avgs

    def extract_ccw_ipsi_clipped_avgs(self):
        """Mean of the counter-clockwise/ipsi quarter of every cycle of every trial."""
        avgs = []
        for trial in self.raw_clipped_data:
            segments = mat_utils.cutAndGetMultiple(self.cmd, trial)
            for segment in segments:
                avgs.append(self.extract_ccw_ipsi_from_plot_segment(segment).mean())
        return avgs
def extract_cw_contra_from_plot_segment(self, segment):
quarter_len = int(len(segment) / 4.0) # OPTIMISE: extract
first_quarter = segment[:quarter_len]
return deepcopy(first_quarter) # OPTIMISE: check if deepcopy necessary
def extract_ccw_contra_from_plot_segment(self, segment):
quarter_len = int(len(segment) / 4.0) # OPTIMISE: extract
last_quarter = segment[-quarter_len:]
return deepcopy(last_quarter) # OPTIMISE: check if deepcopy necessary
def extract_cw_ipsi_from_plot_segment(self, segment):
quarter_len = int(len(segment) / 4.0) # OPTIMISE: extract
mid = int(len(segment) / 2.0) # TODO: use built in method
second_quarter = segment[quarter_len:mid]
return deepcopy(second_quarter)
def extract_ccw_ipsi_from_plot_segment(self, segment):
quarter_len = int(len(segment) / 4.0) # OPTIMISE: extract
mid = int(len(segment) / 2.0) # TODO: use built in method
third_quarter = segment[mid:mid+quarter_len]
return deepcopy(third_quarter)
def independant_t_test(self, vect1, vect2):
"""
Performs an independant t test and returns only the p value
:param vect1:
:param vect2:
:return:
"""
return stats.ttest_ind(vect1, vect2)[1]
def paired_t_test(self, vect1, vect2):
"""
Performs a paired t_test and returns only the p value
:param vect1:
:param vect2:
:return:
"""
try:
p_value = stats.ttest_rel(vect1, vect2)[1]
except ValueError as err:
raise ValueError("{}; array lengths: {}, {}".format(err, len(vect1), len(vect2)))
return p_value
    def wilcoxon_test(self, vect1, vect2):
        """
        Paired, exact Wilcoxon signed-rank test run through R (rpy2).

        :param vect1:
        :param vect2:
        :return: the p value only
        :raises ValueError: if the two vectors have different lengths
        """
        if len(vect1) != len(vect2):
            raise ValueError("Arrays have different length: {}, {} (exp: {})".
                             format(len(vect1), len(vect2), self.name))
        results = r_stats.wilcox_test(FloatVector(vect1), FloatVector(vect2), paired=True, exact=True)
        # R returns a named list; pick the 'p.value' component.
        return results[results.names.index('p.value')][0]
def get_deltas(self, bsl_avg, c_wise_avg, cc_wise_avg, cw_contra_avg, ccw_contra_avg, cw_ipsi_avg, ccw_ipsi_avg):
bsl_delta = 0
c_wise_delta = c_wise_avg - bsl_avg
cc_wise_delta = cc_wise_avg - bsl_avg
cw_contra_delta = cw_contra_avg - bsl_avg
ccw_contra_delta = ccw_contra_avg - bsl_avg
cw_ipsi_delta = cw_ipsi_avg - bsl_avg
ccw_ipsi_delta = ccw_ipsi_avg - bsl_avg
return bsl_delta, c_wise_delta, cc_wise_delta, cw_contra_delta, ccw_contra_delta, cw_ipsi_delta, ccw_ipsi_delta
def get_dsi(self, bsl_avg, c_wise_avg, cc_wise_avg):
"""
:param c_wise_avg:
:param cc_wise_avg:
:return:
"""
_, c_wise_delta, cc_wise_delta = self.get_deltas(bsl_avg, c_wise_avg, cc_wise_avg, 0, 0, 0, 0)[:3] # HACK: to avoid rewriting function without last 4 args
if abs(c_wise_delta) + abs(cc_wise_delta) != abs(c_wise_delta + cc_wise_delta): # different signs
return 1.
preferred_response = max(abs(c_wise_delta), abs(cc_wise_delta))
non_preferred_response = min(abs(c_wise_delta), abs(cc_wise_delta))
if (preferred_response + non_preferred_response) == 0:
return 'NaN'
else:
return (preferred_response - non_preferred_response) / (preferred_response + non_preferred_response)
    def get_max_diff(self):
        """
        The maximum duration bewtween two elements (e.g. angles) in the spike normalisation.
        This function is only to avoid hard coding the number (1000ms or 1 s).

        :return: 1000
        """
        return 1000
def normalise_spiking(self, levels_w_name):
    """
    Resample spiking by the levels wave named `levels_w_name` (e.g. degrees,
    degrees/sec ... depending on the wave).

    The levels wave stored in self.data (igor data) is transposed from
    (nDegrees, nOrientations) to (nOrientations, nDegrees) before being
    handed to `normalise_spiking_sampling_method_2`, which returns one
    spike-count table and one bin-duration table (rows = segments,
    columns = levels, third dimension = trials).

    :param string levels_w_name: name in self.data of the levels wave
    :return: (spiking, times)
    """
    levels = deepcopy(self.data[levels_w_name])
    levels = levels.transpose((1, 0))  # (nOrientations, nDegrees)
    return self.normalise_spiking_sampling_method_2(levels)
def normalise_spiking_sampling_method_2(self, levels_wave):
    """
    Normalise the spiking of every trial by the bins defined in `levels_wave`
    (degrees, degrees/sec, ... depending on the wave) and convert the bin
    durations from ms (NeuroMatic convention) to seconds.

    :param levels_wave: level crossing times, one row per orientation
    :return: (norm_raster, durations), both transposed to
        (nBins, nTrials, nOrientations)
    """
    per_trial_counts = []
    per_trial_durations = []
    max_diff = self.get_max_diff()
    for trial_name in self.get_rasters_names():  # one raster per trial
        raster = np.squeeze(deepcopy(self.data[trial_name]))
        counts, bin_durations = self._normalise_spike_sampling_method_2(raster, levels_wave, max_diff)
        per_trial_counts.append(counts)
        per_trial_durations.append(bin_durations)
    norm_raster = np.array(per_trial_counts).transpose((2, 0, 1))
    durations = np.array(per_trial_durations).transpose((2, 0, 1)) / 1000.  # NM uses ms
    return norm_raster, durations
def _normalise_spike_sampling_method_2(self, raster, levels_wave, max_diff): # WARNING: explain
    """
    Count spikes between each pair of consecutive level times and record the
    pair's duration.

    Bins longer than `max_diff` are marked NaN (the two level crossings are
    not contiguous in time). The last column of each row is left at 0.

    :param raster: spike times of one trial
    :param np.array levels_wave: level crossing times, one row per orientation
    :param float max_diff: maximum accepted bin duration
    :return: (norm_raster, durations), both of levels_wave.shape
    """
    norm_raster = np.zeros(levels_wave.shape)
    durations = np.zeros(levels_wave.shape)
    n_rows, n_cols = levels_wave.shape
    for row in range(n_rows):
        for col in range(n_cols - 1):
            start_t = levels_wave[row, col]
            end_t = levels_wave[row, col + 1]
            bin_duration = abs(end_t - start_t)  # abs: levels_wave sorted by level, not by time
            if bin_duration < max_diff:
                norm_raster[row, col] = count_points_between_values(start_t, end_t, raster)
            else:
                norm_raster[row, col] = np.nan
            durations[row, col] = bin_duration
    return norm_raster, durations
def get_baseline_spiking(self):
    """
    Spiking frequency of the baseline window of each raster, computed as
    n_spikes / self.baseline_duration. Used for normalised matrices.

    :return: np.array of frequencies, one per raster of self.get_rasters()
    """
    bsl_rasters = self.get_rasters()[0]
    spike_counts = []
    for wave_idx, raster in enumerate(bsl_rasters):
        n_spikes = 0
        if len(raster) > 0:
            try:
                n_spikes = raster.size
            except RuntimeWarning:
                print('{} Could not get spikes in baseline from the following wave: {}'.format(
                    shell_hilite('Error:', 'red', True),
                    wave_idx)
                )
                n_spikes = 0
        dprint('Wave: {}, number of spikes in baseline: {}.'.format(wave_idx, n_spikes))
        spike_counts.append(n_spikes)
    return np.array(spike_counts) / self.baseline_duration
@cached_property
def recording_start(self):
    """
    Index of the first non-zero point of self.cmd (None if cmd is all zeros).
    """
    return next((idx for idx, value in enumerate(self.cmd) if value != 0), None)
@cached_property
def recording_end(self):  # TODO: check if used
    """
    Index of the last non-zero point of self.cmd (None if cmd is all zeros).

    Bug fix: the previous loop iterated range(len(self.cmd), 0, -1), which
    starts one past the last valid index (IndexError on the very first
    access) and, because the stop bound was 0, never examined index 0.
    """
    for i in range(len(self.cmd) - 1, -1, -1):
        if self.cmd[i] != 0:
            return i
def analyse(self, do_spiking_difference=False, do_spiking_ratio=False):
    """
    Run the analysis (stats and plots) of every matrix held by self.

    :param bool do_spiking_difference: forwarded to each matrix
    :param bool do_spiking_ratio: forwarded to each matrix
    """
    for matrix in self.matrices:
        matrix.analyse(do_spiking_difference=do_spiking_difference,
                       do_spiking_ratio=do_spiking_ratio)
def write(self):
    """
    Save the binned data of every matrix in self to csv, each under its own
    matrix name.
    """
    for matrix in self.matrices:
        matrix.save_binned_data(matrix.matrix_name)
def __prompt_id(self, keys):
    """
    Prompt the user to pick one of the available protocols and validate the answer.

    Keeps asking until a valid experiment id is typed in.
    :param keys: the protocol names to choose from
    :return: the selected index into keys
    """
    exp_ids = list(range(len(keys)))
    while True:
        print('Experiments available:')
        for _id, key in zip(exp_ids, keys):
            print('\t{}: {}'.format(_id, key))
        prompt = "Please type in the number corresponding to the protocol: "
        try:
            exp_id = int(input(prompt))
        except ValueError:
            exp_id = None  # non-numeric answer: fall through to the retry message
        if exp_id in exp_ids:
            return exp_id
        else:
            # Bug fix: this message used to be a bare (unused) string expression
            # and was therefore never shown to the user.
            print('Please select a valid experiment id (from {})'.format(exp_ids))
def _get_id(self, protocols):
    """Return the first protocol key starting with 'm' (IndexError if there is none)."""
    m_keys = [key for key in protocols if key.startswith('m')]
    return m_keys[0]
def write_tables(self):
    """
    Write the three csv tables of this experiment: clipped traces (averages
    across trials), clipped averages per trial and spiking frequencies per
    trial (the latter flagged as spiking data).
    """
    traces_path = os.path.join(self.dir, '{}_clipped_traces.csv'.format(self.name))
    self.write_avgs_across_trials_table(traces_path)
    per_trial_path = os.path.join(self.dir, '{}_clipped_avgs_per_trial.csv'.format(self.name))
    self.write_avgs_per_trial_table(per_trial_path, self.clipped_avgs_per_trial_table)
    spiking_path = os.path.join(self.dir, '{}_spiking_frequencies_per_trial.csv'.format(self.name))
    self.write_avgs_per_trial_table(spiking_path, self.get_spiking_freq_lists_per_trial(), True)
def write_avgs_per_trial_table(self, path, table, is_spiking=False):
        """
        Write one csv with the per-trial averages of every condition, a
        mean/delta/SD summary, paired Wilcoxon statistics between conditions
        and the DSI. Used for avg Vm per trial and avg spiking per trial.
        :param string path: The path to save the csv file
        :param ODict table: condition name -> list of per-trial averages
        :param bool is_spiking: Whether Vm or spiking data (changes the DSI
            source and stores self.dsi as a side effect)
        :return:
        """
        header = ('bsl', 'clockWise', 'cClockWise', 'cw_contra', 'ccw_contra', 'cw_ipsi', 'ccw_ipsi')
        for k in header:
            assert k in list(table.keys()), 'key {} not in {}'.format(k, list(table.keys()))
            # NOTE: mutates the caller's table in place (lists become ndarrays)
            table[k] = np.array(table[k])
        with open(path, 'w') as csv_file:
            # ALL TRIALS
            # NOTE(review): the header row is tab-separated while the data rows
            # below are comma-separated -- confirm this asymmetry is intended.
            csv_file.write('\t'.join(header) + '\n')
            for elements in zip(*[table[k] for k in header]): # REFACTOR: rename elements
                csv_file.write('{},{},{},{},{},{},{}\n'.format(*elements)) # trial1 cycle1, t1c2, t2c1, t2c2, ... tnc1, tnc2
            csv_file.write('\n')
            # SUMMARY: mean, delta from baseline and SD of each condition
            averages = [table[k].mean() for k in header]
            sds = [table[k].std() for k in header]
            csv_file.write('Mean:\n{},{},{},{},{},{},{}\n'.format(*averages))
            csv_file.write('Delta:\n{},{},{},{},{},{},{}\n\n'.format(*self.get_deltas(*averages)))
            csv_file.write('SD:\n{},{},{},{},{},{},{}\n\n'.format(*sds))
            # STATS
            csv_file.write('Stats (Wilcoxon signed-rank):\n')
            import warnings
            # Warnings are silenced while running the Wilcoxon tests, then
            # globally turned into errors again below (process-wide side effect).
            warnings.filterwarnings('ignore')
            csv_file.write('baseline/clockwise,p-value:,{}\n'.
                           format(self.wilcoxon_test(table[header[0]], table[header[1]])))
            csv_file.write('clockwise/cClockWise,p-value:,{}\n'.
                           format(self.wilcoxon_test(table[header[1]], table[header[2]])))
            csv_file.write('baseline/cClockwise,p-value:,{}\n'.
                           format(self.wilcoxon_test(table[header[0]], table[header[2]])))
            csv_file.write('baseline/cw_contra,p-value:,{}\n'.
                           format(self.wilcoxon_test(table[header[0]], table[header[3]])))
            csv_file.write('baseline/ccw_contra,p-value:,{}\n'.
                           format(self.wilcoxon_test(table[header[0]], table[header[4]])))
            csv_file.write('baseline/cw_ipsi,p-value:,{}\n'.
                           format(self.wilcoxon_test(table[header[0]], table[header[5]])))
            csv_file.write('baseline/ccw_ipsi,p-value:,{}\n'.
                           format(self.wilcoxon_test(table[header[0]], table[header[6]])))
            warnings.filterwarnings('error')
            csv_file.write('\n')
            # DSI
            if is_spiking:
                dsi = self.get_dsi(*self.get_spiking_frequencies())
                self.dsi = dsi # WARNING: set outside __init__
            else:
                dsi = self.get_dsi(self.bsl_clipped_baselined_mean.mean(),
                                   self.clock_wise_clipped_baselined_mean.mean(),
                                   self.c_clock_wise_clipped_baselined_mean.mean())
            csv_file.write('\n')
            csv_file.write('DSI:,{}\n'.format(dsi))
def write_avgs_across_trials_table(self, path, sep=','):
    """
    Write the clipped cycles as one csv: one column per cycle plus a final
    per-line average column, terminated by a blank line.

    :param string path: destination csv path
    :param string sep: csv field separator
    """
    with open(path, 'w') as csv_file:
        columns = self.get_clipped_cycles()
        header_cells = ['cycle{}'.format(idx) for idx in range(len(columns))]
        csv_file.write(sep.join(header_cells) + sep + 'average\n')
        for row in zip(*columns):  # transpose columns to lines
            cells = [str(value) for value in row]
            csv_file.write(sep.join(cells) + sep + str(np.mean(row)))
            csv_file.write('\n')
        csv_file.write('\n')
@property
def duration(self):
    """Total duration of the command wave: sampling period * number of samples."""
    n_samples = self.cmd.shape[0]
    return self.sampling * n_samples
def resample_matrices(self):
    """
    Resample the data in position, velocity and acceleration so that each
    degree (or degree/s, degree/s/s) is evenly represented, then build the
    six ResampledMatrix objects (Vm and spiking for each of the three spaces)
    into self.matrices.
    """
    self.position_spiking, self.position_durations = self.normalise_spiking('degreesLocs')  # FIXME: investigate
    self.velocity_spiking, self.velocity_durations = self.normalise_spiking('velocitiesLocs')
    self.acceleration_spiking, self.acceleration_durations = self.normalise_spiking('accelerationsLocs')
    stats_path = self._init_stats_file()
    matrix_names = ('normalisedMatrix', 'rasterMatrix',
                    'velocityNormalisedMatrix', 'velocityRasterMatrix',
                    'accelerationNormalisedMatrix', 'accelerationRasterMatrix')
    self.matrices = tuple(ResampledMatrix(self, name, self.ext, stats_path)
                          for name in matrix_names)
def _init_stats_file(self):
    """Create (or truncate) the per-experiment stats file and return its path."""
    stats_path = os.path.join(self.dir, '{}_stats.txt'.format(self.name))
    with open(stats_path, 'w') as out_file:  # ensure the file is empty
        out_file.write('')
    return stats_path
| [
"utils.utils.shell_hilite",
"math.floor",
"numpy.hstack",
"rpy2.robjects.vectors.FloatVector",
"experiment_resampler.ExperimentResampler",
"numpy.array",
"plotting.experiment_plotter.ExperimentPlotter",
"signal_processing.resampled_matrix.ResampledMatrix",
"signal_processing.mat_utils.findSinePeaks"... | [((916, 932), 'rpy2.robjects.packages.importr', 'importr', (['"""stats"""'], {}), "('stats')\n", (923, 932), False, 'from rpy2.robjects.packages import importr\n'), ((1524, 1545), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (1539, 1545), False, 'import os\n'), ((1565, 1605), 'os.path.join', 'os.path.join', (['self.parent_dir', 'self.name'], {}), '(self.parent_dir, self.name)\n', (1577, 1605), False, 'import os\n'), ((2265, 2312), 'signal_processing.mat_utils.avg_waves', 'mat_utils.avg_waves', (['self.raw_clipped_baselined'], {}), '(self.raw_clipped_baselined)\n', (2284, 2312), False, 'from signal_processing import mat_utils\n'), ((2400, 2425), 'experiment_resampler.ExperimentResampler', 'ExperimentResampler', (['self'], {}), '(self)\n', (2419, 2425), False, 'from experiment_resampler import ExperimentResampler\n'), ((2449, 2472), 'plotting.experiment_plotter.ExperimentPlotter', 'ExperimentPlotter', (['self'], {}), '(self)\n', (2466, 2472), False, 'from plotting.experiment_plotter import ExperimentPlotter\n'), ((2973, 3005), 'numpy.array', 'np.array', (['bsls'], {'dtype': 'np.float64'}), '(bsls, dtype=np.float64)\n', (2981, 3005), True, 'import numpy as np\n'), ((3020, 3051), 'numpy.array', 'np.array', (['cws'], {'dtype': 'np.float64'}), '(cws, dtype=np.float64)\n', (3028, 3051), True, 'import numpy as np\n'), ((3067, 3099), 'numpy.array', 'np.array', (['ccws'], {'dtype': 'np.float64'}), '(ccws, dtype=np.float64)\n', (3075, 3099), True, 'import numpy as np\n'), ((5209, 5272), 'numpy.arange', 'np.arange', (['(waves_list[-1].size - n_nans)', 'waves_list[-1].size', '(1)'], {}), '(waves_list[-1].size - n_nans, waves_list[-1].size, 1)\n', (5218, 5272), True, 'import numpy as np\n'), ((9326, 9365), 'signal_processing.mat_utils.cutAndAvgSine', 'mat_utils.cutAndAvgSine', (['self.cmd', 'wave'], {}), '(self.cmd, wave)\n', (9349, 9365), False, 'from signal_processing import mat_utils\n'), ((10380, 10417), 
'copy.deepcopy', 'deepcopy', (['wave[:self.baseline_length]'], {}), '(wave[:self.baseline_length])\n', (10388, 10417), False, 'from copy import copy, deepcopy\n'), ((10713, 10756), 'signal_processing.mat_utils.cutAndAvgSine', 'mat_utils.cutAndAvgSine', (['self.cmd', 'self.cmd'], {}), '(self.cmd, self.cmd)\n', (10736, 10756), False, 'from signal_processing import mat_utils\n'), ((12720, 12758), 'signal_processing.mat_utils.cut_and_avg_halves', 'mat_utils.cut_and_avg_halves', (['bsl_mean'], {}), '(bsl_mean)\n', (12748, 12758), False, 'from signal_processing import mat_utils\n'), ((12933, 12995), 'copy.deepcopy', 'deepcopy', (['self.data_plot_segment[:self.data_plot_segment_half]'], {}), '(self.data_plot_segment[:self.data_plot_segment_half])\n', (12941, 12995), False, 'from copy import copy, deepcopy\n'), ((13084, 13146), 'copy.deepcopy', 'deepcopy', (['self.data_plot_segment[self.data_plot_segment_half:]'], {}), '(self.data_plot_segment[self.data_plot_segment_half:])\n', (13092, 13146), False, 'from copy import copy, deepcopy\n'), ((13195, 13215), 'signal_processing.signal_processing.low_pass', 'low_pass', (['wave', '(5001)'], {}), '(wave, 5001)\n', (13203, 13215), False, 'from signal_processing.signal_processing import low_pass, count_points_between_values\n'), ((17668, 17675), 'collections.OrderedDict', 'ODict', ([], {}), '()\n', (17673, 17675), True, 'from collections import OrderedDict as ODict\n'), ((18972, 18983), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (18980, 18983), True, 'import numpy as np\n'), ((20389, 20396), 'collections.OrderedDict', 'ODict', ([], {}), '()\n', (20394, 20396), True, 'from collections import OrderedDict as ODict\n'), ((23414, 23437), 'copy.deepcopy', 'deepcopy', (['first_quarter'], {}), '(first_quarter)\n', (23422, 23437), False, 'from copy import copy, deepcopy\n'), ((23669, 23691), 'copy.deepcopy', 'deepcopy', (['last_quarter'], {}), '(last_quarter)\n', (23677, 23691), False, 'from copy import copy, deepcopy\n'), ((23991, 
24015), 'copy.deepcopy', 'deepcopy', (['second_quarter'], {}), '(second_quarter)\n', (23999, 24015), False, 'from copy import copy, deepcopy\n'), ((24278, 24301), 'copy.deepcopy', 'deepcopy', (['third_quarter'], {}), '(third_quarter)\n', (24286, 24301), False, 'from copy import copy, deepcopy\n'), ((30278, 30305), 'numpy.zeros', 'np.zeros', (['levels_wave.shape'], {}), '(levels_wave.shape)\n', (30286, 30305), True, 'import numpy as np\n'), ((30326, 30353), 'numpy.zeros', 'np.zeros', (['levels_wave.shape'], {}), '(levels_wave.shape)\n', (30334, 30353), True, 'import numpy as np\n'), ((38519, 38582), 'signal_processing.resampled_matrix.ResampledMatrix', 'ResampledMatrix', (['self', '"""normalisedMatrix"""', 'self.ext', 'stats_path'], {}), "(self, 'normalisedMatrix', self.ext, stats_path)\n", (38534, 38582), False, 'from signal_processing.resampled_matrix import ResampledMatrix\n'), ((38601, 38660), 'signal_processing.resampled_matrix.ResampledMatrix', 'ResampledMatrix', (['self', '"""rasterMatrix"""', 'self.ext', 'stats_path'], {}), "(self, 'rasterMatrix', self.ext, stats_path)\n", (38616, 38660), False, 'from signal_processing.resampled_matrix import ResampledMatrix\n'), ((38682, 38753), 'signal_processing.resampled_matrix.ResampledMatrix', 'ResampledMatrix', (['self', '"""velocityNormalisedMatrix"""', 'self.ext', 'stats_path'], {}), "(self, 'velocityNormalisedMatrix', self.ext, stats_path)\n", (38697, 38753), False, 'from signal_processing.resampled_matrix import ResampledMatrix\n'), ((38780, 38847), 'signal_processing.resampled_matrix.ResampledMatrix', 'ResampledMatrix', (['self', '"""velocityRasterMatrix"""', 'self.ext', 'stats_path'], {}), "(self, 'velocityRasterMatrix', self.ext, stats_path)\n", (38795, 38847), False, 'from signal_processing.resampled_matrix import ResampledMatrix\n'), ((38869, 38944), 'signal_processing.resampled_matrix.ResampledMatrix', 'ResampledMatrix', (['self', '"""accelerationNormalisedMatrix"""', 'self.ext', 'stats_path'], {}), "(self, 
'accelerationNormalisedMatrix', self.ext, stats_path)\n", (38884, 38944), False, 'from signal_processing.resampled_matrix import ResampledMatrix\n'), ((38971, 39042), 'signal_processing.resampled_matrix.ResampledMatrix', 'ResampledMatrix', (['self', '"""accelerationRasterMatrix"""', 'self.ext', 'stats_path'], {}), "(self, 'accelerationRasterMatrix', self.ext, stats_path)\n", (38986, 39042), False, 'from signal_processing.resampled_matrix import ResampledMatrix\n'), ((2729, 2769), 'signal_processing.mat_utils.cutAndGetMultiple', 'mat_utils.cutAndGetMultiple', (['self.cmd', 'w'], {}), '(self.cmd, w)\n', (2756, 2769), False, 'from signal_processing import mat_utils\n'), ((3602, 3620), 'os.mkdir', 'os.mkdir', (['self.dir'], {}), '(self.dir)\n', (3610, 3620), False, 'import os\n'), ((4023, 4065), 'os.path.join', 'os.path.join', (['self.dir', "(self.name + '.pxp')"], {}), "(self.dir, self.name + '.pxp')\n", (4035, 4065), False, 'import os\n'), ((5288, 5331), 'numpy.array_equal', 'np.array_equal', (['nans_indices', 'expected_nans'], {}), '(nans_indices, expected_nans)\n', (5302, 5331), True, 'import numpy as np\n'), ((8917, 8941), 'signal_processing.mat_utils.cutInHalf', 'mat_utils.cutInHalf', (['bsl'], {}), '(bsl)\n', (8936, 8941), False, 'from signal_processing import mat_utils\n'), ((9000, 9044), 'signal_processing.mat_utils.cutAndGetMultiple', 'mat_utils.cutAndGetMultiple', (['self.cmd', 'trial'], {}), '(self.cmd, trial)\n', (9027, 9044), False, 'from signal_processing import mat_utils\n'), ((9958, 10003), 'math.floor', 'floor', (['(self.baseline_length / self.n_segments)'], {}), '(self.baseline_length / self.n_segments)\n', (9963, 10003), False, 'from math import floor\n'), ((12296, 12343), 'signal_processing.mat_utils.cutAndGetMultiple', 'mat_utils.cutAndGetMultiple', (['self.cmd', 'self.cmd'], {}), '(self.cmd, self.cmd)\n', (12323, 12343), False, 'from signal_processing import mat_utils\n'), ((13652, 13685), 'signal_processing.mat_utils.findSinePeaks', 
'mat_utils.findSinePeaks', (['self.cmd'], {}), '(self.cmd)\n', (13675, 13685), False, 'from signal_processing import mat_utils\n'), ((14192, 14257), 'numpy.logical_and', 'np.logical_and', (['(raster >= segment_start_t)', '(raster < segment_end_t)'], {}), '(raster >= segment_start_t, raster < segment_end_t)\n', (14206, 14257), True, 'import numpy as np\n'), ((19723, 19758), 'numpy.array', 'np.array', (['good_ids'], {'dtype': 'np.uint16'}), '(good_ids, dtype=np.uint16)\n', (19731, 19758), True, 'import numpy as np\n'), ((21222, 21246), 'signal_processing.mat_utils.cutInHalf', 'mat_utils.cutInHalf', (['bsl'], {}), '(bsl)\n', (21241, 21246), False, 'from signal_processing import mat_utils\n'), ((21661, 21705), 'signal_processing.mat_utils.cutAndGetMultiple', 'mat_utils.cutAndGetMultiple', (['self.cmd', 'trial'], {}), '(self.cmd, trial)\n', (21688, 21705), False, 'from signal_processing import mat_utils\n'), ((22081, 22125), 'signal_processing.mat_utils.cutAndGetMultiple', 'mat_utils.cutAndGetMultiple', (['self.cmd', 'trial'], {}), '(self.cmd, trial)\n', (22108, 22125), False, 'from signal_processing import mat_utils\n'), ((22402, 22446), 'signal_processing.mat_utils.cutAndGetMultiple', 'mat_utils.cutAndGetMultiple', (['self.cmd', 'trial'], {}), '(self.cmd, trial)\n', (22429, 22446), False, 'from signal_processing import mat_utils\n'), ((22721, 22765), 'signal_processing.mat_utils.cutAndGetMultiple', 'mat_utils.cutAndGetMultiple', (['self.cmd', 'trial'], {}), '(self.cmd, trial)\n', (22748, 22765), False, 'from signal_processing import mat_utils\n'), ((23038, 23082), 'signal_processing.mat_utils.cutAndGetMultiple', 'mat_utils.cutAndGetMultiple', (['self.cmd', 'trial'], {}), '(self.cmd, trial)\n', (23065, 23082), False, 'from signal_processing import mat_utils\n'), ((24519, 24548), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['vect1', 'vect2'], {}), '(vect1, vect2)\n', (24534, 24548), False, 'from scipy import stats\n'), ((25233, 25251), 
'rpy2.robjects.vectors.FloatVector', 'FloatVector', (['vect1'], {}), '(vect1)\n', (25244, 25251), False, 'from rpy2.robjects.vectors import FloatVector\n'), ((25253, 25271), 'rpy2.robjects.vectors.FloatVector', 'FloatVector', (['vect2'], {}), '(vect2)\n', (25264, 25271), False, 'from rpy2.robjects.vectors import FloatVector\n'), ((31962, 31980), 'numpy.array', 'np.array', (['spike_ns'], {}), '(spike_ns)\n', (31970, 31980), True, 'import numpy as np\n'), ((34736, 34754), 'numpy.array', 'np.array', (['table[k]'], {}), '(table[k])\n', (34744, 34754), True, 'import numpy as np\n'), ((35608, 35641), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (35631, 35641), False, 'import warnings\n'), ((36733, 36765), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (36756, 36765), False, 'import warnings\n'), ((1466, 1493), 'os.path.basename', 'os.path.basename', (['self.path'], {}), '(self.path)\n', (1482, 1493), False, 'import os\n'), ((4757, 4781), 'numpy.isnan', 'np.isnan', (['waves_list[-1]'], {}), '(waves_list[-1])\n', (4765, 4781), True, 'import numpy as np\n'), ((9161, 9210), 'numpy.hstack', 'np.hstack', (['(bsls[segment_id], cycles[segment_id])'], {}), '((bsls[segment_id], cycles[segment_id]))\n', (9170, 9210), True, 'import numpy as np\n'), ((24779, 24808), 'scipy.stats.ttest_rel', 'stats.ttest_rel', (['vect1', 'vect2'], {}), '(vect1, vect2)\n', (24794, 24808), False, 'from scipy import stats\n'), ((28795, 28829), 'copy.deepcopy', 'deepcopy', (['self.data[levels_w_name]'], {}), '(self.data[levels_w_name])\n', (28803, 28829), False, 'from copy import copy, deepcopy\n'), ((29412, 29437), 'copy.deepcopy', 'deepcopy', (['self.data[name]'], {}), '(self.data[name])\n', (29420, 29437), False, 'from copy import copy, deepcopy\n'), ((29720, 29741), 'numpy.array', 'np.array', (['norm_raster'], {}), '(norm_raster)\n', (29728, 29741), True, 'import numpy as np\n'), ((29785, 29804), 
'numpy.array', 'np.array', (['durations'], {}), '(durations)\n', (29793, 29804), True, 'import numpy as np\n'), ((3697, 3720), 'os.path.isdir', 'os.path.isdir', (['self.dir'], {}), '(self.dir)\n', (3710, 3720), False, 'import os\n'), ((15327, 15352), 'numpy.array', 'np.array', (['bls_segt_raster'], {}), '(bls_segt_raster)\n', (15335, 15352), True, 'import numpy as np\n'), ((15641, 15662), 'numpy.array', 'np.array', (['segt_raster'], {}), '(segt_raster)\n', (15649, 15662), True, 'import numpy as np\n'), ((17335, 17381), 'numpy.logical_and', 'np.logical_and', (['(r >= quarter_duration)', '(r < mid)'], {}), '(r >= quarter_duration, r < mid)\n', (17349, 17381), True, 'import numpy as np\n'), ((17512, 17562), 'numpy.logical_and', 'np.logical_and', (['(r >= mid)', '(r < quarter_duration * 3)'], {}), '(r >= mid, r < quarter_duration * 3)\n', (17526, 17562), True, 'import numpy as np\n'), ((19068, 19082), 'copy.deepcopy', 'deepcopy', (['rstr'], {}), '(rstr)\n', (19076, 19082), False, 'from copy import copy, deepcopy\n'), ((30737, 30794), 'signal_processing.signal_processing.count_points_between_values', 'count_points_between_values', (['start_time', 'end_time', 'raster'], {}), '(start_time, end_time, raster)\n', (30764, 30794), False, 'from signal_processing.signal_processing import low_pass, count_points_between_values\n'), ((5077, 5115), 'utils.utils.shell_hilite', 'shell_hilite', (['"""WARNING: """', '"""red"""', '(True)'], {}), "('WARNING: ', 'red', True)\n", (5089, 5115), False, 'from utils.utils import dprint, shell_hilite\n'), ((19375, 19415), 'utils.utils.shell_hilite', 'shell_hilite', (['"""WARNING:"""', '"""yellow"""', '(True)'], {}), "('WARNING:', 'yellow', True)\n", (19387, 19415), False, 'from utils.utils import dprint, shell_hilite\n'), ((37765, 37775), 'numpy.mean', 'np.mean', (['l'], {}), '(l)\n', (37772, 37775), True, 'import numpy as np\n'), ((31631, 31666), 'utils.utils.shell_hilite', 'shell_hilite', (['"""Error:"""', '"""red"""', '(True)'], {}), 
"('Error:', 'red', True)\n", (31643, 31666), False, 'from utils.utils import dprint, shell_hilite\n')] |
"""Implementation of Receiver Operator Characteristic."""
import numpy as np
from warnings import warn
def _check(scores, true_labels):
    """Validate ROC inputs: 1d arrays of equal size, labels in {0, 1}; warn on ties."""
    if scores.ndim != 1 or true_labels.ndim != 1:
        raise ValueError("Scores and labels must be one dimensional arrays")
    if scores.size != true_labels.size:
        raise ValueError("Scores and labels must have same number of entries")
    # labels must be a subset of {0, 1} and {0, 1} a subset of the labels' values
    n_offending = np.setdiff1d(np.array([0, 1]), true_labels).size
    n_offending += np.setdiff1d(true_labels, np.array([0, 1])).size
    if n_offending > 0:
        raise ValueError("True sample class labels\n"
                         "must be either 0 or 1, exclusively.")
    if np.unique(scores).size != scores.size:
        warn("Duplicate scores detected, may cause arbitrary sample ranking.")
class Roc:
    """Receiver Operating Characteristic.

    Args:
        s: Sample scores; relatively high scores indicate positive-class
            samples ((N sample,) ndarray)
        t: true sample labels [0, 1] ((N sample,) ndarray)
    Important Attributes:
        self.tpr: true positive rates (ndarray)
        self.fpr: false positive rates (ndarray)
        self.auc: Area Under Curve (float)
    Public Methods:
        to_dict: returns dictionary of important attributes.
    Raises:
        ValueError: number of input entries do not match, inputs are not 1d,
            or true class labels are not exclusively [0, 1].
        UserWarning: if duplicate sample scores are detected.
    """
    def __init__(self, s, t):
        _check(s, t)
        self.N = len(s)
        self.Npositive = np.sum(t)
        self.Nnegative = self.N - self.Npositive
        # per-sample step sizes of the ROC curve
        self.deltaTPR = 1. / self.Npositive
        self.deltaFPR = 1. / self.Nnegative
        self.tpr = np.zeros(self.N)
        self.fpr = np.zeros(self.N)
        self._sort_data(s, t)
        self._curve()
    def _sort_data(self, s, t):
        """Sort scores ascending and reorder the labels accordingly."""
        order = np.argsort(s)
        self.s = s[order]
        self.t = t[order]
    def _curve(self):
        """Walk samples from highest to lowest score, stepping tpr/fpr and summing auc."""
        if self.t[-1] == 1:
            self.tpr[0] = self.deltaTPR
        else:
            self.fpr[0] = self.deltaFPR
        self.auc = 0
        for j, i in enumerate(range(self.N - 2, -1, -1), start=1):
            if self.t[i] == 1:
                self.tpr[j] = self.tpr[j - 1] + self.deltaTPR
                self.fpr[j] = self.fpr[j - 1]
            else:
                self.tpr[j] = self.tpr[j - 1]
                self.fpr[j] = self.fpr[j - 1] + self.deltaFPR
                # area accumulates only when the curve moves rightwards
                self.auc += self.tpr[j] * self.deltaFPR
        return None
    def to_dict(self):
        """Return the curve and its area as plain python types."""
        return dict(fpr=self.fpr.tolist(), tpr=self.tpr.tolist(), auc=self.auc)
| [
"numpy.unique",
"numpy.argsort",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"warnings.warn"
] | [((840, 910), 'warnings.warn', 'warn', (['"""Duplicate scores detected, may cause arbitrary sample ranking."""'], {}), "('Duplicate scores detected, may cause arbitrary sample ranking.')\n", (844, 910), False, 'from warnings import warn\n'), ((1716, 1732), 'numpy.zeros', 'np.zeros', (['self.N'], {}), '(self.N)\n', (1724, 1732), True, 'import numpy as np\n'), ((1752, 1768), 'numpy.zeros', 'np.zeros', (['self.N'], {}), '(self.N)\n', (1760, 1768), True, 'import numpy as np\n'), ((1794, 1803), 'numpy.sum', 'np.sum', (['t'], {}), '(t)\n', (1800, 1803), True, 'import numpy as np\n'), ((2057, 2070), 'numpy.argsort', 'np.argsort', (['s'], {}), '(s)\n', (2067, 2070), True, 'import numpy as np\n'), ((541, 557), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (549, 557), True, 'import numpy as np\n'), ((621, 637), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (629, 637), True, 'import numpy as np\n'), ((793, 810), 'numpy.unique', 'np.unique', (['scores'], {}), '(scores)\n', (802, 810), True, 'import numpy as np\n')] |
import logging
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from typing import Dict, List, Optional, Tuple, Union
from detectron2.config import configurable
from detectron2.layers import Linear, ShapeSpec, batched_nms_rotated, cat, nonzero_tuple
from detectron2.structures import Instances, Boxes, RotatedBoxes, pairwise_iou_rotated, ImageList
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry
from ..box_regression import Box2BoxTransformRotated
from ..poolers import ROIPooler
from ..matcher import SampleMatcher
from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals
from .box_head import build_box_head
from .fast_rcnn import FastRCNNOutputLayers, FastRCNNOutputs
from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from .rotated_fast_rcnn import RROIHeads
logger = logging.getLogger(__name__)
##########################################################################################################################################
############################################### Grasp Rotated RCNN Output ################################################################
##########################################################################################################################################
def grasp_fast_rcnn_inference_rotated(
    boxes, scores, tilts, zs, image_shapes, score_thresh, nms_thresh, topk_per_image
):
    """
    Run `grasp_fast_rcnn_inference_single_image_rotated` for every image of
    the batch.

    Args:
        boxes (list[Tensor]): per-image predicted class-specific (Ri, K * 5)
            or class-agnostic (Ri, 5) rotated boxes, Ri being the number of
            predictions of image i.
        scores (list[Tensor]): per-image class scores of shape (Ri, K + 1).
        tilts (list[Tensor]): per-image predicted grasp tilts.
        zs (list[Tensor]): per-image predicted grasp z values.
        image_shapes (list[tuple]): one (width, height) per image.
        score_thresh (float): minimum confidence score of kept detections.
        nms_thresh (float): rotated-NMS threshold, in [0, 1].
        topk_per_image (int): number of top detections to keep; < 0 keeps all.

    Returns:
        instances (list[Instances]): per-image topk most confident detections.
        kept_indices (list[Tensor]): per-image indices in [0, Ri) of the kept
            input boxes/scores.
    """
    instances = []
    kept_indices = []
    per_image = zip(scores, boxes, tilts, zs, image_shapes)
    for scores_i, boxes_i, tilts_i, zs_i, shape_i in per_image:
        result, inds = grasp_fast_rcnn_inference_single_image_rotated(
            scores_i, boxes_i, tilts_i, zs_i, shape_i, score_thresh, nms_thresh, topk_per_image
        )
        instances.append(result)
        kept_indices.append(inds)
    return instances, kept_indices
def grasp_fast_rcnn_inference_single_image_rotated(
    scores, boxes, tilts, zs, image_shape, score_thresh, nms_thresh, topk_per_image
):
    """
    Single-image inference. Return rotated bounding-box detection results by
    thresholding on scores and applying rotated non-maximum suppression
    (Rotated NMS); the per-proposal tilt and z predictions are carried through
    filtering and NMS alongside the boxes.
    Args:
        Same as `grasp_fast_rcnn_inference_rotated`, but with rotated boxes,
        scores, tilts, zs and image shape for only one image.
    Returns:
        Same as `grasp_fast_rcnn_inference_rotated`, but for only one image.
    """
    # Keep only predictions whose box, score, tilt and z are all finite.
    valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1) & torch.isfinite(tilts).all(dim=1) & torch.isfinite(zs).all(dim=1)
    if not valid_mask.all():
        boxes = boxes[valid_mask]
        scores = scores[valid_mask]
        tilts = tilts[valid_mask]
        zs = zs[valid_mask]
    B = 5 # box dimension
    scores = scores[:, :-1]  # drop the last column (background class score)
    num_bbox_reg_classes = boxes.shape[1] // B
    # Convert to Boxes to use the `clip` function ...
    boxes = RotatedBoxes(boxes.reshape(-1, B))
    boxes.clip(image_shape)
    boxes = boxes.tensor.view(-1, num_bbox_reg_classes, B) # R x C x B
    # Filter results based on detection scores
    filter_mask = scores > score_thresh # R x K
    # R' x 2. First column contains indices of the R predictions;
    # Second column contains indices of classes.
    filter_inds = filter_mask.nonzero()
    if num_bbox_reg_classes == 1:
        # class-agnostic regression: one box per proposal, shared by all classes
        boxes = boxes[filter_inds[:, 0], 0]
    else:
        boxes = boxes[filter_mask]
    scores = scores[filter_mask]
    # tilts/zs are per-proposal (not per-class): index by proposal id only
    tilts = tilts[filter_inds[:, 0]]
    zs = zs[filter_inds[:, 0]]
    # Apply per-class Rotated NMS
    keep = batched_nms_rotated(boxes, scores, filter_inds[:, 1], nms_thresh)
    if topk_per_image >= 0:
        keep = keep[:topk_per_image]
    boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]
    tilts, zs = tilts[keep], zs[keep]
    # Pack the surviving detections into a detectron2 Instances object.
    result = Instances(image_shape)
    result.pred_boxes = RotatedBoxes(boxes)
    result.scores = scores
    result.pred_classes = filter_inds[:, 1]
    result.pred_zs = torch.flatten(zs)
    result.pred_tilts = torch.flatten(tilts)
    return result, filter_inds[:, 0]
class GraspRotatedFastRCNNOutputs(FastRCNNOutputs):
    """
    An internal implementation that stores information about outputs of a Fast R-CNN head,
    and provides methods that are used to decode the outputs of a Fast R-CNN head.

    Extended for grasp planning with extra per-proposal z (depth) and tilt
    predictions and their corresponding MSE losses.
    """

    def __init__(
        self,
        box2box_transform,
        pred_class_logits,
        pred_proposal_deltas,
        pred_zs,
        pred_tilts,
        proposals,
        smooth_l1_beta=0.0,
        box_reg_loss_type="smooth_l1",
    ):
        """
        Args:
            box2box_transform: transform used to decode box regression deltas.
            pred_class_logits: per-proposal classification logits.
            pred_proposal_deltas: per-proposal box regression deltas.
            pred_zs: per-proposal z (depth) predictions; flattened to 1-D here.
            pred_tilts: per-proposal tilt predictions; flattened to 1-D here.
            proposals (list[Instances]): per-image proposals. During training
                they carry gt_boxes/gt_classes/gt_z/gt_tilts fields
                (see `label_and_sample_proposals`).
            smooth_l1_beta (float): beta of the smooth-L1 box regression loss.
            box_reg_loss_type (str): box regression loss type, e.g. "smooth_l1".
        """
        self.box2box_transform = box2box_transform
        self.num_preds_per_image = [len(p) for p in proposals]
        self.pred_class_logits = pred_class_logits
        self.pred_proposal_deltas = pred_proposal_deltas
        # Flatten (N, 1) head outputs to (N,) so they align with flat gt tensors.
        self.pred_zs = torch.flatten(pred_zs)
        self.pred_tilts = torch.flatten(pred_tilts)
        self.smooth_l1_beta = smooth_l1_beta
        self.box_reg_loss_type = box_reg_loss_type
        self.image_shapes = [x.image_size for x in proposals]
        self.mse_loss = nn.MSELoss()

        if len(proposals):
            box_type = type(proposals[0].proposal_boxes)
            # cat(..., dim=0) concatenates over all images in the batch
            self.proposals = box_type.cat([p.proposal_boxes for p in proposals])
            assert (
                not self.proposals.tensor.requires_grad
            ), "Proposals should not require gradients!"

            # The following fields should exist only when training.
            if proposals[0].has("gt_boxes"):
                self.gt_boxes = box_type.cat([p.gt_boxes for p in proposals])
                assert proposals[0].has("gt_classes")
                self.gt_classes = cat([p.gt_classes for p in proposals], dim=0)
                # Grasp-specific ground truth: depth and tilt per proposal.
                self.gt_zs = cat([p.gt_z for p in proposals], dim=0)
                self.gt_tilts = cat([p.gt_tilts for p in proposals], dim=0)
        else:
            # No proposals at all: keep an empty placeholder box set.
            self.proposals = Boxes(torch.zeros(0, 4, device=self.pred_proposal_deltas.device))
        self._no_instances = len(proposals) == 0  # no instances found

    def z_mse_loss(self):
        # Mean-squared error between predicted and ground-truth depth values.
        return self.mse_loss(self.pred_zs, self.gt_zs)

    def tilt_mse_loss(self):
        # Mean-squared error between predicted and ground-truth tilt values.
        return self.mse_loss(self.pred_tilts, self.gt_tilts)

    def losses(self):
        """
        Compute the default losses for box head in Fast(er) R-CNN,
        with softmax cross entropy loss and smooth L1 loss, plus the
        grasp-specific z and tilt MSE losses.

        Returns:
            A dict of losses (scalar tensors) with keys "loss_cls",
            "loss_box_reg", "loss_mse_z" and "loss_mse_tilt".
        """
        return {"loss_cls": self.softmax_cross_entropy_loss(), "loss_box_reg": self.box_reg_loss(), "loss_mse_z": self.z_mse_loss(), "loss_mse_tilt": self.tilt_mse_loss()}
class GraspRotatedFastRCNNOutputLayers(FastRCNNOutputLayers):
    """
    Output layers for Rotated Fast R-CNN, extended for grasp planning: besides
    the classification and rotated-box regression heads, two additional linear
    heads predict a tilt angle and a z (depth) value per box.
    """

    @configurable
    def __init__(self, **kwargs):
        """
        NOTE: this interface is experimental.
        """
        super().__init__(**kwargs)
        # Scalar regression heads for grasp depth (z) and tilt.
        self.z_pred = Linear(self.input_size, 1)
        self.tilt_pred = Linear(self.input_size, 1)

    @classmethod
    def from_config(cls, cfg, input_shape):
        args = super().from_config(cfg, input_shape)
        args["box2box_transform"] = Box2BoxTransformRotated(
            weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS
        )
        # Pair each loss name with its configured weight, in config order.
        loss_names = ["loss_cls", "loss_box_reg", "loss_mse_tilt", "loss_mse_z"]
        args["loss_weight"] = dict(
            zip(loss_names, cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT)
        )
        return args

    def forward(self, x):
        """
        Args:
            x: per-region features of shape (N, ...) for N bounding boxes to predict.

        Returns:
            Two pairs of tensors: ``(scores, proposal_deltas)`` — scores of
            shape (N, K+1) for K object categories plus background, and box
            regression deltas per box — and ``(tilts, zs)``, each of shape
            (N, 1), the grasp tilt and depth predictions.
        """
        if x.dim() > 2:
            x = torch.flatten(x, start_dim=1)
        scores = self.cls_score(x)
        deltas = self.bbox_pred(x)
        return (scores, deltas), (self.tilt_pred(x), self.z_pred(x))

    def losses(self, predictions, proposals, tilts_and_zs=None):
        """
        Args:
            predictions: the ``(scores, proposal_deltas)`` pair from :meth:`forward()`.
            proposals (list[Instances]): proposals that match the features that were used
                to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``,
                ``gt_classes`` are expected.
            tilts_and_zs: the ``(tilts, zs)`` pair from :meth:`forward()`.

        Returns:
            Dict[str, Tensor]: dict of losses, each scaled by its configured weight.
        """
        scores, proposal_deltas = predictions
        tilts, zs = tilts_and_zs
        outputs = GraspRotatedFastRCNNOutputs(
            self.box2box_transform,
            scores,
            proposal_deltas,
            tilts,
            zs,
            proposals,
            self.smooth_l1_beta,
            self.box_reg_loss_type,
        )
        raw = outputs.losses()
        return {name: value * self.loss_weight.get(name, 1.0) for name, value in raw.items()}

    def inference(self, predictions, proposals, tilts_and_zs):
        """
        Returns:
            list[Instances]: same as `fast_rcnn_inference_rotated`.
            list[Tensor]: same as `fast_rcnn_inference_rotated`.
        """
        boxes = self.predict_boxes(predictions, proposals)
        scores = self.predict_probs(predictions, proposals)
        tilts, zs = self.predict_tilts_zs(tilts_and_zs, proposals)
        shapes = [p.image_size for p in proposals]
        return grasp_fast_rcnn_inference_rotated(
            boxes,
            scores,
            tilts,
            zs,
            shapes,
            self.test_score_thresh,
            self.test_nms_thresh,
            self.test_topk_per_image,
        )

    def predict_tilts_zs(self, tilts_and_zs, proposals):
        """
        Split the batched tilt / z predictions back into per-image tensors.

        Args:
            tilts_and_zs: the ``(tilts, zs)`` pair from :meth:`forward()`.
            proposals (list[Instances]): proposals that match the features that were
                used to compute predictions.

        Returns:
            Two lists of Tensors; element i of each has shape (Ri, 1), where Ri
            is the number of proposals for image i.
        """
        tilts, zs = tilts_and_zs
        counts = [len(p) for p in proposals]
        return tilts.split(counts), zs.split(counts)
@ROI_HEADS_REGISTRY.register()
class GraspRROIHeads(RROIHeads):
    """
    This class is used by Rotated Fast R-CNN to detect rotated boxes.
    For now, it only supports box predictions but not mask or keypoints.

    Extended for grasp planning: the box branch additionally predicts a
    per-box z (depth) and tilt via `GraspRotatedFastRCNNOutputLayers`.
    """

    @configurable
    def __init__(self, **kwargs):
        """
        NOTE: this interface is experimental.
        """
        super().__init__(**kwargs)
        # Only the box branch is supported for rotated heads; fail fast otherwise.
        assert (
            not self.mask_on and not self.keypoint_on
        ), "Mask/Keypoints not supported in Rotated ROIHeads."
        assert not self.train_on_pred_boxes, "train_on_pred_boxes not implemented for RROIHeads!"

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = super().from_config(cfg, input_shape)
        ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
        # allow_low_quality_matches=True keeps the best proposal for every
        # ground-truth box even when it falls below the IoU thresholds.
        ret["proposal_matcher"] = SampleMatcher(
            cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,
            cfg.MODEL.ROI_HEADS.IOU_LABELS,
            allow_low_quality_matches=True,
        )
        return ret

    @classmethod
    def _init_box_head(cls, cfg, input_shape):
        # fmt: off
        in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        # fmt: on
        # Rotated proposals require the rotated variant of ROIAlign.
        assert pooler_type in ["ROIAlignRotated"], pooler_type
        # assume all channel counts are equal
        in_channels = [input_shape[f].channels for f in in_features][0]
        box_pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        box_head = build_box_head(
            cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
        )
        # This line is the only difference v.s. StandardROIHeads
        box_predictor = GraspRotatedFastRCNNOutputLayers(cfg, box_head.output_shape)
        return {
            "box_in_features": in_features,
            "box_pooler": box_pooler,
            "box_head": box_head,
            "box_predictor": box_predictor,
        }

    @torch.no_grad()
    def label_and_sample_proposals(self, proposals, targets):
        """
        Prepare some proposals to be used to train the RROI heads.
        It performs box matching between `proposals` and `targets`, and assigns
        training labels to the proposals.
        It returns `self.batch_size_per_image` random samples from proposals and groundtruth boxes,
        with a fraction of positives that is no larger than `self.positive_sample_fraction.

        Args:
            See :meth:`StandardROIHeads.forward`

        Returns:
            list[Instances]: length `N` list of `Instances`s containing the proposals
            sampled for training. Each `Instances` has the following fields:
            - proposal_boxes: the rotated proposal boxes
            - gt_boxes: the ground-truth rotated boxes that the proposal is assigned to
              (this is only meaningful if the proposal has a label > 0; if label = 0
              then the ground-truth box is random)
            - gt_classes: the ground-truth classification lable for each proposal
            - gt_z / gt_tilts: grasp depth and tilt of the matched ground truth
              (only present when the image has ground truth)
        """
        gt_boxes = [x.gt_boxes for x in targets]
        if self.proposal_append_gt:
            # Add GT boxes to the proposal pool so each GT has a perfect match.
            proposals = add_ground_truth_to_proposals(gt_boxes, proposals)
        proposals_with_gt = []
        num_fg_samples = []
        num_bg_samples = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            has_gt = len(targets_per_image) > 0
            match_quality_matrix = pairwise_iou_rotated(
                targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
            )
            # Rotated IoU can come out slightly negative numerically; clamp to 0.
            match_quality_matrix = F.relu(match_quality_matrix)
            matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
            sampled_idxs, gt_classes = self._sample_proposals(
                matched_idxs, matched_labels, targets_per_image.gt_classes
            )
            proposals_per_image = proposals_per_image[sampled_idxs]
            proposals_per_image.gt_classes = gt_classes
            if has_gt:
                sampled_targets = matched_idxs[sampled_idxs]
                # Copy matched GT fields (rotated box, depth, tilt) onto proposals.
                proposals_per_image.gt_boxes = targets_per_image.gt_boxes[sampled_targets]
                proposals_per_image.gt_z = targets_per_image.gt_z[sampled_targets]
                proposals_per_image.gt_tilts = targets_per_image.gt_tilts[sampled_targets]
            else:
                # No GT in this image: every sample is background; attach
                # all-zero rotated boxes as placeholders.
                gt_boxes = RotatedBoxes(
                    targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 5))
                )
                proposals_per_image.gt_boxes = gt_boxes
            num_bg_samples.append((gt_classes == self.num_classes).sum().item())
            num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
            proposals_with_gt.append(proposals_per_image)
        # Log the number of fg/bg samples that are selected for training ROI heads
        storage = get_event_storage()
        storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
        storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
        return proposals_with_gt

    def forward(
        self,
        images: ImageList,
        features: Dict[str, torch.Tensor],
        proposals: List[Instances],
        targets: Optional[List[Instances]] = None,
    ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
        """
        See :class:`ROIHeads.forward`.
        """
        del images
        if self.training:
            assert targets
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets
        if self.training:
            # Grasp branch replaces the standard box branch here.
            losses = self._forward_grasp(features, proposals)
            # losses = self._forward_box(features, proposals)
            # Usually the original proposals used by the box head are used by the mask, keypoint
            # heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes
            # predicted by the box head.
            losses.update(self._forward_mask(features, proposals))
            losses.update(self._forward_keypoint(features, proposals))
            return proposals, losses
        else:
            pred_instances = self._forward_grasp(features, proposals)
            # pred_instances = self._forward_box(features, proposals)
            # During inference cascaded prediction is used: the mask and keypoints heads are only
            # applied to the top scoring box detections.
            pred_instances = self.forward_with_given_boxes(features, pred_instances)
            return pred_instances, {}

    def _forward_grasp(
        self, features: Dict[str, torch.Tensor], proposals: List[Instances],
    ) -> Union[Dict[str, torch.Tensor], List[Instances]]:
        """
        Forward logic of the grasp prediction branch. If `self.train_on_pred_boxes is True`,
        the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument.

        Args:
            features (dict[str, Tensor]): mapping from feature map names to tensor.
                Same as in :meth:`ROIHeads.forward`.
            proposals (list[Instances]): the per-image object proposals with
                their matching ground truth.
                Each has fields "proposal_boxes", and "objectness_logits",
                "gt_classes", "gt_boxes".

        Returns:
            In training, a dict of losses.
            In inference, a list of `Instances`, the predicted instances.
        """
        features = [features[f] for f in self.box_in_features]
        box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
        box_features = self.box_head(box_features)
        # Predictor returns (scores, deltas) and the grasp-specific (tilts, zs).
        predictions, tilts_and_zs = self.box_predictor(box_features)
        del box_features
        if self.training:
            losses = self.box_predictor.losses(predictions, proposals, tilts_and_zs)
            # proposals is modified in-place below, so losses must be computed first.
            if self.train_on_pred_boxes:
                with torch.no_grad():
                    pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
                        predictions, proposals
                    )
                    for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes):
                        proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
            return losses
        else:
            pred_instances, _ = self.box_predictor.inference(predictions, proposals, tilts_and_zs)
            return pred_instances
"logging.getLogger",
"detectron2.structures.RotatedBoxes",
"numpy.mean",
"detectron2.structures.Boxes",
"detectron2.layers.ShapeSpec",
"torch.isfinite",
"detectron2.layers.Linear",
"detectron2.utils.events.get_event_storage",
"detectron2.structures.Instances",
"detectron2.layers.batched_nms_rotate... | [((900, 927), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (917, 927), False, 'import logging\n'), ((5165, 5230), 'detectron2.layers.batched_nms_rotated', 'batched_nms_rotated', (['boxes', 'scores', 'filter_inds[:, 1]', 'nms_thresh'], {}), '(boxes, scores, filter_inds[:, 1], nms_thresh)\n', (5184, 5230), False, 'from detectron2.layers import Linear, ShapeSpec, batched_nms_rotated, cat, nonzero_tuple\n'), ((5430, 5452), 'detectron2.structures.Instances', 'Instances', (['image_shape'], {}), '(image_shape)\n', (5439, 5452), False, 'from detectron2.structures import Instances, Boxes, RotatedBoxes, pairwise_iou_rotated, ImageList\n'), ((5477, 5496), 'detectron2.structures.RotatedBoxes', 'RotatedBoxes', (['boxes'], {}), '(boxes)\n', (5489, 5496), False, 'from detectron2.structures import Instances, Boxes, RotatedBoxes, pairwise_iou_rotated, ImageList\n'), ((5589, 5606), 'torch.flatten', 'torch.flatten', (['zs'], {}), '(zs)\n', (5602, 5606), False, 'import torch\n'), ((5631, 5651), 'torch.flatten', 'torch.flatten', (['tilts'], {}), '(tilts)\n', (5644, 5651), False, 'import torch\n'), ((14688, 14703), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14701, 14703), False, 'import torch\n'), ((6435, 6457), 'torch.flatten', 'torch.flatten', (['pred_zs'], {}), '(pred_zs)\n', (6448, 6457), False, 'import torch\n'), ((6484, 6509), 'torch.flatten', 'torch.flatten', (['pred_tilts'], {}), '(pred_tilts)\n', (6497, 6509), False, 'import torch\n'), ((6694, 6706), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (6704, 6706), False, 'from torch import nn\n'), ((8741, 8767), 'detectron2.layers.Linear', 'Linear', (['self.input_size', '(1)'], {}), '(self.input_size, 1)\n', (8747, 8767), False, 'from detectron2.layers import Linear, ShapeSpec, batched_nms_rotated, cat, nonzero_tuple\n'), ((8793, 8819), 'detectron2.layers.Linear', 'Linear', (['self.input_size', '(1)'], {}), '(self.input_size, 1)\n', (8799, 
8819), False, 'from detectron2.layers import Linear, ShapeSpec, batched_nms_rotated, cat, nonzero_tuple\n'), ((17648, 17667), 'detectron2.utils.events.get_event_storage', 'get_event_storage', ([], {}), '()\n', (17665, 17667), False, 'from detectron2.utils.events import get_event_storage\n'), ((9765, 9794), 'torch.flatten', 'torch.flatten', (['x'], {'start_dim': '(1)'}), '(x, start_dim=1)\n', (9778, 9794), False, 'import torch\n'), ((14252, 14339), 'detectron2.layers.ShapeSpec', 'ShapeSpec', ([], {'channels': 'in_channels', 'height': 'pooler_resolution', 'width': 'pooler_resolution'}), '(channels=in_channels, height=pooler_resolution, width=\n pooler_resolution)\n', (14261, 14339), False, 'from detectron2.layers import Linear, ShapeSpec, batched_nms_rotated, cat, nonzero_tuple\n'), ((16215, 16304), 'detectron2.structures.pairwise_iou_rotated', 'pairwise_iou_rotated', (['targets_per_image.gt_boxes', 'proposals_per_image.proposal_boxes'], {}), '(targets_per_image.gt_boxes, proposals_per_image.\n proposal_boxes)\n', (16235, 16304), False, 'from detectron2.structures import Instances, Boxes, RotatedBoxes, pairwise_iou_rotated, ImageList\n'), ((16366, 16394), 'torch.nn.functional.relu', 'F.relu', (['match_quality_matrix'], {}), '(match_quality_matrix)\n', (16372, 16394), True, 'from torch.nn import functional as F\n'), ((17722, 17745), 'numpy.mean', 'np.mean', (['num_fg_samples'], {}), '(num_fg_samples)\n', (17729, 17745), True, 'import numpy as np\n'), ((17801, 17824), 'numpy.mean', 'np.mean', (['num_bg_samples'], {}), '(num_bg_samples)\n', (17808, 17824), True, 'import numpy as np\n'), ((4145, 4163), 'torch.isfinite', 'torch.isfinite', (['zs'], {}), '(zs)\n', (4159, 4163), False, 'import torch\n'), ((7367, 7412), 'detectron2.layers.cat', 'cat', (['[p.gt_classes for p in proposals]'], {'dim': '(0)'}), '([p.gt_classes for p in proposals], dim=0)\n', (7370, 7412), False, 'from detectron2.layers import Linear, ShapeSpec, batched_nms_rotated, cat, nonzero_tuple\n'), ((7442, 
7481), 'detectron2.layers.cat', 'cat', (['[p.gt_z for p in proposals]'], {'dim': '(0)'}), '([p.gt_z for p in proposals], dim=0)\n', (7445, 7481), False, 'from detectron2.layers import Linear, ShapeSpec, batched_nms_rotated, cat, nonzero_tuple\n'), ((7514, 7557), 'detectron2.layers.cat', 'cat', (['[p.gt_tilts for p in proposals]'], {'dim': '(0)'}), '([p.gt_tilts for p in proposals], dim=0)\n', (7517, 7557), False, 'from detectron2.layers import Linear, ShapeSpec, batched_nms_rotated, cat, nonzero_tuple\n'), ((7607, 7665), 'torch.zeros', 'torch.zeros', (['(0)', '(4)'], {'device': 'self.pred_proposal_deltas.device'}), '(0, 4, device=self.pred_proposal_deltas.device)\n', (7618, 7665), False, 'import torch\n'), ((4110, 4131), 'torch.isfinite', 'torch.isfinite', (['tilts'], {}), '(tilts)\n', (4124, 4131), False, 'import torch\n'), ((20789, 20804), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (20802, 20804), False, 'import torch\n'), ((4039, 4060), 'torch.isfinite', 'torch.isfinite', (['boxes'], {}), '(boxes)\n', (4053, 4060), False, 'import torch\n'), ((4074, 4096), 'torch.isfinite', 'torch.isfinite', (['scores'], {}), '(scores)\n', (4088, 4096), False, 'import torch\n'), ((21115, 21142), 'detectron2.structures.Boxes', 'Boxes', (['pred_boxes_per_image'], {}), '(pred_boxes_per_image)\n', (21120, 21142), False, 'from detectron2.structures import Instances, Boxes, RotatedBoxes, pairwise_iou_rotated, ImageList\n')] |
import numpy as np
from . import tools
class IntervalTestData(object):
    """
    Reference fixtures for interval-based tests: sample functions, their first
    derivatives, test domains, and precomputed integrals and roots.
    """

    # Functions under test and their analytic first derivatives (from tools).
    functions = [tools.f]
    first_derivs = [tools.fd]
    # Intervals over which the reference values below were computed.
    domains = [(1,2),(0,2),(-1,0),(-.2*np.pi,.2*np.e),(-1,1)]
    # integrals[i][j]: reference integral of functions[i] over domains[j].
    integrals = [
        [ 0.032346217980525, 0.030893429600387, -0.014887469493652,
          -0.033389463703032, -0.016340257873789, ]
    ]
    # roots[i][j]: presumably the reference roots of functions[i] inside
    # domains[j] — TODO confirm against the routines that consume this table.
    roots = [
        [
            np.array([
                1.004742754531498, 1.038773298601836, 1.073913103930722,
                1.115303578807479, 1.138876334576409, 1.186037005063195,
                1.200100773491540, 1.251812490296546, 1.257982114030372,
                1.312857486088040, 1.313296484543653, 1.365016316032836,
                1.371027655848883, 1.414708808202124, 1.425447888640173,
                1.462152640981920, 1.476924360913394, 1.507538306301423,
                1.525765627652155, 1.551033406767893, 1.572233571395834,
                1.592786143530423, 1.616552437657155, 1.632928169757349,
                1.658915772490721, 1.671576942342459, 1.699491823230094,
                1.708837673403015, 1.738427795274605, 1.744804960074507,
                1.775853245044121, 1.779564153811983, 1.811882812082608,
                1.813192517312102, 1.845760207165999, 1.846618439572035,
                1.877331112646444, 1.880151194495009, 1.907963575049332,
                1.912562771369236, 1.937711007329229, 1.943926743585850,
                1.966622430081970, 1.974309611716701, 1.994742937003962,
            ]),
            np.array([
                0.038699154393837, 0.170621357069026, 0.196642349303247,
                0.335710810755860, 0.360022217617733, 0.459687243605995,
                0.515107092342894, 0.571365105600701, 0.646902333813374,
                0.672854750953472, 0.761751991347867, 0.765783134619707,
                0.851427319155724, 0.863669737544800, 0.930805860269712,
                0.955368374256150,
                1.004742754531498, 1.038773298601836, 1.073913103930722,
                1.115303578807479, 1.138876334576409, 1.186037005063195,
                1.200100773491540, 1.251812490296546, 1.257982114030372,
                1.312857486088040, 1.313296484543653, 1.365016316032836,
                1.371027655848883, 1.414708808202124, 1.425447888640173,
                1.462152640981920, 1.476924360913394, 1.507538306301423,
                1.525765627652155, 1.551033406767893, 1.572233571395834,
                1.592786143530423, 1.616552437657155, 1.632928169757349,
                1.658915772490721, 1.671576942342459, 1.699491823230094,
                1.708837673403015, 1.738427795274605, 1.744804960074507,
                1.775853245044121, 1.779564153811983, 1.811882812082608,
                1.813192517312102, 1.845760207165999, 1.846618439572035,
                1.877331112646444, 1.880151194495009, 1.907963575049332,
                1.912562771369236, 1.937711007329229, 1.943926743585850,
                1.966622430081970, 1.974309611716701, 1.994742937003962,
            ]),
            np.array([
                -0.928510879374692, -0.613329324979852, -0.437747415493617,
                -0.357059979912156, -0.143371301774133, -0.075365172766102,
            ]),
            np.array([
                -0.613329324979852, -0.437747415493618, -0.357059979912156,
                -0.143371301774133, -0.075365172766103, 0.038699154393837,
                0.170621357069026, 0.196642349303248, 0.335710810755860,
                0.360022217617734, 0.459687243605995, 0.515107092342894,
            ]),
            np.array([
                -0.928510879374692, -0.613329324979852, -0.437747415493617,
                -0.357059979912156, -0.143371301774133, -0.075365172766102,
                0.038699154393837, 0.170621357069026, 0.196642349303247,
                0.335710810755860, 0.360022217617733, 0.459687243605995,
                0.515107092342894, 0.571365105600701, 0.646902333813374,
                0.672854750953472, 0.761751991347867, 0.765783134619707,
                0.851427319155724, 0.863669737544800, 0.930805860269712,
                0.955368374256150,
            ])
        ]
    ]
#------------------------------------------------------------------------------
# Variables utilised in the unit-tests
#------------------------------------------------------------------------------
flat_chebfun_vals = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.999999997, 0.999999986,
0.999999941, 0.999999789, 0.999999393, 0.999998563, 0.999997184,
0.999995348, 0.999993366, 0.99999154, 0.999989915, 0.999988271,
0.999986403, 0.999984359, 0.999982364, 0.999980474, 0.999978307,
0.999975142, 0.999970324, 0.999963643, 0.999955361, 0.999945898,
0.999935363, 0.999923205, 0.999908262, 0.999889202, 0.999864879,
0.999834246, 0.999796326, 0.99975088, 0.99969903, 0.999642342,
0.999580651, 0.999510781, 0.999427251, 0.999323793, 0.999194567,
0.999035416, 0.998844745, 0.998622138, 0.998364734, 0.998065025,
0.997713245, 0.997302283, 0.996829434, 0.996292659, 0.995685794,
0.994998503, 0.994220762, 0.993346077, 0.992368917, 0.99127826,
0.990054121, 0.988671759, 0.98710944, 0.985350209, 0.983374834,
0.981155291, 0.978659102, 0.975861583, 0.972752611, 0.969329423,
0.965579958, 0.961469433, 0.956940511, 0.95192781, 0.94637807,
0.940262855, 0.93357411, 0.926305629, 0.918439616, 0.909957562,
0.90086621, 0.891201576, 0.880993483, 0.870226297, 0.858845216,
0.84680415, 0.834100554, 0.820763048, 0.806814451, 0.792250772,
0.777051706, 0.761210269, 0.744753854, 0.727735506, 0.710206105,
0.692195444, 0.673708895, 0.65473279, 0.635254799, 0.615298016,
0.594939271, 0.574286603, 0.553432126, 0.532428562, 0.511317921,
0.490178078, 0.46912582, 0.448268747, 0.427662956, 0.40732279,
0.387259893, 0.367495569, 0.348043802, 0.328916616, 0.310158617,
0.291852411, 0.274075761, 0.256866646, 0.240236008, 0.22419954,
0.208787517, 0.19403154, 0.179950519, 0.166547846, 0.15381606,
0.14174234, 0.130312476, 0.1195121, 0.109325601, 0.099737909,
0.090739786, 0.082329246, 0.074508439, 0.067280706, 0.060641503,
0.054562011, 0.048986606, 0.043855339, 0.039128943, 0.034791594,
0.030835863, 0.027249885, 0.024013122, 0.021095398, 0.018461179,
0.01608212, 0.013947278, 0.012058915, 0.010417872, 0.009010476,
0.00780443, 0.006757475, 0.00583398, 0.005014532, 0.004291684,
0.003660989, 0.003116582, 0.002650172, 0.002250345, 0.001904272,
0.001602206, 0.00133936, 0.001113409, 0.000922497, 0.000765091,
0.000638606, 0.000537747, 0.000455515, 0.000385761, 0.000324789,
0.000271423, 0.000225886, 0.000188349, 0.000158104, 0.000133678,
0.000113573, 9.67E-05, 8.23E-05, 6.91E-05, 5.67E-05, 4.53E-05,
3.54E-05, 2.74E-05, 2.12E-05, 1.65E-05, 1.32E-05, 1.12E-05, 9.83E-06,
8.52E-06, 6.74E-06, 4.57E-06, 2.55E-06, 1.13E-06, 3.89E-07, 1.03E-07,
2.06E-08, 3.12E-09, 3.54E-10, 3.00E-11, 1.88E-12, 7.43E-14, -1.68E-14,
-1.49E-14, -1.99E-14, -1.38E-14, -2.05E-14, -1.37E-14, -2.15E-14,
-1.30E-14, -2.04E-14, -1.35E-14, -2.01E-14, -1.29E-14, -2.09E-14,
-1.35E-14, -2.11E-14, -1.25E-14, -2.13E-14, -1.25E-14, -2.16E-14,
-1.26E-14, -2.11E-14, -1.25E-14, -2.12E-14, -1.18E-14, -2.18E-14,
-1.16E-14, -2.23E-14, -1.03E-14, -2.15E-14, -1.07E-14, -2.25E-14,
-1.08E-14, -2.24E-14, -1.01E-14, -2.27E-14, -1.02E-14, -2.21E-14,
-1.06E-14, -2.25E-14, -9.85E-15, -2.33E-14, -1.01E-14, -2.37E-14,
-9.41E-15, -2.39E-14, -9.85E-15, -2.38E-14, -9.94E-15, -2.41E-14,
-9.74E-15, -2.41E-14, -9.62E-15, -2.45E-14, -8.83E-15, -2.45E-14,
-9.26E-15, -2.45E-14, -9.08E-15, -2.51E-14, -9.16E-15, -2.52E-14,
-8.80E-15, -2.51E-14, -7.80E-15, -2.57E-14, -8.15E-15, -2.55E-14,
-7.97E-15, -2.52E-14, -7.80E-15, -2.54E-14, -7.54E-15, -2.59E-14,
-6.99E-15, -2.63E-14, -7.51E-15, -2.65E-14, -6.88E-15, -2.65E-14,
-6.24E-15, -2.68E-14, -7.35E-15, -2.71E-14, -7.19E-15, -2.68E-14,
-6.97E-15, -2.64E-14, -7.16E-15, -2.68E-14, -7.66E-15, -2.72E-14,
-6.41E-15, -2.69E-14, -6.84E-15, -2.70E-14, -5.26E-15, -2.68E-14,
-6.39E-15, -2.72E-14, -5.83E-15, -2.88E-14, -6.13E-15, -2.74E-14,
-6.60E-15, -2.68E-14, -6.57E-15, -2.75E-14, -6.24E-15, -2.82E-14,
-6.09E-15, -2.65E-14, -5.79E-15, -2.74E-14, -5.21E-15, -2.85E-14,
-4.83E-15, -2.88E-14, -5.58E-15, -2.77E-14, -5.77E-15, -2.74E-14,
-5.66E-15, -2.83E-14, -5.44E-15, -2.80E-14, -5.87E-15, -2.81E-14,
-5.15E-15, -2.80E-14, -4.67E-15, -2.80E-14, -4.50E-15, -2.85E-14,
-5.25E-15, -2.83E-14, -4.83E-15, -2.79E-14, -4.88E-15, -2.86E-14,
-4.65E-15, -2.87E-14, -4.29E-15, -2.90E-14, -4.09E-15, -2.84E-14,
-5.16E-15, -2.87E-14, -4.55E-15, -2.93E-14, -4.36E-15, -2.93E-14,
-3.92E-15, -2.84E-14, -4.34E-15, -2.89E-14, -4.40E-15, -2.89E-14,
-4.51E-15, -2.90E-14, -3.67E-15, -2.86E-14, -5.12E-15, -2.92E-14,
-4.05E-15, -2.93E-14, -5.19E-15, -2.89E-14, -4.23E-15, -2.89E-14,
-4.74E-15, -2.88E-14, -4.28E-15, -2.94E-14, -4.26E-15, -2.95E-14,
-4.12E-15, -2.90E-14, -4.26E-15, -2.99E-14, -4.54E-15, -2.99E-14,
-4.64E-15, -3.00E-14, -3.64E-15, -2.90E-14, -4.40E-15, -3.03E-14,
-3.89E-15, -2.98E-14, -5.00E-15, -2.94E-14, -3.97E-15, -3.12E-14,
-4.19E-15, -3.01E-14, -4.36E-15, -3.01E-14, -4.58E-15, -2.94E-14,
-4.72E-15, -2.98E-14, -4.25E-15, -3.04E-14, -4.33E-15, -3.05E-14,
-4.22E-15, -2.99E-14, -4.39E-15, -3.00E-14, -4.44E-15, -2.98E-14,
-4.11E-15, -2.98E-14, -3.89E-15, -2.96E-14, -3.83E-15, -2.91E-14,
-5.05E-15, -2.99E-14, -3.72E-15, -2.96E-14, -3.94E-15, -3.04E-14,
-3.66E-15, -2.90E-14, -2.72E-15, -2.90E-14, -3.77E-15, -3.09E-14,
-3.16E-15, -2.88E-14, -2.28E-15, -3.09E-14, -3.44E-15, -2.93E-14,
-2.78E-15, -2.95E-14, -3.00E-15, -2.95E-14, -2.22E-15, -2.99E-14,
-4.27E-15, -2.89E-14, -2.28E-15, -2.93E-14, -4.16E-15, -2.98E-14,
-2.00E-15, -2.91E-14, -2.33E-15, -3.03E-14, -3.00E-15, -2.98E-14,
-3.05E-15, -2.96E-14, -2.61E-15, -3.00E-14, -2.33E-15, -3.00E-14,
-2.22E-15, -2.95E-14, -2.66E-15, -2.94E-14, -2.66E-15, -2.94E-14,
-2.39E-15, -3.03E-14, -2.22E-15, -2.97E-14, -2.66E-15, -2.93E-14,
-2.83E-15, -2.95E-14, -2.61E-15, -2.94E-14, -2.16E-15, -2.99E-14,
-2.28E-15, -2.94E-14, -2.00E-15, -2.92E-14, -3.66E-15, -2.90E-14,
-2.28E-15, -3.04E-14, -2.33E-15, -2.91E-14, -1.50E-15, -2.91E-14,
-1.39E-15, -3.01E-14, -1.67E-15, -2.88E-14, -2.28E-15, -2.80E-14,
-2.44E-15, -2.87E-14, -9.99E-16, -2.84E-14, -1.83E-15, -2.90E-14,
-1.33E-15, -2.87E-14, -2.78E-16, -2.86E-14, -1.33E-15, -2.85E-14,
-9.44E-16, -2.99E-14, -8.88E-16, -2.77E-14, -9.99E-16, -2.91E-14,
-1.17E-15, -2.82E-14, -2.78E-16, -2.83E-14, -1.55E-15, -2.76E-14,
-1.05E-15, -2.75E-14, -8.88E-16, -2.81E-14, -3.33E-16, -2.79E-14,
-1.11E-15, -2.93E-14, -4.44E-16, -2.81E-14, -2.78E-16, -2.85E-14,
-1.33E-15, -2.79E-14, -1.67E-16, -2.79E-14, -7.77E-16, -2.78E-14,
-5.55E-17, -2.84E-14, -6.66E-16, -2.85E-14, -7.77E-16, -2.74E-14,
-7.77E-16, -2.76E-14, -2.22E-16, -2.81E-14, 4.44E-16, -2.84E-14,
-7.22E-16, -2.75E-14, -6.66E-16, -2.75E-14, -6.66E-16, -2.78E-14,
2.78E-16, -2.77E-14, -8.88E-16, -2.70E-14, -3.89E-16, -2.69E-14,
-4.44E-16, -2.84E-14, 1.50E-15, -2.75E-14, -3.33E-16, -2.76E-14,
1.11E-16, -2.79E-14, 0, -2.82E-14, 2.78E-16, -2.82E-14, -7.22E-16,
-2.73E-14, 3.33E-16, -2.74E-14, 1.11E-16, -2.79E-14, -1.11E-16,
-2.80E-14, -2.22E-16, -2.79E-14, -7.77E-16, -2.74E-14, -1.11E-16,
-2.80E-14, 4.44E-16, -2.79E-14, 9.99E-16, -2.71E-14, -3.33E-16,
-2.65E-14, -3.33E-16, -2.73E-14, 1.44E-15, -2.70E-14, 0, -2.69E-14,
3.33E-16, -2.65E-14, 5.55E-16, -2.80E-14, 1.78E-15, -2.74E-14, 7.77E-16,
-2.74E-14, -5.55E-16, -2.76E-14, 8.88E-16, -2.80E-14, 1.22E-15, -2.76E-14,
6.66E-16, -2.81E-14, 3.33E-16, -2.80E-14, 1.11E-15, -2.93E-14, 7.77E-16,
-2.74E-14, 4.44E-16, -2.87E-14, 9.99E-16, -2.80E-14, 6.66E-16, -2.88E-14,
9.99E-16, -2.79E-14, 8.88E-16, -2.77E-14, 7.77E-16, -2.78E-14, -2.22E-16,
-2.83E-14, 1.11E-15, -2.71E-14, 1.22E-15, -2.78E-14, 5.55E-16, -2.66E-14,
4.44E-16, -2.76E-14, 1.11E-16, -2.73E-14, 7.77E-16, -2.70E-14, -1.11E-16,
-2.70E-14, -3.33E-16, -2.78E-14, 9.99E-16, -2.82E-14, 6.66E-16, -2.79E-14,
4.44E-16, -2.82E-14, -1.11E-16, -2.84E-14, 6.66E-16, -2.83E-14, 1.11E-16,
-2.69E-14, 2.22E-16, -2.72E-14, 8.88E-16, -2.80E-14, 1.11E-16, -2.81E-14,
2.22E-16, -2.84E-14, 7.77E-16, -2.79E-14, 8.88E-16, -2.83E-14, 4.44E-16,
-2.86E-14, 1.44E-15, -2.84E-14, 4.44E-16, -2.76E-14, 0, -2.76E-14,
-1.11E-16, -2.83E-14, 9.99E-16, -2.75E-14, 7.77E-16, -2.77E-14, 7.77E-16,
-2.86E-14, 2.22E-16, -2.90E-14, 2.22E-16, -2.81E-14, 3.33E-16, -2.76E-14,
-3.33E-16, -2.81E-14, 5.55E-16, -2.82E-14, -5.55E-16, -2.89E-14, 1.11E-15,
-2.80E-14, -1.11E-16, -2.82E-14, 6.66E-16, -2.84E-14, -6.66E-16, -2.83E-14,
-1.11E-16, -2.79E-14, 0, -2.98E-14, -3.33E-16, -2.86E-14, 1.11E-16,
-2.81E-14, -3.33E-16, -2.79E-14, -7.77E-16, -2.83E-14, -7.77E-16,
-2.90E-14, 2.22E-16, -2.83E-14, -5.55E-16, -2.88E-14, -3.33E-16, -2.84E-14,
-1.11E-16, -3.06E-14, -5.55E-16, -2.88E-14, -3.33E-16, -2.93E-14, 5.55E-16,
-2.91E-14, -1.11E-16, -2.96E-14, -1.22E-15, -2.83E-14, -5.55E-16,
-2.88E-14, -9.99E-16, -2.87E-14, -1.11E-15, -2.94E-14, -6.66E-16,
-3.01E-14, -3.33E-16, -2.90E-14, -6.66E-16, -3.01E-14, -8.88E-16,
-3.14E-14, -8.88E-16, -2.88E-14, -2.78E-15, -2.96E-14, -1.11E-15,
-3.01E-14, -9.99E-16, -3.05E-14, -9.99E-16, -2.99E-14, -1.67E-15,
-2.95E-14, -1.44E-15, -3.06E-14, -1.67E-15, -3.05E-14, -1.55E-15,
-3.12E-14, -1.55E-15, -2.96E-14, -1.55E-15, -3.11E-14, -1.55E-15,
-3.04E-14, -5.00E-16, -2.99E-14, -5.55E-16, -3.10E-14, -1.39E-15,
-3.14E-14, -1.72E-15, -3.10E-14, -2.22E-15, -3.06E-14, -7.22E-16,
-2.87E-14, -7.77E-16, -3.10E-14, -3.61E-15, -3.13E-14, -7.77E-16,
-3.05E-14, -3.83E-15, -3.08E-14, -1.72E-15, -3.18E-14, -2.05E-15,
-3.13E-14, -2.44E-15, -3.05E-14, -2.55E-15, -3.27E-14, -2.11E-15,
-3.04E-14, -2.00E-15, -3.29E-14, -2.05E-15, -2.98E-14, -6.66E-16,
-2.96E-14, -2.44E-15, -3.20E-14, -2.33E-15, -3.14E-14, -2.50E-15,
-3.05E-14, -2.94E-15, -3.02E-14, -2.05E-15, -3.03E-14, -1.78E-15,
-3.13E-14, -1.94E-15, -3.11E-14, -1.72E-15, -3.03E-14, -2.61E-15,
-3.14E-14, -2.28E-15, -3.14E-14, -2.16E-15, -3.06E-14, -1.78E-15,
-3.08E-14, -1.72E-15, -3.08E-14, -2.50E-15, -3.08E-14, -2.66E-15,
-3.04E-14, -2.44E-15, -3.21E-14, -1.22E-15, -3.13E-14, -3.61E-15,
-3.08E-14, -2.16E-15, -2.99E-14, -2.05E-15, -2.99E-14, -2.89E-15,
-3.09E-14, -1.83E-15, -3.16E-14, -5.55E-16, -3.08E-14, -1.50E-15,
3.02E-14, -9.99E-16, -3.23E-14, -2.72E-15, -3.04E-14, -1.39E-15,
-2.90E-14, -2.33E-15, -3.00E-14, -1.50E-15, -3.08E-14, -3.33E-15,
-3.12E-14, -9.99E-16, -3.11E-14, -2.66E-15, -3.06E-14, -1.44E-15,
-3.02E-14, -1.67E-15, -3.16E-14, -7.77E-16, -3.06E-14, -9.99E-16,
-3.05E-14, -1.50E-15, -3.11E-14, -1.33E-15, -3.14E-14, -3.33E-15,
3.09E-14, -1.39E-15, -3.08E-14, -2.00E-15, -3.16E-14, -1.61E-15,
-3.02E-14, -1.55E-15, -3.10E-14, -1.78E-15, -2.95E-14, -2.16E-15,
-3.09E-14, -1.33E-15, -3.06E-14, -9.44E-16, -3.10E-14, -1.11E-15,
-3.02E-14, -1.83E-15, -3.06E-14, -1.94E-15, -3.13E-14, -1.61E-15,
-3.00E-14, -1.11E-15, -3.00E-14, -9.99E-16, -3.00E-14, -1.61E-15,
-3.11E-14, -1.11E-15, -3.05E-14, -7.77E-16, -3.00E-14, -1.94E-15,
-2.90E-14, -1.28E-15, -3.11E-14, -1.39E-15, -2.95E-14, -8.33E-16,
-2.98E-14, -1.67E-15, -3.02E-14, -2.05E-15, -3.08E-14, -5.55E-16,
-2.96E-14, -9.44E-16, -2.95E-14, -6.66E-16, -3.06E-14, -1.39E-15,
-3.04E-14, -1.17E-15, -3.05E-14, -6.11E-16, -2.94E-14, -1.17E-15,
-2.96E-14, -7.22E-16, -2.96E-14, -5.00E-16, -3.03E-14, 2.78E-16,
-3.02E-14, -5.55E-17, -2.94E-14, -3.33E-16, -3.06E-14, -1.67E-16,
-2.89E-14, 8.33E-16, -2.93E-14, -1.05E-15, -2.88E-14, -6.11E-16,
-3.00E-14, -1.22E-15, -3.04E-14, -9.44E-16, -2.91E-14, -1.28E-15,
-2.93E-14, -5.55E-16, -2.99E-14, -1.33E-15, -2.97E-14, -1.17E-15,
-2.90E-14, -1.89E-15, -3.00E-14, -5.55E-17,
]
| [
"numpy.array"
] | [((373, 1303), 'numpy.array', 'np.array', (['[1.004742754531498, 1.038773298601836, 1.073913103930722, 1.115303578807479,\n 1.138876334576409, 1.186037005063195, 1.20010077349154, \n 1.251812490296546, 1.257982114030372, 1.31285748608804, \n 1.313296484543653, 1.365016316032836, 1.371027655848883, \n 1.414708808202124, 1.425447888640173, 1.46215264098192, \n 1.476924360913394, 1.507538306301423, 1.525765627652155, \n 1.551033406767893, 1.572233571395834, 1.592786143530423, \n 1.616552437657155, 1.632928169757349, 1.658915772490721, \n 1.671576942342459, 1.699491823230094, 1.708837673403015, \n 1.738427795274605, 1.744804960074507, 1.775853245044121, \n 1.779564153811983, 1.811882812082608, 1.813192517312102, \n 1.845760207165999, 1.846618439572035, 1.877331112646444, \n 1.880151194495009, 1.907963575049332, 1.912562771369236, \n 1.937711007329229, 1.94392674358585, 1.96662243008197, \n 1.974309611716701, 1.994742937003962]'], {}), '([1.004742754531498, 1.038773298601836, 1.073913103930722, \n 1.115303578807479, 1.138876334576409, 1.186037005063195, \n 1.20010077349154, 1.251812490296546, 1.257982114030372, \n 1.31285748608804, 1.313296484543653, 1.365016316032836, \n 1.371027655848883, 1.414708808202124, 1.425447888640173, \n 1.46215264098192, 1.476924360913394, 1.507538306301423, \n 1.525765627652155, 1.551033406767893, 1.572233571395834, \n 1.592786143530423, 1.616552437657155, 1.632928169757349, \n 1.658915772490721, 1.671576942342459, 1.699491823230094, \n 1.708837673403015, 1.738427795274605, 1.744804960074507, \n 1.775853245044121, 1.779564153811983, 1.811882812082608, \n 1.813192517312102, 1.845760207165999, 1.846618439572035, \n 1.877331112646444, 1.880151194495009, 1.907963575049332, \n 1.912562771369236, 1.937711007329229, 1.94392674358585, \n 1.96662243008197, 1.974309611716701, 1.994742937003962])\n', (381, 1303), True, 'import numpy as np\n'), ((1508, 2762), 'numpy.array', 'np.array', (['[0.038699154393837, 0.170621357069026, 0.196642349303247, 
0.33571081075586,\n 0.360022217617733, 0.459687243605995, 0.515107092342894, \n 0.571365105600701, 0.646902333813374, 0.672854750953472, \n 0.761751991347867, 0.765783134619707, 0.851427319155724, \n 0.8636697375448, 0.930805860269712, 0.95536837425615, 1.004742754531498,\n 1.038773298601836, 1.073913103930722, 1.115303578807479, \n 1.138876334576409, 1.186037005063195, 1.20010077349154, \n 1.251812490296546, 1.257982114030372, 1.31285748608804, \n 1.313296484543653, 1.365016316032836, 1.371027655848883, \n 1.414708808202124, 1.425447888640173, 1.46215264098192, \n 1.476924360913394, 1.507538306301423, 1.525765627652155, \n 1.551033406767893, 1.572233571395834, 1.592786143530423, \n 1.616552437657155, 1.632928169757349, 1.658915772490721, \n 1.671576942342459, 1.699491823230094, 1.708837673403015, \n 1.738427795274605, 1.744804960074507, 1.775853245044121, \n 1.779564153811983, 1.811882812082608, 1.813192517312102, \n 1.845760207165999, 1.846618439572035, 1.877331112646444, \n 1.880151194495009, 1.907963575049332, 1.912562771369236, \n 1.937711007329229, 1.94392674358585, 1.96662243008197, \n 1.974309611716701, 1.994742937003962]'], {}), '([0.038699154393837, 0.170621357069026, 0.196642349303247, \n 0.33571081075586, 0.360022217617733, 0.459687243605995, \n 0.515107092342894, 0.571365105600701, 0.646902333813374, \n 0.672854750953472, 0.761751991347867, 0.765783134619707, \n 0.851427319155724, 0.8636697375448, 0.930805860269712, 0.95536837425615,\n 1.004742754531498, 1.038773298601836, 1.073913103930722, \n 1.115303578807479, 1.138876334576409, 1.186037005063195, \n 1.20010077349154, 1.251812490296546, 1.257982114030372, \n 1.31285748608804, 1.313296484543653, 1.365016316032836, \n 1.371027655848883, 1.414708808202124, 1.425447888640173, \n 1.46215264098192, 1.476924360913394, 1.507538306301423, \n 1.525765627652155, 1.551033406767893, 1.572233571395834, \n 1.592786143530423, 1.616552437657155, 1.632928169757349, \n 1.658915772490721, 1.671576942342459, 
1.699491823230094, \n 1.708837673403015, 1.738427795274605, 1.744804960074507, \n 1.775853245044121, 1.779564153811983, 1.811882812082608, \n 1.813192517312102, 1.845760207165999, 1.846618439572035, \n 1.877331112646444, 1.880151194495009, 1.907963575049332, \n 1.912562771369236, 1.937711007329229, 1.94392674358585, \n 1.96662243008197, 1.974309611716701, 1.994742937003962])\n', (1516, 2762), True, 'import numpy as np\n'), ((3043, 3178), 'numpy.array', 'np.array', (['[-0.928510879374692, -0.613329324979852, -0.437747415493617, -\n 0.357059979912156, -0.143371301774133, -0.075365172766102]'], {}), '([-0.928510879374692, -0.613329324979852, -0.437747415493617, -\n 0.357059979912156, -0.143371301774133, -0.075365172766102])\n', (3051, 3178), True, 'import numpy as np\n'), ((3235, 3492), 'numpy.array', 'np.array', (['[-0.613329324979852, -0.437747415493618, -0.357059979912156, -\n 0.143371301774133, -0.075365172766103, 0.038699154393837, \n 0.170621357069026, 0.196642349303248, 0.33571081075586, \n 0.360022217617734, 0.459687243605995, 0.515107092342894]'], {}), '([-0.613329324979852, -0.437747415493618, -0.357059979912156, -\n 0.143371301774133, -0.075365172766103, 0.038699154393837, \n 0.170621357069026, 0.196642349303248, 0.33571081075586, \n 0.360022217617734, 0.459687243605995, 0.515107092342894])\n', (3243, 3492), True, 'import numpy as np\n'), ((3579, 4039), 'numpy.array', 'np.array', (['[-0.928510879374692, -0.613329324979852, -0.437747415493617, -\n 0.357059979912156, -0.143371301774133, -0.075365172766102, \n 0.038699154393837, 0.170621357069026, 0.196642349303247, \n 0.33571081075586, 0.360022217617733, 0.459687243605995, \n 0.515107092342894, 0.571365105600701, 0.646902333813374, \n 0.672854750953472, 0.761751991347867, 0.765783134619707, \n 0.851427319155724, 0.8636697375448, 0.930805860269712, 0.95536837425615]'], {}), '([-0.928510879374692, -0.613329324979852, -0.437747415493617, -\n 0.357059979912156, -0.143371301774133, -0.075365172766102, \n 
0.038699154393837, 0.170621357069026, 0.196642349303247, \n 0.33571081075586, 0.360022217617733, 0.459687243605995, \n 0.515107092342894, 0.571365105600701, 0.646902333813374, \n 0.672854750953472, 0.761751991347867, 0.765783134619707, \n 0.851427319155724, 0.8636697375448, 0.930805860269712, 0.95536837425615])\n', (3587, 4039), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-05-06 23:16
from edparser.utils.io_util import load_pickle, save_pickle
from iwpt2020 import cdroot
import numpy as np
import matplotlib.pyplot as plt
cdroot()
gold_file = 'data/iwpt2020/test-udpipe/en.fixed.conllu'
template = 'data/model/iwpt2020/bert/dep/en.conllu'
def load_conll(path):
with open(path) as src:
text = src.read()
sents = text.split('\n\n')
sents = [x for x in sents if x.strip()]
return sents
def load_sent(text: str):
return [x for x in text.split('\n') if not x.startswith('#')]
iv = set()
for each in load_conll('data/iwpt2020/train-dev-combined/en/train.conllu'):
for cell in load_sent(each):
form = cell.split('\t')[1].lower()
iv.add(form)
def calc_f1(path):
correct = 0
ngold = 0
npred = 0
for gold, pred in zip(load_conll(gold_file), load_conll(path)):
gt = set()
pt = set()
for gold, pred in zip(load_sent(gold), load_sent(pred)):
gf = gold.split('\t')[1].lower()
pf = pred.split('\t')[1].lower()
if gf in iv:
continue
idx = gold.split('\t')[0]
for rel in gold.split('\t')[8].split('|'):
gt.add((idx,) + tuple(rel.split(':')))
for rel in pred.split('\t')[8].split('|'):
pt.add((idx,) + tuple(rel.split(':')))
ngold += len(gt)
npred += len(pt)
correct += len(gt & pt)
p = correct / npred
r = correct / ngold
f1 = 2 * p * r / (p + r)
return f1
fig, ax = plt.subplots()
ind = np.arange(3)
width = 0.35
try:
cache = load_pickle('cache_f1.pkl')
except FileNotFoundError:
cache = {}
for lang in ['mbert', 'bert']:
f1s = []
for model, color in zip(['dep', 'sdp', 'ens'], 'rgb'):
key = f'{lang}-{model}'
if key in cache:
f1 = cache[key]
else:
pred_file = template.replace('bert', lang).replace('dep', model)
f1 = calc_f1(pred_file)
cache[key] = f1
f1s.append(f1)
print(key)
ax.bar(ind + (width if lang == 'bert' else 0), f1s, width, label='multilingual' if lang.startswith('m') else 'language-specific')
save_pickle(cache, 'cache_f1.pkl')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(['DTP', 'DGP', 'ENS'])
plt.ylabel('ELAS of OOV')
ax.legend()
plt.savefig('oov.pdf')
plt.show()
| [
"edparser.utils.io_util.save_pickle",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"edparser.utils.io_util.load_pickle",
"iwpt2020.cdroot",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((205, 213), 'iwpt2020.cdroot', 'cdroot', ([], {}), '()\n', (211, 213), False, 'from iwpt2020 import cdroot\n'), ((1606, 1620), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1618, 1620), True, 'import matplotlib.pyplot as plt\n'), ((1627, 1639), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (1636, 1639), True, 'import numpy as np\n'), ((2259, 2293), 'edparser.utils.io_util.save_pickle', 'save_pickle', (['cache', '"""cache_f1.pkl"""'], {}), "(cache, 'cache_f1.pkl')\n", (2270, 2293), False, 'from edparser.utils.io_util import load_pickle, save_pickle\n'), ((2367, 2392), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ELAS of OOV"""'], {}), "('ELAS of OOV')\n", (2377, 2392), True, 'import matplotlib.pyplot as plt\n'), ((2405, 2427), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""oov.pdf"""'], {}), "('oov.pdf')\n", (2416, 2427), True, 'import matplotlib.pyplot as plt\n'), ((2428, 2438), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2436, 2438), True, 'import matplotlib.pyplot as plt\n'), ((1670, 1697), 'edparser.utils.io_util.load_pickle', 'load_pickle', (['"""cache_f1.pkl"""'], {}), "('cache_f1.pkl')\n", (1681, 1697), False, 'from edparser.utils.io_util import load_pickle, save_pickle\n')] |
import numpy as np
import pandas
from ggplot import *
"""
In this question, you need to:
1) implement the compute_cost() and gradient_descent() procedures
2) Select features (in the predictions procedure) and make predictions.
"""
def normalize_features(df):
"""
Normalize the features in the data set.
"""
mu = df.mean()
sigma = df.std()
if (sigma == 0).any():
raise Exception("One or more features had the same value for all samples, and thus could " + \
"not be normalized. Please do not include features with only a single value " + \
"in your model.")
df_normalized = (df - df.mean()) / df.std()
return df_normalized, mu, sigma
def compute_cost(features, values, theta):
"""
Compute the cost function given a set of features / values,
and the values for our thetas.
This can be the same code as the compute_cost function in the lesson #3 exercises,
but feel free to implement your own.
"""
m = len(values)
sum_of_square_errors = np.square(np.dot(features, theta) - values).sum()
cost = sum_of_square_errors / (2*m)
return cost
def gradient_descent(features, values, theta, alpha, num_iterations):
"""
Perform gradient descent given a data set with an arbitrary number of features.
This can be the same gradient descent code as in the lesson #3 exercises,
but feel free to implement your own.
"""
m = len(values)
cost_history = []
for i in range(num_iterations):
cost = compute_cost(features,values,theta)
cost_history.append(cost)
theta = theta + (alpha/m)*np.dot((values - np.dot(features,theta)),features)
return theta, pandas.Series(cost_history)
def predictions(dataframe):
'''
The NYC turnstile data is stored in a pandas dataframe called weather_turnstile.
Your prediction should have a R^2 value of 0.40 or better.
You need to experiment using various input features contained in the dataframe.
'''
# print dataframe # see which fields we can use
# Select Features (try different features!)
features = dataframe[['rain', 'precipi', 'Hour', 'meantempi']]
# Add UNIT to features using dummy variables
dummy_units = pandas.get_dummies(dataframe['UNIT'], prefix='unit')
features = features.join(dummy_units)
# Values
values = dataframe['ENTRIESn_hourly']
m = len(values)
features, mu, sigma = normalize_features(features)
features['ones'] = np.ones(m) # Add a column of 1s (y intercept)
# Convert features and values to numpy arrays
features_array = np.array(features)
values_array = np.array(values)
# Set values for alpha, number of iterations.
alpha = 0.1 # please feel free to change this value
num_iterations = 75 # please feel free to change this value
# Initialize theta, perform gradient descent
theta_gradient_descent = np.zeros(len(features.columns))
theta_gradient_descent, cost_history = gradient_descent(features_array,
values_array,
theta_gradient_descent,
alpha,
num_iterations)
plot = None
predictions = np.dot(features_array, theta_gradient_descent)
return predictions, plot
def plot_cost_history(alpha, cost_history):
"""This function is for viewing the plot of your cost history.
You can run it by uncommenting this
plot_cost_history(alpha, cost_history)
call in predictions.
If you want to run this locally, you should print the return value
from this function.
"""
cost_df = pandas.DataFrame({
'Cost_History': cost_history,
'Iteration': range(len(cost_history))
})
return ggplot(cost_df, aes('Iteration', 'Cost_History')) + \
geom_point() + ggtitle('Cost History for alpha = %.3f' % alpha )
| [
"pandas.Series",
"numpy.ones",
"numpy.array",
"numpy.dot",
"pandas.get_dummies"
] | [((2296, 2348), 'pandas.get_dummies', 'pandas.get_dummies', (["dataframe['UNIT']"], {'prefix': '"""unit"""'}), "(dataframe['UNIT'], prefix='unit')\n", (2314, 2348), False, 'import pandas\n'), ((2550, 2560), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (2557, 2560), True, 'import numpy as np\n'), ((2672, 2690), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (2680, 2690), True, 'import numpy as np\n'), ((2710, 2726), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (2718, 2726), True, 'import numpy as np\n'), ((3434, 3480), 'numpy.dot', 'np.dot', (['features_array', 'theta_gradient_descent'], {}), '(features_array, theta_gradient_descent)\n', (3440, 3480), True, 'import numpy as np\n'), ((1746, 1773), 'pandas.Series', 'pandas.Series', (['cost_history'], {}), '(cost_history)\n', (1759, 1773), False, 'import pandas\n'), ((1082, 1105), 'numpy.dot', 'np.dot', (['features', 'theta'], {}), '(features, theta)\n', (1088, 1105), True, 'import numpy as np\n'), ((1693, 1716), 'numpy.dot', 'np.dot', (['features', 'theta'], {}), '(features, theta)\n', (1699, 1716), True, 'import numpy as np\n')] |
"""
Plot a time series of volume-integrated buoyancy flux. Focused
on a high-resolution nested model of Admiralty Inlet.
"""
import xarray as xr
import numpy as np
import seawater as sw
import pandas as pd
from lo_tools import Lfun, zrfun
Ldir = Lfun.Lstart(gridname='ai0', tag='v0', ex_name='n0k')
fn_list = Lfun.get_fn_list('hourly', Ldir, '2018.01.01', '2018.01.14')
if False:
fn_list = fn_list[:5]
ot_list = []
eta_list = []
fb_list = []
for fn in fn_list:
print(fn.name)
if fn == fn_list[0]:
G = zrfun.get_basic_info(fn, only_G=True)
DA = G['DX'] * G['DY']
DA[G['mask_rho']==0] = np.nan
A = np.nansum(DA)
ds = xr.open_dataset(fn)
ot = ds.ocean_time.values[0]
ot_list.append(ot)
rho = sw.dens0(ds.salt.values.squeeze(), ds.temp.values.squeeze())
# calculate vertically-integrated buoyancy flux
fb = -9.8 * np.sum(ds.AKs[0,1:-1,:,:].squeeze() * np.diff(rho, axis=0), axis=0).values
Fb = np.nansum(DA * fb) / A
fb_list.append(Fb)
eta = ds.zeta.values.squeeze()
Eta = np.nansum(DA * eta) / A
eta_list.append(Eta)
df = pd.DataFrame(index=ot_list)
df['Eta'] = eta_list
df['Fb'] = fb_list
out_dir = Ldir['parent'] / 'LPM_output' / 'buoyancy_flux'
Lfun.make_dir(out_dir)
out_fn = out_dir / 'AI_highres.p'
df.to_pickle(out_fn)
| [
"lo_tools.zrfun.get_basic_info",
"numpy.nansum",
"numpy.diff",
"pandas.DataFrame",
"lo_tools.Lfun.make_dir",
"xarray.open_dataset",
"lo_tools.Lfun.get_fn_list",
"lo_tools.Lfun.Lstart"
] | [((250, 302), 'lo_tools.Lfun.Lstart', 'Lfun.Lstart', ([], {'gridname': '"""ai0"""', 'tag': '"""v0"""', 'ex_name': '"""n0k"""'}), "(gridname='ai0', tag='v0', ex_name='n0k')\n", (261, 302), False, 'from lo_tools import Lfun, zrfun\n'), ((314, 374), 'lo_tools.Lfun.get_fn_list', 'Lfun.get_fn_list', (['"""hourly"""', 'Ldir', '"""2018.01.01"""', '"""2018.01.14"""'], {}), "('hourly', Ldir, '2018.01.01', '2018.01.14')\n", (330, 374), False, 'from lo_tools import Lfun, zrfun\n'), ((1144, 1171), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'ot_list'}), '(index=ot_list)\n', (1156, 1171), True, 'import pandas as pd\n'), ((1271, 1293), 'lo_tools.Lfun.make_dir', 'Lfun.make_dir', (['out_dir'], {}), '(out_dir)\n', (1284, 1293), False, 'from lo_tools import Lfun, zrfun\n'), ((680, 699), 'xarray.open_dataset', 'xr.open_dataset', (['fn'], {}), '(fn)\n', (695, 699), True, 'import xarray as xr\n'), ((529, 566), 'lo_tools.zrfun.get_basic_info', 'zrfun.get_basic_info', (['fn'], {'only_G': '(True)'}), '(fn, only_G=True)\n', (549, 566), False, 'from lo_tools import Lfun, zrfun\n'), ((648, 661), 'numpy.nansum', 'np.nansum', (['DA'], {}), '(DA)\n', (657, 661), True, 'import numpy as np\n'), ((989, 1007), 'numpy.nansum', 'np.nansum', (['(DA * fb)'], {}), '(DA * fb)\n', (998, 1007), True, 'import numpy as np\n'), ((1085, 1104), 'numpy.nansum', 'np.nansum', (['(DA * eta)'], {}), '(DA * eta)\n', (1094, 1104), True, 'import numpy as np\n'), ((943, 963), 'numpy.diff', 'np.diff', (['rho'], {'axis': '(0)'}), '(rho, axis=0)\n', (950, 963), True, 'import numpy as np\n')] |
# Reference Book: Python Data Science Handbook (page:(222-223))
# Date(14 April, 2019) Day-4
# Importing matplotlib
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import seaborn; seaborn.set() #set plot styles
import pandas as pd
fig = plt.figure()
x = pd.read_csv('/media/nahid/New Volume/GitHub/Matplotlib/president_heights.csv')
x = np.array(x['height(cm)'])
plt.hist(x,10)
print(plt.show())
fig.savefig('Height Hist Diagram.png')
| [
"seaborn.set",
"matplotlib.pyplot.hist",
"pandas.read_csv",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((209, 222), 'seaborn.set', 'seaborn.set', ([], {}), '()\n', (220, 222), False, 'import seaborn\n'), ((268, 280), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (278, 280), True, 'import matplotlib.pyplot as plt\n'), ((285, 363), 'pandas.read_csv', 'pd.read_csv', (['"""/media/nahid/New Volume/GitHub/Matplotlib/president_heights.csv"""'], {}), "('/media/nahid/New Volume/GitHub/Matplotlib/president_heights.csv')\n", (296, 363), True, 'import pandas as pd\n'), ((368, 393), 'numpy.array', 'np.array', (["x['height(cm)']"], {}), "(x['height(cm)'])\n", (376, 393), True, 'import numpy as np\n'), ((394, 409), 'matplotlib.pyplot.hist', 'plt.hist', (['x', '(10)'], {}), '(x, 10)\n', (402, 409), True, 'import matplotlib.pyplot as plt\n'), ((416, 426), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (424, 426), True, 'import matplotlib.pyplot as plt\n')] |
from CycleGAN_ls import CycleGAN_LightningSystem
from dataModule import ImageTransform, WatercolorDataset, WatercolorDataModule
from discriminator import CycleGAN_Discriminator
from generator import CycleGAN_Unet_Generator
import os
import glob
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.callbacks import ModelCheckpoint
# Seed -------------------------------------------------------------------
def seed_everything(seed):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
# Config -----------------------------------------------------------------
data_dir = "/content/drive/MyDrive/data/"
transform = ImageTransform(img_size=256)
batch_size = 1
lr = {
"G": 0.0002,
"D": 0.0002
}
epoch = 160
seed = 42
reconstr_w = 10
id_w = 5
seed_everything(seed)
# DataModule -----------------------------------------------------------------
dm = WatercolorDataModule(data_dir, transform, batch_size, seed=seed)
G_basestyle = CycleGAN_Unet_Generator()
G_stylebase = CycleGAN_Unet_Generator()
D_base = CycleGAN_Discriminator()
D_style = CycleGAN_Discriminator()
# LightningModule --------------------------------------------------------------
model = CycleGAN_LightningSystem(G_basestyle, G_stylebase, D_base, D_style,
lr, transform, reconstr_w, id_w)
# Callback
checkpoint_callback = ModelCheckpoint(dirpath="/content/drive/MyDrive/checkpoint",
period=10)
# Trainer --------------------------------------------------------------
trainer = Trainer(
logger=False,
max_epochs=epoch,
gpus=1,
checkpoint_callback=checkpoint_callback,
reload_dataloaders_every_epoch=True,
num_sanity_val_steps=0,
# resume_from_checkpoint="/content/drive/MyDrive/checkpoint/epoch=279-step=100799.ckpt"
)
# Train ------------------------------------------------------------------------
trainer.fit(model, datamodule=dm)
| [
"pytorch_lightning.callbacks.ModelCheckpoint",
"torch.manual_seed",
"generator.CycleGAN_Unet_Generator",
"dataModule.WatercolorDataModule",
"discriminator.CycleGAN_Discriminator",
"random.seed",
"dataModule.ImageTransform",
"CycleGAN_ls.CycleGAN_LightningSystem",
"pytorch_lightning.Trainer",
"nump... | [((990, 1018), 'dataModule.ImageTransform', 'ImageTransform', ([], {'img_size': '(256)'}), '(img_size=256)\n', (1004, 1018), False, 'from dataModule import ImageTransform, WatercolorDataset, WatercolorDataModule\n'), ((1231, 1295), 'dataModule.WatercolorDataModule', 'WatercolorDataModule', (['data_dir', 'transform', 'batch_size'], {'seed': 'seed'}), '(data_dir, transform, batch_size, seed=seed)\n', (1251, 1295), False, 'from dataModule import ImageTransform, WatercolorDataset, WatercolorDataModule\n'), ((1311, 1336), 'generator.CycleGAN_Unet_Generator', 'CycleGAN_Unet_Generator', ([], {}), '()\n', (1334, 1336), False, 'from generator import CycleGAN_Unet_Generator\n'), ((1351, 1376), 'generator.CycleGAN_Unet_Generator', 'CycleGAN_Unet_Generator', ([], {}), '()\n', (1374, 1376), False, 'from generator import CycleGAN_Unet_Generator\n'), ((1386, 1410), 'discriminator.CycleGAN_Discriminator', 'CycleGAN_Discriminator', ([], {}), '()\n', (1408, 1410), False, 'from discriminator import CycleGAN_Discriminator\n'), ((1421, 1445), 'discriminator.CycleGAN_Discriminator', 'CycleGAN_Discriminator', ([], {}), '()\n', (1443, 1445), False, 'from discriminator import CycleGAN_Discriminator\n'), ((1537, 1641), 'CycleGAN_ls.CycleGAN_LightningSystem', 'CycleGAN_LightningSystem', (['G_basestyle', 'G_stylebase', 'D_base', 'D_style', 'lr', 'transform', 'reconstr_w', 'id_w'], {}), '(G_basestyle, G_stylebase, D_base, D_style, lr,\n transform, reconstr_w, id_w)\n', (1561, 1641), False, 'from CycleGAN_ls import CycleGAN_LightningSystem\n'), ((1705, 1776), 'pytorch_lightning.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'dirpath': '"""/content/drive/MyDrive/checkpoint"""', 'period': '(10)'}), "(dirpath='/content/drive/MyDrive/checkpoint', period=10)\n", (1720, 1776), False, 'from pytorch_lightning.callbacks import ModelCheckpoint\n'), ((1899, 2057), 'pytorch_lightning.Trainer', 'Trainer', ([], {'logger': '(False)', 'max_epochs': 'epoch', 'gpus': '(1)', 
'checkpoint_callback': 'checkpoint_callback', 'reload_dataloaders_every_epoch': '(True)', 'num_sanity_val_steps': '(0)'}), '(logger=False, max_epochs=epoch, gpus=1, checkpoint_callback=\n checkpoint_callback, reload_dataloaders_every_epoch=True,\n num_sanity_val_steps=0)\n', (1906, 2057), False, 'from pytorch_lightning import Trainer\n'), ((622, 639), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (633, 639), False, 'import random\n'), ((689, 709), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (703, 709), True, 'import numpy as np\n'), ((714, 737), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (731, 737), False, 'import torch\n'), ((742, 770), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (764, 770), False, 'import torch\n')] |
import numpy as np
arr1 = np.empty([2, 3], dtype=int)
print("Empty 2D Array")
print(arr1)
#array of int garbagevalues | [
"numpy.empty"
] | [((27, 54), 'numpy.empty', 'np.empty', (['[2, 3]'], {'dtype': 'int'}), '([2, 3], dtype=int)\n', (35, 54), True, 'import numpy as np\n')] |
"""Contains Datasets for Trainer class."""
import numpy as np
import pickle
from scipy.io import loadmat
from torch.utils.data import Dataset
def load(path):
"""Load file depending on type.
Convenience wrapper.
Args:
path (str): Valid path to dataset as created by envs/envs.py.
Returns:
data (dict): Dataset for use with Stove.
"""
file_format = path.split('.')[-1]
if file_format == 'mat':
return loadmat(path)
elif file_format == 'pkl':
f = open(path, 'rb')
data = pickle.load(f)
f.close()
return data
else:
raise ValueError('File format {} not recognized.'.format(file_format))
class StoveDataset(Dataset):
"""Dataset class for Stove.
Usually called from main.py when assembling trainer instance.
Handles some data preprocessing such as data scaling.
Works with all datasets creatable by envs/envs.py.
Works with and without action-conditioned data.
In data creation, long continuous sequences are created. From these, we
subsample shorter sequences on which to train the model.
"""
def __init__(self, config, test=False, data=None):
"""Load data and data info."""
self.c = config
if data is None:
if test:
data = load(self.c.testdata)
else:
data = load(self.c.traindata)
if 'action' in data.keys():
self.rl = True
action_space = data['action_space']
else:
self.rl = False
action_space = None
self.total_img = data['X'][:self.c.num_episodes]
# Transpose, as PyTorch images have shape (c, h, w)
self.total_img = np.transpose(self.total_img, (0, 1, 4, 2, 3))
if (data['y'].shape[0] < self.c.num_episodes) and not test:
print('WARNING: Data shape smaller than num_episodes specified.')
self.total_data = data['y'][:self.c.num_episodes]
if self.rl:
self.total_actions = data['action'][:self.c.num_episodes]
# rescale rewards to [0, 1] to make then work with bce loss
# (standard loss is mse, but bce is option)
self.total_rewards = data['reward'][:self.c.num_episodes] + 1
self.total_dones = data['done'][:self.c.num_episodes]
# Gather information on dataset. This is accessed by main.py, which then
# sets data specific entries in the config.
height = self.total_img.shape[-2]
width = self.total_img.shape[-1]
coord_lim = data.get('coord_lim', 10)
r = data.get('r', 1.3)
num_obj = self.total_data.shape[2]
num_frames = self.total_data.shape[1]
self.data_info = {
'width': width, 'height': height, 'r': r, 'coord_lim': coord_lim,
'action_space': action_space, 'action_conditioned': self.rl,
'num_obj': num_obj, 'num_frames': num_frames,
}
if self.c.debug_add_noise and not test:
print("Adding random normal noise.")
# from eval notebooks it looks like we have 0.02 noise on x and v
# in -1, 1 frame
#
noise = np.random.normal(size=self.total_data.shape)
self.total_data += 0.02 / 2 * coord_lim * noise
if self.c.supairvised:
# custom rescaling for sup(er/air)vised, only works for billiard?
self.total_data *= 10 / coord_lim
self.total_data[..., :2] /= 5
self.total_data[..., 2:] *= 2
x = self.total_img.sum(2)
x = np.clip(x, 0, 1)
x = np.expand_dims(x, 2)
self.total_img = x
else:
# scale to match native size of STOVE, i.e. pos in [-1, 1]
self.total_data *= 1 / coord_lim * 2
self.total_data[..., :2] -= 1
# clips can start at any frame, but not too late
num_eps, num_frames = self.total_img.shape[0:2]
clips_per_ep = num_frames - ((self.c.num_visible +
self.c.num_rollout) *
self.c.frame_step) + 1
idx_ep, idx_fr = np.meshgrid(list(range(num_eps)),
list(range(clips_per_ep)))
self.idxs = np.reshape(np.stack([idx_ep, idx_fr], 2), (-1, 2))
def __len__(self):
"""Len of iterator."""
return len(self.idxs)
def __getitem__(self, idx):
"""Use to access sequences.
Needed for torch DataLoader.
"""
step = self.c.frame_step
i, j = self.idxs[idx, 0], self.idxs[idx, 1]
end_visible = j + self.c.num_visible * step
end_rollout = end_visible + self.c.num_rollout * step
present_images = self.total_img[i, j:end_visible:step]
future_images = self.total_img[i, end_visible:end_rollout:step]
present = self.total_data[i, j:end_visible:step]
future = self.total_data[i, end_visible:end_rollout:step]
sample = {
'present_images': present_images,
'future_images': future_images,
'present_labels': present,
'future_labels': future,
}
if self.rl:
present_actions = self.total_actions[i, j:end_visible:step]
future_actions = self.total_actions[i, end_visible:end_rollout:step]
present_rewards = self.total_rewards[i, j:end_visible:step]
future_rewards = self.total_rewards[i, end_visible:end_rollout:step]
present_dones = self.total_dones[i, j:end_visible:step]
future_dones = self.total_dones[i, end_visible:end_rollout:step]
sample.update({
'present_actions': present_actions,
'future_actions': future_actions,
'present_rewards': present_rewards,
'future_rewards': future_rewards,
'present_dones': present_dones,
'future_dones': future_dones,
})
return sample
| [
"numpy.random.normal",
"numpy.clip",
"scipy.io.loadmat",
"pickle.load",
"numpy.stack",
"numpy.expand_dims",
"numpy.transpose"
] | [((458, 471), 'scipy.io.loadmat', 'loadmat', (['path'], {}), '(path)\n', (465, 471), False, 'from scipy.io import loadmat\n'), ((1734, 1779), 'numpy.transpose', 'np.transpose', (['self.total_img', '(0, 1, 4, 2, 3)'], {}), '(self.total_img, (0, 1, 4, 2, 3))\n', (1746, 1779), True, 'import numpy as np\n'), ((547, 561), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (558, 561), False, 'import pickle\n'), ((3218, 3262), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'self.total_data.shape'}), '(size=self.total_data.shape)\n', (3234, 3262), True, 'import numpy as np\n'), ((3618, 3634), 'numpy.clip', 'np.clip', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (3625, 3634), True, 'import numpy as np\n'), ((3651, 3671), 'numpy.expand_dims', 'np.expand_dims', (['x', '(2)'], {}), '(x, 2)\n', (3665, 3671), True, 'import numpy as np\n'), ((4328, 4357), 'numpy.stack', 'np.stack', (['[idx_ep, idx_fr]', '(2)'], {}), '([idx_ep, idx_fr], 2)\n', (4336, 4357), True, 'import numpy as np\n')] |
import pytest
import os
import sys
import numpy
import librosa
my_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, my_path + '/../')
import dj_feet.song as song
@pytest.fixture
def no_process_base_song(random_song_file):
    """Provide a Song built from the random song file with processing disabled."""
    unprocessed_song = song.Song(random_song_file, process=False)
    yield unprocessed_song
@pytest.fixture
def process_base_song(random_song_file):
    """Provide a fully processed Song built from the random song file."""
    processed_song = song.Song(random_song_file)
    yield processed_song
def test_no_process_data(no_process_base_song):
    """Analysis attributes stay None until set_process_data() is called."""
    assert no_process_base_song.tempo is None
    assert no_process_base_song.beat_track is None
    assert no_process_base_song.time_series is None
    assert no_process_base_song.sampling_rate is None
    no_process_base_song.set_process_data()
    # numpy.float was a deprecated alias of the builtin float and was removed
    # in NumPy 1.24; checking against the builtin type is equivalent (librosa
    # tempo values are numpy floating scalars, which subclass float).
    assert isinstance(no_process_base_song.tempo, float)
    assert isinstance(no_process_base_song.beat_track, numpy.ndarray)
    assert isinstance(no_process_base_song.time_series, numpy.ndarray)
    assert isinstance(no_process_base_song.sampling_rate, int)
def test_process_data(process_base_song):
    """A Song constructed with processing enabled exposes typed analysis data."""
    expected_types = (
        ("tempo", float),
        ("beat_track", numpy.ndarray),
        ("time_series", numpy.ndarray),
        ("sampling_rate", int),
    )
    for attr, expected_type in expected_types:
        assert isinstance(getattr(process_base_song, attr), expected_type)
@pytest.mark.parametrize(
    "begin,size",
    [(True, 30), (False, 15), (True, 15), (True, 1)])
def test_next_segment(process_base_song, begin, size):
    """next_segment() should return a zero-based span of the requested length."""
    sample_bounds = librosa.core.time_to_samples(
        numpy.array([0, size]), process_base_song.sampling_rate)
    expected_length = sample_bounds[1] - sample_bounds[0]
    result = process_base_song.next_segment(size, begin=begin)
    assert result[0] == 0
    assert result[0] < result[1]
    assert result[1] - result[0] == expected_length
@pytest.mark.parametrize("start,frames,expected",
                         [(10, 0, 0), (0, 0, 0), (-1, -1, -1)])
def test_time_delta(process_base_song, start, frames, expected):
    """time_delta() should convert a frame span into its duration in seconds."""
    # The (-1, -1, -1) case is a sentinel: it is rewritten to cover the whole
    # time series, and the expected duration is recomputed with librosa.
    if frames < 0:
        start = 0
        frames = len(process_base_song.time_series) - 1
        expected = librosa.samples_to_time(
            numpy.array([frames]), process_base_song.sampling_rate)[0]
    assert process_base_song.time_delta(start, start + frames) == expected
# NOTE: an empty ``test_frame_to_segment_time`` stub used to precede this test
# and was silently shadowed by this definition; the dead stub was removed so
# only the parametrized version is collected.
@pytest.mark.parametrize("time, start", [(10, 0), (0, 0), (25, 10)])
def test_frame_to_segment_time(process_base_song, time, start):
    """frame_to_segment_time() should return a frame at the requested offset."""
    frame_idx = process_base_song.frame_to_segment_time(time, start)
    if time == 0:
        assert frame_idx == start
    assert process_base_song.time_delta(start, frame_idx) == time
@pytest.mark.parametrize("start, end, expected",
                         [(0, 50000, True), (0, 0, False), (1000, 0, False),
                          (1000, 1100, False), (50000, 100000, True)])
def test_beat_tracks_in_segement(process_base_song, start, end, expected):
    """Beat tracks should be reported only for sufficiently wide segments."""
    beats = process_base_song.beat_tracks_in_segment(start, end)
    assert bool(beats) == expected
| [
"sys.path.insert",
"dj_feet.song.Song",
"pytest.mark.parametrize",
"numpy.array",
"os.path.abspath"
] | [((117, 153), 'sys.path.insert', 'sys.path.insert', (['(0)', "(my_path + '/../')"], {}), "(0, my_path + '/../')\n", (132, 153), False, 'import sys\n'), ((1255, 1346), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""begin,size"""', '[(True, 30), (False, 15), (True, 15), (True, 1)]'], {}), "('begin,size', [(True, 30), (False, 15), (True, 15),\n (True, 1)])\n", (1278, 1346), False, 'import pytest\n'), ((1692, 1784), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""start,frames,expected"""', '[(10, 0, 0), (0, 0, 0), (-1, -1, -1)]'], {}), "('start,frames,expected', [(10, 0, 0), (0, 0, 0), (-\n 1, -1, -1)])\n", (1715, 1784), False, 'import pytest\n'), ((2218, 2285), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""time, start"""', '[(10, 0), (0, 0), (25, 10)]'], {}), "('time, start', [(10, 0), (0, 0), (25, 10)])\n", (2241, 2285), False, 'import pytest\n'), ((2540, 2689), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""start, end, expected"""', '[(0, 50000, True), (0, 0, False), (1000, 0, False), (1000, 1100, False), (\n 50000, 100000, True)]'], {}), "('start, end, expected', [(0, 50000, True), (0, 0, \n False), (1000, 0, False), (1000, 1100, False), (50000, 100000, True)])\n", (2563, 2689), False, 'import pytest\n'), ((90, 115), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (105, 115), False, 'import os\n'), ((255, 297), 'dj_feet.song.Song', 'song.Song', (['random_song_file'], {'process': '(False)'}), '(random_song_file, process=False)\n', (264, 297), True, 'import dj_feet.song as song\n'), ((367, 394), 'dj_feet.song.Song', 'song.Song', (['random_song_file'], {}), '(random_song_file)\n', (376, 394), True, 'import dj_feet.song as song\n'), ((1462, 1484), 'numpy.array', 'numpy.array', (['[0, size]'], {}), '([0, size])\n', (1473, 1484), False, 'import numpy\n'), ((2019, 2040), 'numpy.array', 'numpy.array', (['[frames]'], {}), '([frames])\n', (2030, 2040), False, 'import numpy\n')] |
# Copyright (c) 2013, 2018 National Technology and Engineering Solutions of Sandia, LLC . Under the terms of Contract
# DE-NA0003525 with National Technology and Engineering Solutions of Sandia, LLC, the U.S. Government
# retains certain rights in this software.
import datetime
import numpy
import slycat.web.client
import threading
import time
def generate_model(connection, pid, marking, index):
  """Create one demo model on the Slycat server and drive fake progress.

  Sleeps for random intervals while stepping the model through simulated
  "uploading" and "running" phases, with small random chances of marking
  the model as failed along the way.
  """
  def random_failure(probability):
    # With the given probability, mark the model failed and report True.
    # Reads `mid` from the enclosing scope; it is assigned before the first
    # call to this helper.
    if numpy.random.uniform(0, 1) < probability:
      connection.update_model(mid, state="finished", result="failed", finished=datetime.datetime.utcnow().isoformat(), message="RANDOM FAILURE!!!")
      return True
    return False

  # Wait awhile before starting
  time.sleep(numpy.random.uniform(0, 5))

  mid = connection.post_project_models(pid, "generic", "Model %s %s" % (index, datetime.datetime.now()), marking)
  if random_failure(probability=0.01):
    return

  # Simulate uploading
  # NOTE(review): this loop variable shadows the `index` parameter used in
  # the model name above - harmless here since the parameter is not used
  # afterwards, but worth renaming.
  for index, progress in enumerate(numpy.linspace(0, 1, 4)):
    connection.update_model(mid, progress=progress, message="Uploading artifact %s" % index)
    if random_failure(probability=0.01):
      return
    time.sleep(numpy.random.uniform(0.1, 2))

  # Simulate computing
  for timestep, progress in enumerate(numpy.linspace(0, 1)):
    connection.update_model(mid, state="running", progress=progress, message="Timestep %s" % timestep)
    if random_failure(probability=0.005):
      return
    time.sleep(numpy.random.uniform(0.1, 0.5))

  # The model is ready
  connection.update_model(mid, state="finished", result="succeeded", finished=datetime.datetime.utcnow().isoformat(), progress=1.0, message="")
# Parse command-line options for the demo run.
parser = slycat.web.client.ArgumentParser()
parser.add_argument("--marking", default="", help="Marking type. Default: %(default)s")
parser.add_argument("--model-count", type=int, default=8, help="Model count. Default: %(default)s")
parser.add_argument("--project-name", default="Demo Model Progress Project", help="New project name. Default: %(default)s")
arguments = parser.parse_args()

# Connect to the Slycat server and get (or create) the target project.
connection = slycat.web.client.connect(arguments)
pid = connection.find_or_create_project(arguments.project_name)

# Generate all demo models concurrently, one thread per model, and wait for
# every thread to finish before exiting.
threads = [threading.Thread(target=generate_model, args=(connection, pid, arguments.marking, i)) for i in range(arguments.model_count)]
for thread in threads:
  thread.start()
for thread in threads:
  thread.join()
| [
"datetime.datetime.utcnow",
"datetime.datetime.now",
"numpy.linspace",
"numpy.random.uniform",
"threading.Thread"
] | [((2162, 2252), 'threading.Thread', 'threading.Thread', ([], {'target': 'generate_model', 'args': '(connection, pid, arguments.marking, i)'}), '(target=generate_model, args=(connection, pid, arguments.\n marking, i))\n', (2178, 2252), False, 'import threading\n'), ((714, 740), 'numpy.random.uniform', 'numpy.random.uniform', (['(0)', '(5)'], {}), '(0, 5)\n', (734, 740), False, 'import numpy\n'), ((967, 990), 'numpy.linspace', 'numpy.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (981, 990), False, 'import numpy\n'), ((1247, 1267), 'numpy.linspace', 'numpy.linspace', (['(0)', '(1)'], {}), '(0, 1)\n', (1261, 1267), False, 'import numpy\n'), ((443, 469), 'numpy.random.uniform', 'numpy.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (463, 469), False, 'import numpy\n'), ((1155, 1183), 'numpy.random.uniform', 'numpy.random.uniform', (['(0.1)', '(2)'], {}), '(0.1, 2)\n', (1175, 1183), False, 'import numpy\n'), ((1443, 1473), 'numpy.random.uniform', 'numpy.random.uniform', (['(0.1)', '(0.5)'], {}), '(0.1, 0.5)\n', (1463, 1473), False, 'import numpy\n'), ((822, 845), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (843, 845), False, 'import datetime\n'), ((1577, 1603), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1601, 1603), False, 'import datetime\n'), ((564, 590), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (588, 590), False, 'import datetime\n')] |
#-*-coding:utf-8-*-
"""
Gauss Newton
Method specialized for LS problems.
"""
import torch
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d as mp3
from torch.autograd import Variable as Var
from torch.autograd import grad
# One specified non-linear function.
def testFunc(dot, param):
return dot.T @ dot * param[0] + dot[0] * param[1] + dot[1] * param[2] + 1 / (dot.T @ dot + param[3])
"""
Gauss-Newton Method
Easy to understand. Knowing that it's costy to compute Hessian Matrix.
Why don't we just approximate it with Jacobian?
This Gauss-Newton Method can be applied to LS problem only? Maybe, for in LSP
The Hessian of error term is $2(J^TJ+S)$, where S can be ignored.
The problem of 3D curve fitting.
Each row of data represents a 2D dot(x, y)(for 2D is easy to visualize)
whereas each row possesses a 3-column structure, for the pos[2] is the oberseved value
"""
def GaussNewton(data, param_init, func, max_iter = 30, criteria = 1e-4):
    """Fit the parameters of ``func`` to observed data with Gauss-Newton.

    Parameters
    ----------
    data : array-like of shape (n_samples, 3)
        Each row is (x, y, observed value).
    param_init : torch.Tensor
        Initial parameter guess (1-D tensor).
    func : callable
        ``func(dot, param)`` -> differentiable scalar prediction for a 2-D dot.
    max_iter : int
        Maximum number of Gauss-Newton iterations.
    criteria : float
        Convergence threshold on the parameter-update norm; iteration stops
        early once the update becomes smaller (this argument was previously
        accepted but never used).

    Returns
    -------
    numpy.ndarray
        The fitted parameters.
    """
    def evaluate(dot, param, func):
        # Jacobian row (gradient w.r.t. param) and residual for one sample.
        val = func(dot[:2], param)
        res = float(val.data - dot[2])
        g = grad(val, param)[0]
        return g, res
    if not isinstance(data, torch.Tensor):
        data = torch.Tensor(data)
    param = Var(param_init, requires_grad=True)
    n_samples = data.size()[0]
    for _ in range(max_iter):
        residual = torch.zeros((n_samples, 1))
        J, residual[0] = evaluate(data[0], param, func)
        # Use a distinct loop variable; the original shadowed the outer index.
        for k, dot in enumerate(data[1:]):
            row, res = evaluate(dot, param, func)
            J = torch.vstack((J, row))
            residual[k + 1] = res
        # Gauss-Newton: approximate the Hessian by J^T J and solve the normal
        # equations directly; torch.solve was removed in PyTorch 1.13 and the
        # explicit inverse it computed was unnecessary anyway.
        H_aprx = J.T @ J
        step = torch.linalg.solve(H_aprx, J.T @ residual)
        param.data -= step.view(-1)
        # Stop early once the update is negligible.
        if torch.linalg.norm(step) < criteria:
            break
    return param.data.numpy()
def generateData(func, param, xy = -6, n = 20, sigma = 4):
    """Sample ``func`` on an n-by-n grid and return clean plus noisy data.

    Returns (truth, noised, xx, yy): truth/noised are (n*n, 3) arrays of
    (x, y, value) rows, and xx/yy are the meshgrid coordinate matrices.
    Note the default negative ``xy`` means the axis runs from -xy down to xy.
    """
    axis = np.linspace(-xy, xy, n)
    xx, yy = np.meshgrid(axis, axis)
    dots = np.c_[xx.ravel(), yy.ravel()]
    clean = np.array([func(dot, param) for dot in dots])
    noisy = clean + np.random.normal(0, sigma, clean.size)
    truth = np.column_stack((dots, clean))
    noised = np.column_stack((dots, noisy))
    return truth, noised, xx, yy
def showResult(func, param, truth, noised, xx, yy):
    """Visualize ground truth, noisy samples, and the fitted surface in 3-D.

    Blue: ground truth; red: noisy observations; green: the surface produced
    by evaluating ``func`` with the fitted ``param`` on the same grid.
    Blocks until the matplotlib window is closed.
    """
    fig = plt.figure()
    ax = mp3.Axes3D(fig)
    xs = truth[:, 0]
    ys = truth[:, 1]
    # ax.plot3D(xs, ys, truth[:, 2], c = 'b')
    # Ground-truth points (blue) and noisy observations (red).
    ax.scatter3D(xs, ys, truth[:, 2], c = 'b', s = 7)
    ax.scatter3D(noised[:, 0], noised[:, 1], noised[:, 2], c = 'r', s = 7)
    ax.plot_surface(xx, yy, truth[:, 2].reshape(xx.shape), color = 'b', alpha = 0.4)
    # Evaluate the fitted parameters on the same grid (green).
    dots = truth[:, :2]
    res = np.array([func(dot, param) for dot in dots])
    ax.plot_surface(xx, yy, res.reshape(xx.shape), color = 'g', alpha = 0.4)
    ax.scatter3D(xs, ys, res, c = 'g', s = 7)
    plt.show()
if __name__ == '__main__':
    # Ground-truth parameters used to synthesize the noisy surface.
    params = torch.FloatTensor([-0.1, 2, 1, 1])
    truth, noised, xx, yy = generateData(testFunc, params)
    # Fit on the noisy observations, starting from the true parameters.
    res_param = GaussNewton(torch.Tensor(noised), params, testFunc)
    showResult(testFunc, res_param, truth, noised, xx, yy)
"numpy.random.normal",
"torch.vstack",
"torch.eye",
"torch.Tensor",
"matplotlib.pyplot.figure",
"numpy.linspace",
"torch.autograd.grad",
"torch.autograd.Variable",
"torch.FloatTensor",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.show"
] | [((1294, 1329), 'torch.autograd.Variable', 'Var', (['param_init'], {'requires_grad': '(True)'}), '(param_init, requires_grad=True)\n', (1297, 1329), True, 'from torch.autograd import Variable as Var\n'), ((2386, 2398), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2396, 2398), True, 'import matplotlib.pyplot as plt\n'), ((2408, 2423), 'mpl_toolkits.mplot3d.Axes3D', 'mp3.Axes3D', (['fig'], {}), '(fig)\n', (2418, 2423), True, 'import mpl_toolkits.mplot3d as mp3\n'), ((2933, 2943), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2941, 2943), True, 'import matplotlib.pyplot as plt\n'), ((2985, 3019), 'torch.FloatTensor', 'torch.FloatTensor', (['[-0.1, 2, 1, 1]'], {}), '([-0.1, 2, 1, 1])\n', (3002, 3019), False, 'import torch\n'), ((1236, 1254), 'torch.Tensor', 'torch.Tensor', (['data'], {}), '(data)\n', (1248, 1254), False, 'import torch\n'), ((1974, 1997), 'numpy.linspace', 'np.linspace', (['(-xy)', 'xy', 'n'], {}), '(-xy, xy, n)\n', (1985, 1997), True, 'import numpy as np\n'), ((1999, 2022), 'numpy.linspace', 'np.linspace', (['(-xy)', 'xy', 'n'], {}), '(-xy, xy, n)\n', (2010, 2022), True, 'import numpy as np\n'), ((2144, 2180), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma', 'val.size'], {}), '(0, sigma, val.size)\n', (2160, 2180), True, 'import numpy as np\n'), ((3108, 3128), 'torch.Tensor', 'torch.Tensor', (['noised'], {}), '(noised)\n', (3120, 3128), False, 'import torch\n'), ((1140, 1156), 'torch.autograd.grad', 'grad', (['val', 'param'], {}), '(val, param)\n', (1144, 1156), False, 'from torch.autograd import grad\n'), ((1646, 1667), 'torch.vstack', 'torch.vstack', (['(J, _J)'], {}), '((J, _J))\n', (1658, 1667), False, 'import torch\n'), ((1759, 1774), 'torch.eye', 'torch.eye', (['ndim'], {}), '(ndim)\n', (1768, 1774), False, 'import torch\n')] |
"""
Test 3
SVM
"""
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import tensorflow as tf
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler, MaxAbsScaler
from sklearn.preprocessing import Normalizer, QuantileTransformer, PowerTransformer
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.metrics import classification_report
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    """Fit ``classifier`` on (X, y) and plot its 2-D decision regions.

    Parameters
    ----------
    X : ndarray of shape (n_samples, 2)
        Two-feature input matrix (only two features can be visualized).
    y : ndarray of shape (n_samples,)
        Class labels.
    classifier : estimator with fit/predict
        Note: the classifier is (re)fitted in place on (X, y).
    test_idx : slice or array-like, optional
        Indices of test examples to highlight with hollow circles.
    resolution : float
        Grid step of the decision-surface mesh.
    """
    # setup marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    classifier.fit(X, y)
    # plot the decision surface over a mesh padding the data range by 1 unit
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0],
                    y=X[y == cl, 1],
                    alpha=0.8,
                    c=colors[idx],
                    marker=markers[idx],
                    label=cl,
                    edgecolor='black')

    # highlight test examples
    if test_idx:
        # plot all examples
        X_test, y_test = X[test_idx, :], y[test_idx]

        # BUGFIX: c='' is rejected by Matplotlib >= 3.1; 'none' draws the
        # intended hollow markers.
        plt.scatter(X_test[:, 0],
                    X_test[:, 1],
                    c='none',
                    edgecolor='black',
                    alpha=1.0,
                    linewidth=1,
                    marker='o',
                    s=100,
                    label='test set')
# Reading data
df = pd.read_csv('NewBioDegWCols.csv')
# QSAR biodegradation descriptor names; the last column TAR is the target.
df.columns = ['SpMax_L','J_Dz','nHM','F01','F04','NssssC','nCb-','C%','nCp',
              'n0','F03CN','SdssC','HyWi_B','LOC','SM6_L','F03CO','Me','Mi',
              'nN-N','nArN02','nCRX3','SpPosA_B','nCIR','B01','B03','N-073',
              'SpMax_A','Psi_i_1d','B04','Sd0','TI2_L','nCrt','c-026','F02',
              'nHDon','SpMax_B','Psi_i_A','nN','SM6_B','nArCOOR','nX','TAR']

# Encode the target: ready-biodegradable (RB) -> 1, not (NRB) -> 0.
df['TAR'] = df['TAR'].replace(['RB', 'NRB'], [1, 0])
# Literal 'NaN' strings become real NaNs, then impute with column means.
df.replace(to_replace='NaN', value=np.nan, regex=True, inplace=True)
# df.mean(), df.median()
df.fillna(df.mean(), inplace=True)

#Remove features that cause a net increase in metrics when removed and
#target feature, obv
X = df[[i for i in list(df.columns) if i != 'TAR' and i!= 'C%'
        and i!= 'F03CO' and i!= 'J_Dz'and i!= 'HyWi_B' and i!= ''
        ]]
y = df['TAR']
feat_labels = X.columns
#131
# Hold out 20% as the test set (random_state=131 for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2,
                                                    random_state=131)
#54
# Carve a validation set (25% of the remaining training data).
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
                                                  test_size=0.25,
                                                  random_state=54)

# Standardizing the features:
#Scalars: MinMax - lowers all scores
#         Robust - performs slightly worse than standard
#         MaxAbs - Lowers all scores
#         QuantileTransformer - Lowers all scores
#         powerTransformer - Lowers all scores
sc = StandardScaler()
# Fit the scaler on the training split only to avoid test-set leakage.
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
#random forest feature selection
forest = RandomForestClassifier(n_estimators=500,
                                random_state=1)

# NOTE(review): the forest is fit on ALL rows (including the test split), so
# the importance ranking leaks test data - confirm this is intended.
forest.fit(X, y)
importances = forest.feature_importances_

# Feature indices sorted by decreasing impurity-based importance.
indices = np.argsort(importances)[::-1]

for f in range(X.shape[1]):
    print("%2d) %-*s %f" % (f + 1, 30,
                            feat_labels[indices[f]],
                            importances[indices[f]]))

# Select features whose importance exceeds the default (mean) threshold.
sfm = SelectFromModel(forest, prefit=True)
X_selected = sfm.transform(X)
print('Number of features that meet this threshold criterion:',
      X_selected.shape[1])
print("Threshold %f" % np.mean(importances))
# Now, let's print the features that met the threshold criterion for feature selection that we set earlier (note that this code snippet does not appear in the actual book but was added to this notebook later for illustrative purposes):
cols = []
for f in range(X_selected.shape[1]):
    cols.append(feat_labels[indices[f]])
    print("%2d) %-*s %f" % (f + 1, 30,
                            feat_labels[indices[f]],
                            importances[indices[f]]))

# NOTE(review): this slice keeps the FIRST k standardized columns in their
# original order, not the k most-important features printed above - it does
# not match the `cols` list. Verify this is the intended behavior.
X_train_std = X_train_std[:, :X_selected.shape[1]]
X_test_std = X_test_std[: , :X_selected.shape[1]]
##
'''
param_grid = {'C': [10, 15, 20, 25, 30, 35, 40, 45, 50],
              'kernel': ['rbf', 'sigmoid', 'linear', 'poly'],
              'random_state': range(0,30),
              'gamma': ['scale', 'auto']}

lg = GridSearchCV(SVC(), param_grid, verbose = 0, scoring = 'accuracy')
lg.fit(X_train_std, y_train)
print()
print(lg.best_params_)
'''
# Hyperparameters chosen by the (commented-out) grid search above.
svm = SVC(kernel='rbf', C=20.0, random_state=0, gamma = 'auto')

# NOTE(review): cross_val_score below runs on the UNSCALED splits while the
# final fit/predict uses the standardized, column-sliced arrays - confirm.
scores = cross_val_score(svm, X_train, y_train, cv=5)
print("CV Train Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
scores = cross_val_score(svm, X_test, y_test, cv=5)
print("CV Test Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
scores = cross_val_score(svm, X_val, y_val, cv=5)
print("CV Validation Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))

# Final fit on the standardized training data and held-out-test evaluation.
svm.fit(X_train_std, y_train)
svc_pred = svm.predict(X_test_std)
X_combined_std = np.vstack((X_train_std[:, 1:], X_test_std[:, 1:]))
y_combined = np.hstack((y_train, y_test))
#plot_decision_regions(X=X_combined_std, y=y_combined, classifier=SVC(kernel='rbf', C=10.0, random_state=1))
#plt.savefig("svm.png")
#plt.show()
print(classification_report(y_test, svc_pred))
print("SVM Testing Accuracy: %.3f" % accuracy_score(y_test, svc_pred))
print("SVM Testing F1-Score: %.3f" % f1_score(y_test, svc_pred))
print("SVM Testing Precision: %.3f" % precision_score(y_test, svc_pred))
print("SVM Testing Recall: %.3f" % recall_score(y_test, svc_pred))
| [
"pandas.read_csv",
"numpy.hstack",
"sklearn.metrics.classification_report",
"sklearn.metrics.precision_score",
"numpy.argsort",
"sklearn.metrics.recall_score",
"numpy.arange",
"matplotlib.pyplot.contourf",
"numpy.mean",
"numpy.vstack",
"matplotlib.pyplot.scatter",
"sklearn.model_selection.cros... | [((2346, 2379), 'pandas.read_csv', 'pd.read_csv', (['"""NewBioDegWCols.csv"""'], {}), "('NewBioDegWCols.csv')\n", (2357, 2379), True, 'import pandas as pd\n'), ((3267, 3322), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(131)'}), '(X, y, test_size=0.2, random_state=131)\n', (3283, 3322), False, 'from sklearn.model_selection import train_test_split\n'), ((3414, 3481), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'y_train'], {'test_size': '(0.25)', 'random_state': '(54)'}), '(X_train, y_train, test_size=0.25, random_state=54)\n', (3430, 3481), False, 'from sklearn.model_selection import train_test_split\n'), ((3873, 3889), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3887, 3889), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler, MaxAbsScaler\n'), ((4019, 4075), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(500)', 'random_state': '(1)'}), '(n_estimators=500, random_state=1)\n', (4041, 4075), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((4395, 4431), 'sklearn.feature_selection.SelectFromModel', 'SelectFromModel', (['forest'], {'prefit': '(True)'}), '(forest, prefit=True)\n', (4410, 4431), False, 'from sklearn.feature_selection import SelectFromModel\n'), ((5541, 5596), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'C': '(20.0)', 'random_state': '(0)', 'gamma': '"""auto"""'}), "(kernel='rbf', C=20.0, random_state=0, gamma='auto')\n", (5544, 5596), False, 'from sklearn.svm import SVC\n'), ((5610, 5654), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['svm', 'X_train', 'y_train'], {'cv': '(5)'}), '(svm, X_train, y_train, cv=5)\n', (5625, 5654), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((5747, 5789), 
'sklearn.model_selection.cross_val_score', 'cross_val_score', (['svm', 'X_test', 'y_test'], {'cv': '(5)'}), '(svm, X_test, y_test, cv=5)\n', (5762, 5789), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((5881, 5921), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['svm', 'X_val', 'y_val'], {'cv': '(5)'}), '(svm, X_val, y_val, cv=5)\n', (5896, 5921), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score\n'), ((6094, 6144), 'numpy.vstack', 'np.vstack', (['(X_train_std[:, 1:], X_test_std[:, 1:])'], {}), '((X_train_std[:, 1:], X_test_std[:, 1:]))\n', (6103, 6144), True, 'import numpy as np\n'), ((6158, 6186), 'numpy.hstack', 'np.hstack', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (6167, 6186), True, 'import numpy as np\n'), ((1484, 1531), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx1', 'xx2', 'Z'], {'alpha': '(0.3)', 'cmap': 'cmap'}), '(xx1, xx2, Z, alpha=0.3, cmap=cmap)\n', (1496, 1531), True, 'import matplotlib.pyplot as plt\n'), ((4177, 4200), 'numpy.argsort', 'np.argsort', (['importances'], {}), '(importances)\n', (4187, 4200), True, 'import numpy as np\n'), ((6339, 6378), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'svc_pred'], {}), '(y_test, svc_pred)\n', (6360, 6378), False, 'from sklearn.metrics import classification_report\n'), ((1279, 1316), 'numpy.arange', 'np.arange', (['x1_min', 'x1_max', 'resolution'], {}), '(x1_min, x1_max, resolution)\n', (1288, 1316), True, 'import numpy as np\n'), ((1345, 1382), 'numpy.arange', 'np.arange', (['x2_min', 'x2_max', 'resolution'], {}), '(x2_min, x2_max, resolution)\n', (1354, 1382), True, 'import numpy as np\n'), ((1632, 1644), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1641, 1644), True, 'import numpy as np\n'), ((1655, 1780), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': 'X[y == cl, 0]', 'y': 'X[y == cl, 1]', 'alpha': '(0.8)', 'c': 'colors[idx]', 'marker': 'markers[idx]', 'label': 
'cl', 'edgecolor': '"""black"""'}), "(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=colors[idx],\n marker=markers[idx], label=cl, edgecolor='black')\n", (1666, 1780), True, 'import matplotlib.pyplot as plt\n'), ((2038, 2167), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_test[:, 0]', 'X_test[:, 1]'], {'c': '""""""', 'edgecolor': '"""black"""', 'alpha': '(1.0)', 'linewidth': '(1)', 'marker': '"""o"""', 's': '(100)', 'label': '"""test set"""'}), "(X_test[:, 0], X_test[:, 1], c='', edgecolor='black', alpha=1.0,\n linewidth=1, marker='o', s=100, label='test set')\n", (2049, 2167), True, 'import matplotlib.pyplot as plt\n'), ((4577, 4597), 'numpy.mean', 'np.mean', (['importances'], {}), '(importances)\n', (4584, 4597), True, 'import numpy as np\n'), ((6418, 6450), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'svc_pred'], {}), '(y_test, svc_pred)\n', (6432, 6450), False, 'from sklearn.metrics import accuracy_score\n'), ((6489, 6515), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'svc_pred'], {}), '(y_test, svc_pred)\n', (6497, 6515), False, 'from sklearn.metrics import f1_score\n'), ((6555, 6588), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'svc_pred'], {}), '(y_test, svc_pred)\n', (6570, 6588), False, 'from sklearn.metrics import precision_score\n'), ((6625, 6655), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'svc_pred'], {}), '(y_test, svc_pred)\n', (6637, 6655), False, 'from sklearn.metrics import recall_score\n'), ((1063, 1075), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1072, 1075), True, 'import numpy as np\n')] |
import numpy
numpy.set_printoptions(legacy='1.13')

if __name__ == "__main__":
    # Read one line of space-separated numbers and print their floor,
    # ceiling, and nearest-integer roundings (HackerRank I/O format).
    values = numpy.array(input().strip().split(" "), float)
    for rounding in (numpy.floor, numpy.ceil, numpy.rint):
        print(rounding(values))
"numpy.ceil",
"numpy.floor",
"numpy.array",
"numpy.rint",
"numpy.set_printoptions"
] | [((14, 51), 'numpy.set_printoptions', 'numpy.set_printoptions', ([], {'legacy': '"""1.13"""'}), "(legacy='1.13')\n", (36, 51), False, 'import numpy\n'), ((130, 155), 'numpy.array', 'numpy.array', (['array', 'float'], {}), '(array, float)\n', (141, 155), False, 'import numpy\n'), ((167, 185), 'numpy.floor', 'numpy.floor', (['array'], {}), '(array)\n', (178, 185), False, 'import numpy\n'), ((197, 214), 'numpy.ceil', 'numpy.ceil', (['array'], {}), '(array)\n', (207, 214), False, 'import numpy\n'), ((226, 243), 'numpy.rint', 'numpy.rint', (['array'], {}), '(array)\n', (236, 243), False, 'import numpy\n')] |
import itertools
import logging
import math
from copy import deepcopy
import numpy as np
import pandas as pd
from scipy.ndimage.filters import uniform_filter1d
import basty.utils.misc as misc
np.seterr(all="ignore")
class SpatioTemporal:
def __init__(self, fps, stft_cfg={}):
self.stft_cfg = deepcopy(stft_cfg)
self.logger = logging.getLogger("main")
assert fps > 0
self.get_delta = lambda x, scale: self.calc_delta(x, scale, fps)
self.get_moving_mean = lambda x, winsize: self.calc_moving_mean(x, winsize, fps)
self.get_moving_std = lambda x, winsize: self.calc_moving_std(x, winsize, fps)
delta_scales_ = [100, 300, 500]
window_sizes_ = [300, 500]
if "delta_scales" not in stft_cfg.keys():
self.logger.info(
"Scale valuess can not be found in configuration for delta features."
+ f"Default values are {str(delta_scales_)[1:-1]}."
)
if "window_sizes" not in stft_cfg.keys():
self.logger.info(
"Window sizes can not be found in configuration for window features."
+ f"Default values are {str(window_sizes_)[1:-1]}."
)
self.stft_cfg["delta_scales"] = stft_cfg.get("delta_scales", delta_scales_)
self.stft_cfg["window_sizes"] = stft_cfg.get("window_sizes", window_sizes_)
self.stft_set = ["pose", "distance", "angle"]
for ft_set in self.stft_set:
ft_set_dt = ft_set + "_delta"
self.stft_cfg[ft_set] = stft_cfg.get(ft_set, [])
self.stft_cfg[ft_set_dt] = stft_cfg.get(ft_set_dt, [])
self.angle_between = self.angle_between_atan
@staticmethod
def angle_between_arccos(v1, v2):
"""
Returns the abs(angle) in radians between vectors 'v1' and 'v2'.
angle_between((1, 0, 0), (0, 1, 0)) --> 1.5707963267948966
angle_between((1, 0, 0), (1, 0, 0)) --> 0.0
angle_between((1, 0, 0), (-1, 0, 0)) --> 3.141592653589793
"""
assert isinstance(v1, np.ndarray) and isinstance(v2, np.ndarray)
v1_u = v1 / np.linalg.norm(v1)
v2_u = v2 / np.linalg.norm(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
@staticmethod
def angle_between_atan(v1, v2):
"""
Returns the abs(angle) in radians between vectors 'v1' and 'v2'.
"""
assert isinstance(v1, np.ndarray) and isinstance(v2, np.ndarray)
angle = np.math.atan2(np.linalg.det([v1, v2]), np.dot(v1, v2))
return np.abs(angle)
def get_group_value(self, stft_group, opt):
if opt == "avg":
group_value = np.nanmean(stft_group, axis=1)
elif opt == "min":
group_value = np.nanamin(stft_group, axis=1)
elif opt == "max":
group_value = np.nanmax(stft_group, axis=1)
else:
raise ValueError(f"Unkown option {opt} is given for feature group.")
return group_value
@staticmethod
def calc_delta(x, scale, fps):
# In terms of millisecond.
delta_values = []
scale_frame = math.ceil(fps * (1000 / scale))
y = uniform_filter1d(x, size=scale_frame, axis=0)
delta_y = np.abs(np.gradient(y, 1 / fps * 1000, axis=0, edge_order=2))
delta_values.append(delta_y)
return delta_values
@staticmethod
def calc_moving_mean(x, winsize, fps):
mean_values = []
w_frame = math.ceil(fps * (winsize / 1000))
mean_values.append(x.rolling(w_frame, min_periods=1, center=True).mean())
return mean_values
@staticmethod
def calc_moving_std(x, winsize, fps):
std_values = []
w_frame = math.ceil(fps * (winsize / 1000))
std_values.append(x.rolling(w_frame, min_periods=1, center=True).std())
return std_values
def extract(self, ft_set, df_pose, ft_cfg_set):
extraction_functions = {
"pose": self._extract_pose,
"angle": self._extract_angle,
"distance": self._extract_distance,
}
val = extraction_functions[ft_set](df_pose, ft_cfg_set)
return val
    def get_column_names(self, ft_set):
        """Build human-readable column names for the given feature-set key.

        Pose features expand into per-coordinate names ("<bp>_x", "<bp>_y");
        delta feature sets get one name per (feature, scale) combination,
        suffixed ".s<scale>". Raises ValueError for an unknown or empty set.
        """
        stft_cfg = self.stft_cfg
        name_col = []

        def get_stft_name(defn):
            # dict definition -> "opt(a-b,c-d)"; list definition -> "a-b-c".
            if isinstance(defn, dict):
                name = (
                    list(defn.keys())[0]
                    + "("
                    + ",".join(["-".join(item) for item in list(defn.values())[0]])
                    + ")"
                )
            elif isinstance(defn, list):
                name = "-".join(defn)
            else:
                raise ValueError(
                    f"Given feature definition {defn} has incorrect formatting."
                )
            return name

        if not stft_cfg.get(ft_set, False):
            raise ValueError(f"Unkown value {ft_set} is given for feature set.")

        if "pose" in ft_set:
            # Each body-part contributes separate _x and _y coordinate names.
            ft_names = list(
                itertools.chain.from_iterable(
                    ([item + "_x"], [item + "_y"]) for item in stft_cfg[ft_set]
                )
            )
        else:
            ft_names = stft_cfg[ft_set]

        if "delta" not in ft_set:
            name_col = [ft_set + "." + get_stft_name(item) for item in ft_names]
        else:
            # One column per (feature, delta scale) pair.
            scales = stft_cfg["delta_scales"]
            name_col = misc.flatten(
                [
                    [
                        ft_set + "." + get_stft_name(item) + ".s" + str(t)
                        for item in ft_names
                    ]
                    for t in scales
                ]
            )
        return name_col
@staticmethod
def _get_coord(df_pose, name, axis):
# Axis name x or y.
name_c = name + "_" + axis
if name_c in df_pose.columns:
coord = df_pose[name_c]
elif name == "origin":
coord = np.zeros(df_pose.shape[0])
else:
raise ValueError(f"No coordinate values can be found for {name}.")
return coord
def _extract_pose(self, df_pose, body_parts):
xy_pose_values = np.ndarray((df_pose.shape[0], len(body_parts) * 2))
if not isinstance(body_parts, list):
raise ValueError(
f"Given argument has type {type(body_parts)}."
+ "Pose features should be defined by a list of body-parts."
)
for i, bp in enumerate(body_parts):
if not isinstance(bp, str):
raise ValueError(
f"Given feature definition contains {bp}, which is not a body-part."
)
xy_pose_values[:, i * 2] = self.__class__._get_coord(df_pose, bp, "x")
xy_pose_values[:, i * 2 + 1] = self.__class__._get_coord(df_pose, bp, "y")
return xy_pose_values
def _extract_angle(self, df_pose, triplets):
angle_values = np.ndarray((df_pose.shape[0], len(triplets)))
def f_angle(x):
return self.angle_between(x[:2] - x[2:4], x[4:] - x[2:4])
def angle_along_axis(xy_values, angle_values):
for j in range(xy_values.shape[0]):
v1 = xy_values[j, :2] - xy_values[j, 2:4]
v2 = xy_values[j, 4:] - xy_values[j, 2:4]
angle_values[j, i] = self.angle_between(v1, v2)
return angle_values
for i, triplet in enumerate(triplets):
if isinstance(triplet, dict):
opt = list(triplet.keys())[0]
group = list(triplet.values())[0]
if len(group) > 0 and opt in ["avg", "min", "max"]:
angle_group = self._extract_angle(df_pose, group)
else:
raise ValueError(f"Given feature definition {triplet} is unknown.")
angle_values[:, i] = self.get_group_value(angle_group, opt)
else:
xy_values, _ = self._extract_pose(df_pose, triplet)
# angle_values[:, i] = np.apply_along_axis(f_angle, 1, xy_values)
# This is somehow faster.
angle_values[:, i] = angle_along_axis(xy_values, angle_values)
return angle_values
def _extract_distance(self, df_pose, pairs):
distance_values = np.ndarray((df_pose.shape[0], len(pairs)))
for i, pair in enumerate(pairs):
if isinstance(pair, dict):
opt = list(pair.keys())[0]
group = list(pair.values())[0]
if len(group) > 0 and opt in ["avg", "min", "max"]:
distance_group = self._extract_distance(df_pose, group)
else:
raise ValueError(f"Given feature definition {pair} is unkwon.")
distance_values[:, i] = self.get_group_value(distance_group, opt)
else:
xy_values = self._extract_pose(df_pose, pair)
diff_xy = xy_values[:, 2:4] - xy_values[:, :2]
distance_values[:, i] = np.sqrt(diff_xy[:, 0] ** 2 + diff_xy[:, 1] ** 2)
return distance_values
def _extract_moving_stat(self, df_stft, stft_names_dict, stat, winsizes):
if stat == "mean":
get_moving_stat = self.get_moving_mean
elif stat == "std":
get_moving_stat = self.get_moving_std
else:
raise ValueError(f"Unkown value {stat} is given for moving statistics.")
name_col = df_stft.columns
mv_stat = pd.concat(
itertools.chain(*map(lambda w: get_moving_stat(df_stft, w), winsizes)),
axis=1,
)
df_stat = pd.DataFrame(data=mv_stat)
stat_columns = misc.flatten(
[
[
stat + "." + stft_names_dict[name] + ".w" + str(w)
for name in name_col
]
for w in winsizes
]
)
name_dict = {i: stat_columns[i] for i in range(len(stat_columns))}
df_stat.columns = list(name_dict.keys())
return df_stat, name_dict
def extract_snap_stft(self, df_pose):
stft_cfg = self.stft_cfg
df_snap_list = []
for ft_set in self.stft_set:
if stft_cfg.get(ft_set, False):
temp_df = pd.DataFrame(self.extract(ft_set, df_pose, stft_cfg[ft_set]))
temp_df.columns = self.get_column_names(ft_set)
df_snap_list.append(temp_df)
if len(df_snap_list) <= 0:
raise ValueError(
"At least one snap feature must given in the feature configuration."
)
df_snap = pd.concat(df_snap_list, axis=1)
name_col = df_snap.columns
name_dict = {i: name_col[i] for i in range(len(name_col))}
df_snap.columns = list(name_dict.keys())
self.ftname_to_snapft = name_dict
return df_snap, name_dict
def extract_delta_stft(self, df_pose):
stft_cfg = self.stft_cfg
delta_scales = stft_cfg["delta_scales"]
df_delta_list = []
for ft_set in self.stft_set:
ft_set_dt = ft_set + "_delta"
if stft_cfg.get(ft_set_dt, False):
temp_snap = self.extract(ft_set, df_pose, stft_cfg[ft_set_dt])
temp_delta = itertools.chain(
*map(
lambda s: self.get_delta(temp_snap, s),
delta_scales,
)
)
temp_df = pd.DataFrame(
np.concatenate(
tuple(temp_delta),
axis=1,
),
columns=self.get_column_names(ft_set_dt),
)
df_delta_list.append(temp_df)
if len(df_delta_list) <= 0:
raise ValueError(
"At least one delta feature must given in the feature configuration."
)
df_delta = pd.concat(df_delta_list, axis=1)
name_col = df_delta.columns
name_dict = {i: name_col[i] for i in range(len(name_col))}
df_delta.columns = list(name_dict.keys())
self.ftname_to_deltaft = name_dict
return df_delta, name_dict
def extract_window_snap_stft(self, df_stft, opt):
window_sizes = self.stft_cfg["window_sizes"]
if opt == "mean":
df_window, name_dict = self._extract_moving_stat(
df_stft, self.ftname_to_snapft, "mean", window_sizes
)
self.ftname_to_snapft_mean = name_dict
elif opt == "std":
df_window, name_dict = self._extract_moving_stat(
df_stft, self.ftname_to_snapft, "std", window_sizes
)
self.ftname_to_snapft_std = name_dict
return df_window, name_dict
def extract_window_delta_stft(self, df_stft, opt):
window_sizes = self.stft_cfg["window_sizes"]
if opt == "mean":
df_window, name_dict = self._extract_moving_stat(
df_stft, self.ftname_to_deltaft, "mean", window_sizes
)
self.ftname_to_deltaft_mean = name_dict
elif opt == "std":
df_window, name_dict = self._extract_moving_stat(
df_stft, self.ftname_to_deltaft, "std", window_sizes
)
self.ftname_to_deltaft_std = name_dict
return df_window, name_dict
| [
"logging.getLogger",
"numpy.abs",
"math.ceil",
"numpy.nanamin",
"numpy.sqrt",
"numpy.linalg.norm",
"numpy.linalg.det",
"numpy.nanmean",
"numpy.dot",
"itertools.chain.from_iterable",
"pandas.concat",
"numpy.zeros",
"numpy.nanmax",
"copy.deepcopy",
"pandas.DataFrame",
"numpy.gradient",
... | [((195, 218), 'numpy.seterr', 'np.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (204, 218), True, 'import numpy as np\n'), ((309, 327), 'copy.deepcopy', 'deepcopy', (['stft_cfg'], {}), '(stft_cfg)\n', (317, 327), False, 'from copy import deepcopy\n'), ((350, 375), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (367, 375), False, 'import logging\n'), ((2579, 2592), 'numpy.abs', 'np.abs', (['angle'], {}), '(angle)\n', (2585, 2592), True, 'import numpy as np\n'), ((3150, 3181), 'math.ceil', 'math.ceil', (['(fps * (1000 / scale))'], {}), '(fps * (1000 / scale))\n', (3159, 3181), False, 'import math\n'), ((3195, 3240), 'scipy.ndimage.filters.uniform_filter1d', 'uniform_filter1d', (['x'], {'size': 'scale_frame', 'axis': '(0)'}), '(x, size=scale_frame, axis=0)\n', (3211, 3240), False, 'from scipy.ndimage.filters import uniform_filter1d\n'), ((3491, 3524), 'math.ceil', 'math.ceil', (['(fps * (winsize / 1000))'], {}), '(fps * (winsize / 1000))\n', (3500, 3524), False, 'import math\n'), ((3739, 3772), 'math.ceil', 'math.ceil', (['(fps * (winsize / 1000))'], {}), '(fps * (winsize / 1000))\n', (3748, 3772), False, 'import math\n'), ((9672, 9698), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'mv_stat'}), '(data=mv_stat)\n', (9684, 9698), True, 'import pandas as pd\n'), ((10682, 10713), 'pandas.concat', 'pd.concat', (['df_snap_list'], {'axis': '(1)'}), '(df_snap_list, axis=1)\n', (10691, 10713), True, 'import pandas as pd\n'), ((12005, 12037), 'pandas.concat', 'pd.concat', (['df_delta_list'], {'axis': '(1)'}), '(df_delta_list, axis=1)\n', (12014, 12037), True, 'import pandas as pd\n'), ((2142, 2160), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (2156, 2160), True, 'import numpy as np\n'), ((2181, 2199), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (2195, 2199), True, 'import numpy as np\n'), ((2522, 2545), 'numpy.linalg.det', 'np.linalg.det', (['[v1, v2]'], {}), '([v1, v2])\n', (2535, 
2545), True, 'import numpy as np\n'), ((2547, 2561), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (2553, 2561), True, 'import numpy as np\n'), ((2693, 2723), 'numpy.nanmean', 'np.nanmean', (['stft_group'], {'axis': '(1)'}), '(stft_group, axis=1)\n', (2703, 2723), True, 'import numpy as np\n'), ((3266, 3318), 'numpy.gradient', 'np.gradient', (['y', '(1 / fps * 1000)'], {'axis': '(0)', 'edge_order': '(2)'}), '(y, 1 / fps * 1000, axis=0, edge_order=2)\n', (3277, 3318), True, 'import numpy as np\n'), ((2234, 2252), 'numpy.dot', 'np.dot', (['v1_u', 'v2_u'], {}), '(v1_u, v2_u)\n', (2240, 2252), True, 'import numpy as np\n'), ((2777, 2807), 'numpy.nanamin', 'np.nanamin', (['stft_group'], {'axis': '(1)'}), '(stft_group, axis=1)\n', (2787, 2807), True, 'import numpy as np\n'), ((5034, 5128), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (["(([item + '_x'], [item + '_y']) for item in stft_cfg[ft_set])"], {}), "(([item + '_x'], [item + '_y']) for item in\n stft_cfg[ft_set])\n", (5063, 5128), False, 'import itertools\n'), ((5969, 5995), 'numpy.zeros', 'np.zeros', (['df_pose.shape[0]'], {}), '(df_pose.shape[0])\n', (5977, 5995), True, 'import numpy as np\n'), ((9059, 9107), 'numpy.sqrt', 'np.sqrt', (['(diff_xy[:, 0] ** 2 + diff_xy[:, 1] ** 2)'], {}), '(diff_xy[:, 0] ** 2 + diff_xy[:, 1] ** 2)\n', (9066, 9107), True, 'import numpy as np\n'), ((2861, 2890), 'numpy.nanmax', 'np.nanmax', (['stft_group'], {'axis': '(1)'}), '(stft_group, axis=1)\n', (2870, 2890), True, 'import numpy as np\n')] |
import collections
import json
import numpy as np
import pandas as pd
import sklearn
def cv_results_to_df(cv_results):
    """
    Convert a `sklearn.grid_search.GridSearchCV.cv_results_` attribute into a
    tidy pandas DataFrame with one row per hyperparameter combination.

    Keeps every `param_*` column plus the train/test score summaries.
    """
    df = pd.DataFrame(cv_results)
    param_cols = [c for c in df.columns if c.startswith('param_')]
    score_cols = ['mean_train_score', 'mean_test_score', 'std_test_score']
    return df[param_cols + score_cols]
def expand_grid(data_dict):
    """
    Create a dataframe from every combination of given values.

    Parameters
    ----------
    data_dict : dict
        Mapping of column name -> iterable of values; the cartesian product
        of the value iterables becomes the rows of the result.

    Returns
    -------
    pandas.DataFrame
        One row per combination, columns named after `data_dict` keys.
    """
    # BUG FIX: `itertools` is not imported at module level in this file, so
    # the original raised NameError at call time; import it locally.
    import itertools
    rows = itertools.product(*data_dict.values())
    grid_df = pd.DataFrame.from_records(rows, columns=data_dict.keys())
    return grid_df
def df_to_datatables(df, double_precision=5, indent=2):
    """
    Convert a pandas dataframe to a JSON object formatted for datatables input.

    Drops the index and moves 'data' to the end so the key order is
    ('columns', 'data'). The `indent` parameter is accepted for API
    compatibility but not used here.
    """
    serialized = df.to_json(orient='split', double_precision=double_precision)
    table = json.loads(serialized)
    del table['index']
    table = collections.OrderedDict(table)
    table.move_to_end('data')
    return table
def class_metrics(y_true, y_pred):
    """Return an ordered mapping of hard-label classification metrics."""
    scores = [
        ('precision', sklearn.metrics.precision_score(y_true, y_pred)),
        ('recall', sklearn.metrics.recall_score(y_true, y_pred)),
        ('f1', sklearn.metrics.f1_score(y_true, y_pred)),
        ('accuracy', sklearn.metrics.accuracy_score(y_true, y_pred)),
        # See https://github.com/scikit-learn/scikit-learn/pull/6752
        ('balanced_accuracy', sklearn.metrics.recall_score(
            y_true, y_pred, pos_label=None, average='macro')),
    ]
    return collections.OrderedDict(scores)
def threshold_metrics(y_true, y_pred):
    """Return an ordered mapping of threshold-free (ranking) metrics."""
    return collections.OrderedDict([
        ('auroc', sklearn.metrics.roc_auc_score(y_true, y_pred)),
        ('auprc', sklearn.metrics.average_precision_score(y_true, y_pred)),
    ])
def model_info(estimator):
    """Summarize an sklearn estimator: class name, module, sorted parameters."""
    return collections.OrderedDict([
        ('class', type(estimator).__name__),
        ('module', estimator.__module__),
        ('parameters', sort_dict(estimator.get_params())),
    ])
def get_feature_df(grid_search, features):
    """
    Return the feature names and coefficients from the final classifier of the
    best pipeline found by GridSearchCV. See https://git.io/vPWLI. This function
    assumes every selection step of the pipeline has a name starting with
    `select`.
    Params
    ------
    grid_search: GridSearchCV object
        A post-fit GridSearchCV object where the estimator is a Pipeline.
    features: list
        initial feature names
    Returns
    -------
    pandas.DataFrame
        Dataframe of feature name and coefficient values
    """
    features = np.array(features)
    pipeline = grid_search.best_estimator_
    for name, transformer in pipeline.steps:
        if name.startswith('select'):
            # Feed the selector the feature *indexes* so the surviving
            # columns can be mapped back to their names.
            X_index = np.arange(len(features)).reshape(1, -1)
            indexes = transformer.transform(X_index).tolist()
            features = features[indexes]
    step_name, classifier = pipeline.steps[-1]
    coefficients, = classifier.coef_
    # BUG FIX: `pd.DataFrame.from_items` was removed in pandas 1.0; a plain
    # dict preserves insertion order (Python >= 3.7) and is equivalent.
    feature_df = pd.DataFrame({
        'feature': features,
        'coefficient': coefficients,
    })
    return feature_df
def sort_dict(dictionary):
    """
    Return a dictionary as an OrderedDict sorted by keys.
    """
    return collections.OrderedDict(sorted(dictionary.items()))
def make_json_serializable(obj):
    """
    Convert an object to be JSON serializable. Unsupported types throw a
    ValueError.
    """
    if isinstance(obj, dict):
        pairs = ((make_json_serializable(k), make_json_serializable(v))
                 for k, v in obj.items())
        return collections.OrderedDict(pairs)
    if isinstance(obj, (list, tuple)):
        return [make_json_serializable(item) for item in obj]
    if isinstance(obj, pd.DataFrame):
        return df_to_datatables(obj)
    if type(obj).__module__ == 'numpy':
        # Unwrap numpy scalars to native Python before the float/int checks.
        obj = obj.item()
    if isinstance(obj, float):
        # Round to 5 significant digits to keep the JSON compact.
        return float(format(obj, '.5g'))
    if isinstance(obj, (int, str)):
        return obj
    raise ValueError(type(obj), 'cannot be JSON sanitized')
| [
"json.loads",
"collections.OrderedDict",
"sklearn.metrics.f1_score",
"pandas.DataFrame.from_items",
"sklearn.metrics.average_precision_score",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"pandas.DataFrame",
"sklearn.metrics.a... | [((311, 335), 'pandas.DataFrame', 'pd.DataFrame', (['cv_results'], {}), '(cv_results)\n', (323, 335), True, 'import pandas as pd\n'), ((1041, 1061), 'json.loads', 'json.loads', (['dump_str'], {}), '(dump_str)\n', (1051, 1061), False, 'import json\n'), ((1093, 1121), 'collections.OrderedDict', 'collections.OrderedDict', (['obj'], {}), '(obj)\n', (1116, 1121), False, 'import collections\n'), ((1215, 1240), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (1238, 1240), False, 'import collections\n'), ((1268, 1315), 'sklearn.metrics.precision_score', 'sklearn.metrics.precision_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1299, 1315), False, 'import sklearn\n'), ((1340, 1384), 'sklearn.metrics.recall_score', 'sklearn.metrics.recall_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1368, 1384), False, 'import sklearn\n'), ((1405, 1445), 'sklearn.metrics.f1_score', 'sklearn.metrics.f1_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1429, 1445), False, 'import sklearn\n'), ((1472, 1518), 'sklearn.metrics.accuracy_score', 'sklearn.metrics.accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1502, 1518), False, 'import sklearn\n'), ((1619, 1696), 'sklearn.metrics.recall_score', 'sklearn.metrics.recall_score', (['y_true', 'y_pred'], {'pos_label': 'None', 'average': '"""macro"""'}), "(y_true, y_pred, pos_label=None, average='macro')\n", (1647, 1696), False, 'import sklearn\n'), ((1779, 1804), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (1802, 1804), False, 'import collections\n'), ((1828, 1873), 'sklearn.metrics.roc_auc_score', 'sklearn.metrics.roc_auc_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1857, 1873), False, 'import sklearn\n'), ((1897, 1952), 'sklearn.metrics.average_precision_score', 'sklearn.metrics.average_precision_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1936, 1952), False, 'import sklearn\n'), ((2012, 2037), 
'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (2035, 2037), False, 'import collections\n'), ((2821, 2839), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (2829, 2839), True, 'import numpy as np\n'), ((3232, 3311), 'pandas.DataFrame.from_items', 'pd.DataFrame.from_items', (["[('feature', features), ('coefficient', coefficients)]"], {}), "([('feature', features), ('coefficient', coefficients)])\n", (3255, 3311), True, 'import pandas as pd\n'), ((3509, 3539), 'collections.OrderedDict', 'collections.OrderedDict', (['items'], {}), '(items)\n', (3532, 3539), False, 'import collections\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri May 15 01:55:22 2020
@author: balajiramesh
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 00:25:12 2020
@author: balajiramesh
Raw : 16,319230 2,641562
Within study timeline: 14393806 2247749
Within study area and timeline: 7892752 1246896
AFter removing washout period: 7816138 1233913
After removeing missing data: 7,813,866 and 1,233,600 OP and IP ED visit records
"""
import pandas as pd
import numpy as np
import geopandas
import statsmodels.api as sm
import statsmodels.formula.api as smf
from datetime import timedelta, date,datetime
from dateutil import parser
import glob
import sys
import gc
sys.path.insert(1, r'H:\Balaji\GRAScripts\dhs_scripts')
from recalculate_svi import recalculateSVI
#%%functions
def filter_mortality(df):
    """Return a 0/1 series flagging visits whose PAT_STATUS is a death code."""
    status = pd.to_numeric(df.PAT_STATUS.copy(), errors="coerce")
    # 20/40/41/42 are the discharge-status codes for died
    return status.isin([20, 40, 41, 42]).astype('int')
def get_sp_outcomes(sp,Dis_cat):
    """Attach the pre-computed `Dis_cat` outcome column to each visit record.

    Left-joins the module-level `sp_outcomes` table on (RECORD_ID, op) and
    returns the matched outcome column as a numpy array (NaN where no match).
    """
    global sp_outcomes
    return sp.merge(sp_outcomes.loc[:,['RECORD_ID','op',Dis_cat]],on=['RECORD_ID','op'],how='left')[Dis_cat].values
#%% Read from pickle - shortcut ==================================================================================
#=================================================================================================================
# Root folder of the cleaned/merged ED-visit extracts (PII data).
INPUT_IPOP_DIR=r'H:\Balaji\DSHS ED visit data(PII)\CleanedMergedJoined'
# Main visit-level dataframe (one row per ED visit).
sp=pd.read_pickle(r'Z:\Balaji\R session_home_dir (PII)\sp_pyton_EdVisit.pkl')
# Read the outcome-categories lookup file.
outcome_cats=pd.read_csv('H:/Balaji/GRAScripts/dhs_scripts/categories.csv')
outcome_cats.fillna('',inplace=True)
# Read pre-computed OP/IP outcome flags (joined later on RECORD_ID/op).
sp_outcomes=pd.read_csv(INPUT_IPOP_DIR+'\\ip_op_outcomes.csv')
flood_join_field='PAT_ADDR_CENSUS_TRACT'
Dis_cat='Pregnancy_complic'
#%% Merge Dr Samarth's dataset (evacuation / rainfall / TRI exposure by tract)
evacDf_raw=pd.read_csv('Z:/Balaji/EvacuationDataDrSamarth/overall_sim_feature_values.csv')
evacDf=evacDf_raw.rename(columns={'flooding_close_proximity_duration_hr':'floodCloseProxDur',
                     'tri_close_proximity_duration_hr':'triCloseProxDur', 'tri_distance_mi':'triDistMiles',
                     'heavy_rainfall_duration_hr':'hvyRainDur', 'rainfall_total_mm':'totRainfall'
                     })
# Make quantile bins for each variable (kept for reference, currently unused):
# evacDfCat=evacDf.loc[:,evacDf.columns != 'FIPS'] \
#     .apply(axis=0,func=lambda x: \
#            pd.cut(x,np.round(np.insert(np.quantile(x,[.25,.5,.75,1]),0,-1),3),labels=np.round(np.quantile(x,[.25,.5,.75,1]),3)))
# Convert everything to categorical (unused):
#evacDf=pd.concat([evacDf.loc[:,'FIPS'],evacDfCat],axis=1)
# Subset visits to census tracts present in the evacuation data.
sp=sp.loc[sp.PAT_ADDR_CENSUS_TRACT.isin(evacDf.FIPS),:]
# Merge evacuation covariates onto each visit by census tract.
sp=sp.merge(evacDf,how='left',left_on='PAT_ADDR_CENSUS_TRACT',right_on='FIPS')
# Subset sp_outcomes to save memory.
sp_outcomes=sp_outcomes.loc[sp_outcomes.RECORD_ID.isin(sp.RECORD_ID),:]
# Redefine flood categories.
#%% Merge flood-ratio categories: 'NO' (no flooding) plus a split of the
# positive flood ratios at their median (FloodCat1 / FloodCat2).
tractsfloodr=sp.loc[~sp.duplicated(flood_join_field),[flood_join_field,'floodr']]
s=tractsfloodr.loc[tractsfloodr.floodr>0,'floodr']
flood_bins=[0,0.00000001,s.quantile(0.5),1]
sp['floodr_cat']=pd.cut(sp.floodr,bins=flood_bins,right=True,include_lowest=True,labels=['NO','FloodCat1','FloodCat2'])
#%% Exposure variable used by run() below.
exposure='evacuation_pct'
def run():
    """Fit the GEE model for the current global `Dis_cat` outcome and
    `exposure`, and write the exponentiated coefficient table (rate ratios)
    to "<Dis_cat>_<exposure>_reg.csv".

    Relies on module-level globals: sp, outcome_cats, exposure, Dis_cat,
    flood_join_field. For Dis_cat == "ALL" the data are aggregated to
    tract-period counts with a population offset; otherwise a record-level
    model with a TotalVisits offset is fit.
    """
    #%%filter records for specific outcome
    df=sp#.sample(500000)#[sp.SVI_Cat=='SVI_filter'] #--------------Edit here for stratified model
    if Dis_cat=="DEATH":df.loc[:,'Outcome']=filter_mortality(sp)
    if Dis_cat=="ALL":df.loc[:,'Outcome']=1
    if Dis_cat in outcome_cats.category.to_list():df.loc[:,'Outcome']=get_sp_outcomes(df, Dis_cat)
    #%%for filtering flooded or non flooded alone
    #df=df[df.floodr_cat=="FLood_1"].copy()
    #df=df[df.SEX_CODE==FIL_COL].copy()
    #df=df[df.AGE_cat==FIL_COL].copy()
    #df=df[df[SVI_COL]==FIL_COL].copy()
    #df=df[df.RACE==FIL_COL].copy()
    #%%stratified model for each period
    #df=df.loc[df.Time.isin(['control', 'flood']),]
    #df.Time.cat.remove_unused_categories(inplace=True)
    #%% save cross tab (kept for reference)
    #counts_outcome=pd.DataFrame(df.Outcome.value_counts())
    # outcomes_recs=df.loc[(df.Outcome>0)&(~pd.isna(df.loc[:,[exposure,'Time','year','month','weekday' ,'PAT_AGE_YEARS',
    #                       'SEX_CODE','RACE','ETHNICITY','SVI_Cat']]).any(axis=1)),]
    # counts_outcome=pd.crosstab(outcomes_recs[exposure],outcomes_recs.Time)
    # counts_outcome.to_csv(Dis_cat+"_"+exposure+"_aux"+".csv")
    # print(counts_outcome)
    # del outcomes_recs
    #%% For total ED visits, aggregate to tract-period counts and turn the
    # demographic dummies into within-group proportions.
    if Dis_cat=="ALL":
        grouped_tracts=df.loc[:,['STMT_PERIOD_FROM','PAT_AGE_YEARS','PAT_ADDR_CENSUS_TRACT','Outcome']]
        grouped_tracts=pd.concat([grouped_tracts]+[pd.get_dummies(df[i],prefix=i) for i in ['SEX_CODE','RACE','ETHNICITY','op','AGE_cat']],axis=1)
        grouped_tracts=grouped_tracts.groupby(['STMT_PERIOD_FROM', 'PAT_ADDR_CENSUS_TRACT']).agg({'Outcome':'sum',
                    'PAT_AGE_YEARS':'mean',
                    'SEX_CODE_M':'sum','SEX_CODE_F':'sum',
                    'RACE_white':'sum','RACE_black':'sum','RACE_other':'sum',
                    'ETHNICITY_Non_Hispanic':'sum','ETHNICITY_Hispanic':'sum',
                    'op_False':'sum','op_True':'sum',
                    'AGE_cat_lte1':'sum', 'AGE_cat_2-5':'sum', 'AGE_cat_6-12':'sum', 'AGE_cat_13-17':'sum','AGE_cat_18-45':'sum', 'AGE_cat_46-64':'sum', 'AGE_cat_gt64':'sum'
                    }).reset_index()
        grouped_tracts=grouped_tracts.merge(df.drop_duplicates(['STMT_PERIOD_FROM','PAT_ADDR_CENSUS_TRACT']).loc[:,['STMT_PERIOD_FROM','PAT_ADDR_CENSUS_TRACT','floodr_cat','Population','Time','year','month','weekday','SVI_Cat','RPL_THEMES_1','RPL_THEMES_2','RPL_THEMES_3','RPL_THEMES_4','floodr', 'triCloseProxDur','evacuation_pct', 'hvyRainDur']],how='left',on=["PAT_ADDR_CENSUS_TRACT",'STMT_PERIOD_FROM'])
        dummy_cols=['SEX_CODE_M', 'SEX_CODE_F', 'RACE_white', 'RACE_black', 'RACE_other','ETHNICITY_Non_Hispanic', 'ETHNICITY_Hispanic', 'op_False', 'op_True','AGE_cat_lte1', 'AGE_cat_2-5', 'AGE_cat_6-12', 'AGE_cat_13-17','AGE_cat_18-45', 'AGE_cat_46-64', 'AGE_cat_gt64']
        grouped_tracts.loc[:,dummy_cols]=grouped_tracts.loc[:,dummy_cols].divide(grouped_tracts.Outcome,axis=0)
        del df
        df=grouped_tracts
    #%% Running the model: Poisson GEE clustered on census tract.
    if Dis_cat!="ALL":offset=np.log(df.TotalVisits)
    #offset=None
    if Dis_cat=="ALL":offset=np.log(df.Population)
    # Change floodr into a 0-100 scale.
    df.floodr=df.floodr*100
    formula='Outcome'+' ~ floodr_cat * '+exposure+' * Time '+' + year + month + weekday '+'+ op + RACE + SEX_CODE + PAT_AGE_YEARS + ETHNICITY + triCloseProxDur + hvyRainDur'
    if Dis_cat=='ALL': formula='Outcome'+' ~ floodr_cat * '+exposure + ' * Time'+' + year + month + weekday + '+' + '.join(['SEX_CODE_M','op_True','PAT_AGE_YEARS','RACE_white', 'RACE_black','ETHNICITY_Non_Hispanic','triCloseProxDur', 'hvyRainDur'])
    #if Dis_cat=='ALL': formula='Outcome'+' ~ '+' floodr_cat * Time'+' + year + month + weekday + '+' + '.join(['SEX_CODE_M','op_True','RACE_white', 'RACE_black','ETHNICITY_Non_Hispanic','PAT_AGE_YEARS'])
    #formula=formula+' + Median_H_Income'
    # Drop the sex covariate for the pregnancy-complications outcome (female-only).
    formula=formula.replace('SEX_CODE_M +','').replace('SEX_CODE +','') if Dis_cat=='Pregnancy_complic' else formula
    model = smf.gee(formula=formula,groups=df[flood_join_field],offset=offset, data=df,missing='drop',family=sm.families.Poisson(link=sm.families.links.log()))
    #model = smf.logit(formula=formula, data=df,missing='drop')
    #model = smf.glm(formula=formula, data=df,missing='drop',offset=offset,family=sm.families.Binomial(sm.families.links.logit()))
    results=model.fit()
    # print(results.summary())
    print(np.exp(results.params))
    # print(np.exp(results.conf_int()))
    #%% Creating result dataframe tables; exponentiate coefficients and CIs
    # so they read as rate ratios.
    results_as_html = results.summary().tables[1].as_html()
    reg_table=pd.read_html(results_as_html, header=0, index_col=0)[0].reset_index()
    reg_table.loc[:,'coef']=np.exp(reg_table.coef)
    reg_table.loc[:,['[0.025', '0.975]']]=np.exp(reg_table.loc[:,['[0.025', '0.975]']])
    reg_table=reg_table.loc[~(reg_table['index'].str.contains('month')
                  | reg_table['index'].str.contains('weekday')
                  #| reg_table['index'].str.contains('year')
                  #| reg_table['index'].str.contains('PAT_AGE_YEARS'))
                  ),]
    reg_table['index']=reg_table['index'].str.replace("\[T.",'_').str.replace('\]','')
    reg_table['model']=exposure
    reg_table_dev=pd.read_html(results.summary().tables[0].as_html())[0]
    del model,results
    gc.collect()
    # counts_outcome.loc["flood_bins",'Outcome']=str(flood_bins)
    #return reg_table
    #%% Write the output CSV consumed by the merge step at the bottom of the file.
    reg_table.to_csv(Dis_cat+"_"+exposure+"_reg"+".csv")
    #reg_table_dev.to_csv(Dis_cat+"_dev"+".csv")
# Outcome categories to model; commented entries were examined in earlier
# runs and are kept for reference.
Dis_cats=[ 'ALL',
          #'Psychiatric',
          'Intestinal_infectious_diseases',
          'ARI',
          'Bite-Insect',
          #'DEATH',
          # #'Flood_Storms',
          #'CO_Exposure',
          #'Drowning',
          #'Heat_Related_But_Not_dehydration',
          # 'Hypothermia',
          # #'Dialysis',
          # #'Medication_Refill',
          # 'Asthma',
          'Pregnancy_complic',
          'Chest_pain',
          'Dehydration',
          ]
# Fit one model per (exposure, outcome) pair; failures are printed and the
# loop continues with the next outcome.
for exposure in ['evacuation_pct']:
    print(exposure)
    for Dis_cat in Dis_cats:
        try:
            print(Dis_cat)
            print("-"*50)
            run()
        except Exception as e: print(e)
#%% Combined merge: collect every per-outcome "*_reg.csv" regression table
# written by run() into one tidy table.
import glob, os
req_files=glob.glob("*_reg.csv")
merge_df=pd.DataFrame()
for file in req_files:
    df=pd.read_csv(file)[['index','coef','P>|z|','[0.025','0.975]','model']]
    df=df.round(5)
    # Outcome name is the file name without the "_reg.csv" suffix.
    Dis_cat=os.path.basename(file).replace("_reg.csv","")
    Dis_cat=Dis_cat.split('_')[0]
    df['outcome']=Dis_cat
    merge_df=pd.concat([merge_df,df],axis=0)
merge_df.columns=['covar', 'RR', 'P', 'conf25', 'conf95','model', 'outcome']
# Strip patsy's "[T.level]" decoration from covariate names.
merge_df['covar']=merge_df['covar'].str.replace("\[T.",'_').str.replace('\]','')
#merge_df['folder']='SVI_Cat_T4'
#% output (written by the to_excel call that follows)
merge_df.to_excel('merged_flood_cat8.xlsx',index=False) | [
"pandas.read_pickle",
"sys.path.insert",
"pandas.read_csv",
"pandas.read_html",
"numpy.log",
"pandas.cut",
"pandas.get_dummies",
"numpy.exp",
"statsmodels.api.families.links.log",
"pandas.to_numeric",
"os.path.basename",
"gc.collect",
"pandas.DataFrame",
"pandas.concat",
"glob.glob"
] | [((657, 714), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""H:\\\\Balaji\\\\GRAScripts\\\\dhs_scripts"""'], {}), "(1, 'H:\\\\Balaji\\\\GRAScripts\\\\dhs_scripts')\n", (672, 714), False, 'import sys\n'), ((1435, 1511), 'pandas.read_pickle', 'pd.read_pickle', (['"""Z:\\\\Balaji\\\\R session_home_dir (PII)\\\\sp_pyton_EdVisit.pkl"""'], {}), "('Z:\\\\Balaji\\\\R session_home_dir (PII)\\\\sp_pyton_EdVisit.pkl')\n", (1449, 1511), True, 'import pandas as pd\n'), ((1549, 1611), 'pandas.read_csv', 'pd.read_csv', (['"""H:/Balaji/GRAScripts/dhs_scripts/categories.csv"""'], {}), "('H:/Balaji/GRAScripts/dhs_scripts/categories.csv')\n", (1560, 1611), True, 'import pandas as pd\n'), ((1685, 1737), 'pandas.read_csv', 'pd.read_csv', (["(INPUT_IPOP_DIR + '\\\\ip_op_outcomes.csv')"], {}), "(INPUT_IPOP_DIR + '\\\\ip_op_outcomes.csv')\n", (1696, 1737), True, 'import pandas as pd\n'), ((1849, 1928), 'pandas.read_csv', 'pd.read_csv', (['"""Z:/Balaji/EvacuationDataDrSamarth/overall_sim_feature_values.csv"""'], {}), "('Z:/Balaji/EvacuationDataDrSamarth/overall_sim_feature_values.csv')\n", (1860, 1928), True, 'import pandas as pd\n'), ((3107, 3220), 'pandas.cut', 'pd.cut', (['sp.floodr'], {'bins': 'flood_bins', 'right': '(True)', 'include_lowest': '(True)', 'labels': "['NO', 'FloodCat1', 'FloodCat2']"}), "(sp.floodr, bins=flood_bins, right=True, include_lowest=True, labels=\n ['NO', 'FloodCat1', 'FloodCat2'])\n", (3113, 3220), True, 'import pandas as pd\n'), ((10322, 10344), 'glob.glob', 'glob.glob', (['"""*_reg.csv"""'], {}), "('*_reg.csv')\n", (10331, 10344), False, 'import glob, os\n'), ((10355, 10369), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10367, 10369), True, 'import pandas as pd\n'), ((841, 880), 'pandas.to_numeric', 'pd.to_numeric', (['pat_sta'], {'errors': '"""coerce"""'}), "(pat_sta, errors='coerce')\n", (854, 880), True, 'import pandas as pd\n'), ((8574, 8596), 'numpy.exp', 'np.exp', (['reg_table.coef'], {}), '(reg_table.coef)\n', (8580, 8596), True, 
'import numpy as np\n'), ((8639, 8685), 'numpy.exp', 'np.exp', (["reg_table.loc[:, ['[0.025', '0.975]']]"], {}), "(reg_table.loc[:, ['[0.025', '0.975]']])\n", (8645, 8685), True, 'import numpy as np\n'), ((9276, 9288), 'gc.collect', 'gc.collect', ([], {}), '()\n', (9286, 9288), False, 'import gc\n'), ((10621, 10654), 'pandas.concat', 'pd.concat', (['[merge_df, df]'], {'axis': '(0)'}), '([merge_df, df], axis=0)\n', (10630, 10654), True, 'import pandas as pd\n'), ((6919, 6941), 'numpy.log', 'np.log', (['df.TotalVisits'], {}), '(df.TotalVisits)\n', (6925, 6941), True, 'import numpy as np\n'), ((6988, 7009), 'numpy.log', 'np.log', (['df.Population'], {}), '(df.Population)\n', (6994, 7009), True, 'import numpy as np\n'), ((8286, 8308), 'numpy.exp', 'np.exp', (['results.params'], {}), '(results.params)\n', (8292, 8308), True, 'import numpy as np\n'), ((10401, 10418), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (10412, 10418), True, 'import pandas as pd\n'), ((10502, 10524), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (10518, 10524), False, 'import glob, os\n'), ((8476, 8528), 'pandas.read_html', 'pd.read_html', (['results_as_html'], {'header': '(0)', 'index_col': '(0)'}), '(results_as_html, header=0, index_col=0)\n', (8488, 8528), True, 'import pandas as pd\n'), ((4787, 4818), 'pandas.get_dummies', 'pd.get_dummies', (['df[i]'], {'prefix': 'i'}), '(df[i], prefix=i)\n', (4801, 4818), True, 'import pandas as pd\n'), ((7995, 8018), 'statsmodels.api.families.links.log', 'sm.families.links.log', ([], {}), '()\n', (8016, 8018), True, 'import statsmodels.api as sm\n')] |
import io
import cv2
import base64
import numpy as np
from PIL import Image
from os.path import dirname, join
import tensorflow as tf
from alibi.explainers import AnchorImage
"""
模型解释(改用 alibi )
pip install alibi
alibi 没有针对image regression问题的解释逻辑
"""
# Load the quantized TFLite beauty-scoring model; tensors are allocated
# lazily inside predict_batch()/predict(), not here.
model_file = join(dirname(__file__), "model_beauty_q_v2.tflite")
interpreter = tf.compat.v1.lite.Interpreter(model_path=model_file)
def predict_batch(inp):
    """
    Run the global TFLite interpreter over a batch, one sample at a time.

    Reference: https://www.kaggle.com/grafael/fast-predictions-tflite-1h-3x-faster
    """
    global interpreter
    interpreter.allocate_tensors()
    in_det = interpreter.get_input_details()[0]
    out_det = interpreter.get_output_details()[0]
    batch = inp.astype(in_det["dtype"])
    count = batch.shape[0]
    preds = np.zeros((count, out_det["shape"][1]), dtype=out_det["dtype"])
    for row in range(count):
        # Feed a single-sample slice so the fixed input tensor shape fits.
        interpreter.set_tensor(in_det["index"], batch[row:row + 1])
        interpreter.invoke()
        preds[row] = interpreter.get_tensor(out_det["index"])[0]
    return preds
def predict(img):
    """
    Run the TFLite interpreter on a whole batch at once by resizing its
    input tensor to the batch shape.

    Reference: https://github.com/tensorflow/tensorflow/issues/37012
    """
    global interpreter
    batch_input = np.vstack(img)
    print("batch_input shape:", batch_input.shape)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    interpreter.resize_tensor_input(input_details[0]['index'], batch_input.shape)
    # NOTE(review): the *output* tensor is resized to the *input* batch shape;
    # this only works if the model's output shape matches — confirm.
    interpreter.resize_tensor_input(output_details[0]['index'], batch_input.shape)
    # output_shape = output_details[1]['shape']
    # interpreter.resize_tensor_input(output_details[1]['index'], (batch_input.shape[0], output_shape[1], output_shape[2], output_shape[3]))
    interpreter.allocate_tensors()
    interpreter.set_tensor(input_details[0]['index'], batch_input)
    interpreter.invoke()
    output_data = interpreter.get_tensor(output_details[0]['index'])
    return output_data
# Thin alias; equivalent to passing predict_batch directly.
predict_fn = lambda x: predict_batch(x)
def explain_img(old_img):
    """
    Explain a face image with alibi's AnchorImage and return PNG bytes of
    the anchor (the image region that anchors the prediction).

    Reference: https://docs.seldon.io/projects/alibi/en/latest/examples/anchor_image_imagenet.html
    """
    resized = cv2.resize(old_img, (300, 300), interpolation=cv2.INTER_NEAREST)
    resized = resized.astype(np.float32)
    resized /= 255.0
    print("get img shape:", resized.shape, resized.dtype)
    seg_kwargs = {'n_segments': 15, 'compactness': 20, 'sigma': .5}
    explainer = AnchorImage(
        predict_batch,
        (300, 300, 3),
        segmentation_fn='slic',
        segmentation_kwargs=seg_kwargs,
        images_background=None,
    )
    explanation = explainer.explain(resized, threshold=.95, p_sample=.5, tau=0.25)
    buff = io.BytesIO()
    Image.fromarray(explanation.anchor).save(buff, format="PNG")
    return buff.getvalue()
def gen_result(str_data):
    """Decode a base64-encoded image payload and return PNG explanation bytes.

    Parameters
    ----------
    str_data : str or bytes
        Base64-encoded image data.
    """
    decode_data = base64.b64decode(str_data)
    # np.fromstring on bytes is deprecated (removed in newer numpy);
    # np.frombuffer is the supported zero-copy equivalent.
    np_data = np.frombuffer(decode_data, np.uint8)
    old_img = cv2.imdecode(np_data, cv2.IMREAD_UNCHANGED)
    return explain_img(old_img)
if __name__=="__main__":
# python -X faulthandler report_lite.py
img = cv2.imread("/opt/data/SCUT-FBP5500_v2/Images/train/face/AF1031.jpg")
grid = explain_img(img)
# explainer.save(grid, ".", "occ_sens_lite.png")
grid.save('occ_sens_lite.png')
| [
"PIL.Image.fromarray",
"io.BytesIO",
"base64.b64decode",
"os.path.dirname",
"tensorflow.compat.v1.lite.Interpreter",
"numpy.zeros",
"numpy.vstack",
"cv2.imdecode",
"alibi.explainers.AnchorImage",
"cv2.resize",
"numpy.fromstring",
"cv2.imread"
] | [((374, 426), 'tensorflow.compat.v1.lite.Interpreter', 'tf.compat.v1.lite.Interpreter', ([], {'model_path': 'model_file'}), '(model_path=model_file)\n', (403, 426), True, 'import tensorflow as tf\n'), ((313, 330), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (320, 330), False, 'from os.path import dirname, join\n'), ((1004, 1058), 'numpy.zeros', 'np.zeros', (['(count, output_shape[1])'], {'dtype': 'output_dtype'}), '((count, output_shape[1]), dtype=output_dtype)\n', (1012, 1058), True, 'import numpy as np\n'), ((1381, 1395), 'numpy.vstack', 'np.vstack', (['img'], {}), '(img)\n', (1390, 1395), True, 'import numpy as np\n'), ((2314, 2378), 'cv2.resize', 'cv2.resize', (['old_img', '(300, 300)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(old_img, (300, 300), interpolation=cv2.INTER_NEAREST)\n', (2324, 2378), False, 'import cv2\n'), ((2588, 2718), 'alibi.explainers.AnchorImage', 'AnchorImage', (['predict_batch', '(300, 300, 3)'], {'segmentation_fn': 'segmentation_fn', 'segmentation_kwargs': 'kwargs', 'images_background': 'None'}), '(predict_batch, (300, 300, 3), segmentation_fn=segmentation_fn,\n segmentation_kwargs=kwargs, images_background=None)\n', (2599, 2718), False, 'from alibi.explainers import AnchorImage\n'), ((2836, 2871), 'PIL.Image.fromarray', 'Image.fromarray', (['explanation.anchor'], {}), '(explanation.anchor)\n', (2851, 2871), False, 'from PIL import Image\n'), ((2883, 2895), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2893, 2895), False, 'import io\n'), ((3006, 3032), 'base64.b64decode', 'base64.b64decode', (['str_data'], {}), '(str_data)\n', (3022, 3032), False, 'import base64\n'), ((3047, 3083), 'numpy.fromstring', 'np.fromstring', (['decode_data', 'np.uint8'], {}), '(decode_data, np.uint8)\n', (3060, 3083), True, 'import numpy as np\n'), ((3098, 3141), 'cv2.imdecode', 'cv2.imdecode', (['np_data', 'cv2.IMREAD_UNCHANGED'], {}), '(np_data, cv2.IMREAD_UNCHANGED)\n', (3110, 3141), False, 'import cv2\n'), ((3291, 3359), 
'cv2.imread', 'cv2.imread', (['"""/opt/data/SCUT-FBP5500_v2/Images/train/face/AF1031.jpg"""'], {}), "('/opt/data/SCUT-FBP5500_v2/Images/train/face/AF1031.jpg')\n", (3301, 3359), False, 'import cv2\n')] |
import yfinance
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import statistics
import math
import time
def get_stock_data(stock):
data = yfinance.Ticker(stock)
df = pd.DataFrame(data.history(period="2y"))
adjusted_close = df['Close']
df['daily_pct_change'] = df['Close'].pct_change()
mean_return = df["daily_pct_change"].mean()
std_return = df["daily_pct_change"].std()
return monte_carlo_sim(mean_return, std_return, float(df['Close'][-1]))
def monte_carlo_sim(mean_return, std_return, initial_price):
simulation = {}
final_price = []
for sim in range(1, 1000):
simulation["sim_" + str(sim)] = []
price = initial_price # set the initial price
for i in range(14):
new_price = price*(mean_return + std_return*np.random.normal())
price = price + new_price
simulation["sim_" + str(sim)] += [price]
final_price += [price]
return statistics.stdev(final_price) / statistics.mean(final_price)
| [
"statistics.mean",
"statistics.stdev",
"yfinance.Ticker",
"numpy.random.normal"
] | [((194, 216), 'yfinance.Ticker', 'yfinance.Ticker', (['stock'], {}), '(stock)\n', (209, 216), False, 'import yfinance\n'), ((1015, 1044), 'statistics.stdev', 'statistics.stdev', (['final_price'], {}), '(final_price)\n', (1031, 1044), False, 'import statistics\n'), ((1047, 1075), 'statistics.mean', 'statistics.mean', (['final_price'], {}), '(final_price)\n', (1062, 1075), False, 'import statistics\n'), ((857, 875), 'numpy.random.normal', 'np.random.normal', ([], {}), '()\n', (873, 875), True, 'import numpy as np\n')] |
from itertools import chain
from operator import itemgetter
from collections import defaultdict
import numpy as np
from gym import spaces
from coordination.environment.deployment import ServiceCoordination
class NFVdeepCoordination(ServiceCoordination):
COMPUTE_UNIT_COST = 0.2
MEMORY_UNIT_COST = 0.2
DATARATE_UNIT_COST = 6.0 * 1e-4
# worked best in our experiments; set similar to threshold in MAVEN-S
REVENUE = 5.0
def __init__(self, net_path, process, vnfs, services):
super().__init__(net_path, process, vnfs, services)
# observation space of NFVdeep simulation environment
self.OBS_SIZE = 3 * len(self.net.nodes) + 6
self.observation_space = spaces.Box(low=0.0, high=1.0, shape=(self.OBS_SIZE,), dtype=np.float16)
def compute_state(self) -> np.ndarray:
if self.done:
return np.zeros(self.OBS_SIZE)
# (1) encode remaining compute resources
computing = [c / self.MAX_COMPUTE for c in self.computing.values()]
# (2) encode remaining memory resources
memory = [m / self.MAX_MEMORY for m in self.memory.values()]
# (3) encode remaining output datarate
MAX_OUTPUT = self.MAX_DEGREE * max(data['datarate'] for _, _, data in self.net.edges(data=True))
output_rates = defaultdict(float)
for src in self.net.nodes:
for trg in self.net.nodes:
if frozenset({src, trg}) in self.datarate:
output_rates[src] += self.datarate[frozenset({src, trg})]
output_rates = list(itemgetter(*self.net.nodes)(output_rates))
output_rates = [rate / MAX_OUTPUT for rate in output_rates]
# (4) encode request specific properties
rate = self.request.datarate / self.MAX_LINKRATE
resd_lat = self.request.resd_lat / 100.0
num_components = (len(self.request.vtypes) - len(self.vtype_bidict.mirror[self.request])) / max(len(s) for s in self.services)
# resource consumption depend on placement decisions; use the mean resource demand
cdemands, mdemands = [], []
vnum = len(self.vtype_bidict.mirror[self.request])
vtype = self.request.vtypes[vnum]
config = self.vnfs[vtype]
for node in self.net.nodes:
supplied_rate = sum([service.datarate for service in self.vtype_bidict[(node, vtype)]])
after_cdem, after_mdem = self.score(supplied_rate + self.request.datarate, config)
prev_cdem, prev_mdem = self.score(supplied_rate, config)
cdemand = np.clip((after_cdem - prev_cdem) / self.MAX_COMPUTE, a_min=0.0, a_max=1.0)
mdemand = np.clip((after_mdem - prev_mdem) / self.MAX_MEMORY, a_min=0.0, a_max=1.0)
cdemands.append(cdemand)
mdemands.append(mdemand)
cdemand = np.mean(cdemands) / self.MAX_COMPUTE
mdemand = np.mean(mdemands) / self.MAX_MEMORY
duration = self.request.duration / 100
request = [rate, resd_lat, num_components, cdemand, mdemand, duration]
state = chain(computing, memory, output_rates, request)
return np.asarray(list(state))
def compute_reward(self, finalized, deployed, req) -> float:
if deployed:
cresources = np.asarray([data['compute'] for node, data in self.net.nodes(data=True)])
cavailable = np.asarray([self.computing[node] for node in self.net.nodes])
ccost = np.sum(((cresources - cavailable) > 0) * cresources) * self.COMPUTE_UNIT_COST / self.MAX_COMPUTE
mresources = np.asarray([data['memory'] for node, data in self.net.nodes(data=True)])
mavailable = np.asarray([self.memory[node] for node in self.net.nodes])
mcost = np.sum(((mresources - mavailable) > 0) * mresources) * self.MEMORY_UNIT_COST / self.MAX_MEMORY
dresources = np.asarray([data['datarate'] for src, trg, data in self.net.edges(data=True)])
davailable = np.asarray([self.datarate[frozenset({src, trg})] for src, trg in self.net.edges])
dcost = np.sum(((dresources - davailable) > 0) * dresources) * self.DATARATE_UNIT_COST / self.MAX_LINKRATE
# in our setting, the revenue is the same for any request
return self.REVENUE - (ccost + mcost + dcost)
return 0.0
| [
"itertools.chain",
"numpy.clip",
"numpy.mean",
"numpy.asarray",
"gym.spaces.Box",
"numpy.sum",
"numpy.zeros",
"collections.defaultdict",
"operator.itemgetter"
] | [((713, 784), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0.0)', 'high': '(1.0)', 'shape': '(self.OBS_SIZE,)', 'dtype': 'np.float16'}), '(low=0.0, high=1.0, shape=(self.OBS_SIZE,), dtype=np.float16)\n', (723, 784), False, 'from gym import spaces\n'), ((1323, 1341), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (1334, 1341), False, 'from collections import defaultdict\n'), ((3079, 3126), 'itertools.chain', 'chain', (['computing', 'memory', 'output_rates', 'request'], {}), '(computing, memory, output_rates, request)\n', (3084, 3126), False, 'from itertools import chain\n'), ((870, 893), 'numpy.zeros', 'np.zeros', (['self.OBS_SIZE'], {}), '(self.OBS_SIZE)\n', (878, 893), True, 'import numpy as np\n'), ((2579, 2653), 'numpy.clip', 'np.clip', (['((after_cdem - prev_cdem) / self.MAX_COMPUTE)'], {'a_min': '(0.0)', 'a_max': '(1.0)'}), '((after_cdem - prev_cdem) / self.MAX_COMPUTE, a_min=0.0, a_max=1.0)\n', (2586, 2653), True, 'import numpy as np\n'), ((2676, 2749), 'numpy.clip', 'np.clip', (['((after_mdem - prev_mdem) / self.MAX_MEMORY)'], {'a_min': '(0.0)', 'a_max': '(1.0)'}), '((after_mdem - prev_mdem) / self.MAX_MEMORY, a_min=0.0, a_max=1.0)\n', (2683, 2749), True, 'import numpy as np\n'), ((2844, 2861), 'numpy.mean', 'np.mean', (['cdemands'], {}), '(cdemands)\n', (2851, 2861), True, 'import numpy as np\n'), ((2899, 2916), 'numpy.mean', 'np.mean', (['mdemands'], {}), '(mdemands)\n', (2906, 2916), True, 'import numpy as np\n'), ((3377, 3438), 'numpy.asarray', 'np.asarray', (['[self.computing[node] for node in self.net.nodes]'], {}), '([self.computing[node] for node in self.net.nodes])\n', (3387, 3438), True, 'import numpy as np\n'), ((3680, 3738), 'numpy.asarray', 'np.asarray', (['[self.memory[node] for node in self.net.nodes]'], {}), '([self.memory[node] for node in self.net.nodes])\n', (3690, 3738), True, 'import numpy as np\n'), ((1582, 1609), 'operator.itemgetter', 'itemgetter', (['*self.net.nodes'], {}), '(*self.net.nodes)\n', (1592, 
1609), False, 'from operator import itemgetter\n'), ((3459, 3509), 'numpy.sum', 'np.sum', (['((cresources - cavailable > 0) * cresources)'], {}), '((cresources - cavailable > 0) * cresources)\n', (3465, 3509), True, 'import numpy as np\n'), ((3759, 3809), 'numpy.sum', 'np.sum', (['((mresources - mavailable > 0) * mresources)'], {}), '((mresources - mavailable > 0) * mresources)\n', (3765, 3809), True, 'import numpy as np\n'), ((4086, 4136), 'numpy.sum', 'np.sum', (['((dresources - davailable > 0) * dresources)'], {}), '((dresources - davailable > 0) * dresources)\n', (4092, 4136), True, 'import numpy as np\n')] |
import moviepy.editor as mpy
import matplotlib.pyplot as plt
import numpy as np
import cv2, wave
import settings
# デバッグ用
import shutil, os
def get_music(id):
"""
音楽の長さが何秒か(浮動小数)と
フレーム数を返す関数
Parameter
---------
id : str
個人識別用uuid
Returns
-------
1.0 * music_frames / SAMPLING_RATE : float
音楽の長さ(秒)
"""
MOVIE_PATH = MOVIE_PATH = './movie/' + id + '/'
WAVE_PATH = MOVIE_PATH + settings.WAV_FILE_NAME
SAMPLING_RATE = 44100
with wave.open(WAVE_PATH, 'r') as music:
music_frames = music.getnframes()
return 1.0 * music_frames / SAMPLING_RATE
def create_clip(path, id, bpm=0, is_icon=False, is_related=False):
"""
画像から音楽と同じ長さの無音動画を作る関数
Parameters
----------
path : str
動画化したい画像のパス
id : str
個人識別用uuid
bpm : int
作成した曲のbpm
is_icon : bool
Twitterアイコンであるかどうか
is_related : bool
その人に関係ある画像であるかどうか
Return
------
concat_clip :
画像から生成した音楽と同じ長さの無音動画
"""
MOVIE_PATH = f'./movie/{id}/'
FPS = 30
SECONDS_PER_FRAME = 1/30
# 音楽の長さ,フレーム数を取得
music_length = get_music(id)
# 画像を格納する処理
clips = []
if is_icon: # Twitterアイコンのとき
img_list = clip_circle(path, id, bpm, music_length)
for i in img_list:
clip = mpy.ImageClip(i).set_duration(SECONDS_PER_FRAME)
clips.append(clip)
elif is_related: # 関係ある画像のとき
img_list = clip_related(path, id, bpm, music_length)
for i in img_list:
clip = mpy.ImageClip(i).set_duration(SECONDS_PER_FRAME)
clips.append(clip)
else: # 背景のとき
# 画像を取得
img = cv2.imread(path, -1)
img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA)
clip = mpy.ImageClip(img).set_duration(music_length)
clips.append(clip)
# 動画を作成する処理
concat_clip = mpy.concatenate_videoclips(clips, method='compose')
clip.close()
return concat_clip
def clip_circle(path, id, bpm, music_length):
"""
正方形のTwitterアイコンを円形に切り出し,
スライドショー用の配列に格納する関数
Parameters
----------
path : str
正方形のTwitterアイコンのパス
id : str
個人識別用uuid
bpm : int
作成した曲のbpm
music_length : float
音楽の長さ(秒)
Return
------
img_list : ndarray
円形に切り出したTwitterアイコンの配列
"""
MOVIE_PATH = './movie/' + id + '/'
FPS = 30
# 画像の読み込み
img_origin = cv2.imread(path, -1)
img_origin = cv2.cvtColor(img_origin, cv2.COLOR_BGRA2RGBA)
img_list = []
movie_frames = int(music_length * FPS)
for i in range(movie_frames):
'''
bpmに合わせて拡大縮小を行う.
bpm
60(s)でpbm(拍) = 60/bpm(s)で1(拍)
fps
1(s)で30(枚) = 60/bpm(s)で1800/bpm(枚)
'''
SECONDS_PER_MINUTE = 60
FPS = 30
FRAMES_PER_BEAT = SECONDS_PER_MINUTE * FPS // bpm
# 深いコピー
img = img_origin.copy()
# 画像の拡大縮小
if i % FRAMES_PER_BEAT < FRAMES_PER_BEAT // 2:
new_size = 200 - 50 * (i % (FRAMES_PER_BEAT // 2)) // (FRAMES_PER_BEAT // 2)
else:
new_size = 150 + 50 * (i % (FRAMES_PER_BEAT // 2)) // (FRAMES_PER_BEAT // 2)
# マスク作成 (黒く塗りつぶす画素の値は0)
mask = np.zeros((new_size, new_size), dtype=np.uint8)
# 円を描画する関数circle()を利用してマスクの残したい部分を 255 にしている。
cv2.circle(mask, center=(new_size//2, new_size//2), radius=new_size//2, color=255, thickness=-1)
# 画像の拡縮
img = cv2.resize(img, dsize=(new_size, new_size))
# maskの値が0の画素は透過する
img[mask==0] = [0, 0, 0, 0]
img_list.append(img)
return img_list
def clip_related(path, id, bpm, music_length):
"""
その人に関係ある画像を,
スライドショー用の配列に格納する関数
Parameters
----------
path : str
その人に関係ある画像のパス
id : str
個人識別用uuid
bpm : int
曲の速さ(♩/秒)
music_length : float
音楽の長さ(秒)
Return
------
img_list : ndarray
その人に関係ある画像の配列
"""
MOVIE_PATH = './movie/' + id + '/'
FPS = 30
# 画像の読み込み
img_origin = cv2.imread(path, -1)
img_origin = cv2.cvtColor(img_origin, cv2.COLOR_BGRA2RGBA)
height = img_origin.shape[0]
width = img_origin.shape[1]
img_list = []
movie_frames = int(music_length * FPS)
for i in range(movie_frames):
'''
bpmに合わせてスイングを行う.
bpm
60(s)でpbm(拍) = 60/bpm(s)で1(拍)
fps
1(s)で30(枚) = 60/bpm(s)で1800/bpm(枚)
'''
SECONDS_PER_MINUTE = 60
FPS = 30
FRAMES_PER_BEAT = SECONDS_PER_MINUTE * FPS // bpm
# 深いコピー
img = img_origin.copy()
# 画像を回転する角度を決定
if i % FRAMES_PER_BEAT < FRAMES_PER_BEAT // 2:
angle = 15 - 30 * (i % (FRAMES_PER_BEAT // 2)) // (FRAMES_PER_BEAT // 2)
else:
angle = -15 + 30 * (i % (FRAMES_PER_BEAT // 2)) // (FRAMES_PER_BEAT // 2)
rad_angle = np.radians(angle)
width_rot = int(np.round(width*abs(np.cos(rad_angle)) + height*abs(np.sin(rad_angle))))
height_rot = int(np.round(width*abs(np.sin(rad_angle)) + height*abs(np.cos(rad_angle))))
# 回転行列を生成
mat = cv2.getRotationMatrix2D((width//2, height), angle, 1)
mat[0][2] += -width/2 + width_rot/2
mat[1][2] += -height/2 + height_rot/2
# アフィン変換
affine_img = cv2.warpAffine(img, mat, (width_rot, height_rot))
img_list.append(affine_img)
return img_list
def movie_create(id, bpm, related_list):
"""
Parameters
----------
id : str
個人識別用uuid
bpm : int
作成した曲のbpm
related_list : array
関連するキーワードのリスト
"""
MOVIE_PATH = './movie/' + id + '/'
WAVE_PATH = MOVIE_PATH + settings.WAV_FILE_NAME
BASE_IMG_PATH = './movie_create/common_images/cake_background.PNG'
ICON_IMG_PATH = MOVIE_PATH + '/icon.png'
IMGAGES_PATH = './movie_create/images/'
BASE_HEIGHT = 720
BASE_WIDTH = 720
FPS = 30
# クリップを作成
base_clip = create_clip(BASE_IMG_PATH, id)
icon_clip = create_clip(ICON_IMG_PATH, id, bpm, is_icon=True)
#related_clip_0 = create_clip(IMGAGES_PATH + related_list[0] + '/01.PNG', id, bpm, is_related=True)
#related_clip_1 = create_clip(IMGAGES_PATH + related_list[1] + '/01.PNG', id, bpm, is_related=True)
#related_clip_2 = create_clip(IMGAGES_PATH + related_list[2] + '/01.PNG', id, bpm, is_related=True)
# クリップの合成
final_clip = mpy.CompositeVideoClip([base_clip, icon_clip.set_position((BASE_WIDTH * 0.38, BASE_HEIGHT * 0.2))])#, \
#related_clip_0.set_position((0, BASE_HEIGHT * 0.55)), related_clip_1.set_position((BASE_WIDTH * 0.37, BASE_HEIGHT * 0.65)), \
#related_clip_2.set_position((BASE_WIDTH * 0.7, BASE_HEIGHT * 0.55))])
# 音と動画を合成
final_clip = final_clip.set_audio(mpy.AudioFileClip(WAVE_PATH))
final_clip.write_videofile(filename = MOVIE_PATH + 'happy_birthday.mp4', codec='libx264', audio_codec='aac', fps=FPS)
final_clip.close()
#related_clip_2.close()
#related_clip_1.close()
#related_clip_0.close()
icon_clip.close()
base_clip.close()
| [
"numpy.radians",
"moviepy.editor.AudioFileClip",
"wave.open",
"cv2.warpAffine",
"moviepy.editor.concatenate_videoclips",
"numpy.zeros",
"cv2.circle",
"moviepy.editor.ImageClip",
"cv2.cvtColor",
"numpy.cos",
"numpy.sin",
"cv2.getRotationMatrix2D",
"cv2.resize",
"cv2.imread"
] | [((1879, 1930), 'moviepy.editor.concatenate_videoclips', 'mpy.concatenate_videoclips', (['clips'], {'method': '"""compose"""'}), "(clips, method='compose')\n", (1905, 1930), True, 'import moviepy.editor as mpy\n'), ((2425, 2445), 'cv2.imread', 'cv2.imread', (['path', '(-1)'], {}), '(path, -1)\n', (2435, 2445), False, 'import cv2, wave\n'), ((2463, 2508), 'cv2.cvtColor', 'cv2.cvtColor', (['img_origin', 'cv2.COLOR_BGRA2RGBA'], {}), '(img_origin, cv2.COLOR_BGRA2RGBA)\n', (2475, 2508), False, 'import cv2, wave\n'), ((4057, 4077), 'cv2.imread', 'cv2.imread', (['path', '(-1)'], {}), '(path, -1)\n', (4067, 4077), False, 'import cv2, wave\n'), ((4095, 4140), 'cv2.cvtColor', 'cv2.cvtColor', (['img_origin', 'cv2.COLOR_BGRA2RGBA'], {}), '(img_origin, cv2.COLOR_BGRA2RGBA)\n', (4107, 4140), False, 'import cv2, wave\n'), ((501, 526), 'wave.open', 'wave.open', (['WAVE_PATH', '"""r"""'], {}), "(WAVE_PATH, 'r')\n", (510, 526), False, 'import cv2, wave\n'), ((3232, 3278), 'numpy.zeros', 'np.zeros', (['(new_size, new_size)'], {'dtype': 'np.uint8'}), '((new_size, new_size), dtype=np.uint8)\n', (3240, 3278), True, 'import numpy as np\n'), ((3342, 3449), 'cv2.circle', 'cv2.circle', (['mask'], {'center': '(new_size // 2, new_size // 2)', 'radius': '(new_size // 2)', 'color': '(255)', 'thickness': '(-1)'}), '(mask, center=(new_size // 2, new_size // 2), radius=new_size // \n 2, color=255, thickness=-1)\n', (3352, 3449), False, 'import cv2, wave\n'), ((3470, 3513), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(new_size, new_size)'}), '(img, dsize=(new_size, new_size))\n', (3480, 3513), False, 'import cv2, wave\n'), ((4900, 4917), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (4910, 4917), True, 'import numpy as np\n'), ((5144, 5199), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(width // 2, height)', 'angle', '(1)'], {}), '((width // 2, height), angle, 1)\n', (5167, 5199), False, 'import cv2, wave\n'), ((5327, 5376), 'cv2.warpAffine', 'cv2.warpAffine', 
(['img', 'mat', '(width_rot, height_rot)'], {}), '(img, mat, (width_rot, height_rot))\n', (5341, 5376), False, 'import cv2, wave\n'), ((6774, 6802), 'moviepy.editor.AudioFileClip', 'mpy.AudioFileClip', (['WAVE_PATH'], {}), '(WAVE_PATH)\n', (6791, 6802), True, 'import moviepy.editor as mpy\n'), ((1682, 1702), 'cv2.imread', 'cv2.imread', (['path', '(-1)'], {}), '(path, -1)\n', (1692, 1702), False, 'import cv2, wave\n'), ((1717, 1755), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGRA2RGBA'], {}), '(img, cv2.COLOR_BGRA2RGBA)\n', (1729, 1755), False, 'import cv2, wave\n'), ((1333, 1349), 'moviepy.editor.ImageClip', 'mpy.ImageClip', (['i'], {}), '(i)\n', (1346, 1349), True, 'import moviepy.editor as mpy\n'), ((1771, 1789), 'moviepy.editor.ImageClip', 'mpy.ImageClip', (['img'], {}), '(img)\n', (1784, 1789), True, 'import moviepy.editor as mpy\n'), ((1554, 1570), 'moviepy.editor.ImageClip', 'mpy.ImageClip', (['i'], {}), '(i)\n', (1567, 1570), True, 'import moviepy.editor as mpy\n'), ((4961, 4978), 'numpy.cos', 'np.cos', (['rad_angle'], {}), '(rad_angle)\n', (4967, 4978), True, 'import numpy as np\n'), ((4993, 5010), 'numpy.sin', 'np.sin', (['rad_angle'], {}), '(rad_angle)\n', (4999, 5010), True, 'import numpy as np\n'), ((5058, 5075), 'numpy.sin', 'np.sin', (['rad_angle'], {}), '(rad_angle)\n', (5064, 5075), True, 'import numpy as np\n'), ((5090, 5107), 'numpy.cos', 'np.cos', (['rad_angle'], {}), '(rad_angle)\n', (5096, 5107), True, 'import numpy as np\n')] |
"""
# Module 4- Example Plot of Weather Data
#### author: <NAME>
In this module, I we will be using data from RadWatch's AirMonitor to create a plot that compares
counts per second (CPS) due to Bismuth-214 against the CPS of the lesser occurring isotope Cesium-137.
I will be using the following link:
https://radwatch.berkeley.edu/sites/default/files/pictures/rooftop_tmp/weather.csv
The first step in creating a plot is being aware of the format of your CSV file. This weather.csv
is organized 9 columns. The 1st column contains important timestamp information, the 2nd column
contains Bi-234 CPS, and the 5th column contains Cs-137 CPS. In addition, we are interested in
the 6th column of Bi-234 margin of error and the 9th column with Cs-137's margin of error.
"""
import csv
import io
import urllib.request
import matplotlib.pyplot as plt
import matplotlib.dates as mdates # another matplotlib convention; this extension facilitates dates as axes labels.
from datetime import datetime # we will use the datetime extension so we can group the timestamp data into manageable units of year, month, date, and time.
url = 'https://radwatch.berkeley.edu/sites/default/files/pictures/rooftop_tmp/weather.csv'
response = urllib.request.urlopen(url)
reader = csv.reader(io.TextIOWrapper(response))
timedata = []
Bi214 = []
Cs137 = []
line = 0
for row in reader:
if line != 0:
timedata.append(datetime.strptime(row[0], '%Y-%m-%d %H:%M:%S'))
#datetime.strptime is a class object that facilitates usage of date/time data in Python
Bi214.append(float(row[1]))
Cs137.append(float(row[4]))
line += 1
def weather_plot1():
fig, ax = plt.subplots() # matplotlib convention that unpacks figures into variables for ax (axis manipulation) and fig (figure manipulation)
# concise line for: fig = plt.figure()
# AND: fig.add_subplot(1,1,1)
ax.plot(timedata, Bi214, 'ro-', label="Bismuth-214")
ax.plot(timedata, Cs137, 'bs-', label="Cesium-137")
plt.title('AirMonitor Data: Bi-214 and Cs-137 CPS from %s-%s to %s-%s'
%(timedata[0].month, timedata[0].day, timedata[-1].month, timedata[-1].day))
# string interpolation (represented by %s): The '%s' are replaced by the strings given in %(-,-,-,-)
plt.xlabel('Time')
plt.ylabel('counts per second')
plt.legend(loc='best') # loc=best places the legend where it will obstruct the data the least.
# There are a few problems with this current plot. One simple fix to make the x-tick labels more visible is via rotation. We can also convey the data more objectively with a logarithmic y-axis:
def weather_plot2():
weather_plot1()
# adjustments
plt.xticks(rotation=30)
plt.yscale('log')
plt.title('AirMonitor Data: Bi-214 and Cs-137 CPS (logarithmic) from %s-%s to %s-%s'
%(timedata[0].month, timedata[0].day, timedata[-1].month, timedata[-1].day))
plt.show()
#While these plots are fine, many professional graphics thoroughly control every aspect of the plot such as in the following example. Also, this example will calculate error and include error bars (similar to error seen on the AirMonitor website).
def weather_plot3():
import numpy as np
# 1st step: plot the data
fig, ax = plt.subplots()
ax.plot(timedata, Bi214, 'ro-', label='Bismuth-214')
ax.errorbar(timedata, Bi214, yerr=np.sqrt(Bi214)/60, fmt='ro', ecolor='r')
ax.plot(timedata, Cs137, 'bs-', label='Cesium-137')
ax.errorbar(timedata, Cs137, yerr=np.sqrt(Cs137)/60, fmt='bs', ecolor='b')
# for ax.errorbar, fmt=format and is identical to corresponding plot for coherence
# ecolor=line color and is identical to corresponding plot for same reason.
# 2nd step: legend and axis manipulations:
plt.legend(loc='best')
plt.yscale('log')
# 3rd step: format ticks along axis; we will use matplotlib's built-in datetime commands to format the axis:
ax.xaxis.set_major_locator(mdates.DayLocator())
# ticks on x-axis day-by-day basis
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
# tick labels only occur on days in the format: Month-Day
# you can customize the format, i.e. '%m-%d-%Y %H:00' would be Month-Day-Year Hour:00
ax.xaxis.set_minor_locator(mdates.HourLocator())
# minor ticks on x-axis occur on hour marks
plt.xticks(rotation=30)
# 4th step: titles and labels
plt.title('AirMonitor Data: Bi-214 and Cs-137 CPS from %s-%s to %s-%s'
%(timedata[0].month, timedata[0].day, timedata[-1].month, timedata[-1].day))
plt.xlabel('Time')
plt.ylabel('counts per second') | [
"numpy.sqrt",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"datetime.datetime.strptime",
"matplotlib.pyplot.xlabel",
"matplotlib.dates.DateFormatter",
"matplotlib.dates.HourLocator",
"io.TextIOWrapper",
"matplotlib.dates.DayLocator",
"matplotlib.pyplot.title",
"matplotlib.pyplot.yscal... | [((1315, 1341), 'io.TextIOWrapper', 'io.TextIOWrapper', (['response'], {}), '(response)\n', (1331, 1341), False, 'import io\n'), ((1740, 1754), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1752, 1754), True, 'import matplotlib.pyplot as plt\n'), ((2084, 2237), 'matplotlib.pyplot.title', 'plt.title', (["('AirMonitor Data: Bi-214 and Cs-137 CPS from %s-%s to %s-%s' % (timedata[0\n ].month, timedata[0].day, timedata[-1].month, timedata[-1].day))"], {}), "('AirMonitor Data: Bi-214 and Cs-137 CPS from %s-%s to %s-%s' % (\n timedata[0].month, timedata[0].day, timedata[-1].month, timedata[-1].day))\n", (2093, 2237), True, 'import matplotlib.pyplot as plt\n'), ((2354, 2372), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (2364, 2372), True, 'import matplotlib.pyplot as plt\n'), ((2378, 2409), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""counts per second"""'], {}), "('counts per second')\n", (2388, 2409), True, 'import matplotlib.pyplot as plt\n'), ((2415, 2437), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2425, 2437), True, 'import matplotlib.pyplot as plt\n'), ((2785, 2808), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (2795, 2808), True, 'import matplotlib.pyplot as plt\n'), ((2814, 2831), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2824, 2831), True, 'import matplotlib.pyplot as plt\n'), ((2837, 3014), 'matplotlib.pyplot.title', 'plt.title', (["('AirMonitor Data: Bi-214 and Cs-137 CPS (logarithmic) from %s-%s to %s-%s' %\n (timedata[0].month, timedata[0].day, timedata[-1].month, timedata[-1].day))"], {}), "(\n 'AirMonitor Data: Bi-214 and Cs-137 CPS (logarithmic) from %s-%s to %s-%s'\n % (timedata[0].month, timedata[0].day, timedata[-1].month, timedata[-1\n ].day))\n", (2846, 3014), True, 'import matplotlib.pyplot as plt\n'), ((3010, 3020), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (3018, 3020), True, 'import matplotlib.pyplot as plt\n'), ((3367, 3381), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3379, 3381), True, 'import matplotlib.pyplot as plt\n'), ((3910, 3932), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (3920, 3932), True, 'import matplotlib.pyplot as plt\n'), ((3938, 3955), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (3948, 3955), True, 'import matplotlib.pyplot as plt\n'), ((4512, 4535), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (4522, 4535), True, 'import matplotlib.pyplot as plt\n'), ((4578, 4731), 'matplotlib.pyplot.title', 'plt.title', (["('AirMonitor Data: Bi-214 and Cs-137 CPS from %s-%s to %s-%s' % (timedata[0\n ].month, timedata[0].day, timedata[-1].month, timedata[-1].day))"], {}), "('AirMonitor Data: Bi-214 and Cs-137 CPS from %s-%s to %s-%s' % (\n timedata[0].month, timedata[0].day, timedata[-1].month, timedata[-1].day))\n", (4587, 4731), True, 'import matplotlib.pyplot as plt\n'), ((4737, 4755), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (4747, 4755), True, 'import matplotlib.pyplot as plt\n'), ((4761, 4792), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""counts per second"""'], {}), "('counts per second')\n", (4771, 4792), True, 'import matplotlib.pyplot as plt\n'), ((4108, 4127), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {}), '()\n', (4125, 4127), True, 'import matplotlib.dates as mdates\n'), ((4207, 4236), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%m-%d"""'], {}), "('%m-%d')\n", (4227, 4236), True, 'import matplotlib.dates as mdates\n'), ((4432, 4452), 'matplotlib.dates.HourLocator', 'mdates.HourLocator', ([], {}), '()\n', (4450, 4452), True, 'import matplotlib.dates as mdates\n'), ((1459, 1505), 'datetime.datetime.strptime', 'datetime.strptime', (['row[0]', '"""%Y-%m-%d 
%H:%M:%S"""'], {}), "(row[0], '%Y-%m-%d %H:%M:%S')\n", (1476, 1505), False, 'from datetime import datetime\n'), ((3479, 3493), 'numpy.sqrt', 'np.sqrt', (['Bi214'], {}), '(Bi214)\n', (3486, 3493), True, 'import numpy as np\n'), ((3616, 3630), 'numpy.sqrt', 'np.sqrt', (['Cs137'], {}), '(Cs137)\n', (3623, 3630), True, 'import numpy as np\n')] |
import os
import argparse
from tqdm import tqdm
import cv2
import numpy as np
import random
from PIL import Image
import glob
parser = argparse.ArgumentParser()
parser.add_argument("--option", type=str, required=True,
help="Use resize|count|group|verify|split|resample to preprocess datasets.")
parser.add_argument("--input", type=str, required=True,
help="Input directory, usually the directory of images.")
parser.add_argument("--output", type=str, required=True,
help="Output directory if there is any output")
parser.add_argument("--data_root", type=str, required=False,
help="Taikoda data root")
parser.add_argument("--split_csv", type=str, required=False,
help="Split file")
parser.add_argument("--lbl_dir", type=str, required=False,
help="Label directory")
parser.add_argument("--width", type=int, default=1920)
parser.add_argument("--height", type=int, default=1080)
parser.add_argument("--nearest", type=bool, default=True)
parser.add_argument("--resampling", type=bool, default=False)
parser.add_argument("--test", action='store_true')
args = parser.parse_args()
def mask_imgs(in_dir, out_dir, split_csv, lbl_dir):
if not os.path.exists(in_dir):
print("Input directory {0} not exists".format(in_dir))
return
if not os.path.exists(out_dir):
os.makedirs(out_dir)
split_csv = args.split_csv
images = []
with open(split_csv, 'r') as f:
lines = f.readlines()
for line in lines:
images.append(line.strip('\n'))
images = list(images)
print(images[0])
print("Testing images for damage detection: ", len(images))
for i in tqdm(range(len(images)), desc="Masking validation images..."):
img = np.array(cv2.imread(os.path.join(in_dir, images[i]+'_Scene.png'), cv2.IMREAD_UNCHANGED))
lbl = np.tile(np.expand_dims(np.array(Image.open(os.path.join(lbl_dir, images[i]+'.bmp')).resize((1920,1080))),2),(1,1,3)).astype(np.uint8)
if img is None:
print('Wrong path:', os.path.join(in_dir, images[i]))
else:
img[lbl != 4] = 0
cv2.imwrite(os.path.join(out_dir, images[i]+'_Scene.png'), img)
def resize_imgs(in_dir, out_dir, width, height, nearest=True):
if not os.path.exists(in_dir):
print("Input directory {0} not exists".format(in_dir))
return
if not os.path.exists(out_dir):
os.makedirs(out_dir)
img_list = os.listdir(in_dir)
img_list.sort()
for img_name in tqdm(img_list, desc="Processing ..."):
img = cv2.imread(os.path.join(in_dir, img_name), cv2.IMREAD_UNCHANGED)
if img is None:
print('Wrong path:', os.path.join(in_dir, img_name))
else:
out_img = cv2.resize(img, (width , height), cv2.INTER_NEAREST)
cv2.imwrite(os.path.join(out_dir, img_name), out_img)
def splitbycase(in_dir, out_dir, data_root, seed=13, resampling=False):
if not os.path.exists(in_dir):
print("Input directory {0} not exists".format(in_dir))
return
if not os.path.exists(out_dir):
os.makedirs(out_dir)
train_images_cmp = []
train_images_dmg = []
with open(in_dir, 'r') as f:
lines = f.readlines()
for line in lines:
words = line.replace("\\", "/").strip("\n").split(",")
# valid image for cmp training
if (words[5] == 'True'):
train_images_cmp.append(os.path.basename(words[0].strip("_Scene.png")))
if (words[6] == 'True'):
train_images_dmg.append(os.path.basename(words[0].strip("_Scene.png")))
train_images_cmp = list(train_images_cmp)
train_images_dmg = list(train_images_dmg)
random.seed(seed)
cases = list(np.arange(0,175,1))
random.shuffle(cases)
linspace = list(np.arange(0,175,10))
# 10-fold
for i in range(10):
train_images_cmp_train = []
train_images_cmp_val = []
train_images_dmg_train = []
train_images_dmg_val = []
case_id = cases[linspace[i]:linspace[i+1]]
for name in train_images_cmp:
if name.split('_')[1][4:] in str(case_id):
train_images_cmp_val.append(name)
else:
train_images_cmp_train.append(name)
for name in train_images_dmg:
if name.split('_')[1][4:] in str(case_id):
train_images_dmg_val.append(name)
else:
train_images_dmg_train.append(name)
with open(os.path.join(out_dir, 'train_cmp'+str(i)+'.txt'), 'w') as f:
# select the ratio portion as training set
n=1
for line in train_images_cmp_train:
repeat = 1
if resampling:
file_name = data_root+'/synthetic/train/labcmp/'+line+'.bmp'
img = np.array(Image.open(file_name))
# if sleeper
er_labels = np.where(img==7)[0]
if len(er_labels) >= 10:
n+=1
repeat = 10
# if non-structural
er_labels1 = np.where(img==5)[0]
if len(er_labels1) >= 10:
n+=1
repeat = 10
for r in range(repeat):
f.writelines(line + '\n')
with open(os.path.join(out_dir, 'val_cmp'+str(i)+'.txt'), 'w') as f:
# select the rest as validation set
f.writelines(line + '\n' for line in train_images_cmp_val)
with open(os.path.join(out_dir, 'train_dmg'+str(i)+'.txt'), 'w') as f:
# select the ratio portion as training set
for line in train_images_dmg_train:
repeat = 1
if resampling:
file_name = data_root+'/synthetic/train/labdmg/'+line+'.bmp'
img = np.array(Image.open(file_name))
# if exposed rebar
er_labels = np.where(img==3)[0]
if len(er_labels) >= 10:
n+=1
repeat = 3
for r in range(repeat):
f.writelines(line + '\n')
with open(os.path.join(out_dir, 'val_dmg'+str(i)+'.txt'), 'w') as f:
# select the rest as validation set
f.writelines(line + '\n' for line in train_images_dmg_val)
def split_puretex(in_dir, out_dir, data_root, test=False, train_ratio=0.9, seed=13, resampling=False):
    """Split the pure-texture image list into train/val (or test) name files.

    Parameters
    ----------
    in_dir : str
        Path to a CSV-style listing; the first comma-separated field of
        each line is an image path ending in ``.png``.
    out_dir : str
        Directory the ``*_puretex.txt`` name files are written to
        (created if missing).
    data_root : str
        Dataset root; only read when ``resampling`` is set, to locate the
        damage label maps under ``<data_root>/synthetic_puretex/labdmg/``.
    test : bool
        When True, write every name to ``test_puretex.txt`` (no split).
    train_ratio : float
        Fraction of the shuffled list written to ``train_puretex.txt``;
        the remainder goes to ``val_puretex.txt``.
    seed : int
        Shuffle seed, so splits are reproducible.
    resampling : bool
        When True, a training image whose label map contains >= 10 pixels
        of class 3 (exposed rebar) is written 5 times to oversample that
        rare class.
    """
    if not os.path.exists(in_dir):
        print("Input directory {0} not exists".format(in_dir))
        return
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    train_images = []
    with open(in_dir, 'r') as f:
        for line in f:
            words = line.replace("\\", "/").strip("\n").split(",")
            name = os.path.basename(words[0])
            # BUGFIX: the original used str.strip(".png"), which removes ANY
            # leading/trailing '.', 'p', 'n', 'g' characters (e.g.
            # "group.png" -> "grou"); only the extension should be dropped.
            if name.endswith(".png"):
                name = name[:-len(".png")]
            train_images.append(name)
    if not test:
        random.seed(seed)
        random.shuffle(train_images)
        # Size of the training portion; the tail becomes the validation set.
        train_length = int(len(train_images) * train_ratio)
        with open(os.path.join(out_dir, 'train_puretex.txt'), 'w') as f:
            for name in train_images[:train_length]:
                repeat = 1
                if resampling:
                    # Oversample images with enough exposed-rebar pixels.
                    file_name = data_root + '/synthetic_puretex/labdmg/' + name + '.bmp'
                    img = np.array(Image.open(file_name))
                    er_labels = np.where(img == 3)[0]
                    if len(er_labels) >= 10:
                        repeat = 5
                for _ in range(repeat):
                    f.write(name + '\n')
        with open(os.path.join(out_dir, 'val_puretex.txt'), 'w') as f:
            f.writelines(name + '\n' for name in train_images[train_length:])
    else:
        with open(os.path.join(out_dir, 'test_puretex.txt'), 'w') as f:
            f.writelines(name + '\n' for name in train_images)
def main():
    """Dispatch the parsed command-line ``option`` to the matching tool."""
    option = args.option
    print(option)
    # The options are mutually exclusive, so an elif chain is equivalent
    # to the original sequence of independent if statements.
    if option == "resize":
        resize_imgs(args.input, args.output, args.width, args.height)
    elif option == "split_puretex":
        split_puretex(args.input, args.output, args.data_root, args.test, resampling=args.resampling)
    elif option == "splitbycase":
        splitbycase(args.input, args.output, args.data_root, resampling=args.resampling)
    elif option == "mask_imgs":
        mask_imgs(args.input, args.output, args.split_csv, args.lbl_dir)


if __name__ == "__main__":
    main()
| [
"os.path.exists",
"os.listdir",
"PIL.Image.open",
"random.shuffle",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.where",
"tqdm.tqdm",
"os.path.join",
"random.seed",
"cv2.resize",
"numpy.arange"
] | [((136, 161), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (159, 161), False, 'import argparse\n'), ((2517, 2535), 'os.listdir', 'os.listdir', (['in_dir'], {}), '(in_dir)\n', (2527, 2535), False, 'import os\n'), ((2576, 2613), 'tqdm.tqdm', 'tqdm', (['img_list'], {'desc': '"""Processing ..."""'}), "(img_list, desc='Processing ...')\n", (2580, 2613), False, 'from tqdm import tqdm\n'), ((3793, 3810), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3804, 3810), False, 'import random\n'), ((3853, 3874), 'random.shuffle', 'random.shuffle', (['cases'], {}), '(cases)\n', (3867, 3874), False, 'import random\n'), ((1258, 1280), 'os.path.exists', 'os.path.exists', (['in_dir'], {}), '(in_dir)\n', (1272, 1280), False, 'import os\n'), ((1371, 1394), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (1385, 1394), False, 'import os\n'), ((1404, 1424), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (1415, 1424), False, 'import os\n'), ((2334, 2356), 'os.path.exists', 'os.path.exists', (['in_dir'], {}), '(in_dir)\n', (2348, 2356), False, 'import os\n'), ((2447, 2470), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (2461, 2470), False, 'import os\n'), ((2480, 2500), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (2491, 2500), False, 'import os\n'), ((3022, 3044), 'os.path.exists', 'os.path.exists', (['in_dir'], {}), '(in_dir)\n', (3036, 3044), False, 'import os\n'), ((3135, 3158), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (3149, 3158), False, 'import os\n'), ((3168, 3188), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (3179, 3188), False, 'import os\n'), ((3829, 3849), 'numpy.arange', 'np.arange', (['(0)', '(175)', '(1)'], {}), '(0, 175, 1)\n', (3838, 3849), True, 'import numpy as np\n'), ((3895, 3916), 'numpy.arange', 'np.arange', (['(0)', '(175)', '(10)'], {}), '(0, 175, 10)\n', (3904, 3916), True, 'import numpy as 
np\n'), ((6633, 6655), 'os.path.exists', 'os.path.exists', (['in_dir'], {}), '(in_dir)\n', (6647, 6655), False, 'import os\n'), ((6746, 6769), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (6760, 6769), False, 'import os\n'), ((6779, 6799), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (6790, 6799), False, 'import os\n'), ((7148, 7165), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (7159, 7165), False, 'import random\n'), ((7174, 7202), 'random.shuffle', 'random.shuffle', (['train_images'], {}), '(train_images)\n', (7188, 7202), False, 'import random\n'), ((2640, 2670), 'os.path.join', 'os.path.join', (['in_dir', 'img_name'], {}), '(in_dir, img_name)\n', (2652, 2670), False, 'import os\n'), ((2819, 2870), 'cv2.resize', 'cv2.resize', (['img', '(width, height)', 'cv2.INTER_NEAREST'], {}), '(img, (width, height), cv2.INTER_NEAREST)\n', (2829, 2870), False, 'import cv2\n'), ((1832, 1878), 'os.path.join', 'os.path.join', (['in_dir', "(images[i] + '_Scene.png')"], {}), "(in_dir, images[i] + '_Scene.png')\n", (1844, 1878), False, 'import os\n'), ((2106, 2137), 'os.path.join', 'os.path.join', (['in_dir', 'images[i]'], {}), '(in_dir, images[i])\n', (2118, 2137), False, 'import os\n'), ((2207, 2254), 'os.path.join', 'os.path.join', (['out_dir', "(images[i] + '_Scene.png')"], {}), "(out_dir, images[i] + '_Scene.png')\n", (2219, 2254), False, 'import os\n'), ((2751, 2781), 'os.path.join', 'os.path.join', (['in_dir', 'img_name'], {}), '(in_dir, img_name)\n', (2763, 2781), False, 'import os\n'), ((2896, 2927), 'os.path.join', 'os.path.join', (['out_dir', 'img_name'], {}), '(out_dir, img_name)\n', (2908, 2927), False, 'import os\n'), ((7222, 7264), 'os.path.join', 'os.path.join', (['out_dir', '"""train_puretex.txt"""'], {}), "(out_dir, 'train_puretex.txt')\n", (7234, 7264), False, 'import os\n'), ((7924, 7964), 'os.path.join', 'os.path.join', (['out_dir', '"""val_puretex.txt"""'], {}), "(out_dir, 'val_puretex.txt')\n", (7936, 
7964), False, 'import os\n'), ((8131, 8172), 'os.path.join', 'os.path.join', (['out_dir', '"""test_puretex.txt"""'], {}), "(out_dir, 'test_puretex.txt')\n", (8143, 8172), False, 'import os\n'), ((4944, 4965), 'PIL.Image.open', 'Image.open', (['file_name'], {}), '(file_name)\n', (4954, 4965), False, 'from PIL import Image\n'), ((5032, 5050), 'numpy.where', 'np.where', (['(img == 7)'], {}), '(img == 7)\n', (5040, 5050), True, 'import numpy as np\n'), ((5235, 5253), 'numpy.where', 'np.where', (['(img == 5)'], {}), '(img == 5)\n', (5243, 5253), True, 'import numpy as np\n'), ((6005, 6026), 'PIL.Image.open', 'Image.open', (['file_name'], {}), '(file_name)\n', (6015, 6026), False, 'from PIL import Image\n'), ((6099, 6117), 'numpy.where', 'np.where', (['(img == 3)'], {}), '(img == 3)\n', (6107, 6117), True, 'import numpy as np\n'), ((7626, 7647), 'PIL.Image.open', 'Image.open', (['file_name'], {}), '(file_name)\n', (7636, 7647), False, 'from PIL import Image\n'), ((7681, 7699), 'numpy.where', 'np.where', (['(img == 3)'], {}), '(img == 3)\n', (7689, 7699), True, 'import numpy as np\n'), ((1958, 1999), 'os.path.join', 'os.path.join', (['lbl_dir', "(images[i] + '.bmp')"], {}), "(lbl_dir, images[i] + '.bmp')\n", (1970, 1999), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Spyder Editor

Analysis of the top-10 best-selling mobile phones on JD.com (Jingdong).

Open questions:
- How can %matplotlib widget be used here?  More generally, what is a
  convenient way to use matplotlib from within Spyder?
- 1. length/width chart details
"""
# Part I. Basic charts
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
import pandas as pd
import numpy as np
# Use a CJK-capable font so the Chinese column names render, and keep
# the minus sign displayable under such fonts.
matplotlib.rcParams['font.family'] = ['DengXian', 'sans-serif']
matplotlib.rcParams['axes.unicode_minus'] = False
#%% 1. Data preparation
"""markdown
基础图表1 - 长宽比例图
每一条折线表示一款手机,其有三个顶点,左下原点,屏幕右上点,手机右上点。
屏幕尺寸的计算方法:
> $\frac{\sqrt{x^2+y^2}}{Z}$
计算对角线的像素数除以对角线长度算出PPI,之后计算屏幕长与宽。
"""
# Column meanings inferred from the math below -- TODO confirm against the CSV:
# '宽'/'长' = body width/height (presumably mm), '分辨率宽'/'分辨率长' = display
# resolution in pixels, '屏' = screen diagonal in inches, '重' = weight (grams).
fn = r'E:\notebooks\data_visualization_notebooks\phone_data2.csv'
df = pd.read_csv(fn).iloc[0:15]
row, col = df.shape
# CPU model as a categorical; ec enumerates the distinct CPU categories
# and is reused for per-category coloring in the bar chart below.
c = df['CPU'].astype('category')
ec = list(enumerate(c.cat.categories))
# Pixel density: diagonal pixel count / physical diagonal.
# '屏' (inches) * 25.4 converts to mm, so this is pixels-per-mm here.
ppi = np.sqrt(df['分辨率宽']**2 + df['分辨率长']**2) / (df['屏'] * 25.4)
x1 = df['宽']
y1 = df['长']
# Physical screen size recovered from resolution and pixel density.
x2 = df['分辨率宽'] / ppi
y2 = df['分辨率长'] / ppi
# One polyline per phone, three vertices each:
# origin -> screen top-right corner -> body top-right corner.
px = list(zip([0]*15, x2, x1))
py = list(zip([0]*15, y2, y1))
#%% 2. Width/height line chart
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(121)
ax.set_aspect(1)  # equal scale so the aspect ratios are not distorted
for i in range(15):
    ax.plot(px[i], py[i], lw=0.35, marker='o', alpha=0.75)
# 4x magnified inset, anchored to the right of the main axes.
axins = zoomed_inset_axes(ax, 4, loc=2, borderpad=0,
                   bbox_to_anchor = (1.2, .3, .8, .7),
                   bbox_transform = ax.transAxes
                   )
for i in range(15):
    axins.plot(px[i], py[i], lw=0.35, marker='o', alpha=0.75)
# Zoom window around the cluster of top-right corners.
axins.set_xlim(65, 80)
axins.set_ylim(145, 165)
mark_inset(ax, axins, loc1=2, loc2=2, fc="none", ec="0.5")
#%% 3. Weight bar chart
"""
横坐标怎样添加偏置?
heft 重量
"""
# Horizontal bars of phone weight, sorted ascending, colored by CPU category.
fig2, ax2 = plt.subplots(figsize=(3,5))
heft_df = df.sort_values(by = '重')
heft_df.index = np.arange(row)
for n, r in ec:
    tdf = heft_df[heft_df['CPU'] == r]
    # Bars are drawn as (weight - 180) to zoom into the interesting range;
    # the hard-coded tick labels below map the axis back to real grams.
    ax2.barh(tdf.index, tdf['重'] - 180,
             color='C%d' % n,
             height=0.7
             )
ax2.set_yticks(heft_df.index.values)
ax2.set_yticklabels(heft_df['name'])
# NOTE(review): these labels assume the default ticks land exactly on
# -40, -20, 0, 20, 40 (i.e. 140..220 g after the -180 offset) -- verify.
ax2.set_xticklabels(['140','160','180','200','220'])
| [
"numpy.sqrt",
"pandas.read_csv",
"mpl_toolkits.axes_grid1.inset_locator.zoomed_inset_axes",
"matplotlib.pyplot.figure",
"mpl_toolkits.axes_grid1.inset_locator.mark_inset",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((1000, 1026), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (1010, 1026), True, 'import matplotlib.pyplot as plt\n'), ((1159, 1273), 'mpl_toolkits.axes_grid1.inset_locator.zoomed_inset_axes', 'zoomed_inset_axes', (['ax', '(4)'], {'loc': '(2)', 'borderpad': '(0)', 'bbox_to_anchor': '(1.2, 0.3, 0.8, 0.7)', 'bbox_transform': 'ax.transAxes'}), '(ax, 4, loc=2, borderpad=0, bbox_to_anchor=(1.2, 0.3, 0.8,\n 0.7), bbox_transform=ax.transAxes)\n', (1176, 1273), False, 'from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset\n'), ((1500, 1558), 'mpl_toolkits.axes_grid1.inset_locator.mark_inset', 'mark_inset', (['ax', 'axins'], {'loc1': '(2)', 'loc2': '(2)', 'fc': '"""none"""', 'ec': '"""0.5"""'}), "(ax, axins, loc1=2, loc2=2, fc='none', ec='0.5')\n", (1510, 1558), False, 'from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset\n'), ((1607, 1635), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3, 5)'}), '(figsize=(3, 5))\n', (1619, 1635), True, 'import matplotlib.pyplot as plt\n'), ((1687, 1701), 'numpy.arange', 'np.arange', (['row'], {}), '(row)\n', (1696, 1701), True, 'import numpy as np\n'), ((789, 831), 'numpy.sqrt', 'np.sqrt', (["(df['分辨率宽'] ** 2 + df['分辨率长'] ** 2)"], {}), "(df['分辨率宽'] ** 2 + df['分辨率长'] ** 2)\n", (796, 831), True, 'import numpy as np\n'), ((663, 678), 'pandas.read_csv', 'pd.read_csv', (['fn'], {}), '(fn)\n', (674, 678), True, 'import pandas as pd\n')] |
# Copyright 2020, by the California Institute of Technology. ALL RIGHTS
# RESERVED. United States Government Sponsorship acknowledged. Any
# commercial use must be negotiated with the Office of Technology Transfer
# at the California Institute of Technology.
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Name:
# habex.m
#
# Purpose:
# Representation of the Habex telescope and coronagraph. To be called using
# the PROPER library procedure "proper.prop_run".
#
# Inputs:
# lambda_m
# The wavelength of propagation in meters (note that the wavelength is provided
# to proper.prop_run in microns and is converted to meters in there).
# gridsize
# Size of the computational grid (gridsize by gridsize elements). Must be
# a power of 2.
#
# Outputs:
# wavefront
# Variable in which the computed E-field at the final image plane is returned.
# The field is sampled by "final_sampling_lam0" lambda_m/D over "nout" by "nout"
# pixels.
# sampling_m
# The sampling at the final image plane in meters per pixel
#
# Optional keywords or switches:
# optval
# (Optional) Structure whose fields are values
# that are passed to the prescription for use as the prescription desires.
#
# Revision history:
# Written by <NAME> (Jet Propulsion Laboratory, California Inst. Technology), January 2020
# Translated to Python by <NAME> (JPL, CIT), February 2020. Added an option,
# use_pr, to retrieve the E-field at the pupil before the focal plane mask. Also
# added the vortex as the focal plane mask.
##----------------------------------------------------------------------------------
import numpy as np
#import matplotlib.pyplot as plt # For Debugging
#from astropy.io import fits # For Debugging
import proper # Use v3.2 or higher
import falco # FALCO needed for propagation to/from vortex
def habex(lambda_m, gridsize, PASSVALUE=None):
    """Habex telescope + charge-6 vortex coronagraph PROPER prescription.

    To be called through ``proper.prop_run``.

    Parameters
    ----------
    lambda_m : float
        Wavelength of propagation in meters.
    gridsize : int
        Computational grid size (gridsize x gridsize); must be a power of 2.
    PASSVALUE : dict, optional
        Prescription options; any key listed in the defaults block below
        overrides the corresponding default.

    Returns
    -------
    wavefront : numpy.ndarray
        Complex E-field at the final image plane, sampled at
        ``final_sampling_lam0`` lambda0/D over ``nout`` x ``nout`` pixels.
        When ``use_pr`` is set, instead the E-field at the pupil before
        the focal-plane mask (perfect-knowledge phase retrieval).
    sampling_m : float
        Sampling at the final image plane in meters per pixel
        (dummy 0 when ``use_pr`` is set).
    """
    # Use None as the default to avoid the shared-mutable-default pitfall.
    if PASSVALUE is None:
        PASSVALUE = {'dummy': 0}

    nact = 64                 # number of actuators across DM
    nact_across_pupil = 62    # number of actuators across pupil
    dm_xc = 31.5              # wavefront centered at corner of DM actuator (0,0 = center of 1st actuator)
    dm_yc = 31.5
    dm_sampling = 0.4e-3      # DM actuator spacing (BMC)

    # -- default settings (override via PASSVALUE) --
    map_dir = '../maps/'      # directory containing optical surface error maps
    lambda0_um = 0.5          # reference wavelength (bandpass center) for star offsets & field stop size
    use_errors = 1            # 1 = use optical surface errors, 0 = none
    zindex = np.array([0, ])  # vector of Zernike indices (Noll ordered)
    zval = np.array([0, ])    # vector of Zernike coefficients (unobscured RMS wavefront, meters)
    xoffset = 0               # star X offset in lambda0/D units
    yoffset = 0               # star Y offset in lambda0/D units
    use_dm1 = 0               # use DM1 (if non-zero, provide pokes in meters in PASSVALUE['dm1'])
    use_dm2 = 0               # use DM2 (if non-zero, provide pokes in meters in PASSVALUE['dm2'])
    use_fpm = 1               # use focal plane mask (0 = no FPM)
    use_lyot_stop = 1         # use Lyot stop (0 = no stop)
    use_field_stop = 1        # use field stop (0 = no stop)
    field_stop_radius = 25.0  # field stop radius in lam0/D
    final_sampling_lam0 = 0.2 # sampling at final image plane in lam0/D
    nout = 300                # output field size (nout x nout pixels)
    normLyotDiam = 0.95       # Lyot stop outer diameter normalized to the beam diameter
    vortexCharge = 6          # charge of the vortex focal plane mask
    pupil_diam_pix = nact_across_pupil * 7   # 7 pixels across each DM actuator
    pr_pupil_diam_pix = pupil_diam_pix       # pupil sampling used when flattening phase with the DMs
    use_pr = False            # return a fake phase retrieval of the pupil instead of the focal plane

    # -- override defaults from PASSVALUE.  PASSVALUE is a parameter, so the
    #    original "'PASSVALUE' in locals()" guard was always True and is dropped.
    if 'lam0' in PASSVALUE: lambda0_um = PASSVALUE['lam0']  # BUGFIX: was assigned to misspelled 'lamba0_um', so 'lam0' silently did nothing
    if 'lambda0_um' in PASSVALUE: lambda0_um = PASSVALUE['lambda0_um']
    if 'use_errors' in PASSVALUE: use_errors = PASSVALUE['use_errors']
    if 'zindex' in PASSVALUE: zindex = PASSVALUE['zindex']
    if 'zval' in PASSVALUE: zval = PASSVALUE['zval']
    if 'xoffset' in PASSVALUE: xoffset = PASSVALUE['xoffset']
    if 'yoffset' in PASSVALUE: yoffset = PASSVALUE['yoffset']
    if 'use_dm1' in PASSVALUE: use_dm1 = PASSVALUE['use_dm1']
    if 'dm1' in PASSVALUE: dm1 = PASSVALUE['dm1']
    if 'use_dm2' in PASSVALUE: use_dm2 = PASSVALUE['use_dm2']
    if 'dm2' in PASSVALUE: dm2 = PASSVALUE['dm2']
    if 'use_fpm' in PASSVALUE: use_fpm = PASSVALUE['use_fpm']
    if 'use_lyot_stop' in PASSVALUE: use_lyot_stop = PASSVALUE['use_lyot_stop']
    if 'use_field_stop' in PASSVALUE: use_field_stop = PASSVALUE['use_field_stop']
    if 'field_stop_radius' in PASSVALUE: field_stop_radius = PASSVALUE['field_stop_radius']
    if 'final_sampling_lam0' in PASSVALUE: final_sampling_lam0 = PASSVALUE['final_sampling_lam0']
    if 'nout' in PASSVALUE: nout = PASSVALUE['nout']
    if 'normLyotDiam' in PASSVALUE: normLyotDiam = PASSVALUE['normLyotDiam']
    if 'vortexCharge' in PASSVALUE: vortexCharge = PASSVALUE['vortexCharge']
    if 'map_dir' in PASSVALUE: map_dir = PASSVALUE['map_dir']
    if 'pupil_diam_pix' in PASSVALUE: pupil_diam_pix = PASSVALUE['pupil_diam_pix']
    if 'pr_pupil_diam_pix' in PASSVALUE: pr_pupil_diam_pix = PASSVALUE['pr_pupil_diam_pix']
    if 'use_pr' in PASSVALUE: use_pr = PASSVALUE['use_pr']

    # Convert 0/1 flags to booleans
    use_errors = bool(use_errors)
    use_dm1 = bool(use_dm1)
    use_dm2 = bool(use_dm2)
    use_fpm = bool(use_fpm)
    use_lyot_stop = bool(use_lyot_stop)
    use_field_stop = bool(use_field_stop)
    use_pr = bool(use_pr)

    # Normalize zindex to an indexable array (was a bare except)
    if np.isscalar(zindex):
        zindex = np.asarray((zindex,))
    else:
        try:
            zindex[0]
        except (TypeError, IndexError, KeyError):
            zindex = np.array([0])

    lambda0_m = lambda0_um * 1.0e-6
    pupil_ratio = pupil_diam_pix / float(gridsize)

    # -- optical prescription (distances, focal lengths) --
    diam = 4.00
    r_pri = 19.8
    h_pri = 2.5
    z_pri = h_pri**2 / (2*r_pri)
    fl_pri = np.sqrt(h_pri**2 + (r_pri/2 - z_pri)**2)  # effective focal length of primary as a pure parabola
    d_pri_sec = 9.172532289071727
    d_focus_sec = fl_pri - d_pri_sec
    d_sec_focus = 7.979857207574376844
    fl_sec = 1 / (1/d_sec_focus - 1/d_focus_sec)
    d_sec_m3 = 9.076690863872008
    fl_m3 = d_sec_m3 - d_sec_focus
    d_m3_fold = 0.654597300210990
    d_fold_fsm = 0.577743120280288
    d_fsm_dichroic = 0.1950
    d_dichroic_m4 = 0.450
    fl_m4 = 0.5075
    d_m4_m5 = 0.762954002022743
    fl_m5 = d_m4_m5 - fl_m4
    d_m5_dm1 = 0.220615776458241
    d_dm1_dm2 = 0.32
    d_dm2_qwp = 0.32 + 0.157485214529470
    fl_m6 = 1.029143136045496931
    d_qwp_m6 = fl_m6 - (d_dm1_dm2 + d_dm2_qwp)
    d_m6_fpm = fl_m6
    d_fpm_m7 = 0.255580492381039
    fl_m7 = d_fpm_m7
    d_m7_lyotstop = fl_m7
    d_lyotstop_m8 = 0.2536
    fl_m8 = d_lyotstop_m8
    d_m8_fieldstop = fl_m8
    d_fieldstop_m9 = d_m8_fieldstop
    fl_m9 = d_fieldstop_m9
    d_m9_filter = 0.296399999724129
    d_filter_m10 = 0.462615469378302
    fl_m10 = 0.503971038519431261
    d_m10_ccd = fl_m10

    # -- telescope front end --
    wavefront = proper.prop_begin(diam, lambda_m, gridsize, pupil_diam_pix/gridsize)
    proper.prop_circular_aperture(wavefront, diam/2)
    if not zindex[0] == 0:
        proper.prop_zernikes(wavefront, zindex, zval)  # optionally add Zernikes
    if (xoffset != 0) or (yoffset != 0):
        # Star X,Y offset in lam0/D, applied as a pupil-plane tilt
        xoffset_lam = xoffset * lambda0_m / lambda_m
        yoffset_lam = yoffset * lambda0_m / lambda_m
        u = (np.arange(gridsize) - gridsize/2.) / (pupil_diam_pix/2.)
        xtilt = np.exp(1j * np.pi * u * xoffset_lam)
        ytilt = np.exp(1j * np.pi * u * yoffset_lam)
        proper.prop_multiply(wavefront, ytilt.reshape((gridsize, 1)) @ xtilt.reshape((1, gridsize)))
    if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_PRIMARY_phase_error.fits', WAVEFRONT=True)
    proper.prop_lens(wavefront, fl_pri)
    proper.prop_define_entrance(wavefront)

    proper.prop_propagate(wavefront, d_pri_sec, 'secondary')
    if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_SECONDARY_phase_error.fits', WAVEFRONT=True)
    proper.prop_lens(wavefront, fl_sec)

    proper.prop_propagate(wavefront, d_sec_m3, 'M3')
    if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_M3_phase_error.fits', WAVEFRONT=True)
    proper.prop_lens(wavefront, fl_m3)

    proper.prop_propagate(wavefront, d_m3_fold, 'fold')
    if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_FOLD1_phase_error.fits', WAVEFRONT=True)

    # Pupil at fast steering mirror (interface with telescope)
    proper.prop_propagate(wavefront, d_fold_fsm, 'FSM')
    if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_FSM_phase_error.fits', WAVEFRONT=True)

    proper.prop_propagate(wavefront, d_fsm_dichroic, 'dichroic')
    if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_DICHROIC_phase_error.fits', WAVEFRONT=True)

    proper.prop_propagate(wavefront, d_dichroic_m4, 'M4')
    if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_M4_phase_error.fits', WAVEFRONT=True)
    proper.prop_lens(wavefront, fl_m4)

    proper.prop_propagate(wavefront, d_m4_m5, 'M5')
    if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_M5_phase_error.fits', WAVEFRONT=True)
    proper.prop_lens(wavefront, fl_m5)

    proper.prop_propagate(wavefront, d_m5_dm1, 'DM1')
    if use_dm1: proper.prop_dm(wavefront, dm1, dm_xc, dm_yc, dm_sampling)
    if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_DM1_phase_error.fits', WAVEFRONT=True)

    proper.prop_propagate(wavefront, d_dm1_dm2, 'DM2')
    if use_dm2: proper.prop_dm(wavefront, dm2, dm_xc, dm_yc, dm_sampling)
    if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_DM2_phase_error.fits', WAVEFRONT=True)

    proper.prop_propagate(wavefront, d_dm2_qwp, 'QWP')  # quarter-wave plate
    if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_QWP1_phase_error.fits', WAVEFRONT=True)

    proper.prop_propagate(wavefront, d_qwp_m6, 'M6')
    if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_M6_phase_error.fits', WAVEFRONT=True)
    proper.prop_lens(wavefront, fl_m6)

    proper.prop_propagate(wavefront, d_m6_fpm)
    if not use_pr:
        if use_fpm:
            # Vortex FPM: 1) IFFT back to the previous pupil, 2) apply the
            # pupil->vortex->pupil propagation (falco), 3) IFFT to the FPM's
            # focal plane.  wavefront.wfarr is already fftshifted.
            apRad = pupil_diam_pix/2.
            inVal = 0.3   # found empirically
            outVal = 5    # found empirically
            EpupPre = np.fft.ifftshift(np.fft.ifft2(wavefront.wfarr))*gridsize
            EpupPost = falco.prop.mft_p2v2p(EpupPre, vortexCharge, apRad, inVal, outVal)
            wavefront.wfarr = np.fft.ifft2(np.fft.fftshift(EpupPost))*gridsize

        proper.prop_propagate(wavefront, d_fpm_m7, 'M7')
        if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_M7_phase_error.fits', WAVEFRONT=True)
        proper.prop_lens(wavefront, fl_m7)

        proper.prop_propagate(wavefront, d_m7_lyotstop, 'Lyot stop')
        if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_QWP2_phase_error.fits', WAVEFRONT=True)
        if use_lyot_stop: proper.prop_circular_aperture(wavefront, normLyotDiam, NORM=True)

        proper.prop_propagate(wavefront, d_lyotstop_m8, 'M8')
        if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_M8_phase_error.fits', WAVEFRONT=True)
        proper.prop_lens(wavefront, fl_m8)

        proper.prop_propagate(wavefront, proper.prop_get_distancetofocus(wavefront), 'field stop')
        if use_field_stop:
            r_stop = field_stop_radius * lambda0_m / lambda_m
            proper.prop_circular_aperture(wavefront, r_stop/pupil_ratio*proper.prop_get_sampling(wavefront))

        proper.prop_propagate(wavefront, d_fieldstop_m9, 'M9')
        if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_M9_phase_error.fits', WAVEFRONT=True)
        proper.prop_lens(wavefront, fl_m9)

        proper.prop_propagate(wavefront, d_m9_filter, 'filter')
        if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_FILTER_phase_error.fits', WAVEFRONT=True)

        proper.prop_propagate(wavefront, d_filter_m10, 'M10')
        if use_errors: proper.prop_errormap(wavefront, map_dir+'habex_cycle1_M10_phase_error.fits', WAVEFRONT=True)
        proper.prop_lens(wavefront, fl_m10)

        proper.prop_propagate(wavefront, proper.prop_get_distancetofocus(wavefront), 'CCD')
        [wavefront, sampling_m] = proper.prop_end(wavefront, NOABS=True)

        # Rescale to final_sampling_lam0 lam0/D per pixel
        mag = (pupil_ratio / final_sampling_lam0) * (lambda_m / lambda0_m)
        wavefront = proper.prop_magnify(wavefront, mag, nout, AMP_CONSERVE=True)
    else:
        # Perfect-knowledge phase retrieval: IFFT back to the previous pupil
        EpupBeforeFPM = np.fft.ifftshift(np.fft.ifft2(wavefront.wfarr))*gridsize
        mag = pr_pupil_diam_pix/pupil_diam_pix
        wavefront = proper.prop_magnify(EpupBeforeFPM, mag, 2*np.ceil(0.5*mag*gridsize), AMP_CONSERVE=True)
        sampling_m = 0  # dummy value

    return wavefront, sampling_m
| [
"numpy.sqrt",
"proper.prop_get_distancetofocus",
"numpy.array",
"proper.prop_errormap",
"proper.prop_end",
"proper.prop_get_sampling",
"numpy.arange",
"numpy.isscalar",
"numpy.asarray",
"numpy.exp",
"proper.prop_dm",
"proper.prop_magnify",
"numpy.ceil",
"proper.prop_begin",
"proper.prop_... | [((2577, 2590), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2585, 2590), True, 'import numpy as np\n'), ((2649, 2662), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2657, 2662), True, 'import numpy as np\n'), ((5972, 5991), 'numpy.isscalar', 'np.isscalar', (['zindex'], {}), '(zindex)\n', (5983, 5991), True, 'import numpy as np\n'), ((6462, 6508), 'numpy.sqrt', 'np.sqrt', (['(h_pri ** 2 + (r_pri / 2 - z_pri) ** 2)'], {}), '(h_pri ** 2 + (r_pri / 2 - z_pri) ** 2)\n', (6469, 6508), True, 'import numpy as np\n'), ((7586, 7656), 'proper.prop_begin', 'proper.prop_begin', (['diam', 'lambda_m', 'gridsize', '(pupil_diam_pix / gridsize)'], {}), '(diam, lambda_m, gridsize, pupil_diam_pix / gridsize)\n', (7603, 7656), False, 'import proper\n'), ((7659, 7709), 'proper.prop_circular_aperture', 'proper.prop_circular_aperture', (['wavefront', '(diam / 2)'], {}), '(wavefront, diam / 2)\n', (7688, 7709), False, 'import proper\n'), ((8548, 8583), 'proper.prop_lens', 'proper.prop_lens', (['wavefront', 'fl_pri'], {}), '(wavefront, fl_pri)\n', (8564, 8583), False, 'import proper\n'), ((8588, 8626), 'proper.prop_define_entrance', 'proper.prop_define_entrance', (['wavefront'], {}), '(wavefront)\n', (8615, 8626), False, 'import proper\n'), ((8632, 8688), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_pri_sec', '"""secondary"""'], {}), "(wavefront, d_pri_sec, 'secondary')\n", (8653, 8688), False, 'import proper\n'), ((8813, 8848), 'proper.prop_lens', 'proper.prop_lens', (['wavefront', 'fl_sec'], {}), '(wavefront, fl_sec)\n', (8829, 8848), False, 'import proper\n'), ((8854, 8902), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_sec_m3', '"""M3"""'], {}), "(wavefront, d_sec_m3, 'M3')\n", (8875, 8902), False, 'import proper\n'), ((9020, 9054), 'proper.prop_lens', 'proper.prop_lens', (['wavefront', 'fl_m3'], {}), '(wavefront, fl_m3)\n', (9036, 9054), False, 'import proper\n'), ((9060, 9111), 'proper.prop_propagate', 
'proper.prop_propagate', (['wavefront', 'd_m3_fold', '"""fold"""'], {}), "(wavefront, d_m3_fold, 'fold')\n", (9081, 9111), False, 'import proper\n'), ((9237, 9288), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_fold_fsm', '"""FSM"""'], {}), "(wavefront, d_fold_fsm, 'FSM')\n", (9258, 9288), False, 'import proper\n'), ((9469, 9529), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_fsm_dichroic', '"""dichroic"""'], {}), "(wavefront, d_fsm_dichroic, 'dichroic')\n", (9490, 9529), False, 'import proper\n'), ((9654, 9707), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_dichroic_m4', '"""M4"""'], {}), "(wavefront, d_dichroic_m4, 'M4')\n", (9675, 9707), False, 'import proper\n'), ((9825, 9859), 'proper.prop_lens', 'proper.prop_lens', (['wavefront', 'fl_m4'], {}), '(wavefront, fl_m4)\n', (9841, 9859), False, 'import proper\n'), ((9865, 9912), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_m4_m5', '"""M5"""'], {}), "(wavefront, d_m4_m5, 'M5')\n", (9886, 9912), False, 'import proper\n'), ((10030, 10064), 'proper.prop_lens', 'proper.prop_lens', (['wavefront', 'fl_m5'], {}), '(wavefront, fl_m5)\n', (10046, 10064), False, 'import proper\n'), ((10070, 10119), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_m5_dm1', '"""DM1"""'], {}), "(wavefront, d_m5_dm1, 'DM1')\n", (10091, 10119), False, 'import proper\n'), ((10319, 10369), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_dm1_dm2', '"""DM2"""'], {}), "(wavefront, d_dm1_dm2, 'DM2')\n", (10340, 10369), False, 'import proper\n'), ((10565, 10615), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_dm2_qwp', '"""QWP"""'], {}), "(wavefront, d_dm2_qwp, 'QWP')\n", (10586, 10615), False, 'import proper\n'), ((10759, 10807), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_qwp_m6', '"""M6"""'], {}), "(wavefront, d_qwp_m6, 'M6')\n", (10780, 10807), False, 'import proper\n'), ((10925, 
10959), 'proper.prop_lens', 'proper.prop_lens', (['wavefront', 'fl_m6'], {}), '(wavefront, fl_m6)\n', (10941, 10959), False, 'import proper\n'), ((10965, 11007), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_m6_fpm'], {}), '(wavefront, d_m6_fpm)\n', (10986, 11007), False, 'import proper\n'), ((6011, 6032), 'numpy.asarray', 'np.asarray', (['(zindex,)'], {}), '((zindex,))\n', (6021, 6032), True, 'import numpy as np\n'), ((7746, 7791), 'proper.prop_zernikes', 'proper.prop_zernikes', (['wavefront', 'zindex', 'zval'], {}), '(wavefront, zindex, zval)\n', (7766, 7791), False, 'import proper\n'), ((8168, 8206), 'numpy.exp', 'np.exp', (['(1.0j * np.pi * u * xoffset_lam)'], {}), '(1.0j * np.pi * u * xoffset_lam)\n', (8174, 8206), True, 'import numpy as np\n'), ((8223, 8261), 'numpy.exp', 'np.exp', (['(1.0j * np.pi * u * yoffset_lam)'], {}), '(1.0j * np.pi * u * yoffset_lam)\n', (8229, 8261), True, 'import numpy as np\n'), ((8447, 8549), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_PRIMARY_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_PRIMARY_phase_error.fits', WAVEFRONT=True)\n", (8467, 8549), False, 'import proper\n'), ((8710, 8814), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_SECONDARY_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_SECONDARY_phase_error.fits', WAVEFRONT=True)\n", (8730, 8814), False, 'import proper\n'), ((8924, 9021), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_M3_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_M3_phase_error.fits', WAVEFRONT=True)\n", (8944, 9021), False, 'import proper\n'), ((9133, 9233), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_FOLD1_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 
'habex_cycle1_FOLD1_phase_error.fits', WAVEFRONT=True)\n", (9153, 9233), False, 'import proper\n'), ((9371, 9469), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_FSM_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_FSM_phase_error.fits', WAVEFRONT=True)\n", (9391, 9469), False, 'import proper\n'), ((9551, 9654), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_DICHROIC_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_DICHROIC_phase_error.fits', WAVEFRONT=True)\n", (9571, 9654), False, 'import proper\n'), ((9729, 9826), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_M4_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_M4_phase_error.fits', WAVEFRONT=True)\n", (9749, 9826), False, 'import proper\n'), ((9934, 10031), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_M5_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_M5_phase_error.fits', WAVEFRONT=True)\n", (9954, 10031), False, 'import proper\n'), ((10138, 10195), 'proper.prop_dm', 'proper.prop_dm', (['wavefront', 'dm1', 'dm_xc', 'dm_yc', 'dm_sampling'], {}), '(wavefront, dm1, dm_xc, dm_yc, dm_sampling)\n', (10152, 10195), False, 'import proper\n'), ((10217, 10315), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_DM1_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_DM1_phase_error.fits', WAVEFRONT=True)\n", (10237, 10315), False, 'import proper\n'), ((10388, 10445), 'proper.prop_dm', 'proper.prop_dm', (['wavefront', 'dm2', 'dm_xc', 'dm_yc', 'dm_sampling'], {}), '(wavefront, dm2, dm_xc, dm_yc, dm_sampling)\n', (10402, 10445), False, 'import proper\n'), ((10467, 10565), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', 
"(map_dir + 'habex_cycle1_DM2_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_DM2_phase_error.fits', WAVEFRONT=True)\n", (10487, 10565), False, 'import proper\n'), ((10660, 10759), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_QWP1_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_QWP1_phase_error.fits', WAVEFRONT=True)\n", (10680, 10759), False, 'import proper\n'), ((10829, 10926), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_M6_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_M6_phase_error.fits', WAVEFRONT=True)\n", (10849, 10926), False, 'import proper\n'), ((11787, 11835), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_fpm_m7', '"""M7"""'], {}), "(wavefront, d_fpm_m7, 'M7')\n", (11808, 11835), False, 'import proper\n'), ((11961, 11995), 'proper.prop_lens', 'proper.prop_lens', (['wavefront', 'fl_m7'], {}), '(wavefront, fl_m7)\n', (11977, 11995), False, 'import proper\n'), ((12005, 12065), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_m7_lyotstop', '"""Lyot stop"""'], {}), "(wavefront, d_m7_lyotstop, 'Lyot stop')\n", (12026, 12065), False, 'import proper\n'), ((12289, 12342), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_lyotstop_m8', '"""M8"""'], {}), "(wavefront, d_lyotstop_m8, 'M8')\n", (12310, 12342), False, 'import proper\n'), ((12468, 12502), 'proper.prop_lens', 'proper.prop_lens', (['wavefront', 'fl_m8'], {}), '(wavefront, fl_m8)\n', (12484, 12502), False, 'import proper\n'), ((12813, 12867), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_fieldstop_m9', '"""M9"""'], {}), "(wavefront, d_fieldstop_m9, 'M9')\n", (12834, 12867), False, 'import proper\n'), ((12993, 13027), 'proper.prop_lens', 'proper.prop_lens', (['wavefront', 'fl_m9'], {}), '(wavefront, fl_m9)\n', (13009, 
13027), False, 'import proper\n'), ((13037, 13092), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_m9_filter', '"""filter"""'], {}), "(wavefront, d_m9_filter, 'filter')\n", (13058, 13092), False, 'import proper\n'), ((13223, 13276), 'proper.prop_propagate', 'proper.prop_propagate', (['wavefront', 'd_filter_m10', '"""M10"""'], {}), "(wavefront, d_filter_m10, 'M10')\n", (13244, 13276), False, 'import proper\n'), ((13404, 13439), 'proper.prop_lens', 'proper.prop_lens', (['wavefront', 'fl_m10'], {}), '(wavefront, fl_m10)\n', (13420, 13439), False, 'import proper\n'), ((13568, 13606), 'proper.prop_end', 'proper.prop_end', (['wavefront'], {'NOABS': '(True)'}), '(wavefront, NOABS=True)\n', (13583, 13606), False, 'import proper\n'), ((13765, 13825), 'proper.prop_magnify', 'proper.prop_magnify', (['wavefront', 'mag', 'nout'], {'AMP_CONSERVE': '(True)'}), '(wavefront, mag, nout, AMP_CONSERVE=True)\n', (13784, 13825), False, 'import proper\n'), ((11633, 11698), 'falco.prop.mft_p2v2p', 'falco.prop.mft_p2v2p', (['EpupPre', 'vortexCharge', 'apRad', 'inVal', 'outVal'], {}), '(EpupPre, vortexCharge, apRad, inVal, outVal)\n', (11653, 11698), False, 'import falco\n'), ((11861, 11958), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_M7_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_M7_phase_error.fits', WAVEFRONT=True)\n", (11881, 11958), False, 'import proper\n'), ((12092, 12191), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_QWP2_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_QWP2_phase_error.fits', WAVEFRONT=True)\n", (12112, 12191), False, 'import proper\n'), ((12214, 12279), 'proper.prop_circular_aperture', 'proper.prop_circular_aperture', (['wavefront', 'normLyotDiam'], {'NORM': '(True)'}), '(wavefront, normLyotDiam, NORM=True)\n', (12243, 12279), False, 'import proper\n'), ((12368, 12465), 
'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_M8_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_M8_phase_error.fits', WAVEFRONT=True)\n", (12388, 12465), False, 'import proper\n'), ((12545, 12587), 'proper.prop_get_distancetofocus', 'proper.prop_get_distancetofocus', (['wavefront'], {}), '(wavefront)\n', (12576, 12587), False, 'import proper\n'), ((12893, 12990), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_M9_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_M9_phase_error.fits', WAVEFRONT=True)\n", (12913, 12990), False, 'import proper\n'), ((13118, 13219), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_FILTER_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_FILTER_phase_error.fits', WAVEFRONT=True)\n", (13138, 13219), False, 'import proper\n'), ((13302, 13400), 'proper.prop_errormap', 'proper.prop_errormap', (['wavefront', "(map_dir + 'habex_cycle1_M10_phase_error.fits')"], {'WAVEFRONT': '(True)'}), "(wavefront, map_dir +\n 'habex_cycle1_M10_phase_error.fits', WAVEFRONT=True)\n", (13322, 13400), False, 'import proper\n'), ((13482, 13524), 'proper.prop_get_distancetofocus', 'proper.prop_get_distancetofocus', (['wavefront'], {}), '(wavefront)\n', (13513, 13524), False, 'import proper\n'), ((6183, 6196), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (6191, 6196), True, 'import numpy as np\n'), ((8025, 8044), 'numpy.arange', 'np.arange', (['gridsize'], {}), '(gridsize)\n', (8034, 8044), True, 'import numpy as np\n'), ((13961, 13990), 'numpy.fft.ifft2', 'np.fft.ifft2', (['wavefront.wfarr'], {}), '(wavefront.wfarr)\n', (13973, 13990), True, 'import numpy as np\n'), ((14120, 14149), 'numpy.ceil', 'np.ceil', (['(0.5 * mag * gridsize)'], {}), '(0.5 * mag * gridsize)\n', (14127, 14149), True, 'import numpy as np\n'), ((11533, 
11562), 'numpy.fft.ifft2', 'np.fft.ifft2', (['wavefront.wfarr'], {}), '(wavefront.wfarr)\n', (11545, 11562), True, 'import numpy as np\n'), ((11742, 11767), 'numpy.fft.fftshift', 'np.fft.fftshift', (['EpupPost'], {}), '(EpupPost)\n', (11757, 11767), True, 'import numpy as np\n'), ((12767, 12802), 'proper.prop_get_sampling', 'proper.prop_get_sampling', (['wavefront'], {}), '(wavefront)\n', (12791, 12802), False, 'import proper\n')] |
"""
Calculations iodine emissions with updated iodide field
"""
import numpy as np
import pandas as pd
import xarray as xr
import sparse2spatial.utils as utils
# import AC_tools (https://github.com/tsherwen/AC_tools.git)
import AC_tools as AC
def compare_emissions(wd_dict=None, inorg_emiss=None, specs=None):
    """
    Compare emissions between runs with different parameterisations

    Parameters
    -------
    wd_dict (dict): dictionary of names (keys) and locations of model runs
    inorg_emiss (dict): dictionary of inorganic iodine emissions for runs
    specs (list): species names matching the arrays in inorg_emiss

    Returns
    -------
    (pd.DataFrame)
    """
    # Fall back to the default set of emission-testing runs
    if wd_dict is None:
        wd_dict = get_emissions_testing_runs()
    params = sorted(wd_dict.keys())
    # Tropospheric O3 burden per run, summed and scaled by 1E3
    burdens = [AC.get_O3_burden(wd_dict[p]).sum() / 1E3 for p in params]
    df = pd.DataFrame(burdens, index=params, columns=['O3 bud.'])
    # Retrieve the inorganic emissions if they were not provided
    if inorg_emiss is None:
        inorg_emiss, specs = get_inorg_emissions_for_params(wd_dict=wd_dict)
    # Collapse per-species arrays into totals (NB: updates the dict in place)
    for p in params:
        inorg_emiss[p] = [arr.sum() for arr in inorg_emiss[p]]
    # Combine the emission totals with the O3 burdens
    emiss_names = ['{} emiss.'.format(s) for s in specs]
    df = pd.concat([df, pd.DataFrame(inorg_emiss, index=emiss_names).T], axis=1)
    # Add total inorganic flux? (Hashed out for now)
    # df['Inorg emiss'] = df[emiss_names].sum(axis=1)
    # Percentage change of the ML parameterisation vs. the literature ones
    df = df.T
    param = 'RFR(offline)'
    for ref in ('Chance2014', 'MacDonald2014'):
        new_col = '({}% vs. {})'.format(param, ref)
        df[new_col] = (df[param] - df[ref]) / df[ref] * 100
    return df.T
def get_emissions_testing_runs():
    """
    Get dictionary of emission model run locations

    Returns
    -------
    (dict) parameterisation name => model run directory
    """
    # folder = get_file_locations('earth0_home_dir')
    folder = ''
    folder += '/data/all_model_simulations/iodine_runs/iGEOSChem_4.0_v10/'
    # Locations of model runs with different iodide fields.
    # NOTE(review): the local variable names previously had the Chance and
    # MacDonald directories swapped relative to their contents; the names
    # below now follow the directory strings, and the returned key->path
    # mapping is unchanged — confirm that mapping is the intended one.
    RFR_dir = 'run.XS.UPa.FP.EU.BC.II.FP.2014.NEW_OFFLINE_IODIDE.several_months/'
    MacDonald_dir = '/run.XS.UPa.FP.EU.BC.II.FP.2014.re_run4HEMCO_diag/'
    Chance_dir = 'run.XS.UPa.FP.EU.BC.II.FP.2014.Chance_iodide/'
    extr_dir = '/'
    # extr_dir = '/spin_up/'
    # extr_dir = '/test_dates/'
    wd_dict = {
        'Chance2014': folder + Chance_dir + extr_dir,
        'MacDonald2014': folder + MacDonald_dir,
        'RFR(offline)': folder + RFR_dir + extr_dir,
    }
    return wd_dict
def get_inorg_emissions_for_params(wd_dict=None, res='4x5'):
    """
    Get inorganic (HOI, I2) emissions for the different parameterisations
    """
    from A_PD_hal_paper_analysis_figures.halogen_family_emission_printer import get_species_emiss_Tg_per_yr
    specs = ['HOI', 'I2']
    # Surface area grid for the requested model resolution
    grid_area = AC.get_surface_area(res=res)
    # Calculate emissions for every run in turn
    inorg_emiss = {}
    for param, wd in wd_dict.items():
        print(param)
        # Output period covered by this run
        run_months = AC.get_gc_months(wd=wd)
        run_years = AC.get_gc_years(wd=wd)
        # Emissions per species (arrays in Tg/yr, referenced to I)
        emissions = get_species_emiss_Tg_per_yr(wd=wd, specs=specs,
                                                ref_spec='I',
                                                s_area=grid_area,
                                                years=run_years,
                                                months=run_months)
        # Append the combined HOI + I2 total
        emissions += [emissions[0] + emissions[1]]
        inorg_emiss[param] = emissions
    return inorg_emiss, specs + ['Inorg']
def add_Inorg_and_Org_totals2array(ds, InOrgVar='Inorg_Total', OrgVar='Org_Total'):
    """
    Add inorganic and organic sub totals to dataset

    Parameters
    -------
    ds (xr.Dataset): dataset of HEMCO emission diagnostics (modified in place)
    InOrgVar (str): name for the new inorganic sub-total variable
    OrgVar (str): name for the new organic sub-total variable

    Returns
    -------
    (xr.Dataset) the dataset with the two sub-total variables added
    """
    # Variables making up each sub-total
    OrgVars = [
        'EmisCH2IBr_Ocean', 'EmisCH2ICl_Ocean', 'EmisCH2I2_Ocean', 'EmisCH3I_Ocean',
    ]
    InOrgVars = ['EmisI2_Ocean', 'EmisHOI_Ocean', ]
    # Add the inorganic, then the organic, sub-total
    ds = _add_subtotal2ds(ds, vars2sum=InOrgVars, NewVar=InOrgVar)
    ds = _add_subtotal2ds(ds, vars2sum=OrgVars, NewVar=OrgVar)
    return ds


def _add_subtotal2ds(ds, vars2sum=None, NewVar=None):
    """
    Sum the given dataset variables into a new variable named NewVar
    """
    # Template the new variable off the first species
    ds[NewVar] = ds[vars2sum[0]].copy()
    # Sum the remaining variables onto it
    arr = ds[NewVar].values
    for var_ in vars2sum[1:]:
        print(var_)
        arr = arr + ds[var_].values
    ds[NewVar].values = arr
    # Label the new variable
    attrs = ds[NewVar].attrs
    attrs['long_name'] = NewVar
    ds[NewVar].attrs = attrs
    return ds
def plot_up_surface_emissions(dsDH=None, runs=None, show_plot=False,
                              wds=None, dpi=320):
    """
    Plot up emissions using HEMCO NetCDF files

    Parameters
    -------
    dsDH (dict): run name => dataset of HEMCO emission diagnostics
    runs (list): subset of run names to plot (defaults to all keys of wds)
    show_plot (bool): also display each figure interactively
    wds (dict): run name => model run directory (defaults to the EGU runs)
    dpi (int): resolution of the saved PDF (NOTE: currently overridden below)

    Returns
    -------
    (None) - figures are appended to a multi-page PDF
    """
    import cartopy.crs as ccrs
    import matplotlib.pyplot as plt
    # names of runs to plot up?
    if isinstance(wds, type(None)):
        wds = get_run_dict4EGU_runs()
    if isinstance(runs, type(None)):
        runs = list(wds.keys())
    # - Add aggregated values to ds
    OrgVars = [
        'EmisCH2IBr_Ocean', 'EmisCH2ICl_Ocean', 'EmisCH2I2_Ocean', 'EmisCH3I_Ocean',
    ]
    InOrgVars = ['EmisI2_Ocean', 'EmisHOI_Ocean', ]
    vars2use = OrgVars + InOrgVars
    # Names for the aggregated (sub-)total variables
    TotalVar = 'I_Total'
    InOrgVar = 'Inorg_Total'
    OrgVar = 'Org_Total'
    # Setup the colourbars to use (divergent one for difference plots)
    Divergent_cmap = plt.get_cmap('RdBu_r')
    cmap = AC.get_colormap(np.arange(10))
    # Loop over runs and add the sub-total and total emission variables
    for run in runs:
        # which dataset to use?
        print(run)
        ds = dsDH[run]
        # Add Inorg and org subtotals to array
        ds = add_Inorg_and_Org_totals2array(ds=ds)
        # Calculate totals
        # template off the first species
        ds[TotalVar] = dsDH[run][vars2use[0]].copy()
        # Sum values to this
        arr = ds[TotalVar].values
        for var_ in vars2use[1:]:
            print(var_)
            arr = arr + dsDH[run][var_].values
        ds[TotalVar].values = arr
        attrs = ds[TotalVar].attrs
        attrs['long_name'] = TotalVar
        ds[TotalVar].attrs = attrs
    # Setup PDF to save plot to
    savetitle = 'Oi_prj_emissions_diff_plots_EGU_runs'
    # NOTE(review): this overrides the dpi argument passed in by the caller
    dpi = 320
    pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
    # - Plot up emissions spatial distribution of total emissions
    for run in runs:
        print(run)
        # dataset to plot
        ds = dsDH[run][[TotalVar]]
        # use annual sum of emissions
        ds = ds.sum(dim='time')
        # - Loop and plot species
        fig = plt.figure(figsize=(10, 6))
        ax = fig.add_subplot(111, projection=ccrs.PlateCarree(), aspect='auto')
        ds[TotalVar].plot.imshow(x='lon', y='lat',
                                 ax=ax,
                                 cmap=cmap,
                                 transform=ccrs.PlateCarree())
        # Add a title to the plot
        PtrStr = "Total iodine emissions (Gg I) in '{}'"
        PtrStr += "\n(max={:.1f}, min={:.1f}, sum={:.1f})"
        sum_ = float(ds[TotalVar].sum().values)
        max_ = float(ds[TotalVar].max().values)
        min_ = float(ds[TotalVar].min().values)
        plt.title(PtrStr.format(run, max_, min_, sum_))
        # Beautify the plot
        ax.coastlines()
        ax.set_global()
        # Save to PDF and close plot
        AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
        if show_plot:
            plt.show()
        plt.close()
    # - Plot up emissions spatial distribution of inorg emissions
    # (skip the run without inorganic emissions)
    runs2plot = [i for i in runs if (i != 'No_HOI_I2')]
    for run in runs2plot:
        print(run)
        # dataset to plot
        ds = dsDH[run][[InOrgVar]]
        # use annual sum of emissions
        ds = ds.sum(dim='time')
        # - Loop and plot species
        fig = plt.figure(figsize=(10, 6))
        ax = fig.add_subplot(111, projection=ccrs.PlateCarree(), aspect='auto')
        ds[InOrgVar].plot.imshow(x='lon', y='lat',
                                 ax=ax,
                                 cmap=cmap,
                                 transform=ccrs.PlateCarree())
        # Add a title to the plot
        PtrStr = "Total Inorganic iodine emissions (Gg I) in '{}'"
        PtrStr += "\n(max={:.1f}, min={:.1f}, sum={:.1f})"
        sum_ = float(ds[InOrgVar].sum().values)
        max_ = float(ds[InOrgVar].max().values)
        min_ = float(ds[InOrgVar].min().values)
        plt.title(PtrStr.format(run, max_, min_, sum_))
        # Beautify the plot
        ax.coastlines()
        ax.set_global()
        # Save to PDF and close plot
        AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
        if show_plot:
            plt.show()
        plt.close()
    # - Plot up emissions spatial distribution inorg emissions (% of total)
    runs2plot = [i for i in runs if (i != 'No_HOI_I2')]
    for run in runs2plot:
        print(run)
        # dataset to plot
        ds = dsDH[run][[InOrgVar, TotalVar]]
        # use annual sum of emissions
        ds = ds.sum(dim='time')
        # Calculate the difference (percent)
        DIFFvar = 'Inorg/Total'
        ds[DIFFvar] = ds[InOrgVar].copy()
        ds[DIFFvar].values = ds[InOrgVar].values/ds[TotalVar].values*100
        # Loop and plot species
        fig = plt.figure(figsize=(10, 6))
        ax = fig.add_subplot(111, projection=ccrs.PlateCarree(), aspect='auto')
        ds[DIFFvar].plot.imshow(x='lon', y='lat',
                                ax=ax,
                                cmap=cmap,
                                transform=ccrs.PlateCarree())
        # Add a title to the plot
        PtrStr = "Total Inorganic iodine emissions (% of total) in '{}' \n"
        PtrStr += '(max={:.1f}, min={:.1f})'
        max_ = float(ds[DIFFvar].max().values)
        min_ = float(ds[DIFFvar].min().values)
        plt.title(PtrStr.format(run, max_, min_))
        # Beautify the plot
        ax.coastlines()
        ax.set_global()
        # Save to PDF and close plot
        AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
        if show_plot:
            plt.show()
        plt.close()
    # - plot up emissions as a % of REF (Chance2014)
    REF = 'Chance2014'
    # runs2plot = [i for i in runs if (i != REF)]
    # runs2plot = [i for i in runs if (i != 'No_HOI_I2')]
    runs2plot = ['ML_Iodide']
    for run in runs2plot:
        print(run)
        # dataset to plot (use annual sum of emissions)
        ds = dsDH[run][[InOrgVar]].sum(dim='time')
        dsREF = dsDH[REF][[InOrgVar]].sum(dim='time')
        # Ratio of run to reference, as a percentage
        DIFFvar = 'Inorg/Inorg({})'.format(REF)
        ds[DIFFvar] = ds[InOrgVar].copy()
        ds[DIFFvar].values = ds[InOrgVar].values/dsREF[InOrgVar].values*100
        # - Loop and plot species
        fig = plt.figure(figsize=(10, 6))
        ax = fig.add_subplot(111, projection=ccrs.PlateCarree(), aspect='auto')
        ds[DIFFvar].plot.imshow(x='lon', y='lat',
                                # vmin=1, vmax=5,
                                vmin=0, vmax=200,
                                ax=ax,
                                cmap=cmap,
                                transform=ccrs.PlateCarree())
        # Add a title to the plot
        PtrStr = "Total Inorganic iodine emissions in '{}'\n as % of {}"
        PtrStr += '(max={:.1f}, min={:.1f})'
        max_ = float(ds[DIFFvar].max().values)
        min_ = float(ds[DIFFvar].min().values)
        plt.title(PtrStr.format(run, REF, max_, min_))
        # Beautify the plot
        ax.coastlines()
        ax.set_global()
        # Save to PDF and close plot
        AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
        if show_plot:
            plt.show()
        plt.close()
    # - plot up emissions as a % of REF (Macdonald2014)
    REF = 'Macdonald2014'
    # runs2plot = [i for i in runs if (i != REF)]
    # runs2plot = [i for i in runs if (i != 'No_HOI_I2')]
    runs2plot = ['ML_Iodide']
    for run in runs2plot:
        print(run)
        # dataset to plot (use annual sum of emissions)
        ds = dsDH[run][[InOrgVar]].sum(dim='time')
        dsREF = dsDH[REF][[InOrgVar]].sum(dim='time')
        # Ratio of run to reference, as a percentage
        DIFFvar = 'Inorg/Inorg({})'.format(REF)
        ds[DIFFvar] = ds[InOrgVar].copy()
        ds[DIFFvar].values = ds[InOrgVar].values/dsREF[InOrgVar].values*100
        # - Loop and plot species
        fig = plt.figure(figsize=(10, 6))
        ax = fig.add_subplot(111, projection=ccrs.PlateCarree(), aspect='auto')
        ds[DIFFvar].plot.imshow(x='lon', y='lat',
                                vmin=0, vmax=200,
                                ax=ax,
                                cmap=cmap,
                                transform=ccrs.PlateCarree())
        # Add a title to the plot
        PtrStr = "Total Inorganic iodine emissions in '{}'\n as % of {}"
        PtrStr += '(max={:.1f}, min={:.1f})'
        max_ = float(ds[DIFFvar].max().values)
        min_ = float(ds[DIFFvar].min().values)
        plt.title(PtrStr.format(run, REF, max_, min_))
        # Beautify the plot
        ax.coastlines()
        ax.set_global()
        # Save to PDF and close plot
        AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
        if show_plot:
            plt.show()
        plt.close()
    # - plot up emission % difference vs. REF (Chance2014)
    REF = 'Chance2014'
    # runs2plot = [i for i in runs if (i != REF)]
    # runs2plot = [i for i in runs if (i != 'No_HOI_I2')]
    runs2plot = ['ML_Iodide']
    for run in runs2plot:
        print(run)
        # dataset to plot (use annual sum of emissions)
        ds = dsDH[run][[InOrgVar]].sum(dim='time')
        dsREF = dsDH[REF][[InOrgVar]].sum(dim='time')
        # Percentage difference from the reference run
        DIFFvar = 'Inorg/Inorg({})'.format(REF)
        ds[DIFFvar] = ds[InOrgVar].copy()
        ds[DIFFvar].values = ds[InOrgVar].values-dsREF[InOrgVar].values
        ds[DIFFvar].values = ds[DIFFvar].values / dsREF[InOrgVar].values*100
        # - Loop and plot species
        fig = plt.figure(figsize=(10, 6))
        ax = fig.add_subplot(111, projection=ccrs.PlateCarree(), aspect='auto')
        ds[DIFFvar].plot.imshow(x='lon', y='lat',
                                # vmin=1, vmax=5,
                                vmin=-100, vmax=100,
                                ax=ax,
                                # cmap=cmap,
                                cmap=Divergent_cmap,
                                transform=ccrs.PlateCarree())
        # Add a title to the plot
        PtrStr = "Total Inorganic iodine emissions in '{}'\n as % of {}"
        PtrStr += '(max={:.1f}, min={:.1f})'
        max_ = float(ds[DIFFvar].max().values)
        min_ = float(ds[DIFFvar].min().values)
        plt.title(PtrStr.format(run, REF, max_, min_))
        # Beautify the plot
        ax.coastlines()
        ax.set_global()
        # Save to PDF and close plot
        AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
        if show_plot:
            plt.show()
        plt.close()
    # - plot up emission % difference vs. REF (Macdonald2014)
    REF = 'Macdonald2014'
    # runs2plot = [i for i in runs if (i != REF)]
    # runs2plot = [i for i in runs if (i != 'No_HOI_I2')]
    runs2plot = ['ML_Iodide']
    for run in runs2plot:
        print(run)
        # dataset to plot (use annual sum of emissions)
        ds = dsDH[run][[InOrgVar]].sum(dim='time')
        dsREF = dsDH[REF][[InOrgVar]].sum(dim='time')
        # Percentage difference from the reference run
        DIFFvar = 'Inorg/Inorg({})'.format(REF)
        ds[DIFFvar] = ds[InOrgVar].copy()
        ds[DIFFvar].values = ds[InOrgVar].values-dsREF[InOrgVar].values
        ds[DIFFvar].values = ds[DIFFvar].values / dsREF[InOrgVar].values*100
        # - Loop and plot species
        fig = plt.figure(figsize=(10, 6))
        ax = fig.add_subplot(111, projection=ccrs.PlateCarree(), aspect='auto')
        ds[DIFFvar].plot.imshow(x='lon', y='lat',
                                vmin=-100, vmax=100,
                                ax=ax,
                                cmap=Divergent_cmap,
                                transform=ccrs.PlateCarree())
        # Add a title to the plot
        PtrStr = "Total Inorganic iodine emissions in '{}'\n as % of {}"
        PtrStr += '(max={:.1f}, min={:.1f})'
        max_ = float(ds[DIFFvar].max().values)
        min_ = float(ds[DIFFvar].min().values)
        plt.title(PtrStr.format(run, REF, max_, min_))
        # Beautify the plot
        ax.coastlines()
        ax.set_global()
        # Save to PDF and close plot
        AC.plot2pdfmulti(pdff, savetitle, dpi=dpi)
        if show_plot:
            plt.show()
        plt.close()
    # -- Save entire pdf
    AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
def get_run_dict4EGU_runs():
    """
    Return locations of data to use for analysis/plotting for EGU presentation
    """
    RunRoot = '/users/ts551/scratch/GC/rundirs/'
    # Analysis-period runs including sea-salt bromine (SSBr); the earlier
    # spin-up / no-SSBr directory sets have been retired from this mapping.
    prefix = RunRoot + '/geosfp_4x5_tropchem.v12.2.1.Oi.'
    wds = {
        'Chance2014': prefix + 'Chance2014.O3.SSBr/',
        'No_HOI_I2': prefix + 'No_HOI_I2.O3.SSBr/',
        'Macdonald2014': prefix + 'Macdonald2014.O3.SSBr/',
        'ML_Iodide': prefix + 'ML_Iodide.O3.SSBr/',
    }
    return wds
def GetEmissionsFromHEMCONetCDFsAsDatasets(wds=None):
    """
    Get the emissions from the HEMCO NetCDF files as a dictionary of datasets.

    Returns
    -------
    (dict) run name => dataset of emission diagnostics converted to Gg/yr
    """
    # Get data locations and run names as a dictionary
    if wds is None:
        wds = get_run_dict4EGU_runs()
    run_names = list(wds.keys())
    # Iodine emission diagnostics to extract and convert
    emission_vars = [
        'EmisCH2IBr_Ocean', 'EmisCH2ICl_Ocean', 'EmisCH2I2_Ocean',
        'EmisCH3I_Ocean', 'EmisI2_Ocean', 'EmisHOI_Ocean',
    ]
    # Load the HEMCO diagnostics for every run
    dsDH = {}
    for name in run_names:
        folder = wds[name]
        print(name, folder)
        dsDH[name] = AC.GetHEMCODiagnostics_AsDataset(wd=folder)
    # Map diagnostic names (e.g. 'EmisHOI_Ocean') onto species names ('HOI')
    species = [v.split('Emis')[-1].split('_')[0] for v in emission_vars]
    var2spec = dict(zip(emission_vars, species))
    # Convert the emission variables to Gg per year
    for name in run_names:
        dsDH[name] = AC.Convert_HEMCO_ds2Gg_per_yr(dsDH[name],
                                                  vars2convert=emission_vars,
                                                  var_species_dict=var2spec)
    return dsDH
def Check_global_statistics_on_emissions(dsDH=None, verbose=True, debug=False):
    """
    Get summary analysis on the updated iodide field

    Parameters
    -------
    dsDH (dict): run name => dataset of HEMCO emission diagnostics
    verbose (bool): print the summary tables
    debug (bool): print intermediate sums while compiling the table

    Returns
    -------
    (None) - summaries are printed to stdout
    """
    # - Files locations to use
    if isinstance(dsDH, type(None)):
        dsDH = GetEmissionsFromHEMCONetCDFsAsDatasets()
    # Set runs to use
    runs = ['Chance2014', 'No_HOI_I2', 'Macdonald2014', 'ML_Iodide']
    # vars to use
    InOrgVar = 'Inorg_Total'
    OrgVar = 'Org_Total'
    vars2use = [
        'EmisCH2IBr_Ocean', 'EmisCH2ICl_Ocean', 'EmisCH2I2_Ocean',
        'EmisCH3I_Ocean', 'EmisI2_Ocean', 'EmisHOI_Ocean',
    ]
    vars2useALL = vars2use + [InOrgVar, OrgVar]
    # - compile data into a pd.DataFrame (one column per run)
    df = pd.DataFrame()
    for run in runs:
        # Get the dataset to use (copy so the sub-totals aren't added twice)
        ds = dsDH[run].copy()
        # Add inorg and org emissions
        ds = add_Inorg_and_Org_totals2array(ds=ds)
        # Sum data in to global values
        s = ds[vars2useALL].sum(dim='lat').sum(dim='lon').to_dataframe().sum()
        df[run] = s
        if debug:
            print(run, dsDH[run][vars2useALL].sum())
    # Add totals (over individual species only, to avoid double counting
    # the Inorg/Org sub-totals) and print summary
    total = df.T[vars2use].T.sum().copy()
    df = df.T
    df['Total'] = total
    # Print the global budget table (values in Gg(I))
    if verbose:
        print('-------- Global Gg (I) emission budgets ')
        print(df.T)
    # In units of % change of the surface values
    # vs. Macdonald
    dfP = df.T.copy()
    REF = 'Macdonald2014'
    cols = list(dfP.columns)
    cols.pop(cols.index(REF))
    # NB: REF is processed last so dfP[REF] stays unchanged while the other
    # columns are converted (the REF column itself then becomes zero)
    for col in cols + [REF]:
        pcent = (dfP[col] - dfP[REF])/dfP[REF] * 100
        if debug:
            print(col, pcent)
        dfP[col] = pcent.values
    if verbose:
        print('-------- Vs. {} in % terms'.format(REF))
        print(dfP)
    # vs. Chance
    dfP = df.T.copy()
    REF = 'Chance2014'
    cols = list(dfP.columns)
    cols.pop(cols.index(REF))
    for col in cols + [REF]:
        pcent = (dfP[col] - dfP[REF])/dfP[REF] * 100
        if debug:
            print(col, pcent)
        dfP[col] = pcent.values
    if verbose:
        print('-------- Vs. {} in % terms'.format(REF))
        print(dfP)
| [
"AC_tools.get_O3_burden",
"AC_tools.plot2pdfmulti",
"AC_tools.GetHEMCODiagnostics_AsDataset",
"matplotlib.pyplot.show",
"AC_tools.get_surface_area",
"A_PD_hal_paper_analysis_figures.halogen_family_emission_printer.get_species_emiss_Tg_per_yr",
"cartopy.crs.PlateCarree",
"matplotlib.pyplot.close",
"A... | [((963, 1021), 'pandas.DataFrame', 'pd.DataFrame', (['O3Burdens'], {'index': 'params', 'columns': "['O3 bud.']"}), "(O3Burdens, index=params, columns=['O3 bud.'])\n", (975, 1021), True, 'import pandas as pd\n'), ((1377, 1427), 'pandas.DataFrame', 'pd.DataFrame', (['inorg_emiss'], {'index': 'inorg_emiss_names'}), '(inorg_emiss, index=inorg_emiss_names)\n', (1389, 1427), True, 'import pandas as pd\n'), ((1437, 1467), 'pandas.concat', 'pd.concat', (['[df, df2.T]'], {'axis': '(1)'}), '([df, df2.T], axis=1)\n', (1446, 1467), True, 'import pandas as pd\n'), ((3130, 3158), 'AC_tools.get_surface_area', 'AC.get_surface_area', ([], {'res': 'res'}), '(res=res)\n', (3149, 3158), True, 'import AC_tools as AC\n'), ((5613, 5635), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""RdBu_r"""'], {}), "('RdBu_r')\n", (5625, 5635), True, 'import matplotlib.pyplot as plt\n'), ((6448, 6501), 'AC_tools.plot2pdfmulti', 'AC.plot2pdfmulti', ([], {'title': 'savetitle', 'open': '(True)', 'dpi': 'dpi'}), '(title=savetitle, open=True, dpi=dpi)\n', (6464, 6501), True, 'import AC_tools as AC\n'), ((16880, 16934), 'AC_tools.plot2pdfmulti', 'AC.plot2pdfmulti', (['pdff', 'savetitle'], {'close': '(True)', 'dpi': 'dpi'}), '(pdff, savetitle, close=True, dpi=dpi)\n', (16896, 16934), True, 'import AC_tools as AC\n'), ((20174, 20188), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (20186, 20188), True, 'import pandas as pd\n'), ((825, 853), 'AC_tools.get_O3_burden', 'AC.get_O3_burden', (['wd_dict[i]'], {}), '(wd_dict[i])\n', (841, 853), True, 'import AC_tools as AC\n'), ((3301, 3324), 'AC_tools.get_gc_months', 'AC.get_gc_months', ([], {'wd': 'wd'}), '(wd=wd)\n', (3317, 3324), True, 'import AC_tools as AC\n'), ((3341, 3363), 'AC_tools.get_gc_years', 'AC.get_gc_years', ([], {'wd': 'wd'}), '(wd=wd)\n', (3356, 3363), True, 'import AC_tools as AC\n'), ((3402, 3510), 'A_PD_hal_paper_analysis_figures.halogen_family_emission_printer.get_species_emiss_Tg_per_yr', 'get_species_emiss_Tg_per_yr', ([], 
{'wd': 'wd', 'specs': 'specs', 'ref_spec': '"""I"""', 's_area': 's_area', 'years': 'years', 'months': 'months'}), "(wd=wd, specs=specs, ref_spec='I', s_area=s_area,\n years=years, months=months)\n", (3429, 3510), False, 'from A_PD_hal_paper_analysis_figures.halogen_family_emission_printer import get_species_emiss_Tg_per_yr\n'), ((5663, 5676), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (5672, 5676), True, 'import numpy as np\n'), ((6788, 6815), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (6798, 6815), True, 'import matplotlib.pyplot as plt\n'), ((7577, 7619), 'AC_tools.plot2pdfmulti', 'AC.plot2pdfmulti', (['pdff', 'savetitle'], {'dpi': 'dpi'}), '(pdff, savetitle, dpi=dpi)\n', (7593, 7619), True, 'import AC_tools as AC\n'), ((7673, 7684), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7682, 7684), True, 'import matplotlib.pyplot as plt\n'), ((8032, 8059), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (8042, 8059), True, 'import matplotlib.pyplot as plt\n'), ((8819, 8861), 'AC_tools.plot2pdfmulti', 'AC.plot2pdfmulti', (['pdff', 'savetitle'], {'dpi': 'dpi'}), '(pdff, savetitle, dpi=dpi)\n', (8835, 8861), True, 'import AC_tools as AC\n'), ((8915, 8926), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8924, 8926), True, 'import matplotlib.pyplot as plt\n'), ((9485, 9512), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (9495, 9512), True, 'import matplotlib.pyplot as plt\n'), ((10207, 10249), 'AC_tools.plot2pdfmulti', 'AC.plot2pdfmulti', (['pdff', 'savetitle'], {'dpi': 'dpi'}), '(pdff, savetitle, dpi=dpi)\n', (10223, 10249), True, 'import AC_tools as AC\n'), ((10303, 10314), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10312, 10314), True, 'import matplotlib.pyplot as plt\n'), ((10959, 10986), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 
6))\n', (10969, 10986), True, 'import matplotlib.pyplot as plt\n'), ((11814, 11856), 'AC_tools.plot2pdfmulti', 'AC.plot2pdfmulti', (['pdff', 'savetitle'], {'dpi': 'dpi'}), '(pdff, savetitle, dpi=dpi)\n', (11830, 11856), True, 'import AC_tools as AC\n'), ((11910, 11921), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11919, 11921), True, 'import matplotlib.pyplot as plt\n'), ((12572, 12599), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (12582, 12599), True, 'import matplotlib.pyplot as plt\n'), ((13347, 13389), 'AC_tools.plot2pdfmulti', 'AC.plot2pdfmulti', (['pdff', 'savetitle'], {'dpi': 'dpi'}), '(pdff, savetitle, dpi=dpi)\n', (13363, 13389), True, 'import AC_tools as AC\n'), ((13443, 13454), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13452, 13454), True, 'import matplotlib.pyplot as plt\n'), ((14172, 14199), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (14182, 14199), True, 'import matplotlib.pyplot as plt\n'), ((15123, 15165), 'AC_tools.plot2pdfmulti', 'AC.plot2pdfmulti', (['pdff', 'savetitle'], {'dpi': 'dpi'}), '(pdff, savetitle, dpi=dpi)\n', (15139, 15165), True, 'import AC_tools as AC\n'), ((15219, 15230), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15228, 15230), True, 'import matplotlib.pyplot as plt\n'), ((15954, 15981), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (15964, 15981), True, 'import matplotlib.pyplot as plt\n'), ((16742, 16784), 'AC_tools.plot2pdfmulti', 'AC.plot2pdfmulti', (['pdff', 'savetitle'], {'dpi': 'dpi'}), '(pdff, savetitle, dpi=dpi)\n', (16758, 16784), True, 'import AC_tools as AC\n'), ((16838, 16849), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16847, 16849), True, 'import matplotlib.pyplot as plt\n'), ((19057, 19096), 'AC_tools.GetHEMCODiagnostics_AsDataset', 'AC.GetHEMCODiagnostics_AsDataset', ([], {'wd': 'wd'}), '(wd=wd)\n', 
(19089, 19096), True, 'import AC_tools as AC\n'), ((19315, 19411), 'AC_tools.Convert_HEMCO_ds2Gg_per_yr', 'AC.Convert_HEMCO_ds2Gg_per_yr', (['ds'], {'vars2convert': 'vars2use', 'var_species_dict': 'var_species_dict'}), '(ds, vars2convert=vars2use, var_species_dict=\n var_species_dict)\n', (19344, 19411), True, 'import AC_tools as AC\n'), ((7654, 7664), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7662, 7664), True, 'import matplotlib.pyplot as plt\n'), ((8896, 8906), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8904, 8906), True, 'import matplotlib.pyplot as plt\n'), ((10284, 10294), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10292, 10294), True, 'import matplotlib.pyplot as plt\n'), ((11891, 11901), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11899, 11901), True, 'import matplotlib.pyplot as plt\n'), ((13424, 13434), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13432, 13434), True, 'import matplotlib.pyplot as plt\n'), ((15200, 15210), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15208, 15210), True, 'import matplotlib.pyplot as plt\n'), ((16819, 16829), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16827, 16829), True, 'import matplotlib.pyplot as plt\n'), ((6861, 6879), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (6877, 6879), True, 'import cartopy.crs as ccrs\n'), ((7074, 7092), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (7090, 7092), True, 'import cartopy.crs as ccrs\n'), ((8105, 8123), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (8121, 8123), True, 'import cartopy.crs as ccrs\n'), ((8318, 8336), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (8334, 8336), True, 'import cartopy.crs as ccrs\n'), ((9558, 9576), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (9574, 9576), True, 'import cartopy.crs as ccrs\n'), ((9767, 9785), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), 
'()\n', (9783, 9785), True, 'import cartopy.crs as ccrs\n'), ((11032, 11050), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (11048, 11050), True, 'import cartopy.crs as ccrs\n'), ((11371, 11389), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (11387, 11389), True, 'import cartopy.crs as ccrs\n'), ((12645, 12663), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (12661, 12663), True, 'import cartopy.crs as ccrs\n'), ((12904, 12922), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (12920, 12922), True, 'import cartopy.crs as ccrs\n'), ((14245, 14263), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (14261, 14263), True, 'import cartopy.crs as ccrs\n'), ((14680, 14698), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (14696, 14698), True, 'import cartopy.crs as ccrs\n'), ((16027, 16045), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (16043, 16045), True, 'import cartopy.crs as ccrs\n'), ((16299, 16317), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (16315, 16317), True, 'import cartopy.crs as ccrs\n')] |
import pandas as pd
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
def cbet_data(label_cols):
    """Load the CBET corpus and return stopword-filtered texts with labels.

    Parameters
    ----------
    label_cols : list of str
        Names of the label columns to slice out of the CSV.

    Returns
    -------
    tuple
        ``(texts, labels)`` where ``texts`` is a list of lower-cased,
        stopword-filtered strings and ``labels`` is the DataFrame slice
        of the requested columns.
    """
    frame = pd.read_csv('data/CBET.csv')
    labels = frame[label_cols]
    blacklist = set(stopwords.words('english'))

    def clean(raw):
        # Lower-case, tokenize, and drop English stopwords.
        tokens = word_tokenize(raw.lower())
        return ' '.join(w for w in tokens if w not in blacklist)

    texts = [clean(raw) for raw in frame['text'].fillna("fillna").values]
    return texts, labels
if __name__ == '__main__':
    # Nine CBET emotion categories used as multi-label targets.
    label_cols = ['anger', 'fear', 'joy', 'love', 'sadness', 'surprise', 'thankfulness', 'disgust', 'guilt']
    X, y = cbet_data(label_cols)

    # Hold out 10% of the corpus as a fixed development split.
    splitter = ShuffleSplit(n_splits=1, test_size=0.1, random_state=0)
    y = np.asarray(y[label_cols])
    train_index, dev_index = next(splitter.split(X, y))
    X_train = [X[i] for i in train_index]
    X_dev = [X[i] for i in dev_index]
    y_train, y_dev = y[train_index], y[dev_index]

    # Bag-of-words features capped at the most frequent tokens.
    bag_of_words_len = 5000
    vectorizer = CountVectorizer(analyzer="word", tokenizer=None,
                                 preprocessor=None,
                                 max_features=bag_of_words_len)
    x_train_fea = vectorizer.fit_transform(X_train).toarray()
    x_dev_fea = vectorizer.transform(X_dev).toarray()

    # One RBF-kernel SVM per label; probability=True enables predict_proba.
    clf = OneVsRestClassifier(SVC(kernel='rbf', probability=True), n_jobs=-1)
    clf.fit(x_train_fea, y_train)
    y_dev_pred = clf.predict(x_dev_fea)
    result = clf.predict_proba(x_dev_fea)

    # Persist the per-label probabilities for later analysis.
    import pickle
    with open('result_prob.pkl', 'bw') as f:
        pickle.dump([result], f)

    # Macro-averaged scores over all nine labels.
    f1 = f1_score(y_dev, y_dev_pred, average='macro')
    p = precision_score(y_dev, y_dev_pred, average='macro')
    r = recall_score(y_dev, y_dev_pred, average='macro')
    print(f1, p, r)
    with open('result_measure.txt', 'w') as f:
        f.write(str(f1) + ' ' + str(p) + ' ' + str(r) + '\n')
| [
"sklearn.metrics.f1_score",
"pickle.dump",
"nltk.corpus.stopwords.words",
"pandas.read_csv",
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.asarray",
"sklearn.model_selection.ShuffleSplit",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"nltk.tokenize.word_tokenize",... | [((485, 513), 'pandas.read_csv', 'pd.read_csv', (['"""data/CBET.csv"""'], {}), "('data/CBET.csv')\n", (496, 513), True, 'import pandas as pd\n'), ((1069, 1124), 'sklearn.model_selection.ShuffleSplit', 'ShuffleSplit', ([], {'n_splits': '(1)', 'test_size': '(0.1)', 'random_state': '(0)'}), '(n_splits=1, test_size=0.1, random_state=0)\n', (1081, 1124), False, 'from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit\n'), ((1133, 1158), 'numpy.asarray', 'np.asarray', (['y[label_cols]'], {}), '(y[label_cols])\n', (1143, 1158), True, 'import numpy as np\n'), ((1382, 1484), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'analyzer': '"""word"""', 'tokenizer': 'None', 'preprocessor': 'None', 'max_features': 'bag_of_words_len'}), "(analyzer='word', tokenizer=None, preprocessor=None,\n max_features=bag_of_words_len)\n", (1397, 1484), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((2166, 2210), 'sklearn.metrics.f1_score', 'f1_score', (['y_dev', 'y_dev_pred'], {'average': '"""macro"""'}), "(y_dev, y_dev_pred, average='macro')\n", (2174, 2210), False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((2219, 2270), 'sklearn.metrics.precision_score', 'precision_score', (['y_dev', 'y_dev_pred'], {'average': '"""macro"""'}), "(y_dev, y_dev_pred, average='macro')\n", (2234, 2270), False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((2279, 2327), 'sklearn.metrics.recall_score', 'recall_score', (['y_dev', 'y_dev_pred'], {'average': '"""macro"""'}), "(y_dev, y_dev_pred, average='macro')\n", (2291, 2327), False, 'from sklearn.metrics import f1_score, precision_score, recall_score\n'), ((565, 591), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (580, 591), False, 'from nltk.corpus import stopwords\n'), ((709, 725), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['t'], {}), '(t)\n', 
(722, 725), False, 'from nltk.tokenize import word_tokenize\n'), ((1895, 1930), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'probability': '(True)'}), "(kernel='rbf', probability=True)\n", (1898, 1930), False, 'from sklearn.svm import SVC\n'), ((2131, 2155), 'pickle.dump', 'pickle.dump', (['[result]', 'f'], {}), '([result], f)\n', (2142, 2155), False, 'import pickle\n')] |
import os
import glob
from PIL import Image
import numpy as np
import random
# Scan the scraped PNGs, each of which holds two halves (the left half is
# named "sat", the right half "map"), and copy the "uninteresting" ones --
# near-featureless map side AND low-variance sat side -- into out_dir.
image_dir = "[DIRECTORY OF SCRAPED IMAGES]"
out_dir = "./out_pruned_images"
os.makedirs(out_dir, exist_ok=True)
filelist = glob.glob(os.path.join(image_dir, "*.png"))
random.shuffle(filelist)
uninteresting_count = 0
uninteresting_sat_stdevs = []
for i, image_path in enumerate(filelist):
    pil_img = Image.open(image_path)
    pixels = np.array(pil_img)
    H, W, C = pixels.shape
    sat_half, map_half = pixels[:, :W // 2, :], pixels[:, W // 2:, :]
    # Mean per-channel standard deviation of the map half; values near
    # zero indicate an essentially blank tile.
    map_spread = np.std(map_half.reshape((H * W // 2, C)), axis=0).mean()
    if map_spread < 1.0:
        uninteresting_count += 1
        sat_spread = np.std(sat_half.reshape((H * W // 2, C)), axis=0).mean()
        uninteresting_sat_stdevs.append(sat_spread)
        if sat_spread < 30.0:
            out_path = os.path.join(out_dir, os.path.basename(image_path))
            pil_img.save(out_path)
            print(out_path, i / len(filelist) * 100.0)
"PIL.Image.open",
"random.shuffle",
"os.makedirs",
"os.path.join",
"numpy.array",
"os.path.basename"
] | [((154, 189), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (165, 189), False, 'import os\n'), ((245, 269), 'random.shuffle', 'random.shuffle', (['filelist'], {}), '(filelist)\n', (259, 269), False, 'import random\n'), ((211, 243), 'os.path.join', 'os.path.join', (['image_dir', '"""*.png"""'], {}), "(image_dir, '*.png')\n", (223, 243), False, 'import os\n'), ((381, 403), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (391, 403), False, 'from PIL import Image\n'), ((414, 431), 'numpy.array', 'np.array', (['pil_img'], {}), '(pil_img)\n', (422, 431), True, 'import numpy as np\n'), ((882, 910), 'os.path.basename', 'os.path.basename', (['image_path'], {}), '(image_path)\n', (898, 910), False, 'import os\n')] |
import os
import click
import numpy as np
from joblib import Parallel, delayed
import trisicell as tsc
@click.command(short_help="Run SCITE.")
@click.argument(
    "genotype_file",
    required=True,
    type=click.Path(
        exists=True, file_okay=True, dir_okay=False, readable=True, resolve_path=True
    ),
)
@click.argument(
    "alpha",
    required=True,
    type=float,
)
@click.argument(
    "beta",
    required=True,
    type=float,
)
@click.option(
    "--n_iters",
    "-l",
    default=1000000,
    type=int,
    show_default=True,
    help="Number of iterations.",
)
@click.option(
    "--n_restarts",
    "-r",
    default=3,
    type=int,
    show_default=True,
    help="Number of restarts.",
)
@click.option(
    "--experiment",
    "-e",
    is_flag=True,
    default=False,
    type=bool,
    show_default=True,
    help="Is in experiment mode.",
)
@click.option(
    "--n_hours",
    "-h",
    default=24,
    type=float,
    show_default=True,
    help="Number of hours for the experiment part.",
)
def scite(genotype_file, alpha, beta, n_iters, n_restarts, experiment, n_hours):
    """Tree inference for single-cell data :cite:`SCITE`.
    trisicell scite input.SC 0.0001 0.1 -l 1000000 -r 3 -e -h 24
    """
    # All output (log file and inferred conflict-free matrix) is written
    # next to the input, sharing its path minus the extension.
    outfile = os.path.splitext(genotype_file)[0]

    tsc.settings.verbosity = "info"
    tsc.settings.logfile = f"{outfile}.scite.log"

    df_in = tsc.io.read(genotype_file)
    if not experiment:
        # Plain run: infer with the user-supplied iteration/restart budget.
        tsc.settings.logfile = f"{outfile}.scite.log"
        df_out = tsc.tl.scite(
            df_in,
            alpha=alpha,
            beta=beta,
            n_iters=n_iters,
            n_restarts=n_restarts,
        )
        tsc.io.write(df_out, f"{outfile}.scite.CFMatrix")
    else:
        tsc.settings.logfile = f"{outfile}.scite.log"
        # Calibration run: time a short 30000-iteration pass so the full
        # iteration count can be scaled to fit the requested wall-clock
        # budget (n_hours).
        df_out, running_time, _, _ = tsc.tl.scite(
            df_in,
            alpha=alpha,
            beta=beta,
            n_iters=30000,
            n_restarts=1,
            experiment=True,
        )
        # Factor 2: each of the 3 parallel workers gets roughly twice the
        # per-hour iteration rate measured above -- TODO confirm intent.
        n_iters = int(2 * 30000 * n_hours * 60 * 60 / running_time)

        def run(i):
            # One independent restart; returns (df_out, runtime, score, beta).
            do, r, s, b = tsc.tl.scite(
                df_in,
                alpha=alpha,
                beta=beta,
                n_iters=n_iters,
                n_restarts=1,
                experiment=True,
            )
            return do, r, s, b

        # Three independent restarts in parallel; keep the best-scoring one.
        output = Parallel(n_jobs=3)(delayed(run)(i) for i in range(3))
        scores = [x[2] for x in output]
        betas = [x[3] for x in output]
        best_i = np.argmax(scores)
        df_out = output[best_i][0]
        tsc.ul.stat(df_in, df_out, alpha, beta, output[best_i][1])
        tsc.logg.info(f"score: {output[best_i][2]}")
        tsc.logg.info(f"beta: {output[best_i][3]}")
        tsc.logg.info(f"n_iters: {n_iters}")
        tsc.logg.info(f"scores: {','.join(list(map(str, scores)))}")
        tsc.logg.info(f"betas: {','.join(list(map(str, betas)))}")
        tsc.logg.info(f"picked: {best_i}")
        tsc.io.write(df_out, f"{outfile}.scite.CFMatrix")
    return None
| [
"trisicell.tl.scite",
"click.argument",
"trisicell.io.write",
"trisicell.logg.info",
"click.option",
"trisicell.ul.stat",
"os.path.splitext",
"numpy.argmax",
"joblib.Parallel",
"click.Path",
"trisicell.io.read",
"joblib.delayed",
"click.command"
] | [((108, 146), 'click.command', 'click.command', ([], {'short_help': '"""Run SCITE."""'}), "(short_help='Run SCITE.')\n", (121, 146), False, 'import click\n'), ((321, 371), 'click.argument', 'click.argument', (['"""alpha"""'], {'required': '(True)', 'type': 'float'}), "('alpha', required=True, type=float)\n", (335, 371), False, 'import click\n'), ((388, 437), 'click.argument', 'click.argument', (['"""beta"""'], {'required': '(True)', 'type': 'float'}), "('beta', required=True, type=float)\n", (402, 437), False, 'import click\n'), ((454, 566), 'click.option', 'click.option', (['"""--n_iters"""', '"""-l"""'], {'default': '(1000000)', 'type': 'int', 'show_default': '(True)', 'help': '"""Number of iterations."""'}), "('--n_iters', '-l', default=1000000, type=int, show_default=\n True, help='Number of iterations.')\n", (466, 566), False, 'import click\n'), ((590, 696), 'click.option', 'click.option', (['"""--n_restarts"""', '"""-r"""'], {'default': '(3)', 'type': 'int', 'show_default': '(True)', 'help': '"""Number of restarts."""'}), "('--n_restarts', '-r', default=3, type=int, show_default=True,\n help='Number of restarts.')\n", (602, 696), False, 'import click\n'), ((721, 849), 'click.option', 'click.option', (['"""--experiment"""', '"""-e"""'], {'is_flag': '(True)', 'default': '(False)', 'type': 'bool', 'show_default': '(True)', 'help': '"""Is in experiment mode."""'}), "('--experiment', '-e', is_flag=True, default=False, type=bool,\n show_default=True, help='Is in experiment mode.')\n", (733, 849), False, 'import click\n'), ((878, 1005), 'click.option', 'click.option', (['"""--n_hours"""', '"""-h"""'], {'default': '(24)', 'type': 'float', 'show_default': '(True)', 'help': '"""Number of hours for the experiment part."""'}), "('--n_hours', '-h', default=24, type=float, show_default=True,\n help='Number of hours for the experiment part.')\n", (890, 1005), False, 'import click\n'), ((1342, 1368), 'trisicell.io.read', 'tsc.io.read', (['genotype_file'], {}), 
'(genotype_file)\n', (1353, 1368), True, 'import trisicell as tsc\n'), ((1257, 1288), 'os.path.splitext', 'os.path.splitext', (['genotype_file'], {}), '(genotype_file)\n', (1273, 1288), False, 'import os\n'), ((1463, 1551), 'trisicell.tl.scite', 'tsc.tl.scite', (['df_in'], {'alpha': 'alpha', 'beta': 'beta', 'n_iters': 'n_iters', 'n_restarts': 'n_restarts'}), '(df_in, alpha=alpha, beta=beta, n_iters=n_iters, n_restarts=\n n_restarts)\n', (1475, 1551), True, 'import trisicell as tsc\n'), ((1626, 1675), 'trisicell.io.write', 'tsc.io.write', (['df_out', 'f"""{outfile}.scite.CFMatrix"""'], {}), "(df_out, f'{outfile}.scite.CFMatrix')\n", (1638, 1675), True, 'import trisicell as tsc\n'), ((1777, 1870), 'trisicell.tl.scite', 'tsc.tl.scite', (['df_in'], {'alpha': 'alpha', 'beta': 'beta', 'n_iters': '(30000)', 'n_restarts': '(1)', 'experiment': '(True)'}), '(df_in, alpha=alpha, beta=beta, n_iters=30000, n_restarts=1,\n experiment=True)\n', (1789, 1870), True, 'import trisicell as tsc\n'), ((2468, 2485), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (2477, 2485), True, 'import numpy as np\n'), ((2530, 2588), 'trisicell.ul.stat', 'tsc.ul.stat', (['df_in', 'df_out', 'alpha', 'beta', 'output[best_i][1]'], {}), '(df_in, df_out, alpha, beta, output[best_i][1])\n', (2541, 2588), True, 'import trisicell as tsc\n'), ((2597, 2641), 'trisicell.logg.info', 'tsc.logg.info', (['f"""score: {output[best_i][2]}"""'], {}), "(f'score: {output[best_i][2]}')\n", (2610, 2641), True, 'import trisicell as tsc\n'), ((2650, 2693), 'trisicell.logg.info', 'tsc.logg.info', (['f"""beta: {output[best_i][3]}"""'], {}), "(f'beta: {output[best_i][3]}')\n", (2663, 2693), True, 'import trisicell as tsc\n'), ((2702, 2738), 'trisicell.logg.info', 'tsc.logg.info', (['f"""n_iters: {n_iters}"""'], {}), "(f'n_iters: {n_iters}')\n", (2715, 2738), True, 'import trisicell as tsc\n'), ((2883, 2917), 'trisicell.logg.info', 'tsc.logg.info', (['f"""picked: {best_i}"""'], {}), "(f'picked: {best_i}')\n", 
(2896, 2917), True, 'import trisicell as tsc\n'), ((2927, 2976), 'trisicell.io.write', 'tsc.io.write', (['df_out', 'f"""{outfile}.scite.CFMatrix"""'], {}), "(df_out, f'{outfile}.scite.CFMatrix')\n", (2939, 2976), True, 'import trisicell as tsc\n'), ((213, 306), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'file_okay': '(True)', 'dir_okay': '(False)', 'readable': '(True)', 'resolve_path': '(True)'}), '(exists=True, file_okay=True, dir_okay=False, readable=True,\n resolve_path=True)\n', (223, 306), False, 'import click\n'), ((2065, 2160), 'trisicell.tl.scite', 'tsc.tl.scite', (['df_in'], {'alpha': 'alpha', 'beta': 'beta', 'n_iters': 'n_iters', 'n_restarts': '(1)', 'experiment': '(True)'}), '(df_in, alpha=alpha, beta=beta, n_iters=n_iters, n_restarts=1,\n experiment=True)\n', (2077, 2160), True, 'import trisicell as tsc\n'), ((2317, 2335), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(3)'}), '(n_jobs=3)\n', (2325, 2335), False, 'from joblib import Parallel, delayed\n'), ((2336, 2348), 'joblib.delayed', 'delayed', (['run'], {}), '(run)\n', (2343, 2348), False, 'from joblib import Parallel, delayed\n')] |
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from PIL import Image
from skimage import io
from skimage import transform
from tensorflow.keras.applications import Xception
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import GlobalMaxPooling2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.models import load_model
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tqdm import tqdm
model = load_model("./model/prinumco_mobilenet.h5")


def load(filename):
    """Read an image file and return a (1, 96, 96, 3) float batch in [0, 1]."""
    pil_image = Image.open(filename)
    # Scale to [0, 1], resize to the network's input shape, and add a
    # leading batch axis.
    arr = np.array(pil_image).astype("float32") / 255
    arr = transform.resize(arr, (96, 96, 3))
    return np.expand_dims(arr, axis=0)


url = "./results/test.png"
image = load(url)
predict_matrix = model.predict(image)
print(np.argmax(predict_matrix))
| [
"PIL.Image.open",
"numpy.argmax",
"numpy.array",
"tensorflow.keras.models.load_model",
"numpy.expand_dims",
"skimage.transform.resize"
] | [((940, 983), 'tensorflow.keras.models.load_model', 'load_model', (['"""./model/prinumco_mobilenet.h5"""'], {}), "('./model/prinumco_mobilenet.h5')\n", (950, 983), False, 'from tensorflow.keras.models import load_model\n'), ((1019, 1039), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (1029, 1039), False, 'from PIL import Image\n'), ((1110, 1149), 'skimage.transform.resize', 'transform.resize', (['np_image', '(96, 96, 3)'], {}), '(np_image, (96, 96, 3))\n', (1126, 1149), False, 'from skimage import transform\n'), ((1165, 1197), 'numpy.expand_dims', 'np.expand_dims', (['np_image'], {'axis': '(0)'}), '(np_image, axis=0)\n', (1179, 1197), True, 'import numpy as np\n'), ((1311, 1336), 'numpy.argmax', 'np.argmax', (['predict_matrix'], {}), '(predict_matrix)\n', (1320, 1336), True, 'import numpy as np\n'), ((1055, 1070), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1063, 1070), True, 'import numpy as np\n')] |
import pyclesperanto_prototype as cle
import numpy as np
def test_exclude_labels_2d():
    """exclude_labels must drop flagged labels and renumber the rest (2D)."""
    label_image = cle.push(np.asarray([
        [0, 0, 2, 0, 0, 0, 0],
        [0, 1, 2, 0, 7, 0, 0],
        [0, 1, 0, 0, 7, 5, 5],
        [8, 8, 8, 0, 0, 0, 0],
        [0, 4, 4, 0, 3, 0, 0],
        [0, 4, 4, 6, 0, 0, 0],
    ]))
    # Labels 3, 4 and 7 are flagged for removal; the survivors are
    # relabelled sequentially.
    expected = cle.push(np.asarray([
        [0, 0, 2, 0, 0, 0, 0],
        [0, 1, 2, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 3, 3],
        [5, 5, 5, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 4, 0, 0, 0],
    ]))
    flags = cle.push(np.asarray([[0, 0, 0, 1, 1, 0, 0, 1, 0]]))
    result = cle.exclude_labels(flags, label_image)
    actual_np = cle.pull(result)
    expected_np = cle.pull(expected)
    print(actual_np)
    print(expected_np)
    assert np.array_equal(actual_np, expected_np)
def test_exclude_labels_3d():
    """exclude_labels must drop flagged labels and renumber the rest (3D)."""
    label_image = cle.push(np.asarray([
        [
            [0, 0, 2, 0, 0, 0, 0],
            [0, 1, 2, 0, 7, 0, 0],
            [0, 1, 0, 0, 7, 5, 5],
        ], [
            [8, 8, 8, 0, 0, 0, 0],
            [0, 4, 4, 0, 3, 0, 0],
            [0, 4, 4, 6, 0, 0, 0],
        ]
    ]))
    # Labels 3, 4 and 7 are flagged for removal; the survivors are
    # relabelled sequentially.
    expected = cle.push(np.asarray([
        [
            [0, 0, 2, 0, 0, 0, 0],
            [0, 1, 2, 0, 0, 0, 0],
            [0, 1, 0, 0, 0, 3, 3],
        ], [
            [5, 5, 5, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 4, 0, 0, 0],
        ]
    ]))
    flags = cle.push(np.asarray([[0, 0, 0, 1, 1, 0, 0, 1, 0]]))
    result = cle.exclude_labels(flags, label_image)
    actual_np = cle.pull(result)
    expected_np = cle.pull(expected)
    print(actual_np)
    print(expected_np)
    assert np.array_equal(actual_np, expected_np)
| [
"numpy.asarray",
"pyclesperanto_prototype.pull",
"numpy.array_equal",
"pyclesperanto_prototype.exclude_labels"
] | [((700, 739), 'pyclesperanto_prototype.exclude_labels', 'cle.exclude_labels', (['flaglist', 'gpu_input'], {}), '(flaglist, gpu_input)\n', (718, 739), True, 'import pyclesperanto_prototype as cle\n'), ((749, 769), 'pyclesperanto_prototype.pull', 'cle.pull', (['gpu_output'], {}), '(gpu_output)\n', (757, 769), True, 'import pyclesperanto_prototype as cle\n'), ((778, 801), 'pyclesperanto_prototype.pull', 'cle.pull', (['gpu_reference'], {}), '(gpu_reference)\n', (786, 801), True, 'import pyclesperanto_prototype as cle\n'), ((842, 862), 'numpy.array_equal', 'np.array_equal', (['a', 'b'], {}), '(a, b)\n', (856, 862), True, 'import numpy as np\n'), ((1563, 1602), 'pyclesperanto_prototype.exclude_labels', 'cle.exclude_labels', (['flaglist', 'gpu_input'], {}), '(flaglist, gpu_input)\n', (1581, 1602), True, 'import pyclesperanto_prototype as cle\n'), ((1612, 1632), 'pyclesperanto_prototype.pull', 'cle.pull', (['gpu_output'], {}), '(gpu_output)\n', (1620, 1632), True, 'import pyclesperanto_prototype as cle\n'), ((1641, 1664), 'pyclesperanto_prototype.pull', 'cle.pull', (['gpu_reference'], {}), '(gpu_reference)\n', (1649, 1664), True, 'import pyclesperanto_prototype as cle\n'), ((1705, 1725), 'numpy.array_equal', 'np.array_equal', (['a', 'b'], {}), '(a, b)\n', (1719, 1725), True, 'import numpy as np\n'), ((118, 272), 'numpy.asarray', 'np.asarray', (['[[0, 0, 2, 0, 0, 0, 0], [0, 1, 2, 0, 7, 0, 0], [0, 1, 0, 0, 7, 5, 5], [8, 8,\n 8, 0, 0, 0, 0], [0, 4, 4, 0, 3, 0, 0], [0, 4, 4, 6, 0, 0, 0]]'], {}), '([[0, 0, 2, 0, 0, 0, 0], [0, 1, 2, 0, 7, 0, 0], [0, 1, 0, 0, 7, 5,\n 5], [8, 8, 8, 0, 0, 0, 0], [0, 4, 4, 0, 3, 0, 0], [0, 4, 4, 6, 0, 0, 0]])\n', (128, 272), True, 'import numpy as np\n'), ((381, 535), 'numpy.asarray', 'np.asarray', (['[[0, 0, 2, 0, 0, 0, 0], [0, 1, 2, 0, 0, 0, 0], [0, 1, 0, 0, 0, 3, 3], [5, 5,\n 5, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 4, 0, 0, 0]]'], {}), '([[0, 0, 2, 0, 0, 0, 0], [0, 1, 2, 0, 0, 0, 0], [0, 1, 0, 0, 0, 3,\n 3], [5, 5, 5, 0, 0, 0, 0], 
[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 4, 0, 0, 0]])\n', (391, 535), True, 'import numpy as np\n'), ((639, 680), 'numpy.asarray', 'np.asarray', (['[[0, 0, 0, 1, 1, 0, 0, 1, 0]]'], {}), '([[0, 0, 0, 1, 1, 0, 0, 1, 0]])\n', (649, 680), True, 'import numpy as np\n'), ((921, 1085), 'numpy.asarray', 'np.asarray', (['[[[0, 0, 2, 0, 0, 0, 0], [0, 1, 2, 0, 7, 0, 0], [0, 1, 0, 0, 7, 5, 5]], [[8,\n 8, 8, 0, 0, 0, 0], [0, 4, 4, 0, 3, 0, 0], [0, 4, 4, 6, 0, 0, 0]]]'], {}), '([[[0, 0, 2, 0, 0, 0, 0], [0, 1, 2, 0, 7, 0, 0], [0, 1, 0, 0, 7, \n 5, 5]], [[8, 8, 8, 0, 0, 0, 0], [0, 4, 4, 0, 3, 0, 0], [0, 4, 4, 6, 0, \n 0, 0]]])\n', (931, 1085), True, 'import numpy as np\n'), ((1214, 1378), 'numpy.asarray', 'np.asarray', (['[[[0, 0, 2, 0, 0, 0, 0], [0, 1, 2, 0, 0, 0, 0], [0, 1, 0, 0, 0, 3, 3]], [[5,\n 5, 5, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 4, 0, 0, 0]]]'], {}), '([[[0, 0, 2, 0, 0, 0, 0], [0, 1, 2, 0, 0, 0, 0], [0, 1, 0, 0, 0, \n 3, 3]], [[5, 5, 5, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 4, 0, \n 0, 0]]])\n', (1224, 1378), True, 'import numpy as np\n'), ((1502, 1543), 'numpy.asarray', 'np.asarray', (['[[0, 0, 0, 1, 1, 0, 0, 1, 0]]'], {}), '([[0, 0, 0, 1, 1, 0, 0, 1, 0]])\n', (1512, 1543), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# test_csa.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
CSA tests
"""
import unittest
import nest
from . import compatibility
try:
import csa
HAVE_CSA = True
except ImportError:
HAVE_CSA = False
try:
import numpy
HAVE_NUMPY = True
except ImportError:
HAVE_NUMPY = False
# Ask the SLI interpreter whether NEST was built with libneurosim support;
# the CGConnect tests below are skipped without it.
nest.sli_run("statusdict/have_libneurosim ::")
HAVE_LIBNEUROSIM = nest.sli_pop()
@nest.check_stack
@unittest.skipIf(not HAVE_CSA, 'Python CSA package is not available')
@unittest.skipIf(
    not HAVE_LIBNEUROSIM,
    'NEST was built without support for libneurosim'
)
class CSATestCase(unittest.TestCase):
    """Tests for connecting NEST nodes via CSA (Connection Set Algebra).

    Each test builds a one-to-one connection set with the ``csa`` package
    and checks that ``nest.CGConnect`` realises it correctly, or raises
    the expected error for invalid input.
    """
    def test_CSA_OneToOne_tuples(self):
        """One-to-one connectivity using CGConnect with id tuples"""
        nest.ResetKernel()
        n_neurons = 4
        sources = nest.Create("iaf_psc_alpha", n_neurons)
        targets = nest.Create("iaf_psc_alpha", n_neurons)
        # Create a plain connection set
        cg = csa.cset(csa.oneToOne)
        # Connect sources and targets using the connection set
        # cg. This will internally call the variant of CGConnect that
        # takes lists
        nest.CGConnect(sources, targets, cg)
        for i in range(n_neurons):
            # We expect all connections from sources to have the
            # correct targets
            conns = nest.GetStatus(nest.GetConnections([sources[i]]))
            self.assertEqual(len(conns), 1)
            self.assertEqual(conns[0]["target"], targets[i])
            # We expect the targets to have no connections at all
            conns = nest.GetStatus(nest.GetConnections([targets[i]]))
            self.assertEqual(len(conns), 0)
    @unittest.skipIf(not HAVE_NUMPY, 'NumPy package is not available')
    def test_CSA_OneToOne_intvectors(self):
        """One-to-one connectivity using CGConnect with id intvectors"""
        nest.ResetKernel()
        n_neurons = 4
        sources = nest.Create("iaf_psc_alpha", n_neurons)
        targets = nest.Create("iaf_psc_alpha", n_neurons)
        # Create a plain connection set
        cg = csa.cset(csa.oneToOne)
        # Connect sources and targets (both converted to NumPy arrays)
        # using the connection set cg. This will internally call the
        # variant of CGConnect that takes intvector instead of lists
        nest.CGConnect(numpy.array(sources), numpy.array(targets), cg)
        for i in range(n_neurons):
            # We expect all connections from sources to have the
            # correct targets
            conns = nest.GetStatus(nest.GetConnections([sources[i]]))
            self.assertEqual(len(conns), 1)
            self.assertEqual(conns[0]["target"], targets[i])
            # We expect the targets to have no connections at all
            conns = nest.GetStatus(nest.GetConnections([targets[i]]))
            self.assertEqual(len(conns), 0)
    def test_CSA_OneToOne_params(self):
        """One-to-one connectivity using CGConnect with parameters"""
        nest.ResetKernel()
        n_neurons = 4
        weight = 10000.0
        delay = 2.0
        sources = nest.Create("iaf_psc_alpha", n_neurons)
        targets = nest.Create("iaf_psc_alpha", n_neurons)
        # Create a connection set with values for weight and delay
        cs = csa.cset(csa.oneToOne, weight, delay)
        # Connect sources and targets using the connection set cs and
        # a parameter map mapping weight to position 0 in the value
        # set and delay to position 1
        nest.CGConnect(sources, targets, cs, {"weight": 0, "delay": 1})
        for i in range(n_neurons):
            # We expect all connections from sources to have the
            # correct targets, weights and delays
            conns = nest.GetStatus(nest.GetConnections([sources[i]]))
            self.assertEqual(len(conns), 1)
            self.assertEqual(conns[0]["target"], targets[i])
            self.assertEqual(conns[0]["weight"], weight)
            self.assertEqual(conns[0]["delay"], delay)
            # We expect the targets to have no connections at all
            conns = nest.GetStatus(nest.GetConnections([targets[i]]))
            self.assertEqual(len(conns), 0)
    def test_CSA_OneToOne_synmodel(self):
        """One-to-one connectivity using CGConnect with synmodel"""
        nest.ResetKernel()
        n_neurons = 4
        synmodel = "stdp_synapse"
        sources = nest.Create("iaf_psc_alpha", n_neurons)
        targets = nest.Create("iaf_psc_alpha", n_neurons)
        # Create a plain connection set
        cs = csa.cset(csa.oneToOne)
        # Connect with a non-standard synapse model
        nest.CGConnect(sources, targets, cs, model=synmodel)
        for i in range(n_neurons):
            # We expect all connections to have the correct targets
            # and the non-standard synapse model set
            conns = nest.GetStatus(nest.GetConnections([sources[i]]))
            self.assertEqual(len(conns), 1)
            self.assertEqual(conns[0]["target"], targets[i])
            self.assertEqual(conns[0]["synapse_model"], synmodel)
            # We expect the targets to have no connections at all
            conns = nest.GetStatus(nest.GetConnections([targets[i]]))
            self.assertEqual(len(conns), 0)
    def test_CSA_error_unknown_nodes(self):
        """Error handling of CGConnect in case of unknown nodes"""
        nest.ResetKernel()
        # Create a plain connection set
        cs = csa.cset(csa.oneToOne)
        nonnodes = [1, 2, 3]
        # We expect CGConnect to fail with an UnknownNode exception if
        # unknown nodes are given
        self.assertRaisesRegex(nest.NESTError, "UnknownNode",
                               nest.CGConnect, nonnodes, nonnodes, cs)
    def test_CSA_error_unknown_synapse(self):
        """Error handling of CGConnect in case of unknown synapse model"""
        nest.ResetKernel()
        # Create a plain connection set
        cs = csa.cset(csa.oneToOne)
        n_neurons = 4
        sources = nest.Create("iaf_psc_alpha", n_neurons)
        targets = nest.Create("iaf_psc_alpha", n_neurons)
        # We expect CGConnect to fail with an UnknownSynapseType
        # exception if an unknown synapse model is given
        self.assertRaisesRegex(nest.NESTError, "UnknownSynapseType",
                               nest.CGConnect, sources, targets, cs,
                               model="nonexistent_synapse")
def suite():
    """Return a TestSuite containing all ``test_*`` methods of CSATestCase.

    ``unittest.makeSuite`` was deprecated in Python 3.11 and removed in
    3.13; ``TestLoader.loadTestsFromTestCase`` is the supported
    equivalent (the loader's default prefix is ``'test'``, matching the
    old ``makeSuite(CSATestCase, 'test')`` call).
    """
    return unittest.TestLoader().loadTestsFromTestCase(CSATestCase)
def run():
    """Execute the CSA test suite with a verbose text test runner."""
    unittest.TextTestRunner(verbosity=2).run(suite())


if __name__ == "__main__":
    run()
| [
"nest.sli_pop",
"nest.Create",
"nest.ResetKernel",
"csa.cset",
"unittest.makeSuite",
"unittest.skipIf",
"nest.CGConnect",
"nest.GetConnections",
"nest.sli_run",
"numpy.array",
"unittest.TextTestRunner"
] | [((974, 1020), 'nest.sli_run', 'nest.sli_run', (['"""statusdict/have_libneurosim ::"""'], {}), "('statusdict/have_libneurosim ::')\n", (986, 1020), False, 'import nest\n'), ((1040, 1054), 'nest.sli_pop', 'nest.sli_pop', ([], {}), '()\n', (1052, 1054), False, 'import nest\n'), ((1076, 1144), 'unittest.skipIf', 'unittest.skipIf', (['(not HAVE_CSA)', '"""Python CSA package is not available"""'], {}), "(not HAVE_CSA, 'Python CSA package is not available')\n", (1091, 1144), False, 'import unittest\n'), ((1146, 1237), 'unittest.skipIf', 'unittest.skipIf', (['(not HAVE_LIBNEUROSIM)', '"""NEST was built without support for libneurosim"""'], {}), "(not HAVE_LIBNEUROSIM,\n 'NEST was built without support for libneurosim')\n", (1161, 1237), False, 'import unittest\n'), ((2351, 2416), 'unittest.skipIf', 'unittest.skipIf', (['(not HAVE_NUMPY)', '"""NumPy package is not available"""'], {}), "(not HAVE_NUMPY, 'NumPy package is not available')\n", (2366, 2416), False, 'import unittest\n'), ((7142, 7181), 'unittest.makeSuite', 'unittest.makeSuite', (['CSATestCase', '"""test"""'], {}), "(CSATestCase, 'test')\n", (7160, 7181), False, 'import unittest\n'), ((7225, 7261), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (7248, 7261), False, 'import unittest\n'), ((1421, 1439), 'nest.ResetKernel', 'nest.ResetKernel', ([], {}), '()\n', (1437, 1439), False, 'import nest\n'), ((1482, 1521), 'nest.Create', 'nest.Create', (['"""iaf_psc_alpha"""', 'n_neurons'], {}), "('iaf_psc_alpha', n_neurons)\n", (1493, 1521), False, 'import nest\n'), ((1540, 1579), 'nest.Create', 'nest.Create', (['"""iaf_psc_alpha"""', 'n_neurons'], {}), "('iaf_psc_alpha', n_neurons)\n", (1551, 1579), False, 'import nest\n'), ((1634, 1656), 'csa.cset', 'csa.cset', (['csa.oneToOne'], {}), '(csa.oneToOne)\n', (1642, 1656), False, 'import csa\n'), ((1821, 1857), 'nest.CGConnect', 'nest.CGConnect', (['sources', 'targets', 'cg'], {}), '(sources, targets, cg)\n', (1835, 
1857), False, 'import nest\n'), ((2543, 2561), 'nest.ResetKernel', 'nest.ResetKernel', ([], {}), '()\n', (2559, 2561), False, 'import nest\n'), ((2604, 2643), 'nest.Create', 'nest.Create', (['"""iaf_psc_alpha"""', 'n_neurons'], {}), "('iaf_psc_alpha', n_neurons)\n", (2615, 2643), False, 'import nest\n'), ((2662, 2701), 'nest.Create', 'nest.Create', (['"""iaf_psc_alpha"""', 'n_neurons'], {}), "('iaf_psc_alpha', n_neurons)\n", (2673, 2701), False, 'import nest\n'), ((2756, 2778), 'csa.cset', 'csa.cset', (['csa.oneToOne'], {}), '(csa.oneToOne)\n', (2764, 2778), False, 'import csa\n'), ((3666, 3684), 'nest.ResetKernel', 'nest.ResetKernel', ([], {}), '()\n', (3682, 3684), False, 'import nest\n'), ((3772, 3811), 'nest.Create', 'nest.Create', (['"""iaf_psc_alpha"""', 'n_neurons'], {}), "('iaf_psc_alpha', n_neurons)\n", (3783, 3811), False, 'import nest\n'), ((3830, 3869), 'nest.Create', 'nest.Create', (['"""iaf_psc_alpha"""', 'n_neurons'], {}), "('iaf_psc_alpha', n_neurons)\n", (3841, 3869), False, 'import nest\n'), ((3951, 3988), 'csa.cset', 'csa.cset', (['csa.oneToOne', 'weight', 'delay'], {}), '(csa.oneToOne, weight, delay)\n', (3959, 3988), False, 'import csa\n'), ((4174, 4237), 'nest.CGConnect', 'nest.CGConnect', (['sources', 'targets', 'cs', "{'weight': 0, 'delay': 1}"], {}), "(sources, targets, cs, {'weight': 0, 'delay': 1})\n", (4188, 4237), False, 'import nest\n'), ((4977, 4995), 'nest.ResetKernel', 'nest.ResetKernel', ([], {}), '()\n', (4993, 4995), False, 'import nest\n'), ((5072, 5111), 'nest.Create', 'nest.Create', (['"""iaf_psc_alpha"""', 'n_neurons'], {}), "('iaf_psc_alpha', n_neurons)\n", (5083, 5111), False, 'import nest\n'), ((5130, 5169), 'nest.Create', 'nest.Create', (['"""iaf_psc_alpha"""', 'n_neurons'], {}), "('iaf_psc_alpha', n_neurons)\n", (5141, 5169), False, 'import nest\n'), ((5224, 5246), 'csa.cset', 'csa.cset', (['csa.oneToOne'], {}), '(csa.oneToOne)\n', (5232, 5246), False, 'import csa\n'), ((5308, 5360), 'nest.CGConnect', 'nest.CGConnect', 
(['sources', 'targets', 'cs'], {'model': 'synmodel'}), '(sources, targets, cs, model=synmodel)\n', (5322, 5360), False, 'import nest\n'), ((6061, 6079), 'nest.ResetKernel', 'nest.ResetKernel', ([], {}), '()\n', (6077, 6079), False, 'import nest\n'), ((6134, 6156), 'csa.cset', 'csa.cset', (['csa.oneToOne'], {}), '(csa.oneToOne)\n', (6142, 6156), False, 'import csa\n'), ((6557, 6575), 'nest.ResetKernel', 'nest.ResetKernel', ([], {}), '()\n', (6573, 6575), False, 'import nest\n'), ((6630, 6652), 'csa.cset', 'csa.cset', (['csa.oneToOne'], {}), '(csa.oneToOne)\n', (6638, 6652), False, 'import csa\n'), ((6695, 6734), 'nest.Create', 'nest.Create', (['"""iaf_psc_alpha"""', 'n_neurons'], {}), "('iaf_psc_alpha', n_neurons)\n", (6706, 6734), False, 'import nest\n'), ((6753, 6792), 'nest.Create', 'nest.Create', (['"""iaf_psc_alpha"""', 'n_neurons'], {}), "('iaf_psc_alpha', n_neurons)\n", (6764, 6792), False, 'import nest\n'), ((3012, 3032), 'numpy.array', 'numpy.array', (['sources'], {}), '(sources)\n', (3023, 3032), False, 'import numpy\n'), ((3034, 3054), 'numpy.array', 'numpy.array', (['targets'], {}), '(targets)\n', (3045, 3054), False, 'import numpy\n'), ((2024, 2057), 'nest.GetConnections', 'nest.GetConnections', (['[sources[i]]'], {}), '([sources[i]])\n', (2043, 2057), False, 'import nest\n'), ((2266, 2299), 'nest.GetConnections', 'nest.GetConnections', (['[targets[i]]'], {}), '([targets[i]])\n', (2285, 2299), False, 'import nest\n'), ((3226, 3259), 'nest.GetConnections', 'nest.GetConnections', (['[sources[i]]'], {}), '([sources[i]])\n', (3245, 3259), False, 'import nest\n'), ((3468, 3501), 'nest.GetConnections', 'nest.GetConnections', (['[targets[i]]'], {}), '([targets[i]])\n', (3487, 3501), False, 'import nest\n'), ((4424, 4457), 'nest.GetConnections', 'nest.GetConnections', (['[sources[i]]'], {}), '([sources[i]])\n', (4443, 4457), False, 'import nest\n'), ((4778, 4811), 'nest.GetConnections', 'nest.GetConnections', (['[targets[i]]'], {}), '([targets[i]])\n', (4797, 
4811), False, 'import nest\n'), ((5553, 5586), 'nest.GetConnections', 'nest.GetConnections', (['[sources[i]]'], {}), '([sources[i]])\n', (5572, 5586), False, 'import nest\n'), ((5861, 5894), 'nest.GetConnections', 'nest.GetConnections', (['[targets[i]]'], {}), '([targets[i]])\n', (5880, 5894), False, 'import nest\n')] |
import os
import numpy as np
from train import config
# Providers of training and test data.
# classes: number of classes
# input_size: image dimension / input dimension
# max_pixel: maximum pixel value (1 for 0-1 data, 255 for 0-255 data)
def get_mnist_data():
    """Load the per-class MNIST ``.npy`` files and build one-hot labelled splits.

    Each class ``i`` is stored as ``<dir>/<i>.npy`` under the train/test
    directories configured in ``train.config``.

    Returns:
        tuple: ``(train_datas, train_labels, test_datas, test_labels)`` where
        data is scaled by ``max_pixel`` and labels are one-hot ``(n, classes)``.
    """
    classes = 10
    input_size = 784  # flattened 28x28 images; documents the expected row width
    max_pixel = 1  # MNIST files are already stored in the 0-1 range
    train_dir_path = os.path.join(config.data_save_path, config.mnist_train_data)
    test_dir_path = os.path.join(config.data_save_path, config.mnist_test_data)
    # Collect the per-class arrays first and concatenate once at the end;
    # concatenating inside the loop re-copies the whole accumulated array
    # on every iteration (quadratic in total size).
    train_parts, train_label_parts = [], []
    test_parts, test_label_parts = [], []
    for i in range(classes):
        class_train = np.load(os.path.join(train_dir_path, str(i) + '.npy'))
        class_train_labels = np.zeros(shape=(class_train.shape[0], classes))
        class_train_labels[:, i] = 1  # one-hot: column i marks class i
        class_test = np.load(os.path.join(test_dir_path, str(i) + '.npy'))
        class_test_labels = np.zeros(shape=(class_test.shape[0], classes))
        class_test_labels[:, i] = 1
        train_parts.append(class_train)
        train_label_parts.append(class_train_labels)
        test_parts.append(class_test)
        test_label_parts.append(class_test_labels)
    train_datas = np.concatenate(train_parts)
    train_labels = np.concatenate(train_label_parts)
    test_datas = np.concatenate(test_parts)
    test_labels = np.concatenate(test_label_parts)
    return train_datas / max_pixel, train_labels, test_datas / max_pixel, test_labels
def get_cifar_10_data():
    """Load the per-class CIFAR-10 (grayscale) ``.npy`` files with one-hot labels.

    Each class ``i`` is stored as ``<dir>/<i>.npy`` under the train/test
    directories configured in ``train.config``.

    Returns:
        tuple: ``(train_datas, train_labels, test_datas, test_labels)`` where
        data is scaled by ``max_pixel`` and labels are one-hot ``(n, classes)``.
    """
    classes = 10
    input_size = 1024  # flattened 32x32 grayscale images; documents the row width
    max_pixel = 255  # pixels are stored as 0-255, so divide to normalise
    train_dir_path = os.path.join(config.data_save_path, config.cifar_10_train_L_data)
    test_dir_path = os.path.join(config.data_save_path, config.cifar_10_test_L_data)
    # Collect the per-class arrays first and concatenate once at the end;
    # concatenating inside the loop re-copies the whole accumulated array
    # on every iteration (quadratic in total size).
    train_parts, train_label_parts = [], []
    test_parts, test_label_parts = [], []
    for i in range(classes):
        class_train = np.load(os.path.join(train_dir_path, str(i) + '.npy'))
        class_train_labels = np.zeros(shape=(class_train.shape[0], classes))
        class_train_labels[:, i] = 1  # one-hot: column i marks class i
        class_test = np.load(os.path.join(test_dir_path, str(i) + '.npy'))
        class_test_labels = np.zeros(shape=(class_test.shape[0], classes))
        class_test_labels[:, i] = 1
        train_parts.append(class_train)
        train_label_parts.append(class_train_labels)
        test_parts.append(class_test)
        test_label_parts.append(class_test_labels)
    train_datas = np.concatenate(train_parts)
    train_labels = np.concatenate(train_label_parts)
    test_datas = np.concatenate(test_parts)
    test_labels = np.concatenate(test_label_parts)
    return train_datas / max_pixel, train_labels, test_datas / max_pixel, test_labels
def get_cifar_100_data():
    """Load the per-class CIFAR-100 (grayscale) ``.npy`` files with one-hot labels.

    Each class ``i`` is stored as ``<dir>/<i>.npy`` under the train/test
    directories configured in ``train.config``.

    Returns:
        tuple: ``(train_datas, train_labels, test_datas, test_labels)`` where
        data is scaled by ``max_pixel`` and labels are one-hot ``(n, classes)``.
    """
    classes = 100
    input_size = 1024  # flattened 32x32 grayscale images; documents the row width
    max_pixel = 255  # pixels are stored as 0-255, so divide to normalise
    train_dir_path = os.path.join(config.data_save_path, config.cifar_100_train_L_data)
    test_dir_path = os.path.join(config.data_save_path, config.cifar_100_test_L_data)
    # Collect the per-class arrays first and concatenate once at the end;
    # concatenating inside the loop re-copies the whole accumulated array
    # on every iteration (quadratic in total size).
    train_parts, train_label_parts = [], []
    test_parts, test_label_parts = [], []
    for i in range(classes):
        class_train = np.load(os.path.join(train_dir_path, str(i) + '.npy'))
        class_train_labels = np.zeros(shape=(class_train.shape[0], classes))
        class_train_labels[:, i] = 1  # one-hot: column i marks class i
        class_test = np.load(os.path.join(test_dir_path, str(i) + '.npy'))
        class_test_labels = np.zeros(shape=(class_test.shape[0], classes))
        class_test_labels[:, i] = 1
        train_parts.append(class_train)
        train_label_parts.append(class_train_labels)
        test_parts.append(class_test)
        test_label_parts.append(class_test_labels)
    train_datas = np.concatenate(train_parts)
    train_labels = np.concatenate(train_label_parts)
    test_datas = np.concatenate(test_parts)
    test_labels = np.concatenate(test_label_parts)
    return train_datas / max_pixel, train_labels, test_datas / max_pixel, test_labels
if __name__ == '__main__':
    # Smoke-test one of the loaders and print the shapes of the four splits.
    # data = np.zeros(shape=(5, 10))
    # print(data)
    # # Set element 1 of every row to 1, i.e. set column 1 to 1 (one-hot demo).
    # data[:, 1] = 1
    # print(data)
    # train_data, train_label, test_data, test_label = get_mnist_data()
    train_data, train_label, test_data, test_label = get_cifar_10_data()
    # train_data, train_label, test_data, test_label = get_cifar_100_data()
    print(train_data.shape)
    print(test_data.shape)
    print(train_label.shape)
    print(test_label.shape)
| [
"numpy.concatenate",
"numpy.empty",
"os.path.join",
"numpy.zeros"
] | [((234, 294), 'os.path.join', 'os.path.join', (['config.data_save_path', 'config.mnist_train_data'], {}), '(config.data_save_path, config.mnist_train_data)\n', (246, 294), False, 'import os\n'), ((315, 374), 'os.path.join', 'os.path.join', (['config.data_save_path', 'config.mnist_test_data'], {}), '(config.data_save_path, config.mnist_test_data)\n', (327, 374), False, 'import os\n'), ((393, 424), 'numpy.empty', 'np.empty', ([], {'shape': '(0, input_size)'}), '(shape=(0, input_size))\n', (401, 424), True, 'import numpy as np\n'), ((444, 472), 'numpy.empty', 'np.empty', ([], {'shape': '(0, classes)'}), '(shape=(0, classes))\n', (452, 472), True, 'import numpy as np\n'), ((490, 521), 'numpy.empty', 'np.empty', ([], {'shape': '(0, input_size)'}), '(shape=(0, input_size))\n', (498, 521), True, 'import numpy as np\n'), ((540, 568), 'numpy.empty', 'np.empty', ([], {'shape': '(0, classes)'}), '(shape=(0, classes))\n', (548, 568), True, 'import numpy as np\n'), ((1465, 1530), 'os.path.join', 'os.path.join', (['config.data_save_path', 'config.cifar_10_train_L_data'], {}), '(config.data_save_path, config.cifar_10_train_L_data)\n', (1477, 1530), False, 'import os\n'), ((1551, 1615), 'os.path.join', 'os.path.join', (['config.data_save_path', 'config.cifar_10_test_L_data'], {}), '(config.data_save_path, config.cifar_10_test_L_data)\n', (1563, 1615), False, 'import os\n'), ((1634, 1665), 'numpy.empty', 'np.empty', ([], {'shape': '(0, input_size)'}), '(shape=(0, input_size))\n', (1642, 1665), True, 'import numpy as np\n'), ((1685, 1713), 'numpy.empty', 'np.empty', ([], {'shape': '(0, classes)'}), '(shape=(0, classes))\n', (1693, 1713), True, 'import numpy as np\n'), ((1731, 1762), 'numpy.empty', 'np.empty', ([], {'shape': '(0, input_size)'}), '(shape=(0, input_size))\n', (1739, 1762), True, 'import numpy as np\n'), ((1781, 1809), 'numpy.empty', 'np.empty', ([], {'shape': '(0, classes)'}), '(shape=(0, classes))\n', (1789, 1809), True, 'import numpy as np\n'), ((2708, 2774), 
'os.path.join', 'os.path.join', (['config.data_save_path', 'config.cifar_100_train_L_data'], {}), '(config.data_save_path, config.cifar_100_train_L_data)\n', (2720, 2774), False, 'import os\n'), ((2795, 2860), 'os.path.join', 'os.path.join', (['config.data_save_path', 'config.cifar_100_test_L_data'], {}), '(config.data_save_path, config.cifar_100_test_L_data)\n', (2807, 2860), False, 'import os\n'), ((2879, 2910), 'numpy.empty', 'np.empty', ([], {'shape': '(0, input_size)'}), '(shape=(0, input_size))\n', (2887, 2910), True, 'import numpy as np\n'), ((2930, 2958), 'numpy.empty', 'np.empty', ([], {'shape': '(0, classes)'}), '(shape=(0, classes))\n', (2938, 2958), True, 'import numpy as np\n'), ((2976, 3007), 'numpy.empty', 'np.empty', ([], {'shape': '(0, input_size)'}), '(shape=(0, input_size))\n', (2984, 3007), True, 'import numpy as np\n'), ((3026, 3054), 'numpy.empty', 'np.empty', ([], {'shape': '(0, classes)'}), '(shape=(0, classes))\n', (3034, 3054), True, 'import numpy as np\n'), ((708, 760), 'numpy.zeros', 'np.zeros', ([], {'shape': '(temp_train_datas.shape[0], classes)'}), '(shape=(temp_train_datas.shape[0], classes))\n', (716, 760), True, 'import numpy as np\n'), ((904, 955), 'numpy.zeros', 'np.zeros', ([], {'shape': '(temp_test_datas.shape[0], classes)'}), '(shape=(temp_test_datas.shape[0], classes))\n', (912, 955), True, 'import numpy as np\n'), ((1014, 1061), 'numpy.concatenate', 'np.concatenate', (['(train_datas, temp_train_datas)'], {}), '((train_datas, temp_train_datas))\n', (1028, 1061), True, 'import numpy as np\n'), ((1085, 1134), 'numpy.concatenate', 'np.concatenate', (['(train_labels, temp_train_labels)'], {}), '((train_labels, temp_train_labels))\n', (1099, 1134), True, 'import numpy as np\n'), ((1156, 1201), 'numpy.concatenate', 'np.concatenate', (['(test_datas, temp_test_datas)'], {}), '((test_datas, temp_test_datas))\n', (1170, 1201), True, 'import numpy as np\n'), ((1224, 1271), 'numpy.concatenate', 'np.concatenate', (['(test_labels, 
temp_test_labels)'], {}), '((test_labels, temp_test_labels))\n', (1238, 1271), True, 'import numpy as np\n'), ((1949, 2001), 'numpy.zeros', 'np.zeros', ([], {'shape': '(temp_train_datas.shape[0], classes)'}), '(shape=(temp_train_datas.shape[0], classes))\n', (1957, 2001), True, 'import numpy as np\n'), ((2145, 2196), 'numpy.zeros', 'np.zeros', ([], {'shape': '(temp_test_datas.shape[0], classes)'}), '(shape=(temp_test_datas.shape[0], classes))\n', (2153, 2196), True, 'import numpy as np\n'), ((2255, 2302), 'numpy.concatenate', 'np.concatenate', (['(train_datas, temp_train_datas)'], {}), '((train_datas, temp_train_datas))\n', (2269, 2302), True, 'import numpy as np\n'), ((2326, 2375), 'numpy.concatenate', 'np.concatenate', (['(train_labels, temp_train_labels)'], {}), '((train_labels, temp_train_labels))\n', (2340, 2375), True, 'import numpy as np\n'), ((2397, 2442), 'numpy.concatenate', 'np.concatenate', (['(test_datas, temp_test_datas)'], {}), '((test_datas, temp_test_datas))\n', (2411, 2442), True, 'import numpy as np\n'), ((2465, 2512), 'numpy.concatenate', 'np.concatenate', (['(test_labels, temp_test_labels)'], {}), '((test_labels, temp_test_labels))\n', (2479, 2512), True, 'import numpy as np\n'), ((3194, 3246), 'numpy.zeros', 'np.zeros', ([], {'shape': '(temp_train_datas.shape[0], classes)'}), '(shape=(temp_train_datas.shape[0], classes))\n', (3202, 3246), True, 'import numpy as np\n'), ((3390, 3441), 'numpy.zeros', 'np.zeros', ([], {'shape': '(temp_test_datas.shape[0], classes)'}), '(shape=(temp_test_datas.shape[0], classes))\n', (3398, 3441), True, 'import numpy as np\n'), ((3500, 3547), 'numpy.concatenate', 'np.concatenate', (['(train_datas, temp_train_datas)'], {}), '((train_datas, temp_train_datas))\n', (3514, 3547), True, 'import numpy as np\n'), ((3571, 3620), 'numpy.concatenate', 'np.concatenate', (['(train_labels, temp_train_labels)'], {}), '((train_labels, temp_train_labels))\n', (3585, 3620), True, 'import numpy as np\n'), ((3642, 3687), 
'numpy.concatenate', 'np.concatenate', (['(test_datas, temp_test_datas)'], {}), '((test_datas, temp_test_datas))\n', (3656, 3687), True, 'import numpy as np\n'), ((3710, 3757), 'numpy.concatenate', 'np.concatenate', (['(test_labels, temp_test_labels)'], {}), '((test_labels, temp_test_labels))\n', (3724, 3757), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
def train_knn(X, Y):
    """Train a 1-nearest-neighbor classifier on data X and labels Y.

    The data is split 62.5% / 37.5% into train/test, the classifier is
    fitted on the training portion, and the held-out accuracy is printed.

    Parameters
    ----------
    X : array-like
        Feature matrix, one row per sample.
    Y : array-like
        Class labels, one per sample.

    Returns
    -------
    KNeighborsClassifier
        The fitted classifier.
    """
    X = np.array(X)
    Y = np.array(Y)
    # train_test_split already returns numpy arrays when fed arrays, so the
    # previous re-wrapping of every split in np.array() was redundant.
    x_train, x_test, y_train, y_test = train_test_split(X, Y, train_size=0.625)
    clf = KNeighborsClassifier(n_neighbors=1)
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    print(accuracy_score(y_test, y_pred))  # report held-out accuracy
    return clf
def train_forest(X, Y):
    """Train a Random Forest classifier on data X and labels Y.

    The data is split 62.5% / 37.5% into train/test, the classifier is
    fitted on the training portion, and the held-out accuracy is printed.

    Parameters
    ----------
    X : array-like
        Feature matrix, one row per sample.
    Y : array-like
        Class labels, one per sample.

    Returns
    -------
    RandomForestClassifier
        The fitted classifier.
    """
    X = np.array(X)
    Y = np.array(Y)
    # train_test_split already returns numpy arrays when fed arrays, so the
    # previous re-wrapping of every split in np.array() was redundant.
    x_train, x_test, y_train, y_test = train_test_split(X, Y, train_size=0.625)
    clf = RandomForestClassifier(n_estimators=50)
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    print(accuracy_score(y_test, y_pred))  # report held-out accuracy
    return clf
| [
"sklearn.model_selection.train_test_split",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.ensemble.RandomForestClassifier",
"numpy.array",
"sklearn.metrics.accuracy_score"
] | [((351, 362), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (359, 362), True, 'import numpy as np\n'), ((371, 382), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (379, 382), True, 'import numpy as np\n'), ((423, 463), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'train_size': '(0.625)'}), '(X, Y, train_size=0.625)\n', (439, 463), False, 'from sklearn.model_selection import train_test_split\n'), ((478, 495), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (486, 495), True, 'import numpy as np\n'), ((510, 527), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (518, 527), True, 'import numpy as np\n'), ((541, 557), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (549, 557), True, 'import numpy as np\n'), ((571, 587), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (579, 587), True, 'import numpy as np\n'), ((599, 634), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(1)'}), '(n_neighbors=1)\n', (619, 634), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((889, 900), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (897, 900), True, 'import numpy as np\n'), ((909, 920), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (917, 920), True, 'import numpy as np\n'), ((961, 1001), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'train_size': '(0.625)'}), '(X, Y, train_size=0.625)\n', (977, 1001), False, 'from sklearn.model_selection import train_test_split\n'), ((1016, 1033), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (1024, 1033), True, 'import numpy as np\n'), ((1048, 1065), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (1056, 1065), True, 'import numpy as np\n'), ((1079, 1095), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (1087, 1095), True, 'import numpy as np\n'), ((1109, 1125), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (1117, 1125), 
True, 'import numpy as np\n'), ((1141, 1180), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(50)'}), '(n_estimators=50)\n', (1163, 1180), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((711, 741), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (725, 741), False, 'from sklearn.metrics import accuracy_score\n'), ((1255, 1285), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1269, 1285), False, 'from sklearn.metrics import accuracy_score\n')] |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv("data/car data.csv")

# Drop 'Car_Name': a free-text identifier with no predictive value.
features = list(filter(lambda x: x != "Car_Name", list(df.columns)))
final_dataset = df[features].copy()  # .copy() avoids SettingWithCopyWarning on the writes below

# The car's age is more informative to a model than the raw model year.
final_dataset['No_Year'] = 2020 - df['Year']
final_dataset.drop(['Year'], axis=1, inplace=True)

# One-hot encode categorical columns; drop_first avoids the dummy-variable trap.
final_dataset = pd.get_dummies(final_dataset, drop_first=True)

X = final_dataset.iloc[:, 1:]  # Independent features
y = final_dataset.iloc[:, 0]  # Dependent feature (selling price, first column)

# Extrapolate the feature importance
from sklearn.ensemble import ExtraTreesRegressor
etr = ExtraTreesRegressor()
etr.fit(X, y)
print(dict(zip(list(X.columns), etr.feature_importances_)))

# Train test split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

from sklearn.ensemble import RandomForestRegressor

# Hyperparameter tuning search space
n_estimators = [int(x) for x in np.linspace(start=100, stop=1200, num=12)]  # Number of trees in RF
max_features = ['auto', 'sqrt']  # Number of features to be considered for each split
max_depth = [int(x) for x in np.linspace(5, 30, num=6)]  # Maximum depth of each tree
min_samples_split = [2, 5, 10, 15, 100]  # Minimum number of samples required to split a node
min_samples_leaf = [1, 2, 5, 10]  # Minimum number of samples required at each leaf node

from sklearn.model_selection import RandomizedSearchCV
random_grid = {
    "n_estimators": n_estimators,
    "max_features": max_features,
    "max_depth": max_depth,
    "min_samples_split": min_samples_split,
    "min_samples_leaf": min_samples_leaf
}

# Init RandomizedSearchCV to find best tree
# (the earlier dead `rf_random = RandomForestRegressor()` placeholder was removed)
rf = RandomForestRegressor()
rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, scoring="neg_mean_squared_error",
                               n_iter=10, cv=5, verbose=2, random_state=42)
# BUG FIX: the search was never fitted, so predict() below raised
# NotFittedError. Fit on the training split before predicting.
rf_random.fit(X_train, y_train)

# Prediction for test set
predictions = rf_random.predict(X_test)

# Storing the model in a serialized file (pickle file);
# `with` guarantees the file handle is flushed and closed.
import pickle
with open('data/random_forest_regression_model.pkl', 'wb') as file:
    pickle.dump(rf_random, file)
| [
"sklearn.ensemble.RandomForestRegressor",
"pickle.dump",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.ExtraTreesRegressor",
"numpy.linspace",
"pandas.get_dummies",
"sklearn.model_selection.RandomizedSearchCV"
] | [((99, 131), 'pandas.read_csv', 'pd.read_csv', (['"""data/car data.csv"""'], {}), "('data/car data.csv')\n", (110, 131), True, 'import pandas as pd\n'), ((341, 387), 'pandas.get_dummies', 'pd.get_dummies', (['final_dataset'], {'drop_first': '(True)'}), '(final_dataset, drop_first=True)\n', (355, 387), True, 'import pandas as pd\n'), ((590, 611), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (609, 611), False, 'from sklearn.ensemble import ExtraTreesRegressor\n'), ((794, 831), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (810, 831), False, 'from sklearn.model_selection import train_test_split\n'), ((896, 919), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (917, 919), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((1708, 1731), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (1729, 1731), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((1744, 1893), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', ([], {'estimator': 'rf', 'param_distributions': 'random_grid', 'scoring': '"""neg_mean_squared_error"""', 'n_iter': '(10)', 'cv': '(5)', 'verbose': '(2)', 'random_state': '(42)'}), "(estimator=rf, param_distributions=random_grid, scoring=\n 'neg_mean_squared_error', n_iter=10, cv=5, verbose=2, random_state=42)\n", (1762, 1893), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((2118, 2146), 'pickle.dump', 'pickle.dump', (['rf_random', 'file'], {}), '(rf_random, file)\n', (2129, 2146), False, 'import pickle\n'), ((977, 1018), 'numpy.linspace', 'np.linspace', ([], {'start': '(100)', 'stop': '(1200)', 'num': '(12)'}), '(start=100, stop=1200, num=12)\n', (988, 1018), True, 'import numpy as np\n'), ((1158, 1183), 'numpy.linspace', 'np.linspace', (['(5)', '(30)'], {'num': '(6)'}), '(5, 30, num=6)\n', (1169, 
1183), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""ImageProcessing.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1p32u78fh3vzTuK3TgaF9Aj9NXw5ztAnC
"""

from scipy import ndimage
import numpy as np

"""###Opening and writing to image files"""

from scipy import misc
import imageio

f = misc.face()
imageio.imsave('face.png', f)  # uses the Image module (PIL)

import matplotlib.pyplot as plt
plt.imshow(f)
plt.show()

from scipy import misc
import imageio
face = misc.face()
imageio.imsave('face.png', face)  # First we need to create the PNG file

face = imageio.imread('face.png')
type(face)
face.shape, face.dtype

face.tofile('face.raw')  # Create raw file
face_from_raw = np.fromfile('face.raw', dtype=np.uint8)
face_from_raw.shape
face_from_raw.shape = (768, 1024, 3)  # reinterpret the flat buffer as H x W x RGB
face_memmap = np.memmap('face.raw', dtype=np.uint8, shape=(768, 1024, 3))

# Write ten 100x100 images of random noise to disk.
for i in range(10):
    im = np.random.randint(0, 256, 10000).reshape((100, 100))
    imageio.imsave('random_%02d.png' % i, im)

from glob import glob
filelist = glob('random*.png')
filelist.sort()

"""###Displaying images"""

f = misc.face(gray=True)  # retrieve a grayscale image
import matplotlib.pyplot as plt
plt.imshow(f, cmap=plt.cm.gray)
plt.imshow(f, cmap=plt.cm.gray, vmin=30, vmax=200)  # clip the displayed intensity range
plt.axis('off')
plt.contour(f, [50, 200])

"""###Basic manipulations"""

plt.imshow(f[320:340, 510:530], cmap=plt.cm.gray, interpolation='bilinear')
plt.imshow(f[320:340, 510:530], cmap=plt.cm.gray, interpolation='nearest')

face = misc.face(gray=True)
face[0, 40]
# Slicing
face[10:13, 20:23]
face[100:120] = 255

"""###Geometrical transformations"""

lx, ly = face.shape
X, Y = np.ogrid[0:lx, 0:ly]
mask = (X - lx / 2) ** 2 + (Y - ly / 2) ** 2 > lx * ly / 4
# Masks: black out everything outside a central ellipse
face[mask] = 0
# Fancy indexing: set the main diagonal
face[range(400), range(400)] = 25

face = misc.face(gray=True)
lx, ly = face.shape
# Cropping
crop_face = face[lx // 4: - lx // 4, ly // 4: - ly // 4]
plt.imshow(crop_face)
# up <-> down flip
flip_ud_face = np.flipud(face)
plt.imshow(flip_ud_face)
# rotation
rotate_face = ndimage.rotate(face, 45)
rotate_face_noreshape = ndimage.rotate(face, 45, reshape=False)
plt.imshow(rotate_face)

"""###Blurring/smoothing"""

from scipy import misc
face = misc.face(gray=True)
blurred_face = ndimage.gaussian_filter(face, sigma=3)
very_blurred = ndimage.gaussian_filter(face, sigma=5)
plt.imshow(very_blurred)

"""###Sharpening"""

from scipy import misc
face = misc.face(gray=True).astype(float)
blurred_f = ndimage.gaussian_filter(face, 3)
filter_blurred_f = ndimage.gaussian_filter(blurred_f, 1)
# Unsharp masking: add back alpha times the detail lost in the second blur.
alpha = 30
sharpened = blurred_f + alpha * (blurred_f - filter_blurred_f)
plt.imshow(sharpened)

"""###Segmentation"""

n = 10
l = 256
im = np.zeros((l, l))
np.random.seed(1)
points = l*np.random.random((2, n**2))
# BUG FIX: np.int / np.float were deprecated in NumPy 1.20 and removed in
# 1.24; use the builtin int / float types instead.
im[(points[0]).astype(int), (points[1]).astype(int)] = 1
im = ndimage.gaussian_filter(im, sigma=l/(4.*n))

mask = (im > im.mean()).astype(float)
mask += 0.1 * im
img = mask + 0.2*np.random.randn(*mask.shape)

hist, bin_edges = np.histogram(img, bins=60)
bin_centers = 0.5*(bin_edges[:-1] + bin_edges[1:])

binary_img = img > 0.5

# Remove small white regions
open_img = ndimage.binary_opening(binary_img)
# Remove small black hole
close_img = ndimage.binary_closing(open_img)
plt.imshow(close_img)

"""###Edge Detection using Canny Edge Detector"""

import cv2
import numpy as np
from matplotlib import pyplot as plt

plt.figure(figsize=(16, 16))
img_gs = cv2.imread('face.png', cv2.IMREAD_GRAYSCALE)
cv2.imwrite('gs.jpg', img_gs)

edges = cv2.Canny(img_gs, 100,200)

plt.subplot(121), plt.imshow(img_gs)
plt.title('Original Gray Scale Image')
plt.subplot(122), plt.imshow(edges)
plt.title('Edge Image')
plt.show()

"""###Measuring objects properties"""

n = 10
l = 256
im = np.zeros((l, l))
points = l*np.random.random((2, n**2))
# BUG FIX: np.int was removed in NumPy 1.24; use the builtin int type.
im[(points[0]).astype(int), (points[1]).astype(int)] = 1
im = ndimage.gaussian_filter(im, sigma=l/(4.*n))
mask = im > im.mean()

# Label connected components of the mask.
label_im, nb_labels = ndimage.label(mask)
nb_labels
plt.imshow(label_im) | [
"numpy.fromfile",
"imageio.imsave",
"scipy.ndimage.binary_opening",
"scipy.ndimage.gaussian_filter",
"scipy.ndimage.rotate",
"matplotlib.pyplot.imshow",
"numpy.histogram",
"numpy.random.random",
"numpy.memmap",
"scipy.ndimage.label",
"matplotlib.pyplot.contour",
"numpy.random.seed",
"matplot... | [((336, 347), 'scipy.misc.face', 'misc.face', ([], {}), '()\n', (345, 347), False, 'from scipy import misc\n'), ((348, 377), 'imageio.imsave', 'imageio.imsave', (['"""face.png"""', 'f'], {}), "('face.png', f)\n", (362, 377), False, 'import imageio\n'), ((441, 454), 'matplotlib.pyplot.imshow', 'plt.imshow', (['f'], {}), '(f)\n', (451, 454), True, 'from matplotlib import pyplot as plt\n'), ((455, 465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (463, 465), True, 'from matplotlib import pyplot as plt\n'), ((512, 523), 'scipy.misc.face', 'misc.face', ([], {}), '()\n', (521, 523), False, 'from scipy import misc\n'), ((524, 556), 'imageio.imsave', 'imageio.imsave', (['"""face.png"""', 'face'], {}), "('face.png', face)\n", (538, 556), False, 'import imageio\n'), ((604, 630), 'imageio.imread', 'imageio.imread', (['"""face.png"""'], {}), "('face.png')\n", (618, 630), False, 'import imageio\n'), ((725, 764), 'numpy.fromfile', 'np.fromfile', (['"""face.raw"""'], {'dtype': 'np.uint8'}), "('face.raw', dtype=np.uint8)\n", (736, 764), True, 'import numpy as np\n'), ((838, 897), 'numpy.memmap', 'np.memmap', (['"""face.raw"""'], {'dtype': 'np.uint8', 'shape': '(768, 1024, 3)'}), "('face.raw', dtype=np.uint8, shape=(768, 1024, 3))\n", (847, 897), True, 'import numpy as np\n'), ((1060, 1079), 'glob.glob', 'glob', (['"""random*.png"""'], {}), "('random*.png')\n", (1064, 1079), False, 'from glob import glob\n'), ((1129, 1149), 'scipy.misc.face', 'misc.face', ([], {'gray': '(True)'}), '(gray=True)\n', (1138, 1149), False, 'from scipy import misc\n'), ((1212, 1243), 'matplotlib.pyplot.imshow', 'plt.imshow', (['f'], {'cmap': 'plt.cm.gray'}), '(f, cmap=plt.cm.gray)\n', (1222, 1243), True, 'from matplotlib import pyplot as plt\n'), ((1245, 1295), 'matplotlib.pyplot.imshow', 'plt.imshow', (['f'], {'cmap': 'plt.cm.gray', 'vmin': '(30)', 'vmax': '(200)'}), '(f, cmap=plt.cm.gray, vmin=30, vmax=200)\n', (1255, 1295), True, 'from matplotlib import pyplot as plt\n'), 
((1297, 1312), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1305, 1312), True, 'from matplotlib import pyplot as plt\n'), ((1314, 1339), 'matplotlib.pyplot.contour', 'plt.contour', (['f', '[50, 200]'], {}), '(f, [50, 200])\n', (1325, 1339), True, 'from matplotlib import pyplot as plt\n'), ((1371, 1446), 'matplotlib.pyplot.imshow', 'plt.imshow', (['f[320:340, 510:530]'], {'cmap': 'plt.cm.gray', 'interpolation': '"""bilinear"""'}), "(f[320:340, 510:530], cmap=plt.cm.gray, interpolation='bilinear')\n", (1381, 1446), True, 'from matplotlib import pyplot as plt\n'), ((1448, 1522), 'matplotlib.pyplot.imshow', 'plt.imshow', (['f[320:340, 510:530]'], {'cmap': 'plt.cm.gray', 'interpolation': '"""nearest"""'}), "(f[320:340, 510:530], cmap=plt.cm.gray, interpolation='nearest')\n", (1458, 1522), True, 'from matplotlib import pyplot as plt\n'), ((1531, 1551), 'scipy.misc.face', 'misc.face', ([], {'gray': '(True)'}), '(gray=True)\n', (1540, 1551), False, 'from scipy import misc\n'), ((1843, 1863), 'scipy.misc.face', 'misc.face', ([], {'gray': '(True)'}), '(gray=True)\n', (1852, 1863), False, 'from scipy import misc\n'), ((1953, 1974), 'matplotlib.pyplot.imshow', 'plt.imshow', (['crop_face'], {}), '(crop_face)\n', (1963, 1974), True, 'from matplotlib import pyplot as plt\n'), ((2010, 2025), 'numpy.flipud', 'np.flipud', (['face'], {}), '(face)\n', (2019, 2025), True, 'import numpy as np\n'), ((2026, 2050), 'matplotlib.pyplot.imshow', 'plt.imshow', (['flip_ud_face'], {}), '(flip_ud_face)\n', (2036, 2050), True, 'from matplotlib import pyplot as plt\n'), ((2077, 2101), 'scipy.ndimage.rotate', 'ndimage.rotate', (['face', '(45)'], {}), '(face, 45)\n', (2091, 2101), False, 'from scipy import ndimage\n'), ((2126, 2165), 'scipy.ndimage.rotate', 'ndimage.rotate', (['face', '(45)'], {'reshape': '(False)'}), '(face, 45, reshape=False)\n', (2140, 2165), False, 'from scipy import ndimage\n'), ((2166, 2189), 'matplotlib.pyplot.imshow', 'plt.imshow', 
(['rotate_face'], {}), '(rotate_face)\n', (2176, 2189), True, 'from matplotlib import pyplot as plt\n'), ((2250, 2270), 'scipy.misc.face', 'misc.face', ([], {'gray': '(True)'}), '(gray=True)\n', (2259, 2270), False, 'from scipy import misc\n'), ((2286, 2324), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['face'], {'sigma': '(3)'}), '(face, sigma=3)\n', (2309, 2324), False, 'from scipy import ndimage\n'), ((2340, 2378), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['face'], {'sigma': '(5)'}), '(face, sigma=5)\n', (2363, 2378), False, 'from scipy import ndimage\n'), ((2379, 2403), 'matplotlib.pyplot.imshow', 'plt.imshow', (['very_blurred'], {}), '(very_blurred)\n', (2389, 2403), True, 'from matplotlib import pyplot as plt\n'), ((2503, 2535), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['face', '(3)'], {}), '(face, 3)\n', (2526, 2535), False, 'from scipy import ndimage\n'), ((2556, 2593), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['blurred_f', '(1)'], {}), '(blurred_f, 1)\n', (2579, 2593), False, 'from scipy import ndimage\n'), ((2668, 2689), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sharpened'], {}), '(sharpened)\n', (2678, 2689), True, 'from matplotlib import pyplot as plt\n'), ((2734, 2750), 'numpy.zeros', 'np.zeros', (['(l, l)'], {}), '((l, l))\n', (2742, 2750), True, 'import numpy as np\n'), ((2751, 2768), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2765, 2768), True, 'import numpy as np\n'), ((2876, 2924), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['im'], {'sigma': '(l / (4.0 * n))'}), '(im, sigma=l / (4.0 * n))\n', (2899, 2924), False, 'from scipy import ndimage\n'), ((3044, 3070), 'numpy.histogram', 'np.histogram', (['img'], {'bins': '(60)'}), '(img, bins=60)\n', (3056, 3070), True, 'import numpy as np\n'), ((3187, 3221), 'scipy.ndimage.binary_opening', 'ndimage.binary_opening', (['binary_img'], {}), '(binary_img)\n', (3209, 3221), False, 'from scipy 
import ndimage\n'), ((3260, 3292), 'scipy.ndimage.binary_closing', 'ndimage.binary_closing', (['open_img'], {}), '(open_img)\n', (3282, 3292), False, 'from scipy import ndimage\n'), ((3293, 3314), 'matplotlib.pyplot.imshow', 'plt.imshow', (['close_img'], {}), '(close_img)\n', (3303, 3314), True, 'from matplotlib import pyplot as plt\n'), ((3434, 3462), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 16)'}), '(figsize=(16, 16))\n', (3444, 3462), True, 'from matplotlib import pyplot as plt\n'), ((3472, 3516), 'cv2.imread', 'cv2.imread', (['"""face.png"""', 'cv2.IMREAD_GRAYSCALE'], {}), "('face.png', cv2.IMREAD_GRAYSCALE)\n", (3482, 3516), False, 'import cv2\n'), ((3517, 3546), 'cv2.imwrite', 'cv2.imwrite', (['"""gs.jpg"""', 'img_gs'], {}), "('gs.jpg', img_gs)\n", (3528, 3546), False, 'import cv2\n'), ((3555, 3582), 'cv2.Canny', 'cv2.Canny', (['img_gs', '(100)', '(200)'], {}), '(img_gs, 100, 200)\n', (3564, 3582), False, 'import cv2\n'), ((3619, 3657), 'matplotlib.pyplot.title', 'plt.title', (['"""Original Gray Scale Image"""'], {}), "('Original Gray Scale Image')\n", (3628, 3657), True, 'from matplotlib import pyplot as plt\n'), ((3694, 3717), 'matplotlib.pyplot.title', 'plt.title', (['"""Edge Image"""'], {}), "('Edge Image')\n", (3703, 3717), True, 'from matplotlib import pyplot as plt\n'), ((3718, 3728), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3726, 3728), True, 'from matplotlib import pyplot as plt\n'), ((3789, 3805), 'numpy.zeros', 'np.zeros', (['(l, l)'], {}), '((l, l))\n', (3797, 3805), True, 'import numpy as np\n'), ((3913, 3961), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['im'], {'sigma': '(l / (4.0 * n))'}), '(im, sigma=l / (4.0 * n))\n', (3936, 3961), False, 'from scipy import ndimage\n'), ((4002, 4021), 'scipy.ndimage.label', 'ndimage.label', (['mask'], {}), '(mask)\n', (4015, 4021), False, 'from scipy import ndimage\n'), ((4033, 4053), 'matplotlib.pyplot.imshow', 'plt.imshow', (['label_im'], {}), 
'(label_im)\n', (4043, 4053), True, 'from matplotlib import pyplot as plt\n'), ((985, 1026), 'imageio.imsave', 'imageio.imsave', (["('random_%02d.png' % i)", 'im'], {}), "('random_%02d.png' % i, im)\n", (999, 1026), False, 'import imageio\n'), ((2780, 2809), 'numpy.random.random', 'np.random.random', (['(2, n ** 2)'], {}), '((2, n ** 2))\n', (2796, 2809), True, 'import numpy as np\n'), ((3582, 3598), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (3593, 3598), True, 'from matplotlib import pyplot as plt\n'), ((3600, 3618), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_gs'], {}), '(img_gs)\n', (3610, 3618), True, 'from matplotlib import pyplot as plt\n'), ((3658, 3674), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (3669, 3674), True, 'from matplotlib import pyplot as plt\n'), ((3676, 3693), 'matplotlib.pyplot.imshow', 'plt.imshow', (['edges'], {}), '(edges)\n', (3686, 3693), True, 'from matplotlib import pyplot as plt\n'), ((3817, 3846), 'numpy.random.random', 'np.random.random', (['(2, n ** 2)'], {}), '((2, n ** 2))\n', (3833, 3846), True, 'import numpy as np\n'), ((2456, 2476), 'scipy.misc.face', 'misc.face', ([], {'gray': '(True)'}), '(gray=True)\n', (2465, 2476), False, 'from scipy import misc\n'), ((2996, 3024), 'numpy.random.randn', 'np.random.randn', (['*mask.shape'], {}), '(*mask.shape)\n', (3011, 3024), True, 'import numpy as np\n'), ((928, 960), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '(10000)'], {}), '(0, 256, 10000)\n', (945, 960), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import seaborn as sns
from scipy import stats
# from scipy.optimize import root
from pyapprox import generate_independent_random_samples
import matplotlib as mpl
from scipy import stats
from scipy.stats import spearmanr
# Global matplotlib appearance settings shared by every figure in this module.
mpl.rcParams.update({
    'font.size': 16,
    'lines.linewidth': 3,
    'text.usetex': False,       # render text with matplotlib, not LaTeX
    'savefig.bbox': 'tight',
    'savefig.format': 'png',    # gives best resolution plots
    'axes.labelsize': 20,
    'axes.titlesize': 20,
    'xtick.labelsize': 20,
    'ytick.labelsize': 20,
    'legend.fontsize': 16,
})
# LaTeX preamble (only used if text.usetex is switched on).
mpl.rcParams['text.latex.preamble'] = \
    r'\usepackage{siunitx}\usepackage{amsmath}\usepackage{amssymb}'
from funcs.read_data import file_settings, variables_prep
from adaptive_gp_model import *
# Calculate the ratio of samples in the subregion
def ratio_subreg(gp):
    """
    Compute the fraction of training samples landing in the subregion
    (objective value > 0) for each batch of the adaptive procedure.

    Parameters:
    ===========
    gp: Gaussian Process object exposing the fitted targets ``y_train_``.

    Return:
    =======
    ratio_df: pd.DataFrame with columns 'Subregion' (per-batch ratio),
        'FullSpace' (cumulative ratio) and 'num_samples' (cumulative
        sample counts), one row per evaluated batch.
    """
    train_vals = gp.y_train_
    # Batch sizes used by the adaptive sampling schedule.
    batch_sizes = np.asarray([20] + [8] * 10 + [16] * 20 + [24] * 20 + [40] * 18)
    cum_sizes = np.cumsum(batch_sizes)
    n_rows = batch_sizes.shape[0] - 2
    ratios = np.zeros(shape=(n_rows, 2))
    cum_hits = 0
    for k in range(n_rows):
        # Count positive-objective samples within batch k+1.
        batch = train_vals[cum_sizes[k]:cum_sizes[k + 1]]
        hits = int((batch > 0).sum())
        cum_hits += hits
        ratios[k, 0] = hits / batch_sizes[k + 1]
        ratios[k, 1] = cum_hits / cum_sizes[k + 1]
    ratio_df = pd.DataFrame(data=ratios, index=np.arange(n_rows),
                            columns=['Subregion', 'FullSpace'])
    ratio_df['num_samples'] = cum_sizes[1:-1]
    return ratio_df
# END ratio_subreg()
from funcs.utils import define_constants
def choose_fixed_point(plot_range, dot_samples, samples_opt, dot_vals):
    """
    Function used to set the nominal point for fixing parameters at.

    Parameters:
    ===========
    plot_range: str, decide which type of nominal values to use. One of
        'full_mean', 'sub_median', 'sub_mean', 'sub_rand', 'full_rand',
        'sub_max', 'full_max'.
    dot_samples: np.ndarray, of shape D*N where D is the number of parameters,
        the initial parameter samples for calculation objective functions
    samples_opt: np.ndarray, of shape D*M where D is the number of parameters,
        parameter samples resulting in objective functions above the threshold
    dot_vals: np.ndarray, objective function values from dot_samples

    Return:
    ===========
    x_default: np.ndarray, the nominal values for all D parameters
    fig_path: str, the dir defined by the type of nominal values for results to save

    Raises:
    =======
    ValueError: if plot_range is not one of the recognized options.
    """
    if plot_range == 'full_mean':
        x_default = define_constants(dot_samples, 13, stats = np.mean)
        fig_path = 'fix_mean_full'
    elif plot_range == 'sub_median':
        samples_opt = dot_samples[:, np.where(dot_vals>0.382)[0]]
        x_default = define_constants(samples_opt, 13, stats = np.median)
        fig_path = 'fix_median_subreg'
    elif plot_range == 'sub_mean':
        samples_opt = dot_samples[:, np.where(dot_vals>0.382)[0]]
        x_default = define_constants(samples_opt, 13, stats = np.mean)
        fig_path = 'fix_mean_subreg'
    elif plot_range == 'sub_rand':
        # 8 for analytic, 38 for sample
        x_default = dot_samples[:, np.where(dot_vals>0.382)[0]][:, 38]
        fig_path = 'fix_rand_subreg'
    elif plot_range == 'full_rand':
        # BUGFIX: a leftover debugging `breakpoint()` was removed here.
        # NOTE(review): fig_path is the same as the 'sub_rand' branch —
        # confirm whether 'fix_rand_full' was intended.
        x_default = dot_samples[:, np.where(dot_vals>0.382)[0]][:, 8] # 8 for analytic, 38 for sample
        fig_path = 'fix_rand_subreg'
    elif plot_range in ('sub_max', 'full_max'):
        x_default = dot_samples[:, np.where(dot_vals>=dot_vals.max())[0]]
        fig_path = 'fix_max_subreg'
    else:
        # BUGFIX: the original bare `AssertionError` expression was a no-op
        # that fell through to an undefined `x_default`; raise explicitly.
        raise ValueError(f'Unknown plot_range: {plot_range}')
    return x_default, fig_path
def cal_stats(vals_opt, vals_dict, re_eval):
    """
    Function used to calculate the statistics of the objective values VS
    parameter fixing.

    Parameters:
    ===========
    vals_dict: dict, containing the objective function values with parameters being fixed
    vals_opt: np.ndarray, objective function values used to calculate the statistics
    re_eval: Bool, re-evaluate the OBJ using the whole samples if True,
        else using the optimal set only for parameter fixing

    Return:
    ===========
    df_stats: pd.DataFrame, of statistics with columns
        ['mean', 'std', 'qlow', 'qup'] where qlow/qup are the
        2.5% / 97.5% quantiles.
    """
    df_stats = pd.DataFrame(columns=['mean', 'std', 'qlow','qup'])
    if re_eval:
        # Only values above the 0.382 threshold enter the statistics.
        opt_above = vals_opt[vals_opt > 0.382]
        df_stats.loc['full_set', ['mean', 'std']] = [opt_above.mean(), opt_above.std()]
        # BUGFIX: upper quantile was 0.957 (typo); use 0.975 to match the
        # 2.5%/97.5% interval used for the fixed-parameter rows below.
        df_stats.loc['full_set', 'qlow':'qup'] = np.quantile(opt_above, [0.025, 0.975])
    else:
        df_stats.loc['full_set', ['mean', 'std']] = [vals_opt.mean(), vals_opt.std()]
        # BUGFIX: 0.957 -> 0.975 (see above).
        df_stats.loc['full_set', 'qlow':'qup'] = np.quantile(vals_opt, [0.025, 0.975])
    for key, value in vals_dict.items():
        # 'fix_13' (all parameters fixed) is a single point; skip it.
        if key != 'fix_13':
            if re_eval:
                value = value[value > 0.382]
            df_stats.loc[key, 'mean'] = value.mean()
            df_stats.loc[key, 'std'] = value.std()
            df_stats.loc[key, 'qlow':'qup'] = np.quantile(value, [0.025, 0.975])
    return df_stats
def cal_prop_optimal(vals_dict, dot_vals, fig_path):
    """
    Compute, for every fixing level, the proportion of objective values
    above the 0.382 threshold relative to the total number of samples, and
    write the result to ``<fig_path>/Proportion_optimal.csv``.

    Parameters:
    ===========
    vals_dict: dict, objective values keyed by the fixing level
    dot_vals: np.ndarray, the full set of objective values; its size is the
        denominator of every proportion
    fig_path: str, dir to save the result formed into a pd.DataFrame
    """
    total = dot_vals.shape[0]
    pct_optimal = {
        key: value[value > 0.382].shape[0] / total
        for key, value in vals_dict.items()
    }
    pd.DataFrame.from_dict(
        pct_optimal, orient='index', columns=['Proportion']
    ).to_csv(f'{fig_path}/Proportion_optimal.csv')
# END cal_prop_optimal()
def plot_pdf(vals_opt, vals_dict, re_eval, fig_path):
    """
    Used to generate the plot of probability distribution function.

    Parameters:
    ===========
    vals_opt: np.ndarray, objective values of the full (unfixed) sample set
    vals_dict: dict, objective values keyed by fixing level ('fix_1', ...)
    re_eval: Bool, if True only values above the 0.382 threshold are plotted
    fig_path: str, directory the figure is saved into

    Saves ``<fig_path>/objective_dist.png``; returns None.
    """
    fig, axes = plt.subplots(1, 3, figsize=(20, 6), sharex=True)
    # Reference PDF of the unfixed set on the first panel.
    sns.distplot(vals_opt.flatten(), hist=False, ax=axes[0])
    k = 0
    # Spread the per-fixing-level PDFs over the three panels, four curves
    # per panel (k // 4 maps curve index -> panel index).
    for key, value in vals_dict.items():
        if key != 'fix_13':
            if re_eval:
                value = value[value>0.382]
            sns.distplot(value.flatten(), hist=False, ax=axes[k//4]);
            k += 1
    # Legends mirror the panel assignment above (4 keys per panel).
    axes[0].legend(['full_set', *list(vals_dict.keys())[0:4]])
    axes[1].set_xlabel('F')
    axes[1].set_ylabel('')
    axes[1].legend(list(vals_dict.keys())[4:8])
    axes[2].legend(list(vals_dict.keys())[8:])
    axes[2].set_ylabel('')
    # Mark the 0.382 objective threshold on every panel.
    for ii in range(3):
        axes[ii].axvline(0.382, color='grey', linestyle='--', alpha=0.7)
    plt.savefig(f'{fig_path}/objective_dist.png', dpi=300)
def box_plot(vals_dict, vals_opt, num_fix, fig_path,fig_name, y_label='1/(2-F)', y_norm=True):
    """
    Used to generate the boxplot of objective values.

    Parameters:
    ===========
    vals_dict: dict, objective values keyed by the fixing level
    vals_opt: np.ndarray, objective values with no parameter fixed (column 0)
    fig_path: str, directory to save the figure
    fig_name: str, file name of the saved figure (without extension)
    num_fix: list, numbers of fixed parameters, used as column labels
    y_label: str, y-axis label
    y_norm: Bool, if True the values are assumed already normalized as
        1/(2-F); if False raw F values are plotted and values <= 0.382 are
        masked out before plotting.

    Saves ``<fig_path>/<fig_name>.png``; returns None.
    """
    fig2 = plt.figure(figsize=(8, 6))
    df = pd.DataFrame.from_dict(vals_dict)
    df['fix_0'] = vals_opt.flatten()
    # Re-label columns by the number of fixed parameters, with 0 first.
    df.columns = [*num_fix, 0]
    df = df[[0, *num_fix]]
    if y_norm:
        df_filter = df
    else:
        # Mask values below the threshold; NaNs are ignored by the boxplot.
        df_filter = df.where(df>0.382)
    ax = sns.boxplot(data=df_filter, saturation=0.5, linewidth=1, whis=0.5)
    if y_norm == True:
        # Threshold line expressed in the normalized units 1/(2-F).
        ax.axhline(1/(2 - 0.382), color='orange', linestyle='--', alpha=1 , linewidth=1)
        ax.set_ylim(0, 0.8)
    else:
        ax.axhline(0.382, color='orange', linestyle='--', alpha=1 , linewidth=1)
        ax.set_ylim(0.3, 0.8)
    ax.set_xlabel('Number of fixed parameters')
    ax.set_ylabel(y_label)
    plt.savefig(f'{fig_path}/{fig_name}.png', dpi=300)
def spr_coef(dot_samples, dot_vals, fsave):
    """
    Compute the pairwise Spearman rank correlation between the 13
    parameters, restricted to samples whose objective exceeds 0.382, and
    write the coefficients and p-values to CSV files.

    Parameters:
    ===========
    dot_samples: np.ndarray of shape (13, N), parameter samples
    dot_vals: np.ndarray, objective values corresponding to dot_samples
    fsave: str, path prefix for the two output CSV files
    """
    keep = np.where(dot_vals > 0.382)[0]
    samples_opt = dot_samples[:, keep]
    labels = np.arange(0, 13)
    coef_df = pd.DataFrame(index=labels, columns=labels)
    pval_df = pd.DataFrame(index=labels, columns=labels)
    # Fill the upper triangle only; the matrix is symmetric and the
    # diagonal is trivially 1.
    for row in range(13):
        for col in range(row + 1, 13):
            rho, pval = spearmanr(samples_opt[row], samples_opt[col])
            coef_df.loc[row, col] = rho
            pval_df.loc[row, col] = pval
    coef_df.to_csv(fsave + 'spearman_coeff.csv')
    pval_df.to_csv(fsave + 'spearman_p.csv')
def corner_pot(samples_dict, vals_dict, x_opt, y_opt, index_fix, y_lab='F'):
    """
    Create dotty plots for the model inputs and outputs.
    Only part of the results will be plotted and shown in the paper due to the space available in a page.

    Parameters:
    ============
    samples_dict: dict, collection of parameter samples with and without FF;
    vals_dict: dict
    x_opt: np.ndarray, parameter data points resulting in the selected optima
    y_opt: np.ndarray, output values of the selected optima corresponding to x_opt
    index_fix: list, the index of parameters ranked according to sensitivities.
    y_lab: str, the label of y-axis

    Returns:
    ========
    fig
    """
    fig, axes = plt.subplots(9, 9, figsize = (6*9, 5*9), sharey=True)
    # Only fixing levels with at least `num_param_start` fixed parameters
    # are shown; each such level occupies one column of the grid.
    num_param_start = 5
    for key, x_value in samples_dict.items():
        num_fix = int(key.split('_')[1])
        if num_fix > (num_param_start-1):
            # Samples (and outputs) above the 0.382 threshold at this level.
            x_value_opt = x_value[:, np.where(vals_dict[key]>0.382)[0]]
            y_value_opt = vals_dict[key][vals_dict[key]>0.382]
            k = num_fix - num_param_start
            # Rows walk through the parameters from position num_fix-1
            # onward in the sensitivity-ranked index list.
            for ii in index_fix[num_fix-1:]:
                # Blue: unfixed reference optima; orange: optima after fixing.
                sns.scatterplot(x=x_opt[ii, :], y=y_opt.flatten(), ax=axes[k, num_fix-num_param_start], color='royalblue', s=20, alpha=0.8)
                sns.scatterplot(x=x_value_opt[ii, :], y=y_value_opt.flatten(), ax=axes[k, num_fix-num_param_start], color='orange', s=20, alpha=0.5)
                axes[k, num_fix-num_param_start].xaxis.set_tick_params(labelsize=40)
                axes[k, num_fix-num_param_start].yaxis.set_tick_params(labelsize=40)
                k += 1
            axes[num_fix-num_param_start, 0].set_ylabel(y_lab, fontsize=40)
    fig.set_tight_layout(True)
    return fig
# define the order to fix parameters
def fix_plot(gp, fsave, param_names, ind_vars, sa_cal_type, variables_full,
    variable_temp, plot_range='full', param_range='full', re_eval=False, norm_y=False):
    """
    Used to fix parameter sequentially and obtaining unconditional outputs,
    as well as boxplot and scatterplots.

    Parameters:
    ===========
    gp: Gaussian Process object
    fsave: the outer dir for saving results of, for example, spearman correlation
    param_names: list, parameter names
    ind_vars: individual parameter variable
    sa_cal_type: str, the type of SA to conduct. Should be from ['analytic', 'sampling']
    variables_full: the full variable set used for the analytic SA
    variable_temp: the (restricted) variable set used for sampling
    plot_range: str, strategy for the nominal fixing point; forwarded to
        choose_fixed_point(). Default is "full".
    param_range: str, tag of the cached dotty-sample file to use
    re_eval: Bool, if True re-evaluate the full sample set after fixing,
        otherwise only the samples already above the threshold
    norm_y: Bool, whether to normalize objective functions when sensitive analysis

    Return:
    ========
    dot_vals: np.ndarray, objective function values from dot_samples
    vals_dict: dict, containing the objective function values with parameters being fixed
    index_fix: list, the ordered index of fixed parameters
    """
    from funcs.utils import fix_sample_set, dotty_plot
    if re_eval:
        eval_bool = 'reeval'
    else:
        eval_bool = 'no_reeval'
    # Load (or generate and cache) the Monte-Carlo parameter samples.
    # NOTE(review): `os` is not imported in this module's visible header;
    # presumably it comes in via `from adaptive_gp_model import *` — confirm.
    dot_fn = f'{file_settings()[0]}gp_run_1117/dotty_samples_{param_range}.txt'
    if not os.path.exists(dot_fn):
        dot_samples = generate_independent_random_samples(variable_temp, 150000)
        np.savetxt(dot_fn, dot_samples)
    else:
        dot_samples = np.loadtxt(dot_fn)
    # Predict the objective in chunks of 10000 samples to bound memory use.
    dot_vals = np.zeros(shape=(dot_samples.shape[1], 1))
    for ii in range(15):
        dot_vals[10000*ii:(ii+1)*10000] = gp.predict(dot_samples[:, 10000*ii:(ii+1)*10000].T)
    # Whether to re-evaluate the optimal values.
    if re_eval:
        samples_opt = dot_samples
        vals_opt = dot_vals
    else:
        # Keep only samples whose objective exceeds the 0.382 threshold.
        samples_opt = dot_samples[:, np.where(dot_vals>0.382)[0]]
        vals_opt = dot_vals[dot_vals>0.382]
    # Choose the fixed values
    print(f'Number of values beyond the threshold: {samples_opt.shape[1]}')
    x_default, fig_path = choose_fixed_point(plot_range, dot_samples, samples_opt, dot_vals)
    fig_path = fsave + fig_path
    y_default = gp.predict(x_default.reshape(x_default.shape[0], 1).T)[0]
    print(f'F of the point with default values: {y_default}')
    # Append the objective at the nominal point; fix_sample_set reads it.
    x_default = np.append(x_default, y_default)
    if not os.path.exists(fig_path):
        os.makedirs(fig_path)
    # calculate / import parameter rankings
    from sensitivity_settings import sa_gp
    if sa_cal_type == 'analytic':
        vars = variables_full
    else:
        vars = variable_temp
    _, ST = sa_gp(fsave, gp, ind_vars, vars, param_names,
        cal_type=sa_cal_type, save_values=True, norm_y=norm_y)
    # Rank parameters by total-effect index (ascending); index_sort maps
    # rank position -> parameter index so that iterating ii from 12 down
    # to 0 fixes the least sensitive parameters first.
    par_rank = np.argsort(ST['ST'].values)
    index_sort = {ii:par_rank[12-ii] for ii in range(13)}
    num_fix = []
    vals_dict = {}
    samples_dict = {}
    index_fix = np.array([], dtype=int)
    for ii in range(max(index_sort.keys()), -1, -1):
        index_fix = np.append(index_fix, index_sort[ii])
        num_fix.append(index_fix.shape[0])
        print(f'Fix {index_fix.shape[0]} parameters')
        print(f'index: {index_fix}')
        samples_fix = fix_sample_set(index_fix, samples_opt, x_default)
        vals_fix = np.zeros_like(vals_opt)
        # calculate with surrogate
        # NOTE(review): the inner loop reuses the name `ii`, shadowing the
        # outer loop variable within this iteration; harmless here because
        # `ii` is not read again before the outer `for` reassigns it.
        if re_eval == True:
            for ii in range(15):
                vals_fix[10000*ii:(ii+1)*10000] = gp.predict(samples_fix[:, 10000*ii:(ii+1)*10000].T)
        else:
            vals_fix = gp.predict(samples_fix.T)
        # if num_fix[-1] == 2:
        #     np.savetxt(f'{fig_path}/samples_fix_{num_fix[-1]}_{param_range}.txt', samples_fix)
        #     np.savetxt(f'{fig_path}/values_fix_{num_fix[-1]}_{param_range}.txt', vals_fix)
        # select points satisfying the optima
        if not re_eval:
            samples_opt_fix = samples_fix
            vals_opt_fix = vals_fix
            vals_dict[f'fix_{len(index_fix)}'] = vals_fix.flatten()
            samples_dict[f'fix_{len(index_fix)}'] = samples_fix
            # plot
            samples_opt_no_fix = samples_opt
            vals_opt_no_fix = vals_opt
        else:
            index_opt_fix = np.where(vals_fix.flatten() >= 0.382)[0]
            samples_opt_fix = samples_fix[:, index_opt_fix]
            vals_opt_fix = vals_fix[index_opt_fix]
            vals_dict[f'fix_{len(index_fix)}'] = vals_fix.flatten()
            samples_dict[f'fix_{len(index_fix)}'] = samples_fix
            # plot
            index_opt = np.where(vals_opt.flatten() >= 0.382)[0]
            samples_opt_no_fix = samples_opt[:, index_opt]
            vals_opt_no_fix = vals_opt[index_opt]
        # Dotty plot of the current fixing level (blue: unfixed, orange: fixed).
        fig = dotty_plot(samples_opt_no_fix, vals_opt_no_fix.flatten(), samples_opt_fix, vals_opt_fix.flatten(),
            param_names, 'F'); #, orig_x_opt=samples_fix, orig_y_opt=vals_fix
        # plt.savefig(f'{fig_path}/{len(index_fix)}_{param_range}_{eval_bool}.png', dpi=300)
    # Calculate the stats of objectives vs. Parameter Fixing
    # cal_prop_optimal(vals_dict, dot_vals, fig_path)
    # df_stats = cal_stats(vals_opt, vals_dict, re_eval)
    # df_stats.to_csv(f'{fig_path}/F_stats_{param_range}.csv')
    # np.savetxt(f'{fig_path}/fixed_values_{plot_range}.txt', x_default)
    # Calculate the Spearman correlation between parameters
    # spr_coef(dot_samples, dot_vals, fsave)
    # corner plot
    fig = corner_pot(samples_dict, vals_dict, samples_opt_no_fix, vals_opt_no_fix.flatten(), index_fix, y_lab='F')
    plt.savefig(f'{fig_path}/corner_plot_sub_{param_range}_{eval_bool}.png', dpi=300)
    # Box plot
    # normalize the vals in vals_dict so as to well distinguish the feasible F.
    # NOTE(review): the normalized values below are only consumed by the
    # commented-out box_plot calls.
    vals_dict_norm = {}
    for key, v in vals_dict.items():
        vals_dict_norm[key] = 1 / (2 - v)
    vals_opt_norm = 1 / (2 - vals_opt)
    # box_plot(vals_dict_norm, vals_opt_norm, num_fix, fig_path, f'boxplot_{param_range}_norm_{eval_bool}', y_label='1/(2-F)', y_norm=True)
    # box_plot(vals_dict_feasible_norm, vals_feasible_norm, num_fix, fig_path, 'boxplot_feasible_norm', y_label='1/(2-F)', y_norm=True)
    # box_plot(vals_dict, vals_opt, num_fix, fig_path, f'boxplot_feasible_{param_range}_{eval_bool}', y_label='F', y_norm=False)
    return dot_vals, vals_dict, index_fix
# END fix_plot() #_no_reeval
# import GP
def run_fix(fpath):
    """
    Driver: load the trained GP, define the feasible parameter region and
    run the parameter-fixing analysis (fix_plot) for both analytic and
    sampling-based sensitivity rankings, with and without re-evaluation.

    Parameters:
    ===========
    fpath: str, directory containing `gp_0.pkl` and receiving all outputs.
    """
    # Get the feasible region
    def define_variable(x_samples, y_vals, y_threshold, num_pars):
        """
        The function is used to identify the parameter ranges constrained by a given threshold.

        Parameters:
        ===========
        x_samples: np.ndarray, of the shape (N, D),
            where N is the sample size and D is the number of parameters.
        y_vals: np.ndarray, of the shape (N, 1).
            The output corresponds to x_samples.
        y_threshold: float, the value used to constrain parameter ranges.

        Return:
        =======
        variable_feasible: pyapprox.IndependentMultivariateRandomVariable
        """
        # Accept either (D, N) or (N, D) input; work in (N, D).
        if x_samples.shape[0] == num_pars:
            x_samples = x_samples.T
        # Uniform variables on [0, max] of the samples satisfying the threshold.
        x_temp_select = x_samples[np.where(y_vals > y_threshold)[0], :]
        x_temp_range = x_temp_select.max(axis=0)
        univariable_feasible = [stats.uniform(0, x_temp_range[ii]) for ii in range(0, x_temp_range.shape[0])]
        variable_feasible = pyapprox.IndependentMultivariateRandomVariable(univariable_feasible)
        return variable_feasible
    # NOTE(review): pickle.load on a model file — only load trusted files.
    gp = pickle.load(open(f'{fpath}gp_0.pkl', "rb"))
    x_training = gp.X_train_
    y_training = gp.y_train_
    # visualization of the effects of factor fixing
    # define the variables for PCE
    param_file = file_settings()[-1]
    ind_vars, variables_full = variables_prep(param_file, product_uniform='uniform', dummy=False)
    var_trans = AffineRandomVariableTransformation(variables_full, enforce_bounds=True)
    param_names = pd.read_csv(param_file, usecols=[2]).values.flatten()
    # Resample in the ranges where the objective values are above -5
    variable_temp = define_variable(x_training, y_training, -5, num_pars=13)
    # Identify the parameter ranges with output value satisfying a given criteria
    dot_fn = f'{file_settings()[0]}gp_run_1117/dotty_parameter_range.txt'
    if not os.path.exists(dot_fn):
        variable_temp_range = define_variable(x_training, y_training, 0, num_pars=13)
        dot_samples = generate_independent_random_samples(variable_temp_range, 40000)
        np.savetxt(dot_fn, dot_samples)
    else:
        dot_samples = np.loadtxt(dot_fn)
    dot_vals = gp.predict(dot_samples.T)
    variable_feasible= define_variable(dot_samples, dot_vals, 0.382, num_pars=13)
    # Calculate the ratio of calibrating samples in the sub-region
    if not os.path.exists(f'{fpath}ratio_cali_subreg.csv'):
        df = ratio_subreg(gp)
        df.to_csv(f'{fpath}ratio_cali_subreg.csv')
    # Calculate results with and create plots VS fixing parameters.
    # First pass: analytic sensitivity ranking, full parameter range.
    fsave = fpath + 'analytic-sa/' # if sampling, use variable_feasible; else, use variable_temp
    norm_y = False
    param_range = 'full'
    vals_fix_dict = {}
    # NOTE(review): each key below is assigned twice (re_eval False, then
    # True), so only the re_eval=True results remain in vals_fix_dict.
    dot_vals, vals_fix_dict['sub_mean'], index_fix = fix_plot(gp, fsave, param_names,ind_vars, 'analytic',
        variables_full, variable_feasible, plot_range='sub_mean', param_range=param_range, re_eval=False, norm_y = norm_y)
    _, vals_fix_dict['full_rand'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'analytic',
        variables_full, variable_feasible, plot_range='full_rand', param_range=param_range, re_eval=False, norm_y = norm_y)
    _, vals_fix_dict['full_max'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'analytic',
        variables_full, variable_feasible, plot_range='full_max', param_range=param_range, re_eval=False, norm_y = norm_y)
    dot_vals, vals_fix_dict['sub_mean'], index_fix = fix_plot(gp, fsave, param_names,ind_vars, 'analytic',
        variables_full, variable_feasible, plot_range='sub_mean', param_range=param_range, re_eval=True, norm_y = norm_y)
    _, vals_fix_dict['full_rand'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'analytic',
        variables_full, variable_feasible, plot_range='full_rand', param_range=param_range, re_eval=True, norm_y = norm_y)
    _, vals_fix_dict['full_max'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'analytic',
        variables_full, variable_feasible, plot_range='full_max', param_range=param_range, re_eval=True, norm_y = norm_y)
    # Second pass: sampling-based sensitivity ranking, subregion range.
    fsave = fpath + 'sampling-sa/'
    norm_y = False
    param_range = 'sub'
    vals_fix_dict = {}
    dot_vals, vals_fix_dict['sub_mean'], index_fix = fix_plot(gp, fsave, param_names,ind_vars, 'sampling',
        variables_full, variable_feasible, plot_range='sub_mean', param_range=param_range, re_eval=False, norm_y = norm_y)
    _, vals_fix_dict['sub_rand'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'sampling',
        variables_full, variable_feasible, plot_range='sub_rand', param_range=param_range, re_eval=False, norm_y = norm_y)
    _, vals_fix_dict['sub_max'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'sampling',
        variables_full, variable_feasible, plot_range='sub_max', param_range=param_range, re_eval=False, norm_y = norm_y)
    dot_vals, vals_fix_dict['sub_mean'], index_fix = fix_plot(gp, fsave, param_names,ind_vars, 'sampling',
        variables_full, variable_feasible, plot_range='sub_mean', param_range=param_range, re_eval=True, norm_y = norm_y)
    _, vals_fix_dict['sub_rand'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'sampling',
        variables_full, variable_feasible, plot_range='sub_rand', param_range=param_range, re_eval=True, norm_y = norm_y)
    _, vals_fix_dict['sub_max'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'sampling',
        variables_full, variable_feasible, plot_range='sub_max', param_range=param_range, re_eval=True, norm_y = norm_y)
# END run_fix()
def plot_validation(fpath, xlabel, ylabel, plot_range='full', save_fig=False, comp=False):
    """
    Validate the GP surrogate against held-out samples and (optionally)
    compare the adaptive and non-adaptive GP errors.

    Parameters:
    ===========
    fpath: str, directory with `gp_0.pkl` and (optionally) `vali_samples.txt`
    xlabel, ylabel: str, axis labels of the validation scatter plot
    plot_range: str, 'full' validates on the global sample set
        (columns 100:), otherwise on the subregion set (columns 0:100)
    save_fig: Bool, save the validation figure if True
    comp: Bool, if True also plot adaptive vs non-adaptive RMSE curves
    """
    from sklearn.metrics import mean_squared_error
    from sklearn.metrics import r2_score
    from math import sqrt
    def plot(gp, vali_samples, fpath, xlabel, ylabel, plot_range='full', save_fig=False):
        """
        Function used to plot the figures of GP validation.

        Parameters:
        ===========
        gp: Gaussian Process object
        fpath: str, path to save figures
        plot_range: str, defining the set of validation samples to use.
            Use global samples if "full", else local. Default is "full".
        save_fig: Bool, save figures if true. Default is False.
        """
        # Rows 0:13 hold parameter values; row 13 the recorded model output.
        if plot_range == 'full':
            y_hat = gp.predict(vali_samples[0:13, 100:].T)
            y_eval = vali_samples[13, 100:]
        else:
            y_hat = gp.predict(vali_samples[0:13, 0:100].T)
            y_eval = vali_samples[13, 0:100]
        # l2 = np.linalg.norm(y_hat.flatten() - y_eval.flatten()) / np.linalg.norm(y_eval.flatten())
        r2 = r2_score(y_eval.flatten(), y_hat.flatten())
        rmse = sqrt(mean_squared_error(y_eval.flatten(), y_hat.flatten()))
        fig, ax = plt.subplots(1, 1, figsize=(8, 6))
        ax.plot(y_eval.flatten(), y_hat.flatten(), linestyle='', marker='o', ms=8)
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        # Highlight the points above the 0.382 threshold in orange.
        y_eval_opt = y_eval[y_eval>0.382]
        y_hat_opt = y_hat[y_eval>0.382]
        ax.plot(y_eval_opt.flatten(), y_hat_opt.flatten(), linestyle='',
            marker='o', color='darkorange', alpha=0.7, ms=8)
        # 1:1 reference line.
        ax.plot(np.linspace(y_eval.min(), 0.8, 100), np.linspace(y_eval.min(), 0.8, 100),
            linestyle='--', color='slategrey', alpha=0.5)
        # ax.text(-950, -100, r'$R^2 = %.3f$'%r2)
        # ax.text(-950, -200, r'$RMSE = %.3f$'%rmse)
        ax.text(0.05, 0.75, r'$R^2 = %.3f$'%r2, transform=ax.transAxes)
        ax.text(0.05, 0.65, r'$RMSE = %.3f$'%rmse, transform=ax.transAxes)
        # plt.show()
        if save_fig:
            plt.savefig(f'{fpath}figs/gpr_validation_{plot_range}_range_text.png', dpi=300)
    # END plot()
    def vali_samples_subreg(gp, variable, variable_const, num_candidate_samples=40000):
        """
        Function used to generate validation samples.
        Builds a 14 x 300 array: 20 subregion points with F > 0.382, 80
        near-subregion points (0 < F <= 0.382), 180 full-range points and
        20 low-value points; row 13 stores the GP prediction per column.
        """
        import random
        random.seed(666)
        candidates_samples = generate_independent_random_samples(variable=variable,
            num_samples = num_candidate_samples)
        candidates_samples_const = generate_independent_random_samples(variable=variable_const,
            num_samples = num_candidate_samples)
        y_pred_full = gp.predict(candidates_samples.T)
        y_pred_const = gp.predict(candidates_samples_const.T)
        samples_vali_subreg1 = candidates_samples_const[:, np.where(y_pred_const>0.382)[0][0:20]]
        samples_vali_subreg2 = candidates_samples_const[:, np.where(y_pred_const>0)[0]]
        y_sub1 = gp.predict(samples_vali_subreg2.T)
        samples_vali_subreg2 = samples_vali_subreg2[:, np.where(y_sub1<=0.382)[0][0:80]]
        samples_vali_full1 = candidates_samples[:, np.where(y_pred_full>-200)[0][0:180]]
        samples_vali_full2 = candidates_samples[:, np.where((y_pred_full>-1000)&(y_pred_full<-200))[0][0:20]]
        vali_samples = np.zeros(shape=(14, 300))
        vali_samples[0:13, 0:20] = samples_vali_subreg1
        vali_samples[0:13, 20:100] = samples_vali_subreg2
        vali_samples[0:13, 100:280] = samples_vali_full1
        vali_samples[0:13, 280:300] = samples_vali_full2
        vali_samples[13, :] = gp.predict(vali_samples[0:13, :].T).flatten()
        return vali_samples
    # END vali_samples_subreg()
    # Obtain validation samples
    def vali_samples_save(gp):
        # Resample in the ranges where the objective values are above 0
        x_select = x_training[np.where(y_training>0)[0], :]
        x_range = x_select.max(axis=0)
        univariable_temp = [stats.uniform(0, x_range[ii]) for ii in range(0, x_range.shape[0])]
        variable_temp = pyapprox.IndependentMultivariateRandomVariable(univariable_temp)
        # ... and above -200 for the full-range candidate set.
        x_select2 = x_training[np.where(y_training>-200)[0], :]
        x_range2 = x_select2.max(axis=0)
        univariable_temp2 = [stats.uniform(0, x_range2[ii]) for ii in range(0, x_range2.shape[0])]
        variable_temp2 = pyapprox.IndependentMultivariateRandomVariable(univariable_temp2)
        # validation plot
        vali_samples = vali_samples_subreg(gp, variable_temp2, variable_temp, 20000)
        np.savetxt(f'{fpath}vali_samples.txt', vali_samples)
    # import GP
    # NOTE(review): pickle.load on a model file — only load trusted files.
    gp = pickle.load(open(f'{fpath}gp_0.pkl', "rb"))
    x_training = gp.X_train_
    y_training = gp.y_train_
    num_new_samples = np.asarray([20]+[8]*10+[16]*20+[24]*20+[40]*18)
    num_sample_cum = np.cumsum(num_new_samples)
    x_training = gp.X_train_
    y_training = gp.y_train_
    # Plot the validation plots using two independent sample set
    # NOTE(review): on a fresh run (no vali_samples.txt) only the file is
    # generated; `vali_samples` stays undefined and the plotting below
    # would raise NameError — rerun after generation. Confirm intent.
    if not os.path.exists(fpath+'vali_samples.txt'):
        print("There is no validation samples and will generate.")
        vali_samples_save(gp)
    else:
        vali_samples = np.loadtxt(fpath+'vali_samples.txt')
        y_gp = gp.predict(vali_samples[0:13, :].T).flatten()
        # plt.scatter(vali_samples[-1, :], y_gp)
        # plt.show()
    plot(gp, vali_samples, fpath, xlabel, ylabel, plot_range=plot_range, save_fig=save_fig)
    # Calculate the errors due vs increasing samples
    # NOTE(review): this guard looks inverted — the error table is only
    # (re)computed when 'error_df.csv' already exists in the CWD; confirm
    # whether `not os.path.exists(...)` was intended.
    if os.path.exists('error_df.csv'):
        error_df = pd.DataFrame(index=num_sample_cum, columns=['r2_full', 'r2_sub', 'rmse_full', 'rmse_sub'])
        for ntrain in num_sample_cum:
            print(f'-------------{ntrain} training samples------------')
            gp_temp = gp.fit(x_training[0:ntrain, :].T, y_training[0:ntrain])
            y_hat = gp_temp.predict(vali_samples[0:13, :].T).flatten()
            error_df.loc[ntrain, 'r2_sub'] = r2_score(vali_samples[-1, 0:100], y_hat[0:100])
            error_df.loc[ntrain, 'r2_full'] = r2_score(vali_samples[-1, 100:], y_hat[100:])
            error_df.loc[ntrain, 'rmse_full'] = sqrt(mean_squared_error(vali_samples[-1, 100:], y_hat[100:]))
            error_df.loc[ntrain, 'rmse_sub'] = sqrt(mean_squared_error(vali_samples[-1, 0:100], y_hat[0:100]))
        error_df.to_csv(f'{fpath}error_df.csv')
    if comp:
        # Compare the accuracy of adaptive and non-adaptive GP:
        fpaths = ['../output/gp_run_1117/', '../output/gp_run_20220107/']
        error_adaptive = pd.read_csv(f'{fpaths[0]}error_df.csv', index_col='Unnamed: 0')
        error_nonadaptive = pd.read_csv(f'{fpaths[1]}error_df.csv', index_col='Unnamed: 0')
        sns.set_style('whitegrid')
        fig, axes = plt.subplots(1, 2, figsize=(6*2, 5), sharey=True, sharex=False)
        error_adaptive.loc[:, ['rmse_full']].plot(logy=True, logx=True, ax=axes[0])
        error_nonadaptive.loc[:, ['rmse_full']].plot(logy=True, logx=True, ax=axes[0])
        axes[0].legend(['Adaptive GP', 'Non-adaptive GP'])
        axes[0].set_title('(a)')
        error_adaptive.loc[:, ['rmse_sub']].plot(logy=True, logx=True, ax=axes[1])
        error_nonadaptive.loc[:, ['rmse_sub']].plot(logy=True, logx=True, ax=axes[1])
        axes[1].set_title('(b)')
        axes[1].legend(['Adaptive GP', 'Non-adaptive GP'])
        plt.savefig(f'{fpaths[0]}figs/GP_compare.png', dpi=300, format='png')
# END plot_validation()
# plot_validation(fpath='../output/gp_run_20220107/', xlabel='Model outputs',
# ylabel='GP simulation', plot_range='sub', save_fig=True, comp=True)
# run_fix(fpath = '../output/gp_run_1117/')
| [
"pandas.read_csv",
"numpy.argsort",
"numpy.array",
"funcs.utils.fix_sample_set",
"seaborn.set_style",
"sklearn.metrics.r2_score",
"pyapprox.generate_independent_random_samples",
"numpy.arange",
"funcs.read_data.variables_prep",
"numpy.where",
"numpy.asarray",
"pandas.DataFrame.from_dict",
"f... | [((1518, 1581), 'numpy.asarray', 'np.asarray', (['([20] + [8] * 10 + [16] * 20 + [24] * 20 + [40] * 18)'], {}), '([20] + [8] * 10 + [16] * 20 + [24] * 20 + [40] * 18)\n', (1528, 1581), True, 'import numpy as np\n'), ((1584, 1610), 'numpy.cumsum', 'np.cumsum', (['num_new_samples'], {}), '(num_new_samples)\n', (1593, 1610), True, 'import numpy as np\n'), ((1631, 1680), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_new_samples.shape[0] - 2, 2)'}), '(shape=(num_new_samples.shape[0] - 2, 2))\n', (1639, 1680), True, 'import numpy as np\n'), ((4883, 4935), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['mean', 'std', 'qlow', 'qup']"}), "(columns=['mean', 'std', 'qlow', 'qup'])\n", (4895, 4935), True, 'import pandas as pd\n'), ((6076, 6151), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['pct_optimal'], {'orient': '"""index"""', 'columns': "['Proportion']"}), "(pct_optimal, orient='index', columns=['Proportion'])\n", (6098, 6151), True, 'import pandas as pd\n'), ((6393, 6441), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(20, 6)', 'sharex': '(True)'}), '(1, 3, figsize=(20, 6), sharex=True)\n', (6405, 6441), True, 'import matplotlib.pyplot as plt\n'), ((7087, 7141), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{fig_path}/objective_dist.png"""'], {'dpi': '(300)'}), "(f'{fig_path}/objective_dist.png', dpi=300)\n", (7098, 7141), True, 'import matplotlib.pyplot as plt\n'), ((7319, 7345), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (7329, 7345), True, 'import matplotlib.pyplot as plt\n'), ((7355, 7388), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['vals_dict'], {}), '(vals_dict)\n', (7377, 7388), True, 'import pandas as pd\n'), ((7581, 7647), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'df_filter', 'saturation': '(0.5)', 'linewidth': '(1)', 'whis': '(0.5)'}), '(data=df_filter, saturation=0.5, linewidth=1, whis=0.5)\n', (7592, 7647), True, 'import 
seaborn as sns\n'), ((7990, 8040), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{fig_path}/{fig_name}.png"""'], {'dpi': '(300)'}), "(f'{fig_path}/{fig_name}.png', dpi=300)\n", (8001, 8040), True, 'import matplotlib.pyplot as plt\n'), ((9336, 9391), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(9)', '(9)'], {'figsize': '(6 * 9, 5 * 9)', 'sharey': '(True)'}), '(9, 9, figsize=(6 * 9, 5 * 9), sharey=True)\n', (9348, 9391), True, 'import matplotlib.pyplot as plt\n'), ((11988, 12029), 'numpy.zeros', 'np.zeros', ([], {'shape': '(dot_samples.shape[1], 1)'}), '(shape=(dot_samples.shape[1], 1))\n', (11996, 12029), True, 'import numpy as np\n'), ((12787, 12818), 'numpy.append', 'np.append', (['x_default', 'y_default'], {}), '(x_default, y_default)\n', (12796, 12818), True, 'import numpy as np\n'), ((13089, 13193), 'sensitivity_settings.sa_gp', 'sa_gp', (['fsave', 'gp', 'ind_vars', 'vars', 'param_names'], {'cal_type': 'sa_cal_type', 'save_values': '(True)', 'norm_y': 'norm_y'}), '(fsave, gp, ind_vars, vars, param_names, cal_type=sa_cal_type,\n save_values=True, norm_y=norm_y)\n', (13094, 13193), False, 'from sensitivity_settings import sa_gp\n'), ((13214, 13241), 'numpy.argsort', 'np.argsort', (["ST['ST'].values"], {}), "(ST['ST'].values)\n", (13224, 13241), True, 'import numpy as np\n'), ((13375, 13398), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (13383, 13398), True, 'import numpy as np\n'), ((16614, 16699), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{fig_path}/corner_plot_sub_{param_range}_{eval_bool}.png"""'], {'dpi': '(300)'}), "(f'{fig_path}/corner_plot_sub_{param_range}_{eval_bool}.png',\n dpi=300)\n", (16625, 16699), True, 'import matplotlib.pyplot as plt\n'), ((18834, 18900), 'funcs.read_data.variables_prep', 'variables_prep', (['param_file'], {'product_uniform': '"""uniform"""', 'dummy': '(False)'}), "(param_file, product_uniform='uniform', dummy=False)\n", (18848, 18900), False, 'from funcs.read_data import 
file_settings, variables_prep\n'), ((27859, 27922), 'numpy.asarray', 'np.asarray', (['([20] + [8] * 10 + [16] * 20 + [24] * 20 + [40] * 18)'], {}), '([20] + [8] * 10 + [16] * 20 + [24] * 20 + [40] * 18)\n', (27869, 27922), True, 'import numpy as np\n'), ((27928, 27954), 'numpy.cumsum', 'np.cumsum', (['num_new_samples'], {}), '(num_new_samples)\n', (27937, 27954), True, 'import numpy as np\n'), ((3154, 3202), 'funcs.utils.define_constants', 'define_constants', (['dot_samples', '(13)'], {'stats': 'np.mean'}), '(dot_samples, 13, stats=np.mean)\n', (3170, 3202), False, 'from funcs.utils import define_constants\n'), ((5118, 5173), 'numpy.quantile', 'np.quantile', (['vals_opt[vals_opt > 0.382]', '[0.025, 0.957]'], {}), '(vals_opt[vals_opt > 0.382], [0.025, 0.957])\n', (5129, 5173), True, 'import numpy as np\n'), ((5317, 5354), 'numpy.quantile', 'np.quantile', (['vals_opt', '[0.025, 0.957]'], {}), '(vals_opt, [0.025, 0.957])\n', (5328, 5354), True, 'import numpy as np\n'), ((11822, 11880), 'pyapprox.generate_independent_random_samples', 'generate_independent_random_samples', (['variable_temp', '(150000)'], {}), '(variable_temp, 150000)\n', (11857, 11880), False, 'from pyapprox import generate_independent_random_samples\n'), ((11889, 11920), 'numpy.savetxt', 'np.savetxt', (['dot_fn', 'dot_samples'], {}), '(dot_fn, dot_samples)\n', (11899, 11920), True, 'import numpy as np\n'), ((11953, 11971), 'numpy.loadtxt', 'np.loadtxt', (['dot_fn'], {}), '(dot_fn)\n', (11963, 11971), True, 'import numpy as np\n'), ((13472, 13508), 'numpy.append', 'np.append', (['index_fix', 'index_sort[ii]'], {}), '(index_fix, index_sort[ii])\n', (13481, 13508), True, 'import numpy as np\n'), ((13665, 13714), 'funcs.utils.fix_sample_set', 'fix_sample_set', (['index_fix', 'samples_opt', 'x_default'], {}), '(index_fix, samples_opt, x_default)\n', (13679, 13714), False, 'from funcs.utils import fix_sample_set, dotty_plot\n'), ((13734, 13757), 'numpy.zeros_like', 'np.zeros_like', (['vals_opt'], {}), 
'(vals_opt)\n', (13747, 13757), True, 'import numpy as np\n'), ((18783, 18798), 'funcs.read_data.file_settings', 'file_settings', ([], {}), '()\n', (18796, 18798), False, 'from funcs.read_data import file_settings, variables_prep\n'), ((19513, 19576), 'pyapprox.generate_independent_random_samples', 'generate_independent_random_samples', (['variable_temp_range', '(40000)'], {}), '(variable_temp_range, 40000)\n', (19548, 19576), False, 'from pyapprox import generate_independent_random_samples\n'), ((19585, 19616), 'numpy.savetxt', 'np.savetxt', (['dot_fn', 'dot_samples'], {}), '(dot_fn, dot_samples)\n', (19595, 19616), True, 'import numpy as np\n'), ((19649, 19667), 'numpy.loadtxt', 'np.loadtxt', (['dot_fn'], {}), '(dot_fn)\n', (19659, 19667), True, 'import numpy as np\n'), ((24316, 24350), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 6)'}), '(1, 1, figsize=(8, 6))\n', (24328, 24350), True, 'import matplotlib.pyplot as plt\n'), ((25465, 25481), 'random.seed', 'random.seed', (['(666)'], {}), '(666)\n', (25476, 25481), False, 'import random\n'), ((25511, 25605), 'pyapprox.generate_independent_random_samples', 'generate_independent_random_samples', ([], {'variable': 'variable', 'num_samples': 'num_candidate_samples'}), '(variable=variable, num_samples=\n num_candidate_samples)\n', (25546, 25605), False, 'from pyapprox import generate_independent_random_samples\n'), ((25652, 25752), 'pyapprox.generate_independent_random_samples', 'generate_independent_random_samples', ([], {'variable': 'variable_const', 'num_samples': 'num_candidate_samples'}), '(variable=variable_const, num_samples=\n num_candidate_samples)\n', (25687, 25752), False, 'from pyapprox import generate_independent_random_samples\n'), ((26430, 26455), 'numpy.zeros', 'np.zeros', ([], {'shape': '(14, 300)'}), '(shape=(14, 300))\n', (26438, 26455), True, 'import numpy as np\n'), ((27656, 27708), 'numpy.savetxt', 'np.savetxt', (['f"""{fpath}vali_samples.txt"""', 'vali_samples'], 
{}), "(f'{fpath}vali_samples.txt', vali_samples)\n", (27666, 27708), True, 'import numpy as np\n'), ((28262, 28300), 'numpy.loadtxt', 'np.loadtxt', (["(fpath + 'vali_samples.txt')"], {}), "(fpath + 'vali_samples.txt')\n", (28272, 28300), True, 'import numpy as np\n'), ((28642, 28736), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'num_sample_cum', 'columns': "['r2_full', 'r2_sub', 'rmse_full', 'rmse_sub']"}), "(index=num_sample_cum, columns=['r2_full', 'r2_sub',\n 'rmse_full', 'rmse_sub'])\n", (28654, 28736), True, 'import pandas as pd\n'), ((29637, 29700), 'pandas.read_csv', 'pd.read_csv', (['f"""{fpaths[0]}error_df.csv"""'], {'index_col': '"""Unnamed: 0"""'}), "(f'{fpaths[0]}error_df.csv', index_col='Unnamed: 0')\n", (29648, 29700), True, 'import pandas as pd\n'), ((29729, 29792), 'pandas.read_csv', 'pd.read_csv', (['f"""{fpaths[1]}error_df.csv"""'], {'index_col': '"""Unnamed: 0"""'}), "(f'{fpaths[1]}error_df.csv', index_col='Unnamed: 0')\n", (29740, 29792), True, 'import pandas as pd\n'), ((29801, 29827), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (29814, 29827), True, 'import seaborn as sns\n'), ((29848, 29913), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(6 * 2, 5)', 'sharey': '(True)', 'sharex': '(False)'}), '(1, 2, figsize=(6 * 2, 5), sharey=True, sharex=False)\n', (29860, 29913), True, 'import matplotlib.pyplot as plt\n'), ((30444, 30513), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{fpaths[0]}figs/GP_compare.png"""'], {'dpi': '(300)', 'format': '"""png"""'}), "(f'{fpaths[0]}figs/GP_compare.png', dpi=300, format='png')\n", (30455, 30513), True, 'import matplotlib.pyplot as plt\n'), ((2074, 2107), 'numpy.arange', 'np.arange', (['ratio_samples.shape[0]'], {}), '(ratio_samples.shape[0])\n', (2083, 2107), True, 'import numpy as np\n'), ((3363, 3413), 'funcs.utils.define_constants', 'define_constants', (['samples_opt', '(13)'], {'stats': 'np.median'}), '(samples_opt, 13, 
stats=np.median)\n', (3379, 3413), False, 'from funcs.utils import define_constants\n'), ((5643, 5677), 'numpy.quantile', 'np.quantile', (['value', '[0.025, 0.975]'], {}), '(value, [0.025, 0.975])\n', (5654, 5677), True, 'import numpy as np\n'), ((8244, 8260), 'numpy.arange', 'np.arange', (['(0)', '(13)'], {}), '(0, 13)\n', (8253, 8260), True, 'import numpy as np\n'), ((8270, 8286), 'numpy.arange', 'np.arange', (['(0)', '(13)'], {}), '(0, 13)\n', (8279, 8286), True, 'import numpy as np\n'), ((8320, 8336), 'numpy.arange', 'np.arange', (['(0)', '(13)'], {}), '(0, 13)\n', (8329, 8336), True, 'import numpy as np\n'), ((8346, 8362), 'numpy.arange', 'np.arange', (['(0)', '(13)'], {}), '(0, 13)\n', (8355, 8362), True, 'import numpy as np\n'), ((8480, 8523), 'scipy.stats.spearmanr', 'spearmanr', (['samples_opt[ii]', 'samples_opt[jj]'], {}), '(samples_opt[ii], samples_opt[jj])\n', (8489, 8523), False, 'from scipy.stats import spearmanr\n'), ((18358, 18392), 'scipy.stats.uniform', 'stats.uniform', (['(0)', 'x_temp_range[ii]'], {}), '(0, x_temp_range[ii])\n', (18371, 18392), False, 'from scipy import stats\n'), ((25166, 25245), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{fpath}figs/gpr_validation_{plot_range}_range_text.png"""'], {'dpi': '(300)'}), "(f'{fpath}figs/gpr_validation_{plot_range}_range_text.png', dpi=300)\n", (25177, 25245), True, 'import matplotlib.pyplot as plt\n'), ((27083, 27112), 'scipy.stats.uniform', 'stats.uniform', (['(0)', 'x_range[ii]'], {}), '(0, x_range[ii])\n', (27096, 27112), False, 'from scipy import stats\n'), ((27375, 27405), 'scipy.stats.uniform', 'stats.uniform', (['(0)', 'x_range2[ii]'], {}), '(0, x_range2[ii])\n', (27388, 27405), False, 'from scipy import stats\n'), ((29042, 29089), 'sklearn.metrics.r2_score', 'r2_score', (['vali_samples[-1, 0:100]', 'y_hat[0:100]'], {}), '(vali_samples[-1, 0:100], y_hat[0:100])\n', (29050, 29089), False, 'from sklearn.metrics import r2_score\n'), ((29136, 29181), 'sklearn.metrics.r2_score', 
'r2_score', (['vali_samples[-1, 100:]', 'y_hat[100:]'], {}), '(vali_samples[-1, 100:], y_hat[100:])\n', (29144, 29181), False, 'from sklearn.metrics import r2_score\n'), ((3576, 3624), 'funcs.utils.define_constants', 'define_constants', (['samples_opt', '(13)'], {'stats': 'np.mean'}), '(samples_opt, 13, stats=np.mean)\n', (3592, 3624), False, 'from funcs.utils import define_constants\n'), ((8180, 8206), 'numpy.where', 'np.where', (['(dot_vals > 0.382)'], {}), '(dot_vals > 0.382)\n', (8188, 8206), True, 'import numpy as np\n'), ((11701, 11716), 'funcs.read_data.file_settings', 'file_settings', ([], {}), '()\n', (11714, 11716), False, 'from funcs.read_data import file_settings, variables_prep\n'), ((19007, 19043), 'pandas.read_csv', 'pd.read_csv', (['param_file'], {'usecols': '[2]'}), '(param_file, usecols=[2])\n', (19018, 19043), True, 'import pandas as pd\n'), ((19312, 19327), 'funcs.read_data.file_settings', 'file_settings', ([], {}), '()\n', (19325, 19327), False, 'from funcs.read_data import file_settings, variables_prep\n'), ((29235, 29290), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['vali_samples[-1, 100:]', 'y_hat[100:]'], {}), '(vali_samples[-1, 100:], y_hat[100:])\n', (29253, 29290), False, 'from sklearn.metrics import mean_squared_error\n'), ((29344, 29401), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['vali_samples[-1, 0:100]', 'y_hat[0:100]'], {}), '(vali_samples[-1, 0:100], y_hat[0:100])\n', (29362, 29401), False, 'from sklearn.metrics import mean_squared_error\n'), ((1769, 1830), 'numpy.where', 'np.where', (['(y_training[num_samples[ii]:num_samples[ii + 1]] > 0)'], {}), '(y_training[num_samples[ii]:num_samples[ii + 1]] > 0)\n', (1777, 1830), True, 'import numpy as np\n'), ((12330, 12356), 'numpy.where', 'np.where', (['(dot_vals > 0.382)'], {}), '(dot_vals > 0.382)\n', (12338, 12356), True, 'import numpy as np\n'), ((18239, 18269), 'numpy.where', 'np.where', (['(y_vals > y_threshold)'], {}), '(y_vals > 
y_threshold)\n', (18247, 18269), True, 'import numpy as np\n'), ((26038, 26064), 'numpy.where', 'np.where', (['(y_pred_const > 0)'], {}), '(y_pred_const > 0)\n', (26046, 26064), True, 'import numpy as np\n'), ((26986, 27010), 'numpy.where', 'np.where', (['(y_training > 0)'], {}), '(y_training > 0)\n', (26994, 27010), True, 'import numpy as np\n'), ((27272, 27299), 'numpy.where', 'np.where', (['(y_training > -200)'], {}), '(y_training > -200)\n', (27280, 27299), True, 'import numpy as np\n'), ((3314, 3340), 'numpy.where', 'np.where', (['(dot_vals > 0.382)'], {}), '(dot_vals > 0.382)\n', (3322, 3340), True, 'import numpy as np\n'), ((9580, 9612), 'numpy.where', 'np.where', (['(vals_dict[key] > 0.382)'], {}), '(vals_dict[key] > 0.382)\n', (9588, 9612), True, 'import numpy as np\n'), ((25940, 25970), 'numpy.where', 'np.where', (['(y_pred_const > 0.382)'], {}), '(y_pred_const > 0.382)\n', (25948, 25970), True, 'import numpy as np\n'), ((26174, 26199), 'numpy.where', 'np.where', (['(y_sub1 <= 0.382)'], {}), '(y_sub1 <= 0.382)\n', (26182, 26199), True, 'import numpy as np\n'), ((26259, 26287), 'numpy.where', 'np.where', (['(y_pred_full > -200)'], {}), '(y_pred_full > -200)\n', (26267, 26287), True, 'import numpy as np\n'), ((26348, 26402), 'numpy.where', 'np.where', (['((y_pred_full > -1000) & (y_pred_full < -200))'], {}), '((y_pred_full > -1000) & (y_pred_full < -200))\n', (26356, 26402), True, 'import numpy as np\n'), ((3527, 3553), 'numpy.where', 'np.where', (['(dot_vals > 0.382)'], {}), '(dot_vals > 0.382)\n', (3535, 3553), True, 'import numpy as np\n'), ((3734, 3760), 'numpy.where', 'np.where', (['(dot_vals > 0.382)'], {}), '(dot_vals > 0.382)\n', (3742, 3760), True, 'import numpy as np\n'), ((3931, 3957), 'numpy.where', 'np.where', (['(dot_vals > 0.382)'], {}), '(dot_vals > 0.382)\n', (3939, 3957), True, 'import numpy as np\n')] |
import numpy as np
def pad_tensor(vec, pad, axis):
    """Zero-pad a tensor along one dimension up to a target length.

    args:
        vec - tensor to pad
        pad - the size to pad to
        axis - dimension to pad
    return:
        a new tensor padded to 'pad' in dimension 'axis'
    """
    fill_shape = list(vec.shape)
    # only the padded axis grows; every other dimension stays the same
    fill_shape[axis] = pad - fill_shape[axis]
    filler = np.zeros(fill_shape)
    return np.concatenate([vec, filler], axis=axis)
def collate_fn(batch):
    """Transpose a batch: a list of dicts/dataclasses becomes a single
    dict/dataclass whose leaves are lists, applied recursively.

    Can be used as map after batching of a dataset:
    `dataset.batch(...).map(collate_fn)`

    Args:
        batch: list (or tuple) of examples
    Returns:
        >>> batch = [{'a': 1}, {'a': 2}]
        >>> collate_fn(batch)
        {'a': [1, 2]}
        >>> collate_fn(tuple(batch))
        {'a': (1, 2)}
        >>> batch = [{'a': {'b': [1, 2]}}, {'a': {'b': [3, 4]}}]
        >>> collate_fn(batch)
        {'a': {'b': [[1, 2], [3, 4]]}}
        >>> import dataclasses
        >>> Point = dataclasses.make_dataclass('Point', ['x', 'y'])
        >>> batch = [Point(1, 2), Point(3, 4)]
        >>> collate_fn(batch)
        Point(x=[1, 3], y=[2, 4])
        >>> collate_fn(tuple(batch))
        Point(x=(1, 3), y=(2, 4))
    """
    assert isinstance(batch, (tuple, list)), (type(batch), batch)
    head, container = batch[0], batch.__class__
    if isinstance(head, dict):
        # all examples must share the same keys before we can transpose them
        for example in batch[1:]:
            assert head.keys() == example.keys(), batch
        collated = {
            key: collate_fn(container(example[key] for example in batch))
            for key in head
        }
        return head.__class__(collated)
    if hasattr(head, '__dataclass_fields__'):
        for example in batch[1:]:
            assert head.__dataclass_fields__ == example.__dataclass_fields__, batch
        collated = {
            field: collate_fn(container(getattr(example, field) for example in batch))
            for field in head.__dataclass_fields__
        }
        return head.__class__(**collated)
    # leaves (ints, lists, tensors, ...) are returned as the batched container
    return batch
| [
"numpy.zeros"
] | [((349, 367), 'numpy.zeros', 'np.zeros', (['pad_size'], {}), '(pad_size)\n', (357, 367), True, 'import numpy as np\n')] |
#Simple Password Generator created by <NAME>
#Contact me on instagram at kushal.bastakoti
#You can customize the weight, ratio and number of digits by editing the values below.
#Usage:
#  python pwd_generator.py                     -> 16-char password, default weights
#  python pwd_generator.py num1                -> num1 = password length
#  python pwd_generator.py num1 num2 num3 num4 -> num1 = password length,
#      num2 = weight of letters, num3 = weight of numbers,
#      num4 = weight of symbols
import numpy as np
import random as rm
import sys
#Candidate characters for each category.
#NOTE(review): 't' appears twice in word_list, which slightly biases draws
#toward 't' - presumably a typo; confirm before removing.
word_list = np.array(['a', 'A', 'b', 'B', 'c', 'C', 'd', 'D', 'e', 'E', 'f', 'F','g', 'G', 'h', 'H', 'i', 'I', 'j', 'J', 'k', 'K', 'l', 'L','m', 'M', 'n', 'N', 'o', 'O', 'p', 'P', 'q', 'Q', 'r', 'R','s', 'S', 't', 't', 'T', 'u', 'U', 'v', 'V', 'w', 'W', 'x', 'X', 'y', 'Y', 'z', 'Z', ])
number_list = np.array(['1', '2', '3' , '4', '5', '6', '7','8', '9'])
symbol_list = np.array(['!','@','#','$','%','^','&','*'])
#Default weights: 60% letters, 25% numbers, 15% symbols
#(e.g. for a 16-char password).
if len(sys.argv) == 1:
    word_count = 16
    weight_string = 60
    weight_number = 25
    weight_symbols = 15
elif len(sys.argv) == 2 :
    word_count = int(sys.argv[1])
    weight_string = 60
    weight_number = 25
    weight_symbols = 15
elif len(sys.argv) == 5:
    word_count = int(sys.argv[1])
    weight_string = int(sys.argv[2])
    weight_number = int(sys.argv[3])
    weight_symbols = int(sys.argv[4])
else:
    print("Invalid number of Arguments")
    exit(0)
#Convert the relative weights into per-category character counts.
#Note the '/' divisions below yield floats; the gap-filling loop after
#this block tops the total back up to word_count.
ratio_total = weight_number+weight_string+weight_symbols
if ratio_total == word_count:
    ratio_string = weight_string
    ratio_numbers = weight_number
    ratio_symbols = weight_symbols
else:
    ratio_string = (word_count * weight_string)/ ratio_total
    ratio_numbers = (word_count * weight_number)/ratio_total
    ratio_symbols = (word_count * weight_symbols)/ratio_total
#print(" ",ratio_string," ",ratio_numbers," ",ratio_symbols)
#Code for filling in any gaps (from the integer/float ratios above) by
#randomly bumping one category at a time until the counts sum to word_count.
while (ratio_numbers + ratio_string + ratio_symbols < word_count):
    ratio_increase = rm.choice([1,2,3])
    if ratio_increase == 1:
        ratio_string += 1
    elif ratio_increase == 2:
        ratio_numbers += 1
    else:
        ratio_symbols += 1
#raw_password: the multiset of selected letters, numbers and symbols that
#the final password will be shuffled from.
raw_password = np.array([])
while (raw_password.size < ratio_string):
    raw_password = np.append(raw_password,rm.choice(word_list))
while (raw_password.size - ratio_string < ratio_numbers):
    raw_password = np.append(raw_password, rm.choice(number_list))
while (raw_password.size -ratio_numbers -ratio_string < ratio_symbols):
    raw_password = np.append(raw_password, rm.choice(symbol_list))
#Count each character's multiplicity so the shuffle below draws every
#character exactly as many times as it was selected (no repetition beyond that).
dict1 ={k:np.count_nonzero(raw_password == k) for k in raw_password}
password = np.array([])
#Final password: repeatedly sample from raw_password, accepting a character
#only while its remaining count is non-zero (a rejection-sampling shuffle).
while(password.size < word_count):
    a = rm.choice(raw_password)
    if(dict1[a] != 0 ):
        password = np.append(password,a)
        dict1[a] -= 1
# print (raw_password)
# print (password)
print("".join(password))
| [
"numpy.count_nonzero",
"numpy.array",
"random.choice",
"numpy.append"
] | [((575, 862), 'numpy.array', 'np.array', (["['a', 'A', 'b', 'B', 'c', 'C', 'd', 'D', 'e', 'E', 'f', 'F', 'g', 'G', 'h',\n 'H', 'i', 'I', 'j', 'J', 'k', 'K', 'l', 'L', 'm', 'M', 'n', 'N', 'o',\n 'O', 'p', 'P', 'q', 'Q', 'r', 'R', 's', 'S', 't', 't', 'T', 'u', 'U',\n 'v', 'V', 'w', 'W', 'x', 'X', 'y', 'Y', 'z', 'Z']"], {}), "(['a', 'A', 'b', 'B', 'c', 'C', 'd', 'D', 'e', 'E', 'f', 'F', 'g',\n 'G', 'h', 'H', 'i', 'I', 'j', 'J', 'k', 'K', 'l', 'L', 'm', 'M', 'n',\n 'N', 'o', 'O', 'p', 'P', 'q', 'Q', 'r', 'R', 's', 'S', 't', 't', 'T',\n 'u', 'U', 'v', 'V', 'w', 'W', 'x', 'X', 'y', 'Y', 'z', 'Z'])\n", (583, 862), True, 'import numpy as np\n'), ((881, 936), 'numpy.array', 'np.array', (["['1', '2', '3', '4', '5', '6', '7', '8', '9']"], {}), "(['1', '2', '3', '4', '5', '6', '7', '8', '9'])\n", (889, 936), True, 'import numpy as np\n'), ((954, 1004), 'numpy.array', 'np.array', (["['!', '@', '#', '$', '%', '^', '&', '*']"], {}), "(['!', '@', '#', '$', '%', '^', '&', '*'])\n", (962, 1004), True, 'import numpy as np\n'), ((2556, 2568), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2564, 2568), True, 'import numpy as np\n'), ((3078, 3090), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3086, 3090), True, 'import numpy as np\n'), ((2261, 2281), 'random.choice', 'rm.choice', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2270, 2281), True, 'import random as rm\n'), ((3002, 3037), 'numpy.count_nonzero', 'np.count_nonzero', (['(raw_password == k)'], {}), '(raw_password == k)\n', (3018, 3037), True, 'import numpy as np\n'), ((3151, 3174), 'random.choice', 'rm.choice', (['raw_password'], {}), '(raw_password)\n', (3160, 3174), True, 'import random as rm\n'), ((2660, 2680), 'random.choice', 'rm.choice', (['word_list'], {}), '(word_list)\n', (2669, 2680), True, 'import random as rm\n'), ((2788, 2810), 'random.choice', 'rm.choice', (['number_list'], {}), '(number_list)\n', (2797, 2810), True, 'import random as rm\n'), ((2932, 2954), 'random.choice', 'rm.choice', (['symbol_list'], 
{}), '(symbol_list)\n', (2941, 2954), True, 'import random as rm\n'), ((3218, 3240), 'numpy.append', 'np.append', (['password', 'a'], {}), '(password, a)\n', (3227, 3240), True, 'import numpy as np\n')] |
#
# Copyright 2018-2020 <NAME>
# 2019 <NAME>
# 2019 <NAME>
# 2015-2016 <NAME>
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Bin for small common helper function and classes for nonuniform
topographies.
"""
import numpy as np
from ..HeightContainer import NonuniformLineScanInterface
def bandwidth(self):
    """Return the lower and upper bounds of a line scan's bandwidth.

    The bandwidth spans the wavelengths or length scales present on a
    topography: its lower end is the mean spacing between neighbouring
    points of the line scan, and its upper end is the overall physical
    length of the scan.

    Returns
    -------
    lower_bound : float
        Lower bound of the bandwidth.
    upper_bound : float
        Upper bound of the bandwidth.
    """
    positions = self.positions()
    mean_spacing = np.diff(positions).mean()
    total_length, = self.physical_sizes
    return mean_spacing, total_length
# Register analysis functions from this module.
# NOTE(review): register_function presumably attaches `bandwidth` as a
# method on nonuniform line-scan containers — confirm in HeightContainer.
NonuniformLineScanInterface.register_function('bandwidth', bandwidth)
| [
"numpy.diff"
] | [((1896, 1906), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (1903, 1906), True, 'import numpy as np\n')] |
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
from astropy.table import Table
from astropy.io import fits
#from astropy.stats import BoxLeastSquares
### Written by <NAME>

# Read in the TESS lightcurve as a FITS file, taking the TIME and SAP_FLUX
# columns (their errors could be pulled out the same way if needed).
# f = fits.open("tess2018206045859-s0001-0000000267263253-111-s_llc.fits")
f = fits.open("toi135.fits")
time = f[1].data['TIME']
flux = f[1].data['SAP_FLUX']

# Indices of individually eyeballed bad points to mask out.  Certainly not
# optimised, but works as a brute-force approach in the short term.
flagvals = [1340, 1346, 2298, 3006, 5119, 5403, 6977, 6983, 6986, 6987, 6998, 7013, 7234, 8233, 8760, 9034, 11809, 13661, 13671, 13849, 13937, 15424, 15763, 15764, 17335, 17352, 17401, 17414, 17415, 17433, 18975, 19002, 19006, 19076, 19240, 19725]

# Mask (as NaN) every point below a generous flux floor, the flagged
# points above, and the contiguous bad stretch 15887-17320.
flux[flux < 29800] = np.nan
flux[flagvals] = np.nan
flux[15887:17321] = np.nan

# Normalise by the NaN-aware mean.  (The previous approach of copying the
# NaNs to zeroes before a plain np.mean dragged the mean down and biased
# normflux; the bias cancelled in `corrected` below but not in the plots.)
mean = np.nanmean(flux)
normflux = flux / mean

# Quick look at the normalised lightcurve.
plt.plot(time, normflux)
plt.show()

# Fit a first-order polynomial only where time and flux are finite
# (i.e. not NaNs), and divide it out to remove the linear trend.
idx = np.isfinite(time) & np.isfinite(normflux)
coefs = np.polynomial.polynomial.polyfit(time[idx], normflux[idx], 1)
ffit = np.polynomial.polynomial.polyval(time, coefs)
corrected = normflux / ffit

# Plot BJD versus the normalised and corrected flux.
plt.plot(time, corrected, linewidth=1)
plt.xlim(1325.0, 1353.3)
plt.xlabel('Barycentric Julian Date')
plt.ylabel('Normalised Flux')
plt.show()

# Convert BJD to JD.  tau is a per-sample scaling factor, which will be
# mostly correct here, but should probably be re-checked for other targets.
tau = 1.3228e-8
time = (time + 2457000) - np.arange(len(time)) * tau

# Plot the final JD versus normalised and corrected flux.
plt.plot(time, corrected, linewidth=1)
plt.xlabel('Julian Date')
plt.ylabel('Normalised Flux')
plt.show()

# Export the corrected data as a new FITS file, ready for phase folding.
data = Table([time, corrected], names=['Time_JD', 'Flux'])
data.write('/data5/thomson/Folder/TOI135.fits', format='fits', overwrite=True)
| [
"numpy.mean",
"astropy.table.Table",
"numpy.polynomial.polynomial.polyfit",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.isnan",
"numpy.isfinite",
"astropy.io.fits.open",
"matplotlib.pyplot.xlim",
"numpy.polynomial.polynomial.polyval",
"matplotlib.py... | [((425, 449), 'astropy.io.fits.open', 'fits.open', (['"""toi135.fits"""'], {}), "('toi135.fits')\n", (434, 449), False, 'from astropy.io import fits\n'), ((1495, 1508), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (1502, 1508), True, 'import numpy as np\n'), ((1573, 1597), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'normflux'], {}), '(time, normflux)\n', (1581, 1597), True, 'import matplotlib.pyplot as plt\n'), ((1597, 1607), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1605, 1607), True, 'import matplotlib.pyplot as plt\n'), ((1796, 1857), 'numpy.polynomial.polynomial.polyfit', 'np.polynomial.polynomial.polyfit', (['time[idx]', 'normflux[idx]', '(1)'], {}), '(time[idx], normflux[idx], 1)\n', (1828, 1857), True, 'import numpy as np\n'), ((1861, 1906), 'numpy.polynomial.polynomial.polyval', 'np.polynomial.polynomial.polyval', (['time', 'coefs'], {}), '(time, coefs)\n', (1893, 1906), True, 'import numpy as np\n'), ((2351, 2389), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'corrected'], {'linewidth': '(1)'}), '(time, corrected, linewidth=1)\n', (2359, 2389), True, 'import matplotlib.pyplot as plt\n'), ((2388, 2412), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1325.0)', '(1353.3)'], {}), '(1325.0, 1353.3)\n', (2396, 2412), True, 'import matplotlib.pyplot as plt\n'), ((2412, 2449), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Barycentric Julian Date"""'], {}), "('Barycentric Julian Date')\n", (2422, 2449), True, 'import matplotlib.pyplot as plt\n'), ((2450, 2479), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Normalised Flux"""'], {}), "('Normalised Flux')\n", (2460, 2479), True, 'import matplotlib.pyplot as plt\n'), ((2480, 2490), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2488, 2490), True, 'import matplotlib.pyplot as plt\n'), ((2766, 2804), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'corrected'], {'linewidth': '(1)'}), '(time, corrected, linewidth=1)\n', (2774, 2804), True, 'import 
matplotlib.pyplot as plt\n'), ((2828, 2853), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Julian Date"""'], {}), "('Julian Date')\n", (2838, 2853), True, 'import matplotlib.pyplot as plt\n'), ((2854, 2883), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Normalised Flux"""'], {}), "('Normalised Flux')\n", (2864, 2883), True, 'import matplotlib.pyplot as plt\n'), ((2884, 2894), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2892, 2894), True, 'import matplotlib.pyplot as plt\n'), ((2975, 3026), 'astropy.table.Table', 'Table', (['[time, corrected]'], {'names': "['Time_JD', 'Flux']"}), "([time, corrected], names=['Time_JD', 'Flux'])\n", (2980, 3026), False, 'from astropy.table import Table\n'), ((1408, 1425), 'numpy.isnan', 'np.isnan', (['flux[a]'], {}), '(flux[a])\n', (1416, 1425), True, 'import numpy as np\n'), ((1746, 1763), 'numpy.isfinite', 'np.isfinite', (['time'], {}), '(time)\n', (1757, 1763), True, 'import numpy as np\n'), ((1766, 1787), 'numpy.isfinite', 'np.isfinite', (['normflux'], {}), '(normflux)\n', (1777, 1787), True, 'import numpy as np\n')] |
import torch.nn as nn
import torch
import random
import librosa
import numpy as np
import mido
import os
# Training / model hyper-parameters.
num_epochs = 10
batch_size = 1
learning_rate = 0.001
input_size = 1025   # spectrogram frequency bins: FRAME_SIZE // 2 + 1
output_size = 88    # piano keys (tnsr2mid maps row i -> MIDI note i + 21)
# sequence_length = 28
hidden_size = 128
num_layers = 2
device = torch.device('cpu')  # inference runs on CPU
# STFT parameters for the spectrogram.
FRAME_SIZE = 2048
HOP_SIZE = 512
# NOTE(review): SAMPLE_RATE is never passed to librosa.load below — confirm
# it matches the loader's default before relying on it.
SAMPLE_RATE = 22050
class Encoder(nn.Module):
    """LSTM encoder: consumes a feature sequence and returns its final
    hidden/cell states for the decoder to start from."""

    def __init__(self, input_size, hidden_size, num_layers):
        super().__init__()
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)

    def forward(self, x):
        # x -> (batch_size, seq_len, input_size)
        state_shape = (self.num_layers, x.size(0), self.hidden_size)
        h0 = torch.zeros(state_shape).to(device)
        c0 = torch.zeros(state_shape).to(device)
        # the per-step outputs are discarded; only the final states matter
        _, (hidden, cell) = self.lstm(x, (h0, c0))
        return hidden, cell
class Decoder(nn.Module):
    """Single-step LSTM decoder: consumes the previous output frame and the
    running (hidden, cell) state, emits sigmoid activations per output unit."""

    def __init__(self, output_size, hidden_size, num_layers):
        super().__init__()
        self.output_size = output_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn = nn.LSTM(output_size, hidden_size, num_layers, batch_first=True)
        self.fc_out = nn.Linear(hidden_size, output_size)

    def forward(self, inpt, hidden, cell):
        step = inpt.unsqueeze(0)
        rnn_out, (hidden, cell) = self.rnn(step, (hidden, cell))
        logits = self.fc_out(rnn_out.squeeze(0))
        prediction = torch.sigmoid(logits)
        # prediction -> [batch size, output dim]
        return prediction, hidden, cell
class Seq2Seq(nn.Module):
    """Encoder-decoder wrapper.

    Runs the encoder over the source sequence once, then unrolls the
    decoder one step at a time, optionally teacher-forcing each step with
    the target sequence.
    """

    def __init__(self, encoder, decoder, device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.device = device
        assert encoder.hidden_size == decoder.hidden_size, \
            "Hidden dimensions of encoder and decoder must be equal!"
        assert encoder.num_layers == decoder.num_layers, \
            "Encoder and decoder must have equal number of layers!"

    def forward(self, src, trg, teacher_forcing_ratio = 0.5):
        """Decode `src` into a (trg_len, batch, output_size) tensor.

        Passing `trg=False` selects pure inference: no teacher forcing,
        and the output length defaults to src_len * 22 steps.  Otherwise
        `trg` supplies both the number of steps (trg.shape[1]) and the
        ground-truth frames for teacher forcing.
        """
        # `trg is False` is the caller's flag for "no target available".
        # (Replaces the previous `str(type(trg)) == "<class 'bool'>"` check
        # and the fully duplicated decode loop for that branch.)
        inference = trg is False
        batch_size = src.shape[0]
        # heuristic output length in inference mode: 22 steps per input frame
        trg_len = src.shape[1] * 22 if inference else trg.shape[1]
        trg_note_count = self.decoder.output_size

        # store decoder outputs (step 0 is left all-zero)
        outputs = torch.zeros(trg_len, batch_size, trg_note_count).to(self.device)

        # last hidden state of the encoder seeds the decoder
        hidden, cell = self.encoder(src.to(self.device))
        hidden, cell = hidden.to(self.device), cell.to(self.device)

        # first input to the decoder is the all-zero frame
        inpt = torch.zeros(batch_size, trg_note_count)
        for t in range(1, trg_len):
            output, hidden, cell = self.decoder(inpt.to(self.device), hidden, cell)
            # place predictions in the tensor holding every step
            outputs[t] = output
            if inference:
                inpt = output
            else:
                # decide whether to feed back the ground truth or the prediction
                teacher_force = random.random() < teacher_forcing_ratio
                inpt = trg[:, t, :] if teacher_force else output
        return outputs
# Instantiate the encoder/decoder pair and wrap them in the seq2seq model.
encoder = Encoder(input_size, hidden_size, num_layers).to(device)
decoder = Decoder(output_size, hidden_size, num_layers).to(device)
model = Seq2Seq(encoder, decoder, device)
def wavfile2spec(wavfile):
    """Load a wav file and return its log-power STFT spectrogram as a tensor."""
    signal, _sr = librosa.load(wavfile)
    stft = librosa.stft(signal, n_fft=FRAME_SIZE, hop_length=HOP_SIZE)
    power = np.abs(stft) ** 2
    return torch.from_numpy(librosa.power_to_db(power))
# Load the trained weights (the checkpoint is a plain state dict).
model_load = torch.load(os.path.join('model', 'notescribe.pt'), map_location=device)
for k, v in model_load.items():
    print(k, v)
# BUG FIX: `load_state_dict` returns an incompatible-keys report, not the
# model, so its result must NOT be assigned back to `model` (the old
# `model = model.load_state_dict(...)` broke `model.eval()` and inference).
model.load_state_dict(model_load)
print('MODEL: ', model, 'TYPE: ', type(model))
model.eval()
def wavfile2midifile(wavfile, out_path):
    """Transcribe a wav file to MIDI and write the result to `out_path`."""
    spec = wavfile2spec(wavfile)
    # (freq, frames) -> (1, frames, freq): a single-item batch for the model
    batch = spec.transpose(0, 1).unsqueeze(0)
    rolled = model(batch, False).squeeze()
    notes = torch.round(rolled).int()
    tnsr2mid(notes).save(out_path)
def tnsr2mid(tnsr, tempo=500000):
    """Convert a (time, 88) piano-roll tensor into a mido MIDI file.

    Each row is one tick.  A positive delta between consecutive rows turns
    a key on (the delta value is used as the velocity); a negative delta
    turns it off.  Row index 0 maps to MIDI note 21.
    """
    roll = tnsr.cpu().detach().numpy()
    # prepend a silent frame so the first row's own values become note-ons
    padded = np.concatenate([np.array([[0] * 88]), np.array(roll)], axis=0)
    deltas = padded[1:] - padded[:-1]

    # MIDI file with a single track carrying the tempo and all note events
    midi_file = mido.MidiFile()
    track = mido.MidiTrack()
    midi_file.tracks.append(track)
    track.append(mido.MetaMessage('set_tempo', tempo=tempo, time=0))

    elapsed = 0  # ticks accumulated since the last emitted message
    for delta in deltas:
        if set(delta) == {0}:
            # nothing changed this tick: just accumulate delta-time
            elapsed += 1
            continue
        pressed = np.where(delta > 0)[0]
        released = np.where(delta < 0)[0]
        # only the first message of the tick carries the accumulated time
        emitted_first = False
        for key, vel in zip(pressed, delta[pressed]):
            gap = 0 if emitted_first else elapsed
            track.append(mido.Message('note_on', note=key + 21, velocity=vel, time=gap))
            emitted_first = True
        for key in released:
            gap = 0 if emitted_first else elapsed
            track.append(mido.Message('note_off', note=key + 21, velocity=0, time=gap))
            emitted_first = True
        elapsed = 0
    return midi_file
"numpy.abs",
"torch.device",
"mido.MetaMessage",
"mido.MidiTrack",
"torch.nn.LSTM",
"numpy.where",
"os.path.join",
"torch.from_numpy",
"mido.Message",
"librosa.power_to_db",
"numpy.array",
"mido.MidiFile",
"torch.nn.Linear",
"librosa.stft",
"random.random",
"torch.zeros",
"librosa.lo... | [((261, 280), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (273, 280), False, 'import torch\n'), ((4831, 4852), 'librosa.load', 'librosa.load', (['wavfile'], {}), '(wavfile)\n', (4843, 4852), False, 'import librosa\n'), ((4867, 4926), 'librosa.stft', 'librosa.stft', (['wavRaw'], {'n_fft': 'FRAME_SIZE', 'hop_length': 'HOP_SIZE'}), '(wavRaw, n_fft=FRAME_SIZE, hop_length=HOP_SIZE)\n', (4879, 4926), False, 'import librosa\n'), ((4976, 5004), 'librosa.power_to_db', 'librosa.power_to_db', (['wavSpec'], {}), '(wavSpec)\n', (4995, 5004), False, 'import librosa\n'), ((5016, 5041), 'torch.from_numpy', 'torch.from_numpy', (['wavSpec'], {}), '(wavSpec)\n', (5032, 5041), False, 'import torch\n'), ((5067, 5105), 'os.path.join', 'os.path.join', (['"""model"""', '"""notescribe.pt"""'], {}), "('model', 'notescribe.pt')\n", (5079, 5105), False, 'import os\n'), ((5848, 5863), 'mido.MidiFile', 'mido.MidiFile', ([], {}), '()\n', (5861, 5863), False, 'import mido\n'), ((5876, 5892), 'mido.MidiTrack', 'mido.MidiTrack', ([], {}), '()\n', (5890, 5892), False, 'import mido\n'), ((556, 618), 'torch.nn.LSTM', 'nn.LSTM', (['input_size', 'hidden_size', 'num_layers'], {'batch_first': '(True)'}), '(input_size, hidden_size, num_layers, batch_first=True)\n', (563, 618), True, 'import torch.nn as nn\n'), ((1298, 1361), 'torch.nn.LSTM', 'nn.LSTM', (['output_size', 'hidden_size', 'num_layers'], {'batch_first': '(True)'}), '(output_size, hidden_size, num_layers, batch_first=True)\n', (1305, 1361), True, 'import torch.nn as nn\n'), ((1393, 1428), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (1402, 1428), True, 'import torch.nn as nn\n'), ((3946, 3985), 'torch.zeros', 'torch.zeros', (['batch_size', 'trg_note_count'], {}), '(batch_size, trg_note_count)\n', (3957, 3985), False, 'import torch\n'), ((4941, 4956), 'numpy.abs', 'np.abs', (['wavSpec'], {}), '(wavSpec)\n', (4947, 4956), True, 'import numpy as np\n'), 
((5943, 5993), 'mido.MetaMessage', 'mido.MetaMessage', (['"""set_tempo"""'], {'tempo': 'tempo', 'time': '(0)'}), "('set_tempo', tempo=tempo, time=0)\n", (5959, 5993), False, 'import mido\n'), ((5702, 5722), 'numpy.array', 'np.array', (['[[0] * 88]'], {}), '([[0] * 88])\n', (5710, 5722), True, 'import numpy as np\n'), ((5724, 5737), 'numpy.array', 'np.array', (['ary'], {}), '(ary)\n', (5732, 5737), True, 'import numpy as np\n'), ((2998, 3037), 'torch.zeros', 'torch.zeros', (['batch_size', 'trg_note_count'], {}), '(batch_size, trg_note_count)\n', (3009, 3037), False, 'import torch\n'), ((3571, 3619), 'torch.zeros', 'torch.zeros', (['trg_len', 'batch_size', 'trg_note_count'], {}), '(trg_len, batch_size, trg_note_count)\n', (3582, 3619), False, 'import torch\n'), ((4345, 4360), 'random.random', 'random.random', ([], {}), '()\n', (4358, 4360), False, 'import random\n'), ((6180, 6196), 'numpy.where', 'np.where', (['(ch > 0)'], {}), '(ch > 0)\n', (6188, 6196), True, 'import numpy as np\n'), ((6264, 6280), 'numpy.where', 'np.where', (['(ch < 0)'], {}), '(ch < 0)\n', (6272, 6280), True, 'import numpy as np\n'), ((6446, 6509), 'mido.Message', 'mido.Message', (['"""note_on"""'], {'note': '(n + 21)', 'velocity': 'v', 'time': 'new_time'}), "('note_on', note=n + 21, velocity=v, time=new_time)\n", (6458, 6509), False, 'import mido\n'), ((6657, 6721), 'mido.Message', 'mido.Message', (['"""note_off"""'], {'note': '(n + 21)', 'velocity': '(0)', 'time': 'new_time'}), "('note_off', note=n + 21, velocity=0, time=new_time)\n", (6669, 6721), False, 'import mido\n'), ((2591, 2639), 'torch.zeros', 'torch.zeros', (['trg_len', 'batch_size', 'trg_note_count'], {}), '(trg_len, batch_size, trg_note_count)\n', (2602, 2639), False, 'import torch\n')] |
import json
import pickle
import sys
import os
import glob
import pandas as pd
import numpy as np
from tqdm import tqdm
from allennlp.predictors.predictor import Predictor
from copy import deepcopy
import torch
from torch import nn
import heapq
import argparse
import allennlp
from allennlp.common.checks import check_for_gpu
if allennlp.__version__ == '0.8.5':
from allennlp.common.util import import_submodules as import_module_and_submodules
elif allennlp.__version__ == '1.1.0':
from allennlp.common.util import import_module_and_submodules
from allennlp.models.archival import load_archive
def normalize_arg_type(arg_type):
    """Drop a leading 'R-'/'C-' (reference/continuation) prefix from a
    PropBank-style argument label; other labels pass through unchanged."""
    prefixed = arg_type[0] in ('R', 'C')
    return arg_type[2:] if prefixed else arg_type
def get_flatten_varg_toks(varg):
    """Flatten a verb-argument structure into one token list in textual order.

    `varg` is a dict with 'V_toks'/'V_span' for the predicate and
    'ARGS_toks'/'ARGS_span'/'ARGS_type' (parallel lists) for its arguments.
    Token groups are ordered by their spans and concatenated.

    Cleanup vs. the original: the argument-type list was built, normalized
    and re-sorted but never used in the result (dead computation), and the
    flatten loop used an unused `enumerate` index; both are removed. The
    length consistency check is kept.
    """
    toks = [varg['V_toks']] + varg['ARGS_toks']
    spans = [varg['V_span']] + varg['ARGS_span']
    # Every token group must come with a span, and every argument a type.
    assert len(toks) == len(spans) and len(toks) == len(varg['ARGS_type']) + 1
    # Sort predicate/argument groups by textual position, then flatten.
    order = sorted(range(len(toks)), key=lambda i: spans[i])
    return [tok for i in order for tok in toks[i]]
def chain_str(chain):
    """Render each event of `chain` as an '<EVENT> verb <ARGS> description' line.

    A space-joined 'Description' is computed lazily and cached on each event
    dict the first time it is needed (NB: this mutates the input dicts).
    """
    lines = []
    for event in chain:
        if 'Description' not in event:
            event['Description'] = " ".join(get_flatten_varg_toks(event))
        verb = " ".join(event['V_toks'])
        lines.append("<EVENT> " + verb + " <ARGS> " + event['Description'])
    return lines
def check_chain_fulfill_constraints(events, constraints):
    """Return True iff every constraint pair (e1, e2) has e1 occurring before
    e2 in `events`.

    Mirrors the original scan exactly: per pair, True when e1 is found first,
    False when e2 is found first, and None (falsy under all()) when neither
    event appears in the chain.
    """
    def _precedes(first, second):
        # Whichever element of the pair appears earliest decides the outcome.
        for event in events:
            if event == first:
                return True
            if event == second:
                return False
        return None  # neither present: constraint counts as unsatisfied

    return all(_precedes(e1, e2) for e1, e2 in constraints)
def predict_on_unseen_events(data, predictor, args, file=sys.stdout):
    """Predict event chains for one instance and print a human-readable report.

    `data` holds the context events, the question event (also present in the
    context), an unseen candidate event, and the gold temporal relation
    ('BEFORE'/'AFTER') between candidate and question.  Returns a list with,
    for each beam, whether the predicted chain satisfies the gold ordering
    constraint (the first entry is used downstream as the model's answer).
    """
    question_event_in_context = data['question_event_in_context']
    question_event_in_context_idx = data['question_event_in_context_idx']
    # Sanity: the question event must be exactly the indexed context event.
    assert data['context_events'][question_event_in_context_idx] == question_event_in_context
    assert data['temp_rel'] in {'BEFORE', 'AFTER'}
    # Encode the gold relation as an ordering constraint (earlier, later).
    if data['temp_rel'] == 'BEFORE':
        constraints = [(data['candidate_event'], question_event_in_context)]
    elif data['temp_rel'] == 'AFTER':
        constraints = [(question_event_in_context, data['candidate_event'])]
    test_json = {
        'events': data['context_events'],
        'cand_event': data['candidate_event'],
        'beams': args.beams,
        'feed_unseen': args.feed_unseen
    }
    output = predictor.predict_json(test_json)
    # --- report: raw instance text -------------------------------------
    print('---'*3, file=file)
    print('##Context##', file=file)
    print(data['context'], file=file)
    print(file=file)
    print('##Question##', file=file)
    print(data['question'], file=file)
    print(file=file)
    print('##Candidate##', file=file)
    print(data['candidate'], file=file)
    print(file=file)
    print("##Relation##", file=file)
    print("[Candidate]", data['temp_rel'], "[Question]", file=file)
    print(file=file)
    # --- report: model-side event representations ----------------------
    print('---'*3, file=file)
    print("input_repr:", file=file)
    for r in chain_str(output['input_vargs']):
        print(r, file=file)
    print(file=file)
    print('---'*3, file=file)
    print("question_repr:", file=file)
    for r in chain_str([question_event_in_context]):
        print(r, file=file)
    print(file=file)
    print('---'*3, file=file)
    print("cand_repr:", file=file)
    for r in chain_str(output['unseen_vargs']):
        print(r, file=file)
    print(file=file)
    # --- report: beam scores and per-beam constraint checks ------------
    print('---'*3, file=file)
    print("Max: {:.4f} - Min: {:.4f} - Mean: {:.4f} - Std: {:.4f} - Best POS: {:.4f}".format(np.max(output['all_beam_scores']), np.min(output['all_beam_scores']), np.mean(output['all_beam_scores']), np.std(output['all_beam_scores']), output["best_pos_score"]), file=file)
    beam_matches = []
    for b_idx, pred in enumerate(output['beam_pred']):
        # Strip the separator marker some model outputs carry, if present.
        if "EVENT_SEP" in pred['pred_vargs'][0]:
            for v in pred['pred_vargs']:
                v.pop("EVENT_SEP")
        # Both target events must appear in the predicted chain.
        assert question_event_in_context in pred['pred_vargs']
        assert data['candidate_event'] in pred['pred_vargs']
        match = check_chain_fulfill_constraints(pred['pred_vargs'], constraints)
        beam_matches.append(match)
        print("Beam {:d} (gold: {} - score: {:.4f})".format(b_idx, match, pred['score']), file=file)
        for r in chain_str(pred['pred_vargs']):
            print(r, file=file)
        print(file=file)
    print("\n\n", file=file)
    return beam_matches
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='test the predictor above')
    parser.add_argument('--archive-path', type=str, required=True, help='path to trained archive file')
    parser.add_argument('--predictor', type=str, required=True, help='name of predictor')
    parser.add_argument('--weights-file', type=str,
                        help='a path that overrides which weights file to use')
    parser.add_argument('--cuda-device', type=int, default=-1, help='id of GPU to use (if any)')
    parser.add_argument('-o', '--overrides', type=str, default="",
                        help='a JSON structure used to override the experiment configuration')
    parser.add_argument('--include-package',
                        type=str,
                        action='append',
                        default=[],
                        help='additional packages to include')
    parser.add_argument('--input-path', type=str, nargs='+', help='input data')
    parser.add_argument('--beams', type=int, help='beam size', default=1)
    parser.add_argument('--num_instances', type=int, default=-1,
                        help='number of instances to process')
    parser.add_argument('--feed-unseen', action='store_true', help='whether to feed unseen events as inputs', default=False)
    args = parser.parse_args()

    # Import extra packages so their registered allennlp components are visible.
    for package_name in args.include_package:
        import_module_and_submodules(package_name)

    check_for_gpu(args.cuda_device)
    archive = load_archive(args.archive_path,
                           weights_file=args.weights_file,
                           cuda_device=args.cuda_device,
                           overrides=args.overrides)
    predictor = Predictor.from_archive(archive, args.predictor)

    # Collect instances from every JSON file matching the given glob patterns.
    data = []
    for path_regex in args.input_path:
        for path in sorted(glob.glob(path_regex)):
            with open(path, 'r') as f:
                data += json.load(f)
    if args.num_instances > 0:
        data = data[:args.num_instances]
    print("Num Instances:", len(data))

    # 2x2 confusion matrix over the temporal-relation labels.
    total_confusion = {
        "gold BEFORE": {
            "pred BEFORE": 0.,
            "pred AFTER": 0.
        },
        "gold AFTER": {
            "pred BEFORE": 0.,
            "pred AFTER": 0.
        }
    }
    total_correct = 0.
    total_examples = 0
    for d in tqdm(data):
        beam_matches = predict_on_unseen_events(d, predictor, args)
        # The top beam decides: if it satisfies the gold constraint the
        # prediction equals the gold label, otherwise the opposite label.
        if beam_matches[0]:
            pred_temp_rel = d['temp_rel']
        elif d['temp_rel'] == 'BEFORE':
            pred_temp_rel = 'AFTER'
        else:
            pred_temp_rel = 'BEFORE'
        total_confusion['gold '+d['temp_rel']]['pred '+pred_temp_rel] += 1
        total_correct += int(beam_matches[0])
        total_examples += 1

    # Sanity: confusion-matrix totals must agree with the running counters
    # (gk[5:]/pk[5:] strip the 'gold '/'pred ' prefixes to compare labels).
    assert sum(pv for gk, gv in total_confusion.items() for pk, pv in gv.items()) == total_examples
    assert sum(pv for gk, gv in total_confusion.items() for pk, pv in gv.items() if gk[5:] == pk[5:]) == total_correct
    print("Acc: {:.4f} ({:.4f} / {:d})".format(total_correct / total_examples, total_correct, total_examples))

    def label_prf1(label):
        """Precision, recall and F1 for one relation label from the confusion
        counts, with zero denominators mapped to 0. (replaces the duplicated
        BEFORE/AFTER computations of the original)."""
        tp = total_confusion['gold ' + label]['pred ' + label]
        gold_total = sum(pv for pv in total_confusion['gold ' + label].values())
        recl = tp / gold_total if gold_total > 0 else 0.
        pred_total = sum(gv['pred ' + label] for gv in total_confusion.values())
        prec = tp / pred_total if pred_total > 0 else 0.
        f1 = (2 * prec * recl) / (prec + recl) if prec + recl > 0 else 0.
        return prec, recl, f1

    prec, recl, before_f1 = label_prf1('BEFORE')
    print("BEFORE P: {:.4f} - R: {:.4f} - F1: {:.4f}".format(prec, recl, before_f1))
    prec, recl, after_f1 = label_prf1('AFTER')
    print("AFTER P: {:.4f} - R: {:.4f} - F1: {:.4f}".format(prec, recl, after_f1))
    macro_f1 = (before_f1 + after_f1) / 2.
    # NOTE(review): stray ')' in this output string kept for compatibility
    # with any downstream log parsing.
    print("Macro F1: {:.4f})".format(macro_f1))
| [
"numpy.mean",
"argparse.ArgumentParser",
"numpy.std",
"allennlp.common.util.import_module_and_submodules",
"tqdm.tqdm",
"allennlp.predictors.predictor.Predictor.from_archive",
"numpy.max",
"glob.glob",
"allennlp.models.archival.load_archive",
"numpy.min",
"json.load",
"allennlp.common.checks.c... | [((4794, 4857), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""test the predictor above"""'}), "(description='test the predictor above')\n", (4817, 4857), False, 'import argparse\n'), ((6224, 6255), 'allennlp.common.checks.check_for_gpu', 'check_for_gpu', (['args.cuda_device'], {}), '(args.cuda_device)\n', (6237, 6255), False, 'from allennlp.common.checks import check_for_gpu\n'), ((6270, 6394), 'allennlp.models.archival.load_archive', 'load_archive', (['args.archive_path'], {'weights_file': 'args.weights_file', 'cuda_device': 'args.cuda_device', 'overrides': 'args.overrides'}), '(args.archive_path, weights_file=args.weights_file, cuda_device\n =args.cuda_device, overrides=args.overrides)\n', (6282, 6394), False, 'from allennlp.models.archival import load_archive\n'), ((6488, 6535), 'allennlp.predictors.predictor.Predictor.from_archive', 'Predictor.from_archive', (['archive', 'args.predictor'], {}), '(archive, args.predictor)\n', (6510, 6535), False, 'from allennlp.predictors.predictor import Predictor\n'), ((6176, 6218), 'allennlp.common.util.import_module_and_submodules', 'import_module_and_submodules', (['package_name'], {}), '(package_name)\n', (6204, 6218), False, 'from allennlp.common.util import import_module_and_submodules\n'), ((7126, 7136), 'tqdm.tqdm', 'tqdm', (['data'], {}), '(data)\n', (7130, 7136), False, 'from tqdm import tqdm\n'), ((3872, 3905), 'numpy.max', 'np.max', (["output['all_beam_scores']"], {}), "(output['all_beam_scores'])\n", (3878, 3905), True, 'import numpy as np\n'), ((3907, 3940), 'numpy.min', 'np.min', (["output['all_beam_scores']"], {}), "(output['all_beam_scores'])\n", (3913, 3940), True, 'import numpy as np\n'), ((3942, 3976), 'numpy.mean', 'np.mean', (["output['all_beam_scores']"], {}), "(output['all_beam_scores'])\n", (3949, 3976), True, 'import numpy as np\n'), ((3978, 4011), 'numpy.std', 'np.std', (["output['all_beam_scores']"], {}), 
"(output['all_beam_scores'])\n", (3984, 4011), True, 'import numpy as np\n'), ((6617, 6638), 'glob.glob', 'glob.glob', (['path_regex'], {}), '(path_regex)\n', (6626, 6638), False, 'import glob\n'), ((6704, 6716), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6713, 6716), False, 'import json\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2018 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# DOC
# =============================================================================
""""""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
from .core import Extractor
# =============================================================================
# EXTRACTOR CLASS
# =============================================================================
class DeltamDeltat(Extractor):
    r"""
    **DMDT (Delta Magnitude - Delta Time Mapping)**

    The 2D representations - called dmdt-images hereafter -
    reflect the underlying structure from variability of the source.
    The dmdt-images are translation independent as they consider
    only the differences in time.

    For each pair of points in a light curve we determine
    the difference in magnitude (dm) and the difference in
    time (dt). This gives us $p = \binom{n}{2} = n(n-1)/2$ points
    for a light curve of length $n$. These points
    are then binned for a range of dm and dt values. The
    resulting binned 2D representation is our 2D mapping from
    the light curve.

    .. code-block:: pycon

        >>> fs = feets.FeatureSpace(only=['DMDT'])
        >>> rs = fs.extract(**lc_normal)
        >>> rs.as_dict()
        {'DMDT': array([[0, 0, 1, 1, ..., ]])},

    References
    ----------

    .. [Mahabal2017] <NAME>., <NAME>., <NAME>., <NAME>.,
       <NAME>., <NAME>., & <NAME>. (2017, November).
       Deep-learn classification of light curves. In 2017 IEEE Symposium
       Series on Computational Intelligence (SSCI) (pp. 1-8). IEEE.

    """

    data = ["magnitude", "time"]
    params = {
        # Logarithmically spaced dt bins starting at 0.
        "dt_bins": np.hstack([0.0, np.logspace(-3.0, 3.5, num=23)]),
        # Symmetric logarithmic dm bins around 0.
        "dm_bins": np.hstack(
            [-1.0 * np.logspace(1, -1, num=12), 0, np.logspace(-1, 1, num=12)]
        ),
    }
    features = ["DMDT"]

    def fit(self, magnitude, time, dt_bins, dm_bins):
        """Build the binned dmdt-image, rescaled to 0-255 integer values."""

        def delta_calc(idx):
            # (dt, dm) of observation `idx` paired with every later one.
            t0 = time[idx]
            m0 = magnitude[idx]
            deltat = time[idx + 1 :] - t0
            deltam = magnitude[idx + 1 :] - m0

            # Flip both signs for out-of-order pairs (negative dt).  The mask
            # must be computed once up front: the original recomputed it after
            # flipping deltat, so the deltam flip could never fire.
            negative = np.where(deltat < 0)
            deltat[negative] *= -1
            deltam[negative] *= -1
            return np.column_stack((deltat, deltam))

        lc_len = len(time)
        n_vals = int(0.5 * lc_len * (lc_len - 1))  # number of pairs n*(n-1)/2

        deltas = np.vstack(tuple(delta_calc(idx) for idx in range(lc_len - 1)))

        deltat = deltas[:, 0]
        deltam = deltas[:, 1]

        bins = [dt_bins, dm_bins]
        # density=False yields raw counts; the old `normed` keyword was
        # removed from numpy in 1.24.
        counts = np.histogram2d(deltat, deltam, bins=bins, density=False)[0]
        # Scale counts to 0-255; the +0.999 offset makes any nonzero cell >= 1.
        result = np.fix(255.0 * counts / n_vals + 0.999).astype(int)
        return {"DMDT": result}
| [
"numpy.where",
"numpy.fix",
"numpy.column_stack",
"numpy.histogram2d",
"numpy.logspace"
] | [((3527, 3560), 'numpy.column_stack', 'np.column_stack', (['(deltat, deltam)'], {}), '((deltat, deltam))\n', (3542, 3560), True, 'import numpy as np\n'), ((3833, 3888), 'numpy.histogram2d', 'np.histogram2d', (['deltat', 'deltam'], {'bins': 'bins', 'normed': '(False)'}), '(deltat, deltam, bins=bins, normed=False)\n', (3847, 3888), True, 'import numpy as np\n'), ((2995, 3025), 'numpy.logspace', 'np.logspace', (['(-3.0)', '(3.5)'], {'num': '(23)'}), '(-3.0, 3.5, num=23)\n', (3006, 3025), True, 'import numpy as np\n'), ((3110, 3136), 'numpy.logspace', 'np.logspace', (['(-1)', '(1)'], {'num': '(12)'}), '(-1, 1, num=12)\n', (3121, 3136), True, 'import numpy as np\n'), ((3432, 3452), 'numpy.where', 'np.where', (['(deltat < 0)'], {}), '(deltat < 0)\n', (3440, 3452), True, 'import numpy as np\n'), ((3479, 3499), 'numpy.where', 'np.where', (['(deltat < 0)'], {}), '(deltat < 0)\n', (3487, 3499), True, 'import numpy as np\n'), ((3909, 3948), 'numpy.fix', 'np.fix', (['(255.0 * counts / n_vals + 0.999)'], {}), '(255.0 * counts / n_vals + 0.999)\n', (3915, 3948), True, 'import numpy as np\n'), ((3079, 3105), 'numpy.logspace', 'np.logspace', (['(1)', '(-1)'], {'num': '(12)'}), '(1, -1, num=12)\n', (3090, 3105), True, 'import numpy as np\n')] |
import pandas as pd
from re import match
import numpy as np
import sys, glob
from termcolor import colored, cprint
from pathlib import Path
def logprint(x):
    """Print x in bold red (progress/log messages); PEP 8 prefers def over
    assigning a lambda to a name."""
    cprint(x, 'red', attrs=["bold"])


def msgprint(x):
    """Print x in bold green (status messages)."""
    cprint(x, 'green', attrs=["bold"])
def procs_mi(fin, fout):
    """Split a square mutual-information matrix into MI-sorted edge lists.

    Reads the TSV matrix `fin` whose columns are genes (Ensembl 'ENS...' ids)
    followed by miRNAs, and writes three files: `fout`-gengen.tsv (gene-gene,
    upper triangle only, so each pair appears once), `fout`-genmirna.tsv
    (gene-miRNA) and `fout`-all.tsv (both combined). Exits with status 15 if
    the gene block contains NAs.
    """
    # mat = pd.read_csv("expr-all-ctrl-complete.tsv", sep = "\t")
    mat = pd.read_csv(fin, sep="\t")
    mat.index = mat.columns
    msgprint("Size of matrix: " + str(mat.shape))
    # Genes are the columns carrying an Ensembl 'ENS' prefix (equivalent to
    # the original regex match('^ENS', ...), which is anchored at the start).
    genes = [v for v in mat.columns if v.startswith('ENS')]
    ngenes = len(genes)  # 16290
    msgprint("Genes without miRNAs: " + str(ngenes))
    if mat.iloc[:ngenes, :].isnull().sum().sum() != 0:
        print("NAs on mirna-gen matrix...")
        sys.exit(15)
    # --- gene-gene block: keep only the strict upper triangle (k=1) to drop
    # the diagonal and the duplicate symmetric pairs.
    gen_gen = mat.iloc[:ngenes, :ngenes]
    gen_gen.index = gen_gen.columns
    # builtin `bool` here: the np.bool alias was removed in numpy 1.24.
    gen_gen = gen_gen.where(np.triu(np.ones(gen_gen.shape), 1).astype(bool))
    gen_gen = gen_gen.stack().reset_index()
    gen_gen.columns = ['Source', 'Target', 'MI']
    gen_gen = gen_gen.sort_values('MI', ascending=False)
    print(gen_gen)
    msgprint("Writing: " + fout + '-gengen.tsv')
    gen_gen.to_csv(fout + '-gengen.tsv',
                   index=False, header=True, sep='\t')
    # gen-gen interactions: 132673905
    # --- gene-miRNA block (rows = genes, remaining columns = miRNAs).
    gen_mirna = mat.iloc[:ngenes, ngenes:]
    gen_mirna = gen_mirna.stack().reset_index()
    gen_mirna.columns = ['Source', 'Target', 'MI']
    gen_mirna = gen_mirna.sort_values('MI', ascending=False)
    print(gen_mirna)
    msgprint("Writing: " + fout + '-genmirna.tsv')
    gen_mirna.to_csv(fout + '-genmirna.tsv',
                     index=False, header=True, sep='\t')
    # gen-miRNA interactions:
    # --- combined edge list.
    alldata = pd.concat([gen_gen, gen_mirna])
    alldata = alldata.sort_values('MI', ascending=False)
    print(alldata)
    msgprint("Writing: " + fout + '-all.tsv')
    alldata.to_csv(fout + '-all.tsv',
                   index=False, header=True, sep='\t')
######################################################################
## MAIN
# Process every "*-complete.tsv" MI matrix in the working directory and write
# the derived edge lists under expr-miRNA/ (created on demand).
Path("expr-miRNA").mkdir(parents=True, exist_ok=True)
for tsv in sorted(glob.glob('*-complete.tsv')):
    logprint("Using file: " + tsv)
    # Output prefix: the input name without its trailing "-complete" part.
    stem = '-'.join(tsv.split('-')[:-1])
    procs_mi(tsv, 'expr-miRNA/' + stem)
| [
"numpy.ones",
"pandas.read_csv",
"pathlib.Path",
"re.match",
"pandas.concat",
"sys.exit",
"termcolor.cprint",
"glob.glob"
] | [((163, 195), 'termcolor.cprint', 'cprint', (['x', '"""red"""'], {'attrs': "['bold']"}), "(x, 'red', attrs=['bold'])\n", (169, 195), False, 'from termcolor import colored, cprint\n'), ((217, 251), 'termcolor.cprint', 'cprint', (['x', '"""green"""'], {'attrs': "['bold']"}), "(x, 'green', attrs=['bold'])\n", (223, 251), False, 'from termcolor import colored, cprint\n'), ((351, 377), 'pandas.read_csv', 'pd.read_csv', (['fin'], {'sep': '"""\t"""'}), "(fin, sep='\\t')\n", (362, 377), True, 'import pandas as pd\n'), ((1568, 1599), 'pandas.concat', 'pd.concat', (['[gen_gen, gen_mirna]'], {}), '([gen_gen, gen_mirna])\n', (1577, 1599), True, 'import pandas as pd\n'), ((1946, 1973), 'glob.glob', 'glob.glob', (['"""*-complete.tsv"""'], {}), "('*-complete.tsv')\n", (1955, 1973), False, 'import sys, glob\n'), ((715, 727), 'sys.exit', 'sys.exit', (['(15)'], {}), '(15)\n', (723, 727), False, 'import sys, glob\n'), ((1872, 1890), 'pathlib.Path', 'Path', (['"""expr-miRNA"""'], {}), "('expr-miRNA')\n", (1876, 1890), False, 'from pathlib import Path\n'), ((512, 528), 're.match', 'match', (['"""^ENS"""', 'v'], {}), "('^ENS', v)\n", (517, 528), False, 'from re import match\n'), ((832, 854), 'numpy.ones', 'np.ones', (['gen_gen.shape'], {}), '(gen_gen.shape)\n', (839, 854), True, 'import numpy as np\n')] |
from itertools import product
import os
import matplotlib.pyplot as plt
from multiprocessing import Pool
import numpy as np
from palettable.colorbrewer.qualitative import Paired_12, Set2_8, Dark2_8, Pastel2_8, Pastel1_9
import pandas as pd
import seaborn as sns
from scipy.signal import argrelmax
from scipy.stats import mannwhitneyu, lognorm, norm
import process_csv
from process_csv import DATA_DIR
import utils
N_PROC = 60
def load_text_summary():
    """Load the two octave-usage annotation columns from the 'source_list'
    sheet of the scales database, with missing entries blanked to ''."""
    df = pd.read_excel('../scales_database.xlsx', "source_list")
    cols = [
        "Players exhibit octave?",
        "Sources indicate that octave is generally used in culture?",
    ]
    for col in cols:
        df.loc[df[col].isnull(), col] = ''
    return df.loc[:, cols]
def get_md2(ints):
if isinstance(ints, str):
ints = np.array([float(x) for x in ints.split(';')])
return np.min([np.sum(np.roll(ints, i)[:2]) for i in range(len(ints))])
# md2 = np.array([np.sum(np.roll(poss, i, axis=1)[:,:2], axis=1) for i in range(7)]).min(axis=0)
def instrument_tunings():
    """Load instrument tunings from sheets B-F of the scales database.

    Adds derived columns: 'scale' (cumulative intervals), 'max_scale',
    'min_int'/'max_int' (smallest/largest step) and 'AllInts' (intervals
    between every pair of scale degrees).
    """
    sheets = [pd.read_excel('../scales_database.xlsx', f"scales_{code}")
              for code in 'BCDEF']
    df = pd.concat(sheets, ignore_index=True)
    df['Intervals'] = df.Intervals.apply(utils.str_to_ints)
    df['scale'] = df.Intervals.apply(np.cumsum)
    df['max_scale'] = df.scale.apply(max)
    df['min_int'] = df.Intervals.apply(min)
    df['max_int'] = df.Intervals.apply(max)
    # All intervals between pairs of notes: cumulative sums of every suffix
    # of the step list (except the last single step's trivial suffix).
    df['AllInts'] = df.Intervals.apply(
        lambda steps: [v for start in range(len(steps) - 1)
                       for v in np.cumsum(steps[start:])])
    return df
def octave_chance(df, n_rep=10, plot=False, octave=1200, w=50):
    """Compare real vs. interval-shuffled distributions around `octave` cents.

    Pools intervals over all scales, shuffles each scale's step order `n_rep`
    times as a null model, optionally plots both distributions, and prints a
    Mann-Whitney U comparison of the absolute deviations from `octave`.
    Results are printed only; nothing is returned.
    """
    # Keep only scales whose second-to-last note reaches at least octave - w.
    df = df.loc[df.scale.apply(lambda x: x[-2] >= octave-w)]
    print(len(df))
    ints = df.Intervals.values
    # all_ints = np.array([x for y in ints for x in np.cumsum(y)])
    # Intervals between every pair of notes (cumsum of every suffix).
    all_ints = np.array([x for y in ints for i in range(len(y)) for x in np.cumsum(y[i:])])
    oct_real = all_ints[(all_ints>=octave-w)&(all_ints<=octave+w)]
    print(len(oct_real), len(oct_real) / len(all_ints))
    # Null model: shuffle the step order within each scale, n_rep times.
    shuffled_ints = []
    for j in range(n_rep):
        for i in ints:
            # choice with replace=False and full size == a random permutation
            ran = np.random.choice(i, replace=False, size=len(i))
            # for k in np.cumsum(ran):
            #     shuffled_ints.append(k)
            for k in range(len(ran)):
                for m in np.cumsum(ran[k:]):
                    shuffled_ints.append(m)
    shuffled_ints = np.array(shuffled_ints)
    idx = (shuffled_ints>=octave-w)&(shuffled_ints<=octave+w)
    oct_shuf = shuffled_ints[idx]
    print(len(oct_shuf) / len(shuffled_ints))
    if plot:
        fig, ax = plt.subplots(1,2)
        # Left: |deviation from octave|; right: raw interval values.
        sns.distplot(np.abs(oct_real-octave), bins=np.arange(0, w+10, 10), kde=False, norm_hist=True, ax=ax[0])
        sns.distplot(np.abs(oct_shuf-octave), bins=np.arange(0, w+10, 10), kde=False, norm_hist=True, ax=ax[0])
        sns.distplot(oct_real, bins=np.arange(octave-w, octave+w+10, 10), kde=False, norm_hist=True, ax=ax[1])
        sns.distplot(oct_shuf, bins=np.arange(octave-w, octave+w+10, 10), kde=False, norm_hist=True, ax=ax[1])
    # Significance and mean absolute deviations, real vs. shuffled.
    print(mannwhitneyu(np.abs(oct_real-octave), np.abs(oct_shuf-octave)))
    print(np.mean(np.abs(oct_real-octave)))
    print(np.mean(np.abs(oct_shuf-octave)))
def label_sig(p):
    """Map a p-value to a significance marker.

    p >= 0.05 -> 'NS', then one extra star per factor-of-ten step down to
    0.00005. NB: p below 0.00005 falls through and yields None, matching the
    original's implicit behavior.
    """
    levels = ((0.05, "NS"), (0.005, '*'), (0.0005, '**'), (0.00005, '***'))
    for cutoff, marker in levels:
        if p >= cutoff:
            return marker
    return None
def octave_chance_individual(df, n_rep=50, plot=False, octave=1200, w1=100, w2=20):
    """Per-scale shuffle test of interval clustering around `octave` cents.

    For each scale, compares intervals within `w1` of `octave` against a
    null model built by shuffling the step order `n_rep` times; `w2` is the
    tighter window used for the fraction statistics. Returns a DataFrame
    with one row per scale, including a Mann-Whitney p-value ('MWU') and its
    significance label ('sig'). `plot` is accepted but unused here.
    """
    # Keep only scales whose second-to-last note reaches the target interval.
    df = df.loc[df.scale.apply(lambda x: x[-2] >= octave)]
    ints = df.Intervals.values
    res = pd.DataFrame(columns=["max_scale", "n_notes", "ints", "oct_real", "oct_shuf", "mean_real", "mean_shuf", "MWU", "f_real", "f_shuf"])
    for i in ints:
        # All intervals between pairs of notes (cumsum of every suffix).
        all_ints = np.array([x for j in range(len(i)) for x in np.cumsum(i[j:])])
        oct_real = all_ints[(all_ints>=octave-w1)&(all_ints<=octave+w1)]
        f_real = sum(np.abs(all_ints-octave)<=w2) / len(all_ints)
        mean_real = np.mean(np.abs(oct_real-octave))
        # Null model: n_rep random permutations of this scale's steps.
        shuffled_ints = []
        for j in range(n_rep):
            ran = np.random.choice(i, replace=False, size=len(i))
            for k in range(len(ran)):
                for m in np.cumsum(ran[k:]):
                    shuffled_ints.append(m)
        shuffled_ints = np.array(shuffled_ints)
        idx = (shuffled_ints>=octave-w1)&(shuffled_ints<=octave+w1)
        oct_shuf = shuffled_ints[idx]
        f_shuf = sum(np.abs(shuffled_ints-octave)<=w2) / len(shuffled_ints)
        mean_shuf = np.mean(np.abs(oct_shuf-octave))
        try:
            mwu = mannwhitneyu(np.abs(oct_real-octave), np.abs(oct_shuf-octave))[1]
        except ValueError:
            # mannwhitneyu raises e.g. when one side is empty/identical;
            # treat as maximally non-significant.
            mwu = 1
        res.loc[len(res)] = [sum(i), len(i), i, oct_real, oct_shuf, mean_real, mean_shuf, mwu, f_real, f_shuf]
    res['sig'] = res.MWU.apply(label_sig)
    return res
def create_new_scales(df, n_rep=10):
    """Build `n_rep` null-model copies of `df`.

    Each copy keeps every scale's note count but redraws its step intervals
    (with replacement) from the pool of all observed intervals; the 'scale'
    column is recomputed from the new steps.
    """
    pooled = [x for y in df.Intervals for x in y]
    sizes = df.scale.apply(len).values
    copies = []
    for _ in range(n_rep):
        resampled = [np.random.choice(pooled, replace=True, size=n) for n in sizes]
        clone = df.copy()
        clone.Intervals = resampled
        clone['scale'] = clone.Intervals.apply(np.cumsum)
        copies.append(clone)
    return copies
def ideal_scale(ints, sigma):
    """Make an idealized periodic scale from `ints` plus Gaussian jitter.

    The step list is truncated at the point where its cumulative sum is
    closest to 1200 cents, rescaled so that truncated period spans exactly
    1200, tiled back out to the original length, and perturbed with
    N(0, sigma) noise on each step.
    """
    n_steps = len(ints)
    # Index whose cumulative sum lands nearest 1200 (steps up to, not
    # including, that index form one period).
    cutoff = np.argmin(np.abs(np.cumsum(ints) - 1200))
    period = ints[:cutoff] * 1200 / np.sum(ints[:cutoff])
    tiled = np.array([period[i % len(period)] for i in range(n_steps)])
    return tiled + np.random.normal(0, sigma, size=n_steps)
def create_ideal_scales(df):
    """Generate idealized versions of the scales in `df` across jitter levels.

    Step intervals are resampled (with replacement) from the pool of steps
    below 800 cents, then idealized via `ideal_scale` with noise sigma in
    0, 5, ..., 50. Returns (sigma_values, list_of_dataframes).
    """
    pooled = [x for y in df.Intervals for x in y if x < 800]
    sizes = df.scale.apply(len).values
    sigma = np.arange(0, 55, 5)
    out = []
    for s in sigma:
        resampled = [ideal_scale(np.random.choice(pooled, replace=True, size=n), s)
                     for n in sizes]
        clone = df.copy()
        clone.Intervals = resampled
        out.append(clone)
    return sigma, out
def get_stats(df, i, k, w1=100, w2=20, n_rep=50, nrep2=100):
    """Repeat the per-scale shuffle test `nrep2` times at interval size `i`.

    Counts, per repetition, how many scales are significantly closer to `i`
    than chance, significantly farther, or not significant. Saves the raw
    3 x nrep2 counts to ../IntStats/ (tagged by `k`) and returns their means.
    """
    out = np.zeros((3,nrep2), float)
    path = f"../IntStats/{k}_w1{w1}_w2{w2}_I{i:04d}.npy"
    print(path)
    for j in range(nrep2):
        res = octave_chance_individual(df, octave=i, n_rep=n_rep, w1=w1, w2=w2)
        # Row 0: significant & real intervals closer to i than shuffled;
        # row 1: significant & farther; row 2: not significant.
        out[0,j] = len(res.loc[(res.MWU<0.05)&(res.mean_real<res.mean_shuf)])
        out[1,j] = len(res.loc[(res.MWU<0.05)&(res.mean_real>res.mean_shuf)])
        out[2,j] = len(res.loc[(res.MWU>=0.05)])
    np.save(path, out)
    return out.mean(axis=1)
def get_inst_subsample(df, xsamp, N):
    """Stratified resample of `df` by column `xsamp`.

    Draws (with replacement) up to N rows from each group; groups smaller
    than N contribute draws equal to their own size.
    """
    chosen = []
    for group in df[xsamp].unique():
        members = df.loc[df[xsamp] == group].index
        draws = np.random.choice(members, replace=True, size=min(N, len(members)))
        chosen.extend(list(draws))
    return df.loc[chosen]
def unexpected_intervals(df):
    """Run the shuffle-test scan over interval sizes 200-2600 cents for many
    controls: leave-one-region-out, region/culture subsamples, a grid of
    window sizes (w1, w2), resampled null scales, and idealized scales.

    Each configuration fans out over all interval sizes with a process pool
    and writes its results via get_stats (side effects only; the `res`
    return values are discarded).
    """
    ints = np.arange(200, 2605, 5)
    # Leave-one-region-out controls.
    for c in df['Region'].unique():
        alt_df = df.loc[df["Region"]!=c]
        with Pool(N_PROC) as pool:
            res = pool.starmap(get_stats, product([alt_df], ints, [c], [100], [20]), 7)
    # Region-balanced subsamples (10 per region), repeated 3 times.
    for i in range(3):
        alt_df = get_inst_subsample(df, 'Region', 10)
        with Pool(N_PROC) as pool:
            res = pool.starmap(get_stats, product([alt_df], ints, [f"contsamp{i}"], [100], [20]), 5)
    # Culture-balanced subsamples (5 per culture), repeated 3 times.
    for i in range(3):
        alt_df = get_inst_subsample(df, 'Culture', 5)
        with Pool(N_PROC) as pool:
            res = pool.starmap(get_stats, product([alt_df], ints, [f"cultsamp{i}"], [100], [20]), 5)
    # Only these columns are needed from here on (lighter to pickle to workers).
    df = df.loc[:, ['Intervals', 'scale']]
    # Window-size sensitivity grid.
    w1_list = [50, 75, 100, 125, 150, 175, 200]
    w2_list = [5, 10, 15, 20, 30, 40]
    for w1 in w1_list:
        for w2 in w2_list:
            with Pool(N_PROC) as pool:
                res = pool.starmap(get_stats, product([df], ints, [0], [w1], [w2]), 7)
    # Null model: scales with intervals resampled from the global pool.
    alt_df = create_new_scales(df, n_rep=3)
    with Pool(N_PROC) as pool:
        for i in range(3):
            res = pool.starmap(get_stats, product([alt_df[i]], ints, [i+1]), 9)
    # Idealized periodic scales across jitter levels sigma.
    sigma, ideal_df = create_ideal_scales(df)
    with Pool(N_PROC) as pool:
        for i, s in enumerate(sigma):
            res = pool.starmap(get_stats, product([ideal_df[i]], ints, [f"sigma{s}"]), 9)
def get_norm_posterior(Y, s, m):
    """Unnormalized posterior density over normal parameters (mean m, scale s)
    given observations Y; `s` and `m` may be scalars or broadcastable grids.

    NOTE(review): the closed-form expression is taken as-is from the original
    (it implies a particular prior); verify against its derivation before
    reuse. Cleanup: the original computed an unused local `c`
    (-sum(Y^2)/(2 s^2)), which is removed here.
    """
    n = len(Y)
    sy = np.sum(Y)
    sy2 = np.sum(np.square(Y))
    a = n / (2 * s**2)
    b = sy / (s**2)
    # A = 0.5 * sum((y - m)^2), expanded.
    A = 0.5 * (sy2 + n * m**2 - 2 * m * sy)
    left = (a/np.pi)**0.5 * np.exp(-a * m**2 + b * m - b**2 / (4*a))
    right = A**(n/2) / (2*np.pi*n) * np.exp(-A / s**2 - n*np.log(n)-1) / s**(n+2)
    return left * right
def evaluate_best_fit_lognorm(df):
    """Posterior grid over log-space normal parameters for pooled intervals.

    Samples 6 scales' 'AllInts' per region (with replacement), pools and
    logs the intervals, and evaluates get_norm_posterior on a grid of
    scale values in (0, 2] and means in [log 25, log 6000].
    """
    sampled = [x
               for region in df.Region.unique()
               for y in np.random.choice(df.loc[df.Region == region, "AllInts"], size=6)
               for x in y]
    logged = np.log(np.array(sampled))
    s_grid = np.linspace(0, 2, 1001)[1:]  # drop s == 0 (degenerate)
    m_grid = np.linspace(np.log(25), np.log(6000), 1001)
    si, mi = np.meshgrid(s_grid, m_grid)
    return get_norm_posterior(logged, si, mi)
def get_int_prob_via_sampling(df, ysamp='AllInts', xsamp='Region', s=6, ax='', fa=0.5):
    """Plot the interval histogram against a fitted lognormal with a
    bootstrap confidence band.

    If `xsamp` is non-empty, intervals are pooled from `s` randomly chosen
    scales per `xsamp` group; otherwise all rows' `ysamp` lists are pooled.
    Plots onto `ax` (a new figure is created when `ax` is left as a string).
    Nothing is returned; the plot is the output.
    """
    if len(xsamp):
        Y = [x for c in df[xsamp].unique() for y in np.random.choice(df.loc[df[xsamp]==c, ysamp], size=s) for x in y]
    else:
        Y = [x for y in df[ysamp] for x in y]
    # Yl = np.log(np.array(Y))
    # print(norm.fit(Yl))
    col = np.array(Set2_8.mpl_colors)
    bins = np.arange(15, 5000, 30)
    dx = np.diff(bins[:2])
    X = bins[:-1] + dx / 2.  # bin centers
    # shape, loc, scale = lognorm.fit(Y)
    # Fixed starting values for the lognormal fit -- presumably from a
    # previous unconstrained fit (commented line above); verify if reused.
    shape, loc, scale = [0.93, -45.9, 605.4]
    params = lognorm.fit(Y, loc=loc, scale=scale)
    print(params)
    # Bootstrap: histograms of 10000 synthetic samples from the fitted model.
    boot = np.array([np.histogram(lognorm.rvs(*params, len(Y)), bins=bins, density=True)[0] for i in range(10000)])
    if isinstance(ax, str):
        fig, ax = plt.subplots()
    count = np.histogram(Y, bins=bins)[0]
    hist = np.histogram(Y, bins=bins, density=True)[0]
    p1 = lognorm.pdf(X, *params)
    p2 = lognorm.pdf(bins, *params)
    # Per-bin probability mass via trapezoidal rule on the fitted pdf.
    p3 = np.array([0.5*(lo+hi) * dx for lo, hi in zip(p2[:-1], p2[1:])])
    ax.plot(X, hist, '-', c=col[1], lw=0.9)
    ax.plot(X, p1, ':k')
    # 1%-99% bootstrap band around the fitted model.
    ax.fill_between(X, *[np.quantile(boot, q, axis=0) for q in [0.01, 0.99]], color=col[0], alpha=fa)
    # for imax in argrelmax(hist)[0]:
    #     p = p3[imax]**count[imax]
    #     print(X[imax], p3[imax], count[imax], sum(count))
if __name__ == "__main__":
    # Entry point: load all instrument tunings, then run the full null-model
    # significance scan (spawns worker pools; writes ../IntStats/*.npy).
    df = instrument_tunings()
    unexpected_intervals(df)
| [
"numpy.log",
"numpy.array",
"pandas.read_excel",
"numpy.save",
"numpy.arange",
"numpy.histogram",
"itertools.product",
"numpy.diff",
"numpy.exp",
"numpy.linspace",
"pandas.DataFrame",
"numpy.meshgrid",
"numpy.random.normal",
"numpy.abs",
"numpy.random.choice",
"numpy.square",
"utils.... | [((465, 520), 'pandas.read_excel', 'pd.read_excel', (['"""../scales_database.xlsx"""', '"""source_list"""'], {}), "('../scales_database.xlsx', 'source_list')\n", (478, 520), True, 'import pandas as pd\n'), ((2341, 2364), 'numpy.array', 'np.array', (['shuffled_ints'], {}), '(shuffled_ints)\n', (2349, 2364), True, 'import numpy as np\n'), ((3541, 3676), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['max_scale', 'n_notes', 'ints', 'oct_real', 'oct_shuf', 'mean_real',\n 'mean_shuf', 'MWU', 'f_real', 'f_shuf']"}), "(columns=['max_scale', 'n_notes', 'ints', 'oct_real',\n 'oct_shuf', 'mean_real', 'mean_shuf', 'MWU', 'f_real', 'f_shuf'])\n", (3553, 3676), True, 'import pandas as pd\n'), ((5671, 5690), 'numpy.arange', 'np.arange', (['(0)', '(55)', '(5)'], {}), '(0, 55, 5)\n', (5680, 5690), True, 'import numpy as np\n'), ((6038, 6065), 'numpy.zeros', 'np.zeros', (['(3, nrep2)', 'float'], {}), '((3, nrep2), float)\n', (6046, 6065), True, 'import numpy as np\n'), ((6454, 6472), 'numpy.save', 'np.save', (['path', 'out'], {}), '(path, out)\n', (6461, 6472), True, 'import numpy as np\n'), ((6785, 6808), 'numpy.arange', 'np.arange', (['(200)', '(2605)', '(5)'], {}), '(200, 2605, 5)\n', (6794, 6808), True, 'import numpy as np\n'), ((8211, 8220), 'numpy.sum', 'np.sum', (['Y'], {}), '(Y)\n', (8217, 8220), True, 'import numpy as np\n'), ((8834, 8859), 'numpy.meshgrid', 'np.meshgrid', (['s_arr', 'm_arr'], {}), '(s_arr, m_arr)\n', (8845, 8859), True, 'import numpy as np\n'), ((9249, 9276), 'numpy.array', 'np.array', (['Set2_8.mpl_colors'], {}), '(Set2_8.mpl_colors)\n', (9257, 9276), True, 'import numpy as np\n'), ((9288, 9311), 'numpy.arange', 'np.arange', (['(15)', '(5000)', '(30)'], {}), '(15, 5000, 30)\n', (9297, 9311), True, 'import numpy as np\n'), ((9321, 9338), 'numpy.diff', 'np.diff', (['bins[:2]'], {}), '(bins[:2])\n', (9328, 9338), True, 'import numpy as np\n'), ((9465, 9501), 'scipy.stats.lognorm.fit', 'lognorm.fit', (['Y'], {'loc': 'loc', 'scale': 
'scale'}), '(Y, loc=loc, scale=scale)\n', (9476, 9501), False, 'from scipy.stats import mannwhitneyu, lognorm, norm\n'), ((9804, 9827), 'scipy.stats.lognorm.pdf', 'lognorm.pdf', (['X', '*params'], {}), '(X, *params)\n', (9815, 9827), False, 'from scipy.stats import mannwhitneyu, lognorm, norm\n'), ((9837, 9863), 'scipy.stats.lognorm.pdf', 'lognorm.pdf', (['bins', '*params'], {}), '(bins, *params)\n', (9848, 9863), False, 'from scipy.stats import mannwhitneyu, lognorm, norm\n'), ((2543, 2561), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (2555, 2561), True, 'import matplotlib.pyplot as plt\n'), ((4243, 4266), 'numpy.array', 'np.array', (['shuffled_ints'], {}), '(shuffled_ints)\n', (4251, 4266), True, 'import numpy as np\n'), ((5395, 5407), 'numpy.sum', 'np.sum', (['ints'], {}), '(ints)\n', (5401, 5407), True, 'import numpy as np\n'), ((5493, 5527), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma'], {'size': 'N'}), '(0, sigma, size=N)\n', (5509, 5527), True, 'import numpy as np\n'), ((7816, 7828), 'multiprocessing.Pool', 'Pool', (['N_PROC'], {}), '(N_PROC)\n', (7820, 7828), False, 'from multiprocessing import Pool\n'), ((8001, 8013), 'multiprocessing.Pool', 'Pool', (['N_PROC'], {}), '(N_PROC)\n', (8005, 8013), False, 'from multiprocessing import Pool\n'), ((8238, 8250), 'numpy.square', 'np.square', (['Y'], {}), '(Y)\n', (8247, 8250), True, 'import numpy as np\n'), ((8394, 8440), 'numpy.exp', 'np.exp', (['(-a * m ** 2 + b * m - b ** 2 / (4 * a))'], {}), '(-a * m ** 2 + b * m - b ** 2 / (4 * a))\n', (8400, 8440), True, 'import numpy as np\n'), ((8712, 8723), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (8720, 8723), True, 'import numpy as np\n'), ((8737, 8760), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(1001)'], {}), '(0, 2, 1001)\n', (8748, 8760), True, 'import numpy as np\n'), ((8789, 8799), 'numpy.log', 'np.log', (['(25)'], {}), '(25)\n', (8795, 8799), True, 'import numpy as np\n'), ((8801, 8813), 
'numpy.log', 'np.log', (['(6000)'], {}), '(6000)\n', (8807, 8813), True, 'import numpy as np\n'), ((9683, 9697), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9695, 9697), True, 'import matplotlib.pyplot as plt\n'), ((9710, 9736), 'numpy.histogram', 'np.histogram', (['Y'], {'bins': 'bins'}), '(Y, bins=bins)\n', (9722, 9736), True, 'import numpy as np\n'), ((9751, 9791), 'numpy.histogram', 'np.histogram', (['Y'], {'bins': 'bins', 'density': '(True)'}), '(Y, bins=bins, density=True)\n', (9763, 9791), True, 'import numpy as np\n'), ((1054, 1109), 'pandas.read_excel', 'pd.read_excel', (['"""../scales_database.xlsx"""', 'f"""scales_{a}"""'], {}), "('../scales_database.xlsx', f'scales_{a}')\n", (1067, 1109), True, 'import pandas as pd\n'), ((1199, 1219), 'utils.str_to_ints', 'utils.str_to_ints', (['x'], {}), '(x)\n', (1216, 1219), False, 'import utils\n'), ((2582, 2607), 'numpy.abs', 'np.abs', (['(oct_real - octave)'], {}), '(oct_real - octave)\n', (2588, 2607), True, 'import numpy as np\n'), ((2694, 2719), 'numpy.abs', 'np.abs', (['(oct_shuf - octave)'], {}), '(oct_shuf - octave)\n', (2700, 2719), True, 'import numpy as np\n'), ((3031, 3056), 'numpy.abs', 'np.abs', (['(oct_real - octave)'], {}), '(oct_real - octave)\n', (3037, 3056), True, 'import numpy as np\n'), ((3056, 3081), 'numpy.abs', 'np.abs', (['(oct_shuf - octave)'], {}), '(oct_shuf - octave)\n', (3062, 3081), True, 'import numpy as np\n'), ((3100, 3125), 'numpy.abs', 'np.abs', (['(oct_real - octave)'], {}), '(oct_real - octave)\n', (3106, 3125), True, 'import numpy as np\n'), ((3144, 3169), 'numpy.abs', 'np.abs', (['(oct_shuf - octave)'], {}), '(oct_shuf - octave)\n', (3150, 3169), True, 'import numpy as np\n'), ((3942, 3967), 'numpy.abs', 'np.abs', (['(oct_real - octave)'], {}), '(oct_real - octave)\n', (3948, 3967), True, 'import numpy as np\n'), ((4477, 4502), 'numpy.abs', 'np.abs', (['(oct_shuf - octave)'], {}), '(oct_shuf - octave)\n', (4483, 4502), True, 'import numpy as np\n'), 
((5009, 5053), 'numpy.random.choice', 'np.random.choice', (['ints'], {'replace': '(True)', 'size': 'n'}), '(ints, replace=True, size=n)\n', (5025, 5053), True, 'import numpy as np\n'), ((6900, 6912), 'multiprocessing.Pool', 'Pool', (['N_PROC'], {}), '(N_PROC)\n', (6904, 6912), False, 'from multiprocessing import Pool\n'), ((7105, 7117), 'multiprocessing.Pool', 'Pool', (['N_PROC'], {}), '(N_PROC)\n', (7109, 7117), False, 'from multiprocessing import Pool\n'), ((7323, 7335), 'multiprocessing.Pool', 'Pool', (['N_PROC'], {}), '(N_PROC)\n', (7327, 7335), False, 'from multiprocessing import Pool\n'), ((8626, 8685), 'numpy.random.choice', 'np.random.choice', (["df.loc[df.Region == c, 'AllInts']"], {'size': '(6)'}), "(df.loc[df.Region == c, 'AllInts'], size=6)\n", (8642, 8685), True, 'import numpy as np\n'), ((1834, 1850), 'numpy.cumsum', 'np.cumsum', (['y[i:]'], {}), '(y[i:])\n', (1843, 1850), True, 'import numpy as np\n'), ((2256, 2274), 'numpy.cumsum', 'np.cumsum', (['ran[k:]'], {}), '(ran[k:])\n', (2265, 2274), True, 'import numpy as np\n'), ((2612, 2636), 'numpy.arange', 'np.arange', (['(0)', '(w + 10)', '(10)'], {}), '(0, w + 10, 10)\n', (2621, 2636), True, 'import numpy as np\n'), ((2724, 2748), 'numpy.arange', 'np.arange', (['(0)', '(w + 10)', '(10)'], {}), '(0, w + 10, 10)\n', (2733, 2748), True, 'import numpy as np\n'), ((2821, 2863), 'numpy.arange', 'np.arange', (['(octave - w)', '(octave + w + 10)', '(10)'], {}), '(octave - w, octave + w + 10, 10)\n', (2830, 2863), True, 'import numpy as np\n'), ((2932, 2974), 'numpy.arange', 'np.arange', (['(octave - w)', '(octave + w + 10)', '(10)'], {}), '(octave - w, octave + w + 10, 10)\n', (2941, 2974), True, 'import numpy as np\n'), ((4155, 4173), 'numpy.cumsum', 'np.cumsum', (['ran[k:]'], {}), '(ran[k:])\n', (4164, 4173), True, 'import numpy as np\n'), ((5324, 5339), 'numpy.cumsum', 'np.cumsum', (['ints'], {}), '(ints)\n', (5333, 5339), True, 'import numpy as np\n'), ((5760, 5804), 'numpy.random.choice', 
'np.random.choice', (['ints'], {'replace': '(True)', 'size': 'n'}), '(ints, replace=True, size=n)\n', (5776, 5804), True, 'import numpy as np\n'), ((6964, 7005), 'itertools.product', 'product', (['[alt_df]', 'ints', '[c]', '[100]', '[20]'], {}), '([alt_df], ints, [c], [100], [20])\n', (6971, 7005), False, 'from itertools import product\n'), ((7169, 7223), 'itertools.product', 'product', (['[alt_df]', 'ints', "[f'contsamp{i}']", '[100]', '[20]'], {}), "([alt_df], ints, [f'contsamp{i}'], [100], [20])\n", (7176, 7223), False, 'from itertools import product\n'), ((7387, 7441), 'itertools.product', 'product', (['[alt_df]', 'ints', "[f'cultsamp{i}']", '[100]', '[20]'], {}), "([alt_df], ints, [f'cultsamp{i}'], [100], [20])\n", (7394, 7441), False, 'from itertools import product\n'), ((7653, 7665), 'multiprocessing.Pool', 'Pool', (['N_PROC'], {}), '(N_PROC)\n', (7657, 7665), False, 'from multiprocessing import Pool\n'), ((7907, 7942), 'itertools.product', 'product', (['[alt_df[i]]', 'ints', '[i + 1]'], {}), '([alt_df[i]], ints, [i + 1])\n', (7914, 7942), False, 'from itertools import product\n'), ((8103, 8146), 'itertools.product', 'product', (['[ideal_df[i]]', 'ints', "[f'sigma{s}']"], {}), "([ideal_df[i]], ints, [f'sigma{s}'])\n", (8110, 8146), False, 'from itertools import product\n'), ((9063, 9118), 'numpy.random.choice', 'np.random.choice', (['df.loc[df[xsamp] == c, ysamp]'], {'size': 's'}), '(df.loc[df[xsamp] == c, ysamp], size=s)\n', (9079, 9118), True, 'import numpy as np\n'), ((10031, 10059), 'numpy.quantile', 'np.quantile', (['boot', 'q'], {'axis': '(0)'}), '(boot, q, axis=0)\n', (10042, 10059), True, 'import numpy as np\n'), ((857, 873), 'numpy.roll', 'np.roll', (['ints', 'i'], {}), '(ints, i)\n', (864, 873), True, 'import numpy as np\n'), ((1485, 1501), 'numpy.cumsum', 'np.cumsum', (['x[i:]'], {}), '(x[i:])\n', (1494, 1501), True, 'import numpy as np\n'), ((3756, 3772), 'numpy.cumsum', 'np.cumsum', (['i[j:]'], {}), '(i[j:])\n', (3765, 3772), True, 'import numpy 
as np\n'), ((3869, 3894), 'numpy.abs', 'np.abs', (['(all_ints - octave)'], {}), '(all_ints - octave)\n', (3875, 3894), True, 'import numpy as np\n'), ((4394, 4424), 'numpy.abs', 'np.abs', (['(shuffled_ints - octave)'], {}), '(shuffled_ints - octave)\n', (4400, 4424), True, 'import numpy as np\n'), ((4547, 4572), 'numpy.abs', 'np.abs', (['(oct_real - octave)'], {}), '(oct_real - octave)\n', (4553, 4572), True, 'import numpy as np\n'), ((4572, 4597), 'numpy.abs', 'np.abs', (['(oct_shuf - octave)'], {}), '(oct_shuf - octave)\n', (4578, 4597), True, 'import numpy as np\n'), ((7721, 7757), 'itertools.product', 'product', (['[df]', 'ints', '[0]', '[w1]', '[w2]'], {}), '([df], ints, [0], [w1], [w2])\n', (7728, 7757), False, 'from itertools import product\n'), ((8493, 8502), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (8499, 8502), True, 'import numpy as np\n')] |
import cv2
import numpy as np
# def FindCourtCorners(frame, file_output=0):
# input: frame -- a numpy array representing the frame in RGB
# file_output -- flag to produce debugging output at:
# ../UntrackedFiles/out/*.png
# For this to work, please create
# this "out/" directory first.
# output:
# (success, corners)
# success -- boolean flag indicating success
# corners -- 4x2 array containing the coordinates of corners:
# [back left; back right; front right; front left;]
# Example:
# from FindCourtCorners import FindCourtCorners
# frame = cv2.imread( "../SharedData/FindCourtTest1.png");
# print FindCourtCorners(frame,1);
# The algorithm works by taking a rectangular crop of the court's
# color (with assumption that it's in the center region of the frame).
# The dominant color of the court is found using histogram of this crop.
# Then, we extract a mask corresponding the the court's shape using
# thresholding, morphological closure, and contour-finding.
# Next, edge-detection is used to find the edges of the court. A hough
# transform is applied to the edges mask, and the dominant lines
# are intersected together. We expect 4 clusters of intersections.
# The clusters are projected to points using dilation and then
# finding the centroid of the dilated blobs. The largest 4 blobs
# are assumed to be the dominant clusters, corresponding to
# actual court corners. Then, the corners are sorted by spatial
# coordinates with ordering [back left, back right, front right,
# front left] as perceived from the camera.
# You can see intermediate output by creating this directory:
# mkdir ../UntrackedFiles/out/
# Then, pass file_output=1 into GetCourtFeaturePoints
class CourtFinder(object):
    """Locate the four corners of a court in a video frame.

    Pipeline: sample the frame center (assumed court-colored), estimate the
    dominant court color with per-channel histograms, threshold in both HSV
    and RGB to build a court mask, keep the largest contour, edge-detect its
    outline, run a Hough transform, intersect the dominant lines, and
    cluster the intersections into four corner points.

    Results are stored on the instance:
        corners_sort  -- 4x2 array ordered [back left, back right,
                         front right, front left] ([] on failure)
        found_corners -- True when four corners were located
    """

    def __init__(self):
        self.corners_sort = []       # last detected corners (empty until a run succeeds)
        self.found_corners = False   # whether the last detection succeeded

    def RhoThetaIsect(self, rho1, rho2, theta1, theta2):
        """Intersect two Hough lines given in (rho, theta) form.

        Returns the intersection as a tuple of truncated ints. Callers are
        expected to pre-offset theta away from exact multiples of pi so the
        sin/tan terms never divide by zero.
        """
        term1 = rho2 / np.sin(theta2)
        term2 = rho1 / np.sin(theta1)
        term3 = 1.0 / np.tan(theta2) - 1.0 / np.tan(theta1)
        x = (term1 - term2) / term3
        y = (rho1 - x * np.cos(theta1)) / np.sin(theta1)
        return (int(x), int(y))

    def GetDominantColor(self, img):
        """Return the dominant color of `img`, one value per channel.

        Uses a 64-bin histogram per channel, smoothed with a 3-tap box
        filter, and returns the center of the strongest interior bin.
        """
        result = [0, 0, 0]
        bins = 64
        bin_w = 256 / bins
        for i in range(0, 3):
            hist = cv2.calcHist([img], [i], None, [bins], [0, 256])
            # Copy before smoothing: hist[1:-1] is a *view* into hist, so an
            # in-place += would write smoothed values back into hist and the
            # next addition (hist[2:]) would read them, skewing the filter
            # toward a [1, 2, 1] kernel instead of the intended [1, 1, 1].
            hist_soft = hist[1:-1].copy()
            hist_soft += hist[:-2]
            hist_soft += hist[2:]
            idx = np.argmax(hist_soft)
            result[i] = (idx + 1.5) * bin_w  # center of the winning bin
        return np.asarray(result)

    def FindCourtCorners(self, frame, file_output=0):
        """Detect the court corners in `frame` (BGR image).

        On success, sets self.corners_sort to a 4x2 array ordered
        [back left, back right, front right, front left] and
        self.found_corners to True; on failure, corners_sort is [] and
        found_corners is False.  When file_output is truthy, debug images
        are written to ../UntrackedFiles/out/ (directory must exist).
        """
        height, width = frame.shape[:2]

        # Sample a small window from the frame center; the court is assumed
        # to dominate this region.
        cent_x = int(width / 2)
        cent_y = int(height / 2)
        cent_win_sz = int(width / 20)
        win = frame[(cent_y - cent_win_sz):(cent_y + cent_win_sz), (cent_x - cent_win_sz):(cent_x + cent_win_sz)]
        if file_output:
            cv2.imwrite("../UntrackedFiles/out/frame.png", frame)
            cv2.imwrite("../UntrackedFiles/out/win.jpg", win)

        # Threshold around the court's dominant color in HSV space.
        win_hsv = cv2.cvtColor(win, cv2.COLOR_BGR2HSV)
        win_dominant_hsv = self.GetDominantColor(win_hsv)
        sat_thresh = 4
        hue_thresh = 30
        val_thresh = 1000  # effectively ignores the value (brightness) channel
        lower_sat = win_dominant_hsv - [sat_thresh, hue_thresh, val_thresh]
        upper_sat = win_dominant_hsv + [sat_thresh, hue_thresh, val_thresh]
        hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        hsv_mask = cv2.inRange(hsv_frame, lower_sat, upper_sat)
        if file_output:
            cv2.imwrite("../UntrackedFiles/out/hsv_mask.png", hsv_mask)

        # Threshold around the dominant color in RGB space as well.
        win_rgb = win.copy()
        win_dominant_rgb = self.GetDominantColor(win_rgb)
        r_thresh = 40
        g_thresh = 40
        b_thresh = 40
        lower_rgb = win_dominant_rgb - [r_thresh, g_thresh, b_thresh]
        upper_rgb = win_dominant_rgb + [r_thresh, g_thresh, b_thresh]
        rgb_mask = cv2.inRange(frame, lower_rgb, upper_rgb)
        if file_output:
            cv2.imwrite("../UntrackedFiles/out/rgb_mask.png", rgb_mask)

        # The court mask is the intersection of both color masks.
        court_mask = cv2.bitwise_and(rgb_mask, rgb_mask, mask=hsv_mask)
        if file_output:
            cv2.imwrite("../UntrackedFiles/out/court_mask.png", court_mask)

        # Preview image of the detected dominant court color (debug only).
        preview = np.ones((100, 100, 3)) * np.asarray([win_dominant_rgb[0], win_dominant_rgb[1], win_dominant_rgb[2]])
        if file_output:
            cv2.imwrite("../UntrackedFiles/out/court_color_rgb.png", preview)

        # Close small holes, then keep the largest contour: the court.
        close_sz = int(width / 20)
        dilate_sz = int(width / 150)
        court_mask = cv2.morphologyEx(court_mask, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (close_sz, close_sz)))
        if file_output:
            cv2.imwrite("../UntrackedFiles/out/court_mask_closed.png", court_mask)
        # [-2:] keeps this working on both OpenCV 3 (3 return values) and
        # OpenCV 4 (2 return values).
        contours, hier = cv2.findContours(court_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2:]
        max_area = 0
        max_c = None
        for c in contours:
            c_area = cv2.contourArea(c)
            if c_area > max_area:
                max_area = c_area
                max_c = c
        if max_c is None:
            # No candidate contour at all: detection failed.
            self.corners_sort = []
            self.found_corners = False
            return

        # Draw the filled court contour and edge-detect its outline.
        court_mask_color = np.zeros(frame.shape)
        court_mask_color = cv2.drawContours(court_mask_color, [max_c], 0, (0, 255, 0), -1)
        court_mask_bw = cv2.inRange(court_mask_color, (0, 255, 0), (0, 255, 0))
        court_outline = cv2.Canny(court_mask_bw, 100, 200)

        # Dilate the outline to help out the Hough transform.
        dilate_sz = int(width / 450)
        court_outline = cv2.morphologyEx(court_outline, cv2.MORPH_DILATE, np.ones((dilate_sz, dilate_sz)))
        if file_output:
            cv2.imwrite("../UntrackedFiles/out/court_outline.jpg", court_outline)

        # Hough transform to find the dominant lines.
        frame_lines = frame.copy()
        lines = cv2.HoughLines(court_outline, 1, np.pi / 180, int(width / 8))
        if lines is None:
            # HoughLines returns None when no line reaches the threshold;
            # treat this as a failed detection instead of crashing below.
            self.corners_sort = []
            self.found_corners = False
            return
        isect_mask = np.zeros(court_outline.shape, dtype="uint8")

        # Mark every in-frame intersection between pairs of Hough lines.
        # Pairs with relative angle below angle_thresh are near-parallel:
        # their intersection is far away and carries no corner information.
        angle_thresh = 0.1
        for line1 in lines:
            rho1 = line1[0][0]
            theta1 = line1[0][1] + 0.0005234  # fix div-zero case
            for line2 in lines:
                rho2 = line2[0][0]
                theta2 = line2[0][1] + 0.0005234  # fix div-zero case
                if theta1 != theta2:
                    isect = self.RhoThetaIsect(rho1, rho2, theta1, theta2)
                    # TODO: handle edge cases of theta1-theta2
                    if (isect[0] >= 0 and isect[0] < width and isect[1] >= 0 and isect[1] < height) and np.abs(theta1 - theta2) > angle_thresh:
                        isect_mask[isect[1]][isect[0]] = 1

        # Draw the Hough lines (debugging only).  The original code also
        # drew isect1->isect2 unconditionally before the branch below; that
        # leftover duplicate has been removed.
        for line in lines:
            line = np.squeeze(line)
            rho = line[0]
            theta = line[1] + 0.0001  # hack, ensures eventual intersection
            isect1 = self.RhoThetaIsect(rho, 0, theta, 0.001)           # vertical
            isect2 = self.RhoThetaIsect(rho, 0, theta, np.pi / 2)       # horiz
            isect3 = self.RhoThetaIsect(rho, height, theta, np.pi / 2)  # horiz
            if isect2[0] > isect3[0]:
                cv2.line(frame_lines, isect1, isect2, (0, 0, 255), 2)
            else:
                cv2.line(frame_lines, isect1, isect3, (0, 0, 255), 2)
        if file_output:
            cv2.imwrite("../UntrackedFiles/out/houghlines.png", frame_lines)

        # Dilate intersections into blobs, restrict them to the area around
        # the court, and use blob centroids as corner candidates.
        dilate_sz = int(width / 50)
        isect_mask = cv2.morphologyEx(isect_mask, cv2.MORPH_DILATE, np.ones((dilate_sz, dilate_sz)))
        dilate_sz = int(width / 15)
        court_mask_dilated = cv2.morphologyEx(court_mask_bw, cv2.MORPH_DILATE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (dilate_sz, dilate_sz)))
        court_mask_dilated = cv2.morphologyEx(court_mask_dilated, cv2.MORPH_DILATE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (dilate_sz, dilate_sz)))
        court_mask_dilated = cv2.morphologyEx(court_mask_dilated, cv2.MORPH_DILATE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (dilate_sz, dilate_sz)))
        if file_output:
            cv2.imwrite("../UntrackedFiles/out/court_mask_dilated.png", court_mask_dilated)
        isect_mask = isect_mask & court_mask_dilated
        contours, hier = cv2.findContours(isect_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2:]
        if file_output:
            cv2.imwrite("../UntrackedFiles/out/isect_dots.png", isect_mask * 255)

        # Fewer than 4 clusters means we failed to find 4 court corners.
        if len(contours) < 4:
            self.corners_sort = []
            self.found_corners = False
            return

        # Keep the 4 largest blobs (highest-confidence corners) and use
        # their centroids as corner coordinates.
        area_idx = np.argsort([-cv2.contourArea(c) for c in contours])
        area_idx = area_idx[:4]
        corners = np.zeros((4, 2), dtype="uint32")
        ct = 0
        for idx in area_idx:
            M = cv2.moments(contours[idx])
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            isect_mask[cy][cx] = 0
            corners[ct] = [cx, cy]
            ct += 1

        # Sort the corners clockwise: [back left, back right, front right,
        # front left] as perceived from the camera.
        y_sort_idx = np.argsort([c[1] for c in corners])
        corners_sorted = corners[y_sort_idx, :]
        x_sort_idx = [0, 1, 2, 3]
        if corners_sorted[0, 0] > corners_sorted[1, 0]:
            x_sort_idx[0] = 1
            x_sort_idx[1] = 0
        if corners_sorted[2, 0] < corners_sorted[3, 0]:
            x_sort_idx[2] = 3
            x_sort_idx[3] = 2
        corners_sorted = corners_sorted[x_sort_idx, :]

        # Draw the frame with marked corners (for debugging).
        if file_output:
            frame_marked_corners = frame.copy()
            corner_idx = 0
            for corner in corners_sorted:
                cx = corner[0]
                cy = corner[1]
                draw_length = 15
                cv2.line(frame_marked_corners, (cx - draw_length, cy), (cx + draw_length, cy), (0, 0, 255), 2)
                cv2.line(frame_marked_corners, (cx, cy - draw_length), (cx, cy + draw_length), (0, 0, 255), 2)
                font = cv2.FONT_HERSHEY_SIMPLEX
                bottomLeftCornerOfText = (cx + draw_length, cy - draw_length)
                fontScale = 1
                fontColor = (0, 0, 255)
                lineType = 2
                cv2.putText(frame_marked_corners, str(corner_idx),
                            bottomLeftCornerOfText,
                            font,
                            fontScale,
                            fontColor,
                            lineType)
                corner_idx += 1
            cv2.imwrite("../UntrackedFiles/out/frame_marked_corners.png", frame_marked_corners)

        self.corners_sort = corners_sorted
        self.found_corners = True

    def drawCornersOnFrame(self, frame):
        """Return a copy of `frame` with the stored corners drawn as red
        crosses and numbered (0-3) for visual inspection."""
        frame_marked_corners = frame.copy()
        corner_idx = 0
        for corner in self.corners_sort:
            cx = corner[0]
            cy = corner[1]
            draw_length = 15
            cv2.line(frame_marked_corners, (cx - draw_length, cy), (cx + draw_length, cy), (0, 0, 255), 2)
            cv2.line(frame_marked_corners, (cx, cy - draw_length), (cx, cy + draw_length), (0, 0, 255), 2)
            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = (cx + draw_length, cy - draw_length)
            fontScale = 1
            fontColor = (0, 0, 255)
            lineType = 2
            cv2.putText(frame_marked_corners, str(corner_idx),
                        bottomLeftCornerOfText,
                        font,
                        fontScale,
                        fontColor,
                        lineType)
            corner_idx += 1
        return frame_marked_corners
| [
"numpy.argsort",
"numpy.sin",
"cv2.calcHist",
"cv2.line",
"numpy.asarray",
"cv2.contourArea",
"numpy.abs",
"cv2.drawContours",
"numpy.ones",
"numpy.argmax",
"numpy.squeeze",
"numpy.cos",
"cv2.cvtColor",
"cv2.moments",
"cv2.Canny",
"cv2.imwrite",
"numpy.tan",
"cv2.inRange",
"cv2.b... | [((2824, 2842), 'numpy.asarray', 'np.asarray', (['result'], {}), '(result)\n', (2834, 2842), True, 'import numpy as np\n'), ((3608, 3644), 'cv2.cvtColor', 'cv2.cvtColor', (['win', 'cv2.COLOR_BGR2HSV'], {}), '(win, cv2.COLOR_BGR2HSV)\n', (3620, 3644), False, 'import cv2\n'), ((3962, 4000), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (3974, 4000), False, 'import cv2\n'), ((4021, 4065), 'cv2.inRange', 'cv2.inRange', (['hsv_frame', 'lower_sat', 'upper_sat'], {}), '(hsv_frame, lower_sat, upper_sat)\n', (4032, 4065), False, 'import cv2\n'), ((4590, 4630), 'cv2.inRange', 'cv2.inRange', (['frame', 'lower_rgb', 'upper_rgb'], {}), '(frame, lower_rgb, upper_rgb)\n', (4601, 4630), False, 'import cv2\n'), ((4753, 4803), 'cv2.bitwise_and', 'cv2.bitwise_and', (['rgb_mask', 'rgb_mask'], {'mask': 'hsv_mask'}), '(rgb_mask, rgb_mask, mask=hsv_mask)\n', (4768, 4803), False, 'import cv2\n'), ((5636, 5704), 'cv2.findContours', 'cv2.findContours', (['court_mask', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(court_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (5652, 5704), False, 'import cv2\n'), ((5987, 6008), 'numpy.zeros', 'np.zeros', (['frame.shape'], {}), '(frame.shape)\n', (5995, 6008), True, 'import numpy as np\n'), ((6039, 6102), 'cv2.drawContours', 'cv2.drawContours', (['court_mask_color', '[max_c]', '(0)', '(0, 255, 0)', '(-1)'], {}), '(court_mask_color, [max_c], 0, (0, 255, 0), -1)\n', (6055, 6102), False, 'import cv2\n'), ((6126, 6181), 'cv2.inRange', 'cv2.inRange', (['court_mask_color', '(0, 255, 0)', '(0, 255, 0)'], {}), '(court_mask_color, (0, 255, 0), (0, 255, 0))\n', (6137, 6181), False, 'import cv2\n'), ((6206, 6240), 'cv2.Canny', 'cv2.Canny', (['court_mask_bw', '(100)', '(200)'], {}), '(court_mask_bw, 100, 200)\n', (6215, 6240), False, 'import cv2\n'), ((6753, 6797), 'numpy.zeros', 'np.zeros', (['court_outline.shape'], {'dtype': '"""uint8"""'}), "(court_outline.shape, dtype='uint8')\n", 
(6761, 6797), True, 'import numpy as np\n'), ((9408, 9476), 'cv2.findContours', 'cv2.findContours', (['isect_mask', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(isect_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (9424, 9476), False, 'import cv2\n'), ((10067, 10099), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {'dtype': '"""uint32"""'}), "((4, 2), dtype='uint32')\n", (10075, 10099), True, 'import numpy as np\n'), ((10462, 10497), 'numpy.argsort', 'np.argsort', (['[c[1] for c in corners]'], {}), '([c[1] for c in corners])\n', (10472, 10497), True, 'import numpy as np\n'), ((2100, 2114), 'numpy.sin', 'np.sin', (['theta2'], {}), '(theta2)\n', (2106, 2114), True, 'import numpy as np\n'), ((2140, 2154), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (2146, 2154), True, 'import numpy as np\n'), ((2295, 2309), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (2301, 2309), True, 'import numpy as np\n'), ((2566, 2614), 'cv2.calcHist', 'cv2.calcHist', (['[img]', '[i]', 'None', '[bins]', '[0, 256]'], {}), '([img], [i], None, [bins], [0, 256])\n', (2578, 2614), False, 'import cv2\n'), ((2740, 2760), 'numpy.argmax', 'np.argmax', (['hist_soft'], {}), '(hist_soft)\n', (2749, 2760), True, 'import numpy as np\n'), ((3371, 3424), 'cv2.imwrite', 'cv2.imwrite', (['"""../UntrackedFiles/out/frame.png"""', 'frame'], {}), "('../UntrackedFiles/out/frame.png', frame)\n", (3382, 3424), False, 'import cv2\n'), ((3440, 3489), 'cv2.imwrite', 'cv2.imwrite', (['"""../UntrackedFiles/out/win.jpg"""', 'win'], {}), "('../UntrackedFiles/out/win.jpg', win)\n", (3451, 3489), False, 'import cv2\n'), ((4105, 4164), 'cv2.imwrite', 'cv2.imwrite', (['"""../UntrackedFiles/out/hsv_mask.png"""', 'hsv_mask'], {}), "('../UntrackedFiles/out/hsv_mask.png', hsv_mask)\n", (4116, 4164), False, 'import cv2\n'), ((4670, 4729), 'cv2.imwrite', 'cv2.imwrite', (['"""../UntrackedFiles/out/rgb_mask.png"""', 'rgb_mask'], {}), "('../UntrackedFiles/out/rgb_mask.png', rgb_mask)\n", (4681, 4729), False, 
'import cv2\n'), ((4843, 4906), 'cv2.imwrite', 'cv2.imwrite', (['"""../UntrackedFiles/out/court_mask.png"""', 'court_mask'], {}), "('../UntrackedFiles/out/court_mask.png', court_mask)\n", (4854, 4906), False, 'import cv2\n'), ((4990, 5012), 'numpy.ones', 'np.ones', (['(100, 100, 3)'], {}), '((100, 100, 3))\n', (4997, 5012), True, 'import numpy as np\n'), ((5013, 5088), 'numpy.asarray', 'np.asarray', (['[win_dominant_rgb[0], win_dominant_rgb[1], win_dominant_rgb[2]]'], {}), '([win_dominant_rgb[0], win_dominant_rgb[1], win_dominant_rgb[2]])\n', (5023, 5088), True, 'import numpy as np\n'), ((5128, 5193), 'cv2.imwrite', 'cv2.imwrite', (['"""../UntrackedFiles/out/court_color_rgb.png"""', 'preview'], {}), "('../UntrackedFiles/out/court_color_rgb.png', preview)\n", (5139, 5193), False, 'import cv2\n'), ((5429, 5495), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(close_sz, close_sz)'], {}), '(cv2.MORPH_ELLIPSE, (close_sz, close_sz))\n', (5454, 5495), False, 'import cv2\n'), ((5533, 5603), 'cv2.imwrite', 'cv2.imwrite', (['"""../UntrackedFiles/out/court_mask_closed.png"""', 'court_mask'], {}), "('../UntrackedFiles/out/court_mask_closed.png', court_mask)\n", (5544, 5603), False, 'import cv2\n'), ((5799, 5817), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (5814, 5817), False, 'import cv2\n'), ((6418, 6449), 'numpy.ones', 'np.ones', (['(dilate_sz, dilate_sz)'], {}), '((dilate_sz, dilate_sz))\n', (6425, 6449), True, 'import numpy as np\n'), ((6488, 6557), 'cv2.imwrite', 'cv2.imwrite', (['"""../UntrackedFiles/out/court_outline.jpg"""', 'court_outline'], {}), "('../UntrackedFiles/out/court_outline.jpg', court_outline)\n", (6499, 6557), False, 'import cv2\n'), ((7771, 7787), 'numpy.squeeze', 'np.squeeze', (['line'], {}), '(line)\n', (7781, 7787), True, 'import numpy as np\n'), ((8138, 8191), 'cv2.line', 'cv2.line', (['frame_lines', 'isect1', 'isect2', '(0, 0, 255)', '(2)'], {}), '(frame_lines, isect1, isect2, (0, 0, 255), 2)\n', 
(8146, 8191), False, 'import cv2\n'), ((8417, 8481), 'cv2.imwrite', 'cv2.imwrite', (['"""../UntrackedFiles/out/houghlines.png"""', 'frame_lines'], {}), "('../UntrackedFiles/out/houghlines.png', frame_lines)\n", (8428, 8481), False, 'import cv2\n'), ((8676, 8707), 'numpy.ones', 'np.ones', (['(dilate_sz, dilate_sz)'], {}), '((dilate_sz, dilate_sz))\n', (8683, 8707), True, 'import numpy as np\n'), ((8827, 8895), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(dilate_sz, dilate_sz)'], {}), '(cv2.MORPH_ELLIPSE, (dilate_sz, dilate_sz))\n', (8852, 8895), False, 'import cv2\n'), ((8981, 9049), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(dilate_sz, dilate_sz)'], {}), '(cv2.MORPH_ELLIPSE, (dilate_sz, dilate_sz))\n', (9006, 9049), False, 'import cv2\n'), ((9135, 9203), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(dilate_sz, dilate_sz)'], {}), '(cv2.MORPH_ELLIPSE, (dilate_sz, dilate_sz))\n', (9160, 9203), False, 'import cv2\n'), ((9240, 9319), 'cv2.imwrite', 'cv2.imwrite', (['"""../UntrackedFiles/out/court_mask_dilated.png"""', 'court_mask_dilated'], {}), "('../UntrackedFiles/out/court_mask_dilated.png', court_mask_dilated)\n", (9251, 9319), False, 'import cv2\n'), ((9514, 9583), 'cv2.imwrite', 'cv2.imwrite', (['"""../UntrackedFiles/out/isect_dots.png"""', '(isect_mask * 255)'], {}), "('../UntrackedFiles/out/isect_dots.png', isect_mask * 255)\n", (9525, 9583), False, 'import cv2\n'), ((10164, 10190), 'cv2.moments', 'cv2.moments', (['contours[idx]'], {}), '(contours[idx])\n', (10175, 10190), False, 'import cv2\n'), ((11982, 12069), 'cv2.imwrite', 'cv2.imwrite', (['"""../UntrackedFiles/out/frame_marked_corners.png"""', 'frame_marked_corners'], {}), "('../UntrackedFiles/out/frame_marked_corners.png',\n frame_marked_corners)\n", (11993, 12069), False, 'import cv2\n'), ((12533, 12631), 'cv2.line', 'cv2.line', (['frame_marked_corners', '(cx - draw_length, cy)', '(cx + 
draw_length, cy)', '(0, 0, 255)', '(2)'], {}), '(frame_marked_corners, (cx - draw_length, cy), (cx + draw_length,\n cy), (0, 0, 255), 2)\n', (12541, 12631), False, 'import cv2\n'), ((12637, 12735), 'cv2.line', 'cv2.line', (['frame_marked_corners', '(cx, cy - draw_length)', '(cx, cy + draw_length)', '(0, 0, 255)', '(2)'], {}), '(frame_marked_corners, (cx, cy - draw_length), (cx, cy +\n draw_length), (0, 0, 255), 2)\n', (12645, 12735), False, 'import cv2\n'), ((2177, 2191), 'numpy.tan', 'np.tan', (['theta2'], {}), '(theta2)\n', (2183, 2191), True, 'import numpy as np\n'), ((2198, 2212), 'numpy.tan', 'np.tan', (['theta1'], {}), '(theta1)\n', (2204, 2212), True, 'import numpy as np\n'), ((8245, 8298), 'cv2.line', 'cv2.line', (['frame_lines', 'isect1', 'isect2', '(0, 0, 255)', '(2)'], {}), '(frame_lines, isect1, isect2, (0, 0, 255), 2)\n', (8253, 8298), False, 'import cv2\n'), ((8330, 8383), 'cv2.line', 'cv2.line', (['frame_lines', 'isect1', 'isect3', '(0, 0, 255)', '(2)'], {}), '(frame_lines, isect1, isect3, (0, 0, 255), 2)\n', (8338, 8383), False, 'import cv2\n'), ((11239, 11337), 'cv2.line', 'cv2.line', (['frame_marked_corners', '(cx - draw_length, cy)', '(cx + draw_length, cy)', '(0, 0, 255)', '(2)'], {}), '(frame_marked_corners, (cx - draw_length, cy), (cx + draw_length,\n cy), (0, 0, 255), 2)\n', (11247, 11337), False, 'import cv2\n'), ((11347, 11445), 'cv2.line', 'cv2.line', (['frame_marked_corners', '(cx, cy - draw_length)', '(cx, cy + draw_length)', '(0, 0, 255)', '(2)'], {}), '(frame_marked_corners, (cx, cy - draw_length), (cx, cy +\n draw_length), (0, 0, 255), 2)\n', (11355, 11445), False, 'import cv2\n'), ((2277, 2291), 'numpy.cos', 'np.cos', (['theta1'], {}), '(theta1)\n', (2283, 2291), True, 'import numpy as np\n'), ((9975, 9993), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (9990, 9993), False, 'import cv2\n'), ((7576, 7599), 'numpy.abs', 'np.abs', (['(theta1 - theta2)'], {}), '(theta1 - theta2)\n', (7582, 7599), True, 'import numpy as 
np\n')] |
import numpy as np
import pandas as pd
from tqdm import tqdm
from joblib import Parallel, delayed
def cytopath_merger(adata, overlap=0.5, num_cores=1):
    """
    Merge consecutive trajectory steps whose cell populations overlap.

    For every terminal region and every trajectory, adjacent steps are
    merged into a single bucket while the fraction of shared cells between
    the anchor step and the following step exceeds `overlap`.

    Arguments
    ---------
    adata: :class:`~anndata.AnnData`
        Annotated data matrix with end points.
    overlap: float (default: 0.5)
        Overlap until new bucket of cells is created.
    num_cores: int (default: 1)
        Currently unused; kept for backward interface compatibility.

    Returns
    -------
    adata.uns['trajectories']["cells_along_trajectories_each_step_merged"]:
        Record array with fields (End point, Trajectory, Step, Cell,
        Allignment Score); each cell appears once per merged step with its
        maximum alignment score across the merged steps.
    """
    cells_along_trajectories = adata.uns['trajectories']["cells_along_trajectories_each_step"].copy()
    cells_along_trajectories_merged = []

    def _step_cells(records, step):
        # Cells assigned to a given (unmerged) step of one trajectory.
        return records[np.where(records["Step"] == step)[0]]["Cell"]

    # For each terminal region and each trajectory
    for end_point in adata.uns['run_info']['end_point_clusters']:
        print('Merging steps for trajectories for ' + end_point)
        sel_end_point_data = cells_along_trajectories[np.where(cells_along_trajectories["End point"] == end_point)[0]]
        for i in tqdm(range(adata.uns['run_info']["trajectory_count"][end_point])):
            # All records belonging to trajectory i of this end point
            sel_end_point_trajectories = sel_end_point_data[np.where(sel_end_point_data["Trajectory"] == i)[0]]
            start = 0
            steps = 0
            global_merge_steps = []
            # Number of distinct steps is invariant inside the loop below,
            # so compute it once.
            n_steps = len(np.unique(sel_end_point_trajectories["Step"]))
            # Build the list of step groups to merge: starting from an
            # anchor step, keep absorbing subsequent steps while their
            # cell overlap with the anchor exceeds `overlap`.
            while steps < n_steps:
                merge_steps = [start]
                steps += 1
                start_cells = _step_cells(sel_end_point_trajectories, start)
                next_cells = _step_cells(sel_end_point_trajectories, steps)
                intersect_step = len(np.intersect1d(start_cells, next_cells))
                total_unique = len(np.unique(np.append(start_cells, next_cells)))
                # 1e-15 guards against division by zero on empty steps.
                overlap_perc = intersect_step / (total_unique + 1e-15)
                if overlap_perc > overlap:
                    merge_steps.append(steps)
                    while overlap_perc > overlap and steps < n_steps:
                        steps += 1
                        next_cells = _step_cells(sel_end_point_trajectories, steps)
                        intersect_step = len(np.intersect1d(start_cells, next_cells))
                        total_unique = len(np.unique(np.append(start_cells, next_cells)))
                        # Same zero-division guard as above (the original
                        # code omitted it in this branch).
                        overlap_perc = intersect_step / (total_unique + 1e-15)
                        if overlap_perc > overlap:
                            merge_steps.append(steps)
                start = steps
                global_merge_steps.append(merge_steps)

            # Relabel steps in place: every original step of group h
            # becomes step h.  Groups cover contiguous step ranges, so
            # k + counter enumerates the original step numbers.
            counter = 0
            for h in range(len(global_merge_steps)):
                for k in range(len(global_merge_steps[h])):
                    mask = ((cells_along_trajectories["End point"] == end_point)
                            & (cells_along_trajectories["Trajectory"] == i)
                            & (cells_along_trajectories["Step"] == k + counter))
                    cells_along_trajectories["Step"][np.where(mask)[0]] = h
                counter += len(global_merge_steps[h])

            # Save the new buckets of cells: one record per cell per merged
            # step, keeping each cell's maximum alignment score.
            for m in range(len(global_merge_steps)):
                mask = ((cells_along_trajectories["End point"] == end_point)
                        & (cells_along_trajectories["Trajectory"] == i)
                        & (cells_along_trajectories["Step"] == m))
                cell_along_traj = cells_along_trajectories[np.where(mask)[0]]
                cells = np.unique(cell_along_traj["Cell"])
                for cell in cells:
                    max_a = np.max(cell_along_traj[np.where(cell_along_traj["Cell"] == cell)]["Allignment Score"])
                    cells_along_trajectories_merged.append([end_point, i, m, cell, max_a])

    adata.uns['trajectories']["cells_along_trajectories_each_step_merged"] = np.rec.fromrecords(cells_along_trajectories_merged,
                                                                    dtype=[('End point', 'U8'),
                                                                            ('Trajectory', int),
                                                                            ('Step', int),
                                                                            ('Cell', 'U8'),
                                                                            ('Allignment Score', float)])
| [
"numpy.where",
"numpy.rec.fromrecords",
"numpy.unique"
] | [((4863, 5033), 'numpy.rec.fromrecords', 'np.rec.fromrecords', (['cells_along_trajectories_merged'], {'dtype': "[('End point', 'U8'), ('Trajectory', int), ('Step', int), ('Cell', 'U8'), (\n 'Allignment Score', float)]"}), "(cells_along_trajectories_merged, dtype=[('End point',\n 'U8'), ('Trajectory', int), ('Step', int), ('Cell', 'U8'), (\n 'Allignment Score', float)])\n", (4881, 5033), True, 'import numpy as np\n'), ((933, 993), 'numpy.where', 'np.where', (["(cells_along_trajectories['End point'] == end_point)"], {}), "(cells_along_trajectories['End point'] == end_point)\n", (941, 993), True, 'import numpy as np\n'), ((1174, 1221), 'numpy.where', 'np.where', (["(sel_end_point_data['Trajectory'] == i)"], {}), "(sel_end_point_data['Trajectory'] == i)\n", (1182, 1221), True, 'import numpy as np\n'), ((1437, 1482), 'numpy.unique', 'np.unique', (["sel_end_point_trajectories['Step']"], {}), "(sel_end_point_trajectories['Step'])\n", (1446, 1482), True, 'import numpy as np\n'), ((4484, 4503), 'numpy.where', 'np.where', (['end_p_avg'], {}), '(end_p_avg)\n', (4492, 4503), True, 'import numpy as np\n'), ((2247, 2292), 'numpy.unique', 'np.unique', (["sel_end_point_trajectories['Step']"], {}), "(sel_end_point_trajectories['Step'])\n", (2256, 2292), True, 'import numpy as np\n'), ((4400, 4419), 'numpy.where', 'np.where', (['end_p_avg'], {}), '(end_p_avg)\n', (4408, 4419), True, 'import numpy as np\n'), ((3743, 3762), 'numpy.where', 'np.where', (['end_p_avg'], {}), '(end_p_avg)\n', (3751, 3762), True, 'import numpy as np\n'), ((4604, 4649), 'numpy.where', 'np.where', (["(cell_along_traj['Cell'] == cells[n])"], {}), "(cell_along_traj['Cell'] == cells[n])\n", (4612, 4649), True, 'import numpy as np\n'), ((1629, 1682), 'numpy.where', 'np.where', (["(sel_end_point_trajectories['Step'] == start)"], {}), "(sel_end_point_trajectories['Step'] == start)\n", (1637, 1682), True, 'import numpy as np\n'), ((1720, 1773), 'numpy.where', 'np.where', (["(sel_end_point_trajectories['Step'] == 
steps)"], {}), "(sel_end_point_trajectories['Step'] == steps)\n", (1728, 1773), True, 'import numpy as np\n'), ((1868, 1921), 'numpy.where', 'np.where', (["(sel_end_point_trajectories['Step'] == start)"], {}), "(sel_end_point_trajectories['Step'] == start)\n", (1876, 1921), True, 'import numpy as np\n'), ((1959, 2012), 'numpy.where', 'np.where', (["(sel_end_point_trajectories['Step'] == steps)"], {}), "(sel_end_point_trajectories['Step'] == steps)\n", (1967, 2012), True, 'import numpy as np\n'), ((2409, 2462), 'numpy.where', 'np.where', (["(sel_end_point_trajectories['Step'] == start)"], {}), "(sel_end_point_trajectories['Step'] == start)\n", (2417, 2462), True, 'import numpy as np\n'), ((2500, 2553), 'numpy.where', 'np.where', (["(sel_end_point_trajectories['Step'] == steps)"], {}), "(sel_end_point_trajectories['Step'] == steps)\n", (2508, 2553), True, 'import numpy as np\n'), ((2652, 2705), 'numpy.where', 'np.where', (["(sel_end_point_trajectories['Step'] == start)"], {}), "(sel_end_point_trajectories['Step'] == start)\n", (2660, 2705), True, 'import numpy as np\n'), ((2743, 2796), 'numpy.where', 'np.where', (["(sel_end_point_trajectories['Step'] == steps)"], {}), "(sel_end_point_trajectories['Step'] == steps)\n", (2751, 2796), True, 'import numpy as np\n')] |
from Puzzle.PuzzlePiece import *
from Img.filters import angle_between
from Img.Pixel import *
import math
import numpy as np
def rotate(origin, point, angle):
    """
    Rotate `point` around `origin` by `angle` radians.

    :param origin: (x, y) coordinates of the rotation center
    :param point: (x, y) coordinates of the point to rotate
    :param angle: rotation angle in radians (math.cos/math.sin take radians,
                  despite the original docstring saying "degrees")
    :return: (qx, qy) tuple with the rotated coordinates
    """
    ox, oy = origin
    px, py = point
    # Standard 2D rotation about (ox, oy).
    qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
    qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
    # NaN is the only float unequal to itself, so this detects NaN results.
    if qx != qx or qy != qy:
        # BUG FIX: the original format string had only 5 placeholders for
        # 7 arguments, silently dropping qy and angle from the diagnostic.
        print("NAN DETECTED: {} {} {} {} {} {} {}".format(ox, oy, px, py, qx, qy, angle))
    return qx, qy
def stick_pieces(bloc_p, bloc_e, p, e, final_stick=False):
    """
    Stick an edge of a piece to the bloc of already resolved pieces.

    Translates then rotates piece `p` in place so that its edge `e` lines up
    with the bloc edge `bloc_e`. When `final_stick` is True, the piece's
    pixel image is also resampled into the rotated frame.

    :param bloc_p: bloc of pieces already solved
    :param bloc_e: bloc of edges already solved (its `shape` endpoints anchor the alignment)
    :param p: piece to add to the bloc (its `edges_` and `img_piece_` are mutated)
    :param e: edge of `p` to stick against `bloc_e`
    :param final_stick: if True, rebuild `p.img_piece_` with rotated pixels
    :return: Nothing
    """
    # Direction vectors of the two edges (first corner -> last corner).
    vec_bloc = np.subtract(bloc_e.shape[0], bloc_e.shape[-1])
    vec_piece = np.subtract(e.shape[0], e.shape[-1])
    # Offset that maps the piece edge's last corner onto the bloc edge's first corner.
    translation = np.subtract(bloc_e.shape[0], e.shape[-1])
    # Angle between the bloc edge and the *reversed* piece edge (edges face each other).
    angle = angle_between((vec_bloc[0], vec_bloc[1], 0), (-vec_piece[0], -vec_piece[1], 0))
    # First move the first corner of piece to the corner of bloc
    for edge in p.edges_:
        edge.shape += translation
    # Then rotate piece of `angle` radians centered on the corner
    for edge in p.edges_:
        for i, point in enumerate(edge.shape):
            edge.shape[i] = rotate(bloc_e.shape[0], point, -angle)
    if final_stick:
        # Bounding box of the translated (pre-rotation) piece pixels.
        #prev bounding box
        minX, minY, maxX, maxY = float('inf'), float('inf'), -float('inf'), -float('inf')
        for i, pixel in enumerate(p.img_piece_):
            # NOTE: translate() mutates the pixel and returns its new (x, y).
            x, y = p.img_piece_[i].translate(translation[1], translation[0])
            minX, minY, maxX, maxY = min(minX, x), min(minY, y), max(maxX, x), max(maxY, y)
            # pixel.rotate(bloc_e.shape[0], -angle)
        #rotation center
        # Scratch image of the piece; -1 marks empty cells (no pixel).
        img_p = np.full((maxX - minX + 1, maxY - minY + 1, 3), -1)
        for pix in p.img_piece_:
            x, y = pix.pos
            x, y = x - minX, y - minY
            img_p[x, y] = pix.color
        # Bounding box after rotating the old box corners around the anchor.
        #new bounding box
        minX2, minY2, maxX2, maxY2 = float('inf'), float('inf'), -float('inf'), -float('inf')
        for x in [minX, maxX]:
            for y in [minY, maxY]:
                x2, y2 = rotate((bloc_e.shape[0][1], bloc_e.shape[0][0]), (x,y), angle)
                x2, y2 = int(x2), int(y2)
                minX2, minY2, maxX2, maxY2 = min(minX2, x2), min(minY2, y2), max(maxX2, x2), max(maxY2, y2)
        pixels = []
        # Inverse mapping: for every destination cell, rotate back by -angle and
        # sample the source image, so the rotated piece has no holes.
        for px in range(minX2, maxX2 + 1):
            for py in range(minY2, maxY2 + 1):
                qx, qy = rotate((bloc_e.shape[0][1], bloc_e.shape[0][0]), (px,py), -angle)
                qx, qy = int(qx), int(qy)
                if minX <= qx <= maxX and minY <= qy <= maxY and img_p[qx - minX, qy - minY][0] != -1:
                    pixels.append(Pixel((px, py), img_p[qx - minX, qy - minY]))
        p.img_piece_ = pixels
| [
"Img.filters.angle_between",
"numpy.subtract",
"math.cos",
"numpy.full",
"math.sin"
] | [((1058, 1104), 'numpy.subtract', 'np.subtract', (['bloc_e.shape[0]', 'bloc_e.shape[-1]'], {}), '(bloc_e.shape[0], bloc_e.shape[-1])\n', (1069, 1104), True, 'import numpy as np\n'), ((1121, 1157), 'numpy.subtract', 'np.subtract', (['e.shape[0]', 'e.shape[-1]'], {}), '(e.shape[0], e.shape[-1])\n', (1132, 1157), True, 'import numpy as np\n'), ((1177, 1218), 'numpy.subtract', 'np.subtract', (['bloc_e.shape[0]', 'e.shape[-1]'], {}), '(bloc_e.shape[0], e.shape[-1])\n', (1188, 1218), True, 'import numpy as np\n'), ((1231, 1310), 'Img.filters.angle_between', 'angle_between', (['(vec_bloc[0], vec_bloc[1], 0)', '(-vec_piece[0], -vec_piece[1], 0)'], {}), '((vec_bloc[0], vec_bloc[1], 0), (-vec_piece[0], -vec_piece[1], 0))\n', (1244, 1310), False, 'from Img.filters import angle_between\n'), ((2108, 2158), 'numpy.full', 'np.full', (['(maxX - minX + 1, maxY - minY + 1, 3)', '(-1)'], {}), '((maxX - minX + 1, maxY - minY + 1, 3), -1)\n', (2115, 2158), True, 'import numpy as np\n'), ((455, 470), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (463, 470), False, 'import math\n'), ((527, 542), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (535, 542), False, 'import math\n'), ((425, 440), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (433, 440), False, 'import math\n'), ((497, 512), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (505, 512), False, 'import math\n')] |
#!/usr/bin/env python3
import time
import pandas as pd
import numpy as np
def choose_category(t):
    """
    Print every distinct value of the ``category`` column of *t*, one per line.

    :param t: object exposing a ``category`` attribute with a ``unique()``
              method (here a pandas DataFrame with a ``category`` column)
    """
    # Iterate the unique values directly; materializing them into an
    # intermediate list first added nothing.
    for category in t.category.unique():
        print(category)
def main():
    """Run an interactive multiple-choice vocabulary quiz.

    Loads translations from ``data/translations.csv``, filters to the
    "Family Members" category, asks ``iterations`` questions (each with
    ``number_of_answers`` options), reads answers from stdin, and prints a
    final percentage score.
    """
    translations = pd.read_csv("data/translations.csv")
    # Normalize case so user answers/lookups are case-insensitive downstream.
    translations["english"] = translations["english"].str.lower()
    translations["pinyin"] = translations["pinyin"].str.lower()
    translations["hanzi_length"] = translations["hanzi"].str.len()
    # translations["hanzi_length"] = translations["hanzi"].apply(lambda x: len(x))
    # translations = translations[translations["character_length"] == 1]
    translations = translations[translations["category"] == "Family Members"]
    # Reset so positional indexes from rng.choice line up with .loc lookups.
    translations.reset_index(inplace=True)
    number_correct = 0
    iterations = 10
    number_of_answers = 3
    give_character = False
    # Direction of translation: show direction[1], answer in direction[0].
    direction = ["pinyin", "hanzi"]
    if give_character:
        direction = direction[::-1]
    for _ in range(iterations):
        rng = np.random.default_rng()
        # Pick distinct rows; index 0 (before shuffling) is the correct one.
        random_indexes = rng.choice(translations.shape[0], size=number_of_answers, replace=False)
        correct_answer = translations.loc[random_indexes[0],direction[0]]
        print("\n", translations.loc[random_indexes[0],direction[1]],sep="")
        # Shuffle so the correct option is not always listed first.
        np.random.shuffle(random_indexes)
        for number, i in enumerate(random_indexes):
            print(number+1,": ", translations.loc[i,direction[0]],sep="")
        # Menu is 1-based; convert to a 0-based index.
        user_selection = int(input("\nSelect an answer: ")) - 1
        user_answer = translations.loc[random_indexes[user_selection],direction[0]]
        if correct_answer == user_answer:
            print("That is correct! + 1 point!")
            number_correct += 1
        else:
            print("Sorry, that's not quite right. The correct answer was ", correct_answer, sep="")
        time.sleep(0.5)
    # round(x, None) returns an int percentage.
    score = round(100*(number_correct/iterations), None)
    print("\nYou achieved a score of ", score, "%", sep="")
    if score < 75:
        print("Try studying a bit more!")
    else:
        print("Good job!")
# TODO:
# When gui is made, make sure number of answers isnt greater than size of df
# also avoid repeat questions and keep track of specific works between sessions
# "you should work on x, y, and z", etc.
#TODO: write webscraper https://commons.wikimedia.org/wiki/Commons:Stroke_Order_Project
if __name__ == "__main__":
    # Currently only lists the available categories; the quiz itself
    # (main()) is disabled below.
    translations = pd.read_csv("data/translations.csv")
    choose_category(translations)
    #main()
| [
"time.sleep",
"numpy.random.default_rng",
"pandas.read_csv",
"numpy.random.shuffle"
] | [((236, 272), 'pandas.read_csv', 'pd.read_csv', (['"""data/translations.csv"""'], {}), "('data/translations.csv')\n", (247, 272), True, 'import pandas as pd\n'), ((2504, 2540), 'pandas.read_csv', 'pd.read_csv', (['"""data/translations.csv"""'], {}), "('data/translations.csv')\n", (2515, 2540), True, 'import pandas as pd\n'), ((1036, 1059), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (1057, 1059), True, 'import numpy as np\n'), ((1321, 1354), 'numpy.random.shuffle', 'np.random.shuffle', (['random_indexes'], {}), '(random_indexes)\n', (1338, 1354), True, 'import numpy as np\n'), ((1884, 1899), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1894, 1899), False, 'import time\n')] |
import numpy as np
import tensorflow as tf
import pickle
import logging
logger = logging.getLogger(__name__)
class DKN:
    """Deep Knowledge-aware Network (TF1-style graph).

    Builds a graph that scores candidate items against a user's click
    history: each text is encoded by a knowledge-aware CNN (KCNN) over
    word + entity (+ optional context) embeddings, and an attention net
    aggregates the clicked-item encodings into a user vector. The score is
    the sigmoid of the dot product between user and item vectors.
    """

    def __init__(self, transform=False, use_bert_embeddings=None, word_embeddings_path=None,
                 entity_embeddings_path=None, context_embeddings_path=None, use_context=False, max_click_history=10,
                 max_text_length=10, entity_dim=32, word_dim=32, l2_weight=0.01, filter_sizes=(1, 2), n_filters=128,
                 lr=0.001, batch_size=128, n_epochs=10, output_path=None):
        # Keep a copy of all hyper-parameters so they can be pickled alongside
        # the checkpoint by save_prediction_model().
        self.model_params = dict(transform=transform, use_bert_embeddings=use_bert_embeddings,
                                 word_embeddings_path=word_embeddings_path,
                                 entity_embeddings_path=entity_embeddings_path,
                                 context_embeddings_path=context_embeddings_path, use_context=use_context,
                                 max_click_history=max_click_history, max_text_length=max_text_length,
                                 entity_dim=entity_dim,
                                 word_dim=word_dim, l2_weight=l2_weight, filter_sizes=filter_sizes, n_filters=n_filters,
                                 lr=lr, batch_size=batch_size, n_epochs=n_epochs, output_path=output_path)
        self.output_path = output_path
        if output_path is None:
            # NOTE(review): this raises a Warning *exception* (it is not a
            # warnings.warn call), so a missing output_path aborts construction.
            raise Warning("The output path has not been set. The model will not be saved.")
        # Embeddings
        self.transform = transform
        # Word Embeddings
        self.use_bert_embeddings = use_bert_embeddings
        self.word_embeddings_path = word_embeddings_path
        if not(self.use_bert_embeddings or self.word_embeddings_path):
            raise ValueError('Neither bert embeddings or Word2Vec has been set')
        # Entity
        self.entity_embeddings_path = entity_embeddings_path
        # Context
        self.context_embeddings_path = context_embeddings_path
        self.use_context = use_context
        # Tensor size
        self.max_click_history = max_click_history
        self.max_text_length = max_text_length
        self.entity_dim = entity_dim
        self.word_dim = word_dim
        # Model
        self.l2_weight = l2_weight
        self.filter_sizes = filter_sizes
        self.n_filters = n_filters
        self.lr = lr
        # Training
        self.batch_size = batch_size
        self.n_epochs = n_epochs
        self.params = []  # for computing regularization loss
        # Build the whole graph eagerly at construction time.
        self._build_inputs()
        self._build_model()
        self._build_train()
        self.session = None

    def _prepare_data_attention(self):
        """Flatten clicked-history tensors to (batch * max_click_history, max_text_length)."""
        clicked_words = tf.reshape(self.clicked_words, shape=[-1, self.max_text_length])
        clicked_entities = tf.reshape(self.clicked_entities, shape=[-1, self.max_text_length])
        return clicked_words, clicked_entities

    def _prepare_data_kcnn(self, words, entities):
        """Look up word and entity embeddings for the given id tensors."""
        embedded_words = tf.nn.embedding_lookup(params=self.word_embeddings, ids=words)
        embedded_entities = tf.nn.embedding_lookup(params=self.entity_embeddings, ids=entities)
        return embedded_words, embedded_entities

    def _build_inputs(self):
        """Declare the feed placeholders (clicked history, candidate item, labels)."""
        with tf.compat.v1.name_scope('input'):
            self.clicked_words = tf.compat.v1.placeholder(
                dtype=tf.int32, shape=[None, self.max_click_history, self.max_text_length], name='clicked_words')
            self.clicked_entities = tf.compat.v1.placeholder(
                dtype=tf.int32, shape=[None, self.max_click_history, self.max_text_length], name='clicked_entities')
            self.words = tf.compat.v1.placeholder(
                dtype=tf.int32, shape=[None, self.max_text_length], name='words')
            self.entities = tf.compat.v1.placeholder(
                dtype=tf.int32, shape=[None, self.max_text_length], name='entities')
            self.labels = tf.compat.v1.placeholder(
                dtype=tf.float32, shape=[None], name='labels')

    def _build_model(self):
        """Load embedding matrices from .npy files and build the scoring graph."""
        with tf.compat.v1.name_scope('embedding'):
            self.word_embeddings = tf.Variable(np.load(self.word_embeddings_path), dtype=np.float32, name='word')
            self.entity_embeddings = tf.Variable(np.load(self.entity_embeddings_path), dtype=np.float32, name='entity')
            # Embedding matrices are trainable and L2-regularized.
            self.params.append(self.word_embeddings)
            self.params.append(self.entity_embeddings)
            if self.use_context:
                context_embs = np.load(self.context_embeddings_path)
                self.context_embeddings = tf.Variable(context_embs, dtype=np.float32, name='context')
                self.params.append(self.context_embeddings)
            if self.transform:
                # Optionally map entity/context embeddings into the word space
                # through a dense tanh layer (regularized separately).
                self.entity_embeddings = tf.compat.v1.layers.dense(
                    self.entity_embeddings, units=self.entity_dim, activation=tf.nn.tanh, name='transformed_entity',
                    kernel_regularizer=tf.keras.regularizers.l2(0.5 * self.l2_weight))
                if self.use_context:
                    self.context_embeddings = tf.compat.v1.layers.dense(
                        self.context_embeddings, units=self.entity_dim, activation=tf.nn.tanh,
                        name='transformed_context', kernel_regularizer=tf.keras.regularizers.l2(0.5 * self.l2_weight))
        user_embeddings, item_embeddings = self._attention()
        # Score = dot product between user and item representations.
        self.scores_unnormalized = tf.reduce_sum(input_tensor=user_embeddings * item_embeddings, axis=1)
        self.scores = tf.sigmoid(self.scores_unnormalized, name='scores')

    def _attention(self):
        """Encode clicked items and the candidate with KCNN, then aggregate the
        clicked encodings with item-conditioned softmax attention."""
        # (batch_size * max_click_history, max_title_length)
        clicked_words, clicked_entities = self._prepare_data_attention()
        with tf.compat.v1.variable_scope('kcnn', reuse=tf.compat.v1.AUTO_REUSE):  # reuse the variables of KCNN
            # (batch_size * max_click_history, title_embedding_length)
            # title_embedding_length = n_filters_for_each_size * n_filter_sizes
            clicked_embeddings = self._kcnn(clicked_words, clicked_entities)
            # (batch_size, title_embedding_length)
            item_embeddings = self._kcnn(self.words, self.entities)
        # (batch_size, max_click_history, title_embedding_length)
        clicked_embeddings = tf.reshape(
            clicked_embeddings, shape=[-1, self.max_click_history, self.n_filters * len(self.filter_sizes)])
        # (batch_size, 1, title_embedding_length)
        item_embeddings_expanded = tf.expand_dims(item_embeddings, 1)
        # (batch_size, max_click_history)
        attention_weights = tf.reduce_sum(input_tensor=clicked_embeddings * item_embeddings_expanded, axis=-1)
        # (batch_size, max_click_history)
        attention_weights = tf.nn.softmax(attention_weights, axis=-1)
        # (batch_size, max_click_history, 1)
        attention_weights_expanded = tf.expand_dims(attention_weights, axis=-1)
        # (batch_size, title_embedding_length)
        user_embeddings = tf.reduce_sum(input_tensor=clicked_embeddings * attention_weights_expanded, axis=1)
        return user_embeddings, item_embeddings

    def _kcnn(self, words, entities):
        """Knowledge-aware CNN: concatenate word/entity(/context) embeddings per
        token, run multi-width convolutions, and max-pool over positions."""
        # (batch_size * max_click_history, max_title_length, word_dim) for users
        # (batch_size, max_title_length, word_dim) for items
        embedded_words, embedded_entities = self._prepare_data_kcnn(words, entities)
        # (batch_size * max_click_history, max_title_length, full_dim) for users
        # (batch_size, max_title_length, full_dim) for items
        if self.use_context:
            embedded_contexts = tf.nn.embedding_lookup(params=self.context_embeddings, ids=entities)
            concat_input = tf.concat([embedded_words, embedded_entities, embedded_contexts], axis=-1)
            full_dim = self.word_dim + self.entity_dim * 2
        else:
            concat_input = tf.concat([embedded_words, embedded_entities], axis=-1)
            full_dim = self.word_dim + self.entity_dim
        # (batch_size * max_click_history, max_title_length, full_dim, 1) for users
        # (batch_size, max_title_length, full_dim, 1) for items
        concat_input = tf.expand_dims(concat_input, -1)
        outputs = []
        for filter_size in self.filter_sizes:
            filter_shape = [filter_size, full_dim, 1, self.n_filters]
            w = tf.compat.v1.get_variable(name='w_' + str(filter_size), shape=filter_shape, dtype=tf.float32)
            b = tf.compat.v1.get_variable(name='b_' + str(filter_size), shape=[self.n_filters], dtype=tf.float32)
            # get_variable may return an existing variable under AUTO_REUSE;
            # guard against registering the same filter twice.
            if w not in self.params:
                self.params.append(w)
            # (batch_size * max_click_history, max_title_length - filter_size + 1, 1, n_filters_for_each_size) for users
            # (batch_size, max_title_length - filter_size + 1, 1, n_filters_for_each_size) for items
            conv = tf.nn.conv2d(input=concat_input, filters=w, strides=[1, 1, 1, 1], padding='VALID', name='conv')
            relu = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')
            # (batch_size * max_click_history, 1, 1, n_filters_for_each_size) for users
            # (batch_size, 1, 1, n_filters_for_each_size) for items
            pool = tf.nn.max_pool2d(input=relu, ksize=[1, self.max_text_length - filter_size + 1, 1, 1],
                                    strides=[1, 1, 1, 1], padding='VALID', name='pool')
            outputs.append(pool)
        # (batch_size * max_click_history, 1, 1, n_filters_for_each_size * n_filter_sizes) for users
        # (batch_size, 1, 1, n_filters_for_each_size * n_filter_sizes) for items
        output = tf.concat(outputs, axis=-1)
        # (batch_size * max_click_history, n_filters_for_each_size * n_filter_sizes) for users
        # (batch_size, n_filters_for_each_size * n_filter_sizes) for items
        output = tf.reshape(output, [-1, self.n_filters * len(self.filter_sizes)])
        return output

    def _build_train(self):
        """Build sigmoid cross-entropy loss + L2 regularization and the Adam step."""
        with tf.compat.v1.name_scope('train'):
            self.base_loss = tf.reduce_mean(
                input_tensor=tf.nn.sigmoid_cross_entropy_with_logits(labels=self.labels, logits=self.scores_unnormalized))
            self.l2_loss = tf.Variable(tf.constant(0., dtype=tf.float32), trainable=False)
            for param in self.params:
                self.l2_loss = tf.add(self.l2_loss, self.l2_weight * tf.nn.l2_loss(param))
            if self.transform:
                # Pick up the kernel regularizers added by the dense layers.
                self.l2_loss = tf.add(self.l2_loss, tf.compat.v1.losses.get_regularization_loss())
            self.loss = self.base_loss + self.l2_loss
            self.optimizer = tf.compat.v1.train.AdamOptimizer(self.lr).minimize(self.loss)

    def train(self, sess, data, start, end):
        """Run one optimizer step on the batch data[start:end] using `sess`."""
        return sess.run(self.optimizer, self.get_feed_dict(data, start, end))

    def predict(self, data, start, end):
        """Return (labels, scores) for data[start:end] using self.session."""
        labels, scores = self.session.run([self.labels, self.scores], self.get_feed_dict(data, start, end))
        return labels, scores

    @staticmethod
    def transform_feed_dict(data, start, end, model):
        """Slice the batch fields out of `data` in placeholder order.

        Static so it can be pickled by save_prediction_model() and reused by
        DKNPredict; `model` is unused here but part of the shared signature.
        """
        return [data.clicked_words[start:end],
                data.clicked_entities[start:end],
                data.words[start:end],
                data.entities[start:end],
                data.labels[start:end]]

    def get_feed_dict(self, data, start, end):
        """Map the sliced batch onto the model's placeholders."""
        transformed_data = self.transform_feed_dict(data, start, end, self)
        feed_dict = {self.clicked_words: transformed_data[0],
                     self.clicked_entities: transformed_data[1],
                     self.words: transformed_data[2],
                     self.entities: transformed_data[3],
                     self.labels: transformed_data[4]}
        return feed_dict

    def save_session(self):
        """Checkpoint the current session to output_path/epoch-<n_epochs>."""
        if self.output_path:
            saver = tf.compat.v1.train.Saver(max_to_keep=None)
            saver.save(self.session, self.output_path + '/epoch', global_step=self.n_epochs)
            logger.info("Model saved in path: %s" % self.output_path)
        else:
            raise ValueError('Output path is None')

    def save_prediction_model(self):
        """Checkpoint the session and pickle the hyper-parameters plus the
        feed-dict transform so DKNPredict can restore a runnable model."""
        self.save_session()
        with open(str(self.output_path) + ".pickle", 'wb') as f:
            pickle.dump({'params': self.model_params,
                         'transform_feed_dict': self.transform_feed_dict}, f)
class DKNPredict:
    """Inference-only wrapper that restores a saved DKN checkpoint and exposes
    predict() without rebuilding the training graph."""

    def __init__(self, params):
        """:param params: dict with 'params' (hyper-parameters, including
        'output_path' and 'n_epochs') and 'transform_feed_dict' (the pickled
        batch-slicing callable)."""
        self.model_params = params['params']
        self.session = tf.compat.v1.Session()
        # Load session
        graph = self.load_tf_model(self.model_params['output_path'], self.model_params['n_epochs'])
        # Load model components (tensors are found by the names given at build time)
        self.clicked_words = graph.get_tensor_by_name("input/clicked_words:0")
        self.clicked_entities = graph.get_tensor_by_name("input/clicked_entities:0")
        self.words = graph.get_tensor_by_name("input/words:0")
        self.entities = graph.get_tensor_by_name("input/entities:0")
        self.labels = graph.get_tensor_by_name('input/labels:0')
        self.scores = graph.get_tensor_by_name('scores:0')
        # Load methods to transform data
        self.transform_feed_dict = params['transform_feed_dict']
        if self.model_params.get('embeddings_extractor'):
            self.embeddings_extractor = self.model_params.get('embeddings_extractor')

    def get_feed_dict(self, data, start, end):
        """Map the sliced batch onto the restored placeholders (mirrors DKN)."""
        transformed_data = self.transform_feed_dict(data, start, end, self)
        feed_dict = {self.clicked_words: transformed_data[0],
                     self.clicked_entities: transformed_data[1],
                     self.words: transformed_data[2],
                     self.entities: transformed_data[3],
                     self.labels: transformed_data[4]}
        return feed_dict

    def load_tf_model(self, output_path, n_epochs):
        """Import the meta-graph and restore weights from the epoch checkpoint;
        returns the default graph holding the restored tensors."""
        saver = tf.compat.v1.train.import_meta_graph(output_path + '/epoch-{}.meta'.format(n_epochs))
        saver.restore(self.session, output_path + '/epoch-{}'.format(n_epochs))
        return tf.compat.v1.get_default_graph()

    def predict(self, data, start, end):
        """Return (labels, scores) for data[start:end]."""
        labels, scores = self.session.run([self.labels, self.scores], self.get_feed_dict(data, start, end))
        return labels, scores

    @classmethod
    def load_prediction_model(cls, output_path, paths=None):
        """Alternate constructor: unpickle `<output_path>.pickle`, optionally
        overriding stored paths, and build a DKNPredict from it."""
        with open(output_path + '.pickle', 'rb') as f:
            params = pickle.load(f)
        # If we have changed the folder of the files
        if paths:
            params['params'].update(paths)
        return cls(params)
| [
"logging.getLogger",
"tensorflow.reduce_sum",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.nn.softmax",
"tensorflow.compat.v1.get_default_graph",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.placeholder",
"tensorflow.nn.embedding_lookup",
"tensorflow.concat",
"tensorflow.nn.con... | [((81, 108), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (98, 108), False, 'import logging\n'), ((2650, 2714), 'tensorflow.reshape', 'tf.reshape', (['self.clicked_words'], {'shape': '[-1, self.max_text_length]'}), '(self.clicked_words, shape=[-1, self.max_text_length])\n', (2660, 2714), True, 'import tensorflow as tf\n'), ((2742, 2809), 'tensorflow.reshape', 'tf.reshape', (['self.clicked_entities'], {'shape': '[-1, self.max_text_length]'}), '(self.clicked_entities, shape=[-1, self.max_text_length])\n', (2752, 2809), True, 'import tensorflow as tf\n'), ((2934, 2996), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', ([], {'params': 'self.word_embeddings', 'ids': 'words'}), '(params=self.word_embeddings, ids=words)\n', (2956, 2996), True, 'import tensorflow as tf\n'), ((3025, 3092), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', ([], {'params': 'self.entity_embeddings', 'ids': 'entities'}), '(params=self.entity_embeddings, ids=entities)\n', (3047, 3092), True, 'import tensorflow as tf\n'), ((5370, 5439), 'tensorflow.reduce_sum', 'tf.reduce_sum', ([], {'input_tensor': '(user_embeddings * item_embeddings)', 'axis': '(1)'}), '(input_tensor=user_embeddings * item_embeddings, axis=1)\n', (5383, 5439), True, 'import tensorflow as tf\n'), ((5462, 5513), 'tensorflow.sigmoid', 'tf.sigmoid', (['self.scores_unnormalized'], {'name': '"""scores"""'}), "(self.scores_unnormalized, name='scores')\n", (5472, 5513), True, 'import tensorflow as tf\n'), ((6439, 6473), 'tensorflow.expand_dims', 'tf.expand_dims', (['item_embeddings', '(1)'], {}), '(item_embeddings, 1)\n', (6453, 6473), True, 'import tensorflow as tf\n'), ((6545, 6631), 'tensorflow.reduce_sum', 'tf.reduce_sum', ([], {'input_tensor': '(clicked_embeddings * item_embeddings_expanded)', 'axis': '(-1)'}), '(input_tensor=clicked_embeddings * item_embeddings_expanded,\n axis=-1)\n', (6558, 6631), True, 'import tensorflow as tf\n'), ((6699, 6740), 
'tensorflow.nn.softmax', 'tf.nn.softmax', (['attention_weights'], {'axis': '(-1)'}), '(attention_weights, axis=-1)\n', (6712, 6740), True, 'import tensorflow as tf\n'), ((6824, 6866), 'tensorflow.expand_dims', 'tf.expand_dims', (['attention_weights'], {'axis': '(-1)'}), '(attention_weights, axis=-1)\n', (6838, 6866), True, 'import tensorflow as tf\n'), ((6941, 7028), 'tensorflow.reduce_sum', 'tf.reduce_sum', ([], {'input_tensor': '(clicked_embeddings * attention_weights_expanded)', 'axis': '(1)'}), '(input_tensor=clicked_embeddings * attention_weights_expanded,\n axis=1)\n', (6954, 7028), True, 'import tensorflow as tf\n'), ((8098, 8130), 'tensorflow.expand_dims', 'tf.expand_dims', (['concat_input', '(-1)'], {}), '(concat_input, -1)\n', (8112, 8130), True, 'import tensorflow as tf\n'), ((9557, 9584), 'tensorflow.concat', 'tf.concat', (['outputs'], {'axis': '(-1)'}), '(outputs, axis=-1)\n', (9566, 9584), True, 'import tensorflow as tf\n'), ((12373, 12395), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (12393, 12395), True, 'import tensorflow as tf\n'), ((13917, 13949), 'tensorflow.compat.v1.get_default_graph', 'tf.compat.v1.get_default_graph', ([], {}), '()\n', (13947, 13949), True, 'import tensorflow as tf\n'), ((3185, 3217), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""input"""'], {}), "('input')\n", (3208, 3217), True, 'import tensorflow as tf\n'), ((3252, 3379), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, self.max_click_history, self.max_text_length]', 'name': '"""clicked_words"""'}), "(dtype=tf.int32, shape=[None, self.\n max_click_history, self.max_text_length], name='clicked_words')\n", (3276, 3379), True, 'import tensorflow as tf\n'), ((3428, 3558), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, self.max_click_history, self.max_text_length]', 'name': '"""clicked_entities"""'}), 
"(dtype=tf.int32, shape=[None, self.\n max_click_history, self.max_text_length], name='clicked_entities')\n", (3452, 3558), True, 'import tensorflow as tf\n'), ((3596, 3690), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, self.max_text_length]', 'name': '"""words"""'}), "(dtype=tf.int32, shape=[None, self.max_text_length],\n name='words')\n", (3620, 3690), True, 'import tensorflow as tf\n'), ((3732, 3829), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, self.max_text_length]', 'name': '"""entities"""'}), "(dtype=tf.int32, shape=[None, self.max_text_length],\n name='entities')\n", (3756, 3829), True, 'import tensorflow as tf\n'), ((3869, 3940), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None]', 'name': '"""labels"""'}), "(dtype=tf.float32, shape=[None], name='labels')\n", (3893, 3940), True, 'import tensorflow as tf\n'), ((4000, 4036), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""embedding"""'], {}), "('embedding')\n", (4023, 4036), True, 'import tensorflow as tf\n'), ((5689, 5755), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""kcnn"""'], {'reuse': 'tf.compat.v1.AUTO_REUSE'}), "('kcnn', reuse=tf.compat.v1.AUTO_REUSE)\n", (5716, 5755), True, 'import tensorflow as tf\n'), ((7544, 7612), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', ([], {'params': 'self.context_embeddings', 'ids': 'entities'}), '(params=self.context_embeddings, ids=entities)\n', (7566, 7612), True, 'import tensorflow as tf\n'), ((7640, 7714), 'tensorflow.concat', 'tf.concat', (['[embedded_words, embedded_entities, embedded_contexts]'], {'axis': '(-1)'}), '([embedded_words, embedded_entities, embedded_contexts], axis=-1)\n', (7649, 7714), True, 'import tensorflow as tf\n'), ((7815, 7870), 'tensorflow.concat', 'tf.concat', (['[embedded_words, 
embedded_entities]'], {'axis': '(-1)'}), '([embedded_words, embedded_entities], axis=-1)\n', (7824, 7870), True, 'import tensorflow as tf\n'), ((8810, 8910), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'concat_input', 'filters': 'w', 'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'name': '"""conv"""'}), "(input=concat_input, filters=w, strides=[1, 1, 1, 1], padding=\n 'VALID', name='conv')\n", (8822, 8910), True, 'import tensorflow as tf\n'), ((9150, 9291), 'tensorflow.nn.max_pool2d', 'tf.nn.max_pool2d', ([], {'input': 'relu', 'ksize': '[1, self.max_text_length - filter_size + 1, 1, 1]', 'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'name': '"""pool"""'}), "(input=relu, ksize=[1, self.max_text_length - filter_size +\n 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID', name='pool')\n", (9166, 9291), True, 'import tensorflow as tf\n'), ((9904, 9936), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""train"""'], {}), "('train')\n", (9927, 9936), True, 'import tensorflow as tf\n'), ((11717, 11759), 'tensorflow.compat.v1.train.Saver', 'tf.compat.v1.train.Saver', ([], {'max_to_keep': 'None'}), '(max_to_keep=None)\n', (11741, 11759), True, 'import tensorflow as tf\n'), ((12133, 12232), 'pickle.dump', 'pickle.dump', (["{'params': self.model_params, 'transform_feed_dict': self.transform_feed_dict}", 'f'], {}), "({'params': self.model_params, 'transform_feed_dict': self.\n transform_feed_dict}, f)\n", (12144, 12232), False, 'import pickle\n'), ((14285, 14299), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (14296, 14299), False, 'import pickle\n'), ((4085, 4119), 'numpy.load', 'np.load', (['self.word_embeddings_path'], {}), '(self.word_embeddings_path)\n', (4092, 4119), True, 'import numpy as np\n'), ((4201, 4237), 'numpy.load', 'np.load', (['self.entity_embeddings_path'], {}), '(self.entity_embeddings_path)\n', (4208, 4237), True, 'import numpy as np\n'), ((4445, 4482), 'numpy.load', 'np.load', (['self.context_embeddings_path'], 
{}), '(self.context_embeddings_path)\n', (4452, 4482), True, 'import numpy as np\n'), ((4525, 4584), 'tensorflow.Variable', 'tf.Variable', (['context_embs'], {'dtype': 'np.float32', 'name': '"""context"""'}), "(context_embs, dtype=np.float32, name='context')\n", (4536, 4584), True, 'import tensorflow as tf\n'), ((8936, 8959), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv', 'b'], {}), '(conv, b)\n', (8950, 8959), True, 'import tensorflow as tf\n'), ((10145, 10179), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), '(0.0, dtype=tf.float32)\n', (10156, 10179), True, 'import tensorflow as tf\n'), ((10012, 10109), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'self.labels', 'logits': 'self.scores_unnormalized'}), '(labels=self.labels, logits=self.\n scores_unnormalized)\n', (10051, 10109), True, 'import tensorflow as tf\n'), ((10409, 10454), 'tensorflow.compat.v1.losses.get_regularization_loss', 'tf.compat.v1.losses.get_regularization_loss', ([], {}), '()\n', (10452, 10454), True, 'import tensorflow as tf\n'), ((10539, 10580), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.compat.v1.train.AdamOptimizer', (['self.lr'], {}), '(self.lr)\n', (10571, 10580), True, 'import tensorflow as tf\n'), ((4901, 4947), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.5 * self.l2_weight)'], {}), '(0.5 * self.l2_weight)\n', (4925, 4947), True, 'import tensorflow as tf\n'), ((10304, 10324), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['param'], {}), '(param)\n', (10317, 10324), True, 'import tensorflow as tf\n'), ((5225, 5271), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.5 * self.l2_weight)'], {}), '(0.5 * self.l2_weight)\n', (5249, 5271), True, 'import tensorflow as tf\n')] |
#DCGAN train and generate
# 1. train
# 2. generate
import tifffile
import h5py
import torch.utils.data
from torch import Tensor
from os import listdir
from os.path import join
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import os
import random
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from scipy.ndimage.filters import median_filter
from skimage.filters import threshold_otsu
from collections import Counter
#################### Required settings FOR TRAIN ####################
out_folder = 'H:/tmp/DEEP_WATERROCK_CODE/codetest/' # output/storage directory for results
dataset_name='berea'
device='cuda'
manualSeed=43
imageSize=64
batchSize=32
number_generator_feature=64
number_discriminator_feature=32
number_z=512
number_train_iterations=10
number_gpu=1
#################### Required settings FOR GENERATE ##################
seedmin=62
seedmax=64
netG='H:/tmp/DEEP_WATERROCK_CODE/codetest/DCGAN/result/netG/netG_epoch_9.pth'
generate_name='test'
image_generate_size=4
##############################################################
# Discriminator network
class DCGAN3d_D(nn.Container):
    """3D DCGAN discriminator: strided Conv3d blocks that halve the spatial
    size down to 4, then a final conv + sigmoid producing one score per input.
    NOTE(review): nn.Container is a legacy torch base class — confirm the
    installed torch version still provides it."""
    def __init__(self,
                 image_size,          # size of the cubes fed to the discriminator
                 dimension_n,         # nz: latent-space dimensionality (unused here; kept for a symmetric signature)
                 channel_in,          # nc: number of input channels
                 D_feature_number,    # ndf: initial feature-map count in the discriminator
                 gpu_number,
                 extra_layers_number=0):
        super(DCGAN3d_D,self).__init__()
        self.gpu_number=gpu_number
        assert image_size % 16 ==0,'image size has to be a multiple of 16'
        # First block: conv halves the spatial size; no batch-norm on the input layer.
        D=nn.Sequential(
            nn.Conv3d(channel_in,D_feature_number,4,2,1,bias=False),
            nn.LeakyReLU(0.2,inplace=True),
        )
        i=3
        next_size=image_size/2   # float after true division; only compared with > below
        next_D_feature_number=D_feature_number
        # Optional extra 3x3x3 stride-1 layers that keep the spatial size.
        #build next other layers
        for t in range(extra_layers_number):
            D.add_module(str(i),
                         nn.Conv3d(next_D_feature_number,
                                   next_D_feature_number,
                                   3,1,1,bias=False))
            D.add_module(str(i+1),
                         nn.BatchNorm3d(next_D_feature_number))
            D.add_module(str(i+2),
                         nn.LeakyReLU(0.2,inplace=True))
            i+=3
        # Downsample: each block halves the size and doubles the feature count
        # until the volume is 4x4x4.
        while next_size>4:
            in_feat=next_D_feature_number
            out_feat=next_D_feature_number * 2
            D.add_module(str(i),
                         nn.Conv3d(in_feat,out_feat,4,2,1,bias=False))
            D.add_module(str(i+1),
                         nn.BatchNorm3d(out_feat))
            D.add_module(str(i+2),
                         nn.LeakyReLU(0.2,inplace=True))
            i+=3
            next_D_feature_number=next_D_feature_number * 2
            next_size=next_size/2
        # Final 4x4x4 conv collapses to a single channel; sigmoid -> probability.
        D.add_module(str(i),
                     nn.Conv3d(next_D_feature_number,1,4,1,0,bias=False))
        D.add_module(str(i+1),
                     nn.Sigmoid())
        self.D=D
    def forward(self,input):
        """Score a batch of volumes; returns shape (N, 1)."""
        gpu_ids=None
        if isinstance(input.data, torch.cuda.FloatTensor) and self.gpu_number > 1:
            gpu_ids = range(self.gpu_number)
        output=nn.parallel.data_parallel(self.D,input,gpu_ids)
        return output.view(-1,1)
# Generator network
class DCGAN3d_G(nn.Container):
    """3D DCGAN generator: transposed-conv blocks that upsample a latent
    vector from 4x4x4 up to `image_size`, ending in a tanh output layer.
    NOTE(review): nn.Container is a legacy torch base class — confirm the
    installed torch version still provides it."""
    def __init__(self,
                 image_size,
                 dimension_n,
                 channel_in,
                 G_feature_number,    # ngf: initial feature-map count in the generator
                 gpu_number,
                 extra_layers_number=0):
        super(DCGAN3d_G,self).__init__()
        self.gpu_number=gpu_number
        assert image_size % 16 ==0, "image size has to be a multiple of 16"
        # Work out the widest feature count so that halving it at each
        # upsampling step ends at G_feature_number when the size reaches image_size.
        next_G_feature_number=G_feature_number//2
        end_image_size=4
        while end_image_size!=image_size:
            next_G_feature_number=next_G_feature_number * 2
            end_image_size = end_image_size * 2
        # Project the latent vector (nz x 1 x 1 x 1) to a 4x4x4 volume.
        G=nn.Sequential(
            nn.ConvTranspose3d(dimension_n,next_G_feature_number,4,1,0,bias=False),
            nn.BatchNorm3d(next_G_feature_number),
            nn.ReLU(True),
        )
        i=3
        next_size=4
        next_G_feature_number=next_G_feature_number
        # Upsample: each block doubles the size and halves the feature count
        # until one step below image_size.
        while next_size<image_size//2:
            G.add_module(str(i),
                         nn.ConvTranspose3d(next_G_feature_number,
                                            next_G_feature_number//2,
                                            4,2,1,bias=False))
            G.add_module(str(i+1),
                         nn.BatchNorm3d(next_G_feature_number//2))
            G.add_module(str(i+2),
                         nn.ReLU(True))
            i+=3
            next_G_feature_number=next_G_feature_number//2
            next_size=next_size*2
        # Optional extra 3x3x3 stride-1 layers that keep the spatial size.
        #extra layers
        for t in range(extra_layers_number):
            G.add_module(str(i),
                         nn.Conv3d(next_G_feature_number,next_G_feature_number,
                                   3,1,1,bias=False))
            G.add_module(str(i+1),
                         nn.BatchNorm3d(next_G_feature_number))
            G.add_module(str(i+2),
                         nn.ReLU(True))
            i+=3
        # Final upsampling to image_size with the requested channel count;
        # tanh maps outputs to [-1, 1].
        G.add_module(str(i),
                     nn.ConvTranspose3d(next_G_feature_number,channel_in,
                                        4,2,1,bias=False))
        G.add_module(str(i+1),
                     nn.Tanh())
        self.G=G
    def forward(self,input):
        """Generate a batch of volumes from latent input."""
        gpu_ids = None
        if isinstance(input.data, torch.cuda.FloatTensor) and self.gpu_number> 1:
            gpu_ids = range(self.gpu_number)
        return nn.parallel.data_parallel(self.G, input, gpu_ids)
class DCGAN3D_G_CPU(nn.Container):
    """CPU-friendly variant of the 3-D generator.

    Same layer layout as DCGAN3d_G, but the convolutions carry bias terms
    and forward() calls the network directly instead of using data_parallel.
    """
    def __init__(self, isize, nz, nc, ngf, ngpu, n_extra_layers=0):
        super(DCGAN3D_G_CPU, self).__init__()
        self.ngpu = ngpu
        assert isize % 16 == 0, "isize has to be a multiple of 16"
        # Widest layer width: ngf//2 doubled once per upsampling step from 4 to isize.
        cngf, tisize = ngf // 2, 4
        while tisize != isize:
            cngf *= 2
            tisize *= 2
        # input is Z, going into a convolution; entry block takes names "0"-"2".
        layers = nn.Sequential(
            nn.ConvTranspose3d(nz, cngf, 4, 1, 0, bias=True),
            nn.BatchNorm3d(cngf),
            nn.ReLU(True),
        )
        idx, csize = 3, 4

        def push(module):
            # Register one module under the next free string index.
            nonlocal idx
            layers.add_module(str(idx), module)
            idx += 1

        # Upsample to isize // 2, halving the channel count each time.
        while csize < isize // 2:
            push(nn.ConvTranspose3d(cngf, cngf // 2, 4, 2, 1, bias=True))
            push(nn.BatchNorm3d(cngf // 2))
            push(nn.ReLU(True))
            cngf //= 2
            csize *= 2
        # Optional shape-preserving extra layers.
        for _ in range(n_extra_layers):
            push(nn.Conv3d(cngf, cngf, 3, 1, 1, bias=True))
            push(nn.BatchNorm3d(cngf))
            push(nn.ReLU(True))
        push(nn.ConvTranspose3d(cngf, nc, 4, 2, 1, bias=True))
        push(nn.Tanh())
        self.main = layers
    def forward(self, input):
        return self.main(input)
def save_hdf5(tensor, filename):
    """Denormalize a [-1, 1] tensor to [0, 255] bytes and store it as an HDF5 'data' dataset."""
    as_bytes = tensor.cpu().mul(0.5).add(0.5).mul(255).byte().numpy()
    with h5py.File(filename, 'w') as out:
        out.create_dataset('data', data=as_bytes, dtype="i8", compression="gzip")
def is_image_file(filename):
    """Return True when *filename* carries an HDF5 extension (".hdf5" or ".h5")."""
    return filename.endswith((".hdf5", ".h5"))
def load_img(filepath):
    """Load an HDF5 'data' volume and return it as a [-1, 1] float tensor with a leading axis."""
    with h5py.File(filepath, "r") as src:
        volume = src['data'][()]
    volume = np.expand_dims(volume, axis=0)
    # bytes in [0, 255] -> floats in [-1, 1], matching the Tanh output range
    return Tensor(volume).div(255).sub(0.5).div(0.5)
def weights_init(m):
    """DCGAN weight initializer: N(0, 0.02) for conv layers; N(1, 0.02) weight and zero bias for batch-norm."""
    kind = m.__class__.__name__
    if 'Conv' in kind:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in kind:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
class HDF5Dataset(torch.utils.data.Dataset):
    """Dataset yielding one normalized tensor per HDF5 file found in *image_dir*."""
    def __init__(self, image_dir, input_transform=None, target_transform=None):
        super(HDF5Dataset, self).__init__()
        # Collect every *.hdf5 / *.h5 file in the directory up front.
        self.image_filenames = [join(image_dir, name)
                                for name in listdir(image_dir)
                                if is_image_file(name)]
        # NOTE(review): neither transform is applied in __getitem__; both are
        # stored only for interface compatibility.
        self.input_transform = input_transform
        self.target_transform = target_transform
    def __getitem__(self, index):
        # Only the input volume is returned; there is no separate target.
        return load_img(self.image_filenames[index])
    def __len__(self):
        return len(self.image_filenames)
### create training images
def train_dataset_preprocess(dataset_name):
    """Slice <out_folder>/<dataset_name>.tif into overlapping 64^3 HDF5 cubes.

    Cubes are cut on a 32-voxel stride (50% overlap); partial cubes at the
    borders are discarded. Returns the directory the cubes were written to.
    """
    source_tiff = out_folder + dataset_name + '.tif'
    edge = 64   # cube edge length
    step = 32   # stride between neighbouring cubes
    train_dir = out_folder + 'DCGAN/train_images/'
    try:
        os.makedirs(out_folder + 'DCGAN/train_images')
    except OSError:
        pass  # directory already exists
    volume = tifffile.imread(source_tiff)
    written = 0
    for i in range(0, volume.shape[0], step):
        for j in range(0, volume.shape[1], step):
            for k in range(0, volume.shape[2], step):
                cube = volume[i:i + edge, j:j + edge, k:k + edge]
                if cube.shape != (edge, edge, edge):
                    continue  # incomplete cube at a volume border
                out_name = train_dir + "/" + str(dataset_name) + "_" + str(written) + ".hdf5"
                with h5py.File(out_name, "w") as dst:
                    dst.create_dataset('data', data=cube, dtype="i8", compression="gzip")
                written += 1
    print('Generate images/dataset number count:', written)
    return train_dir
def DCGAN_train(imageSize,
                batchSize,
                ngf,
                ndf,
                nz,
                niter,
                ngpu,
                manualSeed,
                out_folder,
                dataset_name,
                device):
    """Train the 3-D DCGAN on cubes cut from <out_folder>/<dataset_name>.tif.

    imageSize    -- edge length of the training cubes
    batchSize    -- minibatch size
    ngf, ndf     -- base feature-map counts of generator / discriminator
    nz           -- latent-vector dimensionality
    niter        -- number of training epochs
    ngpu         -- number of GPUs handed to the networks
    manualSeed   -- RNG seed for python/torch RNGs
    out_folder   -- root directory for outputs and checkpoints
    dataset_name -- basename of the source tiff volume
    device       -- 'cuda' or 'cpu'
    """
    data_root=train_dataset_preprocess(dataset_name)
    lr=1e-5
    workers=0
    nc=1  # single-channel (grayscale) volumes
    criterion=nn.BCELoss()
    result_path=out_folder+'DCGAN/result/'
    outf=out_folder+'DCGAN/output/'
    try:
        os.makedirs(out_folder+'DCGAN/output')
        os.makedirs(out_folder+'DCGAN/result')
    except OSError:
        pass  # directories already exist
    np.random.seed(43)
    random.seed(manualSeed)
    torch.manual_seed(manualSeed)
    cudnn.benchmark=True
    if torch.cuda.is_available() and device!='cuda':
        print("WARNING: You have a CUDA device, so you should probably run with device='cuda'")
    if dataset_name in ['berea']:
        dataset=HDF5Dataset(data_root,
                            input_transform=transforms.Compose([transforms.ToTensor()]))
    assert dataset
    dataloader=torch.utils.data.DataLoader(dataset,batch_size=batchSize,shuffle=True,num_workers=int(workers))
    netG=DCGAN3d_G(imageSize,nz,nc,ngf,ngpu)
    netG.apply(weights_init)
    print(netG)
    netD=DCGAN3d_D(imageSize,nz,nc,ndf,ngpu)
    netD.apply(weights_init)
    print(netD)
    input,noise,fixed_noise,fixed_noise_TI=None,None,None,None
    input=torch.FloatTensor(batchSize,nc,imageSize,imageSize,imageSize)
    # 7x7x7 latent grid gives a larger-than-training sample; 1x1x1 gives a training-sized one
    noise=torch.FloatTensor(batchSize,nz,1,1,1)
    fixed_noise=torch.FloatTensor(1,nz,7,7,7).normal_(0,1)
    fixed_noise_TI=torch.FloatTensor(1,nz,1,1,1).normal_(0,1)
    label=torch.FloatTensor(batchSize)
    real_label=0.9  # real targets are 0.9 rather than 1.0
    fake_label=0
    if device=='cuda':
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
        fixed_noise_TI = fixed_noise_TI.cuda()
    input = Variable(input)  # variables are mutable
    label = Variable(label)  # variables are mutable
    noise = Variable(noise)  # variables are mutable
    fixed_noise=Variable(fixed_noise)
    fixed_noise_TI=Variable(fixed_noise_TI)
    optimizerD=optim.Adam(netD.parameters(),lr=lr,betas=(0.5,0.999))
    optimizerG=optim.Adam(netG.parameters(),lr=lr,betas=(0.5,0.999))
    #main part
    gen_iterations=0
    G_loss=[]
    D_loss=[]
    iters=0
    for epoch in range(niter):
        print('This is the ',epoch,'-th')
        for i,data in enumerate(dataloader,0):
            # NOTE(review): extension '.scv' looks like a typo for '.csv'
            f=open(result_path+'training_curve.scv','a')
            # --- discriminator update: maximize log(D(x)) + log(1 - D(G(z))) ---
            netD.zero_grad()
            real_cpu=data.to(device)
            batch_size=real_cpu.size(0)
            label=torch.full((batch_size,),real_label,device=device)
            output=netD(real_cpu).view(-1)
            errD_real=criterion(output,label)
            errD_real.backward()
            D_x=output.mean().item()
            noise=torch.randn(batch_size,nz,1,1,1,device=device)
            fake=netG(noise)
            label.fill_(fake_label)
            output = netD(fake.detach()).view(-1)
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.mean().item()
            errD = errD_real + errD_fake
            optimizerD.step()
            # --- generator update: targets are 1.0 here ---
            netG.zero_grad()
            label.fill_(1.0)
            noise2=torch.randn(batch_size,nz,1,1,1,device=device)
            fake2=netG(noise2)
            output = netD(fake2).view(-1)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.mean().item()
            optimizerG.step()
            gen_iterations+=1
            print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                  % (epoch, niter, i, len(dataloader),
                     errD.data, errG.data, D_x, D_G_z1, D_G_z2))
            f.write('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                  % (epoch, niter, i, len(dataloader),
                     errD.data, errG.data, D_x, D_G_z1, D_G_z2))
            f.write('\n')
            f.close()
            # dump fixed-noise samples every iteration
            fake = netG(fixed_noise)
            fake_TI = netG(fixed_noise_TI)
            try:
                os.makedirs(result_path+'fake_samples')
                os.makedirs(result_path+'fake_TI')
            except OSError:
                pass
            save_hdf5(fake.data, result_path+'fake_samples/'+'fake_samples_{0}.hdf5'.format(gen_iterations))
            save_hdf5(fake_TI.data, result_path+'fake_TI/'+'fake_TI_{0}.hdf5'.format(gen_iterations))
        # do checkpointing
        try:
            os.makedirs(result_path+'netG')
            os.makedirs(result_path+'netD')
        except OSError:
            pass
        torch.save(netG.state_dict(), result_path+'netG/'+'netG_epoch_%d.pth' % (epoch))
        torch.save(netD.state_dict(), result_path+'netD/'+'netD_epoch_%d.pth' % (epoch))
        #record loss
        G_loss.append(errG.item())
        D_loss.append(errD.item())
        iters+=1
    # write the full loss history once training is done
    f=open(result_path+'Loss_log.txt','a')
    f.write('G_loss:')
    f.write('\n')
    for k in range(len(G_loss)):
        f.write(str(G_loss[k]))
        f.write('\n')
    f.write('D_loss:')
    f.write('\n')
    for k in range(len(D_loss)):
        f.write(str(D_loss[k]))
        f.write('\n')
    f.close()
def DCGAN_generator(seedmin,
                    seedmax,
                    ngf,
                    ndf,
                    nz,
                    ngpu,
                    imageSize,
                    imsize,
                    out_folder,
                    name,
                    device,
                    netG,
                    ):
    """Generate one HDF5 sample per seed in [seedmin, seedmax) from a trained generator.

    For every seed the RNGs are reseeded, the generator is rebuilt, the
    *netG* checkpoint is loaded, and a (1, nz, imsize, imsize, imsize)
    noise grid is decoded; the result lands in
    <out_folder>/DCGAN/output/<name>/.
    """
    if name is None:
        name = 'samples'
    try:
        os.makedirs(out_folder + 'DCGAN/output/' + name)
    except OSError:
        pass  # directory already exists
    outf = out_folder + 'DCGAN/output/'
    # Seed-independent setup hoisted out of the loop.
    cudnn.benchmark = True
    ngpu = int(ngpu)
    nz = int(nz)
    ngf = int(ngf)
    ndf = int(ndf)
    nc = 1
    for seed in range(seedmin, seedmax):
        random.seed(seed)
        torch.manual_seed(seed)
        net = DCGAN3d_G(imageSize, nz, nc, ngf, ngpu)
        net.apply(weights_init)
        net.load_state_dict(torch.load(netG))
        print(net)
        sample_noise = torch.FloatTensor(1, nz, imsize, imsize, imsize).normal_(0, 1)
        if device == 'cuda':
            net.cuda()
            sample_noise = sample_noise.cuda()
        sample_noise = Variable(sample_noise)
        generated = net(sample_noise)
        save_hdf5(generated.data, '{0}/{1}_{2}.hdf5'.format(outf + name, name, seed))
def result_analysis(out_folder, generate_name):
    """Convert generated HDF5 samples to tiff and log the porosity of each sample.

    Pass 1 converts every HDF5 volume under
    <out_folder>/DCGAN/output/<generate_name>/ to a tiff.
    Pass 2 median-filters each tiff, centre-crops it to 200^3 (assumes the
    volume is at least 240 voxels per side — TODO confirm for small samples),
    Otsu-thresholds it into pore/solid, and appends the porosity (pore
    fraction) to <generate_name>_log.txt.

    Fix vs. original: h5py files were opened in a loop and never closed
    (handle leak); they are now managed with `with`.
    """
    hdf5_dir = out_folder + 'DCGAN/output/' + generate_name + '/'
    tiff_name = generate_name + '_tiff'
    samples = os.listdir(hdf5_dir)
    try:
        os.makedirs(out_folder + 'DCGAN/output/' + tiff_name)
    except OSError:
        pass  # directory already exists
    # pass 1: hdf5 -> tiff
    for img in samples:
        with h5py.File(hdf5_dir + img, 'r') as src:
            array = src['data'][()]
        tiff = array[0, 0, :, :, :].astype(np.float32)
        tifffile.imsave(out_folder + 'DCGAN/output/{0}/{1}.tiff'.format(tiff_name, img[:-5]), tiff)
    # pass 2: porosity statistics per tiff
    path2 = out_folder + 'DCGAN/output/' + tiff_name
    for img in os.listdir(path2):
        f = open(out_folder + 'DCGAN/output/' + generate_name + '_log.txt', 'a')
        im_in = tifffile.imread(path2 + '/' + img)
        im_in = median_filter(im_in, size=(3, 3, 3))
        im_in = im_in[40:240, 40:240, 40:240]  # centre crop
        im_in = im_in / 255.
        threshold_global_otsu = threshold_otsu(im_in)
        segmented_image = (im_in >= threshold_global_otsu).astype(np.int32)
        porc = Counter(segmented_image.flatten())
        porosity = porc[0] / (porc[0] + porc[1])
        print(img[:-5], ' porosity: ', porosity)
        f.write(str(img[:-5]) + ' porosity: ' + str(porosity))
        f.write('\n')
        f.close()
'''
9. DCGAN train
DCGAN_train(imageSize=imageSize,batchSize=batchSize,
ngf=number_generator_feature,
ndf=number_discriminator_feature,
nz=number_z,
niter=number_train_iterations,
ngpu=number_gpu,
manualSeed=manualSeed,
out_folder=out_folder,
dataset_name=dataset_name,
device=device)
'''
'''
10. DCGAN generate
DCGAN_generator(seedmin=seedmin,
seedmax=seedmax,
ngf=number_generator_feature,
ndf=number_discriminator_feature,
nz=number_z,
ngpu=number_gpu,
imageSize=imageSize,
imsize=image_generate_size,
out_folder=out_folder,
name=generate_name,
device=device,
netG=netG,
)
'''
'''
11. DCGAN batch processing samples statistic
result_analysis(out_folder,generate_name)
'''
| [
"torch.nn.ReLU",
"torch.nn.Tanh",
"skimage.filters.threshold_otsu",
"torch.cuda.is_available",
"torch.nn.Sigmoid",
"os.listdir",
"torch.nn.parallel.data_parallel",
"numpy.random.seed",
"torch.nn.BatchNorm3d",
"torch.autograd.Variable",
"torchvision.transforms.ToTensor",
"torch.randn",
"torch... | [((8020, 8047), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (8034, 8047), True, 'import numpy as np\n'), ((8064, 8075), 'torch.Tensor', 'Tensor', (['img'], {}), '(img)\n', (8070, 8075), False, 'from torch import Tensor\n'), ((9271, 9297), 'tifffile.imread', 'tifffile.imread', (['tiff_path'], {}), '(tiff_path)\n', (9286, 9297), False, 'import tifffile\n'), ((10384, 10396), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (10394, 10396), True, 'import torch.nn as nn\n'), ((10616, 10634), 'numpy.random.seed', 'np.random.seed', (['(43)'], {}), '(43)\n', (10630, 10634), True, 'import numpy as np\n'), ((10639, 10662), 'random.seed', 'random.seed', (['manualSeed'], {}), '(manualSeed)\n', (10650, 10662), False, 'import random\n'), ((10667, 10696), 'torch.manual_seed', 'torch.manual_seed', (['manualSeed'], {}), '(manualSeed)\n', (10684, 10696), False, 'import torch\n'), ((11426, 11491), 'torch.FloatTensor', 'torch.FloatTensor', (['batchSize', 'nc', 'imageSize', 'imageSize', 'imageSize'], {}), '(batchSize, nc, imageSize, imageSize, imageSize)\n', (11443, 11491), False, 'import torch\n'), ((11498, 11539), 'torch.FloatTensor', 'torch.FloatTensor', (['batchSize', 'nz', '(1)', '(1)', '(1)'], {}), '(batchSize, nz, 1, 1, 1)\n', (11515, 11539), False, 'import torch\n'), ((11667, 11695), 'torch.FloatTensor', 'torch.FloatTensor', (['batchSize'], {}), '(batchSize)\n', (11684, 11695), False, 'import torch\n'), ((11996, 12011), 'torch.autograd.Variable', 'Variable', (['input'], {}), '(input)\n', (12004, 12011), False, 'from torch.autograd import Variable\n'), ((12031, 12046), 'torch.autograd.Variable', 'Variable', (['label'], {}), '(label)\n', (12039, 12046), False, 'from torch.autograd import Variable\n'), ((12066, 12081), 'torch.autograd.Variable', 'Variable', (['noise'], {}), '(noise)\n', (12074, 12081), False, 'from torch.autograd import Variable\n'), ((12105, 12126), 'torch.autograd.Variable', 'Variable', (['fixed_noise'], 
{}), '(fixed_noise)\n', (12113, 12126), False, 'from torch.autograd import Variable\n'), ((12146, 12170), 'torch.autograd.Variable', 'Variable', (['fixed_noise_TI'], {}), '(fixed_noise_TI)\n', (12154, 12170), False, 'from torch.autograd import Variable\n'), ((16833, 16849), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (16843, 16849), False, 'import os\n'), ((17251, 17268), 'os.listdir', 'os.listdir', (['path2'], {}), '(path2)\n', (17261, 17268), False, 'import os\n'), ((3306, 3355), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.D', 'input', 'gpu_ids'], {}), '(self.D, input, gpu_ids)\n', (3331, 3355), True, 'import torch.nn as nn\n'), ((5886, 5935), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.G', 'input', 'gpu_ids'], {}), '(self.G, input, gpu_ids)\n', (5911, 5935), True, 'import torch.nn as nn\n'), ((7685, 7709), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (7694, 7709), False, 'import h5py\n'), ((7951, 7975), 'h5py.File', 'h5py.File', (['filepath', '"""r"""'], {}), "(filepath, 'r')\n", (7960, 7975), False, 'import h5py\n'), ((9185, 9231), 'os.makedirs', 'os.makedirs', (["(out_folder + 'DCGAN/train_images')"], {}), "(out_folder + 'DCGAN/train_images')\n", (9196, 9231), False, 'import os\n'), ((10493, 10533), 'os.makedirs', 'os.makedirs', (["(out_folder + 'DCGAN/output')"], {}), "(out_folder + 'DCGAN/output')\n", (10504, 10533), False, 'import os\n'), ((10540, 10580), 'os.makedirs', 'os.makedirs', (["(out_folder + 'DCGAN/result')"], {}), "(out_folder + 'DCGAN/result')\n", (10551, 10580), False, 'import os\n'), ((10729, 10754), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10752, 10754), False, 'import torch\n'), ((15820, 15868), 'os.makedirs', 'os.makedirs', (["(out_folder + 'DCGAN/output/' + name)"], {}), "(out_folder + 'DCGAN/output/' + name)\n", (15831, 15868), False, 'import os\n'), ((15994, 16011), 'random.seed', 'random.seed', (['seed'], 
{}), '(seed)\n', (16005, 16011), False, 'import random\n'), ((16020, 16043), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (16037, 16043), False, 'import torch\n'), ((16549, 16570), 'torch.autograd.Variable', 'Variable', (['fixed_noise'], {}), '(fixed_noise)\n', (16557, 16570), False, 'from torch.autograd import Variable\n'), ((16867, 16920), 'os.makedirs', 'os.makedirs', (["(out_folder + 'DCGAN/output/' + tiff_name)"], {}), "(out_folder + 'DCGAN/output/' + tiff_name)\n", (16878, 16920), False, 'import os\n'), ((16985, 17011), 'h5py.File', 'h5py.File', (['(path + img)', '"""r"""'], {}), "(path + img, 'r')\n", (16994, 17011), False, 'import h5py\n'), ((17380, 17414), 'tifffile.imread', 'tifffile.imread', (["(path2 + '/' + img)"], {}), "(path2 + '/' + img)\n", (17395, 17414), False, 'import tifffile\n'), ((17425, 17461), 'scipy.ndimage.filters.median_filter', 'median_filter', (['im_in'], {'size': '(3, 3, 3)'}), '(im_in, size=(3, 3, 3))\n', (17438, 17461), False, 'from scipy.ndimage.filters import median_filter\n'), ((17556, 17577), 'skimage.filters.threshold_otsu', 'threshold_otsu', (['im_in'], {}), '(im_in)\n', (17570, 17577), False, 'from skimage.filters import threshold_otsu\n'), ((1706, 1766), 'torch.nn.Conv3d', 'nn.Conv3d', (['channel_in', 'D_feature_number', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(channel_in, D_feature_number, 4, 2, 1, bias=False)\n', (1715, 1766), True, 'import torch.nn as nn\n'), ((1775, 1806), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1787, 1806), True, 'import torch.nn as nn\n'), ((2968, 3024), 'torch.nn.Conv3d', 'nn.Conv3d', (['next_D_feature_number', '(1)', '(4)', '(1)', '(0)'], {'bias': '(False)'}), '(next_D_feature_number, 1, 4, 1, 0, bias=False)\n', (2977, 3024), True, 'import torch.nn as nn\n'), ((3073, 3085), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (3083, 3085), True, 'import torch.nn as nn\n'), ((4104, 4179), 
'torch.nn.ConvTranspose3d', 'nn.ConvTranspose3d', (['dimension_n', 'next_G_feature_number', '(4)', '(1)', '(0)'], {'bias': '(False)'}), '(dimension_n, next_G_feature_number, 4, 1, 0, bias=False)\n', (4122, 4179), True, 'import torch.nn as nn\n'), ((4188, 4225), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['next_G_feature_number'], {}), '(next_G_feature_number)\n', (4202, 4225), True, 'import torch.nn as nn\n'), ((4239, 4252), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (4246, 4252), True, 'import torch.nn as nn\n'), ((5484, 5558), 'torch.nn.ConvTranspose3d', 'nn.ConvTranspose3d', (['next_G_feature_number', 'channel_in', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(next_G_feature_number, channel_in, 4, 2, 1, bias=False)\n', (5502, 5558), True, 'import torch.nn as nn\n'), ((5664, 5673), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (5671, 5673), True, 'import torch.nn as nn\n'), ((6409, 6457), 'torch.nn.ConvTranspose3d', 'nn.ConvTranspose3d', (['nz', 'cngf', '(4)', '(1)', '(0)'], {'bias': '(True)'}), '(nz, cngf, 4, 1, 0, bias=True)\n', (6427, 6457), True, 'import torch.nn as nn\n'), ((6471, 6491), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['cngf'], {}), '(cngf)\n', (6485, 6491), True, 'import torch.nn as nn\n'), ((6505, 6518), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (6512, 6518), True, 'import torch.nn as nn\n'), ((7372, 7420), 'torch.nn.ConvTranspose3d', 'nn.ConvTranspose3d', (['cngf', 'nc', '(4)', '(2)', '(1)'], {'bias': '(True)'}), '(cngf, nc, 4, 2, 1, bias=True)\n', (7390, 7420), True, 'import torch.nn as nn\n'), ((7456, 7465), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (7463, 7465), True, 'import torch.nn as nn\n'), ((8603, 8621), 'os.path.join', 'join', (['image_dir', 'x'], {}), '(image_dir, x)\n', (8607, 8621), False, 'from os.path import join\n'), ((11552, 11585), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', 'nz', '(7)', '(7)', '(7)'], {}), '(1, nz, 7, 7, 7)\n', (11569, 11585), False, 'import torch\n'), ((11614, 
11647), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', 'nz', '(1)', '(1)', '(1)'], {}), '(1, nz, 1, 1, 1)\n', (11631, 11647), False, 'import torch\n'), ((12691, 12743), 'torch.full', 'torch.full', (['(batch_size,)', 'real_label'], {'device': 'device'}), '((batch_size,), real_label, device=device)\n', (12701, 12743), False, 'import torch\n'), ((12932, 12983), 'torch.randn', 'torch.randn', (['batch_size', 'nz', '(1)', '(1)', '(1)'], {'device': 'device'}), '(batch_size, nz, 1, 1, 1, device=device)\n', (12943, 12983), False, 'import torch\n'), ((13396, 13447), 'torch.randn', 'torch.randn', (['batch_size', 'nz', '(1)', '(1)', '(1)'], {'device': 'device'}), '(batch_size, nz, 1, 1, 1, device=device)\n', (13407, 13447), False, 'import torch\n'), ((14285, 14326), 'os.makedirs', 'os.makedirs', (["(result_path + 'fake_samples')"], {}), "(result_path + 'fake_samples')\n", (14296, 14326), False, 'import os\n'), ((14337, 14373), 'os.makedirs', 'os.makedirs', (["(result_path + 'fake_TI')"], {}), "(result_path + 'fake_TI')\n", (14348, 14373), False, 'import os\n'), ((14668, 14701), 'os.makedirs', 'os.makedirs', (["(result_path + 'netG')"], {}), "(result_path + 'netG')\n", (14679, 14701), False, 'import os\n'), ((14712, 14745), 'os.makedirs', 'os.makedirs', (["(result_path + 'netD')"], {}), "(result_path + 'netD')\n", (14723, 14745), False, 'import os\n'), ((16305, 16321), 'torch.load', 'torch.load', (['netG'], {}), '(netG)\n', (16315, 16321), False, 'import torch\n'), ((2056, 2132), 'torch.nn.Conv3d', 'nn.Conv3d', (['next_D_feature_number', 'next_D_feature_number', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(next_D_feature_number, next_D_feature_number, 3, 1, 1, bias=False)\n', (2065, 2132), True, 'import torch.nn as nn\n'), ((2261, 2298), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['next_D_feature_number'], {}), '(next_D_feature_number)\n', (2275, 2298), True, 'import torch.nn as nn\n'), ((2360, 2391), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': 
'(True)'}), '(0.2, inplace=True)\n', (2372, 2391), True, 'import torch.nn as nn\n'), ((2583, 2632), 'torch.nn.Conv3d', 'nn.Conv3d', (['in_feat', 'out_feat', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(in_feat, out_feat, 4, 2, 1, bias=False)\n', (2592, 2632), True, 'import torch.nn as nn\n'), ((2689, 2713), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['out_feat'], {}), '(out_feat)\n', (2703, 2713), True, 'import torch.nn as nn\n'), ((2775, 2806), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (2787, 2806), True, 'import torch.nn as nn\n'), ((4466, 4560), 'torch.nn.ConvTranspose3d', 'nn.ConvTranspose3d', (['next_G_feature_number', '(next_G_feature_number // 2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(next_G_feature_number, next_G_feature_number // 2, 4, 2,\n 1, bias=False)\n', (4484, 4560), True, 'import torch.nn as nn\n'), ((4725, 4767), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(next_G_feature_number // 2)'], {}), '(next_G_feature_number // 2)\n', (4739, 4767), True, 'import torch.nn as nn\n'), ((4835, 4848), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (4842, 4848), True, 'import torch.nn as nn\n'), ((5102, 5178), 'torch.nn.Conv3d', 'nn.Conv3d', (['next_G_feature_number', 'next_G_feature_number', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(next_G_feature_number, next_G_feature_number, 3, 1, 1, bias=False)\n', (5111, 5178), True, 'import torch.nn as nn\n'), ((5287, 5324), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['next_G_feature_number'], {}), '(next_G_feature_number)\n', (5301, 5324), True, 'import torch.nn as nn\n'), ((5394, 5407), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (5401, 5407), True, 'import torch.nn as nn\n'), ((6651, 6706), 'torch.nn.ConvTranspose3d', 'nn.ConvTranspose3d', (['cngf', '(cngf // 2)', '(4)', '(2)', '(1)'], {'bias': '(True)'}), '(cngf, cngf // 2, 4, 2, 1, bias=True)\n', (6669, 6706), True, 'import torch.nn as nn\n'), ((6772, 6797), 
'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(cngf // 2)'], {}), '(cngf // 2)\n', (6786, 6797), True, 'import torch.nn as nn\n'), ((6863, 6876), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (6870, 6876), True, 'import torch.nn as nn\n'), ((7084, 7125), 'torch.nn.Conv3d', 'nn.Conv3d', (['cngf', 'cngf', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(cngf, cngf, 3, 1, 1, bias=True)\n', (7093, 7125), True, 'import torch.nn as nn\n'), ((7193, 7213), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['cngf'], {}), '(cngf)\n', (7207, 7213), True, 'import torch.nn as nn\n'), ((7281, 7294), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (7288, 7294), True, 'import torch.nn as nn\n'), ((8631, 8649), 'os.listdir', 'listdir', (['image_dir'], {}), '(image_dir)\n', (8638, 8649), False, 'from os import listdir\n'), ((16369, 16417), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', 'nz', 'imsize', 'imsize', 'imsize'], {}), '(1, nz, imsize, imsize, imsize)\n', (16386, 16417), False, 'import torch\n'), ((11008, 11029), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (11027, 11029), True, 'import torchvision.transforms as transforms\n')] |
# import modules
# -------------
# built-in
import csv
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm
from progress.bar import Bar
from timeit import default_timer as timer
# user
from cogen_util import find_global_cost
import pce as pce
from pce.quad4pce import columnize
# savedata flag (set to true to save the simulation in npz format)
savedata = False
# PARAMETERS
# ----------
power_graph = True
case_graph=1
# CHP (combined heat and power) unit
etae = 0.33  # electrical efficiency (inferred from the Pt derivation below — confirm)
etat = 0.4   # thermal efficiency (inferred likewise)
Pmin = 600   # minimum power (kW)
Pmax = 1000  # maximum power (kW)
print("CHP data:\n"
      f"---------\n"
      f" * etae = {etae}\n"
      f" * etat = {etat}\n"
      f" * Pmin = {Pmin} (kW)\n"
      f" * Pmax = {Pmax} (kW)\n")
# thermal power bounds derived from the electrical bounds via the efficiency ratio
Ptmin = Pmin / etae * etat
Ptmax = Pmax / etae * etat
# Boiler
etab = 0.95  # boiler efficiency
Bmax = 3000  # maximum boiler power (kW)
print("Boiler data:\n"
      f"------------\n"
      f" * etab = {etab}\n"
      f" * Bmax = {Bmax} (kW)\n")
# economic data
cNGd = 0.242 # cost of Natural Gas without tax (euro/SMC)
delta_tax = 0.008
cNGnd = cNGd + delta_tax # cost of Natural Gas with tax (euro/SMC)
Hi = 9.59 # Lower Heating Value (kWh/SMC)
print("Natural gas cost\n"
      f" * for CHP = {cNGd} (euro/SMC)\n"
      f" * for boiler = {cNGnd} (euro/SMC)\n"
      f" * lower heating value = {Hi} (kWh/SMC)\n")
# interval set to 1 hour
# ----------------------
Deltat = 1
print("integration time = {} (h)\n".format(Deltat))
# read csv file containing thermal load Ut (kWt)
# ----------------------------------------------
# Fix vs. original: the banner wrongly announced "electricity prices from
# 'cs.csv'" although this section reads the thermal load from 'UtAL.csv'.
print("Reading thermal load from 'UtAL.csv'")
print("------------------------------------")
Ut = []
with open('UtAL.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    for line_count, row in enumerate(csv_reader):
        if line_count == 0:
            print(f' * Column names are {", ".join(row)}')
        else:
            # column 1 holds the hourly thermal-load value
            Ut.append(float(row[1]))
    print(f' * processed {line_count} lines.')
fmt = "{:.2f} {:.2f}, {:.2f} {:.2f} {:.2f}\n" * 3 + "{:.2f} {:.2f}, {:.2f} {:.2f} {:.2f}"
print("Ut = [" + fmt.format(*Ut) + "] (kW)\n")
Ut = np.array(Ut)
# read csv file containing electricity prices cs (euro/MWh)
# ---------------------------------------------------------
print("Reading electricity prices from 'cs.csv'")
cs = []
with open('cs.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    for line_count, row in enumerate(csv_reader):
        if line_count == 0:
            print(f' * column names are {", ".join(row)}')
        else:
            # column 1 holds the hourly price value
            cs.append(float(row[1]))
    print(f' * processed {line_count} lines.')
fmt = "{:.2f} {:.2f}, {:.2f} {:.2f} {:.2f}\n" * 3 + "{:.2f} {:.2f}, {:.2f} {:.2f} {:.2f}"
print("cs = [" + fmt.format(*cs) + "] (euro/MWh)\n")
cs = np.array(cs)
# convert price from euro/MWh to euro/kWh
# (vectorized; replaces a manual element-by-element append loop)
cskW = cs / 1000
fmt = "{:.4f} {:.4f}, {:.4f} {:.4f} {:.4f}\n" * 3 + "{:.4f} {:.4f}, {:.4f} {:.4f} {:.4f}"
print("cskW = [" + fmt.format(*cskW) + "] (euro/kWh)\n\n")
# PCE
# ---
# Wrapper
def fun(x, etae=etae, etat=etat, Pmin=Pmin, Pmax=Pmax,
        Ptmin=Ptmin, Ptmax=Ptmax, etab=etab, Bmax=Bmax, cskW=cskW,
        Hi=Hi, cNGd=cNGd, cNGnd=cNGnd, Deltat=Deltat, Ut=Ut):
    """Vectorized global-cost evaluator handed to the PCE driver.

    Each row of *x* supplies three multiplicative perturbation factors for
    (gas cost, thermal load, electricity price); rows are evaluated in
    parallel with joblib and the costs are returned as a 1-D numpy array.
    """
    from joblib import Parallel, delayed

    def cost_of(sample):
        # scale gas cost, thermal load and price by the three uncertain factors
        total, _, _, _, _ = find_global_cost(etae, etat, Pmin, Pmax, Ptmin,
                                             Ptmax, etab, Bmax, Hi,
                                             sample[2] * cskW, sample[0] * cNGd,
                                             cNGnd, Deltat, sample[1] * Ut)
        return total

    runner = Parallel(n_jobs=-1, verbose=0)
    return np.array(runner(delayed(cost_of)(row) for row in x))
# generate PCE
orders = range(2,20,2)  # PCE orders to sweep
# variable subsets for which Sobol indices are computed (3 uncertain inputs)
index = [[1], [2], [3], [1,2], [1,3], [2,3], [1,2,3]]
S = np.zeros(( len(index), len(orders)))  # one column of Sobol indices per order
# 'n' = normal input distributions (param = [mean, std]);
# 'u' = uniform (param = [lower, upper]) — presumably; confirm against pce docs
kind = 'n'
if kind == 'n':
    distrib = ['n', 'n', 'n']
    param = [[1, 0.05],[1, 0.05],[1, 0.05]]
elif kind == 'u':
    distrib = ['u', 'u', 'u']
    param = [[0.9, 1.1],[0.9, 1.1],[0.9, 1.1]]
mu = []     # PCE mean per order
sigma = []  # PCE standard deviation per order
t1 = timer()
print('Sobol index computation at increasing PCE order:')
with Bar(' * progress: ', max=len(orders), suffix='%(percent)d%%') as bar:
    for k, order in enumerate(orders):
        # generate PCE
        poly = pce.PolyChaos(order, distrib, param)
        # level selected according to simulation PCE vs MC
        if kind == 'u':
            lev = 15
        elif kind == 'n':
            lev = 25
        # compute coefficients
        poly.spectral_projection(fun, lev, verbose='n')
        poly.norm_fit()
        mu.append(poly.mu)
        sigma.append(poly.sigma)
        sobol_index = poly.sobol(index)
        S[:, k] = np.array(sobol_index)
        bar.next()
t2 = timer()
print(" * elapsed time {:.3f} sec\n".format(t2 - t1))
# header row of the LaTeX-style Sobol table
header = "order & " + " & ".join(str(o) for o in orders) + " \\\\"
print(header)
# one row per variable subset: "S<subset> & v1 & v2 & ... \\"
for subset, row in zip(index, S):
    label = "S" + "".join(str(member) for member in subset)
    values = " & ".join("{:.4f}".format(v) for v in row)
    print(label + " & " + values + " \\\\")
# final plots
# figure 1: hourly thermal load profile
h1 = plt.figure()
plt.plot(range(len(Ut)), Ut, 'C0-o')
plt.xlabel("hour", fontsize=14)
plt.ylabel("Ut (kW)", fontsize=14)
plt.grid()
plt.tight_layout()
# figure 2: hourly electricity price profile
h2 = plt.figure()
plt.plot(range(len(cskW)), cskW, 'C0-o')
plt.xlabel("hour", fontsize=14)
plt.ylabel("cskW (euro/kWh)", fontsize=14)
plt.grid()
plt.tight_layout()
# figure 3: PCE mean (left) and standard deviation (right) versus PCE order
h3 = plt.figure(figsize=(11,6))
plt.subplot(1,2,1)
plt.plot(orders, mu,'C0-o', label='PCE')
plt.xlabel('order')
plt.ylabel('mean')
plt.legend()
plt.tight_layout()
plt.subplot(1,2,2)
plt.plot(orders, sigma,'C0-o', label='PCE')
plt.xlabel('order')
plt.ylabel('standard deviation')
plt.legend()
plt.tight_layout()
plt.ion()
plt.show()
# save the Sobol indices of the highest-order PCE in npz format
if savedata:
    SI = S[:, -1]
    np.savez("sobol_" + kind, sobol_index=SI)
| [
"numpy.savez",
"matplotlib.pyplot.grid",
"cogen_util.find_global_cost",
"matplotlib.pyplot.ylabel",
"timeit.default_timer",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"joblib.delayed",
"joblib.Parallel",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
... | [((2044, 2056), 'numpy.array', 'np.array', (['Ut'], {}), '(Ut)\n', (2052, 2056), True, 'import numpy as np\n'), ((2720, 2732), 'numpy.array', 'np.array', (['cs'], {}), '(cs)\n', (2728, 2732), True, 'import numpy as np\n'), ((2998, 3012), 'numpy.array', 'np.array', (['cskW'], {}), '(cskW)\n', (3006, 3012), True, 'import numpy as np\n'), ((3924, 3931), 'timeit.default_timer', 'timer', ([], {}), '()\n', (3929, 3931), True, 'from timeit import default_timer as timer\n'), ((4609, 4616), 'timeit.default_timer', 'timer', ([], {}), '()\n', (4614, 4616), True, 'from timeit import default_timer as timer\n'), ((5046, 5058), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5056, 5058), True, 'import matplotlib.pyplot as plt\n'), ((5096, 5127), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""hour"""'], {'fontsize': '(14)'}), "('hour', fontsize=14)\n", (5106, 5127), True, 'import matplotlib.pyplot as plt\n'), ((5128, 5162), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ut (kW)"""'], {'fontsize': '(14)'}), "('Ut (kW)', fontsize=14)\n", (5138, 5162), True, 'import matplotlib.pyplot as plt\n'), ((5163, 5173), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5171, 5173), True, 'import matplotlib.pyplot as plt\n'), ((5174, 5192), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5190, 5192), True, 'import matplotlib.pyplot as plt\n'), ((5199, 5211), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5209, 5211), True, 'import matplotlib.pyplot as plt\n'), ((5253, 5284), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""hour"""'], {'fontsize': '(14)'}), "('hour', fontsize=14)\n", (5263, 5284), True, 'import matplotlib.pyplot as plt\n'), ((5285, 5327), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cskW (euro/kWh)"""'], {'fontsize': '(14)'}), "('cskW (euro/kWh)', fontsize=14)\n", (5295, 5327), True, 'import matplotlib.pyplot as plt\n'), ((5328, 5338), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5336, 5338), True, 
'import matplotlib.pyplot as plt\n'), ((5339, 5357), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5355, 5357), True, 'import matplotlib.pyplot as plt\n'), ((5365, 5392), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(11, 6)'}), '(figsize=(11, 6))\n', (5375, 5392), True, 'import matplotlib.pyplot as plt\n'), ((5392, 5412), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (5403, 5412), True, 'import matplotlib.pyplot as plt\n'), ((5411, 5452), 'matplotlib.pyplot.plot', 'plt.plot', (['orders', 'mu', '"""C0-o"""'], {'label': '"""PCE"""'}), "(orders, mu, 'C0-o', label='PCE')\n", (5419, 5452), True, 'import matplotlib.pyplot as plt\n'), ((5452, 5471), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""order"""'], {}), "('order')\n", (5462, 5471), True, 'import matplotlib.pyplot as plt\n'), ((5472, 5490), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean"""'], {}), "('mean')\n", (5482, 5490), True, 'import matplotlib.pyplot as plt\n'), ((5491, 5503), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5501, 5503), True, 'import matplotlib.pyplot as plt\n'), ((5504, 5522), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5520, 5522), True, 'import matplotlib.pyplot as plt\n'), ((5524, 5544), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (5535, 5544), True, 'import matplotlib.pyplot as plt\n'), ((5543, 5587), 'matplotlib.pyplot.plot', 'plt.plot', (['orders', 'sigma', '"""C0-o"""'], {'label': '"""PCE"""'}), "(orders, sigma, 'C0-o', label='PCE')\n", (5551, 5587), True, 'import matplotlib.pyplot as plt\n'), ((5587, 5606), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""order"""'], {}), "('order')\n", (5597, 5606), True, 'import matplotlib.pyplot as plt\n'), ((5607, 5639), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""standard deviation"""'], {}), "('standard deviation')\n", (5617, 5639), True, 'import matplotlib.pyplot as 
plt\n'), ((5640, 5652), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5650, 5652), True, 'import matplotlib.pyplot as plt\n'), ((5653, 5671), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5669, 5671), True, 'import matplotlib.pyplot as plt\n'), ((5673, 5682), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (5680, 5682), True, 'import matplotlib.pyplot as plt\n'), ((5683, 5693), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5691, 5693), True, 'import matplotlib.pyplot as plt\n'), ((1616, 1651), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (1626, 1651), False, 'import csv\n'), ((2286, 2321), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (2296, 2321), False, 'import csv\n'), ((3554, 3565), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3562, 3565), True, 'import numpy as np\n'), ((5770, 5811), 'numpy.savez', 'np.savez', (["('sobol_' + kind)"], {'sobol_index': 'SI'}), "('sobol_' + kind, sobol_index=SI)\n", (5778, 5811), True, 'import numpy as np\n'), ((3287, 3417), 'cogen_util.find_global_cost', 'find_global_cost', (['etae', 'etat', 'Pmin', 'Pmax', 'Ptmin', 'Ptmax', 'etab', 'Bmax', 'Hi', '(xx[2] * cskW)', '(xx[0] * cNGd)', 'cNGnd', 'Deltat', '(xx[1] * Ut)'], {}), '(etae, etat, Pmin, Pmax, Ptmin, Ptmax, etab, Bmax, Hi, xx[2\n ] * cskW, xx[0] * cNGd, cNGnd, Deltat, xx[1] * Ut)\n', (3303, 3417), False, 'from cogen_util import find_global_cost\n'), ((3483, 3513), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)', 'verbose': '(0)'}), '(n_jobs=-1, verbose=0)\n', (3491, 3513), False, 'from joblib import Parallel, delayed\n'), ((4145, 4181), 'pce.PolyChaos', 'pce.PolyChaos', (['order', 'distrib', 'param'], {}), '(order, distrib, param)\n', (4158, 4181), True, 'import pce as pce\n'), ((4563, 4584), 'numpy.array', 'np.array', (['sobol_index'], {}), '(sobol_index)\n', (4571, 4584), True, 'import numpy as 
np\n'), ((3518, 3536), 'joblib.delayed', 'delayed', (['inner_fun'], {}), '(inner_fun)\n', (3525, 3536), False, 'from joblib import Parallel, delayed\n')] |
import numpy as np
from pymoo.model.mutation import Mutation
import copy
import config as cf
import random as rm
from scipy.spatial.distance import directed_hausdorff
class MyTcMutation(Mutation):
    """Pymoo mutation operator for road test cases.

    Each individual ``X[i, 0]`` holds a test-case object with a ``states``
    dict ("st0", "st1", ...), each state being ``{"state": ..., "value": ...}``
    where "state" is "straight", "left" or "right". With probability
    ``mut_rate`` the operator deep-copies the individual, perturbs its state
    sequence, recomputes the road points and stores the novelty of the
    mutated individual w.r.t. the original one.
    """
    def __init__(self, mut_rate):
        """Create the operator.

        :param mut_rate: per-individual mutation probability in [0, 1].
        """
        super().__init__()
        self.mut_rate = mut_rate
    def _do(self, problem, X, **kwargs):
        """Mutate the population in place and return it.

        :param problem: pymoo problem instance (unused here).
        :param X: population matrix; ``X[i, 0]`` is a test-case object.
        :return: the (possibly) mutated population matrix.
        """
        # print("X mutate", X.shape)
        # for each individual
        #print("MUT1")
        #print("Mutation size", len(X))
        for i in range(len(X)):
            r = np.random.random()
            s = X[i, 0]
            if s is None:
                print("S i none")
            '''
            print("Mut_start*******")
            print("States", s.states)
            print("Mut_end*********")
            '''
            # with a probabilty of 40% - change the order of characters
            if (r < self.mut_rate) and (s is not None):#cf.ga["mut_rate"]:
                #
                # for some reason it seems we must do a deep copy
                # and replace the original object
                # pymoo seems to keep a deep copy of the best object if I change it
                # in a mutation it will not chnage pymoo best individual and we end up
                # with an incosistency in evaluated fitnesses
                sn = copy.deepcopy(s)
                #print("Child before", sn.states)
                sn.get_points()
                sn.remove_invalid_cases()
                #wr = np.random.random()
                # NOTE(review): random.randint(1, 101) is inclusive on BOTH ends
                # (draws 1..101, 101 values); if a 1..100 percentage draw was
                # intended, the upper bound should be 100 — confirm.
                wr = rm.randint(1, 101)
                child = sn.states
                old_points = sn.road_points
                old_states = child
                # wr in [1, 19]: swap the contents of two randomly chosen states.
                # NOTE(review): the two indices may coincide (no-op swap) — confirm
                # this is acceptable.
                if wr < 20:
                    #print("mut1")
                    # print("mutation MUT1")
                    candidates = list(np.random.randint(0, high=len(child), size=2))
                    temp = child["st" + str(candidates[0])]
                    child["st" + str(candidates[0])] = child["st" + str(candidates[1])]
                    child["st" + str(candidates[1])] = temp
                    #sn.states = child
                    #print("Child after 1", child)
                # wr in [20, 40]: re-sample one state's value, then flip its kind
                # (straight <-> left/right) and re-sample the value again from the
                # range matching the new kind.
                elif wr >= 20 and wr <= 40 :
                    # print("mutation MUT2")
                    #print("mut2")
                    num = np.random.randint(0, high=len(child) )
                    #value = np.random.choice(["state", "value"])
                    value ="value"
                    duration_list = []
                    if child["st" + str(num)]["state"] == "straight":
                        duration_list = np.arange(cf.model["min_len"], cf.model["max_len"], cf.model["len_step"])
                    else:
                        duration_list = np.arange(cf.model["min_angle"], cf.model["max_angle"], cf.model["ang_step"])
                    child["st" + str(num)][value] = int(np.random.choice(duration_list))
                    #sn.states = child
                    #elif value == "state":
                    value ="state"
                    if child["st" + str(num)][value] == "straight":
                        child["st" + str(num)][value] = np.random.choice(["left", "right"])
                        duration_list = np.arange(cf.model["min_angle"], cf.model["max_angle"], cf.model["ang_step"])
                        child["st" + str(num)]["value"] = int(np.random.choice(duration_list))
                    else:
                        child["st" + str(num)][value] = "straight"
                        duration_list = np.arange(cf.model["min_len"], cf.model["max_len"], cf.model["len_step"])
                        child["st" + str(num)]["value"] = int(np.random.choice(duration_list))
                # wr in [41, 101]: pairwise-swap roughly half of the states; if an
                # odd index is left over, re-sample its value instead.
                else:
                    #print("mut 4")
                    cand = list(np.random.randint(0, high=len(child), size=int(len(child)/2)))
                    while cand:
                        c1 = np.random.choice(cand)
                        cand.remove(c1)
                        if cand:
                            c2 = np.random.choice(cand)
                            cand.remove(c2)
                            temp = child["st" + str(c1)]
                            child["st" + str(c1)] = child["st" + str(c2)]
                            child["st" + str(c2)] = temp
                        else:
                            if child["st" + str(c1)]["state"] == "straight":
                                duration_list = np.arange(cf.model["min_len"], cf.model["max_len"], cf.model["len_step"])
                            else:
                                duration_list = np.arange(cf.model["min_angle"], cf.model["max_angle"], cf.model["ang_step"])
                            child["st" + str(c1)]['value'] = int(np.random.choice(duration_list))
                #print("after", child)
                #print("MUT")
                sn.states = child
                #print("Child after 2", child)
                #print("sn.states", sn.states)
                sn.get_points()
                #print("sn.road_points", sn.road_points)
                sn.remove_invalid_cases()
                # NOTE(review): new_points is only consumed by the commented-out
                # Hausdorff-based novelty below; currently unused.
                new_points = sn.road_points
                #sn.novelty = -max(directed_hausdorff(old_points, new_points)[0], directed_hausdorff(old_points, new_points)[0])
                sn.novelty = sn.calc_novelty(old_states, sn.states)
                #print("sn.road_points", sn.road_points)
                #print(sn.eval_fitness())
                X[i, 0] = sn
        return X
'''
elif wr >= 0.50 and wr < 0.75:
print("new mut")
print("before", child)
for tc in child:
state = child[tc]["state"]
if state == "straight":
state = np.random.choice(["left", "right"])
child[tc]["state"] = state
else:
child[tc]["state"] = "straight"
print("after", child)
''' | [
"numpy.random.random",
"numpy.random.choice",
"copy.deepcopy",
"random.randint",
"numpy.arange"
] | [((512, 530), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (528, 530), True, 'import numpy as np\n'), ((1301, 1317), 'copy.deepcopy', 'copy.deepcopy', (['s'], {}), '(s)\n', (1314, 1317), False, 'import copy\n'), ((1507, 1525), 'random.randint', 'rm.randint', (['(1)', '(101)'], {}), '(1, 101)\n', (1517, 1525), True, 'import random as rm\n'), ((2575, 2648), 'numpy.arange', 'np.arange', (["cf.model['min_len']", "cf.model['max_len']", "cf.model['len_step']"], {}), "(cf.model['min_len'], cf.model['max_len'], cf.model['len_step'])\n", (2584, 2648), True, 'import numpy as np\n'), ((2718, 2795), 'numpy.arange', 'np.arange', (["cf.model['min_angle']", "cf.model['max_angle']", "cf.model['ang_step']"], {}), "(cf.model['min_angle'], cf.model['max_angle'], cf.model['ang_step'])\n", (2727, 2795), True, 'import numpy as np\n'), ((2881, 2912), 'numpy.random.choice', 'np.random.choice', (['duration_list'], {}), '(duration_list)\n', (2897, 2912), True, 'import numpy as np\n'), ((3162, 3197), 'numpy.random.choice', 'np.random.choice', (["['left', 'right']"], {}), "(['left', 'right'])\n", (3178, 3197), True, 'import numpy as np\n'), ((3238, 3315), 'numpy.arange', 'np.arange', (["cf.model['min_angle']", "cf.model['max_angle']", "cf.model['ang_step']"], {}), "(cf.model['min_angle'], cf.model['max_angle'], cf.model['ang_step'])\n", (3247, 3315), True, 'import numpy as np\n'), ((3544, 3617), 'numpy.arange', 'np.arange', (["cf.model['min_len']", "cf.model['max_len']", "cf.model['len_step']"], {}), "(cf.model['min_len'], cf.model['max_len'], cf.model['len_step'])\n", (3553, 3617), True, 'import numpy as np\n'), ((3948, 3970), 'numpy.random.choice', 'np.random.choice', (['cand'], {}), '(cand)\n', (3964, 3970), True, 'import numpy as np\n'), ((3378, 3409), 'numpy.random.choice', 'np.random.choice', (['duration_list'], {}), '(duration_list)\n', (3394, 3409), True, 'import numpy as np\n'), ((3680, 3711), 'numpy.random.choice', 'np.random.choice', (['duration_list'], {}), 
'(duration_list)\n', (3696, 3711), True, 'import numpy as np\n'), ((4077, 4099), 'numpy.random.choice', 'np.random.choice', (['cand'], {}), '(cand)\n', (4093, 4099), True, 'import numpy as np\n'), ((4488, 4561), 'numpy.arange', 'np.arange', (["cf.model['min_len']", "cf.model['max_len']", "cf.model['len_step']"], {}), "(cf.model['min_len'], cf.model['max_len'], cf.model['len_step'])\n", (4497, 4561), True, 'import numpy as np\n'), ((4648, 4725), 'numpy.arange', 'np.arange', (["cf.model['min_angle']", "cf.model['max_angle']", "cf.model['ang_step']"], {}), "(cf.model['min_angle'], cf.model['max_angle'], cf.model['ang_step'])\n", (4657, 4725), True, 'import numpy as np\n'), ((4791, 4822), 'numpy.random.choice', 'np.random.choice', (['duration_list'], {}), '(duration_list)\n', (4807, 4822), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
from .databases import RetrievalDb
from ..utils.generic_utils import expanded_join
class StanfordOnlineProducts(RetrievalDb):
    """ Stanford Online Products dataset wrapper. Refs:
    https://www.cv-foundation.org/openaccess/content_cvpr_2016/app/S17-11.pdf

    Returns:
        Instance of StanfordOnlineProducts to get images and labels from train/val/test sets for DML tasks.
    """

    def __init__(self):
        super(StanfordOnlineProducts, self).__init__(name='STANFORD_ONLINE_PRODUCTS', queries_in_collection=True)

        self.train_images, self.train_labels = self._make_images_and_labels("Ebay_train.txt", True)
        self.test_images, self.test_labels = self._make_images_and_labels("Ebay_test.txt", True)

    def get_training_set(self, **args):
        """Return (image paths, labels) of the training split."""
        return self.train_images, self.train_labels

    def get_validation_set(self, **args):
        """Delegate to the parent class: this dataset has no validation split.

        Bug fix: the original called ``super(StanfordOnlineProducts).get_validation_set``
        on an *unbound* super object, which raises AttributeError at call time.
        Zero-argument ``super()`` is the correct form inside a method.
        """
        super().get_validation_set(**args)

    def get_testing_set(self, **args):
        """Return (image paths, labels) of the testing split."""
        return self.test_images, self.test_labels

    def _make_images_and_labels(self, filename, is_retrieval):
        """ Parse one of the "Ebay_*.txt" split files to get image paths and labels.

        Note that the split in DML is different from the classification one.

        :param filename: split file name, e.g. "Ebay_train.txt" or "Ebay_test.txt".
        :param is_retrieval: if True read the retrieval label column (index 1),
            otherwise the classification label column (index 2).
        :return: image paths and labels as numpy arrays.
        """
        labels = []
        images = []
        j = 1 if is_retrieval else 2  # column index of the label to read
        with open(expanded_join(self.root_path, filename)) as f:
            for i, l in enumerate(f):
                if i != 0:  # skip the header line
                    data = l.split(' ')
                    labels.append(np.int32(data[j]) - 1)  # make labels 0-based
                    images.append(expanded_join(self.root_path, data[3][0:-1]))  # drop trailing newline
        # Bug fix: ``np.str`` was deprecated in NumPy 1.20 and removed in 1.24
        # (AttributeError on modern NumPy); the builtin ``str`` is the documented
        # replacement and behaves identically here.
        images = np.array(images, dtype=str)
        labels = np.array(labels, dtype=np.int32)
        return images, labels

    @staticmethod
    def get_usual_retrieval_rank():
        """Ranks at which recall@K is usually reported for this dataset."""
        return [1, 10, 100, 1000]

    def _images_for_set(self, db_set):
        """Map a split name to its image array (shared by queries/collection lookups).

        :param db_set: 'train'/'training', 'validation'/'val' or 'testing'/'test'.
        :return: the image array of the requested split.
        :raises ValueError: for the validation split (not available for this
            dataset) or for an unrecognized split name.
        """
        key = db_set.lower()
        if key in ('train', 'training'):
            return self.train_images
        if key in ('validation', 'val'):
            raise ValueError('There is no validation set for {}.'.format(self.name))
        if key in ('testing', 'test'):
            return self.test_images
        raise ValueError("'db_set' unrecognized."
                         "Expected 'train', 'training', 'validation', 'val', 'testing', 'test'"
                         "Got {}".format(db_set))

    def get_queries_idx(self, db_set):
        """ Get the set of query images from which metrics are evaluated.

        :param db_set: string containing either 'train', 'training', 'validation', 'val', 'testing' or 'test'.
        :return: a nd-array of query indexes.
        """
        return np.arange(len(self._images_for_set(db_set)), dtype=np.int32)

    def get_collection_idx(self, db_set):
        """ Get the set of collection images for retrieval tasks.

        :param db_set: string containing either 'train', 'training', 'validation', 'val', 'testing' or 'test'.
        :return: a nd-array of the collection indexes.
        """
        return np.arange(len(self._images_for_set(db_set)), dtype=np.int32)
"numpy.array",
"numpy.int32"
] | [((1863, 1893), 'numpy.array', 'np.array', (['images'], {'dtype': 'np.str'}), '(images, dtype=np.str)\n', (1871, 1893), True, 'import numpy as np\n'), ((1911, 1943), 'numpy.array', 'np.array', (['labels'], {'dtype': 'np.int32'}), '(labels, dtype=np.int32)\n', (1919, 1943), True, 'import numpy as np\n'), ((1742, 1759), 'numpy.int32', 'np.int32', (['data[j]'], {}), '(data[j])\n', (1750, 1759), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Calibration target analysis module
"""
import logging
log = logging.getLogger(__name__)
import numpy as np
import arepytools.constants as cst
from arepytools.math import genericpoly
import sct.support.signalprocessing as sp
from sct.sarproduct.sarproduct import EDataQuantity
from sct.analysis.irfanalyser import IRFAnalyser
class CalibrationTargetAnalyser: # TODO Improve CalibrationTargetAnalyser class
    """Analyse a single calibration target seen in a SAR product.

    For every swath/burst combination in which the target is visible, the
    analyser measures IRF (impulse response function) figures of merit, RCS
    and localization errors, and appends one entry per valid view to the
    per-view list attributes of this object.
    """
    def __init__(self, sar_product, calibration_target):
        """Initialise CalibrationTargetAnalyser object

        :param sar_product: SAR product object (type depends on the mission)
        :param calibration_target: Calibration target object
        :type calibration_target: CalibrationTarget
        """
        self.sar_product = sar_product
        self.calibration_target = calibration_target
        # Per-view results: analyse_calibration_target() appends one entry to
        # each of the following lists for every valid view of the target.
        self.swath = []
        self.burst = []
        self.polarization = []
        # Target peak position in image samples (range / azimuth).
        self.position_range = []
        self.position_azimuth = []
        # Acquisition geometry angles at the target position.
        self.incidence_angle = []
        self.look_angle = []
        self.squint_angle = []
        # IRF figures of merit (range / azimuth, plus 2D for the side-lobe ratios).
        self.resolution_range = []
        self.resolution_azimuth = []
        self.pslr_range = []
        self.pslr_azimuth = []
        self.pslr_2d = []
        self.islr_range = []
        self.islr_azimuth = []
        self.islr_2d = []
        self.sslr_range = []
        self.sslr_azimuth = []
        self.sslr_2d = []
        # Radiometric figures: RCS, clutter level, complex peak error, SCR.
        self.rcs = []
        self.clutter = []
        self.peak_error = []
        self.scr = []
        # Measured absolute localization errors (sign-flipped IRF localization
        # errors; in meters when __unit_of_measure is "Meters").
        self.measured_ale_range = []
        self.measured_ale_azimuth = []
        # Number of valid views stored in the lists above.
        self.n_views = 0
        # Internal configuration parameters
        self.__analyse_irf_flag = True
        self.__measure_rcs_flag = True
        self.__measure_ale_flag = True
        self.__unit_of_measure = "Meters"
    def analyse_calibration_target(self, maximum_ale=None):
        """Analyse calibration target

        :param maximum_ale: Maximum measurable ALE [m], defaults to None
        :type maximum_ale: float, optional
        :return: Status (True for success, False for unsuccess)
        :rtype: bool
        """
        # Check configuration parameters
        if (not self.__analyse_irf_flag) and (not self.__measure_rcs_flag) and (not self.__measure_ale_flag):
            return True
        # Read useful SAR product metadata
        (
            t_rg_0,
            t_rg_step,
            n_rg,
            _,
            t_az_0,
            t_az_step,
            n_az,
            _,
            rg_step,
            az_step,
            tags,
        ) = self.sar_product.roi.get_product_roi()
        fc_hz = self.sar_product.dataset_info[0].fc_hz
        side_looking = self.sar_product.dataset_info[0].side_looking.value
        # Read target coordinates
        # NOTE(review): pt_sar__mask / pt_sar__*_pos__mask / pt_geo are only
        # defined inside this branch, but they are used unconditionally below;
        # with __measure_ale_flag = False this method would raise NameError.
        # Confirm the flag is meant to stay hard-coded to True.
        if self.__measure_ale_flag:
            pt_geo = self.calibration_target.xyz
            pt_rcs = self.calibration_target.rcs
            pt_delay = np.array([[self.calibration_target.delay]])
            pt_sar__t_rg, pt_sar__t_az = self.sar_product.convert_coordinates_geo2sar(pt_geo, pt_delay)
            pt_sar__rg_pos__mask, pt_sar__az_pos__mask = self.sar_product.convert_coordinates_sar2roi(
                pt_sar__t_rg, pt_sar__t_az
            )
            # Non-zero only for swath/burst combinations where the target falls
            # inside the image in both range and azimuth.
            pt_sar__mask = np.logical_and(pt_sar__rg_pos__mask != 0, pt_sar__az_pos__mask != 0)
        # Select data portion where to perform IRF and RCS analyses
        log.debug("Compute number of times the target is seen by the current SAR product (swath/burst combinations)")
        if sum(sum(pt_sar__mask[:, :, 0])) == 0:
            log.debug("Target outside image.")
            return True
        if maximum_ale is None: # the ROI dimensions depend on the maximum ALE allowed
            roi_size_rg = 32
            roi_size_az = 32
        else:
            roi_size_rg = int(max(np.ceil(abs(maximum_ale) / np.mean(rg_step[:, 0]) * 2), 3))
            roi_size_az = int(max(np.ceil(abs(maximum_ale) / np.mean(az_step[:, 0]) * 2), 3))
        # ROI centred on the expected target position: [az start, rg start,
        # az extent, rg extent] in SAR time coordinates.
        pt_roi = np.zeros(4)
        pt_roi[0] = (
            pt_sar__t_az[0, 0] - t_az_0[0, 0] - roi_size_az / 2 * np.mean(t_az_step[:, 0])
        ) # an average sampling steps is used
        pt_roi[1] = pt_sar__t_rg[0, 0] - roi_size_rg / 2 * np.mean(t_rg_step[:, 0])
        pt_roi[2] = roi_size_az * np.mean(t_az_step[:, 0])
        pt_roi[3] = roi_size_rg * np.mean(t_rg_step[:, 0])
        data_portion_corners_rg, data_portion_corners_az = self.sar_product.select_data_portion(pt_roi)
        if len(data_portion_corners_rg) == 0 or len(data_portion_corners_az) == 0:
            log.debug("Target outside image.")
            return True
        # Zero-out corners of swath/burst combinations where the target is not visible.
        data_portion_corners_rg = data_portion_corners_rg * np.tile(pt_sar__mask, (1, 1, 2))
        data_portion_corners_az = data_portion_corners_az * np.tile(pt_sar__mask, (1, 1, 2))
        # Perform and display IRF and RCS analyses
        for t in range(data_portion_corners_rg.shape[0]):
            for tt in range(data_portion_corners_rg.shape[1]):
                # Skip swath/burst combinations masked out above.
                if (
                    data_portion_corners_rg[t, tt, 0] == 0
                    and data_portion_corners_rg[t, tt, 1] == 0
                    and data_portion_corners_az[t, tt, 0] == 0
                    and data_portion_corners_az[t, tt, 1] == 0
                ):
                    continue
                log.debug("Analyse target (swath: {}, burst: {})".format(tags[t, tt][0], tt))
                irf_analyser = IRFAnalyser()
                # Read data (define square ROI around the strongest target in the selected area)
                log.debug("Read data portion around target")
                roi = [
                    data_portion_corners_az[t, tt, 0],
                    data_portion_corners_rg[t, tt, 0],
                    data_portion_corners_az[t, tt, 1] - data_portion_corners_az[t, tt, 0] + 1,
                    data_portion_corners_rg[t, tt, 1] - data_portion_corners_rg[t, tt, 0] + 1,
                ]
                data_portion = self.sar_product.read_data(t, roi)
                if sum(sum(np.abs(data_portion))) == 0:
                    continue
                if np.all(np.isreal(data_portion)):
                    _, roi_max_rg, roi_max_az = sp.max2d_fine(
                        np.abs(data_portion) ** 2
                    ) # compute power (only for detected data)
                else:
                    _, roi_max_rg, roi_max_az = sp.max2d_fine(data_portion)
                # Re-centre a fixed-size square ROI on the detected peak.
                roi_size = 128
                position = [roi[1] + roi_max_rg, roi[0] + roi_max_az]
                roi = np.array(
                    [
                        roi[0] + np.floor(roi_max_az) - roi_size / 2,
                        roi[1] + np.floor(roi_max_rg) - roi_size / 2,
                        roi_size,
                        roi_size,
                    ],
                    dtype=int,
                )
                data_portion = self.sar_product.read_data(t, roi, n_rg[t, 0], sum(n_az[t, :]))
                roi_max__pos = np.array(
                    [
                        roi_size / 2 + (roi_max_rg - np.floor(roi_max_rg)),
                        roi_size / 2 + (roi_max_az - np.floor(roi_max_az)),
                    ]
                ) # the strongest target in the selected area is used for the analyses
                # Skip mostly-empty ROIs (e.g. at burst/image borders).
                if (
                    np.count_nonzero(np.abs(data_portion)) / np.prod(data_portion.shape) * 100 < 50
                ): # TODO Check threshold
                    continue
                # Derive additional useful information for the selected target
                pt_valid__flag = 1
                # Enlarge the ROI when the sampling frequency greatly exceeds the
                # bandwidth, so the subsequent analyses see enough IRF lobes.
                b_rg = self.sar_product.sampling_constants_list[t].brg_hz
                f_rg = self.sar_product.sampling_constants_list[t].frg_hz
                rg_ovrs = np.max([int(np.rint(f_rg / b_rg / 5)), 1]) # factor=5 has been chosen arbitrarily
                b_az = self.sar_product.sampling_constants_list[t].baz_hz
                f_az = self.sar_product.sampling_constants_list[t].faz_hz
                az_ovrs = np.max([int(np.rint(f_az / b_az / 5)), 1]) # factor=5 has been chosen arbitrarily
                if rg_ovrs > 1 or az_ovrs > 1:
                    roi = np.array(
                        [
                            roi[0] - roi_size * (az_ovrs - 1) / 2,
                            roi[1] - roi_size * (rg_ovrs - 1) / 2,
                            roi_size * az_ovrs,
                            roi_size * rg_ovrs,
                        ],
                        dtype=int,
                    )
                    data_portion = self.sar_product.read_data(t, roi, n_rg[t, 0], sum(n_az[t, :]))
                    roi_max__pos = np.array(
                        [roi_max__pos[0] + roi_size * (rg_ovrs - 1) / 2, roi_max__pos[1] + roi_size * (az_ovrs - 1) / 2]
                    )
                # SAR (time) coordinates of the peak and of the ROI near/far range edges.
                t_rg__curr = t_rg_0[t, tt] + (roi_max__pos[0] + roi[1]) * t_rg_step[t, tt]
                t_rg__near = t_rg_0[t, tt] + (roi[1]) * t_rg_step[t, tt]
                t_rg__far = t_rg_0[t, tt] + (roi[3] - 1 + roi[1]) * t_rg_step[t, tt]
                if self.sar_product.type == "GRD":
                    raise NotImplementedError # TODO
                    """
                    coefficients = PFGround2SlantObj_Ref.coefficients
                    if coefficients[0] > 1: # GroundToSlant polynomials expressed in meters
                        conversion_factor = LightSpeed / 2
                    else: # GroundToSlant polynomials expressed in seconds
                        conversion_factor = 1
                    t_rg__curr = gp.create_generic_poly(PFGround2SlantObj_Ref).evaluate((TAz0[t, tt] + (NAz[t, tt] - 1) / 2 * TAzStep[t, tt], t_rg__curr)) / conversion_factor
                    t_rg__near = gp.create_generic_poly(PFGround2SlantObj_Ref).evaluate((TAz0[t, tt] + (NAz[t, tt] - 1) / 2 * TAzStep[t, tt], t_rg__near)) / conversion_factor
                    t_rg__far = gp.create_generic_poly(PFGround2SlantObj_Ref).evaluate((TAz0[t, tt] + (NAz[t, tt] - 1) / 2 * TAzStep[t, tt], t_rg__far)) / conversion_factor
                    """
                t_az__curr = t_az_0[t, tt] + (roi_max__pos[1] + roi[0] - sum(n_az[t, 0:tt])) * t_az_step[t, tt]
                # Acquisition geometry at the peak position.
                incidence_angle = self.sar_product.general_sar_orbit[0].get_incidence_angle(
                    t_az__curr, t_rg__curr, side_looking
                )
                look_angle = self.sar_product.general_sar_orbit[0].get_look_angle(t_az__curr, t_rg__curr, side_looking)
                squint_angle, _ = self.sar_product.get_squint(t, tt, t_rg__curr, t_az__curr)
                dc = genericpoly.create_sorted_poly_list(self.sar_product.dc_vector_list[t]).evaluate(
                    (t_az__curr, t_rg__curr)
                ) # TODO Add electronic steering
                # Project the near/far range edges to ground and back to compute
                # the slope of the IRF range cut in the image grid.
                pt__near = self.sar_product.general_sar_orbit[0].sat2earth(
                    t_az__curr, t_rg__near, "RIGHT", 0.0, 0.0, cst.LIGHT_SPEED / fc_hz
                )
                pt__near__t_az, _ = self.sar_product.general_sar_orbit[0].earth2sat(
                    np.reshape(pt__near, (3,)), dc, cst.LIGHT_SPEED / fc_hz
                )
                pt__far = self.sar_product.general_sar_orbit[0].sat2earth(
                    t_az__curr, t_rg__far, "RIGHT", 0.0, 0.0, cst.LIGHT_SPEED / fc_hz
                )
                pt__far__t_az, _ = self.sar_product.general_sar_orbit[0].earth2sat(
                    np.reshape(pt__far, (3,)), dc, cst.LIGHT_SPEED / fc_hz
                )
                irf_rg_cut = -(roi_size - 1) / (
                    (pt__far__t_az[0] - pt__near__t_az[0]) / t_az_step[t, tt]
                ) # range cut angular coefficient in samples
                irf_az_cut = (
                    -1 / irf_rg_cut * az_step[t, tt] ** 2 / rg_step[t, tt] ** 2
                ) # azimuth cut angular coefficient in samples
                if np.abs((roi_size - 1) / irf_rg_cut) < 1: # use vertical and horizontal cuts in case of low squint
                    irf_rg_cut = np.inf
                    irf_az_cut = 0
                # Perform IRF analysis
                log.debug("Perform IRF analysis")
                if self.__measure_ale_flag:
                    # Find target to be used as reference
                    pt_sar__pos = [
                        np.squeeze(pt_sar__rg_pos__mask[t, tt, 0]) - roi[1],
                        np.squeeze(pt_sar__az_pos__mask[t, tt, 0]) - roi[0],
                    ]
                else:
                    pt_sar__pos = np.array([])
                step = np.array([rg_step[t, tt], az_step[t, tt]])
                # With "Meters" the pixel steps are passed so results come out in
                # physical units; otherwise unit steps keep results in samples.
                if self.__unit_of_measure == "Meters":
                    (
                        irf_resolution,
                        irf_pslr,
                        irf_islr,
                        irf_sslr,
                        irf_localization_error,
                    ) = irf_analyser.measure_resolution_slr_localization(
                        data_portion, roi_max__pos, pt_sar__pos, step, [irf_rg_cut, irf_az_cut]
                    )
                else:
                    (
                        irf_resolution,
                        irf_pslr,
                        irf_islr,
                        irf_sslr,
                        irf_localization_error,
                    ) = irf_analyser.measure_resolution_slr_localization(
                        data_portion, roi_max__pos, pt_sar__pos, [1, 1], [irf_rg_cut, irf_az_cut]
                    )
                if (
                    np.any(irf_resolution == 0)
                    or np.any(irf_pslr[0:2] == 0)
                    or np.any(irf_islr[0:2] == 0)
                    or np.any(irf_sslr[0:2] == 0)
                ): # if IRF analysis has provided partially invalid results mark the target as invalid
                    pt_valid__flag = 0
                # Perform RCS analysis
                log.debug("Perform RCS analysis")
                if pt_valid__flag: # if target has not been found during IRF analysis skip RCS analysis
                    if self.__measure_rcs_flag:
                        # Normalize the data to beta nought before the RCS measurement.
                        if self.sar_product.data_quantity == EDataQuantity.sigma_nought.value:
                            data_portion = data_portion / np.sqrt(
                                np.sin(incidence_angle / 180 * np.pi)
                            ) # sigma to beta nought conversion
                        elif self.sar_product.data_quantity == EDataQuantity.gamma_nought.value:
                            data_portion = (
                                data_portion
                                / np.sqrt(np.sin(incidence_angle / 180 * np.pi))
                                * np.sqrt(np.cos(incidence_angle / 180 * np.pi))
                            ) # gamma to beta nought conversion
                        irf_resolution__temp = irf_resolution
                        if self.sar_product.type == "GRD":
                            step[0] = step[0] * np.sin(
                                incidence_angle / 180 * np.pi
                            ) # for GRD, the pixel area is computed as PAgr*sin(alpha)
                            irf_resolution__temp[0] = irf_resolution__temp[0] * np.sin(incidence_angle / 180 * np.pi)
                        if self.__unit_of_measure == "Meters":
                            rcs, peak_value, clutter = irf_analyser.measure_rcs_peak_clutter(
                                data_portion, roi_max__pos, step, irf_resolution__temp
                            )
                        else:
                            rcs, peak_value, clutter = irf_analyser.measure_rcs_peak_clutter(
                                data_portion, roi_max__pos, step, irf_resolution__temp * step
                            )
                        if (
                            rcs == 0 or peak_value == 0 or clutter == 0
                        ): # if RCS analysis has provided partially invalid results mark the target as invalid
                            pt_valid__flag = 0
                    else:
                        rcs = []
                        clutter = []
                # Compute RCS and peak phase errors
                log.debug("Compute RCS and peak phase errors")
                if pt_valid__flag: # if target has not been found during IRF analysis skip errors computation
                    if self.__measure_ale_flag and self.__measure_rcs_flag:
                        polarization = tags[t, tt][1]
                        # Complex peak error: amplitude from the RCS mismatch,
                        # phase from the residual two-way propagation phase.
                        rcs_error = np.sqrt(10 ** ((rcs - pt_rcs[polarization]) / 10))
                        sat_geo = self.sar_product.general_sar_orbit[0].get_position(pt_sar__t_az[0, 0])
                        peak_phase_error = np.angle(
                            peak_value
                            * np.exp(1j * 4 * np.pi / (cst.LIGHT_SPEED / fc_hz) * np.linalg.norm(sat_geo - pt_geo))
                        )
                        peak_error = rcs_error * np.exp(1j * peak_phase_error)
                    else:
                        peak_error = []
                # Compute SCR
                log.debug("Compute SCR")
                if pt_valid__flag: # if target has not been found during IRF analysis skip SCR computation
                    if self.__measure_rcs_flag:
                        scr = 10 * np.log10(np.abs(peak_value) ** 2) - clutter
                    else:
                        scr = []
                # If target has been marked as valid, store results
                if pt_valid__flag:
                    log.debug("Target valid, store results")
                    self.swath.append(tags[t, tt][0])
                    self.burst.append(tt)
                    self.polarization.append(tags[t, tt][1])
                    self.position_range.append(position[0])
                    self.position_azimuth.append(position[1])
                    self.incidence_angle.append(incidence_angle)
                    self.look_angle.append(look_angle)
                    self.squint_angle.append(squint_angle)
                    self.resolution_range.append(irf_resolution[0])
                    self.resolution_azimuth.append(irf_resolution[1])
                    self.pslr_range.append(irf_pslr[0])
                    self.pslr_azimuth.append(irf_pslr[1])
                    self.pslr_2d.append(irf_pslr[2])
                    self.islr_range.append(irf_islr[0])
                    self.islr_azimuth.append(irf_islr[1])
                    self.islr_2d.append(irf_islr[2])
                    self.sslr_range.append(irf_sslr[0])
                    self.sslr_azimuth.append(irf_sslr[1])
                    self.sslr_2d.append(irf_sslr[2])
                    self.rcs.append(rcs)
                    self.clutter.append(clutter)
                    self.peak_error.append(peak_error)
                    self.scr.append(scr)
                    self.measured_ale_range.append(-irf_localization_error[0])
                    self.measured_ale_azimuth.append(-irf_localization_error[1])
                else:
                    log.debug("Target not valid, results discarded")
        self.n_views = len(self.swath)
        return True
| [
"logging.getLogger",
"numpy.prod",
"numpy.sqrt",
"numpy.array",
"numpy.isreal",
"numpy.linalg.norm",
"numpy.sin",
"numpy.mean",
"numpy.reshape",
"numpy.exp",
"sct.analysis.irfanalyser.IRFAnalyser",
"numpy.rint",
"numpy.tile",
"numpy.abs",
"arepytools.math.genericpoly.create_sorted_poly_l... | [((91, 118), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (108, 118), False, 'import logging\n'), ((4072, 4083), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (4080, 4083), True, 'import numpy as np\n'), ((2996, 3039), 'numpy.array', 'np.array', (['[[self.calibration_target.delay]]'], {}), '([[self.calibration_target.delay]])\n', (3004, 3039), True, 'import numpy as np\n'), ((3331, 3399), 'numpy.logical_and', 'np.logical_and', (['(pt_sar__rg_pos__mask != 0)', '(pt_sar__az_pos__mask != 0)'], {}), '(pt_sar__rg_pos__mask != 0, pt_sar__az_pos__mask != 0)\n', (3345, 3399), True, 'import numpy as np\n'), ((4362, 4386), 'numpy.mean', 'np.mean', (['t_az_step[:, 0]'], {}), '(t_az_step[:, 0])\n', (4369, 4386), True, 'import numpy as np\n'), ((4421, 4445), 'numpy.mean', 'np.mean', (['t_rg_step[:, 0]'], {}), '(t_rg_step[:, 0])\n', (4428, 4445), True, 'import numpy as np\n'), ((4764, 4796), 'numpy.tile', 'np.tile', (['pt_sar__mask', '(1, 1, 2)'], {}), '(pt_sar__mask, (1, 1, 2))\n', (4771, 4796), True, 'import numpy as np\n'), ((4857, 4889), 'numpy.tile', 'np.tile', (['pt_sar__mask', '(1, 1, 2)'], {}), '(pt_sar__mask, (1, 1, 2))\n', (4864, 4889), True, 'import numpy as np\n'), ((4172, 4196), 'numpy.mean', 'np.mean', (['t_az_step[:, 0]'], {}), '(t_az_step[:, 0])\n', (4179, 4196), True, 'import numpy as np\n'), ((4303, 4327), 'numpy.mean', 'np.mean', (['t_rg_step[:, 0]'], {}), '(t_rg_step[:, 0])\n', (4310, 4327), True, 'import numpy as np\n'), ((5506, 5519), 'sct.analysis.irfanalyser.IRFAnalyser', 'IRFAnalyser', ([], {}), '()\n', (5517, 5519), False, 'from sct.analysis.irfanalyser import IRFAnalyser\n'), ((12621, 12663), 'numpy.array', 'np.array', (['[rg_step[t, tt], az_step[t, tt]]'], {}), '([rg_step[t, tt], az_step[t, tt]])\n', (12629, 12663), True, 'import numpy as np\n'), ((6198, 6221), 'numpy.isreal', 'np.isreal', (['data_portion'], {}), '(data_portion)\n', (6207, 6221), True, 'import 
numpy as np\n'), ((6471, 6498), 'sct.support.signalprocessing.max2d_fine', 'sp.max2d_fine', (['data_portion'], {}), '(data_portion)\n', (6484, 6498), True, 'import sct.support.signalprocessing as sp\n'), ((8251, 8395), 'numpy.array', 'np.array', (['[roi[0] - roi_size * (az_ovrs - 1) / 2, roi[1] - roi_size * (rg_ovrs - 1) /\n 2, roi_size * az_ovrs, roi_size * rg_ovrs]'], {'dtype': 'int'}), '([roi[0] - roi_size * (az_ovrs - 1) / 2, roi[1] - roi_size * (\n rg_ovrs - 1) / 2, roi_size * az_ovrs, roi_size * rg_ovrs], dtype=int)\n', (8259, 8395), True, 'import numpy as np\n'), ((8735, 8845), 'numpy.array', 'np.array', (['[roi_max__pos[0] + roi_size * (rg_ovrs - 1) / 2, roi_max__pos[1] + roi_size *\n (az_ovrs - 1) / 2]'], {}), '([roi_max__pos[0] + roi_size * (rg_ovrs - 1) / 2, roi_max__pos[1] +\n roi_size * (az_ovrs - 1) / 2])\n', (8743, 8845), True, 'import numpy as np\n'), ((11138, 11164), 'numpy.reshape', 'np.reshape', (['pt__near', '(3,)'], {}), '(pt__near, (3,))\n', (11148, 11164), True, 'import numpy as np\n'), ((11495, 11520), 'numpy.reshape', 'np.reshape', (['pt__far', '(3,)'], {}), '(pt__far, (3,))\n', (11505, 11520), True, 'import numpy as np\n'), ((11951, 11986), 'numpy.abs', 'np.abs', (['((roi_size - 1) / irf_rg_cut)'], {}), '((roi_size - 1) / irf_rg_cut)\n', (11957, 11986), True, 'import numpy as np\n'), ((12585, 12597), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12593, 12597), True, 'import numpy as np\n'), ((13592, 13619), 'numpy.any', 'np.any', (['(irf_resolution == 0)'], {}), '(irf_resolution == 0)\n', (13598, 13619), True, 'import numpy as np\n'), ((13643, 13669), 'numpy.any', 'np.any', (['(irf_pslr[0:2] == 0)'], {}), '(irf_pslr[0:2] == 0)\n', (13649, 13669), True, 'import numpy as np\n'), ((13693, 13719), 'numpy.any', 'np.any', (['(irf_islr[0:2] == 0)'], {}), '(irf_islr[0:2] == 0)\n', (13699, 13719), True, 'import numpy as np\n'), ((13743, 13769), 'numpy.any', 'np.any', (['(irf_sslr[0:2] == 0)'], {}), '(irf_sslr[0:2] == 0)\n', (13749, 13769), 
True, 'import numpy as np\n'), ((10674, 10745), 'arepytools.math.genericpoly.create_sorted_poly_list', 'genericpoly.create_sorted_poly_list', (['self.sar_product.dc_vector_list[t]'], {}), '(self.sar_product.dc_vector_list[t])\n', (10709, 10745), False, 'from arepytools.math import genericpoly\n'), ((16583, 16633), 'numpy.sqrt', 'np.sqrt', (['(10 ** ((rcs - pt_rcs[polarization]) / 10))'], {}), '(10 ** ((rcs - pt_rcs[polarization]) / 10))\n', (16590, 16633), True, 'import numpy as np\n'), ((6114, 6134), 'numpy.abs', 'np.abs', (['data_portion'], {}), '(data_portion)\n', (6120, 6134), True, 'import numpy as np\n'), ((6311, 6331), 'numpy.abs', 'np.abs', (['data_portion'], {}), '(data_portion)\n', (6317, 6331), True, 'import numpy as np\n'), ((7436, 7463), 'numpy.prod', 'np.prod', (['data_portion.shape'], {}), '(data_portion.shape)\n', (7443, 7463), True, 'import numpy as np\n'), ((7849, 7873), 'numpy.rint', 'np.rint', (['(f_rg / b_rg / 5)'], {}), '(f_rg / b_rg / 5)\n', (7856, 7873), True, 'import numpy as np\n'), ((8106, 8130), 'numpy.rint', 'np.rint', (['(f_az / b_az / 5)'], {}), '(f_az / b_az / 5)\n', (8113, 8130), True, 'import numpy as np\n'), ((12377, 12419), 'numpy.squeeze', 'np.squeeze', (['pt_sar__rg_pos__mask[t, tt, 0]'], {}), '(pt_sar__rg_pos__mask[t, tt, 0])\n', (12387, 12419), True, 'import numpy as np\n'), ((12454, 12496), 'numpy.squeeze', 'np.squeeze', (['pt_sar__az_pos__mask[t, tt, 0]'], {}), '(pt_sar__az_pos__mask[t, tt, 0])\n', (12464, 12496), True, 'import numpy as np\n'), ((17022, 17053), 'numpy.exp', 'np.exp', (['(1.0j * peak_phase_error)'], {}), '(1.0j * peak_phase_error)\n', (17028, 17053), True, 'import numpy as np\n'), ((3928, 3950), 'numpy.mean', 'np.mean', (['rg_step[:, 0]'], {}), '(rg_step[:, 0])\n', (3935, 3950), True, 'import numpy as np\n'), ((4022, 4044), 'numpy.mean', 'np.mean', (['az_step[:, 0]'], {}), '(az_step[:, 0])\n', (4029, 4044), True, 'import numpy as np\n'), ((6687, 6707), 'numpy.floor', 'np.floor', (['roi_max_az'], {}), 
'(roi_max_az)\n', (6695, 6707), True, 'import numpy as np\n'), ((6757, 6777), 'numpy.floor', 'np.floor', (['roi_max_rg'], {}), '(roi_max_rg)\n', (6765, 6777), True, 'import numpy as np\n'), ((7145, 7165), 'numpy.floor', 'np.floor', (['roi_max_rg'], {}), '(roi_max_rg)\n', (7153, 7165), True, 'import numpy as np\n'), ((7221, 7241), 'numpy.floor', 'np.floor', (['roi_max_az'], {}), '(roi_max_az)\n', (7229, 7241), True, 'import numpy as np\n'), ((7412, 7432), 'numpy.abs', 'np.abs', (['data_portion'], {}), '(data_portion)\n', (7418, 7432), True, 'import numpy as np\n'), ((15036, 15073), 'numpy.sin', 'np.sin', (['(incidence_angle / 180 * np.pi)'], {}), '(incidence_angle / 180 * np.pi)\n', (15042, 15073), True, 'import numpy as np\n'), ((15274, 15311), 'numpy.sin', 'np.sin', (['(incidence_angle / 180 * np.pi)'], {}), '(incidence_angle / 180 * np.pi)\n', (15280, 15311), True, 'import numpy as np\n'), ((14350, 14387), 'numpy.sin', 'np.sin', (['(incidence_angle / 180 * np.pi)'], {}), '(incidence_angle / 180 * np.pi)\n', (14356, 14387), True, 'import numpy as np\n'), ((14763, 14800), 'numpy.cos', 'np.cos', (['(incidence_angle / 180 * np.pi)'], {}), '(incidence_angle / 180 * np.pi)\n', (14769, 14800), True, 'import numpy as np\n'), ((16913, 16945), 'numpy.linalg.norm', 'np.linalg.norm', (['(sat_geo - pt_geo)'], {}), '(sat_geo - pt_geo)\n', (16927, 16945), True, 'import numpy as np\n'), ((17390, 17408), 'numpy.abs', 'np.abs', (['peak_value'], {}), '(peak_value)\n', (17396, 17408), True, 'import numpy as np\n'), ((14682, 14719), 'numpy.sin', 'np.sin', (['(incidence_angle / 180 * np.pi)'], {}), '(incidence_angle / 180 * np.pi)\n', (14688, 14719), True, 'import numpy as np\n')] |
"""
Functions to calculate different visualisations for 1D models
"""
from tensorflow.keras.models import Model
from signal_screen_tools import add_zeros, change_last_activation_to_linear, normalise_array
import numpy as np
import tensorflow as tf
def calculate_occlusion_sensitivity(model: Model, data: np.ndarray, c: int, number_of_zeros=None):
    """
    https://arxiv.org/abs/1311.2901
    Occlusion sensitivity: slide a zero-filled window through every signal and measure
    how much the class score drops relative to the prediction on the unmodified data.

    :param model: Keras Sequential model
    :param data: array of signals; axis 0 are the samples, axis 1 the signal positions
    :param c: index of the class whose score is monitored
    :param number_of_zeros: occlusion window widths to evaluate, e.g. [5, 9, 15]
    :return: list with one sensitivity map per window width, list of used window widths
    """
    if number_of_zeros is None:
        number_of_zeros = [15]  # default window width when no range is defined
    n_samples, signal_length = data.shape[0], data.shape[1]
    # baseline class scores computed on the untouched signals
    baseline = model.predict(data)[..., c]
    occlusion_maps = []
    for window in number_of_zeros:  # one sensitivity map per window width
        current_map = np.empty([n_samples, signal_length])
        # show a progress bar when tqdm is installed, otherwise iterate silently
        try:
            from tqdm import tqdm
            positions = tqdm(range(signal_length),
                             desc="Occlusion sensitivity for {} samples and class {}".format(window, c))
        except ModuleNotFoundError:
            positions = range(signal_length)
        for position in positions:
            occluded = add_zeros(data.copy(), position, window)
            # drop in class score caused by zeroing out this region
            current_map[:, position] = baseline - model.predict(occluded)[..., c]
        occlusion_maps.append(current_map)
    return occlusion_maps, number_of_zeros
def calculate_grad_cam(model: Model, data: np.ndarray, c: int,
                       name_of_conv_layer: str = None, index_of_conv_layer: int = None,
                       dependencies=None, use_relu=True, normalise=True, upscale_to_input=True) -> np.ndarray:
    """
    https://arxiv.org/abs/1610.02391 - gradCAM
    Calculates gradCAM for a feedforward neural network.
    When neither ``name_of_conv_layer`` nor ``index_of_conv_layer`` is given, the last
    ``Conv1D`` layer of the model is used.

    :param model: model for gradCAM
    :param data: data to visualise
    :param c: index of class
    :param name_of_conv_layer: name of the conv layer to use
    :param index_of_conv_layer: index of the conv layer to use (takes precedence over the name)
    :param dependencies: custom dependencies for neural network
    :param use_relu: keep only positive gradients; raw gradients can be more interpretative
    :param upscale_to_input: stretch the result to the length of the input signal
    :param normalise: to put values between 0-1
    :return: np.ndarray with saliency map
    :raises Exception: when no suitable convolution layer can be resolved
    """
    model_modified: Model = change_last_activation_to_linear(model, dependencies)
    # find last layer of CNN if not defined
    if name_of_conv_layer is None and index_of_conv_layer is None:
        for i, layer in enumerate(model_modified.layers):
            if isinstance(layer, tf.keras.layers.Conv1D):
                index_of_conv_layer = i
    # resolve the layer; 'is not None' (not truthiness) so a conv layer at index 0 is accepted
    if index_of_conv_layer is not None:
        layer = model_modified.get_layer(index=index_of_conv_layer)
    elif name_of_conv_layer:
        layer = model_modified.get_layer(name=name_of_conv_layer)
    else:
        raise Exception("Convolution layer is missing / is not correctly defined")
    if not isinstance(layer, tf.keras.layers.Conv1D):
        raise Exception("Convolution layer is not correctly defined")
    # model that outputs both the predictions and the conv layer activations
    model_with_conv_output = Model([model_modified.input],
                                  [model_modified.output, layer.output])
    # calculate gradient of the class score w.r.t. the conv activations
    with tf.GradientTape() as g:
        data = tf.convert_to_tensor(data)
        y_A = model_with_conv_output(data)
    dy_dA = g.gradient(y_A[0][:, c], y_A[1])
    # weights for the masks
    weights = tf.reduce_mean(dy_dA, axis=(0, 1))
    grad_CAM = tf.reduce_sum(tf.multiply(weights, y_A[1]), axis=-1)  # multiplication of the masks
    # use relu to get only positive gradients
    if use_relu:
        grad_CAM = tf.nn.relu(grad_CAM)
    if upscale_to_input:
        # scipy.ndimage.interpolation is a deprecated alias; the public location is scipy.ndimage
        from scipy.ndimage import zoom
        if len(grad_CAM.shape) == 1:
            scale_factor = data.shape[1] / grad_CAM.shape[0]
        else:
            scale_factor = data.shape[1] / grad_CAM.shape[1]  # expand to shape of input
        grad_CAM = zoom(grad_CAM, scale_factor)
    if normalise:
        grad_CAM = normalise_array(grad_CAM)
    if not isinstance(grad_CAM, np.ndarray):
        return grad_CAM.numpy()
    return grad_CAM
def calculate_saliency_map(model: Model, data: np.ndarray, c: int, dependencies=None, normalise=True) -> np.ndarray:
    """
    https://arxiv.org/abs/1312.6034 - saliency maps
    https://arxiv.org/abs/1706.03825 - smoothed version
    Vanilla-gradient saliency: the absolute gradient of the class score with respect
    to the input, computed for every sample in ``data``.

    :param model: model for the saliency calculation
    :param data: input data
    :param c: class index
    :param dependencies: custom dependencies for the neural network
    :param normalise: scale the output into the range 0-1
    :return: np.ndarray with the saliency map
    """
    linear_model = change_last_activation_to_linear(model, dependencies)
    # gradient of the class score with respect to the (watched) input signals
    with tf.GradientTape() as tape:
        data = tf.convert_to_tensor(data)
        tape.watch(data)
        class_score = linear_model(data)[:, c]
    gradient = tape.gradient(class_score, data)
    saliency = np.abs(gradient)
    if normalise:
        saliency = normalise_array(saliency)
    saliency = tf.reshape(saliency, [saliency.shape[0], saliency.shape[1]])
    return saliency.numpy()
| [
"numpy.abs",
"signal_screen_tools.change_last_activation_to_linear",
"tensorflow.nn.relu",
"tensorflow.multiply",
"scipy.ndimage.interpolation.zoom",
"tensorflow.GradientTape",
"signal_screen_tools.normalise_array",
"numpy.empty",
"tensorflow.reshape",
"tensorflow.convert_to_tensor",
"tensorflow... | [((3057, 3110), 'signal_screen_tools.change_last_activation_to_linear', 'change_last_activation_to_linear', (['model', 'dependencies'], {}), '(model, dependencies)\n', (3089, 3110), False, 'from signal_screen_tools import add_zeros, change_last_activation_to_linear, normalise_array\n'), ((4616, 4650), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['dy_dA'], {'axis': '(0, 1)'}), '(dy_dA, axis=(0, 1))\n', (4630, 4650), True, 'import tensorflow as tf\n'), ((6129, 6182), 'signal_screen_tools.change_last_activation_to_linear', 'change_last_activation_to_linear', (['model', 'dependencies'], {}), '(model, dependencies)\n', (6161, 6182), False, 'from signal_screen_tools import add_zeros, change_last_activation_to_linear, normalise_array\n'), ((6420, 6441), 'numpy.abs', 'np.abs', (['d_loss_d_data'], {}), '(d_loss_d_data)\n', (6426, 6441), True, 'import numpy as np\n'), ((6513, 6564), 'tensorflow.reshape', 'tf.reshape', (['grads', '[grads.shape[0], grads.shape[1]]'], {}), '(grads, [grads.shape[0], grads.shape[1]])\n', (6523, 6564), True, 'import tensorflow as tf\n'), ((1290, 1330), 'numpy.empty', 'np.empty', (['[data.shape[0], data.shape[1]]'], {}), '([data.shape[0], data.shape[1]])\n', (1298, 1330), True, 'import numpy as np\n'), ((4415, 4432), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4430, 4432), True, 'import tensorflow as tf\n'), ((4454, 4480), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['data'], {}), '(data)\n', (4474, 4480), True, 'import tensorflow as tf\n'), ((4681, 4709), 'tensorflow.multiply', 'tf.multiply', (['weights', 'y_A[1]'], {}), '(weights, y_A[1])\n', (4692, 4709), True, 'import tensorflow as tf\n'), ((4962, 4982), 'tensorflow.nn.relu', 'tf.nn.relu', (['grad_CAM'], {}), '(grad_CAM)\n', (4972, 4982), True, 'import tensorflow as tf\n'), ((5282, 5310), 'scipy.ndimage.interpolation.zoom', 'zoom', (['grad_CAM', 'scale_factor'], {}), '(grad_CAM, scale_factor)\n', (5286, 5310), False, 'from 
scipy.ndimage.interpolation import zoom\n'), ((5349, 5374), 'signal_screen_tools.normalise_array', 'normalise_array', (['grad_CAM'], {}), '(grad_CAM)\n', (5364, 5374), False, 'from signal_screen_tools import add_zeros, change_last_activation_to_linear, normalise_array\n'), ((6230, 6247), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (6245, 6247), True, 'import tensorflow as tf\n'), ((6269, 6295), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['data'], {}), '(data)\n', (6289, 6295), True, 'import tensorflow as tf\n'), ((6477, 6499), 'signal_screen_tools.normalise_array', 'normalise_array', (['grads'], {}), '(grads)\n', (6492, 6499), False, 'from signal_screen_tools import add_zeros, change_last_activation_to_linear, normalise_array\n')] |
import os, glob
import torch, sys
from torch.utils.data import Dataset
from .data_utils import pkload
import matplotlib.pyplot as plt
import random
import numpy as np
class OASISBrainDataset(Dataset):
    """Training dataset: pairs each OASIS brain volume with a randomly chosen second volume."""

    def __init__(self, data_path, transforms):
        self.paths = data_path          # list of pickle file paths, one per volume
        self.transforms = transforms    # callable applied to [image, segmentation] pairs

    def one_hot(self, img, C):
        """One-hot encode a label volume into C channels (channel i marks voxels == i)."""
        encoded = np.zeros((C, img.shape[1], img.shape[2], img.shape[3]))
        for label in range(C):
            encoded[label, ...] = img == label
        return encoded

    def __getitem__(self, index):
        moving_path = self.paths[index]
        # pick a random different volume as the second image of the pair
        candidates = self.paths.copy()
        candidates.remove(moving_path)
        random.shuffle(candidates)
        fixed_path = candidates[0]
        x, x_seg = pkload(moving_path)
        y, y_seg = pkload(fixed_path)
        # add a channel dimension: [channels, Height, Width, Depth]
        x = x[None, ...]
        y = y[None, ...]
        x_seg = x_seg[None, ...]
        y_seg = y_seg[None, ...]
        x, x_seg = self.transforms([x, x_seg])
        y, y_seg = self.transforms([y, y_seg])
        # contiguous copies before handing the arrays to torch
        return tuple(torch.from_numpy(np.ascontiguousarray(a)) for a in (x, y, x_seg, y_seg))

    def __len__(self):
        return len(self.paths)
class OASISBrainInferDataset(Dataset):
    """Inference dataset: each pickle already stores a full (x, y, x_seg, y_seg) tuple."""

    def __init__(self, data_path, transforms):
        self.paths = data_path          # list of pickle file paths, one per sample pair
        self.transforms = transforms    # callable applied to [image, segmentation] pairs

    def one_hot(self, img, C):
        """One-hot encode a label volume into C channels (channel i marks voxels == i)."""
        encoded = np.zeros((C, img.shape[1], img.shape[2], img.shape[3]))
        for label in range(C):
            encoded[label, ...] = img == label
        return encoded

    def __getitem__(self, index):
        x, y, x_seg, y_seg = pkload(self.paths[index])
        # add a channel dimension: [channels, Height, Width, Depth]
        x = x[None, ...]
        y = y[None, ...]
        x_seg = x_seg[None, ...]
        y_seg = y_seg[None, ...]
        x, x_seg = self.transforms([x, x_seg])
        y, y_seg = self.transforms([y, y_seg])
        # contiguous copies before handing the arrays to torch
        return tuple(torch.from_numpy(np.ascontiguousarray(a)) for a in (x, y, x_seg, y_seg))

    def __len__(self):
        return len(self.paths)
"numpy.zeros",
"random.shuffle",
"torch.from_numpy",
"numpy.ascontiguousarray"
] | [((379, 434), 'numpy.zeros', 'np.zeros', (['(C, img.shape[1], img.shape[2], img.shape[3])'], {}), '((C, img.shape[1], img.shape[2], img.shape[3]))\n', (387, 434), True, 'import numpy as np\n'), ((667, 691), 'random.shuffle', 'random.shuffle', (['tar_list'], {}), '(tar_list)\n', (681, 691), False, 'import random\n'), ((1005, 1028), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['x'], {}), '(x)\n', (1025, 1028), True, 'import numpy as np\n'), ((1081, 1104), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['y'], {}), '(y)\n', (1101, 1104), True, 'import numpy as np\n'), ((1122, 1149), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['x_seg'], {}), '(x_seg)\n', (1142, 1149), True, 'import numpy as np\n'), ((1206, 1233), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['y_seg'], {}), '(y_seg)\n', (1226, 1233), True, 'import numpy as np\n'), ((1659, 1714), 'numpy.zeros', 'np.zeros', (['(C, img.shape[1], img.shape[2], img.shape[3])'], {}), '((C, img.shape[1], img.shape[2], img.shape[3]))\n', (1667, 1714), True, 'import numpy as np\n'), ((2122, 2145), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['x'], {}), '(x)\n', (2142, 2145), True, 'import numpy as np\n'), ((2196, 2219), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['y'], {}), '(y)\n', (2216, 2219), True, 'import numpy as np\n'), ((2237, 2264), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['x_seg'], {}), '(x_seg)\n', (2257, 2264), True, 'import numpy as np\n'), ((2321, 2348), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['y_seg'], {}), '(y_seg)\n', (2341, 2348), True, 'import numpy as np\n'), ((1264, 1283), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (1280, 1283), False, 'import torch, sys\n'), ((1285, 1304), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (1301, 1304), False, 'import torch, sys\n'), ((1306, 1329), 'torch.from_numpy', 'torch.from_numpy', (['x_seg'], {}), '(x_seg)\n', (1322, 1329), False, 'import torch, sys\n'), ((1331, 
1354), 'torch.from_numpy', 'torch.from_numpy', (['y_seg'], {}), '(y_seg)\n', (1347, 1354), False, 'import torch, sys\n'), ((2379, 2398), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (2395, 2398), False, 'import torch, sys\n'), ((2400, 2419), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (2416, 2419), False, 'import torch, sys\n'), ((2421, 2444), 'torch.from_numpy', 'torch.from_numpy', (['x_seg'], {}), '(x_seg)\n', (2437, 2444), False, 'import torch, sys\n'), ((2446, 2469), 'torch.from_numpy', 'torch.from_numpy', (['y_seg'], {}), '(y_seg)\n', (2462, 2469), False, 'import torch, sys\n')] |
import gym
import gym_battleship_basic
import os
import numpy as np
import random
from keras.optimizers import Adam
from collections import deque
from tqdm import tqdm
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.models import load_model
from tqdm import tqdm
from agents.util import train_data_process
import copy
import pickle
# reference: https://gist.github.com/yashpatel5400/049fe6f4372b16bab5d3dab36854f262#file-mountaincar-py
class DoubleModel_DQN:
    """Double-network DQN agent for the battleship gym environment.

    Keeps two Keras networks: ``model`` (online, used for action selection and
    trained on replayed experience) and ``target_model`` (used to compute the
    bootstrap targets; softly updated towards ``model`` with factor ``tau``).
    """

    def __init__(self, env, model=None, log_saving=False):
        """
        :param env: gym environment; its observation_space defines the network input shape
        :param model: optional pre-built Keras model; NOTE(review): when given, the SAME
                      object is assigned to both ``model`` and ``target_model`` — confirm
                      that sharing one network here is intended
        :param log_saving: when True, act()/test() record observations, actions and Q-estimates
        """
        self.log_saving = log_saving
        self.env = env
        self.memory = deque(maxlen=2000)       # experience replay buffer (oldest entries dropped)
        self.gamma = 0.9                         # discount factor for future rewards
        self.epsilon = 1.0                       # current epsilon-greedy exploration probability
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.7                 # multiplicative decay applied on every act() call
        self.learning_rate = 0.0001
        # NOTE(review): exploration_min/exploration_rate are set but only exploration_rate
        # is ever re-assigned in create_model(); epsilon drives the actual exploration
        self.exploration_min = 0.01
        self.exploration_rate = 0.4
        self.tau = .125                          # soft-update factor for the target network
        self.obs_hist = None                     # observation history recorded during test()
        self.shot_log = []                       # actions recorded during test()
        self.model_estimates = None              # Q-value estimates recorded during act()
        self.state_shape = self.env.observation_space.shape
        # self.weight_backup = r'.\model\DQN_V3_06082020_3.model'
        # self.weight_backup = r'.\model\DQN_V3_06092020_1.model'
        self.weight_backup = r'.\model\DQN_V3_06102020_2.model'
        if model is not None:
            self.model = model
            self.target_model = model
        else:
            self.model = self.create_model(self.env.observation_space.shape)
            self.target_model = self.create_model(self.env.observation_space.shape)

    def reset_log(self):
        """Clear the per-episode logging state (observations, Q-estimates, actions)."""
        self.obs_hist = None
        self.model_estimates = None
        self.shot_log = []

    def create_model(self, input_shape):
        """Build the CNN Q-network; loads weights from ``weight_backup`` when the file exists.

        :param input_shape: shape of one observation (without the batch dimension)
        :return: compiled Keras Model mapping an observation to 100 Q-values (one per cell)
        """
        X_input = Input(input_shape)
        X = ZeroPadding2D((3, 3))(X_input)
        X = Conv2D(32, (7, 7), strides=(1, 1), name='conv0')(X)
        X = BatchNormalization(axis=3, name='bn0')(X)
        X = ZeroPadding2D((3, 3))(X)
        X = Conv2D(32, (7, 7), strides=(1, 1), name='conv1')(X)
        X = BatchNormalization(axis=3, name='bn1')(X)
        X = Activation('relu')(X)
        X = MaxPooling2D((2, 2), name='max_pool')(X)
        # FLATTEN X (means convert it to a vector) + FULLYCONNECTED
        X = Flatten()(X)
        # X = Dense(100, activation='sigmoid', name='fc')(X)
        X = Dense(100, activation='linear', name='fc')(X)
        model = Model(inputs=X_input, outputs=X, name='DQN_Battleship')
        model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
        if os.path.isfile(self.weight_backup):
            model.load_weights(self.weight_backup)
            self.exploration_rate = self.exploration_min
        print(model.summary())
        return model

    def model_pretrain(self, game_logs, batch_size, epochs):
        """Supervised pre-training of the target network from recorded games,
        then copy the resulting weights into the online network via the backup file.
        """
        train_x, train_y = train_data_process(game_logs, reward=True)
        self.target_model.fit(train_x, train_y,
                              batch_size=batch_size,
                              epochs=epochs, verbose=2)
        self.save_model(self.weight_backup)
        self.model.load_weights(self.weight_backup) # assign same weights

    def act(self, state, explore=True):
        """Choose an action for ``state`` (epsilon-greedy when ``explore`` is True).

        NOTE(review): epsilon is decayed on EVERY call, including explore=False
        evaluation calls — confirm that this is intended.
        """
        self.epsilon *= self.epsilon_decay
        self.epsilon = max(self.epsilon_min, self.epsilon)
        if explore and (np.random.random() < self.epsilon):
            return self.env.action_space.sample()
        act_values = self.model.predict(state)[0]
        if self.log_saving:
            # append this step's Q-estimates to the running log
            if self.model_estimates is None:
                self.model_estimates = act_values.reshape((1,) + act_values.shape)
            else:
                self.model_estimates = np.concatenate([self.model_estimates, act_values.reshape((1,) + act_values.shape)])
        # mask out cells whose channel 0 is already nonzero (presumably already-shot
        # cells — verify against the env's observation encoding) so argmax skips them
        act_values[state[..., 0].flatten() != 0] = -99
        return np.argmax(act_values)

    def remember(self, state, action, reward, new_state, done):
        """Store one transition in the replay buffer."""
        self.memory.append([state, action, reward, new_state, done])

    def replay(self):
        """Sample a batch from the replay buffer and fit the online network on
        double-DQN style targets computed with the target network.

        Does nothing until the buffer holds at least 256 transitions.
        """
        batch_size = 256
        if len(self.memory) < batch_size:
            return
        samples = random.sample(self.memory, batch_size)
        state_input = np.array([])
        target_input = np.array([])
        for sample in samples:
            state, action, reward, new_state, done = sample
            target = self.target_model.predict(state)
            if done:
                target[0][action] = reward
            else:
                # bootstrap: best target-network Q-value of the successor state
                Q_future = max(self.target_model.predict(new_state)[0])
                target[0][action] = reward + Q_future * self.gamma
            # sum(shape) == 0 only for the initial empty placeholder array
            if sum(state_input.shape) == 0:
                state_input = state
                target_input = target
            else:
                state_input = np.concatenate([state_input, state])
                target_input = np.concatenate([target_input, target])
        self.model.fit(state_input, target_input, batch_size=32, epochs=2, verbose=1)

    def target_train(self):
        """Soft-update: blend online weights into the target network with factor tau."""
        weights = self.model.get_weights()
        target_weights = self.target_model.get_weights()
        for i in range(len(target_weights)):
            target_weights[i] = weights[i] * self.tau + target_weights[i] * (1 - self.tau)
        self.target_model.set_weights(target_weights)

    def save_model(self, fn):
        """Persist the TARGET network (not the online one) to ``fn``."""
        self.target_model.save(fn)

    def test(self, env):
        """Play one greedy (explore=False) episode; returns (steps_taken, total_reward)."""
        obs, done, ep_reward = env.reset(), False, 0
        i = 0
        while not done:
            i += 1
            obs = np.reshape(obs, (1,) + env.observation_space.shape)
            action = self.act(obs, explore=False)
            obs, reward, done, _ = env.step(action)
            if self.log_saving:
                # record observations and actions for later inspection
                if i == 1:
                    self.obs_hist = obs.reshape((1,) + obs.shape)
                    self.shot_log = [action]
                else:
                    self.obs_hist = np.concatenate([self.obs_hist, obs.reshape((1,) + obs.shape)])
                    self.shot_log += [action]
            ep_reward += reward
        return i, ep_reward

    def train(self, env, trials=1000, game_logs=None):
        """Main training loop: optional pre-training, then ``trials`` episodes of
        epsilon-greedy play with per-step experience replay.

        NOTE(review): replay() runs after EVERY environment step, and on every
        100th trial target_train()/save_model() run on every step of that trial —
        confirm the intended frequency.
        """
        if game_logs is not None:
            self.model_pretrain(game_logs, 128, 4)
        steps = []
        for trial in range(trials):
            cur_state = env.reset()
            cur_state = np.reshape(cur_state, (1,) + env.observation_space.shape)
            total_reward = 0
            total_step = 0
            done = False
            while not done:
                action = self.act(cur_state)
                # print(action)
                new_state, reward, done, _ = env.step(action)
                new_state = np.reshape(new_state, (1,) + env.observation_space.shape)
                self.remember(cur_state, action, reward, new_state, done)
                cur_state = new_state
                total_reward += reward
                total_step += 1
                self.replay()  # internally iterates default (prediction) model
                if trial % 100 == 0:
                    self.target_train()
                    self.save_model(self.weight_backup)
            steps += [total_step]
            # running average over (at most) the last 200 episodes
            if len(steps) <= 200:
                mean_steps = np.mean(steps)
            else:
                mean_steps = np.mean(steps[-200:])
            print("Completed in {} trials with reward {} and step {}, average steps {}".format(trial,
                                                                                              total_reward, total_step,
                                                                                              mean_steps))
        self.save_model(self.weight_backup)
if __name__ == "__main__":
    # 10x10 battleship board with a 3-D observation tensor
    test_env = gym.make('battleshipBasic-v0', board_shape=(10, 10), verbose=False, obs_3d=True)
    # pre-recorded games from the hunt/search heuristic agent, used for pre-training
    with open(r'.\..\data\huntSearch_agentGames.pickle', 'rb') as handle:
        sample_data = pickle.load(handle)
    dqn_agent = DoubleModel_DQN(env=test_env, log_saving=False)
    # 30000 training trials, seeded with the recorded games
    dqn_agent.train(test_env, 30000, sample_data)
    # main(test_env)
| [
"keras.layers.Conv2D",
"numpy.array",
"keras.layers.Activation",
"keras.layers.Dense",
"gym.make",
"numpy.mean",
"collections.deque",
"numpy.reshape",
"numpy.random.random",
"agents.util.train_data_process",
"keras.models.Model",
"numpy.concatenate",
"keras.layers.ZeroPadding2D",
"keras.op... | [((7774, 7859), 'gym.make', 'gym.make', (['"""battleshipBasic-v0"""'], {'board_shape': '(10, 10)', 'verbose': '(False)', 'obs_3d': '(True)'}), "('battleshipBasic-v0', board_shape=(10, 10), verbose=False, obs_3d=True\n )\n", (7782, 7859), False, 'import gym\n'), ((807, 825), 'collections.deque', 'deque', ([], {'maxlen': '(2000)'}), '(maxlen=2000)\n', (812, 825), False, 'from collections import deque\n'), ((1875, 1893), 'keras.layers.Input', 'Input', (['input_shape'], {}), '(input_shape)\n', (1880, 1893), False, 'from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D\n'), ((2525, 2580), 'keras.models.Model', 'Model', ([], {'inputs': 'X_input', 'outputs': 'X', 'name': '"""DQN_Battleship"""'}), "(inputs=X_input, outputs=X, name='DQN_Battleship')\n", (2530, 2580), False, 'from keras.models import Model\n'), ((2665, 2699), 'os.path.isfile', 'os.path.isfile', (['self.weight_backup'], {}), '(self.weight_backup)\n', (2679, 2699), False, 'import os\n'), ((2950, 2992), 'agents.util.train_data_process', 'train_data_process', (['game_logs'], {'reward': '(True)'}), '(game_logs, reward=True)\n', (2968, 2992), False, 'from agents.util import train_data_process\n'), ((3938, 3959), 'numpy.argmax', 'np.argmax', (['act_values'], {}), '(act_values)\n', (3947, 3959), True, 'import numpy as np\n'), ((4221, 4259), 'random.sample', 'random.sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (4234, 4259), False, 'import random\n'), ((4282, 4294), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4290, 4294), True, 'import numpy as np\n'), ((4318, 4330), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4326, 4330), True, 'import numpy as np\n'), ((7951, 7970), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (7962, 7970), False, 'import pickle\n'), ((1906, 1927), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(3, 3)'], {}), '((3, 3))\n', (1919, 1927), False, 'from keras.layers import 
Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D\n'), ((1949, 1997), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(7, 7)'], {'strides': '(1, 1)', 'name': '"""conv0"""'}), "(32, (7, 7), strides=(1, 1), name='conv0')\n", (1955, 1997), False, 'from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D\n'), ((2013, 2051), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)', 'name': '"""bn0"""'}), "(axis=3, name='bn0')\n", (2031, 2051), False, 'from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D\n'), ((2067, 2088), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(3, 3)'], {}), '((3, 3))\n', (2080, 2088), False, 'from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D\n'), ((2104, 2152), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(7, 7)'], {'strides': '(1, 1)', 'name': '"""conv1"""'}), "(32, (7, 7), strides=(1, 1), name='conv1')\n", (2110, 2152), False, 'from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D\n'), ((2168, 2206), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)', 'name': '"""bn1"""'}), "(axis=3, name='bn1')\n", (2186, 2206), False, 'from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D\n'), ((2222, 2240), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2232, 2240), False, 'from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D\n'), ((2256, 2293), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'name': '"""max_pool"""'}), "((2, 2), name='max_pool')\n", (2268, 2293), False, 'from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D\n'), ((2377, 2386), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2384, 
2386), False, 'from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D\n'), ((2463, 2505), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""linear"""', 'name': '"""fc"""'}), "(100, activation='linear', name='fc')\n", (2468, 2505), False, 'from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D\n'), ((5595, 5646), 'numpy.reshape', 'np.reshape', (['obs', '((1,) + env.observation_space.shape)'], {}), '(obs, (1,) + env.observation_space.shape)\n', (5605, 5646), True, 'import numpy as np\n'), ((6402, 6459), 'numpy.reshape', 'np.reshape', (['cur_state', '((1,) + env.observation_space.shape)'], {}), '(cur_state, (1,) + env.observation_space.shape)\n', (6412, 6459), True, 'import numpy as np\n'), ((2625, 2652), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.learning_rate'}), '(lr=self.learning_rate)\n', (2629, 2652), False, 'from keras.optimizers import Adam\n'), ((3435, 3453), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3451, 3453), True, 'import numpy as np\n'), ((4863, 4899), 'numpy.concatenate', 'np.concatenate', (['[state_input, state]'], {}), '([state_input, state])\n', (4877, 4899), True, 'import numpy as np\n'), ((4931, 4969), 'numpy.concatenate', 'np.concatenate', (['[target_input, target]'], {}), '([target_input, target])\n', (4945, 4969), True, 'import numpy as np\n'), ((6736, 6793), 'numpy.reshape', 'np.reshape', (['new_state', '((1,) + env.observation_space.shape)'], {}), '(new_state, (1,) + env.observation_space.shape)\n', (6746, 6793), True, 'import numpy as np\n'), ((7271, 7285), 'numpy.mean', 'np.mean', (['steps'], {}), '(steps)\n', (7278, 7285), True, 'import numpy as np\n'), ((7333, 7354), 'numpy.mean', 'np.mean', (['steps[-200:]'], {}), '(steps[-200:])\n', (7340, 7354), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from transformers import *
import re, string
import pandas as pd
import sys
from keras.preprocessing.sequence import pad_sequences
def map_sent(sent):
    """Map an airline sentiment label to a binary class.

    'positive' and 'neutral' map to 1, 'negative' to 0; any other value
    falls through and implicitly returns None.
    """
    if sent in ('positive', 'neutral'):
        return 1
    if sent == 'negative':
        return 0
def deEmojify(inputString):
    """Drop every non-ASCII character (emoji, accents, ...) from the string."""
    ascii_only = inputString.encode('ascii', 'ignore')
    return ascii_only.decode('ascii')
# tokenizer matching the cased base BERT model
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
# NOTE(review): compiled but never used below — dead code candidate
pattern = re.compile('[\W_]+', re.UNICODE)
df = pd.read_csv('dataset/Tweets.csv')
# evaluate on the first 5000 tweets only
df = df[:5000]
# cleaning pipeline: strip non-ASCII, URLs, mentions and punctuation
df['text'] = df['text'].apply(lambda title: deEmojify(title))
df['text'] = df['text'].apply(lambda title: re.sub(r"http\S+", "", title))
df['text'] = df['text'].apply(lambda title: ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)"," ",title).split()))
# keep the cleaned (but untokenized) sentence for the printout below
df['original'] = df['text'].copy()
# NOTE(review): '[CEP]' looks like a typo for BERT's '[SEP]' separator token;
# the fine-tuned model was presumably trained with the same preprocessing — confirm before changing
df['text'] = df['text'].apply(lambda title: '[CLS] ' + title + ' [CEP]')
df['text'] = df['text'].apply(lambda title: tokenizer.tokenize(title))
# map string labels to binary classes (positive/neutral -> 1, negative -> 0)
df['airline_sentiment'] = df['airline_sentiment'].apply(lambda sent: map_sent(sent))
MAX_LEN=128
# pad/truncate the token-id sequences to a fixed length of 128
input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(title) for title in df['text']],
                          maxlen=MAX_LEN, dtype='long', truncating='post', padding='post')
# fine-tuned classifier saved locally
model = TFBertForSequenceClassification.from_pretrained('./seemsaccurate7/')
classes = ['negative', 'positive']
results = model.predict(input_ids)
count = 0
total = 0
# accuracy is computed only over tweets with full (== 1) annotation confidence
for i in range(len(results)):
    classi = np.argmax(results[i])
    orig_sent = df['airline_sentiment'][i]
    confidence = df['airline_sentiment_confidence'][i]
    if confidence == 1:
        total += 1
        if orig_sent == classi:
            count += 1
        print('Sentence: {:s}'.format(df['original'][i]))
        print('Sentiment: {:s}'.format(classes[classi]))
        print('Real Sentiment: {:s}'.format(classes[orig_sent]))
accuracy = (count / total) * 100
print(count)
print(total)
print('Accuracy {:.2f}'.format(accuracy))
| [
"re.sub",
"numpy.argmax",
"pandas.read_csv",
"re.compile"
] | [((474, 507), 're.compile', 're.compile', (['"""[\\\\W_]+"""', 're.UNICODE'], {}), "('[\\\\W_]+', re.UNICODE)\n", (484, 507), False, 'import re, string\n'), ((513, 546), 'pandas.read_csv', 'pd.read_csv', (['"""dataset/Tweets.csv"""'], {}), "('dataset/Tweets.csv')\n", (524, 546), True, 'import pandas as pd\n'), ((1491, 1512), 'numpy.argmax', 'np.argmax', (['results[i]'], {}), '(results[i])\n', (1500, 1512), True, 'import numpy as np\n'), ((670, 699), 're.sub', 're.sub', (['"""http\\\\S+"""', '""""""', 'title'], {}), "('http\\\\S+', '', title)\n", (676, 699), False, 'import re, string\n'), ((754, 827), 're.sub', 're.sub', (['"""(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\\\\w+:\\\\/\\\\/\\\\S+)"""', '""" """', 'title'], {}), "('(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\\\w+:\\\\/\\\\/\\\\S+)', ' ', title)\n", (760, 827), False, 'import re, string\n')] |
#!/usr/bin/env python
import argparse
import concurrent.futures
import logging
import os
import re
import threading
import time
import cv2
import numpy as np
import tensorboardX
import torch
from scipy import ndimage
from robot import SimRobot
from trainer import Trainer
from utils import utils, viz
from utils.logger import Logger
class LearnManipulation:
    """Self-supervised deep-RL training of manipulation primitives (push/grasp/place).

    Wires together a simulated robot, a Trainer that owns the affordance
    network, a transition/snapshot Logger and a TensorBoard writer, then
    alternates action execution (`agent` thread) and backprop (`loop`).
    """

    def __init__(self, args):
        """Configure robot, trainer, logger and all RL bookkeeping from parsed CLI args."""
        # --------------- Setup options ---------------
        self.is_sim = args.is_sim
        sim_port = args.sim_port
        obj_mesh_dir = os.path.abspath(args.obj_mesh_dir) if self.is_sim else None
        num_obj = args.num_obj if self.is_sim else None
        # Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
        if self.is_sim:
            self.workspace_limits = np.asarray([[-0.724, -0.276], [-0.224, 0.224], [-0.0001, 0.8]])
        else:
            self.workspace_limits = np.asarray([[0.3, 0.748], [-0.224, 0.224], [-0.255, -0.1]])
        self.heightmap_resolution = args.heightmap_resolution  # meters per heightmap pixel (per CLI help)
        random_seed = args.random_seed
        force_cpu = args.force_cpu
        # ------------- Algorithm options -------------
        network = args.network
        num_rotations = args.num_rotations
        self.future_reward_discount = args.future_reward_discount
        self.explore_actions = args.explore_actions
        self.explore_type = args.explore_type
        self.explore_rate_decay = args.explore_rate_decay
        # Constants for the loss-adaptive-exploration schedule (explore_type == 2, see loop()).
        self.LAE_sigma = 0.33
        self.LAE_beta = 0.25
        self.experience_replay_disabled = args.experience_replay_disabled
        self.push_enabled = args.push_enabled
        self.place_enabled = args.place_enabled
        self.max_iter = args.max_iter
        self.reward_type = args.reward_type
        self.filter_type = args.filter_type
        self.place_reward_scale = args.place_reward_scale
        self.goal_stack_height = args.goal_stack_height
        # -------------- Testing options --------------
        self.is_testing = args.is_testing
        self.max_test_trials = args.max_test_trials
        test_preset_cases = args.test_preset_cases
        test_preset_file = os.path.abspath(args.test_preset_file) if test_preset_cases else None
        # ------ Pre-loading and logging options ------
        if args.logging_directory and not args.snapshot_file:
            logging_directory = os.path.abspath(args.logging_directory)
            self.snapshot_file = os.path.join(logging_directory, 'models/snapshot-backup.pth')
        elif args.snapshot_file:
            # NOTE(review): this branch calls os.path.abspath(args.logging_directory)
            # even when --logging_directory was not given (None) — verify callers
            # always pass both flags together.
            logging_directory = os.path.abspath(args.logging_directory)
            self.snapshot_file = os.path.abspath(args.snapshot_file)
        else:
            logging_directory = None
            self.snapshot_file = None
        self.save_visualizations = args.save_visualizations
        # Initialize pick-and-place system (camera and robot)
        if self.is_sim:
            self.robot = SimRobot(sim_port, obj_mesh_dir, num_obj, self.workspace_limits, self.is_testing,
                                 test_preset_cases, test_preset_file, self.place_enabled)
        else:
            raise NotImplementedError
        # Initialize data logger
        self.logger = Logger(logging_directory, args)
        self.logger.save_camera_info(self.robot.cam_intrinsics, self.robot.cam_pose, self.robot.cam_depth_scale)
        self.logger.save_heightmap_info(self.workspace_limits, self.heightmap_resolution)
        # Tensorboard
        self.tb = tensorboardX.SummaryWriter(logging_directory)
        # Initialize trainer
        self.trainer = Trainer(network, force_cpu, self.push_enabled, self.place_enabled, num_rotations)
        # Find last executed iteration of pre-loaded log, and load execution info and RL variables
        if self.logger.logging_directory_exists and not self.is_testing:
            self.trainer.preload(self.logger.transitions_directory)
            self.trainer.load_snapshot(self.snapshot_file)
        elif args.snapshot_file:
            self.trainer.load_snapshot(self.snapshot_file)
        # Set random seed
        np.random.seed(random_seed)
        # Initialize variables for heuristic bootstrapping and exploration probability
        self.no_change_count = [2, 2] if not self.is_testing else [0, 0]
        self.explore_prob = 0.5 if not self.is_testing else 0.0
        # Cross-thread flags: loop() raises execute_action, agent() clears it and
        # sets mission_complete; run() sets shutdown_called on exit.
        self.mission_complete = False
        self.execute_action = False
        self.shutdown_called = False
        # prev_* fields hold the state of the previous iteration for reward/backprop.
        self.prev_primitive_action = None
        self.prev_grasp_success = None
        self.prev_push_success = None
        self.prev_place_success = None
        self.prev_color_heightmap = None
        self.prev_depth_heightmap = None
        self.prev_best_pix_ind = None
        self.prev_stack_height = 0
        self.last_task_complete = 0
        # Current-iteration network outputs and chosen action.
        self.push_predictions = None
        self.grasp_predictions = None
        self.place_predictions = None
        self.color_heightmap = None
        self.depth_heightmap = None
        self.primitive_action = None
        self.best_pix_ind = None
        self.predicted_value = None
    def policy(self):
        """
        Determine whether grasping or pushing or placing should be executed based on network predictions.

        Sets `self.primitive_action`, `self.best_pix_ind` and `self.predicted_value`
        (via `compute_action`) and appends exploit/explore and predicted-value
        entries to the trainer logs.

        Raises:
            NotImplementedError: if an unknown primitive action type is selected.
        """
        best_push_conf = np.max(self.push_predictions)
        best_grasp_conf = np.max(self.grasp_predictions)
        best_place_conf = np.max(self.place_predictions)
        logging.info('Primitive confidence scores: %f (push), %f (grasp), %f (place)' % (
            best_push_conf, best_grasp_conf, best_place_conf))
        # Exploitation (do best action) vs exploration (do other action)
        if self.explore_actions and not self.is_testing:
            explore_actions = np.random.uniform() < self.explore_prob
            logging.info('Strategy: explore (exploration probability: %f)' % self.explore_prob)
        else:
            explore_actions = False
        self.trainer.is_exploit_log.append([0 if explore_actions else 1])
        self.logger.write_to_log('is-exploit', self.trainer.is_exploit_log)
        # Select action type: place immediately after a successful grasp,
        # otherwise grasp, or push when its confidence dominates.
        self.primitive_action = 'grasp'
        if self.place_enabled and self.prev_primitive_action == 'grasp' and self.prev_grasp_success:
            self.primitive_action = 'place'
        elif self.push_enabled:
            if best_push_conf > best_grasp_conf:
                self.primitive_action = 'push'
            if explore_actions:
                # When exploring, pick push vs grasp by coin flip.
                self.primitive_action = 'push' if np.random.randint(0, 2) == 0 else 'grasp'
        # Get pixel location and rotation with highest affordance prediction (rotation, y, x)
        if self.primitive_action == 'push':
            self.compute_action(explore_actions, self.push_predictions)
        elif self.primitive_action == 'grasp':
            self.compute_action(explore_actions, self.grasp_predictions)
        elif self.primitive_action == 'place':
            self.compute_action(explore_actions, self.place_predictions)
        else:
            raise NotImplementedError('Primitive action type {} is not implemented'.format(self.primitive_action))
        # Save predicted confidence value
        self.trainer.predicted_value_log.append([self.predicted_value])
        self.logger.write_to_log('predicted-value', self.trainer.predicted_value_log)
def compute_action(self, explore_actions, predictions):
if explore_actions:
maximas = utils.k_largest_index_argpartition(predictions, k=10)
self.best_pix_ind = maximas[np.random.choice(maximas.shape[0])]
else:
self.best_pix_ind = np.unravel_index(np.argmax(predictions), predictions.shape)
self.predicted_value = predictions[self.best_pix_ind[0], self.best_pix_ind[1], self.best_pix_ind[2]]
    def agent(self):
        """
        Parallel thread to process network output and execute actions.

        Spins until `shutdown_called` (or max_iter is exceeded), waiting for the
        training thread to raise `execute_action`. It then selects a primitive via
        `policy`, converts the chosen heightmap pixel to a 3D robot position,
        logs/visualizes the choice, executes the primitive on the robot (60 s
        timeout) and saves the pre-action state for the next backprop step.
        """
        while not self.shutdown_called and self.trainer.iteration <= self.max_iter:
            if self.execute_action:
                # Select action based on policy
                self.policy()
                # Compute 3D position of pixel
                logging.info(
                    'Action: %s at (%d, %d, %d)' % (
                        self.primitive_action, self.best_pix_ind[0], self.best_pix_ind[1], self.best_pix_ind[2]))
                # best_pix_ind is (rotation, y, x); rotation index maps linearly onto [0, 360) degrees.
                best_rotation_angle = np.deg2rad(self.best_pix_ind[0] * (360.0 / self.trainer.model.num_rotations))
                best_pix_x = self.best_pix_ind[2]
                best_pix_y = self.best_pix_ind[1]
                primitive_position = [best_pix_x * self.heightmap_resolution + self.workspace_limits[0][0],
                                      best_pix_y * self.heightmap_resolution + self.workspace_limits[1][0],
                                      self.depth_heightmap[best_pix_y][best_pix_x] + self.workspace_limits[2][0]]
                # If pushing, adjust start position, and make sure z value is safe and not too low
                if self.primitive_action == 'push' or self.primitive_action == 'place':
                    finger_width = 0.02
                    safe_kernel_width = int(np.round((finger_width / 2) / self.heightmap_resolution))
                    # Clip the neighborhood window to the heightmap bounds.
                    local_region = self.depth_heightmap[
                                   max(best_pix_y - safe_kernel_width, 0):min(best_pix_y + safe_kernel_width + 1,
                                                                               self.depth_heightmap.shape[0]),
                                   max(best_pix_x - safe_kernel_width, 0):min(best_pix_x + safe_kernel_width + 1,
                                                                               self.depth_heightmap.shape[1])]
                    if local_region.size == 0:
                        safe_z_position = self.workspace_limits[2][0]
                    else:
                        safe_z_position = np.max(local_region) + self.workspace_limits[2][0]
                    primitive_position[2] = safe_z_position
                # Save executed primitive (first log column encodes the action id)
                if self.primitive_action == 'push':
                    self.trainer.executed_action_log.append(
                        [0, self.best_pix_ind[0], self.best_pix_ind[1], self.best_pix_ind[2]])  # 0 - push
                elif self.primitive_action == 'grasp':
                    self.trainer.executed_action_log.append(
                        [1, self.best_pix_ind[0], self.best_pix_ind[1], self.best_pix_ind[2]])  # 1 - grasp
                elif self.primitive_action == 'place':
                    self.trainer.executed_action_log.append(
                        [2, self.best_pix_ind[0], self.best_pix_ind[1], self.best_pix_ind[2]])  # 2 - place
                self.logger.write_to_log('executed-action', self.trainer.executed_action_log)
                # Visualize executed primitive, and affordances
                grasp_pred_vis = viz.get_prediction_vis(self.grasp_predictions, self.color_heightmap,
                                                        self.best_pix_ind, 'grasp')
                imgs = torch.from_numpy(grasp_pred_vis).permute(2, 0, 1)  # HWC -> CHW for tensorboard
                self.tb.add_image('grasp_pred', imgs, self.trainer.iteration)
                # grasp_pred_vis = viz.get_prediction_full_vis(self.grasp_predictions, self.color_heightmap, self.best_pix_ind)
                # imgs = torch.from_numpy(grasp_pred_vis).permute(2, 0, 1)
                # self.tb.add_image('grasp_pred_full', imgs, self.trainer.iteration)
                if self.push_enabled:
                    push_pred_vis = viz.get_prediction_vis(self.push_predictions, self.color_heightmap,
                                                           self.best_pix_ind, 'push')
                    imgs = torch.from_numpy(push_pred_vis).permute(2, 0, 1)
                    self.tb.add_image('push_pred', imgs, self.trainer.iteration)
                if self.place_enabled:
                    place_pred_vis = viz.get_prediction_vis(self.place_predictions, self.color_heightmap,
                                                            self.best_pix_ind, 'place')
                    imgs = torch.from_numpy(place_pred_vis).permute(2, 0, 1)
                    self.tb.add_image('place_pred', imgs, self.trainer.iteration)
                if self.save_visualizations:
                    if self.primitive_action == 'push':
                        self.logger.save_visualizations(self.trainer.iteration, push_pred_vis, 'push')
                    elif self.primitive_action == 'grasp':
                        self.logger.save_visualizations(self.trainer.iteration, grasp_pred_vis, 'grasp')
                    elif self.primitive_action == 'place':
                        self.logger.save_visualizations(self.trainer.iteration, place_pred_vis, 'place')
                # Initialize variables that influence reward
                push_success = False
                grasp_success = False
                place_success = False
                # Execute primitive in a worker so the 60 s timeout can be enforced.
                pool = concurrent.futures.ThreadPoolExecutor()
                try:
                    if self.primitive_action == 'push':
                        future = pool.submit(self.robot.push, primitive_position, best_rotation_angle)
                        push_success = future.result(timeout=60)
                        logging.info('Push successful: %r' % push_success)
                    elif self.primitive_action == 'grasp':
                        future = pool.submit(self.robot.grasp, primitive_position, best_rotation_angle)
                        grasp_success = future.result(timeout=60)
                        logging.info('Grasp successful: %r' % grasp_success)
                    elif self.primitive_action == 'place':
                        future = pool.submit(self.robot.place, primitive_position, best_rotation_angle)
                        place_success = future.result(timeout=60)
                        logging.info('Place successful: %r' % place_success)
                except concurrent.futures.TimeoutError:
                    logging.error('Robot execution timeout!')
                    self.mission_complete = False
                else:
                    self.mission_complete = True
                # Save information for next training step
                self.prev_color_heightmap = self.color_heightmap.copy()
                self.prev_depth_heightmap = self.depth_heightmap.copy()
                self.prev_grasp_success = grasp_success
                self.prev_push_success = push_success
                self.prev_place_success = place_success
                self.prev_primitive_action = self.primitive_action
                self.prev_best_pix_ind = self.best_pix_ind
                # Signal the training thread that the action finished.
                self.execute_action = False
            else:
                time.sleep(0.1)
def compute_reward(self, change_detected, stack_height):
# Compute current reward
current_reward = 0
if self.prev_primitive_action == 'push' and self.prev_push_success:
if change_detected:
if self.reward_type == 3:
current_reward = 0.75
else:
current_reward = 0.5
else:
self.prev_push_success = False
elif self.prev_primitive_action == 'grasp' and self.prev_grasp_success:
if self.reward_type < 4:
if (self.place_enabled and stack_height >= self.prev_stack_height) or (not self.place_enabled):
current_reward = 1.0
else:
self.prev_grasp_success = False
elif self.reward_type == 4:
if self.place_enabled:
if stack_height >= self.prev_stack_height:
current_reward = 1.0
else:
self.prev_grasp_success = False
current_reward = -0.5
else:
current_reward = 1.0
elif self.prev_primitive_action == 'place' and self.prev_place_success:
if stack_height > self.prev_stack_height:
current_reward = self.place_reward_scale * stack_height
else:
self.prev_place_success = False
# Compute future reward
if self.place_enabled and not change_detected and not self.prev_grasp_success and not self.prev_place_success:
future_reward = 0
elif not self.place_enabled and not change_detected and not self.prev_grasp_success:
future_reward = 0
elif self.reward_type > 1 and current_reward == 0:
future_reward = 0
else:
future_reward = self.predicted_value
expected_reward = current_reward + self.future_reward_discount * future_reward
return expected_reward, current_reward, future_reward
    def reward_function(self):
        """Compare current vs previous depth heightmap, estimate stack height,
        and delegate to `compute_reward`.

        Returns (expected_reward, current_reward). Side effects: updates
        `no_change_count` and `prev_stack_height`, and logs to TensorBoard
        when placing is enabled.
        """
        # Detect changes: binarize significant depth differences, treating NaNs
        # and very large jumps (> 0.3) as noise and tiny ones (< 0.01) as none.
        depth_diff = abs(self.depth_heightmap - self.prev_depth_heightmap)
        depth_diff[np.isnan(depth_diff)] = 0
        depth_diff[depth_diff > 0.3] = 0
        depth_diff[depth_diff < 0.01] = 0
        depth_diff[depth_diff > 0] = 1
        change_threshold = 300
        change_value = np.sum(depth_diff)
        # A successful grasp always counts as a scene change.
        change_detected = change_value > change_threshold or self.prev_grasp_success
        logging.info('Change detected: %r (value: %d)' % (change_detected, change_value))
        if change_detected:
            if self.prev_primitive_action == 'push':
                self.no_change_count[0] = 0
            elif self.prev_primitive_action == 'grasp' or self.prev_primitive_action == 'place':
                self.no_change_count[1] = 0
        else:
            if self.prev_primitive_action == 'push':
                self.no_change_count[0] += 1
            elif self.prev_primitive_action == 'grasp':
                self.no_change_count[1] += 1
        # Check stack height: bucket the median-filtered max depth into discrete
        # levels (thresholds presumably in meters, tuned to block size — TODO confirm).
        img_median = ndimage.median_filter(self.depth_heightmap, size=5)
        max_z = np.max(img_median)
        if max_z <= 0.069:
            stack_height = 1
        elif (max_z > 0.069) and (max_z <= 0.11):
            stack_height = 2
        elif (max_z > 0.11) and (max_z <= 0.156):
            stack_height = 3
        elif (max_z > 0.156) and (max_z <= 0.21):
            stack_height = 4
        else:
            stack_height = 0
        if self.place_enabled:
            logging.info('Current stack height is {}'.format(stack_height))
            self.tb.add_scalar('stack_height', stack_height, self.trainer.iteration)
        # Compute reward
        expected_reward, current_reward, future_reward = self.compute_reward(change_detected, stack_height)
        logging.info('Current reward: %f' % current_reward)
        logging.info('Future reward: %f' % future_reward)
        logging.info('Expected reward: %f + %f x %f = %f' % (
            current_reward, self.future_reward_discount, future_reward, expected_reward))
        self.prev_stack_height = stack_height
        return expected_reward, current_reward
    def experience_replay(self, prev_reward_value):
        """
        Sample a reward value from the same action as the current one which differs from the most recent reward value
        to reduce the chance of catastrophic forgetting.

        Picks a past transition of the same primitive but with the "opposite"
        reward outcome, biased towards high-surprise samples (large
        |predicted - label|) via a power-law draw, reloads its heightmaps from
        disk and backpropagates on it again.
        """
        sample_primitive_action = self.prev_primitive_action
        # Map the primitive to its log id and choose the opposite reward bucket.
        if sample_primitive_action == 'push':
            sample_primitive_action_id = 0
            sample_reward_value = 0 if prev_reward_value == 0.5 else 0.5
        elif sample_primitive_action == 'grasp':
            sample_primitive_action_id = 1
            sample_reward_value = 0 if prev_reward_value == 1 else 1
        elif sample_primitive_action == 'place':
            sample_primitive_action_id = 2
            sample_reward_value = 0 if prev_reward_value >= 1 else 1
        else:
            raise NotImplementedError(
                'ERROR: {} action is not yet supported in experience replay'.format(sample_primitive_action))
        # Get samples of the same primitive but with different results
        sample_ind = np.argwhere(np.logical_and(
            np.asarray(self.trainer.reward_value_log)[1:self.trainer.iteration, 0] == sample_reward_value,
            np.asarray(self.trainer.executed_action_log)[1:self.trainer.iteration, 0] == sample_primitive_action_id))
        if sample_ind.size > 0:
            # Find sample with highest surprise value
            sample_surprise_values = np.abs(np.asarray(self.trainer.predicted_value_log)[sample_ind[:, 0]] -
                                            np.asarray(self.trainer.label_value_log)[sample_ind[:, 0]])
            sorted_surprise_ind = np.argsort(sample_surprise_values[:, 0])
            sorted_sample_ind = sample_ind[sorted_surprise_ind, 0]
            pow_law_exp = 2
            # np.random.power favors values near 1, i.e. the high-surprise end.
            rand_sample_ind = int(np.round(np.random.power(pow_law_exp, 1) * (sample_ind.size - 1)))
            sample_iteration = sorted_sample_ind[rand_sample_ind]
            logging.info('Experience replay: iteration %d (surprise value: %f)' % (
                sample_iteration, sample_surprise_values[sorted_surprise_ind[rand_sample_ind]]))
            # Load sample RGB-D heightmap
            sample_color_heightmap = cv2.imread(
                os.path.join(self.logger.color_heightmaps_directory, '%06d.0.color.png' % sample_iteration))
            sample_color_heightmap = cv2.cvtColor(sample_color_heightmap, cv2.COLOR_BGR2RGB)
            sample_depth_heightmap = cv2.imread(
                os.path.join(self.logger.depth_heightmaps_directory, '%06d.0.depth.png' % sample_iteration), -1)
            # Depth was stored as scaled integers; restore to float meters (scale 1e5).
            sample_depth_heightmap = sample_depth_heightmap.astype(np.float32) / 100000
            # Compute forward pass with sample
            with torch.no_grad():
                sample_push_predictions, sample_grasp_predictions, sample_place_predictions = self.trainer.forward(
                    sample_color_heightmap, sample_depth_heightmap, is_volatile=True)
            # Get labels for sample and backpropagate
            sample_best_pix_ind = (np.asarray(self.trainer.executed_action_log)[sample_iteration, 1:4]).astype(int)
            self.trainer.backprop(sample_color_heightmap, sample_depth_heightmap, sample_primitive_action,
                                  sample_best_pix_ind, self.trainer.label_value_log[sample_iteration], self.filter_type)
            # Recompute prediction value and label for replay buffer
            if sample_primitive_action == 'push':
                self.trainer.predicted_value_log[sample_iteration] = [np.max(sample_push_predictions)]
            elif sample_primitive_action == 'grasp':
                self.trainer.predicted_value_log[sample_iteration] = [np.max(sample_grasp_predictions)]
            elif sample_primitive_action == 'place':
                self.trainer.predicted_value_log[sample_iteration] = [np.max(sample_place_predictions)]
        else:
            logging.info('Not enough prior training samples. Skipping experience replay.')
    def loop(self):
        """
        Main training/testing loop (one iteration).

        Captures a new RGB-D observation, decides whether the trial must be
        reset (empty table, stalled scene, completed stack), triggers the
        agent thread to act, then trains on the PREVIOUS transition while the
        robot executes, and finally synchronizes with the agent thread or
        resets the trial.
        """
        # Init current mission
        self.mission_complete = False
        reset_trial = False
        # Make sure simulation is still stable (if not, reset simulation)
        if self.is_sim:
            self.robot.check_sim()
        # Get latest RGB-D image
        color_img, depth_img = self.robot.get_camera_data()
        depth_img = depth_img * self.robot.cam_depth_scale  # Apply depth scale from calibration
        # Get heightmap from RGB-D image (by re-projecting 3D point cloud)
        self.color_heightmap, self.depth_heightmap = utils.get_heightmap(color_img, depth_img,
                                                                         self.robot.cam_intrinsics,
                                                                         self.robot.cam_pose, self.workspace_limits,
                                                                         self.heightmap_resolution)
        # Remove NaNs from the depth heightmap
        self.depth_heightmap[np.isnan(self.depth_heightmap)] = 0
        # Reset simulation or pause real-world training if table is empty
        stuff_count = np.zeros(self.depth_heightmap.shape)
        stuff_count[self.depth_heightmap > 0.02] = 1
        empty_threshold = 300
        if self.is_sim and self.is_testing:
            empty_threshold = 10
        if np.sum(stuff_count) < empty_threshold:
            logging.info('Not enough objects in view (value: %d)! Repositioning objects.' % (np.sum(stuff_count)))
            reset_trial = True
        # Reset simulation or pause real-world training if no change is detected for last 10 iterations
        if self.is_sim and self.no_change_count[0] + self.no_change_count[1] > 15:
            logging.info('No change is detected for last 15 iterations. Resetting simulation.')
            reset_trial = True
        if self.prev_stack_height >= self.goal_stack_height and self.place_enabled:
            logging.info('Stack completed. Repositioning objects.')
            reset_trial = True
        if not reset_trial:
            # Run forward pass with network to get affordances
            self.push_predictions, self.grasp_predictions, self.place_predictions = self.trainer.forward(
                self.color_heightmap, self.depth_heightmap, is_volatile=True)
            # Execute best primitive action on robot in another thread
            self.execute_action = True
        # Save RGB-D images and RGB-D heightmaps
        self.logger.save_images(self.trainer.iteration, color_img, depth_img, '0')
        self.logger.save_heightmaps(self.trainer.iteration, self.color_heightmap, self.depth_heightmap, '0')
        # Run training iteration in current thread (aka training thread)
        if self.prev_primitive_action is not None:
            # Compute training labels
            label_value, prev_reward_value = self.reward_function()
            # Backpropagate
            self.trainer.backprop(self.prev_color_heightmap, self.prev_depth_heightmap,
                                  self.prev_primitive_action, self.prev_best_pix_ind, label_value, self.filter_type)
            # Save training labels and reward
            self.trainer.label_value_log.append([label_value])
            self.trainer.reward_value_log.append([prev_reward_value])
            self.trainer.grasp_success_log.append([int(self.prev_grasp_success)])
            self.logger.write_to_log('label-value', self.trainer.label_value_log)
            self.logger.write_to_log('reward-value', self.trainer.reward_value_log)
            self.logger.write_to_log('grasp-success', self.trainer.grasp_success_log)
            if self.push_enabled:
                self.trainer.push_success_log.append([int(self.prev_push_success)])
                self.logger.write_to_log('push-success', self.trainer.push_success_log)
            if self.place_enabled:
                self.trainer.place_success_log.append([int(self.prev_place_success)])
                self.logger.write_to_log('place-success', self.trainer.place_success_log)
            # Save to tensorboard
            self.tb.add_scalar('loss', self.trainer.running_loss.mean(), self.trainer.iteration)
            if self.prev_primitive_action == 'grasp':
                self.tb.add_scalar('success-rate/grasp', self.prev_grasp_success, self.trainer.iteration)
            elif self.prev_primitive_action == 'push':
                self.tb.add_scalar('success-rate/push', self.prev_push_success, self.trainer.iteration)
            elif self.prev_primitive_action == 'place':
                self.tb.add_scalar('success-rate/place', self.prev_place_success, self.trainer.iteration)
            if not self.is_testing:
                # Adjust exploration probability: type 1 = exponential decay,
                # type 2 = loss-adaptive exploration (LAE) schedule.
                if self.explore_type == 1:
                    self.explore_prob = max(0.5 * np.power(0.99994, self.trainer.iteration),
                                            0.1) if self.explore_rate_decay else 0.5
                elif self.explore_type == 2:
                    f = (1.0 - np.exp((-self.trainer.running_loss.mean()) / self.LAE_sigma)) \
                        / (1.0 + np.exp((-self.trainer.running_loss.mean()) / self.LAE_sigma))
                    self.explore_prob = self.LAE_beta * f + (1 - self.LAE_beta) * self.explore_prob
            # Check for progress counting inconsistencies
            if len(self.trainer.reward_value_log) < self.trainer.iteration - 2:
                logging.warning(
                    'WARNING POSSIBLE CRITICAL ERROR DETECTED: log data index and trainer.iteration out of sync!!! '
                    'Experience Replay may break! '
                    'Check code for errors in indexes, continue statements etc.')
            if not self.experience_replay_disabled:
                # Do sampling for experience replay
                self.experience_replay(prev_reward_value)
            # Save model snapshot
            self.logger.save_backup_model(self.trainer.model)
            if self.trainer.iteration % 1000 == 0:
                self.logger.save_model(self.trainer.iteration, self.trainer.model)
                self.trainer.model.to(self.trainer.device)
        if not reset_trial:
            # Sync both action thread and training thread
            while self.execute_action:
                time.sleep(0.1)
            if self.mission_complete:
                logging.info('Mission complete')
            else:
                logging.warning('Robot execution failed. Restarting simulation..')
                self.robot.restart_sim()
        if reset_trial:
            if self.is_sim:
                self.robot.restart_sim()
                self.robot.add_objects()
            else:
                time.sleep(30)
            if self.is_testing:  # If at end of test run, re-load original weights (before test run)
                self.trainer.model.load_state_dict(torch.load(self.snapshot_file))
            self.trainer.task_complete_log.append([self.trainer.iteration])
            self.logger.write_to_log('task_complete', self.trainer.task_complete_log)
            self.tb.add_scalar('task_complete', self.trainer.iteration - self.last_task_complete, len(self.trainer.task_complete_log))
            self.last_task_complete = self.trainer.iteration
            self.no_change_count = [2, 2] if not self.is_testing else [0, 0]
            self.prev_stack_height = 0
            self.prev_primitive_action = None
    def run(self):
        """Start the agent thread and drive the train/test loop until done.

        The iteration counter only advances when the mission completed, so a
        failed robot execution repeats the same iteration. Testing stops after
        `max_test_trials` completed tasks.
        """
        agent_thread = threading.Thread(target=self.agent)
        agent_thread.daemon = True
        agent_thread.start()
        while self.trainer.iteration <= self.max_iter:
            logging.info('\n%s iteration: %d' % ('Testing' if self.is_testing else 'Training', self.trainer.iteration))
            # Main loop
            iteration_time_0 = time.time()
            self.loop()
            if self.mission_complete:
                self.trainer.iteration += 1
            iteration_time_1 = time.time()
            logging.info('Time elapsed: %f' % (iteration_time_1 - iteration_time_0))
            # Check for number of test trails completed
            if self.is_testing and len(self.trainer.task_complete_log) >= self.max_test_trials:
                break
        self.shutdown_called = True
        agent_thread.join()
    def teardown(self):
        """Shut the robot down and release trainer/robot references and cached GPU memory."""
        self.robot.shutdown()
        del self.trainer, self.robot
        torch.cuda.empty_cache()
if __name__ == '__main__':
    # Parse arguments
    parser = argparse.ArgumentParser(
        description='Train robotic agents to learn manipulation actions with deep reinforcement learning in PyTorch.')
    # --------------- Setup options ---------------
    parser.add_argument('--is_sim', dest='is_sim', action='store_true', default=True,
                        help='run in simulation?')
    parser.add_argument('--sim_port', dest='sim_port', type=int, action='store', default=19997,
                        help='port for simulation')
    parser.add_argument('--obj_mesh_dir', dest='obj_mesh_dir', action='store', default='simulation/objects/mixed_shapes',
                        help='directory containing 3D mesh files (.obj) of objects to be added to simulation')
    parser.add_argument('--num_obj', dest='num_obj', type=int, action='store', default=10,
                        help='number of objects to add to simulation')
    parser.add_argument('--heightmap_resolution', dest='heightmap_resolution', type=float, action='store',
                        default=0.002, help='meters per pixel of heightmap')
    parser.add_argument('--random_seed', dest='random_seed', type=int, action='store', default=123,
                        help='random seed for simulation and neural net initialization')
    parser.add_argument('--cpu', dest='force_cpu', action='store_true', default=False,
                        help='force code to run in CPU mode')
    # ------------- Algorithm options -------------
    parser.add_argument('--network', dest='network', action='store', default='grconvnet4',
                        help='Neural network architecture choice, options are grconvnet, efficientnet, denseunet')
    parser.add_argument('--num_rotations', dest='num_rotations', type=int, action='store', default=16)
    parser.add_argument('--push_enabled', dest='push_enabled', action='store_true', default=False)
    parser.add_argument('--place_enabled', dest='place_enabled', action='store_true', default=False)
    parser.add_argument('--reward_type', dest='reward_type', type=int, action='store', default=2)
    parser.add_argument('--filter_type', dest='filter_type', type=int, action='store', default=4)
    parser.add_argument('--experience_replay_disabled', dest='experience_replay_disabled', action='store_true',
                        default=False, help='disable prioritized experience replay')
    parser.add_argument('--future_reward_discount', dest='future_reward_discount', type=float, action='store',
                        default=0.5)
    parser.add_argument('--place_reward_scale', dest='place_reward_scale', type=float, action='store', default=1.0)
    parser.add_argument('--goal_stack_height', dest='goal_stack_height', type=int, action='store', default=4)
    parser.add_argument('--explore_actions', dest='explore_actions', type=int, action='store', default=1)
    parser.add_argument('--explore_type', dest='explore_type', type=int, action='store', default=1)
    parser.add_argument('--explore_rate_decay', dest='explore_rate_decay', action='store_true', default=True)
    parser.add_argument('--max_iter', dest='max_iter', action='store', type=int, default=50000,
                        help='max iter for training')
    # -------------- Testing options --------------
    parser.add_argument('--is_testing', dest='is_testing', action='store_true', default=False)
    parser.add_argument('--max_test_trials', dest='max_test_trials', type=int, action='store', default=30,
                        help='maximum number of test runs per case/scenario')
    parser.add_argument('--test_preset_cases', dest='test_preset_cases', action='store_true', default=False)
    parser.add_argument('--test_preset_file', dest='test_preset_file', action='store', default='')
    parser.add_argument('--test_preset_dir', dest='test_preset_dir', action='store', default='simulation/test-cases/')
    # ------ Pre-loading and logging options ------
    parser.add_argument('--snapshot_file', dest='snapshot_file', action='store')
    parser.add_argument('--logging_directory', dest='logging_directory', action='store')
    parser.add_argument('--save_visualizations', dest='save_visualizations', action='store_true', default=False,
                        help='save visualizations of model predictions?')
    # Run main program with specified arguments
    args = parser.parse_args()
    if args.is_testing and args.test_preset_cases:
        # Preset-case testing: run one full test per preset scene file.
        preset_files = os.listdir(args.test_preset_dir)
        preset_files = [os.path.abspath(os.path.join(args.test_preset_dir, filename)) for filename in preset_files]
        preset_files = sorted(preset_files)
        args.continue_logging = True
        for idx, preset_file in enumerate(preset_files):
            logging.info('Running test {}'.format(preset_file))
            args.test_preset_file = preset_file
            args.num_obj = 10
            # Derive a per-preset logging directory from the snapshot path; the
            # regex pattern must be a raw string ("\d" is an invalid escape in a
            # plain string literal and a SyntaxWarning since Python 3.12).
            args.logging_directory = args.snapshot_file.split('/')[0] + '/' + args.snapshot_file.split('/')[
                1] + '/preset-test/' + re.findall(r"\d+", args.snapshot_file.split('/')[3])[0] + '/' + str(idx)
            task = LearnManipulation(args)
            task.run()
            task.teardown()
    else:
        task = LearnManipulation(args)
        task.run()
        task.teardown()
| [
"time.sleep",
"torch.from_numpy",
"numpy.argsort",
"scipy.ndimage.median_filter",
"utils.viz.get_prediction_vis",
"logging.info",
"logging.error",
"os.listdir",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"numpy.asarray",
"numpy.max",
"numpy.random.seed",
"utils.utils.k_larges... | [((31756, 31900), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train robotic agents to learn manipulation actions with deep reinforcement learning in PyTorch."""'}), "(description=\n 'Train robotic agents to learn manipulation actions with deep reinforcement learning in PyTorch.'\n )\n", (31779, 31900), False, 'import argparse\n'), ((3231, 3262), 'utils.logger.Logger', 'Logger', (['logging_directory', 'args'], {}), '(logging_directory, args)\n', (3237, 3262), False, 'from utils.logger import Logger\n'), ((3507, 3552), 'tensorboardX.SummaryWriter', 'tensorboardX.SummaryWriter', (['logging_directory'], {}), '(logging_directory)\n', (3533, 3552), False, 'import tensorboardX\n'), ((3606, 3691), 'trainer.Trainer', 'Trainer', (['network', 'force_cpu', 'self.push_enabled', 'self.place_enabled', 'num_rotations'], {}), '(network, force_cpu, self.push_enabled, self.place_enabled,\n num_rotations)\n', (3613, 3691), False, 'from trainer import Trainer\n'), ((4115, 4142), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (4129, 4142), True, 'import numpy as np\n'), ((5299, 5328), 'numpy.max', 'np.max', (['self.push_predictions'], {}), '(self.push_predictions)\n', (5305, 5328), True, 'import numpy as np\n'), ((5355, 5385), 'numpy.max', 'np.max', (['self.grasp_predictions'], {}), '(self.grasp_predictions)\n', (5361, 5385), True, 'import numpy as np\n'), ((5412, 5442), 'numpy.max', 'np.max', (['self.place_predictions'], {}), '(self.place_predictions)\n', (5418, 5442), True, 'import numpy as np\n'), ((5451, 5592), 'logging.info', 'logging.info', (["('Primitive confidence scores: %f (push), %f (grasp), %f (place)' % (\n best_push_conf, best_grasp_conf, best_place_conf))"], {}), "(\n 'Primitive confidence scores: %f (push), %f (grasp), %f (place)' % (\n best_push_conf, best_grasp_conf, best_place_conf))\n", (5463, 5592), False, 'import logging\n'), ((17275, 17293), 'numpy.sum', 'np.sum', 
(['depth_diff'], {}), '(depth_diff)\n', (17281, 17293), True, 'import numpy as np\n'), ((17387, 17472), 'logging.info', 'logging.info', (["('Change detected: %r (value: %d)' % (change_detected, change_value))"], {}), "('Change detected: %r (value: %d)' % (change_detected,\n change_value))\n", (17399, 17472), False, 'import logging\n'), ((18000, 18051), 'scipy.ndimage.median_filter', 'ndimage.median_filter', (['self.depth_heightmap'], {'size': '(5)'}), '(self.depth_heightmap, size=5)\n', (18021, 18051), False, 'from scipy import ndimage\n'), ((18068, 18086), 'numpy.max', 'np.max', (['img_median'], {}), '(img_median)\n', (18074, 18086), True, 'import numpy as np\n'), ((18759, 18810), 'logging.info', 'logging.info', (["('Current reward: %f' % current_reward)"], {}), "('Current reward: %f' % current_reward)\n", (18771, 18810), False, 'import logging\n'), ((18819, 18868), 'logging.info', 'logging.info', (["('Future reward: %f' % future_reward)"], {}), "('Future reward: %f' % future_reward)\n", (18831, 18868), False, 'import logging\n'), ((18877, 19012), 'logging.info', 'logging.info', (["('Expected reward: %f + %f x %f = %f' % (current_reward, self.\n future_reward_discount, future_reward, expected_reward))"], {}), "('Expected reward: %f + %f x %f = %f' % (current_reward, self.\n future_reward_discount, future_reward, expected_reward))\n", (18889, 19012), False, 'import logging\n'), ((23743, 23887), 'utils.utils.get_heightmap', 'utils.get_heightmap', (['color_img', 'depth_img', 'self.robot.cam_intrinsics', 'self.robot.cam_pose', 'self.workspace_limits', 'self.heightmap_resolution'], {}), '(color_img, depth_img, self.robot.cam_intrinsics, self.\n robot.cam_pose, self.workspace_limits, self.heightmap_resolution)\n', (23762, 23887), False, 'from utils import utils, viz\n'), ((24311, 24347), 'numpy.zeros', 'np.zeros', (['self.depth_heightmap.shape'], {}), '(self.depth_heightmap.shape)\n', (24319, 24347), True, 'import numpy as np\n'), ((30750, 30785), 'threading.Thread', 
'threading.Thread', ([], {'target': 'self.agent'}), '(target=self.agent)\n', (30766, 30785), False, 'import threading\n'), ((31666, 31690), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (31688, 31690), False, 'import torch\n'), ((36175, 36207), 'os.listdir', 'os.listdir', (['args.test_preset_dir'], {}), '(args.test_preset_dir)\n', (36185, 36207), False, 'import os\n'), ((539, 573), 'os.path.abspath', 'os.path.abspath', (['args.obj_mesh_dir'], {}), '(args.obj_mesh_dir)\n', (554, 573), False, 'import os\n'), ((799, 862), 'numpy.asarray', 'np.asarray', (['[[-0.724, -0.276], [-0.224, 0.224], [-0.0001, 0.8]]'], {}), '([[-0.724, -0.276], [-0.224, 0.224], [-0.0001, 0.8]])\n', (809, 862), True, 'import numpy as np\n'), ((913, 972), 'numpy.asarray', 'np.asarray', (['[[0.3, 0.748], [-0.224, 0.224], [-0.255, -0.1]]'], {}), '([[0.3, 0.748], [-0.224, 0.224], [-0.255, -0.1]])\n', (923, 972), True, 'import numpy as np\n'), ((2158, 2196), 'os.path.abspath', 'os.path.abspath', (['args.test_preset_file'], {}), '(args.test_preset_file)\n', (2173, 2196), False, 'import os\n'), ((2379, 2418), 'os.path.abspath', 'os.path.abspath', (['args.logging_directory'], {}), '(args.logging_directory)\n', (2394, 2418), False, 'import os\n'), ((2452, 2513), 'os.path.join', 'os.path.join', (['logging_directory', '"""models/snapshot-backup.pth"""'], {}), "(logging_directory, 'models/snapshot-backup.pth')\n", (2464, 2513), False, 'import os\n'), ((2950, 3093), 'robot.SimRobot', 'SimRobot', (['sim_port', 'obj_mesh_dir', 'num_obj', 'self.workspace_limits', 'self.is_testing', 'test_preset_cases', 'test_preset_file', 'self.place_enabled'], {}), '(sim_port, obj_mesh_dir, num_obj, self.workspace_limits, self.\n is_testing, test_preset_cases, test_preset_file, self.place_enabled)\n', (2958, 3093), False, 'from robot import SimRobot\n'), ((5809, 5897), 'logging.info', 'logging.info', (["('Strategy: explore (exploration probability: %f)' % self.explore_prob)"], {}), "('Strategy: explore 
(exploration probability: %f)' % self.\n explore_prob)\n", (5821, 5897), False, 'import logging\n'), ((7453, 7506), 'utils.utils.k_largest_index_argpartition', 'utils.k_largest_index_argpartition', (['predictions'], {'k': '(10)'}), '(predictions, k=10)\n', (7487, 7506), False, 'from utils import utils, viz\n'), ((17073, 17093), 'numpy.isnan', 'np.isnan', (['depth_diff'], {}), '(depth_diff)\n', (17081, 17093), True, 'import numpy as np\n'), ((20758, 20798), 'numpy.argsort', 'np.argsort', (['sample_surprise_values[:, 0]'], {}), '(sample_surprise_values[:, 0])\n', (20768, 20798), True, 'import numpy as np\n'), ((21073, 21234), 'logging.info', 'logging.info', (["('Experience replay: iteration %d (surprise value: %f)' % (sample_iteration,\n sample_surprise_values[sorted_surprise_ind[rand_sample_ind]]))"], {}), "('Experience replay: iteration %d (surprise value: %f)' % (\n sample_iteration, sample_surprise_values[sorted_surprise_ind[\n rand_sample_ind]]))\n", (21085, 21234), False, 'import logging\n'), ((21480, 21535), 'cv2.cvtColor', 'cv2.cvtColor', (['sample_color_heightmap', 'cv2.COLOR_BGR2RGB'], {}), '(sample_color_heightmap, cv2.COLOR_BGR2RGB)\n', (21492, 21535), False, 'import cv2\n'), ((23033, 23111), 'logging.info', 'logging.info', (['"""Not enough prior training samples. Skipping experience replay."""'], {}), "('Not enough prior training samples. Skipping experience replay.')\n", (23045, 23111), False, 'import logging\n'), ((24178, 24208), 'numpy.isnan', 'np.isnan', (['self.depth_heightmap'], {}), '(self.depth_heightmap)\n', (24186, 24208), True, 'import numpy as np\n'), ((24519, 24538), 'numpy.sum', 'np.sum', (['stuff_count'], {}), '(stuff_count)\n', (24525, 24538), True, 'import numpy as np\n'), ((24904, 24992), 'logging.info', 'logging.info', (['"""No change is detected for last 15 iterations. Resetting simulation."""'], {}), "(\n 'No change is detected for last 15 iterations. 
Resetting simulation.')\n", (24916, 24992), False, 'import logging\n'), ((25116, 25171), 'logging.info', 'logging.info', (['"""Stack completed. Repositioning objects."""'], {}), "('Stack completed. Repositioning objects.')\n", (25128, 25171), False, 'import logging\n'), ((30917, 31031), 'logging.info', 'logging.info', (['("""\n%s iteration: %d""" % (\'Testing\' if self.is_testing else \'Training\',\n self.trainer.iteration))'], {}), '("""\n%s iteration: %d""" % (\'Testing\' if self.is_testing else\n \'Training\', self.trainer.iteration))\n', (30929, 31031), False, 'import logging\n'), ((31081, 31092), 'time.time', 'time.time', ([], {}), '()\n', (31090, 31092), False, 'import time\n'), ((31230, 31241), 'time.time', 'time.time', ([], {}), '()\n', (31239, 31241), False, 'import time\n'), ((31254, 31326), 'logging.info', 'logging.info', (["('Time elapsed: %f' % (iteration_time_1 - iteration_time_0))"], {}), "('Time elapsed: %f' % (iteration_time_1 - iteration_time_0))\n", (31266, 31326), False, 'import logging\n'), ((2579, 2618), 'os.path.abspath', 'os.path.abspath', (['args.logging_directory'], {}), '(args.logging_directory)\n', (2594, 2618), False, 'import os\n'), ((2652, 2687), 'os.path.abspath', 'os.path.abspath', (['args.snapshot_file'], {}), '(args.snapshot_file)\n', (2667, 2687), False, 'import os\n'), ((5757, 5776), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5774, 5776), True, 'import numpy as np\n'), ((7547, 7581), 'numpy.random.choice', 'np.random.choice', (['maximas.shape[0]'], {}), '(maximas.shape[0])\n', (7563, 7581), True, 'import numpy as np\n'), ((7646, 7668), 'numpy.argmax', 'np.argmax', (['predictions'], {}), '(predictions)\n', (7655, 7668), True, 'import numpy as np\n'), ((8177, 8316), 'logging.info', 'logging.info', (["('Action: %s at (%d, %d, %d)' % (self.primitive_action, self.best_pix_ind[0\n ], self.best_pix_ind[1], self.best_pix_ind[2]))"], {}), "('Action: %s at (%d, %d, %d)' % (self.primitive_action, self.\n 
best_pix_ind[0], self.best_pix_ind[1], self.best_pix_ind[2]))\n", (8189, 8316), False, 'import logging\n'), ((8396, 8473), 'numpy.deg2rad', 'np.deg2rad', (['(self.best_pix_ind[0] * (360.0 / self.trainer.model.num_rotations))'], {}), '(self.best_pix_ind[0] * (360.0 / self.trainer.model.num_rotations))\n', (8406, 8473), True, 'import numpy as np\n'), ((10938, 11039), 'utils.viz.get_prediction_vis', 'viz.get_prediction_vis', (['self.grasp_predictions', 'self.color_heightmap', 'self.best_pix_ind', '"""grasp"""'], {}), "(self.grasp_predictions, self.color_heightmap, self.\n best_pix_ind, 'grasp')\n", (10960, 11039), False, 'from utils import utils, viz\n'), ((14863, 14878), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (14873, 14878), False, 'import time\n'), ((21350, 21445), 'os.path.join', 'os.path.join', (['self.logger.color_heightmaps_directory', "('%06d.0.color.png' % sample_iteration)"], {}), "(self.logger.color_heightmaps_directory, '%06d.0.color.png' %\n sample_iteration)\n", (21362, 21445), False, 'import os\n'), ((21601, 21696), 'os.path.join', 'os.path.join', (['self.logger.depth_heightmaps_directory', "('%06d.0.depth.png' % sample_iteration)"], {}), "(self.logger.depth_heightmaps_directory, '%06d.0.depth.png' %\n sample_iteration)\n", (21613, 21696), False, 'import os\n'), ((21851, 21866), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21864, 21866), False, 'import torch\n'), ((29570, 29585), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (29580, 29585), False, 'import time\n'), ((29641, 29673), 'logging.info', 'logging.info', (['"""Mission complete"""'], {}), "('Mission complete')\n", (29653, 29673), False, 'import logging\n'), ((29708, 29774), 'logging.warning', 'logging.warning', (['"""Robot execution failed. Restarting simulation.."""'], {}), "('Robot execution failed. 
Restarting simulation..')\n", (29723, 29774), False, 'import logging\n'), ((29985, 29999), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (29995, 29999), False, 'import time\n'), ((36248, 36292), 'os.path.join', 'os.path.join', (['args.test_preset_dir', 'filename'], {}), '(args.test_preset_dir, filename)\n', (36260, 36292), False, 'import os\n'), ((11606, 11705), 'utils.viz.get_prediction_vis', 'viz.get_prediction_vis', (['self.push_predictions', 'self.color_heightmap', 'self.best_pix_ind', '"""push"""'], {}), "(self.push_predictions, self.color_heightmap, self.\n best_pix_ind, 'push')\n", (11628, 11705), False, 'from utils import utils, viz\n'), ((11994, 12095), 'utils.viz.get_prediction_vis', 'viz.get_prediction_vis', (['self.place_predictions', 'self.color_heightmap', 'self.best_pix_ind', '"""place"""'], {}), "(self.place_predictions, self.color_heightmap, self.\n best_pix_ind, 'place')\n", (12016, 12095), False, 'from utils import utils, viz\n'), ((22659, 22690), 'numpy.max', 'np.max', (['sample_push_predictions'], {}), '(sample_push_predictions)\n', (22665, 22690), True, 'import numpy as np\n'), ((24651, 24670), 'numpy.sum', 'np.sum', (['stuff_count'], {}), '(stuff_count)\n', (24657, 24670), True, 'import numpy as np\n'), ((28663, 28873), 'logging.warning', 'logging.warning', (['"""WARNING POSSIBLE CRITICAL ERROR DETECTED: log data index and trainer.iteration out of sync!!! Experience Replay may break! Check code for errors in indexes, continue statements etc."""'], {}), "(\n 'WARNING POSSIBLE CRITICAL ERROR DETECTED: log data index and trainer.iteration out of sync!!! Experience Replay may break! 
Check code for errors in indexes, continue statements etc.'\n )\n", (28678, 28873), False, 'import logging\n'), ((30153, 30183), 'torch.load', 'torch.load', (['self.snapshot_file'], {}), '(self.snapshot_file)\n', (30163, 30183), False, 'import torch\n'), ((9176, 9230), 'numpy.round', 'np.round', (['(finger_width / 2 / self.heightmap_resolution)'], {}), '(finger_width / 2 / self.heightmap_resolution)\n', (9184, 9230), True, 'import numpy as np\n'), ((11114, 11146), 'torch.from_numpy', 'torch.from_numpy', (['grasp_pred_vis'], {}), '(grasp_pred_vis)\n', (11130, 11146), False, 'import torch\n'), ((13387, 13437), 'logging.info', 'logging.info', (["('Push successful: %r' % push_success)"], {}), "('Push successful: %r' % push_success)\n", (13399, 13437), False, 'import logging\n'), ((14126, 14167), 'logging.error', 'logging.error', (['"""Robot execution timeout!"""'], {}), "('Robot execution timeout!')\n", (14139, 14167), False, 'import logging\n'), ((20210, 20251), 'numpy.asarray', 'np.asarray', (['self.trainer.reward_value_log'], {}), '(self.trainer.reward_value_log)\n', (20220, 20251), True, 'import numpy as np\n'), ((20317, 20361), 'numpy.asarray', 'np.asarray', (['self.trainer.executed_action_log'], {}), '(self.trainer.executed_action_log)\n', (20327, 20361), True, 'import numpy as np\n'), ((20555, 20599), 'numpy.asarray', 'np.asarray', (['self.trainer.predicted_value_log'], {}), '(self.trainer.predicted_value_log)\n', (20565, 20599), True, 'import numpy as np\n'), ((20664, 20704), 'numpy.asarray', 'np.asarray', (['self.trainer.label_value_log'], {}), '(self.trainer.label_value_log)\n', (20674, 20704), True, 'import numpy as np\n'), ((20937, 20968), 'numpy.random.power', 'np.random.power', (['pow_law_exp', '(1)'], {}), '(pow_law_exp, 1)\n', (20952, 20968), True, 'import numpy as np\n'), ((22160, 22204), 'numpy.asarray', 'np.asarray', (['self.trainer.executed_action_log'], {}), '(self.trainer.executed_action_log)\n', (22170, 22204), True, 'import numpy as np\n'), 
((22815, 22847), 'numpy.max', 'np.max', (['sample_grasp_predictions'], {}), '(sample_grasp_predictions)\n', (22821, 22847), True, 'import numpy as np\n'), ((6519, 6542), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (6536, 6542), True, 'import numpy as np\n'), ((9924, 9944), 'numpy.max', 'np.max', (['local_region'], {}), '(local_region)\n', (9930, 9944), True, 'import numpy as np\n'), ((11787, 11818), 'torch.from_numpy', 'torch.from_numpy', (['push_pred_vis'], {}), '(push_pred_vis)\n', (11803, 11818), False, 'import torch\n'), ((12178, 12210), 'torch.from_numpy', 'torch.from_numpy', (['place_pred_vis'], {}), '(place_pred_vis)\n', (12194, 12210), False, 'import torch\n'), ((13691, 13743), 'logging.info', 'logging.info', (["('Grasp successful: %r' % grasp_success)"], {}), "('Grasp successful: %r' % grasp_success)\n", (13703, 13743), False, 'import logging\n'), ((22972, 23004), 'numpy.max', 'np.max', (['sample_place_predictions'], {}), '(sample_place_predictions)\n', (22978, 23004), True, 'import numpy as np\n'), ((13997, 14049), 'logging.info', 'logging.info', (["('Place successful: %r' % place_success)"], {}), "('Place successful: %r' % place_success)\n", (14009, 14049), False, 'import logging\n'), ((28033, 28074), 'numpy.power', 'np.power', (['(0.99994)', 'self.trainer.iteration'], {}), '(0.99994, self.trainer.iteration)\n', (28041, 28074), True, 'import numpy as np\n')] |
import logging
import sys
import threading
import unittest
from time import sleep
import importlib_resources
import mock
import numpy as np
from pepper.framework.application.intention import AbstractIntention
from pepper.framework.backend.abstract.microphone import AbstractMicrophone
from pepper.framework.backend.abstract.text_to_speech import AbstractTextToSpeech
from pepper.framework.application.application import AbstractApplication
from pepper.framework.backend.abstract.backend import AbstractBackend
from pepper.framework.backend.container import BackendContainer
from pepper.framework.application.speech_recognition import SpeechRecognitionComponent
from pepper.framework.application.text_to_speech import TextToSpeechComponent
from pepper.framework.infra.config.api import ConfigurationContainer
from pepper.framework.infra.config.local import LocalConfigurationContainer
from pepper.framework.infra.di_container import singleton, DIContainer
from pepper.framework.infra.event.api import EventBusContainer
from pepper.framework.infra.event.memory import SynchronousEventBusContainer
from pepper.framework.infra.resource.api import ResourceContainer
from pepper.framework.infra.resource.threaded import ThreadedResourceContainer
from pepper.framework.sensor.api import AbstractTranslator, AbstractASR, UtteranceHypothesis, SensorContainer
from pepper.framework.sensor.container import DefaultSensorWorkerContainer
from pepper.framework.sensor.vad import AbstractVAD
from test import util
# Module-level logger writing to stdout; raise the level to DEBUG to see
# output from the listening/talking helper threads below.
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(stream=sys.stdout))
logger.setLevel(logging.ERROR)
# One 80-sample frame of zeros (int16) fed to the mocked microphone.
AUDIO_FRAME = np.zeros(80).astype(np.int16)
def setupTestComponents():
    """Workaround to overwrite static state in DIContainers across tests.

    Rebinds the module-level ``TestIntention`` and ``TestApplication``
    classes on every call so that each test gets freshly-defined DI
    containers instead of singletons left over from a previous test.
    """
    global TestIntention
    global TestApplication
    # Load the packaged test configuration into the local config container.
    with importlib_resources.path(__package__, "test.config") as test_config:
        LocalConfigurationContainer.load_configuration(str(test_config), [])
    class TestBackendContainer(BackendContainer, EventBusContainer, ResourceContainer):
        # Provide the fake backend as a singleton through the DI container.
        @property
        @singleton
        def backend(self):
            return TestBackend(self.event_bus, self.resource_manager)
    class TestTextToSpeech(AbstractTextToSpeech):
        # TTS stub that records utterances; blocks on `latch` until a test
        # releases it, so tests can control when "speech" completes.
        def __init__(self, event_bus, resource_manager):
            super(TestTextToSpeech, self).__init__("nl", event_bus, resource_manager)
            self.latch = threading.Event()
            self.utterances = []
        def on_text_to_speech(self, text, animation=None):
            # type: (Union[str, unicode], Optional[str]) -> None
            self.latch.wait()
            self.utterances.append(text)
    class TestBackend(AbstractBackend):
        # Backend wired with only a microphone and the stub TTS; all other
        # devices are absent in the test environment.
        def __init__(self, event_bus, resource_manager):
            super(TestBackend, self).__init__(microphone=AbstractMicrophone(8000, 1, event_bus, resource_manager),
                                              text_to_speech=TestTextToSpeech(event_bus, resource_manager),
                                              camera=None, motion=None, led=None, tablet=None)
    class TestVAD(AbstractVAD):
        # VAD whose speech decision is driven by a thread-safe flag that the
        # test's ListeningThread toggles.
        def __init__(self, resource_manager, configuration_manager):
            super(TestVAD, self).__init__(resource_manager, configuration_manager)
            self.speech_flag = ThreadsafeBoolean()
        def _is_speech(self, frame):
            return self.speech_flag.val
    class TestSensorContainer(BackendContainer, SensorContainer, ConfigurationContainer):
        @property
        @singleton
        def vad(self):
            return TestVAD(self.resource_manager, self.config_manager)
        def asr(self, language="nl"):
            # ASR mock: every transcription yields the same fixed hypothesis.
            mock_asr = mock.create_autospec(AbstractASR)
            mock_asr.transcribe.return_value = [UtteranceHypothesis("Test one two", 1.0)]
            return mock_asr
        def translator(self, source_language, target_language):
            # Translator mock that just prefixes the input text.
            mock_translator = mock.create_autospec(AbstractTranslator)
            mock_translator.translate.side_effect = lambda text: "Translated: " + text
            return mock_translator
        @property
        def face_detector(self):
            return None
        def object_detector(self, target):
            return None
    class ApplicationContainer(TestBackendContainer,
                               TestSensorContainer,
                               SynchronousEventBusContainer,
                               ThreadedResourceContainer,
                               LocalConfigurationContainer):
        pass
    class TestIntention(ApplicationContainer, AbstractIntention, DefaultSensorWorkerContainer,
                        SpeechRecognitionComponent, TextToSpeechComponent):
        # Intention under test: collects every speech hypothesis it receives.
        def __init__(self):
            super(TestIntention, self).__init__()
            self.hypotheses = []
        def on_transcript(self, hypotheses, audio):
            self.hypotheses.extend(hypotheses)
    class TestApplication(AbstractApplication, ApplicationContainer):
        def __init__(self, intention):
            super(TestApplication, self).__init__(intention)
class ListeningThread(threading.Thread):
    """Simulate speech by feeding audio frames to the microphone.

    A test calls :meth:`listen` to request a number of simulated speech
    bursts.  The thread raises the VAD speech flag, pushes enough frames
    to fill the VAD buffer, then lowers the flag and pushes silence to
    flush it.  Latches let the test synchronize with the start, middle
    and end of each burst.
    """
    def __init__(self, speech_flag, microphone, webrtc_buffer_size, name="Listening"):
        super(ListeningThread, self).__init__(name=name)
        self._webrtc_buffer_size = webrtc_buffer_size
        self._speech_flag = speech_flag
        self._microphone = microphone
        self.running = True
        self.listen_to_frames = False
        self.listening_latch = threading.Event()
        self.exit_latch = threading.Event()
        self.continue_speech_latch = threading.Event()
        self.in_speech_latch = threading.Event()
    def stop(self):
        # Release every latch the run loop might be blocked on so the
        # thread can observe running == False and exit.
        self.running = False
        self.exit_latch.wait(1)
        self.listening_latch.set()
        self.exit_latch.set()
        self.continue_speech_latch.set()
        self.in_speech_latch.set()
    def listen(self, frames, continue_latch=False):
        # Arm fresh latches for the next listening cycle, then wake run().
        self.in_speech_latch = threading.Event()
        self.continue_speech_latch = threading.Event()
        self.exit_latch = threading.Event()
        self.listen_to_frames = frames
        self.listening_latch.set()
        if continue_latch:
            # Caller wants to pause mid-speech: it must set the returned
            # continue latch itself before the burst can finish.
            return self.in_speech_latch, self.exit_latch, self.continue_speech_latch
        else:
            self.continue_speech_latch.set()
            return self.in_speech_latch, self.exit_latch
    def run(self):
        logger.debug("Thread %s started", self.name)
        self.listening_latch.wait()
        while self.running:
            logger.debug("Started listening")
            for i in range(self.listen_to_frames):
                self._speech_flag.val = True
                # Fill speech buffer
                buffer_size = self._webrtc_buffer_size
                for j in range(2 * buffer_size + 10):
                    self._microphone.on_audio(AUDIO_FRAME)
                    logger.debug("Listened to frame %s-%s", i, j)
                    sleep(0.001)
                self.in_speech_latch.set()
                self.continue_speech_latch.wait()
                self._speech_flag.val = False
                # Empty speech buffer
                for j in range(buffer_size + 10):
                    self._microphone.on_audio(AUDIO_FRAME)
                    logger.debug("Void %s-%s", i, j)
                    sleep(0.001)
            self.listening_latch.clear()
            logger.debug("Stopped listening")
            self.exit_latch.set()
            self.listening_latch.wait()
        logger.debug("Thread %s stopped", self.name)
class TalkingThread(threading.Thread):
    """Drive the intention's text-to-speech from a background thread.

    A test hands utterances to :meth:`talk` and waits on the returned
    latch, which is set once every utterance has been spoken (each
    ``say`` call blocks until the TTS stub releases it).
    """
    def __init__(self, intention, name="Talking"):
        super(TalkingThread, self).__init__(name=name)
        self._intention = intention
        self.running = True
        self.talking = False
        # Utterances for the next talk cycle; previously this attribute was
        # only created inside talk(), leaving the instance incomplete.
        self.utterances = []
        self.talking_latch = threading.Event()
        self.exit_latch = threading.Event()
    def stop(self):
        # Release the latches the run loop may be blocked on so it can
        # observe running == False and exit.
        self.running = False
        self.exit_latch.wait(1)
        self.talking_latch.set()
        self.exit_latch.set()
    def talk(self, utterances):
        """Queue *utterances* and return a latch set once they are spoken."""
        self.exit_latch = threading.Event()
        self.utterances = utterances
        self.talking_latch.set()
        return self.exit_latch
    def run(self):
        # Fixed broken %-format ("Thread % started"): the thread name was
        # never interpolated and logging raised a formatting error.
        logger.debug("Thread %s started", self.name)
        self.talking_latch.wait()
        while self.running:
            logger.debug("Started talking")
            for utterance in self.utterances:
                self._intention.say(utterance, block=True)
                logger.debug("Said utterance")
                sleep(0.001)
            self.talking_latch.clear()
            logger.debug("Stopped talking")
            self.exit_latch.set()
            self.talking_latch.wait()
        logger.debug("Thread %s stopped", self.name)
class ResourceITest(unittest.TestCase):
    """Integration tests for resource arbitration between VAD and TTS.

    Each test starts a full application with a simulated-listening thread
    and a talking thread, then checks that speech recognition and
    text-to-speech take turns rather than running concurrently.
    """
    def setUp(self):
        setupTestComponents()
        self.intention = TestIntention()
        self.application = TestApplication(self.intention)
        self.application._start()
        sleep(1)
        webrtc_buffer_size = self.intention.config_manager\
            .get_config("pepper.framework.sensors.vad.webrtc")\
            .get_int("buffer_size")
        self.threads = [ListeningThread(self.intention.vad.speech_flag, self.intention.backend.microphone, webrtc_buffer_size),
                        TalkingThread(self.intention)]
        for thread in self.threads:
            thread.start()
    def tearDown(self):
        self.application._stop()
        for thread in self.threads:
            thread.stop()
            thread.join()
        del self.application
        # Clear DI singletons so the next test builds fresh containers.
        DIContainer._singletons.clear()
        # Try to ensure that the application is stopped
        try:
            util.await_predicate(lambda: threading.active_count() < 2, max=100)
        except Exception:
            # Best effort: fall back to a fixed grace period.  (Was a bare
            # "except:", which also swallowed KeyboardInterrupt/SystemExit.)
            sleep(1)
    def test_listen(self):
        """Two speech bursts yield two identical fixed ASR hypotheses."""
        listening_thread, _ = self.threads
        _, exit_speech_latch = listening_thread.listen(2)
        exit_speech_latch.wait()
        sleep(0.1)
        self.assertEqual(2, len(self.intention.hypotheses))
        self.assertEqual('Test one two', self.intention.hypotheses[0].transcript)
        self.assertEqual(1.0, self.intention.hypotheses[0].confidence)
        self.assertEqual('Test one two', self.intention.hypotheses[1].transcript)
        self.assertEqual(1.0, self.intention.hypotheses[1].confidence)
    def test_talk(self):
        """A queued utterance reaches the TTS stub."""
        self.intention.backend.text_to_speech.latch.set()
        _, talking_thread = self.threads
        talk_latch = talking_thread.talk(["Test"])
        self.assertTrue(talk_latch.wait())
        sleep(0.1)
        self.assertEqual(1, len(self.intention.backend.text_to_speech.utterances))
        self.assertEqual("Test", self.intention.backend.text_to_speech.utterances[0])
    def test_talk_after_vad_stops(self):
        """TTS requested mid-speech is deferred until the VAD releases."""
        self.intention.backend.text_to_speech.latch.set()
        listening_thread, talking_thread = self.threads
        in_speech_latch, exit_speech_latch, continue_speech_latch = listening_thread.listen(1, continue_latch=True)
        in_speech_latch.wait()
        exit_talk_latch = talking_thread.talk(["Test"])
        # While speech is active the utterance must not have been spoken yet.
        self.assertFalse(exit_talk_latch.wait(0.5))
        continue_speech_latch.set()
        exit_speech_latch.wait()
        exit_talk_latch.wait()
        sleep(0.1)
        self.assertEqual(1, len(self.intention.backend.text_to_speech.utterances))
        self.assertEqual("Test", self.intention.backend.text_to_speech.utterances[0])
    def test_listen_after_talk_stops(self):
        """Speech is ignored while talking and picked up again afterwards."""
        listening_thread, talking_thread = self.threads
        in_speech_latch, exit_speech_latch, continue_speech_latch = listening_thread.listen(1, continue_latch=True)
        in_speech_latch.wait()
        exit_talk_latch = talking_thread.talk(["Test"])
        self.assertFalse(exit_talk_latch.wait(0.5))
        continue_speech_latch.set()
        exit_speech_latch.wait()
        # Ignoring speech while talking
        self.assertEqual(1, len(self.intention.hypotheses))
        self.assertEqual('Test one two', self.intention.hypotheses[0].transcript)
        self.assertEqual(1.0, self.intention.hypotheses[0].confidence)
        _, exit_speech_latch = listening_thread.listen(1)
        exit_speech_latch.wait()
        self.assertEqual(1, len(self.intention.hypotheses))
        self.assertEqual('Test one two', self.intention.hypotheses[0].transcript)
        self.assertEqual(1.0, self.intention.hypotheses[0].confidence)
        # Pick up speech again after talking
        _, exit_speech_latch, continue_speech_latch = listening_thread.listen(1, continue_latch=True)
        continue_speech_latch.set()
        self.intention.backend.text_to_speech.latch.set()
        exit_talk_latch.wait()
        exit_speech_latch.wait()
        sleep(0.1)
        self.assertEqual(2, len(self.intention.hypotheses))
        self.assertEqual('Test one two', self.intention.hypotheses[0].transcript)
        self.assertEqual(1.0, self.intention.hypotheses[0].confidence)
        self.assertEqual('Test one two', self.intention.hypotheses[1].transcript)
        self.assertEqual(1.0, self.intention.hypotheses[1].confidence)
    def await_predicate(self, predicate, max=100, msg="predicate"):
        """Poll *predicate* every 10 ms, failing the test after *max* tries."""
        cnt = 0
        while not predicate() and cnt < max:
            sleep(0.01)
            cnt += 1
        if cnt == max:
            self.fail("Test timed out waiting for " + msg)
class ThreadsafeBoolean(object):
    """Boolean flag whose reads and writes are serialized by a lock."""
    def __init__(self):
        self._guard = threading.Lock()
        self._state = False
    @property
    def val(self):
        """Current value, read under the lock."""
        with self._guard:
            return self._state
    @val.setter
    def val(self, value):
        """Store a new value under the lock."""
        with self._guard:
            self._state = value
if __name__ == '__main__':
unittest.main() | [
"logging.getLogger",
"pepper.framework.sensor.api.UtteranceHypothesis",
"logging.StreamHandler",
"threading.active_count",
"threading.Lock",
"time.sleep",
"pepper.framework.backend.abstract.microphone.AbstractMicrophone",
"threading.Event",
"mock.create_autospec",
"numpy.zeros",
"unittest.main",... | [((1512, 1539), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1529, 1539), False, 'import logging\n'), ((1558, 1598), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (1579, 1598), False, 'import logging\n'), ((13825, 13840), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13838, 13840), False, 'import unittest\n'), ((1647, 1659), 'numpy.zeros', 'np.zeros', (['(80)'], {}), '(80)\n', (1655, 1659), True, 'import numpy as np\n'), ((1844, 1896), 'importlib_resources.path', 'importlib_resources.path', (['__package__', '"""test.config"""'], {}), "(__package__, 'test.config')\n", (1868, 1896), False, 'import importlib_resources\n'), ((5526, 5543), 'threading.Event', 'threading.Event', ([], {}), '()\n', (5541, 5543), False, 'import threading\n'), ((5570, 5587), 'threading.Event', 'threading.Event', ([], {}), '()\n', (5585, 5587), False, 'import threading\n'), ((5625, 5642), 'threading.Event', 'threading.Event', ([], {}), '()\n', (5640, 5642), False, 'import threading\n'), ((5674, 5691), 'threading.Event', 'threading.Event', ([], {}), '()\n', (5689, 5691), False, 'import threading\n'), ((5999, 6016), 'threading.Event', 'threading.Event', ([], {}), '()\n', (6014, 6016), False, 'import threading\n'), ((6054, 6071), 'threading.Event', 'threading.Event', ([], {}), '()\n', (6069, 6071), False, 'import threading\n'), ((6098, 6115), 'threading.Event', 'threading.Event', ([], {}), '()\n', (6113, 6115), False, 'import threading\n'), ((7860, 7877), 'threading.Event', 'threading.Event', ([], {}), '()\n', (7875, 7877), False, 'import threading\n'), ((7904, 7921), 'threading.Event', 'threading.Event', ([], {}), '()\n', (7919, 7921), False, 'import threading\n'), ((8126, 8143), 'threading.Event', 'threading.Event', ([], {}), '()\n', (8141, 8143), False, 'import threading\n'), ((9051, 9059), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (9056, 9059), False, 'from time 
import sleep\n'), ((9652, 9683), 'pepper.framework.infra.di_container.DIContainer._singletons.clear', 'DIContainer._singletons.clear', ([], {}), '()\n', (9681, 9683), False, 'from pepper.framework.infra.di_container import singleton, DIContainer\n'), ((10043, 10053), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (10048, 10053), False, 'from time import sleep\n'), ((10652, 10662), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (10657, 10662), False, 'from time import sleep\n'), ((11357, 11367), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (11362, 11367), False, 'from time import sleep\n'), ((12842, 12852), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (12847, 12852), False, 'from time import sleep\n'), ((13586, 13602), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (13600, 13602), False, 'import threading\n'), ((2432, 2449), 'threading.Event', 'threading.Event', ([], {}), '()\n', (2447, 2449), False, 'import threading\n'), ((3696, 3729), 'mock.create_autospec', 'mock.create_autospec', (['AbstractASR'], {}), '(AbstractASR)\n', (3716, 3729), False, 'import mock\n'), ((3944, 3984), 'mock.create_autospec', 'mock.create_autospec', (['AbstractTranslator'], {}), '(AbstractTranslator)\n', (3964, 3984), False, 'import mock\n'), ((13362, 13373), 'time.sleep', 'sleep', (['(0.01)'], {}), '(0.01)\n', (13367, 13373), False, 'from time import sleep\n'), ((3778, 3818), 'pepper.framework.sensor.api.UtteranceHypothesis', 'UtteranceHypothesis', (['"""Test one two"""', '(1.0)'], {}), "('Test one two', 1.0)\n", (3797, 3818), False, 'from pepper.framework.sensor.api import AbstractTranslator, AbstractASR, UtteranceHypothesis, SensorContainer\n'), ((8592, 8604), 'time.sleep', 'sleep', (['(0.001)'], {}), '(0.001)\n', (8597, 8604), False, 'from time import sleep\n'), ((9862, 9870), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (9867, 9870), False, 'from time import sleep\n'), ((2835, 2891), 'pepper.framework.backend.abstract.microphone.AbstractMicrophone', 
'AbstractMicrophone', (['(8000)', '(1)', 'event_bus', 'resource_manager'], {}), '(8000, 1, event_bus, resource_manager)\n', (2853, 2891), False, 'from pepper.framework.backend.abstract.microphone import AbstractMicrophone\n'), ((6989, 7001), 'time.sleep', 'sleep', (['(0.001)'], {}), '(0.001)\n', (6994, 7001), False, 'from time import sleep\n'), ((7363, 7375), 'time.sleep', 'sleep', (['(0.001)'], {}), '(0.001)\n', (7368, 7375), False, 'from time import sleep\n'), ((9795, 9819), 'threading.active_count', 'threading.active_count', ([], {}), '()\n', (9817, 9819), False, 'import threading\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 08:41:17 2015.
@author: mje
"""
import numpy as np
import numpy.random as npr
import os
import socket
import mne
# import pandas as pd
from mne.connectivity import spectral_connectivity
from mne.minimum_norm import (apply_inverse_epochs, read_inverse_operator)
# Permutation test.
def permutation_resampling(case, control, num_samples, statistic):
    """
    Permutation test.

    Return p-value that statistic for case is different
    from statistic for control.

    Parameters
    ----------
    case, control : array-like
        Observations from the two conditions.
    num_samples : int
        Number of random permutations to draw.
    statistic : callable
        Reduces a sample to a scalar (e.g. ``np.mean``).

    Returns
    -------
    pval : float
        Two-sided permutation p-value.
    observed_diff : float
        Absolute observed difference of the statistic.
    diffs : ndarray
        Null distribution of mean differences.
    """
    observed_diff = abs(statistic(case) - statistic(control))
    num_case = len(case)

    combined = np.concatenate([case, control])
    diffs = []
    for i in range(num_samples):
        xs = npr.permutation(combined)
        diff = np.mean(xs[:num_case]) - np.mean(xs[num_case:])
        diffs.append(diff)

    # Convert to an array so the elementwise comparisons below work on
    # Python 3; comparing a plain list against a float raises TypeError.
    diffs = np.asarray(diffs)
    pval = (np.sum(diffs > observed_diff) +
            np.sum(diffs < -observed_diff)) / float(num_samples)
    return pval, observed_diff, diffs
def permutation_test(a, b, num_samples, statistic):
    """
    Permutation test.

    Return p-value that statistic for a is different
    from statistic for b.
    """
    observed_diff = abs(statistic(b) - statistic(a))
    num_a = len(a)
    pooled = np.concatenate([a, b])

    # Build the null distribution: shuffle the pooled sample and take the
    # difference of group means at the original split point.
    diffs = []
    for _ in range(num_samples):
        shuffled = npr.permutation(pooled)
        diffs.append(np.mean(shuffled[:num_a]) - np.mean(shuffled[num_a:]))

    # Two-sided p-value: fraction of permuted differences at least as
    # extreme (in absolute value) as the observed one.
    exceed_count = np.sum(np.abs(diffs) >= np.abs(observed_diff))
    pval = exceed_count / float(num_samples)
    return pval, observed_diff, diffs
# Setup paths and prepare raw data
# Choose data paths based on which machine the script runs on.
hostname = socket.gethostname()
if hostname == "Wintermute":
    data_path = "/home/mje/mnt/caa/scratch/"
    n_jobs = 1
else:
    data_path = "/projects/MINDLAB2015_MEG-CorticalAlphaAttention/scratch/"
    n_jobs = 1
# FreeSurfer subjects directory used for the source-space labels below.
subjects_dir = data_path + "fs_subjects_dir/"
# change dir to save files the right place
os.chdir(data_path)
# Input files: inverse operator, epoched MEG data, and evoked responses.
fname_inv = data_path + '0001-meg-oct-6-inv.fif'
fname_epochs = data_path + '0001_p_03_filter_ds_ica-mc_tsss-epo.fif'
fname_evoked = data_path + "0001_p_03_filter_ds_ica-mc_raw_tsss-ave.fif"
# Parameters
snr = 1.0 # Standard assumption for average data but using it for single trial
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
inverse_operator = read_inverse_operator(fname_inv)
epochs = mne.read_epochs(fname_epochs)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
#labels = mne.read_labels_from_annot('0001', parc='PALS_B12_Lobes',
labels = mne.read_labels_from_annot('0001', parc='PALS_B12_Brodmann',
                                    regexp="Brodmann",
                                    subjects_dir=subjects_dir)
# Keep a subset of Brodmann labels (presumably occipital, per the variable
# name) -- TODO confirm indices 6:12 select the intended areas.
labels_occ = labels[6:12]
# labels = mne.read_labels_from_annot('subject_1', parc='aparc.DKTatlas40',
#                                     subjects_dir=subjects_dir)
# Single-trial source estimates per condition. The exec() creates module-level
# variables named stcs_<condition>; stcs_ctl_left / stcs_ent_left are used below.
for cond in epochs.event_id.keys():
    stcs = apply_inverse_epochs(epochs[cond], inverse_operator, lambda2,
                                method, pick_ori="normal")
    exec("stcs_%s = stcs" % cond)
# Names of the occipital labels. The original code built this list with a
# comprehension and then appended every name a second time in a loop, which
# duplicated each entry; build it once.
labels_name = [label.name for label in labels_occ]
# Extract time series
# One label time course per occipital label (mean across vertices with sign
# flip), per condition and for both conditions pooled.
ts_ctl_left = mne.extract_label_time_course(stcs_ctl_left,
                                            labels_occ,
                                            src=inverse_operator["src"],
                                            mode = "mean_flip")
ts_ent_left = mne.extract_label_time_course(stcs_ent_left,
                                            labels_occ,
                                            src=inverse_operator["src"],
                                            mode = "mean_flip")
stcs_all_left = stcs_ctl_left + stcs_ent_left
ts_all_left = np.asarray(mne.extract_label_time_course(stcs_all_left,
                                                       labels_occ,
                                                       src=inverse_operator["src"],
                                                       mode = "mean_flip"))
# Permutation-test setup: observed connectivity difference between conditions.
number_of_permutations = 2000
index = np.arange(0, len(ts_all_left))
permutations_results = np.empty(number_of_permutations)
fmin, fmax = 7, 12  # frequency band of interest (Hz)
tmin, tmax = 0, 1  # time window passed to spectral_connectivity (s)
con_method = "plv"  # phase-locking value
diff_permuatation = np.empty([6, 6, number_of_permutations])
# diff
# Observed connectivity for each condition; their difference is the statistic.
con_ctl, freqs_ctl, times_ctl, n_epochs_ctl, n_tapers_ctl =\
    spectral_connectivity(
        ts_ctl_left,
        method=con_method,
        mode='multitaper',
        sfreq=250,
        fmin=fmin, fmax=fmax,
        faverage=True,
        tmin=tmin, tmax=tmax,
        mt_adaptive=False,
        n_jobs=1,
        verbose=None)
con_ent, freqs_ent, times_ent, n_epochs_ent, n_tapers_ent =\
    spectral_connectivity(
        ts_ent_left,
        method=con_method,
        mode='multitaper',
        sfreq=250,
        fmin=fmin, fmax=fmax,
        faverage=True,
        tmin=tmin, tmax=tmax,
        mt_adaptive=False,
        n_jobs=1,
        verbose=None)
diff = con_ctl[:, :, 0] - con_ent[:, :, 0]
# Permutation loop: shuffle the pooled trials, split them into two surrogate
# groups (first 64 trials vs the rest -- NOTE(review): confirm 64 matches the
# per-condition trial count) and recompute the connectivity difference.
for i in range(number_of_permutations):
    index = np.random.permutation(index)
    tmp_ctl = ts_all_left[index[:64], :, :]
    tmp_case = ts_all_left[index[64:], :, :]
    con_ctl, freqs_ctl, times_ctl, n_epochs_ctl, n_tapers_ctl =\
        spectral_connectivity(
            tmp_ctl,
            method=con_method,
            mode='multitaper',
            sfreq=250,
            fmin=fmin, fmax=fmax,
            faverage=True,
            tmin=tmin, tmax=tmax,
            mt_adaptive=False,
            n_jobs=1)
    con_case, freqs_case, times_case, n_epochs_case, n_tapers_case =\
        spectral_connectivity(
            tmp_case,
            method=con_method,
            mode='multitaper',
            sfreq=250,
            fmin=fmin, fmax=fmax,
            faverage=True,
            tmin=tmin, tmax=tmax,
            mt_adaptive=False,
            n_jobs=1)
    diff_permuatation[:, :, i] = con_ctl[:, :, 0] - con_case[:, :, 0]
# Two-sided permutation p-values: for each label pair, the fraction of
# permuted connectivity differences at least as extreme as the observed one.
# The original code indexed diff_permuatation[h, h, :] (wrong row) and
# diff[h, j, :] (diff is 2-D, so this raised IndexError); the intent is
# visible in the commented-out expression it replaced.
pval = np.empty_like(diff)
for h in range(diff.shape[0]):
    for j in range(diff.shape[1]):
        if diff[h, j] != 0:
            pval[h, j] = np.sum(
                np.abs(diff_permuatation[h, j, :]) >=
                np.abs(diff[h, j])) / float(number_of_permutations)
| [
"mne.minimum_norm.read_inverse_operator",
"numpy.mean",
"mne.extract_label_time_course",
"numpy.abs",
"mne.minimum_norm.apply_inverse_epochs",
"os.chdir",
"mne.read_labels_from_annot",
"numpy.sum",
"numpy.empty",
"mne.read_epochs",
"numpy.empty_like",
"numpy.concatenate",
"mne.connectivity.s... | [((1606, 1626), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1624, 1626), False, 'import socket\n'), ((1905, 1924), 'os.chdir', 'os.chdir', (['data_path'], {}), '(data_path)\n', (1913, 1924), False, 'import os\n'), ((2336, 2368), 'mne.minimum_norm.read_inverse_operator', 'read_inverse_operator', (['fname_inv'], {}), '(fname_inv)\n', (2357, 2368), False, 'from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator\n'), ((2378, 2407), 'mne.read_epochs', 'mne.read_epochs', (['fname_epochs'], {}), '(fname_epochs)\n', (2393, 2407), False, 'import mne\n'), ((2564, 2675), 'mne.read_labels_from_annot', 'mne.read_labels_from_annot', (['"""0001"""'], {'parc': '"""PALS_B12_Brodmann"""', 'regexp': '"""Brodmann"""', 'subjects_dir': 'subjects_dir'}), "('0001', parc='PALS_B12_Brodmann', regexp=\n 'Brodmann', subjects_dir=subjects_dir)\n", (2590, 2675), False, 'import mne\n'), ((3263, 3371), 'mne.extract_label_time_course', 'mne.extract_label_time_course', (['stcs_ctl_left', 'labels_occ'], {'src': "inverse_operator['src']", 'mode': '"""mean_flip"""'}), "(stcs_ctl_left, labels_occ, src=\n inverse_operator['src'], mode='mean_flip')\n", (3292, 3371), False, 'import mne\n'), ((3516, 3624), 'mne.extract_label_time_course', 'mne.extract_label_time_course', (['stcs_ent_left', 'labels_occ'], {'src': "inverse_operator['src']", 'mode': '"""mean_flip"""'}), "(stcs_ent_left, labels_occ, src=\n inverse_operator['src'], mode='mean_flip')\n", (3545, 3624), False, 'import mne\n'), ((4158, 4190), 'numpy.empty', 'np.empty', (['number_of_permutations'], {}), '(number_of_permutations)\n', (4166, 4190), True, 'import numpy as np\n'), ((4268, 4308), 'numpy.empty', 'np.empty', (['[6, 6, number_of_permutations]'], {}), '([6, 6, number_of_permutations])\n', (4276, 4308), True, 'import numpy as np\n'), ((4387, 4580), 'mne.connectivity.spectral_connectivity', 'spectral_connectivity', (['ts_ctl_left'], {'method': 'con_method', 'mode': 
'"""multitaper"""', 'sfreq': '(250)', 'fmin': 'fmin', 'fmax': 'fmax', 'faverage': '(True)', 'tmin': 'tmin', 'tmax': 'tmax', 'mt_adaptive': '(False)', 'n_jobs': '(1)', 'verbose': 'None'}), "(ts_ctl_left, method=con_method, mode='multitaper',\n sfreq=250, fmin=fmin, fmax=fmax, faverage=True, tmin=tmin, tmax=tmax,\n mt_adaptive=False, n_jobs=1, verbose=None)\n", (4408, 4580), False, 'from mne.connectivity import spectral_connectivity\n'), ((4764, 4957), 'mne.connectivity.spectral_connectivity', 'spectral_connectivity', (['ts_ent_left'], {'method': 'con_method', 'mode': '"""multitaper"""', 'sfreq': '(250)', 'fmin': 'fmin', 'fmax': 'fmax', 'faverage': '(True)', 'tmin': 'tmin', 'tmax': 'tmax', 'mt_adaptive': '(False)', 'n_jobs': '(1)', 'verbose': 'None'}), "(ts_ent_left, method=con_method, mode='multitaper',\n sfreq=250, fmin=fmin, fmax=fmax, faverage=True, tmin=tmin, tmax=tmax,\n mt_adaptive=False, n_jobs=1, verbose=None)\n", (4785, 4957), False, 'from mne.connectivity import spectral_connectivity\n'), ((6094, 6113), 'numpy.empty_like', 'np.empty_like', (['diff'], {}), '(diff)\n', (6107, 6113), True, 'import numpy as np\n'), ((632, 663), 'numpy.concatenate', 'np.concatenate', (['[case, control]'], {}), '([case, control])\n', (646, 663), True, 'import numpy as np\n'), ((1246, 1268), 'numpy.concatenate', 'np.concatenate', (['[a, b]'], {}), '([a, b])\n', (1260, 1268), True, 'import numpy as np\n'), ((2960, 3052), 'mne.minimum_norm.apply_inverse_epochs', 'apply_inverse_epochs', (['epochs[cond]', 'inverse_operator', 'lambda2', 'method'], {'pick_ori': '"""normal"""'}), "(epochs[cond], inverse_operator, lambda2, method,\n pick_ori='normal')\n", (2980, 3052), False, 'from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator\n'), ((3826, 3934), 'mne.extract_label_time_course', 'mne.extract_label_time_course', (['stcs_all_left', 'labels_occ'], {'src': "inverse_operator['src']", 'mode': '"""mean_flip"""'}), "(stcs_all_left, labels_occ, src=\n 
inverse_operator['src'], mode='mean_flip')\n", (3855, 3934), False, 'import mne\n'), ((5170, 5198), 'numpy.random.permutation', 'np.random.permutation', (['index'], {}), '(index)\n', (5191, 5198), True, 'import numpy as np\n'), ((5362, 5538), 'mne.connectivity.spectral_connectivity', 'spectral_connectivity', (['tmp_ctl'], {'method': 'con_method', 'mode': '"""multitaper"""', 'sfreq': '(250)', 'fmin': 'fmin', 'fmax': 'fmax', 'faverage': '(True)', 'tmin': 'tmin', 'tmax': 'tmax', 'mt_adaptive': '(False)', 'n_jobs': '(1)'}), "(tmp_ctl, method=con_method, mode='multitaper', sfreq=\n 250, fmin=fmin, fmax=fmax, faverage=True, tmin=tmin, tmax=tmax,\n mt_adaptive=False, n_jobs=1)\n", (5383, 5538), False, 'from mne.connectivity import spectral_connectivity\n'), ((5727, 5904), 'mne.connectivity.spectral_connectivity', 'spectral_connectivity', (['tmp_case'], {'method': 'con_method', 'mode': '"""multitaper"""', 'sfreq': '(250)', 'fmin': 'fmin', 'fmax': 'fmax', 'faverage': '(True)', 'tmin': 'tmin', 'tmax': 'tmax', 'mt_adaptive': '(False)', 'n_jobs': '(1)'}), "(tmp_case, method=con_method, mode='multitaper', sfreq\n =250, fmin=fmin, fmax=fmax, faverage=True, tmin=tmin, tmax=tmax,\n mt_adaptive=False, n_jobs=1)\n", (5748, 5904), False, 'from mne.connectivity import spectral_connectivity\n'), ((725, 750), 'numpy.random.permutation', 'npr.permutation', (['combined'], {}), '(combined)\n', (740, 750), True, 'import numpy.random as npr\n'), ((1330, 1355), 'numpy.random.permutation', 'npr.permutation', (['combined'], {}), '(combined)\n', (1345, 1355), True, 'import numpy.random as npr\n'), ((766, 788), 'numpy.mean', 'np.mean', (['xs[:num_case]'], {}), '(xs[:num_case])\n', (773, 788), True, 'import numpy as np\n'), ((791, 813), 'numpy.mean', 'np.mean', (['xs[num_case:]'], {}), '(xs[num_case:])\n', (798, 813), True, 'import numpy as np\n'), ((854, 883), 'numpy.sum', 'np.sum', (['(diffs > observed_diff)'], {}), '(diffs > observed_diff)\n', (860, 883), True, 'import numpy as np\n'), ((898, 
928), 'numpy.sum', 'np.sum', (['(diffs < -observed_diff)'], {}), '(diffs < -observed_diff)\n', (904, 928), True, 'import numpy as np\n'), ((1371, 1390), 'numpy.mean', 'np.mean', (['xs[:num_a]'], {}), '(xs[:num_a])\n', (1378, 1390), True, 'import numpy as np\n'), ((1393, 1412), 'numpy.mean', 'np.mean', (['xs[num_a:]'], {}), '(xs[num_a:])\n', (1400, 1412), True, 'import numpy as np\n'), ((1459, 1472), 'numpy.abs', 'np.abs', (['diffs'], {}), '(diffs)\n', (1465, 1472), True, 'import numpy as np\n'), ((1476, 1497), 'numpy.abs', 'np.abs', (['observed_diff'], {}), '(observed_diff)\n', (1482, 1497), True, 'import numpy as np\n'), ((6303, 6324), 'numpy.abs', 'np.abs', (['diff[h, j, :]'], {}), '(diff[h, j, :])\n', (6309, 6324), True, 'import numpy as np\n')] |
"""A collection of functions that assist in validation/comparison of data and conditions.
"""
from collections.abc import Sized
from typing import List, Union, Callable, Type, Iterable
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_categorical
from helpsk.exceptions import * # pylint: disable=wildcard-import,unused-wildcard-import
from helpsk.utility import suppress_warnings
def any_none_nan(values: Union[List, np.ndarray, pd.Series, pd.DataFrame, object]) -> bool:
    """Can be used with a single value or a collection of values. Returns `True` if any item in `values` are
    `None`, `np.nan`, `pd.NA`, `pd.NaT` or if the length of `values` is `0`.

    Args:
        values:
            A collection of values to check.

    Returns:
        bool - True if any item in `values` are None/np.nan
    """
    # pylint: disable=too-many-return-statements
    # `np.NaN` was removed in NumPy 2.0; `np.nan` is the same singleton float,
    # so the identity check is unchanged on older NumPy and works on 2.x.
    if values is None or values is np.nan or values is pd.NA or values is pd.NaT:  # pylint: disable=nan-comparison
        return True
    if isinstance(values, Sized) and not isinstance(values, str) and len(values) == 0:
        return True
    if isinstance(values, pd.Series):
        return values.isnull().any() or values.isna().any()
    if isinstance(values, pd.DataFrame):
        return values.isnull().any().any() or values.isna().any().any()
    if isinstance(values, Iterable) and not isinstance(values, str):
        if len(values) == 0:
            return True
        # recurse so nested None/NaN values are detected
        return any((any_none_nan(x) for x in values))
    try:
        if not isinstance(values, str) and None in values:
            return True
    except Exception:  # pylint: disable=broad-except  # noqa
        pass
    try:
        # scalar fallback; raises TypeError for non-numeric values
        if np.isnan(values).any():
            return True
    except TypeError:
        return False
    return False
def assert_not_none_nan(values: Union[List, np.ndarray, pd.Series, pd.DataFrame, object]) -> None:
    """Raise `HelpskAssertionError` when `values` contains `None`/`np.Nan` or has length `0`.

    For numeric types only.

    Args:
        values:
            A collection of values to check.
    """
    found_invalid = any_none_nan(values)
    assert_false(found_invalid, message='None/NaN Values Found')
def any_missing(values: Union[List, pd.Series, pd.DataFrame, object]) -> bool:
    """Same as `any_none_nan` but also checks for empty strings.

    Args:
        values:
            A collection of values to check.

    Returns:
        bool - True if any item in `values` are None/np.NaN/''
    """
    if any_none_nan(values):
        return True
    if isinstance(values, pd.Series):
        return values.isin(['']).any()  # noqa
    if isinstance(values, pd.DataFrame):
        return values.isin(['']).any().any()  # noqa
    if isinstance(values, str):
        # Handle strings entirely here: only blank/whitespace-only strings
        # count as missing. (Letting strings fall through to the Iterable
        # branch was a bug: the empty string is a substring of every string,
        # so `'' in values` reported any non-empty string as missing.)
        return values.strip() == ''
    if isinstance(values, Iterable) and '' in values:
        return True
    return False
def assert_not_any_missing(values: Union[List, pd.Series, pd.DataFrame, object]) -> None:
    """Raise `HelpskAssertionError` when `values` contains `None`, `np.Nan`, an empty string
    (i.e. '') or has length `0`.

    Args:
        values:
            A collection of values to check.
    """
    found_missing = any_missing(values)
    assert_false(found_missing, message='Missing Values Found')
def any_duplicated(values: Union[List, np.ndarray, pd.Series]) -> bool:
    """Returns `True` if any items in `values` are duplicated.

    Args:
        values: list, np.ndarray, pd.Series
            A collection of values to check.

    Returns:
        bool
    """
    unique_count = len(set(values))
    return unique_count != len(values)
def assert_not_duplicated(values: Union[List, np.ndarray, pd.Series]) -> None:
    """Raises an HelpskAssertionError if any items in `values` are duplicated.

    Args:
        values: list, np.ndarray, pd.Series
            A collection of values to check.
    """
    has_duplicates = any_duplicated(values)
    assert_false(has_duplicates, message='Duplicate Values Found')
def assert_all(values: Union[List, np.ndarray, pd.Series, pd.DataFrame]) -> None:
    """Raises an `HelpskAssertionError` unless all items in `values` are `True`

    Args:
        values:
            A collection of values to check.
    """
    if isinstance(values, pd.Series):
        all_true = values.all()  # noqa
    elif isinstance(values, pd.DataFrame):
        all_true = values.all().all()  # noqa
    else:
        all_true = all(values)
    if not all_true:
        raise HelpskAssertionError('Not All True')
def assert_not_any(values: Union[List, np.ndarray, pd.Series, pd.DataFrame]) -> None:
    """Raises an `HelpskAssertionError` if any items in `values` are `True`

    Args:
        values:
            A collection of values to check.
    """
    if isinstance(values, pd.Series):
        found_true = values.any()  # noqa
    elif isinstance(values, pd.DataFrame):
        found_true = values.any().any()  # noqa
    else:
        found_true = any(values)
    assert_false(found_true, message='Found True')
def assert_true(condition: bool, message: str = 'Condition Not True') -> None:
    """Raises an HelpskAssertionError if `condition` is not True

    Args:
        condition:
            Something that evaluates to True/False
        message:
            Message passed to the HelpskAssertionError
    """
    if not isinstance(condition, (bool, np.bool_)):
        raise HelpskParamTypeError('condition should be boolean')
    if condition:
        return
    raise HelpskAssertionError(message)
def assert_false(condition: bool, message: str = 'Condition True') -> None:
    """Raises an HelpskAssertionError if `condition` is not False

    Args:
        condition: bool
            Something that evaluates to True/False
        message:
            Message passed to the HelpskAssertionError
    """
    if not isinstance(condition, (bool, np.bool_)):
        raise HelpskParamTypeError('condition should be boolean')
    if not condition:
        return
    raise HelpskAssertionError(message)
def iterables_are_equal(iterable_a: Iterable, iterable_b: Iterable) -> bool:
    """Compares the equality of the values of two iterables.

    This function will generally give the same result as list equality (e.g. `[x, y, z] == [x, y, z]`).
    However, in some strange scenarios, `==` will return `False` where it doesn't seem like it should.
    For example::

        temp = pd.DataFrame({'col_a': [np.nan, 1.0]})
        temp.col_a.tolist() == [np.nan, 1.0]  # returns False. Why??
        iterables_are_equal(temp.col_a.tolist(), [np.nan, 1])  # returns True
        [np.nan, 1.0] == [np.nan, 1.0]  # returns True

    Also, when comparing a series with an ordered Categorical when the values are the same,
    pd.Series.equals() will return False if the categories have different order. But we only care if the
    values are the same, so this function will return True.

    Args:
        iterable_a:
            an iterable to equate to iterable_b
        iterable_b:
            an iterable to equate to iterable_a

    Returns:
        True if iterable_a is equal to iterable_b
    """
    # seems to be confusion and inconsistencies across stack overflow on how to properly check for category
    # so this might be overkill but not exactly sure
    # def is_categorical(series):
    #     if isinstance(series, (pd.Categorical, pd.CategoricalDtype)):
    #         return True
    #     if isinstance(series, pd.Series):
    #         return series.dtype.name == 'category'
    #     return False
    # NOTE(review): `pandas.core.dtypes.common.is_categorical` is a private,
    # deprecated helper (removed in newer pandas) -- verify the pinned pandas
    # version still exposes it.
    with suppress_warnings():
        # if either list-like structure is categorical, then we need to convert both to unordered categorical
        if is_categorical(iterable_a) or is_categorical(iterable_b):
            iterable_a = pd.Categorical(iterable_a, ordered=False)
            iterable_b = pd.Categorical(iterable_b, ordered=False)
        else:
            # pd.Series.equals treats NaN == NaN as equal, unlike list `==`
            iterable_a = pd.Series(iterable_a)
            iterable_b = pd.Series(iterable_b)
        return iterable_a.equals(iterable_b)
def dataframes_match(dataframes: List[pd.DataFrame],
                     float_tolerance: int = 6,
                     ignore_indexes: bool = True,
                     ignore_column_names: bool = True) -> bool:
    """
    Test whether two or more DataFrames contain the same values.

    Because floating point numbers are difficult to accurately represent, any numeric columns are first
    rounded to `float_tolerance` decimal places before the comparison.

    Args:
        dataframes:
            Two or more dataframes to compare against each other and test for equality
        float_tolerance:
            numeric columns will be rounded to the number of digits to the right of the decimal specified
            by this parameter.
        ignore_indexes:
            if True, the indexes of each DataFrame will be ignored for considering equality
        ignore_column_names:
            if True, the column names of each DataFrame will be ignored for considering equality

    Returns:
        Returns True if the dataframes match based on the conditions explained above, otherwise False
    """
    if not isinstance(dataframes, list):
        raise HelpskParamTypeError("Expected list of pd.DataFrame's.")
    if len(dataframes) < 2:
        raise HelpskParamValueError("Expected 2 or more pd.DataFrame's in list.")
    baseline = dataframes[0].round(float_tolerance)

    def matches_baseline(candidate):
        # one-line purpose: candidate equals the rounded first DataFrame
        if baseline.shape != candidate.shape:
            return False
        if ignore_indexes or ignore_column_names:
            # work on a copy so the caller's DataFrame object is never mutated
            candidate = candidate.copy()
        if ignore_indexes:
            candidate.index = baseline.index
        if ignore_column_names:
            candidate.columns = baseline.columns
        return baseline.equals(candidate.round(float_tolerance))

    for other in dataframes[1:]:
        if not matches_baseline(other):
            return False
    return True
def assert_dataframes_match(dataframes: List[pd.DataFrame],
                            float_tolerance: int = 6,
                            ignore_indexes: bool = True,
                            ignore_column_names: bool = True,
                            message: str = 'Dataframes do not match') -> None:
    """
    Raises an assertion error if dataframes don't match.

    Args:
        dataframes:
            Two or more dataframes to compare against each other and test for equality
        float_tolerance:
            numeric columns will be rounded to the number of digits to the right of the decimal specified
            by this parameter.
        ignore_indexes:
            if True, the indexes of each DataFrame will be ignored for considering equality
        ignore_column_names:
            if True, the column names of each DataFrame will be ignored for considering equality
        message:
            message to pass to HelpskAssertionError
    """
    matched = dataframes_match(dataframes=dataframes,
                               float_tolerance=float_tolerance,
                               ignore_indexes=ignore_indexes,
                               ignore_column_names=ignore_column_names)
    if not matched:
        raise HelpskAssertionError(message)
def is_close(value_a: float, value_b: float, tolerance: float = 0.000001) -> bool:
    """Tests whether value_a and value_b are "close" (i.e. differ by at most `tolerance`).

    Args:
        value_a:
            numeric value to test
        value_b:
            numeric value to test
        tolerance:
            the maximum difference (absolute value) allowed between value_a and value_b

    Returns:
        True if values are within specified tolerance
    """
    difference = value_a - value_b
    return -tolerance <= difference <= tolerance
def assert_is_close(value_a: float, value_b: float, tolerance: float = 0.000001):
    """Raises an assert error if value_a and value_b are not "close" (see documentation of `is_close()`
    function).

    Args:
        value_a:
            numeric value to test
        value_b:
            numeric value to test
        tolerance:
            number of digits to round to
    """
    if is_close(value_a=value_a, value_b=value_b, tolerance=tolerance):
        return
    raise HelpskAssertionError(f"`{value_a}` and `{value_b}` are not within a tolerance of `{tolerance}`")
def raises_exception(function: Callable, exception_type: Type = None) -> bool:
    """Returns True if calling `function` raises an Exception; False if it runs cleanly.

    Args:
        function:
            the function which does or does not raise an exception.
        exception_type:
            if `exception_type` is provided, `raises_exception` returns true only if the `function`
            argument raises an Exception **and** the exception has type of `exception_type`.
    """
    try:
        function()
    except Exception as raised:  # pylint: disable=broad-except
        if exception_type:
            return isinstance(raised, exception_type)
        return True
    return False
| [
"pandas.core.dtypes.common.is_categorical",
"pandas.Series",
"helpsk.utility.suppress_warnings",
"pandas.Categorical",
"numpy.isnan"
] | [((7627, 7646), 'helpsk.utility.suppress_warnings', 'suppress_warnings', ([], {}), '()\n', (7644, 7646), False, 'from helpsk.utility import suppress_warnings\n'), ((7769, 7795), 'pandas.core.dtypes.common.is_categorical', 'is_categorical', (['iterable_a'], {}), '(iterable_a)\n', (7783, 7795), False, 'from pandas.core.dtypes.common import is_categorical\n'), ((7799, 7825), 'pandas.core.dtypes.common.is_categorical', 'is_categorical', (['iterable_b'], {}), '(iterable_b)\n', (7813, 7825), False, 'from pandas.core.dtypes.common import is_categorical\n'), ((7852, 7893), 'pandas.Categorical', 'pd.Categorical', (['iterable_a'], {'ordered': '(False)'}), '(iterable_a, ordered=False)\n', (7866, 7893), True, 'import pandas as pd\n'), ((7919, 7960), 'pandas.Categorical', 'pd.Categorical', (['iterable_b'], {'ordered': '(False)'}), '(iterable_b, ordered=False)\n', (7933, 7960), True, 'import pandas as pd\n'), ((8000, 8021), 'pandas.Series', 'pd.Series', (['iterable_a'], {}), '(iterable_a)\n', (8009, 8021), True, 'import pandas as pd\n'), ((8047, 8068), 'pandas.Series', 'pd.Series', (['iterable_b'], {}), '(iterable_b)\n', (8056, 8068), True, 'import pandas as pd\n'), ((1720, 1736), 'numpy.isnan', 'np.isnan', (['values'], {}), '(values)\n', (1728, 1736), True, 'import numpy as np\n')] |
from pathlib import Path
import numpy as np
import torch
from torch.nn import functional as nnfunc
from torchvision import transforms
from models.utils import get_bboxes
from utils.edge_detector import sobel
from utils.nms import nms, nms2
from pympler import asizeof
def print_state(idx, epoch, size, loss_cls, loss_reg):
    """Print one progress line for a training step (epoch >= 0) or a validation step."""
    if epoch >= 0:
        prefix = "Epoch: [{0}][{1}/{2}]\t".format(epoch, idx, size)
    else:
        prefix = "Val: [{0}/{1}]\t".format(idx, size)
    losses = '\tloss_cls: {loss_cls:.6f}\tloss_reg: {loss_reg:.6f}'.format(
        loss_cls=loss_cls, loss_reg=loss_reg)
    print(prefix + losses)
def save_checkpoint(state, filename="checkpoint.pth", save_path="weights"):
    """Serialize `state` with `torch.save` to `save_path/filename`.

    Args:
        state: picklable object (typically a dict of model/optimizer state).
        filename: name of the checkpoint file.
        save_path: directory to write into; created (including parents) if missing.
    """
    # mkdir(parents=True, exist_ok=True) is race-free, unlike the previous
    # exists()-then-mkdir() check, and also creates missing parent directories.
    Path(save_path).mkdir(parents=True, exist_ok=True)
    save_path = Path(save_path, filename)
    torch.save(state, str(save_path))
def visualize_output(img, output, templates, proc, prob_thresh=0.55, nms_thresh=0.1):
    """Denormalize the first image of the batch, split the model output into class
    and regression heatmaps, and show them via `proc.visualize_heatmaps`.

    Prompts on stdin after displaying; entering 'n' exits the process.
    """
    tensor_to_image = transforms.ToPILImage()
    # undo ImageNet normalization in-place so the image displays correctly
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    for t, m, s in zip(img[0], mean, std):
        t.mul_(s).add_(m)
    image = tensor_to_image(img[0])  # Index into the batch
    # First `templates.shape[0]` channels are class scores; the rest are
    # regression offsets. torch.sigmoid replaces the deprecated
    # nn.functional.sigmoid (matches get_detections in this module).
    cls_map = torch.sigmoid(output[:, 0:templates.shape[0], :, :]).data.cpu(
    ).numpy().transpose((0, 2, 3, 1))[0, :, :, :]
    reg_map = output[:, templates.shape[0]:, :, :].data.cpu(
    ).numpy().transpose((0, 2, 3, 1))[0, :, :, :]
    print(np.sort(np.unique(cls_map))[::-1])
    proc.visualize_heatmaps(image, cls_map, reg_map, templates,
                            prob_thresh=prob_thresh, nms_thresh=nms_thresh)
    p = input("Continue? [Yn]")
    if p.lower().strip() == 'n':
        exit(0)
def draw_bboxes(image, img_id, bboxes, scores, scales, processor):
    # Thin delegation: rendering and saving are handled by the dataset processor.
    processor.render_and_save_bboxes(image, img_id, bboxes, scores, scales)
def train(model, loss_fn, optimizer, dataloader, epoch, device):
    """Run one training epoch over `dataloader`.

    Batches whose estimated GPU footprint would not fit in the remaining
    device memory are deferred to a cache and trained on after the main loop.
    """
    model = model.to(device)
    model.train()
    # deferred batches: parallel lists of (index, image, class map, regression map)
    cache_idx = []
    cache_img = []
    cache_class_map = []
    cache_regression_map = []
    total_memory = torch.cuda.get_device_properties(0).total_memory
    for idx, (img, class_map, regression_map) in enumerate(dataloader):
        # Rough free-memory estimate (4 bytes per float element): defer the
        # batch when fewer than ~100 MiB would remain on the device.
        # NOTE(review): the map sizes are *subtracted* from the image size in
        # this estimate -- confirm the formula is intended.
        if total_memory - torch.cuda.memory_allocated(device) - 4 * (np.prod(img.shape) -
                np.prod(class_map.shape) - np.prod(regression_map.shape)) < 100 * 1024 * 1024:
            cache_idx.append(idx)
            cache_img.append(img)
            cache_regression_map.append(regression_map)
            cache_class_map.append(class_map)
            continue
        x = img.float().to(device)
        class_map_var = class_map.float().to(device)
        regression_map_var = regression_map.float().to(device)
        output = model(x)
        loss = loss_fn(output,
                       class_map_var, regression_map_var)
        # visualize_output(img, output, dataloader.dataset.templates, dataloader.dataset.processor)
        optimizer.zero_grad()
        # Get the gradients
        # torch will automatically mask the gradients to 0 where applicable!
        loss.backward()
        optimizer.step()
        print_state(idx, epoch, len(dataloader),
                    loss_fn.class_average.average,
                    loss_fn.reg_average.average)
    # Drain the deferred batches after the epoch.
    # NOTE(review): the condition reuses `img`/`class_map`/`regression_map`
    # from the *last* dataloader batch as a size proxy, and keeps looping only
    # while the same low-memory estimate holds -- verify this is intended.
    while len(cache_img) > 0 and total_memory - torch.cuda.memory_allocated(device) - 4 * (np.prod(img.shape) -
            np.prod(class_map.shape) - np.prod(regression_map.shape)) < 100 * 1024 * 1024:
        one_idx = cache_idx.pop()
        one_img = cache_img.pop()
        one_class_map = cache_class_map.pop()
        one_regression_map = cache_regression_map.pop()
        x = one_img.float().to(device)
        class_map_var = one_class_map.float().to(device)
        regression_map_var = one_regression_map.float().to(device)
        output = model(x)
        loss = loss_fn(output,
                       class_map_var, regression_map_var)
        # visualize_output(img, output, dataloader.dataset.templates)
        optimizer.zero_grad()
        # Get the gradients
        # torch will automatically mask the gradients to 0 where applicable!
        loss.backward()
        optimizer.step()
        print_state(one_idx, epoch, len(dataloader),
                    loss_fn.class_average.average,
                    loss_fn.reg_average.average)
def get_detections(model, img, templates, rf, img_transforms,
                   prob_thresh=0.65, nms_thresh=0.3, scales=(-2, -1, 0, 1), device=None, enable_edge=False):
    """Run the detector over `img` at multiple scales and return NMS-kept boxes.

    Returns an (N, 5) float array of detections: [x1, y1, x2, y2, score].
    On a CUDA RuntimeError (e.g. out of memory) the whole pass is retried on
    CPU. The only functional change from the original is replacing `np.int`
    (an alias of the builtin removed in NumPy >= 1.24) with `int`.

    NOTE(review): the CPU fallback preprocesses the edge channel differently
    from the CUDA path (no /255 scaling, raw from_numpy instead of ToTensor)
    -- confirm that divergence is intended.
    """
    try:
        model = model.to(device)
        model.eval()
        dets = np.empty((0, 5))  # store bbox (x1, y1, x2, y2), score
        num_templates = templates.shape[0]
        # Evaluate over multiple scale
        scales_list = [2 ** x for x in scales]
        # convert tensor to PIL image so we can perform resizing
        image = transforms.functional.to_pil_image(img[0])
        min_side = np.min(image.size)
        for scale in scales_list:
            # scale the images
            scaled_image = transforms.functional.resize(image,
                                                         int(min_side * scale))
            # normalize the images
            img = img_transforms(scaled_image)
            if enable_edge:
                np_img = np.array(scaled_image).astype(np.uint8)
                edge = sobel(np_img, 48)
                edge_transforms = transforms.Compose([
                    transforms.ToTensor()
                    # transforms.Normalize(std=0.5, mean=0.5)
                ])
                # edge = np.expand_dims(edge, axis=2)
                edge = edge_transforms(edge / 255)
                # edge = torch.from_numpy(np.expand_dims(edge, axis=0))
                img = torch.cat((img, edge), 0)
            # add batch dimension
            img.unsqueeze_(0)
            # now run the model
            x = img.float().to(device)
            output = model(x)
            # first `num_templates` channels are class maps
            score_cls = output[:, :num_templates, :, :]
            prob_cls = torch.sigmoid(score_cls)
            score_cls = score_cls.data.cpu().numpy().transpose((0, 2, 3, 1))
            prob_cls = prob_cls.data.cpu().numpy().transpose((0, 2, 3, 1))
            score_reg = output[:, num_templates:, :, :]
            score_reg = score_reg.data.cpu().numpy().transpose((0, 2, 3, 1))
            t_bboxes, scores = get_bboxes(score_cls, score_reg, prob_cls,
                                          templates, prob_thresh, rf, scale)
            # NOTE(review): rebinds the `scales` parameter and the value is
            # never read afterwards -- looks like dead code.
            scales = np.ones((t_bboxes.shape[0], 1)) / scale
            # append scores at the end for NMS
            d = np.hstack((t_bboxes, scores))
            dets = np.vstack((dets, d))
        # Apply NMS
        keep = nms(dets, nms_thresh)
        dets = dets[keep]
    except RuntimeError:
        # CUDA failure (typically OOM): free the cache and redo the pass on CPU.
        torch.cuda.empty_cache()
        device1 = torch.device('cpu')
        model = model.to(device1)
        model.eval()
        dets = np.empty((0, 5))  # store bbox (x1, y1, x2, y2), score
        num_templates = templates.shape[0]
        # Evaluate over multiple scale
        scales_list = [2 ** x for x in scales]
        # convert tensor to PIL image so we can perform resizing
        image = transforms.functional.to_pil_image(img[0])
        min_side = np.min(image.size)
        for scale in scales_list:
            # scale the images
            scaled_image = transforms.functional.resize(image,
                                                         int(min_side * scale))
            # normalize the images
            if enable_edge:
                np_img = np.array(scaled_image).astype(np.uint8)
                edge = sobel(np_img, 48)
            img = img_transforms(scaled_image)
            if enable_edge:
                edge = torch.from_numpy(np.expand_dims(edge, axis=0))
                img = torch.cat((img, edge), 0)
            # add batch dimension
            img.unsqueeze_(0)
            # now run the model
            x = img.float().to(device1)
            output = model(x)
            # first `num_templates` channels are class maps
            score_cls = output[:, :num_templates, :, :]
            prob_cls = torch.sigmoid(score_cls)
            score_cls = score_cls.data.cpu().numpy().transpose((0, 2, 3, 1))
            prob_cls = prob_cls.data.cpu().numpy().transpose((0, 2, 3, 1))
            score_reg = output[:, num_templates:, :, :]
            score_reg = score_reg.data.cpu().numpy().transpose((0, 2, 3, 1))
            t_bboxes, scores = get_bboxes(score_cls, score_reg, prob_cls,
                                          templates, prob_thresh, rf, scale)
            scales = np.ones((t_bboxes.shape[0], 1)) / scale
            # append scores at the end for NMS
            d = np.hstack((t_bboxes, scores))
            dets = np.vstack((dets, d))
        # Apply NMS
        keep = nms(dets, nms_thresh)
        dets = dets[keep]
    return dets
| [
"numpy.prod",
"torchvision.transforms.ToPILImage",
"numpy.hstack",
"torchvision.transforms.functional.to_pil_image",
"torch.nn.functional.sigmoid",
"numpy.array",
"models.utils.get_bboxes",
"utils.nms.nms",
"pathlib.Path",
"numpy.empty",
"numpy.vstack",
"numpy.min",
"torchvision.transforms.T... | [((832, 857), 'pathlib.Path', 'Path', (['save_path', 'filename'], {}), '(save_path, filename)\n', (836, 857), False, 'from pathlib import Path\n'), ((1006, 1029), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (1027, 1029), False, 'from torchvision import transforms\n'), ((2105, 2140), 'torch.cuda.get_device_properties', 'torch.cuda.get_device_properties', (['(0)'], {}), '(0)\n', (2137, 2140), False, 'import torch\n'), ((4752, 4768), 'numpy.empty', 'np.empty', (['(0, 5)'], {}), '((0, 5))\n', (4760, 4768), True, 'import numpy as np\n'), ((5020, 5062), 'torchvision.transforms.functional.to_pil_image', 'transforms.functional.to_pil_image', (['img[0]'], {}), '(img[0])\n', (5054, 5062), False, 'from torchvision import transforms\n'), ((5083, 5101), 'numpy.min', 'np.min', (['image.size'], {}), '(image.size)\n', (5089, 5101), True, 'import numpy as np\n'), ((6937, 6958), 'utils.nms.nms', 'nms', (['dets', 'nms_thresh'], {}), '(dets, nms_thresh)\n', (6940, 6958), False, 'from utils.nms import nms, nms2\n'), ((6241, 6265), 'torch.sigmoid', 'torch.sigmoid', (['score_cls'], {}), '(score_cls)\n', (6254, 6265), False, 'import torch\n'), ((6585, 6662), 'models.utils.get_bboxes', 'get_bboxes', (['score_cls', 'score_reg', 'prob_cls', 'templates', 'prob_thresh', 'rf', 'scale'], {}), '(score_cls, score_reg, prob_cls, templates, prob_thresh, rf, scale)\n', (6595, 6662), False, 'from models.utils import get_bboxes\n'), ((6830, 6859), 'numpy.hstack', 'np.hstack', (['(t_bboxes, scores)'], {}), '((t_bboxes, scores))\n', (6839, 6859), True, 'import numpy as np\n'), ((6880, 6900), 'numpy.vstack', 'np.vstack', (['(dets, d)'], {}), '((dets, d))\n', (6889, 6900), True, 'import numpy as np\n'), ((7018, 7042), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (7040, 7042), False, 'import torch\n'), ((7061, 7080), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7073, 7080), False, 'import 
torch\n'), ((7152, 7168), 'numpy.empty', 'np.empty', (['(0, 5)'], {}), '((0, 5))\n', (7160, 7168), True, 'import numpy as np\n'), ((7420, 7462), 'torchvision.transforms.functional.to_pil_image', 'transforms.functional.to_pil_image', (['img[0]'], {}), '(img[0])\n', (7454, 7462), False, 'from torchvision import transforms\n'), ((7483, 7501), 'numpy.min', 'np.min', (['image.size'], {}), '(image.size)\n', (7489, 7501), True, 'import numpy as np\n'), ((9081, 9102), 'utils.nms.nms', 'nms', (['dets', 'nms_thresh'], {}), '(dets, nms_thresh)\n', (9084, 9102), False, 'from utils.nms import nms, nms2\n'), ((757, 772), 'pathlib.Path', 'Path', (['save_path'], {}), '(save_path)\n', (761, 772), False, 'from pathlib import Path\n'), ((791, 806), 'pathlib.Path', 'Path', (['save_path'], {}), '(save_path)\n', (795, 806), False, 'from pathlib import Path\n'), ((1485, 1503), 'numpy.unique', 'np.unique', (['cls_map'], {}), '(cls_map)\n', (1494, 1503), True, 'import numpy as np\n'), ((5287, 5311), 'numpy.int', 'np.int', (['(min_side * scale)'], {}), '(min_side * scale)\n', (5293, 5311), True, 'import numpy as np\n'), ((5512, 5529), 'utils.edge_detector.sobel', 'sobel', (['np_img', '(48)'], {}), '(np_img, 48)\n', (5517, 5529), False, 'from utils.edge_detector import sobel\n'), ((5907, 5932), 'torch.cat', 'torch.cat', (['(img, edge)', '(0)'], {}), '((img, edge), 0)\n', (5916, 5932), False, 'import torch\n'), ((6727, 6758), 'numpy.ones', 'np.ones', (['(t_bboxes.shape[0], 1)'], {}), '((t_bboxes.shape[0], 1))\n', (6734, 6758), True, 'import numpy as np\n'), ((8385, 8409), 'torch.sigmoid', 'torch.sigmoid', (['score_cls'], {}), '(score_cls)\n', (8398, 8409), False, 'import torch\n'), ((8729, 8806), 'models.utils.get_bboxes', 'get_bboxes', (['score_cls', 'score_reg', 'prob_cls', 'templates', 'prob_thresh', 'rf', 'scale'], {}), '(score_cls, score_reg, prob_cls, templates, prob_thresh, rf, scale)\n', (8739, 8806), False, 'from models.utils import get_bboxes\n'), ((8974, 9003), 'numpy.hstack', 
'np.hstack', (['(t_bboxes, scores)'], {}), '((t_bboxes, scores))\n', (8983, 9003), True, 'import numpy as np\n'), ((9024, 9044), 'numpy.vstack', 'np.vstack', (['(dets, d)'], {}), '((dets, d))\n', (9033, 9044), True, 'import numpy as np\n'), ((2253, 2288), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', (['device'], {}), '(device)\n', (2280, 2288), False, 'import torch\n'), ((7687, 7711), 'numpy.int', 'np.int', (['(min_side * scale)'], {}), '(min_side * scale)\n', (7693, 7711), True, 'import numpy as np\n'), ((7865, 7882), 'utils.edge_detector.sobel', 'sobel', (['np_img', '(48)'], {}), '(np_img, 48)\n', (7870, 7882), False, 'from utils.edge_detector import sobel\n'), ((8050, 8075), 'torch.cat', 'torch.cat', (['(img, edge)', '(0)'], {}), '((img, edge), 0)\n', (8059, 8075), False, 'import torch\n'), ((8871, 8902), 'numpy.ones', 'np.ones', (['(t_bboxes.shape[0], 1)'], {}), '((t_bboxes.shape[0], 1))\n', (8878, 8902), True, 'import numpy as np\n'), ((2384, 2413), 'numpy.prod', 'np.prod', (['regression_map.shape'], {}), '(regression_map.shape)\n', (2391, 2413), True, 'import numpy as np\n'), ((3382, 3417), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', (['device'], {}), '(device)\n', (3409, 3417), False, 'import torch\n'), ((5449, 5471), 'numpy.array', 'np.array', (['scaled_image'], {}), '(scaled_image)\n', (5457, 5471), True, 'import numpy as np\n'), ((5605, 5626), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5624, 5626), False, 'from torchvision import transforms\n'), ((7998, 8026), 'numpy.expand_dims', 'np.expand_dims', (['edge'], {'axis': '(0)'}), '(edge, axis=0)\n', (8012, 8026), True, 'import numpy as np\n'), ((2296, 2314), 'numpy.prod', 'np.prod', (['img.shape'], {}), '(img.shape)\n', (2303, 2314), True, 'import numpy as np\n'), ((2357, 2381), 'numpy.prod', 'np.prod', (['class_map.shape'], {}), '(class_map.shape)\n', (2364, 2381), True, 'import numpy as np\n'), ((3513, 3542), 'numpy.prod', 'np.prod', 
(['regression_map.shape'], {}), '(regression_map.shape)\n', (3520, 3542), True, 'import numpy as np\n'), ((7802, 7824), 'numpy.array', 'np.array', (['scaled_image'], {}), '(scaled_image)\n', (7810, 7824), True, 'import numpy as np\n'), ((3425, 3443), 'numpy.prod', 'np.prod', (['img.shape'], {}), '(img.shape)\n', (3432, 3443), True, 'import numpy as np\n'), ((3486, 3510), 'numpy.prod', 'np.prod', (['class_map.shape'], {}), '(class_map.shape)\n', (3493, 3510), True, 'import numpy as np\n'), ((1241, 1294), 'torch.nn.functional.sigmoid', 'nnfunc.sigmoid', (['output[:, 0:templates.shape[0], :, :]'], {}), '(output[:, 0:templates.shape[0], :, :])\n', (1255, 1294), True, 'from torch.nn import functional as nnfunc\n')] |
from sample import *
import time
import os
import functools
from pprint import pprint
from fft import Fft
from prune import *
import math
import multiprocessing
import sys
import statistics
import numpy
import csv
def runParallelTest(params):
    """Run one cross-validation fold: train an FFT forest on the train rows,
    score every tree on the held-out rows, and return the metrics of the most
    accurate tree.

    Args:
        params: [header, trainRows, testRows] — packed into one list so the
            function can be used with multiprocessing.Pool.map.

    Returns:
        [accuracy, precision, falseAlarm, recall] for the best tree.  Any
        metric whose denominator is zero is reported as 0 instead of raising
        ZeroDivisionError (the original code collapsed precision/falseAlarm/
        recall to 0 together, and could still crash recomputing accuracy).
    """
    header, trainRows, testRows = params
    Config.verbose = False
    # Build the training sample and grow the forest of fast-and-frugal trees.
    s = Sample()
    s.add(header)
    for row in trainRows:
        s.add(row)
    fft = Fft(s)
    # Clone the sample's column metadata and load the held-out test rows.
    t = s.clone()
    for row in testRows:
        t.add(row)
    treesSort = []
    for tree in fft.trees:
        # Confusion-matrix counts for this tree on the test rows.
        TP = TN = FP = FN = 0
        firstRow = True
        for row in t.rows:
            if firstRow:
                # Skip the header row of the cloned sample.
                firstRow = False
                continue
            result = row[t.y[0].at]
            # Walk the tree's leaves; the first leaf whose discretizer matches
            # (or the default leaf with no discretizer) classifies the row.
            # NOTE: the original had a second, unreachable `break` here.
            for leaf in tree:
                if not leaf.disc or leaf.disc.matches(row):
                    if leaf.typ == result:
                        if result == 1:
                            TP += 1
                        if result == 0:
                            TN += 1
                    else:
                        if result == 1:
                            FN += 1
                        if result == 0:
                            FP += 1
                    break
        treesSort.append([tree, TP, TN, FP, FN])
    # Sort ascending by accuracy = (TP+TN)/(TP+TN+FP+FN) and take the best.
    # max(1, ...) guards a tree that classified zero rows (crashed before).
    treesSort.sort(key=lambda x: (x[1] + x[2]) / max(1, x[1] + x[2] + x[3] + x[4]))
    _, TP, TN, FP, FN = treesSort[-1]
    total = TP + TN + FP + FN
    accuracy = (TP + TN) / total if total else 0
    precision = TP / (TP + FP) if (TP + FP) else 0
    falseAlarm = FP / (FP + TN) if (FP + TN) else 0
    recall = TP / (TP + FN) if (TP + FN) else 0
    return [accuracy, precision, falseAlarm, recall]
# --- Command-line setup ----------------------------------------------------
# Optional argv[1] is an index into Config.dataSets selecting the dataset.
# Guard sys.argv[1] so the script does not crash with IndexError when run
# without arguments (the original read sys.argv[1] unconditionally below).
datasetLabel = sys.argv[1] if len(sys.argv) > 1 else "default"
if len(sys.argv) > 1:
    try:
        chosenDataset = int(sys.argv[1])
        Config.dataSet = Config.dataSets[chosenDataset]
        print(Config.dataSet)
    except (ValueError, IndexError):
        # Non-numeric or out-of-range index: keep Config's default dataset.
        # (Narrowed from `except BaseException`, which also swallowed Ctrl-C.)
        print("error")
# Accumulators for the per-configuration summary rows written to CSV at the
# end of the run.  The first inner list is each table's title row.
csvOutputHeader = ["Dataset " + datasetLabel, "Mean", "Median", "StDev", "25th", "75th", "Min", "Max", "T Test"]
csvOutputTime = [["Time"]]
csvOutputAccuracy = [["Accuracy"]]
csvOutputPrecision = [["Precision"]]
csvOutputFalseAlarm = [["FalseAlarm"]]
csvOutputRecall = [["Recall"]]
def _summarize(label, arr, out, title):
    """Print *title* and the mean of *arr*, and append a summary row
    [label, mean, median, stdev, 25th pct, 75th pct, min, max] to *out*."""
    print("\n--------------%s------------" % title)
    resultMean = sum(arr) / 25
    out.append([label, resultMean, statistics.median(arr), statistics.stdev(arr),
                numpy.percentile(arr, 25), numpy.percentile(arr, 75),
                min(arr), max(arr)])
    print(resultMean)


# Evaluate the baseline ("000000") and each single improvement in isolation.
# Each character of the flag string toggles one FFT variant (1 = enabled).
for chosenImprovements in ["000000", "100000", "010000", "001000", "000100", "000010", "000001"]:
    Config.DISCLESS = chosenImprovements[0] == '1'
    Config.SHORTTREES = chosenImprovements[1] == '1'
    Config.BASEBALLTREES = chosenImprovements[2] == '1'
    Config.SPILLTREES = chosenImprovements[3] == '1'
    Config.BINARYCHOPS = chosenImprovements[4] == '1'
    Config.PRUNETREES = chosenImprovements[5] == '1'
    startTime = time.time()
    # Project root: two directory levels above this file.
    myPath = os.path.dirname(os.path.abspath(__file__))
    myPath = myPath[:myPath.rindex("/")]
    myPath = myPath[:myPath.rindex("/")]
    # Read the dataset: first row is the header, the rest are data rows.
    totalRows = 0
    headers = None
    data = []
    for i, row in enumerate(readCSV(myPath + Config.dataSet)):
        if i == 0:
            headers = row
        else:
            totalRows += 1
            data.append(row)
    # Split the data into five consecutive, near-equal folds.
    fiveSplitData = [
        data[math.floor(len(data) / 5 * i):math.floor(len(data) / 5 * (i + 1))]
        for i in range(5)
    ]
    # 5x5 cross-validation: each fold serves as the test set, repeated five
    # times, giving 25 independent runs per configuration.
    inputs = []
    for _ in range(5):
        for j in range(5):
            seperateTest = list(fiveSplitData[j])
            seperateData = []
            for k in range(5):
                if k != j:
                    seperateData.extend(fiveSplitData[k])
            inputs.append([headers, seperateData, seperateTest])
    # BUG FIX: the original created a default Pool and immediately leaked it
    # by rebinding the name, and never closed either pool.  One pool, closed
    # via the context manager, is enough.
    with multiprocessing.Pool(processes=25) as pool:
        outputs = pool.map(runParallelTest, inputs)
    print("\n\n\n\n", chosenImprovements, "\n\n")
    print("\n--------------Time------------")
    timeAvg = (time.time() - startTime) / 25
    csvOutputTime.append([chosenImprovements, timeAvg])
    print(timeAvg)
    # outputs rows are [accuracy, precision, falseAlarm, recall].
    _summarize(chosenImprovements, [o[0] for o in outputs], csvOutputAccuracy, "ACCURACY")
    _summarize(chosenImprovements, [o[1] for o in outputs], csvOutputPrecision, "PRECISION")
    _summarize(chosenImprovements, [o[2] for o in outputs], csvOutputFalseAlarm, "FALSE ALARM")
    _summarize(chosenImprovements, [o[3] for o in outputs], csvOutputRecall, "RECALL")
# --- Write all summary tables into one CSV per dataset ---------------------
# Guard sys.argv[1] (the original crashed with IndexError when run without
# arguments).  Mode 'w' replaces the pointless 'w+', and newline='' is the
# csv-module-documented way to avoid blank rows on Windows.
outName = sys.argv[1] if len(sys.argv) > 1 else "default"
with open(myPath + "/testOutput/Dataset" + outName + ".csv", 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile)
    # Title row, then one table per metric (each table carries its own label
    # row from its accumulator's first element).
    csvwriter.writerow(csvOutputHeader)
    csvwriter.writerows(csvOutputTime)
    csvwriter.writerows(csvOutputAccuracy)
    csvwriter.writerows(csvOutputPrecision)
    csvwriter.writerows(csvOutputFalseAlarm)
    csvwriter.writerows(csvOutputRecall)
print("\n\n\n\n--------------CODE FINISHED--------------")
"fft.Fft",
"statistics.stdev",
"csv.writer",
"statistics.median",
"multiprocessing.Pool",
"os.path.abspath",
"numpy.percentile",
"time.time"
] | [((355, 366), 'time.time', 'time.time', ([], {}), '()\n', (364, 366), False, 'import time\n'), ((473, 479), 'fft.Fft', 'Fft', (['s'], {}), '(s)\n', (476, 479), False, 'from fft import Fft\n'), ((2906, 2917), 'time.time', 'time.time', ([], {}), '()\n', (2915, 2917), False, 'import time\n'), ((3540, 3562), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (3560, 3562), False, 'import multiprocessing\n'), ((3573, 3607), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': '(25)'}), '(processes=25)\n', (3593, 3607), False, 'import multiprocessing\n'), ((4375, 4397), 'statistics.median', 'statistics.median', (['arr'], {}), '(arr)\n', (4392, 4397), False, 'import statistics\n'), ((4415, 4436), 'statistics.stdev', 'statistics.stdev', (['arr'], {}), '(arr)\n', (4431, 4436), False, 'import statistics\n'), ((4451, 4476), 'numpy.percentile', 'numpy.percentile', (['arr', '(25)'], {}), '(arr, 25)\n', (4467, 4476), False, 'import numpy\n'), ((4491, 4516), 'numpy.percentile', 'numpy.percentile', (['arr', '(75)'], {}), '(arr, 75)\n', (4507, 4516), False, 'import numpy\n'), ((4859, 4881), 'statistics.median', 'statistics.median', (['arr'], {}), '(arr)\n', (4876, 4881), False, 'import statistics\n'), ((4899, 4920), 'statistics.stdev', 'statistics.stdev', (['arr'], {}), '(arr)\n', (4915, 4920), False, 'import statistics\n'), ((4935, 4960), 'numpy.percentile', 'numpy.percentile', (['arr', '(25)'], {}), '(arr, 25)\n', (4951, 4960), False, 'import numpy\n'), ((4975, 5000), 'numpy.percentile', 'numpy.percentile', (['arr', '(75)'], {}), '(arr, 75)\n', (4991, 5000), False, 'import numpy\n'), ((5346, 5368), 'statistics.median', 'statistics.median', (['arr'], {}), '(arr)\n', (5363, 5368), False, 'import statistics\n'), ((5386, 5407), 'statistics.stdev', 'statistics.stdev', (['arr'], {}), '(arr)\n', (5402, 5407), False, 'import statistics\n'), ((5422, 5447), 'numpy.percentile', 'numpy.percentile', (['arr', '(25)'], {}), '(arr, 25)\n', (5438, 5447), False, 
'import numpy\n'), ((5462, 5487), 'numpy.percentile', 'numpy.percentile', (['arr', '(75)'], {}), '(arr, 75)\n', (5478, 5487), False, 'import numpy\n'), ((5829, 5851), 'statistics.median', 'statistics.median', (['arr'], {}), '(arr)\n', (5846, 5851), False, 'import statistics\n'), ((5869, 5890), 'statistics.stdev', 'statistics.stdev', (['arr'], {}), '(arr)\n', (5885, 5890), False, 'import statistics\n'), ((5905, 5930), 'numpy.percentile', 'numpy.percentile', (['arr', '(25)'], {}), '(arr, 25)\n', (5921, 5930), False, 'import numpy\n'), ((5945, 5970), 'numpy.percentile', 'numpy.percentile', (['arr', '(75)'], {}), '(arr, 75)\n', (5961, 5970), False, 'import numpy\n'), ((6321, 6340), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (6331, 6340), False, 'import csv\n'), ((2948, 2973), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2963, 2973), False, 'import os\n'), ((4133, 4144), 'time.time', 'time.time', ([], {}), '()\n', (4142, 4144), False, 'import time\n'), ((1798, 1809), 'time.time', 'time.time', ([], {}), '()\n', (1807, 1809), False, 'import time\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.