code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
'''Utility functions for plots.'''
import glob
import natsort
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import gridspec
from scipy.signal import medfilt
from nilearn import plotting as ni_plt
from pynwb import NWBHDF5IO
from dandi.dandiapi import DandiAPIClient
from ndx_events import LabeledEvents, AnnotatedEventsTable, Events
from nwbwidgets.utils.timeseries import align_by_times, timeseries_time_to_ind
def prune_clabels(clabels_orig, targeted=False,
                  targ_tlims=(13, 17), first_val=True,
                  targ_label='Eat'):
    '''Modify coarse behavior labels based on whether
    looking at the whole day (targeted=False) or specific
    hours (targeted=True).

    Parameters
    ----------
    clabels_orig : pandas.DataFrame
        Coarse labels with 'labels', 'start_time', 'stop_time' columns
        and a default integer index.
    targeted : bool
        If False, collapse labels into 'Blocklist'/'Active'/original
        inactive labels. If True, keep only targ_label (rows whose
        comma-separated label list contains it) and mark everything
        else 'Blocklist', then restrict to the targ_tlims hour window.
    targ_tlims : sequence of 2 numbers
        Start/stop hours of the targeted window (default 13-17).
        Note: the default is a tuple to avoid a shared mutable default.
    first_val : bool
        Unused; retained for backward compatibility with older callers.
    targ_label : str
        Activity label to isolate when targeted=True.

    Returns
    -------
    (clabels, uni_labs) : pruned DataFrame and np.ndarray of its
    unique label strings.
    '''
    clabels = clabels_orig.copy()
    for i in range(len(clabels_orig)):
        lab = clabels_orig.loc[i, 'labels']
        if not targeted:
            # Blocklisted or empty rows are hidden; anything that is not
            # sleep/inactivity counts as generic 'Active' time.
            if lab[:5] == 'Block' or lab == '':
                clabels.loc[i, 'labels'] = 'Blocklist'
            elif lab not in ['Sleep/rest', 'Inactive']:
                clabels.loc[i, 'labels'] = 'Active'
        else:
            # Keep only rows whose (possibly overlapping) label list
            # includes the targeted activity.
            if targ_label in lab.split(', '):
                clabels.loc[i, 'labels'] = targ_label
            else:
                clabels.loc[i, 'labels'] = 'Blocklist'
    if targeted:
        # Restrict to the requested window (hours -> seconds)
        start_val, end_val = targ_tlims[0]*3600, targ_tlims[1]*3600
        clabels = clabels[(clabels['start_time'] >= start_val) &
                          (clabels['stop_time'] <= end_val)]
        clabels.reset_index(inplace=True)
    uni_labs = np.unique(clabels['labels'].values)
    return clabels, uni_labs
def plot_clabels(clabels, uni_labs, targeted=False, first_val=True,
                 targ_tlims=[13, 17], scale_fact=1/3600,
                 bwidth=0.5, targlab_colind=0):
    '''Plot one recording day's coarse labels as a horizontal timeline,
    one colored bar segment per labeled bout.
    Note that the colors are currently pre-defined to work for
    sub-01 day 4. Returns the created matplotlib figure.'''
    # Color lookup; entries are consumed in the (sorted) order of uni_labs
    reds = plt.get_cmap('Reds')(np.linspace(0.15, 0.85, 5))
    if targeted:
        category_colors = np.array(['w', reds[targlab_colind]],
                                   dtype=object)
    else:
        category_colors = np.array([[1, 128/255, 178/255], 'dimgray',
                                    'lightgreen', 'lightskyblue'],
                                   dtype=object)
    # One barh call per unique label so each gets its own legend entry
    fig, ax = plt.subplots(figsize=(20, 2), dpi=150)
    for ind, lab in enumerate(uni_labs):
        sel_rows = np.nonzero(lab == clabels['labels'].values)[0]
        seg_starts = clabels.loc[sel_rows, 'start_time'].values
        seg_stops = clabels.loc[sel_rows, 'stop_time'].values
        seg_widths = seg_stops - seg_starts
        ax.barh(np.ones_like(seg_widths), seg_widths*scale_fact,
                left=seg_starts*scale_fact,
                height=bwidth, label=lab,
                color=category_colors[ind])
    ax.legend(ncol=len(uni_labs), bbox_to_anchor=(0, 1),
              loc='lower left', fontsize='small')
    # X axis shows clock hours: either the targeted window or the full day
    if targeted:
        plt.xlim(targ_tlims)
        hour_ticks = [int(val) for val in targ_tlims]
        plt.xticks(hour_ticks)
        ax.set_xticklabels(['{}:00'.format(hour_ticks[0]),
                            '{}:00'.format(hour_ticks[-1])])
    else:
        plt.xlim([0, 24])
        plt.xticks([0, 12, 24])
        ax.set_xticklabels(['0:00', '12:00', '0:00'])
    # Hide the y axis and all borders except the bottom spine
    ax.yaxis.set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.show()
    return fig
def clabel_table_create(common_acts, n_parts=12,
                        data_lp='/data2/users/stepeter/files_nwb/downloads/000055/'):
    '''Create table of coarse label durations (in hours) across participants.

    Parameters
    ----------
    common_acts : list of str
        Labels to include in the table (may be wrapped as 'Blocklist (...)').
    n_parts : int
        Number of participants (P01..Pnn) to tabulate.
    data_lp : str
        Local data path; unused here (data is streamed from DANDI) and
        kept only for backward compatibility.

    Returns
    -------
    pandas.DataFrame with one row per participant and one column per
    label, plus a 'Total' column of combined hours (labels that co-occur
    in one epoch are counted once toward the total).
    '''
    with DandiAPIClient() as client:
        paths = []
        for file in client.get_dandiset("000055", "draft").get_assets_under_path(''):
            paths.append(file.path)
    paths = natsort.natsorted(paths)
    vals_all = np.zeros([n_parts, len(common_acts)+1])
    for part_ind in range(n_parts):
        fids = [val for val in paths if 'sub-'+str(part_ind+1).zfill(2) in val]
        for fid in fids:
            with DandiAPIClient() as client:
                asset = client.get_dandiset("000055", "draft").get_asset_by_path(fid)
                s3_path = asset.get_content_url(follow_redirects=1, strip_query=True)
            io = NWBHDF5IO(s3_path, mode='r', load_namespaces=False, driver='ros3')
            nwb = io.read()
            curr_labels = nwb.intervals['epochs'].to_dataframe()
            durations = (curr_labels.loc[:, 'stop_time'].values -
                         curr_labels.loc[:, 'start_time'].values)
            # Add up durations of each label (seconds -> hours)
            for s, curr_act in enumerate(common_acts):
                for i, curr_label in enumerate(curr_labels['labels'].tolist()):
                    if curr_act in curr_label.split(', '):
                        vals_all[part_ind, s] += durations[i]/3600
            # Add up total durations of selected labels (avoid double counting)
            for i, curr_label in enumerate(curr_labels['labels'].tolist()):
                in_lab_grp = False
                for sub_lab in curr_label.split(', '):
                    if sub_lab in common_acts:
                        in_lab_grp = True
                vals_all[part_ind, -1] += durations[i]/3600 if in_lab_grp else 0
            io.close()
            del nwb, io
    # Strip the literal 'Blocklist (...)' wrapper from column names.
    # NOTE: the previous lstrip('Blocklist (')/rstrip(')') removed *any*
    # leading/trailing characters from those sets (lstrip takes a character
    # set, not a prefix), which could mangle labels that start or end with
    # those characters; explicit prefix/suffix slicing fixes that.
    common_acts_col = [val[len('Blocklist ('):-1]
                       if val.startswith('Blocklist (') and val.endswith(')')
                       else val
                       for val in common_acts]
    df_all = pd.DataFrame(vals_all.round(1),
                          index=['P'+str(val+1).zfill(2) for val in range(n_parts)],
                          columns=common_acts_col+['Total'])
    return df_all
def identify_elecs(group_names):
    '''Determine surface vs. depth ECoG electrodes from group names.

    Parameters
    ----------
    group_names : iterable of str
        Electrode group names (e.g. 'GRID', 'AHD', 'LTPD').

    Returns
    -------
    np.ndarray of bool, True where the electrode is classified as
    surface, False where it is a depth electrode.

    Heuristic: 'grid' anywhere means surface; a 'd' in the name means
    depth, except for a few named special cases that are surface strips.
    '''
    is_surf = []
    for label in group_names:
        low = label.lower()
        if 'grid' in low:
            is_surf.append(True)
        elif low in ['mhd', 'latd', 'lmtd', 'ltpd']:
            is_surf.append(True)  # special cases: surface despite 'd'
        # Use logical 'and' (was bitwise '&'): 'AHD' is surface only when
        # no 'PHD' group exists for this participant.
        elif low == 'ahd' and 'PHD' not in group_names:
            is_surf.append(True)  # special case
        elif 'd' in low:
            is_surf.append(False)
        else:
            is_surf.append(True)
    return np.array(is_surf)
def load_data_characteristics(nparts=12):
    '''Load data characteristics including the number of
    good and total ECoG electrodes, hemisphere implanted,
    and number of recording days for each participant.

    Streams the first NWB file of each participant from DANDI set 000055
    (ros3 driver) and summarizes its electrode table.

    Returns a list:
    [rec_days, hemis, n_elecs_surf_tot, n_elecs_surf_good,
     n_elecs_depth_tot, n_elecs_depth_good, part_nums,
     part_ids, n_elecs_good, n_elecs_tot]
    '''
    # Collect all asset paths in the dandiset once
    with DandiAPIClient() as client:
        paths = []
        for file in client.get_dandiset("000055", "draft").get_assets_under_path(''):
            paths.append(file.path)
    paths = natsort.natsorted(paths)
    n_elecs_tot, n_elecs_good = [], []
    rec_days, hemis, n_elecs_surf_tot, n_elecs_depth_tot = [], [], [], []
    n_elecs_surf_good, n_elecs_depth_good = [], []
    for part_ind in range(nparts):
        cur_t = 0  # NOTE(review): unused leftover variable
        # All files for this participant; recording days = file count
        fids = [val for val in paths if 'sub-'+str(part_ind+1).zfill(2) in val]
        rec_days.append(len(fids))
        # Only the first day's file is needed for electrode info
        for fid in fids[:1]:
            with DandiAPIClient() as client:
                asset = client.get_dandiset("000055", "draft").get_asset_by_path(fid)
                s3_path = asset.get_content_url(follow_redirects=1, strip_query=True)
            # Stream the NWB file directly from S3
            io = NWBHDF5IO(s3_path, mode='r', load_namespaces=False, driver='ros3')
            nwb = io.read()
            # Determine good/total electrodes
            n_elecs_good.append(np.sum(nwb.electrodes['good'][:]))
            n_elecs_tot.append(len(nwb.electrodes['good'][:]))
            # Determine implanted hemisphere: ReachEvents description starts
            # with the tracked wrist side; implant is contralateral to it
            c_wrist = nwb.processing['behavior'].data_interfaces['ReachEvents'].description[0]
            hemis.append('L' if c_wrist == 'r' else 'R')
            # Determine surface vs. depth electrode count
            is_surf = identify_elecs(nwb.electrodes['group_name'][:])
            n_elecs_surf_tot.append(np.sum(is_surf))
            n_elecs_depth_tot.append(np.sum(1-is_surf))
            n_elecs_surf_good.append(np.sum(nwb.electrodes['good'][is_surf.nonzero()[0]]))
            n_elecs_depth_good.append(np.sum(nwb.electrodes['good'][(1-is_surf).nonzero()[0]]))
            io.close()
            del nwb, io
    part_nums = [val+1 for val in range(nparts)]
    part_ids = ['P'+str(val).zfill(2) for val in part_nums]
    return [rec_days, hemis, n_elecs_surf_tot, n_elecs_surf_good,
            n_elecs_depth_tot, n_elecs_depth_good, part_nums,
            part_ids, n_elecs_good, n_elecs_tot]
def plot_ecog_descript(n_elecs_tot, n_elecs_good, part_ids,
                       nparts=12, allLH=False, nrows=3,
                       chan_labels='all', width=7, height=3):
    '''Plot ECoG electrode positions and identified noisy
    electrodes side by side.

    One brain-plot subplot per participant, arranged on an
    nrows x (nparts//nrows) grid. The n_elecs_tot/n_elecs_good/part_ids
    arguments feed the (currently commented-out) bar-chart panel and are
    otherwise unused. Streams each participant's first NWB file from
    DANDI set 000055. Returns the matplotlib figure.
    '''
    # Collect all asset paths in the dandiset once
    with DandiAPIClient() as client:
        paths = []
        for file in client.get_dandiset("000055", "draft").get_assets_under_path(''):
            paths.append(file.path)
    paths = natsort.natsorted(paths)
    fig = plt.figure(figsize=(width*3, height*3), dpi=150)
    # First subplot: electrode locations
    ncols = nparts//nrows
    gs = gridspec.GridSpec(nrows=nrows,
                           ncols=ncols, #+2,
                           figure=fig,
                           width_ratios= [width/ncols]*ncols, #[width/ncols/2]*ncols+[width/10, 4*width/10],
                           height_ratios= [height/nrows]*nrows,
                           wspace=0, hspace=-.5
                           )
    ax = [None]*(nparts) #+1)
    for part_ind in range(nparts):
        # Load NWB data file (first recording day only)
        fids = [val for val in paths if 'sub-'+str(part_ind+1).zfill(2) in val]
        with DandiAPIClient() as client:
            asset = client.get_dandiset("000055", "draft").get_asset_by_path(fids[0])
            s3_path = asset.get_content_url(follow_redirects=1, strip_query=True)
        io = NWBHDF5IO(s3_path, mode='r', load_namespaces=False, driver='ros3')
        nwb = io.read()
        # Determine hemisphere to display from the mean electrode x position
        if allLH:
            sides_2_display = 'l'
        else:
            average_xpos_sign = np.nanmean(nwb.electrodes['x'][:])
            sides_2_display = 'r' if average_xpos_sign > 0 else 'l'
        # Run electrode plotting function
        ax[part_ind] = fig.add_subplot(gs[part_ind//ncols, part_ind%ncols])
        plot_ecog_electrodes_mni_from_nwb_file(nwb, chan_labels, num_grid_chans=64, node_size=50,
                                               colors='silver', alpha=.9, sides_2_display=sides_2_display,
                                               node_edge_colors='k', edge_linewidths=1.5,
                                               ax_in=ax[part_ind], allLH=allLH)
        io.close()
        del nwb, io
    #     ax[part_ind].text(-0.2,0.1,'P'+str(part_ind+1).zfill(2), fontsize=8)
    # fig.text(0.1, 0.91, '(a) ECoG electrode positions', fontsize=10)
    # Second subplot: noisy electrodes per participant
    # ax[-1] = fig.add_subplot(gs[:, -1])
    # ax[-1].bar(part_ids,n_elecs_tot,color='lightgrey')
    # ax[-1].bar(part_ids,n_elecs_good,color='dimgrey')
    # ax[-1].spines['right'].set_visible(False)
    # ax[-1].spines['top'].set_visible(False)
    # ax[-1].set_xticklabels(part_ids, rotation=45)
    # ax[-1].legend(['Total','Good'], frameon=False, fontsize=8)
    # ax[-1].tick_params(labelsize=9)
    # ax[-1].set_ylabel('Number of electrodes', fontsize=9, labelpad=0)
    # ax[-1].set_title('(b) Total/good electrodes per participant',
    #                  fontsize=10)
    plt.show()
    return fig
def plot_ecog_electrodes_mni_from_nwb_file(nwb_dat,chan_labels='all',num_grid_chans=64,colors=None,node_size=50,
                                           figsize=(16,6),sides_2_display='auto',node_edge_colors=None,
                                           alpha=0.5,edge_linewidths=3,ax_in=None,rem_zero_chans=False,
                                           allLH=False,zero_rem_thresh=.99,elec_col_suppl=None):
    """
    Plots ECoG electrodes from MNI coordinate file (only for specified labels)
    NOTE: If running in Jupyter, use '%matplotlib inline' instead of '%matplotlib notebook'

    chan_labels: 'all' (every electrode), 'allgood' (drop electrodes marked
        bad in the 'good' column), or an explicit index into the electrode table.
    colors: per-electrode color list (filtered in lockstep as electrodes are
        dropped below) or a single color/None.
    elec_col_suppl: optional replacement color list; entries for dropped
        electrodes are deleted from it IN PLACE so it stays aligned.
    """
    #Load channel locations
    chan_info = nwb_dat.electrodes.to_dataframe()
    #Create dataframe for electrode locations
    if chan_labels== 'all':
        locs = chan_info.loc[:,['x','y','z']]
    elif chan_labels== 'allgood':
        locs = chan_info.loc[:,['x','y','z','good']]
    else:
        locs = chan_info.loc[chan_labels,['x','y','z']]
    if (colors is not None):
        # Truncate locations to the number of supplied colors
        if (locs.shape[0]>len(colors)) & isinstance(colors, list):
            locs = locs.iloc[:len(colors),:]
    # locs.rename(columns={'X':'x','Y':'y','Z':'z'}, inplace=True)
    chan_loc_x = chan_info.loc[:,'x'].values
    #Remove NaN electrode locations (no location info)
    nan_drop_inds = np.nonzero(np.isnan(chan_loc_x))[0]
    locs.dropna(axis=0,inplace=True) #remove NaN locations
    if (colors is not None) & isinstance(colors, list):
        # Keep colors aligned with the rows that survived dropna
        colors_new,loc_inds_2_drop = [],[]
        for s,val in enumerate(colors):
            if not (s in nan_drop_inds):
                colors_new.append(val)
            else:
                loc_inds_2_drop.append(s)
        colors = colors_new.copy()
        if elec_col_suppl is not None:
            loc_inds_2_drop.reverse() #go from high to low values
            for val in loc_inds_2_drop:
                del elec_col_suppl[val]
    if chan_labels=='allgood':
        # Drop electrodes flagged as bad, again keeping colors aligned
        goodChanInds = chan_info.loc[:,'good',:]
        inds2drop = np.nonzero(locs['good']==0)[0]
        locs.drop(columns=['good'],inplace=True)
        locs.drop(locs.index[inds2drop],inplace=True)
        if colors is not None:
            colors_new,loc_inds_2_drop = [],[]
            for s,val in enumerate(colors):
                if not (s in inds2drop):
                    # np.all(s!=inds2drop):
                    colors_new.append(val)
                else:
                    loc_inds_2_drop.append(s)
            colors = colors_new.copy()
            if elec_col_suppl is not None:
                loc_inds_2_drop.reverse() #go from high to low values
                for val in loc_inds_2_drop:
                    del elec_col_suppl[val]
    if rem_zero_chans:
        #Remove channels with zero values (white colors)
        colors_new,loc_inds_2_drop = [],[]
        for s,val in enumerate(colors):
            if np.mean(val)<zero_rem_thresh:
                colors_new.append(val)
            else:
                loc_inds_2_drop.append(s)
        colors = colors_new.copy()
        locs.drop(locs.index[loc_inds_2_drop],inplace=True)
        if elec_col_suppl is not None:
            loc_inds_2_drop.reverse() #go from high to low values
            for val in loc_inds_2_drop:
                del elec_col_suppl[val]
    #Decide whether to plot L or R hemisphere based on x coordinates
    if len(sides_2_display)>1:
        # Multi-view display: helper builds the subplot layout
        # (NOTE(review): _setup_subplot_view is defined elsewhere in the project)
        N,axes,sides_2_display = _setup_subplot_view(locs,sides_2_display,figsize)
    else:
        N = 1
        axes = ax_in
    if allLH:
        # Mirror right-hemisphere electrodes onto the left hemisphere
        average_xpos_sign = np.mean(np.asarray(locs['x']))
        if average_xpos_sign>0:
            locs['x'] = -locs['x']
        sides_2_display ='l'
    if colors is None:
        colors = list()
    #Label strips/depths differently for easier visualization (or use defined color list)
    if len(colors)==0:
        for s in range(locs.shape[0]):
            if s>=num_grid_chans:
                colors.append('r')
            else:
                colors.append('b')
    if elec_col_suppl is not None:
        colors = elec_col_suppl.copy()
    #Rearrange to plot non-grid electrode first
    if num_grid_chans>0: #isinstance(colors, list):
        locs2 = locs.copy()
        locs2['x'] = np.concatenate((locs['x'][num_grid_chans:],locs['x'][:num_grid_chans]),axis=0)
        locs2['y'] = np.concatenate((locs['y'][num_grid_chans:],locs['y'][:num_grid_chans]),axis=0)
        locs2['z'] = np.concatenate((locs['z'][num_grid_chans:],locs['z'][:num_grid_chans]),axis=0)
        if isinstance(colors, list):
            colors2 = colors.copy()
            colors2 = colors[num_grid_chans:]+colors[:num_grid_chans]
        else:
            colors2 = colors
    else:
        locs2 = locs.copy()
        if isinstance(colors, list):
            colors2 = colors.copy()
        else:
            colors2 = colors #[colors for i in range(locs2.shape[0])]
    #Plot the result
    _plot_electrodes(locs2,node_size,colors2,axes,sides_2_display,
                     N,node_edge_colors,alpha,edge_linewidths)
def _plot_electrodes(locs, node_size, colors, axes, sides_2_display, N, node_edge_colors,
                     alpha, edge_linewidths, marker='o'):
    """
    Handles plotting of electrodes.

    Renders the electrode positions in `locs` via nilearn's
    plot_connectome, using an identity adjacency matrix so no
    connections are drawn. Dispatches on the number of views (N) and
    the display-mode string(s) in sides_2_display.
    """
    # Shared per-node styling and a no-edges adjacency matrix
    node_kwargs = {'alpha': alpha, 'edgecolors': node_edge_colors,
                   'linewidths': edge_linewidths, 'marker': marker}
    adjacency = np.eye(locs.shape[0])
    if N == 1:
        # Single view on the caller-supplied axes
        ni_plt.plot_connectome(adjacency, locs, output_file=None,
                               node_kwargs=node_kwargs,
                               node_size=node_size, node_color=colors,
                               axes=axes, display_mode=sides_2_display)
    elif sides_2_display == 'yrz' or sides_2_display == 'ylz':
        # Unequal column spans keep the sagittal view a similar size
        # to the other two slices
        colspans = [5, 6, 5]
        total_colspans = int(np.sum(np.asarray(colspans)))
        current_col = 0
        for ind, colspan in enumerate(colspans):
            axes[ind] = plt.subplot2grid((1, total_colspans), (0, current_col),
                                         colspan=colspan, rowspan=1)
            ni_plt.plot_connectome(adjacency, locs, output_file=None,
                                   node_kwargs=node_kwargs,
                                   node_size=node_size, node_color=colors,
                                   axes=axes[ind],
                                   display_mode=sides_2_display[ind])
            current_col += colspan
    else:
        # One view per entry in sides_2_display
        for view_ind in range(N):
            ni_plt.plot_connectome(adjacency, locs, output_file=None,
                                   node_kwargs=node_kwargs,
                                   node_size=node_size, node_color=colors,
                                   axes=axes[view_ind],
                                   display_mode=sides_2_display[view_ind])
def plot_ecog_pow(lp, rois_plt, freq_range, sbplt_titles,
                  part_id='P01', n_parts=12, nrows=2, ncols=4,
                  figsize=(7,4)):
    '''Plot ECoG projected spectral power.

    Builds an nrows x ncols grid: row 0 shows group-level power via
    _ecog_pow_group, row 1 shows a single participant via
    _ecog_pow_single. Power spectra are read from .npy files under lp,
    one per participant/ROI. Displays the figure; returns nothing.
    '''
    fig, ax = plt.subplots(nrows, ncols, figsize=figsize, dpi=150)
    # Plot projected power for all participants
    fig, ax = _ecog_pow_group(fig, ax, lp, rois_plt, freq_range, sbplt_titles,
                              n_parts, nrows, ncols, row_ind=0)
    # Plot projected power for 1 participant
    fig, ax = _ecog_pow_single(fig, ax, lp, rois_plt, freq_range, sbplt_titles,
                               n_parts, nrows, ncols, row_ind=1,
                               part_id=part_id)
    fig.tight_layout()
    plt.show()
def _ecog_pow_group(fig, ax, lp, rois_plt, freq_range, sbplt_titles,
                    n_parts=12, nrows=2, ncols=4, row_ind=0):
    '''Plot projected power for all participants.

    For each ROI in rois_plt, loads '<lp>Pxx_<roi>.npy' per participant
    (windows x frequencies), converts to dB, and draws one mean +/- sd
    line per participant into row `row_ind` of `ax`.
    Returns the (fig, ax) pair for chaining.
    '''
    freqs_vals = np.arange(freq_range[0],freq_range[1]+1).tolist()
    fig.subplots_adjust(hspace=0.5)
    fig.subplots_adjust(wspace=0.1)
    power, freqs, parts = [], [], []
    n_wins_sbj = []
    for k,roi in enumerate(rois_plt):
        # Flatten all windows of all participants into long-form lists
        power_roi, freqs_roi, parts_roi = [], [], []
        for j in range(n_parts):
            dat = np.load(lp+'P'+str(j+1).zfill(2)+'_'+roi+'.npy')
            dat = 10*np.log10(dat)  # power -> dB
            for i in range(dat.shape[0]):
                power_roi.extend(dat[i,:].tolist())
                freqs_roi.extend(freqs_vals)
                parts_roi.extend(['P'+str(j+1).zfill(2)]*len(freqs_vals))
            if k==0:
                n_wins_sbj.append(dat.shape[0])
        power.extend(power_roi)
        freqs.extend(freqs_roi)
        parts.extend(parts_roi)
        parts_uni = np.unique(np.asarray(parts_roi))[::-1].tolist()
        df_roi = pd.DataFrame({'Power': power_roi, 'Freqs': freqs_roi, 'Parts': parts_roi})
        col = k%ncols
        ax_curr = ax[row_ind,col] if nrows > 1 else ax[col]
        leg = False # 'brief' if k==3 else False
        # One grey line (mean +/- sd band) per participant
        sns.lineplot(data=df_roi, x="Freqs", y="Power", hue="Parts",
                     ax=ax_curr, ci='sd', legend=leg, palette=['darkgray']*len(parts_uni),
                     hue_order=parts_uni) # palette='Blues'
        # ax_curr.set_xscale('log')
        ax_curr.set_xlim(freq_range)
        ax_curr.set_ylim([-20,30])
        ax_curr.spines['right'].set_visible(False)
        ax_curr.spines['top'].set_visible(False)
        ax_curr.set_xlim(freq_range)
        ax_curr.set_xticks([freq_range[0]]+np.arange(20,101,20).tolist()+[freq_range[1]])
        ylab = '' # '' if k%ncols > 0 else 'Power\n(dB)' # 10log(uV^2)
        xlab = '' # 'Frequency (Hz)' if k//ncols==(nrows-1) else ''
        ax_curr.set_ylabel(ylab, rotation=0, labelpad=15, fontsize=9)
        ax_curr.set_xlabel(xlab, fontsize=9)
        if k%ncols > 0:
            # Blank the y tick labels on all but the first column
            l_yticks = len(ax_curr.get_yticklabels())
            ax_curr.set_yticks(ax_curr.get_yticks().tolist())
            ax_curr.set_yticklabels(['']*l_yticks)
        ax_curr.tick_params(axis='both', which='major', labelsize=8)
        ax_curr.set_title(sbplt_titles[k], fontsize=9)
    return fig, ax
def _ecog_pow_single(fig, ax, lp, rois_plt, freq_range, sbplt_titles,
                     n_parts=12, nrows=2, ncols=4, row_ind=1, part_id='P01'):
    '''Plot projected power for a single participant.

    For each ROI in rois_plt, loads '<lp><part_id>_<roi>.npy'
    (windows x frequencies), converts to dB, and draws one thin grey
    line per time window into row `row_ind` of `ax`.

    Bug fix: the body previously hard-coded ``part_id = 'P01'``,
    silently ignoring the part_id argument; the override is removed so
    the requested participant is actually plotted (the default is
    unchanged, so existing callers see identical output).

    Returns the (fig, ax) pair for chaining.
    '''
    freqs_vals = np.arange(freq_range[0], freq_range[1]+1).tolist()
    power, freqs, parts = [], [], []
    n_wins_sbj = []
    for k, roi in enumerate(rois_plt):
        # Flatten all windows into long-form lists; 'Parts' holds the
        # window index so each window gets its own line
        power_roi, freqs_roi, parts_roi = [], [], []
        dat = np.load(lp+part_id+'_'+roi+'.npy')
        dat = 10*np.log10(dat)  # power -> dB
        for i in range(dat.shape[0]):
            power_roi.extend(dat[i, :].tolist())
            freqs_roi.extend(freqs_vals)
            parts_roi.extend([i]*len(freqs_vals))
        if k == 0:
            n_wins_sbj.append(dat.shape[0])
        power.extend(power_roi)
        freqs.extend(freqs_roi)
        parts.extend(parts_roi)
        parts_uni = np.unique(np.asarray(parts_roi))[::-1].tolist()
        df_roi = pd.DataFrame({'Power': power_roi, 'Freqs': freqs_roi, 'Parts': parts_roi})
        col = k % ncols
        ax_curr = ax[row_ind, col] if nrows > 1 else ax[col]
        leg = False  # 'brief' if k==3 else False
        sns.lineplot(data=df_roi, x="Freqs", y="Power", hue="Parts",
                     ax=ax_curr, ci=None, legend=leg, palette=['darkgray']*len(parts_uni),
                     hue_order=parts_uni, linewidth=0.2)  # palette='Blues'
        ax_curr.set_xlim(freq_range)
        ax_curr.set_ylim([-20, 30])
        ax_curr.spines['right'].set_visible(False)
        ax_curr.spines['top'].set_visible(False)
        ax_curr.set_xticks([freq_range[0]]+np.arange(20, 101, 20).tolist()+[freq_range[1]])
        # Axis labels intentionally blank (shared labels handled elsewhere)
        ylab = ''  # '' if k%ncols > 0 else 'Power\n(dB)' # 10log(uV^2)
        xlab = ''  # 'Frequency (Hz)' if k//ncols==(nrows-1) else ''
        ax_curr.set_ylabel(ylab, rotation=0, labelpad=15, fontsize=9)
        ax_curr.set_xlabel(xlab, fontsize=9)
        if k % ncols > 0:
            # Blank the y tick labels on all but the first column
            l_yticks = len(ax_curr.get_yticklabels())
            ax_curr.set_yticks(ax_curr.get_yticks().tolist())
            ax_curr.set_yticklabels(['']*l_yticks)
        ax_curr.tick_params(axis='both', which='major', labelsize=8)
        ax_curr.set_title(sbplt_titles[k], fontsize=9)
    return fig, ax
def plot_dlc_recon_errs(fig, ax):
    '''Plots DeepLabCut reconstruction errors on training and heldout
    images. This information is not present in the NWB files.

    Draws a per-participant bar chart onto the provided axes.'''
    # DLC reconstruction errors [train set, holdout set]
    sbj_d = {'P01': [1.45, 4.27], 'P02': [1.44, 3.58],
             'P03': [1.58, 6.95], 'P04': [1.63, 6.02],
             'P05': [1.43, 3.42], 'P06': [1.43, 6.63],
             'P07': [1.51, 5.45], 'P08': [1.84, 10.35],
             'P09': [1.4, 4.05], 'P10': [1.48, 7.59],
             'P11': [1.51, 5.45], 'P12': [1.52, 4.73]}
    train_err = [errs[0] for errs in sbj_d.values()]
    test_err = [errs[1] for errs in sbj_d.values()]
    nsbjs = len(train_err)
    sbj = ['P'+str(num+1).zfill(2) for num in range(nsbjs)]
    # Create plot. Train bars are drawn, covered by the taller holdout
    # bars, then drawn again: the repeat keeps the dark train bars visible
    # on top while the legend still picks up (train, holdout) from the
    # first two bar containers in that order.
    ax.bar(sbj, train_err, color='dimgrey')
    ax.bar(sbj, test_err, color='lightgrey')
    ax.bar(sbj, train_err, color='dimgrey')
    for side in ('right', 'top'):
        ax.spines[side].set_visible(False)
    ax.set_xticklabels(sbj, rotation=45)
    ax.legend(['Train set', 'Holdout set'], frameon=False, fontsize=8)
    ax.tick_params(labelsize=9)
    ax.set_ylabel('Reconstruction error (pixels)')
    ax.set_title('(a) Pose estimation model errors',
                 fontsize=10)
def plot_wrist_trajs(fig, ax, lp=None, base_start=-1.5, base_end=-1,
                     before=3, after=3, fs_video=30, n_parts=12):
    '''Plot contralateral wrist trajectories during move onset events.

    Loads baseline-subtracted wrist displacement via _get_wrist_trajs
    and draws one grey median line (with 95% CI band) per participant
    onto the provided axes. The lp argument is unused (data is streamed
    from DANDI inside the helper).'''
    df_pose, part_lst = _get_wrist_trajs(base_start, base_end, before, after,
                                         fs_video, n_parts)
    # Keep only the contralateral-wrist rows
    df_pose_orig = df_pose.copy()
    df_pose = df_pose_orig.loc[df_pose['Contra'] == 'contra', :]
    # Set custom color palette
    sns.set_palette(sns.color_palette(["gray"]))
    uni_sbj = np.unique(np.asarray(part_lst))
    # One median trajectory per participant
    for sbj_ind in range(n_parts):
        sbj_rows = df_pose[df_pose['Sbj'] == uni_sbj[sbj_ind]]
        sns.lineplot(x="Time", y="Displ", data=sbj_rows, ax=ax,
                     linewidth=1.5, hue='Contra', legend=False,
                     estimator=np.median, ci=95)
    # Axis limits/labels and styling
    ax.set_xlim([-0.5, 1.5])
    ax.set_ylim([0, 60])
    ax.set_xticks([-0.5, 0, 0.5, 1, 1.5])
    ax.set_ylabel('Displacement (px)', fontsize=9)
    ax.set_xlabel('Time (sec)', fontsize=9)
    sns.set_style("ticks")
    sns.despine()
    # Mark the movement-onset time
    ax.axvline(0, linewidth=1.5, color="black", linestyle="--")
    ax.set_title('(b) Contralateral wrist trajectories during move events',
                 fontsize=10)
def _get_wrist_trajs(base_start=-1.5, base_end=-1,
                     before=3, after=3, fs_video=30,
                     n_parts=12):
    '''Load in wrist trajectories around move onset events.

    Streams every NWB file of each participant from DANDI set 000055,
    epochs the contralateral and ipsilateral wrist position around each
    ReachEvents timestamp ([-before, +after] sec), computes the 2D
    displacement magnitude, interpolates/median-filters it, subtracts a
    pre-movement baseline ([base_start, base_end] sec), and stacks
    everything into one long-form DataFrame.

    Returns (df_pose, part_lst): df_pose has columns
    'Displ', 'Sbj', 'Time', 'Contra'; part_lst is the per-sample
    participant-ID list (one entry per appended row).
    '''
    # Collect all asset paths in the dandiset once
    with DandiAPIClient() as client:
        paths = []
        for file in client.get_dandiset("000055", "draft").get_assets_under_path(''):
            paths.append(file.path)
    paths = natsort.natsorted(paths)
    displ_lst, part_lst, time_lst, pose_lst = [], [], [], []
    for pat in range(n_parts):
        fids = [val for val in paths if 'sub-'+str(pat+1).zfill(2) in val]
        for i, fid in enumerate(fids):
            with DandiAPIClient() as client:
                asset = client.get_dandiset("000055", "draft").get_asset_by_path(fid)
                s3_path = asset.get_content_url(follow_redirects=1, strip_query=True)
            io = NWBHDF5IO(s3_path, mode='r', load_namespaces=False, driver='ros3')
            nwb_file = io.read()
            # Segment data: event windows are [t-before, t+after]
            events = nwb_file.processing["behavior"].data_interfaces["ReachEvents"]
            times = events.timestamps[:]
            starts = times - before
            stops = times + after
            # Get event hand label, e.g. 'r_wrist' -> 'R_Wrist'
            contra_arm = events.description
            contra_arm = map(lambda x: x.capitalize(), contra_arm.split("_"))
            contra_arm = list(contra_arm)
            contra_arm = "_".join(contra_arm)
            # Ipsilateral arm is the opposite side, same joint name
            ipsi_arm = 'R'+contra_arm[1:] if contra_arm[0] == 'L' else 'L'+contra_arm[1:]
            reach_lab = ['contra', 'ipsi']
            for k, reach_arm in enumerate([contra_arm, ipsi_arm]):
                spatial_series = nwb_file.processing["behavior"].data_interfaces["Position"][reach_arm]
                ep_dat = align_by_times(spatial_series, starts, stops)
                # 2D displacement magnitude from x/y coordinates
                ep_dat_mag = np.sqrt(np.square(ep_dat[...,0]) + np.square(ep_dat[...,1]))
                # Interpolate and median filter: pad-interpolate forward,
                # then backward (via the reversal), then median filter
                for j in range(ep_dat_mag.shape[0]):
                    df_mag = pd.DataFrame(ep_dat_mag[j,:])
                    df_mag = df_mag.interpolate(method='pad')
                    tmp_val = df_mag.values.copy().flatten() #medfilt(df_mag.values, kernel_size=31)
                    df_mag = pd.DataFrame(tmp_val[::-1])
                    df_mag = df_mag.interpolate(method='pad')
                    ep_dat_mag[j,:] = medfilt(df_mag.values.copy().flatten()[::-1], kernel_size=31)
                zero_ind = timeseries_time_to_ind(spatial_series, before)  # NOTE(review): unused
                base_start_ind = timeseries_time_to_ind(spatial_series, base_start+before)
                base_end_ind = timeseries_time_to_ind(spatial_series, base_end+before)
                n_tpoints = ep_dat_mag.shape[1]
                # Time axis in seconds relative to movement onset
                t_vals = np.arange(n_tpoints)/fs_video - before
                # Subtract baseline from position data
                for j in range(ep_dat_mag.shape[0]):
                    curr_magnitude = ep_dat_mag[j,:]
                    curr_magnitude = np.abs(curr_magnitude - \
                                     np.mean(curr_magnitude[base_start_ind:base_end_ind]))
                    curr_magnitude[np.isnan(curr_magnitude)] = 0  # zero-fill remaining NaNs
                    displ_lst.extend(curr_magnitude.tolist())
                    part_lst.extend(['P'+str(pat+1).zfill(2)]*n_tpoints)
                    time_lst.extend(t_vals.tolist())
                    pose_lst.extend([reach_lab[k]]*n_tpoints)
            io.close()
            del nwb_file, io
    df_pose = pd.DataFrame({'Displ': displ_lst, 'Sbj': part_lst,
                            'Time': time_lst, 'Contra': pose_lst})
    return df_pose, part_lst
| [
"numpy.log10",
"seaborn.set_style",
"numpy.array",
"numpy.nanmean",
"nwbwidgets.utils.timeseries.timeseries_time_to_ind",
"matplotlib.pyplot.subplot2grid",
"numpy.arange",
"numpy.mean",
"seaborn.despine",
"seaborn.color_palette",
"numpy.asarray",
"matplotlib.gridspec.GridSpec",
"numpy.linspa... | [((2259, 2294), 'numpy.unique', 'np.unique', (["clabels['labels'].values"], {}), "(clabels['labels'].values)\n", (2268, 2294), True, 'import numpy as np\n'), ((3589, 3627), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 2)', 'dpi': '(150)'}), '(figsize=(20, 2), dpi=150)\n', (3601, 3627), True, 'import matplotlib.pyplot as plt\n'), ((4892, 4902), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4900, 4902), True, 'import matplotlib.pyplot as plt\n'), ((5380, 5404), 'natsort.natsorted', 'natsort.natsorted', (['paths'], {}), '(paths)\n', (5397, 5404), False, 'import natsort\n'), ((7741, 7758), 'numpy.array', 'np.array', (['is_surf'], {}), '(is_surf)\n', (7749, 7758), True, 'import numpy as np\n'), ((8166, 8190), 'natsort.natsorted', 'natsort.natsorted', (['paths'], {}), '(paths)\n', (8183, 8190), False, 'import natsort\n'), ((10486, 10510), 'natsort.natsorted', 'natsort.natsorted', (['paths'], {}), '(paths)\n', (10503, 10510), False, 'import natsort\n'), ((10526, 10578), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width * 3, height * 3)', 'dpi': '(150)'}), '(figsize=(width * 3, height * 3), dpi=150)\n', (10536, 10578), True, 'import matplotlib.pyplot as plt\n'), ((10651, 10815), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'figure': 'fig', 'width_ratios': '([width / ncols] * ncols)', 'height_ratios': '([height / nrows] * nrows)', 'wspace': '(0)', 'hspace': '(-0.5)'}), '(nrows=nrows, ncols=ncols, figure=fig, width_ratios=[width /\n ncols] * ncols, height_ratios=[height / nrows] * nrows, wspace=0,\n hspace=-0.5)\n', (10668, 10815), False, 'from matplotlib import gridspec\n'), ((13075, 13085), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13083, 13085), True, 'import matplotlib.pyplot as plt\n'), ((20369, 20421), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrows', 'ncols'], {'figsize': 'figsize', 'dpi': '(150)'}), '(nrows, ncols, 
figsize=figsize, dpi=150)\n', (20381, 20421), True, 'import matplotlib.pyplot as plt\n'), ((20885, 20895), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20893, 20895), True, 'import matplotlib.pyplot as plt\n'), ((27897, 27919), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (27910, 27919), True, 'import seaborn as sns\n'), ((27924, 27937), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (27935, 27937), True, 'import seaborn as sns\n'), ((28501, 28525), 'natsort.natsorted', 'natsort.natsorted', (['paths'], {}), '(paths)\n', (28518, 28525), False, 'import natsort\n'), ((31640, 31733), 'pandas.DataFrame', 'pd.DataFrame', (["{'Displ': displ_lst, 'Sbj': part_lst, 'Time': time_lst, 'Contra': pose_lst}"], {}), "({'Displ': displ_lst, 'Sbj': part_lst, 'Time': time_lst,\n 'Contra': pose_lst})\n", (31652, 31733), True, 'import pandas as pd\n'), ((2696, 2716), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Reds"""'], {}), "('Reds')\n", (2708, 2716), True, 'import matplotlib.pyplot as plt\n'), ((2717, 2743), 'numpy.linspace', 'np.linspace', (['(0.15)', '(0.85)', '(5)'], {}), '(0.15, 0.85, 5)\n', (2728, 2743), True, 'import numpy as np\n'), ((2788, 2843), 'numpy.array', 'np.array', (["['w', act_cols[targlab_colind]]"], {'dtype': 'object'}), "(['w', act_cols[targlab_colind]], dtype=object)\n", (2796, 2843), True, 'import numpy as np\n'), ((3374, 3470), 'numpy.array', 'np.array', (["[[1, 128 / 255, 178 / 255], 'dimgray', 'lightgreen', 'lightskyblue']"], {'dtype': 'object'}), "([[1, 128 / 255, 178 / 255], 'dimgray', 'lightgreen',\n 'lightskyblue'], dtype=object)\n", (3382, 3470), True, 'import numpy as np\n'), ((4328, 4348), 'matplotlib.pyplot.xlim', 'plt.xlim', (['targ_tlims'], {}), '(targ_tlims)\n', (4336, 4348), True, 'import matplotlib.pyplot as plt\n'), ((4415, 4441), 'matplotlib.pyplot.xticks', 'plt.xticks', (['targ_tlims_int'], {}), '(targ_tlims_int)\n', (4425, 4441), True, 'import matplotlib.pyplot as plt\n'), ((4588, 4605), 
'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 24]'], {}), '([0, 24])\n', (4596, 4605), True, 'import matplotlib.pyplot as plt\n'), ((4614, 4637), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 12, 24]'], {}), '([0, 12, 24])\n', (4624, 4637), True, 'import matplotlib.pyplot as plt\n'), ((5199, 5215), 'dandi.dandiapi.DandiAPIClient', 'DandiAPIClient', ([], {}), '()\n', (5213, 5215), False, 'from dandi.dandiapi import DandiAPIClient\n'), ((7985, 8001), 'dandi.dandiapi.DandiAPIClient', 'DandiAPIClient', ([], {}), '()\n', (7999, 8001), False, 'from dandi.dandiapi import DandiAPIClient\n'), ((10305, 10321), 'dandi.dandiapi.DandiAPIClient', 'DandiAPIClient', ([], {}), '()\n', (10319, 10321), False, 'from dandi.dandiapi import DandiAPIClient\n'), ((11415, 11481), 'pynwb.NWBHDF5IO', 'NWBHDF5IO', (['s3_path'], {'mode': '"""r"""', 'load_namespaces': '(False)', 'driver': '"""ros3"""'}), "(s3_path, mode='r', load_namespaces=False, driver='ros3')\n", (11424, 11481), False, 'from pynwb import NWBHDF5IO\n'), ((17419, 17504), 'numpy.concatenate', 'np.concatenate', (["(locs['x'][num_grid_chans:], locs['x'][:num_grid_chans])"], {'axis': '(0)'}), "((locs['x'][num_grid_chans:], locs['x'][:num_grid_chans]), axis=0\n )\n", (17433, 17504), True, 'import numpy as np\n'), ((17519, 17604), 'numpy.concatenate', 'np.concatenate', (["(locs['y'][num_grid_chans:], locs['y'][:num_grid_chans])"], {'axis': '(0)'}), "((locs['y'][num_grid_chans:], locs['y'][:num_grid_chans]), axis=0\n )\n", (17533, 17604), True, 'import numpy as np\n'), ((17619, 17704), 'numpy.concatenate', 'np.concatenate', (["(locs['z'][num_grid_chans:], locs['z'][:num_grid_chans])"], {'axis': '(0)'}), "((locs['z'][num_grid_chans:], locs['z'][:num_grid_chans]), axis=0\n )\n", (17633, 17704), True, 'import numpy as np\n'), ((21972, 22046), 'pandas.DataFrame', 'pd.DataFrame', (["{'Power': power_roi, 'Freqs': freqs_roi, 'Parts': parts_roi}"], {}), "({'Power': power_roi, 'Freqs': freqs_roi, 'Parts': parts_roi})\n", (21984, 22046), 
True, 'import pandas as pd\n'), ((23781, 23823), 'numpy.load', 'np.load', (["(lp + part_id + '_' + roi + '.npy')"], {}), "(lp + part_id + '_' + roi + '.npy')\n", (23788, 23823), True, 'import numpy as np\n'), ((24267, 24341), 'pandas.DataFrame', 'pd.DataFrame', (["{'Power': power_roi, 'Freqs': freqs_roi, 'Parts': parts_roi}"], {}), "({'Power': power_roi, 'Freqs': freqs_roi, 'Parts': parts_roi})\n", (24279, 24341), True, 'import pandas as pd\n'), ((27402, 27429), 'seaborn.color_palette', 'sns.color_palette', (["['gray']"], {}), "(['gray'])\n", (27419, 27429), True, 'import seaborn as sns\n'), ((27456, 27476), 'numpy.asarray', 'np.asarray', (['part_lst'], {}), '(part_lst)\n', (27466, 27476), True, 'import numpy as np\n'), ((27516, 27681), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""Time"""', 'y': '"""Displ"""', 'data': "df_pose[df_pose['Sbj'] == uni_sbj[j]]", 'ax': 'ax', 'linewidth': '(1.5)', 'hue': '"""Contra"""', 'legend': '(False)', 'estimator': 'np.median', 'ci': '(95)'}), "(x='Time', y='Displ', data=df_pose[df_pose['Sbj'] == uni_sbj[j]\n ], ax=ax, linewidth=1.5, hue='Contra', legend=False, estimator=np.\n median, ci=95)\n", (27528, 27681), True, 'import seaborn as sns\n'), ((28320, 28336), 'dandi.dandiapi.DandiAPIClient', 'DandiAPIClient', ([], {}), '()\n', (28334, 28336), False, 'from dandi.dandiapi import DandiAPIClient\n'), ((3682, 3733), 'numpy.nonzero', 'np.nonzero', (["(uni_labs[i] == clabels['labels'].values)"], {}), "(uni_labs[i] == clabels['labels'].values)\n", (3692, 3733), True, 'import numpy as np\n'), ((3931, 3955), 'numpy.ones_like', 'np.ones_like', (['lab_widths'], {}), '(lab_widths)\n', (3943, 3955), True, 'import numpy as np\n'), ((5840, 5906), 'pynwb.NWBHDF5IO', 'NWBHDF5IO', (['s3_path'], {'mode': '"""r"""', 'load_namespaces': '(False)', 'driver': '"""ros3"""'}), "(s3_path, mode='r', load_namespaces=False, driver='ros3')\n", (5849, 5906), False, 'from pynwb import NWBHDF5IO\n'), ((8791, 8857), 'pynwb.NWBHDF5IO', 'NWBHDF5IO', 
(['s3_path'], {'mode': '"""r"""', 'load_namespaces': '(False)', 'driver': '"""ros3"""'}), "(s3_path, mode='r', load_namespaces=False, driver='ros3')\n", (8800, 8857), False, 'from pynwb import NWBHDF5IO\n'), ((11206, 11222), 'dandi.dandiapi.DandiAPIClient', 'DandiAPIClient', ([], {}), '()\n', (11220, 11222), False, 'from dandi.dandiapi import DandiAPIClient\n'), ((11646, 11680), 'numpy.nanmean', 'np.nanmean', (["nwb.electrodes['x'][:]"], {}), "(nwb.electrodes['x'][:])\n", (11656, 11680), True, 'import numpy as np\n'), ((14409, 14429), 'numpy.isnan', 'np.isnan', (['chan_loc_x'], {}), '(chan_loc_x)\n', (14417, 14429), True, 'import numpy as np\n'), ((15106, 15135), 'numpy.nonzero', 'np.nonzero', (["(locs['good'] == 0)"], {}), "(locs['good'] == 0)\n", (15116, 15135), True, 'import numpy as np\n'), ((18482, 18503), 'numpy.eye', 'np.eye', (['locs.shape[0]'], {}), '(locs.shape[0])\n', (18488, 18503), True, 'import numpy as np\n'), ((21103, 21146), 'numpy.arange', 'np.arange', (['freq_range[0]', '(freq_range[1] + 1)'], {}), '(freq_range[0], freq_range[1] + 1)\n', (21112, 21146), True, 'import numpy as np\n'), ((23568, 23611), 'numpy.arange', 'np.arange', (['freq_range[0]', '(freq_range[1] + 1)'], {}), '(freq_range[0], freq_range[1] + 1)\n', (23577, 23611), True, 'import numpy as np\n'), ((23833, 23846), 'numpy.log10', 'np.log10', (['dat'], {}), '(dat)\n', (23841, 23846), True, 'import numpy as np\n'), ((28971, 29037), 'pynwb.NWBHDF5IO', 'NWBHDF5IO', (['s3_path'], {'mode': '"""r"""', 'load_namespaces': '(False)', 'driver': '"""ros3"""'}), "(s3_path, mode='r', load_namespaces=False, driver='ros3')\n", (28980, 29037), False, 'from pynwb import NWBHDF5IO\n'), ((5623, 5639), 'dandi.dandiapi.DandiAPIClient', 'DandiAPIClient', ([], {}), '()\n', (5637, 5639), False, 'from dandi.dandiapi import DandiAPIClient\n'), ((8574, 8590), 'dandi.dandiapi.DandiAPIClient', 'DandiAPIClient', ([], {}), '()\n', (8588, 8590), False, 'from dandi.dandiapi import DandiAPIClient\n'), ((8965, 8998), 
'numpy.sum', 'np.sum', (["nwb.electrodes['good'][:]"], {}), "(nwb.electrodes['good'][:])\n", (8971, 8998), True, 'import numpy as np\n'), ((9426, 9441), 'numpy.sum', 'np.sum', (['is_surf'], {}), '(is_surf)\n', (9432, 9441), True, 'import numpy as np\n'), ((9480, 9499), 'numpy.sum', 'np.sum', (['(1 - is_surf)'], {}), '(1 - is_surf)\n', (9486, 9499), True, 'import numpy as np\n'), ((16003, 16015), 'numpy.mean', 'np.mean', (['val'], {}), '(val)\n', (16010, 16015), True, 'import numpy as np\n'), ((16712, 16733), 'numpy.asarray', 'np.asarray', (["locs['x']"], {}), "(locs['x'])\n", (16722, 16733), True, 'import numpy as np\n'), ((19136, 19223), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, total_colspans)', '(0, current_col)'], {'colspan': 'colspan', 'rowspan': '(1)'}), '((1, total_colspans), (0, current_col), colspan=colspan,\n rowspan=1)\n', (19152, 19223), True, 'import matplotlib.pyplot as plt\n'), ((21494, 21507), 'numpy.log10', 'np.log10', (['dat'], {}), '(dat)\n', (21502, 21507), True, 'import numpy as np\n'), ((28754, 28770), 'dandi.dandiapi.DandiAPIClient', 'DandiAPIClient', ([], {}), '()\n', (28768, 28770), False, 'from dandi.dandiapi import DandiAPIClient\n'), ((29870, 29915), 'nwbwidgets.utils.timeseries.align_by_times', 'align_by_times', (['spatial_series', 'starts', 'stops'], {}), '(spatial_series, starts, stops)\n', (29884, 29915), False, 'from nwbwidgets.utils.timeseries import align_by_times, timeseries_time_to_ind\n'), ((30577, 30623), 'nwbwidgets.utils.timeseries.timeseries_time_to_ind', 'timeseries_time_to_ind', (['spatial_series', 'before'], {}), '(spatial_series, before)\n', (30599, 30623), False, 'from nwbwidgets.utils.timeseries import align_by_times, timeseries_time_to_ind\n'), ((30657, 30716), 'nwbwidgets.utils.timeseries.timeseries_time_to_ind', 'timeseries_time_to_ind', (['spatial_series', '(base_start + before)'], {}), '(spatial_series, base_start + before)\n', (30679, 30716), False, 'from nwbwidgets.utils.timeseries import 
align_by_times, timeseries_time_to_ind\n'), ((30746, 30803), 'nwbwidgets.utils.timeseries.timeseries_time_to_ind', 'timeseries_time_to_ind', (['spatial_series', '(base_end + before)'], {}), '(spatial_series, base_end + before)\n', (30768, 30803), False, 'from nwbwidgets.utils.timeseries import align_by_times, timeseries_time_to_ind\n'), ((19043, 19063), 'numpy.asarray', 'np.asarray', (['colspans'], {}), '(colspans)\n', (19053, 19063), True, 'import numpy as np\n'), ((19253, 19274), 'numpy.eye', 'np.eye', (['locs.shape[0]'], {}), '(locs.shape[0])\n', (19259, 19274), True, 'import numpy as np\n'), ((19757, 19778), 'numpy.eye', 'np.eye', (['locs.shape[0]'], {}), '(locs.shape[0])\n', (19763, 19778), True, 'import numpy as np\n'), ((30137, 30167), 'pandas.DataFrame', 'pd.DataFrame', (['ep_dat_mag[j, :]'], {}), '(ep_dat_mag[j, :])\n', (30149, 30167), True, 'import pandas as pd\n'), ((30359, 30386), 'pandas.DataFrame', 'pd.DataFrame', (['tmp_val[::-1]'], {}), '(tmp_val[::-1])\n', (30371, 30386), True, 'import pandas as pd\n'), ((21917, 21938), 'numpy.asarray', 'np.asarray', (['parts_roi'], {}), '(parts_roi)\n', (21927, 21938), True, 'import numpy as np\n'), ((24212, 24233), 'numpy.asarray', 'np.asarray', (['parts_roi'], {}), '(parts_roi)\n', (24222, 24233), True, 'import numpy as np\n'), ((29953, 29978), 'numpy.square', 'np.square', (['ep_dat[..., 0]'], {}), '(ep_dat[..., 0])\n', (29962, 29978), True, 'import numpy as np\n'), ((29980, 30005), 'numpy.square', 'np.square', (['ep_dat[..., 1]'], {}), '(ep_dat[..., 1])\n', (29989, 30005), True, 'import numpy as np\n'), ((30875, 30895), 'numpy.arange', 'np.arange', (['n_tpoints'], {}), '(n_tpoints)\n', (30884, 30895), True, 'import numpy as np\n'), ((31272, 31296), 'numpy.isnan', 'np.isnan', (['curr_magnitude'], {}), '(curr_magnitude)\n', (31280, 31296), True, 'import numpy as np\n'), ((22686, 22708), 'numpy.arange', 'np.arange', (['(20)', '(101)', '(20)'], {}), '(20, 101, 20)\n', (22695, 22708), True, 'import numpy as np\n'), 
((24960, 24982), 'numpy.arange', 'np.arange', (['(20)', '(101)', '(20)'], {}), '(20, 101, 20)\n', (24969, 24982), True, 'import numpy as np\n'), ((31183, 31235), 'numpy.mean', 'np.mean', (['curr_magnitude[base_start_ind:base_end_ind]'], {}), '(curr_magnitude[base_start_ind:base_end_ind])\n', (31190, 31235), True, 'import numpy as np\n')] |
import numpy
import scipy
import scipy.special
from typing import NoReturn
from cryspy.B_parent_classes.cl_1_item import ItemN
from cryspy.B_parent_classes.cl_2_loop import LoopN
# FIXME: estimate d by time for epithermal neutrons
class TOFParameters(ItemN):
    """Parameters of the reflexion positions in time-of-flight experiments.

    Attributes
    ----------
    - zero, dtt1, ttheta_bank (mandatory)
    - neutrons, dtt2, zerot, dtt1t, dtt2t, width, x_cross, field,
      extinction (optional)

    For thermal neutrons
        time = zero + dtt1 * d + dtt2 * d**2
    or for epithermal neutrons
        time_e = zero + dtt1 * d
        time_t = zerot + dtt1t * d - dtt2t / d
        n_cross = 0.5*erfc(Width * (x_cross - 1/d))
        time = n_cross * time_e + (1-n_cross) time_t
    time is given in microseconds.
    """
    ATTR_MANDATORY_NAMES = ("zero", "dtt1", "ttheta_bank")
    ATTR_MANDATORY_TYPES = (float, float, float)
    ATTR_MANDATORY_CIF = ("Zero", "Dtt1", "2theta_bank")
    ATTR_OPTIONAL_NAMES = ('neutrons', "dtt2", "zerot", "dtt1t", "dtt2t",
                           "width", "x_cross", "field", "extinction")
    ATTR_OPTIONAL_TYPES = (str, float, float, float, float, float, float,
                           float, float)
    ATTR_OPTIONAL_CIF = ('neutrons', "dtt2", "zerot", "dtt1t", "dtt2t",
                         "width", "x_cross", "field", "extinction")
    ATTR_NAMES = ATTR_MANDATORY_NAMES + ATTR_OPTIONAL_NAMES
    ATTR_TYPES = ATTR_MANDATORY_TYPES + ATTR_OPTIONAL_TYPES
    ATTR_CIF = ATTR_MANDATORY_CIF + ATTR_OPTIONAL_CIF
    ATTR_INT_NAMES = ()
    ATTR_INT_PROTECTED_NAMES = ()
    # parameters considered are refined parameters
    ATTR_REF = ("zero", "dtt1", "dtt2", "zerot", "dtt1t", "dtt2t")
    ATTR_SIGMA = tuple([f"{_h:}_sigma" for _h in ATTR_REF])
    ATTR_CONSTR_FLAG = tuple([f"{_h:}_constraint" for _h in ATTR_REF])
    ATTR_REF_FLAG = tuple([f"{_h:}_refinement" for _h in ATTR_REF])
    # formats if cif format
    D_FORMATS = {
        "zero": "{:.5f}", "dtt1": "{:.5f}", "dtt2": "{:.5f}",
        "zerot": "{:.5f}", "dtt1t": "{:.5f}", "dtt2t": "{:.5f}",
        "ttheta_bank": "{:.2f}", "width": "{:.2f}", "x_cross": "{:.2f}",
        "field": "{:.3f}", "extinction": "{:.5f}"}
    # constraints on the parameters
    D_CONSTRAINTS = {"neutrons": ["thermal", "epithermal"]}
    # default values for the parameters
    D_DEFAULT = {"zero": 0., "neutrons": "thermal"}
    for key in ATTR_SIGMA:
        D_DEFAULT[key] = 0.
    for key in (ATTR_CONSTR_FLAG + ATTR_REF_FLAG):
        D_DEFAULT[key] = False
    PREFIX = "tof_parameters"

    def __init__(self, **kwargs) -> NoReturn:
        super(TOFParameters, self).__init__()
        # defined for any integer and float parameters
        D_MIN = {}
        # defined for any integer and float parameters
        D_MAX = {}
        self.__dict__["D_MIN"] = D_MIN
        self.__dict__["D_MAX"] = D_MAX
        for key, attr in self.D_DEFAULT.items():
            setattr(self, key, attr)
        for key, attr in kwargs.items():
            setattr(self, key, attr)

    def calc_time_by_d(self, d):
        """Calculate the time-of-flight (microseconds) for a given d-spacing.

        For epithermal neutrons the thermal and epithermal branches are
        blended with an erfc cross-over; otherwise the quadratic thermal
        relation time = zero + dtt1*d + dtt2*d**2 is used.

        Parameters
        ----------
        d : float or numpy.ndarray
            d-spacing.

        Returns
        -------
        time : float or numpy.ndarray
        """
        if self.neutrons == "epithermal":
            time_e = self.zero + self.dtt1 * d
            time_t = self.zerot + self.dtt1t * d - self.dtt2t / d
            n_cross = 0.5*scipy.special.erfc(self.width * (self.x_cross - 1/d))
            time = n_cross * time_e + (1-n_cross) * time_t
        else:  # self.neutrons == "thermal"
            time = self.zero + self.dtt1 * d + self.dtt2 * d**2
        return time

    def calc_d_min_max(self, time):
        """Calculate the d-spacing range (d_min, d_max) covered by *time*.

        Parameters
        ----------
        time : array-like
            Times of flight (microseconds).

        Returns
        -------
        d_min, d_max : float

        Raises
        ------
        AttributeError
            For epithermal neutrons (inversion not implemented).
        """
        time_min = numpy.min(time)
        time_max = numpy.max(time)
        if self.neutrons == "epithermal":
            raise AttributeError("function calc_d_min_max is not realized for epithermal neutrons")
        # thermal neutrons: invert time = zero + dtt1*d + dtt2*d**2
        if self.dtt2 == 0.:
            # degenerate linear calibration; avoids division by zero below
            d_min = (time_min-self.zero)/self.dtt1
            d_max = (time_max-self.zero)/self.dtt1
        else:
            det_sq_min = self.dtt1**2 - 4.*self.dtt2*(self.zero - time_min)
            det_sq_max = self.dtt1**2 - 4.*self.dtt2*(self.zero - time_max)
            # (-dtt1 + sqrt(det)) / (2 dtt2) is the physical (positive) root
            # of the quadratic for either sign of dtt2
            d_max = (-self.dtt1+det_sq_max**0.5)/(2.*self.dtt2)
            d_min = (-self.dtt1+det_sq_min**0.5)/(2.*self.dtt2)
        return d_min, d_max

    def calc_d_by_time(self, time):
        """Calculate d-spacing for a given time of flight.

        Relation between d and time (thermal neutrons) is
            time = zero + dtt1 * d + dtt2 * d**2
        falling back to the linear relation when dtt2 is zero.

        Parameters
        ----------
        time : float or numpy.ndarray
            Time of flight (microseconds).

        Returns
        -------
        d : float or numpy.ndarray

        Raises
        ------
        AttributeError
            For epithermal neutrons (inversion not implemented).
        """
        if self.neutrons == "epithermal":
            # error message fixed: it previously named calc_d_min_max
            raise AttributeError("function calc_d_by_time is not realized for epithermal neutrons")
        # thermal neutrons
        det = numpy.sqrt(self.dtt1**2 - 4.*(self.zero-time)*self.dtt2)
        if self.dtt2 == 0.:
            d = (time - self.zero)/self.dtt1
        else:
            # (det - dtt1) / (2 dtt2) is the positive root for either sign
            # of dtt2; the previous dtt2 > 0 branch returned the negative
            # root -(det + dtt1) / (2 dtt2), inconsistent with
            # calc_d_min_max.
            d = (det-self.dtt1)/(2.*self.dtt2)
        return d

    def calc_time_by_sthovl(self, sthovl):
        """Calculate time of flight for a given sin(theta)/lambda.

        Returns
        -------
        time : float or numpy.ndarray
        """
        d = 0.5/sthovl
        time = self.calc_time_by_d(d)
        return(time)
class TOFParametersL(LoopN):
    """Loop container of `TOFParameters` items (reflexion position
    parameters in time-of-flight experiments).
    """
    ITEM_CLASS = TOFParameters
    ATTR_INDEX = None

    def __init__(self, loop_name = None) -> NoReturn:
        super(TOFParametersL, self).__init__()
        # Write internal state directly into the instance dictionary so the
        # base class attribute machinery is not triggered.
        self.__dict__["loop_name"] = loop_name
        self.__dict__["items"] = []
# s_cont = """
# _tof_parameters_zero 2.921
# _tof_parameters_dtt1 6167.247
# _tof_parameters_dtt2 -2.280
# _tof_parameters_2theta_bank 145.
# """
# obj = TOFParameters.from_cif(s_cont)
# print(obj, end="\n\n")
| [
"numpy.max",
"scipy.special.erfc",
"numpy.sqrt",
"numpy.min"
] | [((4284, 4299), 'numpy.min', 'numpy.min', (['time'], {}), '(time)\n', (4293, 4299), False, 'import numpy\n'), ((4319, 4334), 'numpy.max', 'numpy.max', (['time'], {}), '(time)\n', (4328, 4334), False, 'import numpy\n'), ((5631, 5696), 'numpy.sqrt', 'numpy.sqrt', (['(self.dtt1 ** 2 - 4.0 * (self.zero - time) * self.dtt2)'], {}), '(self.dtt1 ** 2 - 4.0 * (self.zero - time) * self.dtt2)\n', (5641, 5696), False, 'import numpy\n'), ((3721, 3776), 'scipy.special.erfc', 'scipy.special.erfc', (['(self.width * (self.x_cross - 1 / d))'], {}), '(self.width * (self.x_cross - 1 / d))\n', (3739, 3776), False, 'import scipy\n')] |
import numpy as np
import os.path
import cv2
from openface.align import AlignDlib
from openface.openface_model import create_model
class IdentityMetadata():
    """Metadata describing one image of one identity in the dataset."""

    def __init__(self, base, name, file):
        self.base = base    # dataset base directory
        self.name = name    # identity name
        self.file = file    # image file name

    def image_path(self):
        """Return the full path of the image file."""
        return os.path.join(self.base, self.name, self.file)

    def __repr__(self):
        return self.image_path()
def load_metadata(path, num):
    """Load image metadata for the first *num* identities under *path*.

    Each identity is a sub-directory of *path*; every jpg/jpeg/png image in
    it yields one `IdentityMetadata`.  The previous extension check compared
    against 'JPG', 'jpeg' and 'png' without the leading dot that
    os.path.splitext returns, so only lowercase '.jpg' files were ever kept.

    Returns
    -------
    numpy.ndarray of IdentityMetadata
    """
    metadata = []
    for identity in sorted(os.listdir(path))[:num]:
        for fname in sorted(os.listdir(os.path.join(path, identity))):
            # Check file extension. Allow only jpg/jpeg/png files
            # (case-insensitive).
            ext = os.path.splitext(fname)[1]
            if ext.lower() in ('.jpg', '.jpeg', '.png'):
                metadata.append(IdentityMetadata(path, identity, fname))
    return np.array(metadata)
def load_image(path):
    """Read the image at *path* with OpenCV and return it in RGB order.

    OpenCV loads images with channels in BGR order, so the last axis is
    reversed before returning.
    """
    bgr = cv2.imread(path, 1)
    return bgr[..., ::-1]
def embeddingVectors(path):
    """Compute the OpenFace embedding vector for the face image at *path*.

    Loads the pre-trained nn4.small2 network, aligns the largest detected
    face, scales it to [0, 1] and returns the network's embedding.  If no
    face can be aligned, a diagnostic message is printed and None is
    returned implicitly.
    """
    # Pre-trained embedding network.
    model = create_model()
    model.load_weights('models/nn4.small2.v1.h5')
    # OpenFace face alignment utility.
    alignment = AlignDlib('models/landmarks.dat')

    def align_image(img):
        # Crop/warp the largest face to a 96x96 canonical pose.
        bbox = alignment.getLargestFaceBoundingBox(img)
        return alignment.align(96, img, bbox,
                               landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)

    img = align_image(load_image(path))
    try:
        # scale RGB values to interval [0,1]; fails with TypeError when
        # alignment found no face (img is None)
        img = (img / 255.).astype(np.float32)
    except TypeError:
        print("The image is not Clear to extract the Embeddings")
    else:
        # obtain embedding vector for image
        return model.predict(np.expand_dims(img, axis=0))[0]
| [
"openface.align.AlignDlib",
"numpy.array",
"numpy.expand_dims",
"openface.openface_model.create_model",
"cv2.imread"
] | [((921, 939), 'numpy.array', 'np.array', (['metadata'], {}), '(metadata)\n', (929, 939), True, 'import numpy as np\n'), ((974, 993), 'cv2.imread', 'cv2.imread', (['path', '(1)'], {}), '(path, 1)\n', (984, 993), False, 'import cv2\n'), ((1201, 1215), 'openface.openface_model.create_model', 'create_model', ([], {}), '()\n', (1213, 1215), False, 'from openface.openface_model import create_model\n'), ((1351, 1384), 'openface.align.AlignDlib', 'AlignDlib', (['"""models/landmarks.dat"""'], {}), "('models/landmarks.dat')\n", (1360, 1384), False, 'from openface.align import AlignDlib\n'), ((1937, 1964), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1951, 1964), True, 'import numpy as np\n')] |
from nose import tools
import numpy as np
from scipy import stats
from . import models
from .. import basis_functions
from .. import solvers
def analytic_solution(t, k0, alpha, delta, g, n, s, **params):
    """Analytic solution for model with Cobb-Douglas production."""
    lmbda = (g + n + delta) * (1 - alpha)
    decay = np.exp(-lmbda * t)
    # Transformed variable k**(1 - alpha) evolves linearly towards its
    # steady state s / (g + n + delta).
    transformed = (s / (g + n + delta)) * (1 - decay) + k0**(1 - alpha) * decay
    return transformed**(1 / (1 - alpha))
def cobb_douglas_output(k, alpha, **params):
    """Cobb-Douglas intensive production function: f(k) = k**alpha."""
    return k**alpha
def equilibrium_capital(alpha, delta, g, n, s, **params):
    """Equilibrium value of capital (per unit effective labor supply)."""
    growth = g + n + delta
    return (s / growth)**(1 / (1 - alpha))
def generate_random_params(scale, seed):
    """Draw a random, admissible parameter set for the Solow model."""
    np.random.seed(seed)

    # g and n may be negative, but delta is drawn so that g + n + delta > 0
    g, n = stats.norm.rvs(0.05, scale, size=2)
    delta, = stats.lognorm.rvs(scale, loc=g + n, size=1)
    assert g + n + delta > 0

    # s and alpha live on (0, 1); the Beta(1, 3) prior favours low values
    s, alpha = stats.beta.rvs(a=1, b=3, size=2)

    # draw k0 in a band around the implied equilibrium capital stock
    kstar = equilibrium_capital(alpha, delta, g, n, s)
    k0, = stats.uniform.rvs(0.5 * kstar, 1.5 * kstar, size=1)
    assert k0 > 0

    return {'g': g, 'n': n, 'delta': delta, 's': s, 'alpha': alpha,
            'k0': k0}
def initial_mesh(t, T, num, problem):
    """Build an initial (ts, ks) mesh relaxing exponentially towards k*."""
    ts = np.linspace(t, T, num)
    kstar = equilibrium_capital(**problem.params)
    gap = kstar - problem.params['k0']
    ks = kstar - gap * np.exp(-ts)
    return ts, ks
# Module-level test fixture: a random (but admissible) parameter draw, the
# benchmark Solow problem built from it, and a polynomial collocation solver.
# The seed is drawn once and reported in failure messages for reproducibility.
random_seed = np.random.randint(2147483647)
random_params = generate_random_params(0.1, random_seed)
test_problem = models.SolowModel(cobb_douglas_output, equilibrium_capital,
                              random_params)
polynomial_basis = basis_functions.PolynomialBasis()
solver = solvers.Solver(polynomial_basis)
def _test_polynomial_collocation(basis_kwargs, boundary_points, num=1000):
    """Helper function for testing various kinds of polynomial collocation.

    Fits an initial polynomial to a heuristic mesh, solves the model by
    orthogonal collocation, and checks that (1) the solver reports success,
    (2) the normalized residuals are small on average, and (3) the numeric
    solution agrees with the closed-form Cobb-Douglas solution.
    """
    ts, ks = initial_mesh(*boundary_points, num=num, problem=test_problem)
    k_poly = polynomial_basis.fit(ts, ks, **basis_kwargs)
    initial_coefs = k_poly.coef
    nodes = polynomial_basis.roots(**basis_kwargs)
    solution = solver.solve(basis_kwargs, boundary_points, initial_coefs,
                            nodes, test_problem)
    # check that solver terminated successfully
    msg = "Solver failed!\nSeed: {}\nModel params: {}\n"
    tools.assert_true(solution.result.success,
                      msg=msg.format(random_seed, test_problem.params))
    # compute the residuals
    normed_residuals = solution.normalize_residuals(ts)
    # check that residuals are close to zero on average
    tools.assert_true(np.mean(normed_residuals) < 1e-6,
                      msg=msg.format(random_seed, test_problem.params))
    # check that the numerical and analytic solutions are close
    numeric_soln = solution.evaluate_solution(ts)
    analytic_soln = analytic_solution(ts, **test_problem.params)
    tools.assert_true(np.mean(numeric_soln - analytic_soln) < 1e-6)
def test_chebyshev_collocation():
    """Test collocation solver using Chebyshev polynomials for basis."""
    domain = (0, 100)
    _test_polynomial_collocation(
        {'kind': 'Chebyshev', 'degree': 50, 'domain': domain}, domain)
def test_legendre_collocation():
    """Test collocation solver using Legendre polynomials for basis."""
    domain = (0, 100)
    _test_polynomial_collocation(
        {'kind': 'Legendre', 'degree': 50, 'domain': domain}, domain)
| [
"numpy.mean",
"scipy.stats.beta.rvs",
"scipy.stats.lognorm.rvs",
"scipy.stats.norm.rvs",
"numpy.exp",
"numpy.random.randint",
"numpy.linspace",
"numpy.random.seed",
"scipy.stats.uniform.rvs"
] | [((1697, 1726), 'numpy.random.randint', 'np.random.randint', (['(2147483647)'], {}), '(2147483647)\n', (1714, 1726), True, 'import numpy as np\n'), ((824, 844), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (838, 844), True, 'import numpy as np\n'), ((924, 959), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['(0.05)', 'scale'], {'size': '(2)'}), '(0.05, scale, size=2)\n', (938, 959), False, 'from scipy import stats\n'), ((973, 1016), 'scipy.stats.lognorm.rvs', 'stats.lognorm.rvs', (['scale'], {'loc': '(g + n)', 'size': '(1)'}), '(scale, loc=g + n, size=1)\n', (990, 1016), False, 'from scipy import stats\n'), ((1137, 1169), 'scipy.stats.beta.rvs', 'stats.beta.rvs', ([], {'a': '(1)', 'b': '(3)', 'size': '(2)'}), '(a=1, b=3, size=2)\n', (1151, 1169), False, 'from scipy import stats\n'), ((1295, 1346), 'scipy.stats.uniform.rvs', 'stats.uniform.rvs', (['(0.5 * kstar)', '(1.5 * kstar)'], {'size': '(1)'}), '(0.5 * kstar, 1.5 * kstar, size=1)\n', (1312, 1346), False, 'from scipy import stats\n'), ((1528, 1550), 'numpy.linspace', 'np.linspace', (['t', 'T', 'num'], {}), '(t, T, num)\n', (1539, 1550), True, 'import numpy as np\n'), ((1651, 1662), 'numpy.exp', 'np.exp', (['(-ts)'], {}), '(-ts)\n', (1657, 1662), True, 'import numpy as np\n'), ((2888, 2913), 'numpy.mean', 'np.mean', (['normed_residuals'], {}), '(normed_residuals)\n', (2895, 2913), True, 'import numpy as np\n'), ((3196, 3233), 'numpy.mean', 'np.mean', (['(numeric_soln - analytic_soln)'], {}), '(numeric_soln - analytic_soln)\n', (3203, 3233), True, 'import numpy as np\n'), ((409, 427), 'numpy.exp', 'np.exp', (['(-lmbda * t)'], {}), '(-lmbda * t)\n', (415, 427), True, 'import numpy as np\n'), ((358, 376), 'numpy.exp', 'np.exp', (['(-lmbda * t)'], {}), '(-lmbda * t)\n', (364, 376), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import numpy as np
import blosum as bl
from random import randint
def load_pssm(namefile, aa):
    """Read a .aamtx file and return (name, sequence, PSSM).

    The expected layout is a '>name' header line, a sequence line whose
    first character belongs to the alphabet *aa*, then one
    whitespace-separated row of floats per sequence position.
    """
    with open(namefile, 'r') as handle:
        for line in handle:
            if line.startswith('>'):
                name = line[1:-1]
            elif line[0] in aa:
                seq = line[:-1]
                pssm = []
            else:
                row = [float(tok) for tok in line[:-1].split(' ') if tok]
                pssm.append(row)
    return (name, list(seq), pssm)
def calc_score(vect1, vect2):
    """Alignment score between two profile columns at a given position.

    The score is the sum of all pairwise products vect1[i] * vect2[j],
    which factorises exactly as sum(vect1) * sum(vect2) — computed here in
    O(n + m) instead of the previous O(n * m) double loop.
    """
    return sum(vect1) * sum(vect2)
def calc_gap_optimal(nq, nt, pssmQ, pssmT, blosum):
    """Estimate the optimal gap-opening (po) and gap-extension (pe)
    penalties, returned as a (po, pe) pair of negative values.

    Candidate gap magnitudes are standardised against the BLOSUM score
    distribution, then rescaled to the empirical distribution of scores of
    randomly paired PSSM columns.  pe is taken as 10% of po.
    """
    bls_values = blosum.stack()
    bls_mean = bls_values.mean()
    bls_std = bls_values.std()
    # Standardised candidate gap penalties.
    A = [(gap - bls_mean) / bls_std for gap in np.arange(0.1, 10, 0.1)]
    # Score distribution of randomly paired profile columns.
    scores = []
    for _ in range(nq * nt):
        ri, rj = randint(0, nq-1), randint(0, nt-1)  # random PSSM row indices
        scores.append(calc_score(pssmQ[ri], pssmT[rj]))
    smean = np.array(scores).mean()
    sstd = np.array(scores).std()
    po = [a * sstd + smean for a in A]
    pe = [x * 10 / 100 for x in po]
    return (-np.array(po).mean(), -np.array(pe).mean())
def init_matInsert(i, j, po):
    """Initial value of a gap-insertion matrix cell.

    The first column (i > 0, j implicit 0) is blocked with -inf, the first
    row is seeded with cumulative opening penalties po*j, and the origin
    gets 0.
    """
    if i > 0:
        return -np.inf
    if j > 0 and i == 0:
        return po * j
    return 0
def init_matMatch(i, j):
    """Initial value of the match matrix cell (i, j): 0 at the origin and
    in the interior, -inf along the first row and the first column."""
    if i == 0 and j == 0:
        return 0
    if i == 0 or j == 0:
        return -np.inf
    return 0
def scores_propositions(i, j, M, Q, T, pssmQ, pssmT, p, type_score):
    """Candidate scores for cell (i, j) of matrix *type_score*.

    *type_score* is 'M' (match), 'Q' (gap in query) or 'T' (gap in
    template); *p* is the gap penalty to apply.  Returns a list of the
    three candidate values (empty for an unknown type).
    """
    if type_score == 'M':
        match_score = calc_score(pssmQ[i-1], pssmT[j-1])
        return [match_score + M[i-1,j-1], Q[i,j], T[i,j]]
    if type_score == 'Q':
        return [M[i,j-1] + p, Q[i,j-1] + p, T[i,j-1] + p]
    if type_score == 'T':
        return [M[i-1,j] + p, Q[i-1,j] + p, T[i-1,j] + p]
    return []
def crea_matrix(fileQ, fileT):
    """
    Create, initialise and fill the match matrix (M) and the gap-insertion
    matrices (Q and T) for the query/template alignment (affine gap method).

    Returns the query/template names, the three filled matrices, both
    sequences and the optimal gap opening (po) / extension (pe) penalties.
    Exits with status 1 if a sequence length does not match its PSSM.
    """
    aa, blosum = bl.load_blosum("bin/salut_1.0/data/BLOSUM62.txt")
    nameQ, seqQ, pssmQ = load_pssm(fileQ, aa)
    nameT, seqT, pssmT = load_pssm(fileT, aa)
    if len(seqQ) != len(pssmQ):
        print(">>> ERREUR : La longueur de la séquence QUERY est différente du nombre de lignes lues dans la PSSM QUERY\n")
        sys.exit(1)
    elif len(seqT) != len(pssmT):
        print(">>> ERREUR : La longueur de la séquence TEMPLATE est différente du nombre de lignes lues dans la PSSM TEMPLATE\n")
        sys.exit(1)
    nq, nt = len(seqQ)+1, len(seqT)+1
    po, pe = calc_gap_optimal(len(seqQ), len(seqT), pssmQ, pssmT, blosum)
    M = np.zeros((nq, nt)) # matches
    Q = np.zeros((nq, nt)) # gap inserted in the query
    T = np.zeros((nq, nt)) # gap inserted in the template
    for i in range(nq) :
        for j in range(nt) :
            if i == 0 or j == 0 :
                # Initialise the alignment matrices Match (M) / InsertGapQuery (Q) / InsertGapTemplate (T)
                M[i,j] = init_matMatch(i, j)
                Q[i,j] = init_matInsert(j, i, po)
                T[i,j] = init_matInsert(i, j, po)
            else :
                # Fill M, Q and T: use the extension penalty if the best
                # predecessor already opened a gap, the opening one otherwise
                mat = [M[i-1,j-1], Q[i,j-1], T[i-1,j]]
                if mat.index(max(mat)) > 0 :
                    p = pe
                else:
                    p = po
                Q[i,j] = max(scores_propositions(i, j, M, Q, T, pssmQ, pssmT, p, 'Q'))
                T[i,j] = max(scores_propositions(i, j, M, Q, T, pssmQ, pssmT, p, 'T'))
                M[i,j] = max(scores_propositions(i, j, M, Q, T, pssmQ, pssmT, p, 'M'))
    return(nameQ, nameT, M, Q, T, seqQ, seqT, po, pe)
def align_matrix(M, Q, T, seqQ, seqT):
    """
    Trace back the optimal global alignment (Needleman & Wunsch).

    Walks the filled M/Q/T matrices from the bottom-right corner and
    returns (aligned query string, aligned template string, global score).
    """
    alignQ, alignT = [], []
    i, j = len(seqQ), len(seqT)
    score = max(M[i,j], Q[i,j], T[i,j])
    while i > 0 and j > 0:
        mat = [M[i-1,j-1], Q[i,j-1], T[i-1,j]]
        # case where seqQ[i] and seqT[j] are aligned together
        if mat.index(max(mat)) == 0 and i > 0 and j > 0: # M
            i, j = i-1, j-1
            alignQ.append(seqQ[i])
            alignT.append(seqT[j])
        # case where a gap is inserted at seqQ[i]
        elif mat.index(max(mat)) == 1 and j > 0: # Q
            i, j = i, j-1
            alignQ.append("-")
            alignT.append(seqT[j])
        # case where a gap is inserted at seqT[j]
        elif mat.index(max(mat)) == 2 and i > 0: # T
            i, j = i-1, j
            alignQ.append(seqQ[i])
            alignT.append("-")
    alignQ.reverse()
    alignT.reverse()
    return("".join(alignQ), "".join(alignT), score)
def search_end(seq, align):
    """Compute the 1-based (start, end) bounds of *align* within *seq*.

    *seq* is the full sequence (a list of one-letter codes) and *align*
    the aligned string, possibly containing '-' gaps.  If the alignment
    has no gap, or starts exactly at the first residue, the whole
    sequence is covered.

    Raises
    ------
    ValueError
        If the ungapped alignment cannot be located in the sequence
        (previously this fell through to an UnboundLocalError).
    """
    if align.find('-') == -1 or align[0] == seq[0]:
        return (1, len(seq))
    ungapped = align.replace("-", "")
    # Locate the start of the ungapped alignment via its (up to 3-char)
    # prefix, as the original index scan did, but without reading past the
    # end of the sequence.
    deb = "".join(seq).find(ungapped[:3])
    if deb == -1:
        raise ValueError("aligned segment not found in the sequence")
    fin = deb + len(ungapped)
    return (deb + 1, fin)
def crea_output(nameQ, nameT, truncQ, truncT, nq, nt, intervQ, intervT, scoreGlobal) :
    """
    Append the alignment (query_name.aln) and its score
    (query_name_scores.txt) to the query's output files.
    """
    aln_path = "data/queries/{}/{}.aln".format(nameQ, nameQ)
    with open(aln_path, "a+") as fd:
        fd.write("Query | {} | {:.3f} | {} | {} | {}-{} | {}-{}\n".format(nameT, scoreGlobal, nq, nt, intervQ[0], intervQ[1], intervT[0], intervT[1]))
        fd.write("{}\n{}\n".format(truncQ, truncT))
    score_path = "data/queries/{}/{}_scores.txt".format(nameQ, nameQ)
    with open(score_path, "a+") as fscore:
        fscore.write("{} {:.3f}\n".format(nameT, scoreGlobal))
def main():
    """Command-line entry point: align a query PSSM against a template
    PSSM and append the result to the query's output files."""
    # input files: paths of the query and template PSSMs
    if len(sys.argv) < 3 :
        print(">>> ERREUR : PSSM de la Query et/ou Template manquante(s)\n")
        sys.exit(1)
    else :
        fileQ = sys.argv[1]
        fileT = sys.argv[2]
    # build the matrices and read back the query/template alignment
    nameQ, nameT, M, Q, T, seqQ, seqT, po, pe = crea_matrix(fileQ, fileT)
    print("Alignement : {} (Q) vs {} (T)".format(nameQ, nameT))
    print("Pénalités optimales : ouverture de gap = {:.2f}, extension de gap = {:.3f}".format(po, pe))
    alignQ, alignT, scoreGlobal = align_matrix(M, Q, T, seqQ, seqT)
    # compute the intervals of the aligned query/template sections
    intervT = [ 1, len(seqT)]
    intervQ = search_end(seqQ, alignQ)
    # write the output files
    crea_output(nameQ, nameT, alignQ, alignT, len(seqQ), len(seqT), intervQ, intervT, scoreGlobal)
# Script entry point.
if __name__=='__main__':
    main()
    print()
| [
"blosum.load_blosum",
"numpy.array",
"numpy.zeros",
"sys.exit",
"random.randint",
"numpy.arange"
] | [((1037, 1060), 'numpy.arange', 'np.arange', (['(0.1)', '(10)', '(0.1)'], {}), '(0.1, 10, 0.1)\n', (1046, 1060), True, 'import numpy as np\n'), ((3235, 3284), 'blosum.load_blosum', 'bl.load_blosum', (['"""bin/salut_1.0/data/BLOSUM62.txt"""'], {}), "('bin/salut_1.0/data/BLOSUM62.txt')\n", (3249, 3284), True, 'import blosum as bl\n'), ((3860, 3878), 'numpy.zeros', 'np.zeros', (['(nq, nt)'], {}), '((nq, nt))\n', (3868, 3878), True, 'import numpy as np\n'), ((3896, 3914), 'numpy.zeros', 'np.zeros', (['(nq, nt)'], {}), '((nq, nt))\n', (3904, 3914), True, 'import numpy as np\n'), ((3941, 3959), 'numpy.zeros', 'np.zeros', (['(nq, nt)'], {}), '((nq, nt))\n', (3949, 3959), True, 'import numpy as np\n'), ((3542, 3553), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3550, 3553), False, 'import sys\n'), ((7162, 7173), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7170, 7173), False, 'import sys\n'), ((1488, 1504), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (1496, 1504), True, 'import numpy as np\n'), ((1523, 1539), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (1531, 1539), True, 'import numpy as np\n'), ((3726, 3737), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3734, 3737), False, 'import sys\n'), ((1324, 1342), 'random.randint', 'randint', (['(0)', '(nq - 1)'], {}), '(0, nq - 1)\n', (1331, 1342), False, 'from random import randint\n'), ((1342, 1360), 'random.randint', 'randint', (['(0)', '(nt - 1)'], {}), '(0, nt - 1)\n', (1349, 1360), False, 'from random import randint\n'), ((1688, 1700), 'numpy.array', 'np.array', (['po'], {}), '(po)\n', (1696, 1700), True, 'import numpy as np\n'), ((1710, 1722), 'numpy.array', 'np.array', (['pe'], {}), '(pe)\n', (1718, 1722), True, 'import numpy as np\n')] |
#!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtkendpointsections.py,v $
## Language: Python
## Date: $Date: 2021/01/05 $
## Version: $Revision: 1.5 $
## Copyright (c) <NAME>, <NAME>. All rights reserved.
## See LICENCE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this class was contributed by
## <NAME>
## Clemson University
## Extract endpoint sections of a split and grouped centerline
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY
import vtk
import sys
from vmtk import vtkvmtk
from vmtk import vmtkscripts
from vmtk import pypes
import numpy as np
class vmtkEndpointSections(pypes.pypeScript):
def __init__(self):
pypes.pypeScript.__init__(self)
self.Surface = None
self.Centerlines = None
self.IoletSections = None
self.CenterlineIdsArrayName = 'CenterlineIds'
self.GroupIdsArrayName = 'GroupIds'
self.NormalsArrayName = 'IoletNormals'
self.SetScriptName('vmtkendpointsections')
self.SetScriptDoc('extracts the inlets and outlets of all branches')
self.SetInputMembers([
['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'],
['Centerlines','centerlines','vtkPolyData',1,'','the input centerline','vmtksurfacereader'],
['CenterlineIdsArrayName','centerlineidsarray','str',1],
['GroupIdsArrayName','groupidsarray','str',1],
])
self.SetOutputMembers([
['IoletSections','o','vtkPolyData',1,'','the iolet sections','vmtksurfacewriter'],
['CenterlineIdsArrayName','centerlineidsarray','str',1],
['GroupIdsArrayName','groupidsarray','str',1],
])
def Execute(self):
if self.Surface == None:
self.PrintError('Error: No input surface.')
if self.Centerlines == None:
self.PrintError('Error: No input centerlines.')
endpoints = vtk.vtkPolyData()
cellArray = vtk.vtkCellArray()
pointArray = vtk.vtkPoints()
centerlineIdsArray = vtk.vtkIntArray()
centerlineIdsArray.SetNumberOfComponents(1)
centerlineIdsArray.SetName(self.CenterlineIdsArrayName)
ioletIdsArray = vtk.vtkIntArray()
ioletIdsArray.SetNumberOfComponents(1)
ioletIdsArray.SetName(self.GroupIdsArrayName)
normalsArray = vtk.vtkDoubleArray()
normalsArray.SetNumberOfComponents(3)
normalsArray.SetName(self.NormalsArrayName)
# loop over centerlines
numberOfCells = self.Centerlines.GetNumberOfCells()
for cellId in range(numberOfCells):
cell = self.Centerlines.GetCell(cellId)
numberOfPoints = cell.GetNumberOfPoints()
# create inlet section
firstPoint = cell.GetPoints().GetPoint(0)
nextPoint = cell.GetPoints().GetPoint(1)
distance = np.sqrt(vtk.vtkMath.Distance2BetweenPoints(firstPoint,nextPoint))
tangent = np.subtract(nextPoint, firstPoint)/distance
vtk.vtkMath.Normalize(tangent)
pointId = pointArray.InsertNextPoint(firstPoint)
cellArray.InsertNextCell(1)
cellArray.InsertCellPoint(pointId)
centerlineIdsArray.InsertNextValue(cellId)
ioletIdsArray.InsertNextValue(pointId)
normalsArray.InsertNextTuple3(tangent[0],tangent[1],tangent[2])
# create outlet section
lastPoint = cell.GetPoints().GetPoint(numberOfPoints-1)
nextPoint = cell.GetPoints().GetPoint(numberOfPoints-2)
distance = np.sqrt(vtk.vtkMath.Distance2BetweenPoints(firstPoint,nextPoint))
tangent = np.subtract(lastPoint, nextPoint)/distance
vtk.vtkMath.Normalize(tangent)
pointId = pointArray.InsertNextPoint(lastPoint)
cellArray.InsertNextCell(1)
cellArray.InsertCellPoint(pointId)
centerlineIdsArray.InsertNextValue(cellId)
ioletIdsArray.InsertNextValue(pointId)
normalsArray.InsertNextTuple3(tangent[0],tangent[1],tangent[2])
endpoints.SetPoints(pointArray)
endpoints.SetVerts(cellArray)
endpoints.GetCellData().AddArray(centerlineIdsArray)
endpoints.GetCellData().AddArray(ioletIdsArray)
endpoints.GetCellData().AddArray(normalsArray)
self.GroupEndpointSections(endpoints)
self.MakeGroupIdsAdjacent(endpoints)
self.IoletSections = endpoints
def GroupEndpointSections(self, endpoints):
    """Merge endpoint cells that sit at the same spatial location.

    For each endpoint cell, compare its point against every previously
    visited endpoint; when two points coincide (distance below 1e-12)
    they are assigned the same value in the GroupIds cell-data array.
    """
    ioletIdsArray = endpoints.GetCellData().GetArray(self.GroupIdsArrayName)
    for ioletId in range(endpoints.GetNumberOfCells()):
        groupId = ioletIdsArray.GetValue(ioletId)
        currentPoint = endpoints.GetPoints().GetPoint(ioletId)
        # walk backwards over endpoints already seen
        for j in reversed(range(ioletId)):
            point = endpoints.GetPoints().GetPoint(j)
            distance = np.sqrt(vtk.vtkMath.Distance2BetweenPoints(point,currentPoint))
            # NOTE(review): debug print left in library code — consider removing
            print (groupId,j,"%.6e"%distance)
            if (distance < 1.e-12):
                # NOTE(review): SetValue(groupId, j) writes at *index* groupId.
                # This only targets the current cell because group ids start
                # out equal to cell ids; SetValue(ioletId, j) may be the
                # intent — TODO confirm against vmtk upstream.
                ioletIdsArray.SetValue(groupId,j)
def MakeGroupIdsAdjacent(self, endpoints):
    """Compact the group ids to consecutive integers 0..k-1.

    Distinct ids are renumbered in increasing order of their value, so the
    relative ordering of groups is preserved.
    """
    ids = endpoints.GetCellData().GetArray(self.GroupIdsArrayName)
    count = ids.GetNumberOfTuples()
    for nextId in range(count):
        # snapshot the current ids, then find the smallest id not yet remapped
        current = [ids.GetValue(k) for k in range(count)]
        smallest = min((v for v in current if v >= nextId), default=sys.maxsize)
        for k, v in enumerate(current):
            if v == smallest:
                ids.SetValue(k, nextId)
if __name__ == '__main__':
    # run this script through the vmtk pype mechanism
    pype_runner = pypes.pypeMain()
    pype_runner.Arguments = sys.argv
    pype_runner.Execute()
| [
"vmtk.pypes.pypeMain",
"vtk.vtkMath.Normalize",
"vtk.vtkIntArray",
"vtk.vtkPolyData",
"vtk.vtkCellArray",
"vtk.vtkDoubleArray",
"vtk.vtkPoints",
"numpy.subtract",
"vtk.vtkMath.Distance2BetweenPoints",
"vmtk.pypes.pypeScript.__init__"
] | [((6173, 6189), 'vmtk.pypes.pypeMain', 'pypes.pypeMain', ([], {}), '()\n', (6187, 6189), False, 'from vmtk import pypes\n'), ((941, 972), 'vmtk.pypes.pypeScript.__init__', 'pypes.pypeScript.__init__', (['self'], {}), '(self)\n', (966, 972), False, 'from vmtk import pypes\n'), ((2213, 2230), 'vtk.vtkPolyData', 'vtk.vtkPolyData', ([], {}), '()\n', (2228, 2230), False, 'import vtk\n'), ((2251, 2269), 'vtk.vtkCellArray', 'vtk.vtkCellArray', ([], {}), '()\n', (2267, 2269), False, 'import vtk\n'), ((2291, 2306), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (2304, 2306), False, 'import vtk\n'), ((2337, 2354), 'vtk.vtkIntArray', 'vtk.vtkIntArray', ([], {}), '()\n', (2352, 2354), False, 'import vtk\n'), ((2496, 2513), 'vtk.vtkIntArray', 'vtk.vtkIntArray', ([], {}), '()\n', (2511, 2513), False, 'import vtk\n'), ((2639, 2659), 'vtk.vtkDoubleArray', 'vtk.vtkDoubleArray', ([], {}), '()\n', (2657, 2659), False, 'import vtk\n'), ((3313, 3343), 'vtk.vtkMath.Normalize', 'vtk.vtkMath.Normalize', (['tangent'], {}), '(tangent)\n', (3334, 3343), False, 'import vtk\n'), ((4014, 4044), 'vtk.vtkMath.Normalize', 'vtk.vtkMath.Normalize', (['tangent'], {}), '(tangent)\n', (4035, 4044), False, 'import vtk\n'), ((3177, 3234), 'vtk.vtkMath.Distance2BetweenPoints', 'vtk.vtkMath.Distance2BetweenPoints', (['firstPoint', 'nextPoint'], {}), '(firstPoint, nextPoint)\n', (3211, 3234), False, 'import vtk\n'), ((3257, 3291), 'numpy.subtract', 'np.subtract', (['nextPoint', 'firstPoint'], {}), '(nextPoint, firstPoint)\n', (3268, 3291), True, 'import numpy as np\n'), ((3879, 3936), 'vtk.vtkMath.Distance2BetweenPoints', 'vtk.vtkMath.Distance2BetweenPoints', (['firstPoint', 'nextPoint'], {}), '(firstPoint, nextPoint)\n', (3913, 3936), False, 'import vtk\n'), ((3959, 3992), 'numpy.subtract', 'np.subtract', (['lastPoint', 'nextPoint'], {}), '(lastPoint, nextPoint)\n', (3970, 3992), True, 'import numpy as np\n'), ((5209, 5264), 'vtk.vtkMath.Distance2BetweenPoints', 
'vtk.vtkMath.Distance2BetweenPoints', (['point', 'currentPoint'], {}), '(point, currentPoint)\n', (5243, 5264), False, 'import vtk\n')] |
"""
Base class for tensor-product style meshes
"""
import numpy as np
import scipy.sparse as sp
import properties
from discretize.base.base_mesh import BaseMesh
from discretize.utils import (
is_scalar,
as_array_n_by_dim,
unpack_widths,
mkvc,
ndgrid,
spzeros,
sdiag,
sdinv,
TensorType,
interpolation_matrix,
)
from discretize.utils.code_utils import deprecate_method, deprecate_property
import warnings
class BaseTensorMesh(BaseMesh):
    """
    Base class for tensor-product style meshes

    This class contains properties and methods that are common to cartesian
    and cylindrical meshes defined by tensor-products of vectors describing
    cell spacings.

    Do not use this class directly, instead, inherit it if you plan to develop
    a tensor-style mesh (e.g. a spherical mesh) or use the
    :meth:`discretize.TensorMesh` class to create a cartesian tensor mesh.
    """

    _meshType = "BASETENSOR"
    # legacy short spellings mapped onto the current property names
    _aliases = {
        **BaseMesh._aliases,
        **{
            "gridCC": "cell_centers",
            "gridN": "nodes",
            "gridFx": "faces_x",
            "gridFy": "faces_y",
            "gridFz": "faces_z",
            "gridEx": "edges_x",
            "gridEy": "edges_y",
            "gridEz": "edges_z",
        },
    }
    # extent per dimension used when an h entry is given as a plain integer
    _unitDimensions = [1, 1, 1]

    # properties
    h = properties.Tuple(
        "h is a list containing the cell widths of the tensor mesh in each "
        "dimension.",
        properties.Array(
            "widths of the tensor mesh in a single dimension",
            dtype=float,
            shape=("*",),
        ),
        min_length=1,
        max_length=3,
        coerce=True,
        required=True,
    )
def __init__(self, h=None, origin=None, **kwargs):
    """Build the tensor mesh from cell-width vectors ``h`` and an ``origin``.

    Parameters
    ----------
    h : list or tuple, length 1 to 3
        Cell widths per dimension. Each entry may be an int/scalar (that
        many equal cells over the unit length), a numpy array of widths,
        or a list/tuple expanded with ``unpack_widths``.
    origin : list, optional
        Origin per dimension; each entry is a scalar, or one of '0'
        (zero), 'C' (center the axis about zero), 'N' (fully negative).
    """
    h_in = h
    # backwards-compatible spelling of origin
    if "x0" in kwargs:
        origin = kwargs.pop('x0')
    origin_in = origin
    # Sanity Checks
    if not isinstance(h_in, (list, tuple)):
        raise TypeError("h_in must be a list, not {}".format(type(h_in)))
    if len(h_in) not in [1, 2, 3]:
        raise ValueError(
            "h_in must be of dimension 1, 2, or 3 not {}".format(len(h_in))
        )
    # build h
    h = list(range(len(h_in)))
    for i, h_i in enumerate(h_in):
        if is_scalar(h_i) and not isinstance(h_i, np.ndarray):
            # This gives you something over the unit cube.
            h_i = self._unitDimensions[i] * np.ones(int(h_i)) / int(h_i)
        elif isinstance(h_i, (list, tuple)):
            h_i = unpack_widths(h_i)
        if not isinstance(h_i, np.ndarray):
            raise TypeError("h[{0:d}] is not a numpy array.".format(i))
        if len(h_i.shape) != 1:
            raise ValueError("h[{0:d}] must be a 1D numpy array.".format(i))
        h[i] = h_i[:]  # make a copy.
    # Origin of the mesh
    origin = np.zeros(len(h))
    if origin_in is not None:
        if len(h) != len(origin_in):
            raise ValueError("Dimension mismatch. origin != len(h)")
        for i in range(len(h)):
            x_i, h_i = origin_in[i], h[i]
            if is_scalar(x_i):
                origin[i] = x_i
            elif x_i == "0":
                origin[i] = 0.0
            elif x_i == "C":
                # center the axis about zero
                origin[i] = -h_i.sum() * 0.5
            elif x_i == "N":
                # axis entirely on the negative side
                origin[i] = -h_i.sum()
            else:
                raise Exception(
                    "origin[{0:d}] must be a scalar or '0' to be zero, "
                    "'C' to center, or 'N' to be negative. The input value"
                    " {1} {2} is invalid".format(i, x_i, type(x_i))
                )
    if "n" in kwargs.keys():
        n = kwargs.pop("n")
        if np.any(n != np.array([x.size for x in h])):
            raise ValueError("Dimension mismatch. The provided n doesn't h")
    else:
        n = np.array([x.size for x in h])
    super(BaseTensorMesh, self).__init__(n, origin=origin, **kwargs)
    # Ensure h contains 1D vectors
    self.h = [mkvc(x.astype(float)) for x in h]
@property
def nodes_x(self):
    """Nodal grid vector (1D) in the x direction."""
    return np.cumsum(np.r_[self.origin[0], self.h[0]])

@property
def nodes_y(self):
    """Nodal grid vector (1D) in the y direction, or None for 1D meshes."""
    if self.dim < 2:
        return None
    return np.cumsum(np.r_[self.origin[1], self.h[1]])

@property
def nodes_z(self):
    """Nodal grid vector (1D) in the z direction, or None below 3D."""
    if self.dim < 3:
        return None
    return np.cumsum(np.r_[self.origin[2], self.h[2]])
@property
def cell_centers_x(self):
    """Cell-centered grid vector (1D) in the x direction."""
    n = self.nodes_x
    return 0.5 * (n[:-1] + n[1:])

@property
def cell_centers_y(self):
    """Cell-centered grid vector (1D) in the y direction, or None for 1D meshes."""
    if self.dim < 2:
        return None
    n = self.nodes_y
    return 0.5 * (n[:-1] + n[1:])

@property
def cell_centers_z(self):
    """Cell-centered grid vector (1D) in the z direction, or None below 3D."""
    if self.dim < 3:
        return None
    n = self.nodes_z
    return 0.5 * (n[:-1] + n[1:])
@property
def cell_centers(self):
    """Cell-centered grid."""
    return self._getTensorGrid("cell_centers")

@property
def nodes(self):
    """Nodal grid."""
    return self._getTensorGrid("nodes")

@property
def h_gridded(self):
    """An (nC, dim) numpy array with the widths of all cells, in order."""
    if self.dim > 1:
        return ndgrid(*self.h)
    # 1D: just the single width vector as a column
    return self.h[0][:, None]
@property
def faces_x(self):
    """Face staggered grid in the x direction, or None if there are no x faces."""
    return None if self.nFx == 0 else self._getTensorGrid("faces_x")

@property
def faces_y(self):
    """Face staggered grid in the y direction, or None if absent."""
    return None if (self.nFy == 0 or self.dim < 2) else self._getTensorGrid("faces_y")

@property
def faces_z(self):
    """Face staggered grid in the z direction, or None if absent."""
    return None if (self.nFz == 0 or self.dim < 3) else self._getTensorGrid("faces_z")

@property
def edges_x(self):
    """Edge staggered grid in the x direction, or None if there are no x edges."""
    return None if self.nEx == 0 else self._getTensorGrid("edges_x")

@property
def edges_y(self):
    """Edge staggered grid in the y direction, or None if absent."""
    return None if (self.nEy == 0 or self.dim < 2) else self._getTensorGrid("edges_y")

@property
def edges_z(self):
    """Edge staggered grid in the z direction, or None if absent."""
    return None if (self.nEz == 0 or self.dim < 3) else self._getTensorGrid("edges_z")
def _getTensorGrid(self, key):
    """Return the cached gridded locations for *key*, building them on first use."""
    attr = "_" + key
    cached = getattr(self, attr, None)
    if cached is None:
        cached = ndgrid(self.get_tensor(key))
        setattr(self, attr, cached)
    return cached
def get_tensor(self, key):
    """Returns a tensor list.

    Parameters
    ----------
    key : str
        Which tensor (see below)

    key can be::

        'CC', 'cell_centers' -> location of cell centers
        'N', 'nodes'         -> location of nodes
        'Fx', 'faces_x'      -> location of faces with an x normal
        'Fy', 'faces_y'      -> location of faces with an y normal
        'Fz', 'faces_z'      -> location of faces with an z normal
        'Ex', 'edges_x'      -> location of edges with an x tangent
        'Ey', 'edges_y'      -> location of edges with an y tangent
        'Ez', 'edges_z'      -> location of edges with an z tangent

    Returns
    -------
    list
        list of the tensors that make up the mesh.

    Raises
    ------
    KeyError
        If ``key`` does not name a known grid location.
    """
    key = self._parse_location_type(key)
    # attribute names of the 1D tensors that make up each grid type
    tensor_attrs = {
        "faces_x": ("nodes_x", "cell_centers_y", "cell_centers_z"),
        "faces_y": ("cell_centers_x", "nodes_y", "cell_centers_z"),
        "faces_z": ("cell_centers_x", "cell_centers_y", "nodes_z"),
        "edges_x": ("cell_centers_x", "nodes_y", "nodes_z"),
        "edges_y": ("nodes_x", "cell_centers_y", "nodes_z"),
        "edges_z": ("nodes_x", "nodes_y", "cell_centers_z"),
        "cell_centers": ("cell_centers_x", "cell_centers_y", "cell_centers_z"),
        "nodes": ("nodes_x", "nodes_y", "nodes_z"),
    }
    if key not in tensor_attrs:
        # BUG FIX: this message was a raw string (r"...{key}"), so the
        # offending key was never interpolated; use an f-string instead.
        raise KeyError(f"Unrecognized key {key}")
    ten = [getattr(self, name) for name in tensor_attrs[key]]
    # drop axes that do not exist for this mesh dimension (they are None)
    return [t for t in ten if t is not None]
# --------------- Methods ---------------------
def is_inside(self, pts, location_type="nodes", **kwargs):
    """
    Determines if a set of points are inside a mesh.

    :param numpy.ndarray pts: Location of points to test
    :rtype: numpy.ndarray
    :return: inside, numpy array of booleans
    """
    if "locType" in kwargs:
        warnings.warn(
            "The locType keyword argument has been deprecated, please use location_type. "
            "This will be removed in discretize 1.0.0",
            DeprecationWarning,
        )
        location_type = kwargs["locType"]
    pts = as_array_n_by_dim(pts, self.dim)
    tensors = self.get_tensor(location_type)
    if location_type[0].lower() == "n" and self._meshType == "CYL":
        # for a CYL mesh, prepend a node at r=0 and close theta at 2*pi so
        # the full radial/azimuthal extent counts as inside
        tensors[0] = np.r_[0.0, tensors[0]]
        tensors[1] = np.r_[tensors[1], 2.0 * np.pi]
    inside = np.ones(pts.shape[0], dtype=bool)
    for axis, ten in enumerate(tensors):
        # tolerance scales with the smallest cell along this axis
        tol = 1.0e-10 * np.diff(ten).min()
        within_lower = pts[:, axis] >= ten.min() - tol
        within_upper = pts[:, axis] <= ten.max() + tol
        inside &= within_lower & within_upper
    return inside
def _getInterpolationMat(self, loc, location_type="cell_centers", zeros_outside=False):
    """Produces interpolation matrix

    Parameters
    ----------
    loc : numpy.ndarray
        Location of points to interpolate to
    location_type : str
        What to interpolate

        location_type can be::

            'Ex', 'edges_x'           -> x-component of field defined on x edges
            'Ey', 'edges_y'           -> y-component of field defined on y edges
            'Ez', 'edges_z'           -> z-component of field defined on z edges
            'Fx', 'faces_x'           -> x-component of field defined on x faces
            'Fy', 'faces_y'           -> y-component of field defined on y faces
            'Fz', 'faces_z'           -> z-component of field defined on z faces
            'N', 'nodes'              -> scalar field defined on nodes
            'CC', 'cell_centers'      -> scalar field defined on cell centers
            'CCVx', 'cell_centers_x'  -> x-component of vector field defined on cell centers
            'CCVy', 'cell_centers_y'  -> y-component of vector field defined on cell centers
            'CCVz', 'cell_centers_z'  -> z-component of vector field defined on cell centers
    zeros_outside : bool
        If False (default), points outside the mesh raise a ValueError;
        if True, their rows of the matrix are zeroed instead.

    Returns
    -------
    scipy.sparse.csr_matrix
        M, the interpolation matrix
    """
    loc = as_array_n_by_dim(loc, self.dim)
    if not zeros_outside:
        if not np.all(self.is_inside(loc)):
            raise ValueError("Points outside of mesh")
    else:
        # remember which points fall outside and park them at the mesh
        # center so interpolation_matrix receives valid locations; their
        # rows are zeroed at the end.
        indZeros = np.logical_not(self.is_inside(loc))
        loc[indZeros, :] = np.array([v.mean() for v in self.get_tensor("CC")])
    location_type = self._parse_location_type(location_type)
    if location_type in ["faces_x", "faces_y", "faces_z", "edges_x", "edges_y", "edges_z"]:
        ind = {"x": 0, "y": 1, "z": 2}[location_type[-1]]
        if self.dim < ind:
            raise ValueError("mesh is not high enough dimension.")
        if "f" in location_type.lower():
            items = (self.nFx, self.nFy, self.nFz)[: self.dim]
        else:
            items = (self.nEx, self.nEy, self.nEz)[: self.dim]
        # one block per vector component; only the requested component is
        # interpolated, the rest stay zero blocks
        components = [spzeros(loc.shape[0], n) for n in items]
        components[ind] = interpolation_matrix(loc, *self.get_tensor(location_type))
        # remove any zero blocks (hstack complains)
        components = [comp for comp in components if comp.shape[1] > 0]
        Q = sp.hstack(components)
    elif location_type in ["cell_centers", "nodes"]:
        Q = interpolation_matrix(loc, *self.get_tensor(location_type))
    elif location_type in ["cell_centers_x", "cell_centers_y", "cell_centers_z"]:
        # scalar interpolation from cell centers, placed in the requested
        # component slot of the (nC*3)-wide vector layout
        Q = interpolation_matrix(loc, *self.get_tensor("CC"))
        Z = spzeros(loc.shape[0], self.nC)
        if location_type[-1] == "x":
            Q = sp.hstack([Q, Z, Z])
        elif location_type[-1] == "y":
            Q = sp.hstack([Z, Q, Z])
        elif location_type[-1] == "z":
            Q = sp.hstack([Z, Z, Q])
    else:
        raise NotImplementedError(
            "getInterpolationMat: location_type=="
            + location_type
            + " and mesh.dim=="
            + str(self.dim)
        )
    if zeros_outside:
        Q[indZeros, :] = 0
    return Q.tocsr()
def get_interpolation_matrix(
    self, loc, location_type="cell_centers", zeros_outside=False, **kwargs
):
    """Produce the linear interpolation matrix from mesh locations to *loc*.

    Parameters
    ----------
    loc : numpy.ndarray
        Location of points to interpolate to
    location_type : str
        What to interpolate; one of::

            'Ex', 'edges_x'           -> x-component of field defined on x edges
            'Ey', 'edges_y'           -> y-component of field defined on y edges
            'Ez', 'edges_z'           -> z-component of field defined on z edges
            'Fx', 'faces_x'           -> x-component of field defined on x faces
            'Fy', 'faces_y'           -> y-component of field defined on y faces
            'Fz', 'faces_z'           -> z-component of field defined on z faces
            'N', 'nodes'              -> scalar field defined on nodes
            'CC', 'cell_centers'      -> scalar field defined on cell centers
            'CCVx', 'cell_centers_x'  -> x-component of vector field defined on cell centers
            'CCVy', 'cell_centers_y'  -> y-component of vector field defined on cell centers
            'CCVz', 'cell_centers_z'  -> z-component of vector field defined on cell centers
    zeros_outside : bool
        If True, rows for points outside the mesh are zeroed instead of
        raising an error.

    Returns
    -------
    scipy.sparse.csr_matrix
        M, the interpolation matrix
    """
    # backwards-compatible (deprecated) keyword spellings
    if "locType" in kwargs:
        warnings.warn(
            "The locType keyword argument has been deprecated, please use location_type. "
            "This will be removed in discretize 1.0.0",
            DeprecationWarning,
        )
        location_type = kwargs["locType"]
    if "zerosOutside" in kwargs:
        warnings.warn(
            "The zerosOutside keyword argument has been deprecated, please use zeros_outside. "
            "This will be removed in discretize 1.0.0",
            DeprecationWarning,
        )
        zeros_outside = kwargs["zerosOutside"]
    return self._getInterpolationMat(loc, location_type, zeros_outside)
def _fastInnerProduct(self, projection_type, model=None, invert_model=False, invert_matrix=False):
    """Fast version of getFaceInnerProduct.

    This does not handle the case of a full tensor property.

    Parameters
    ----------
    projection_type : str
        'edges' or 'faces' (only the first letter is used)
    model : numpy.array, optional
        material property (tensor properties are possible) at each cell
        center (nC, (1, 3, or 6)); defaults to all ones
    invert_model : bool
        inverts the material property
    invert_matrix : bool
        inverts the matrix

    Returns
    -------
    scipy.sparse.csr_matrix or None
        M, the inner product matrix (nF, nF); None when the model size
        matches neither nC nor nC*dim
    """
    projection_type = projection_type[0].upper()
    if projection_type not in ["F", "E"]:
        raise ValueError("projection_type must be 'F' for faces or 'E' for edges")
    if model is None:
        model = np.ones(self.nC)
    if invert_model:
        model = 1.0 / model
    if is_scalar(model):
        model = model * np.ones(self.nC)
    # number of elements we are averaging (equals dim for regular
    # meshes, but for cyl, where we use symmetry, it is 1 for edge
    # variables and 2 for face variables)
    if self._meshType == "CYL":
        shape = getattr(self, "vn" + projection_type)
        n_elements = sum([1 if x != 0 else 0 for x in shape])
    else:
        n_elements = self.dim
    # Isotropic? or anisotropic?
    if model.size == self.nC:
        # isotropic: one value per cell
        Av = getattr(self, "ave" + projection_type + "2CC")
        Vprop = self.cell_volumes * mkvc(model)
        M = n_elements * sdiag(Av.T * Vprop)
    elif model.size == self.nC * self.dim:
        # diagonally anisotropic: dim values per cell
        Av = getattr(self, "ave" + projection_type + "2CCV")
        # if cyl, then only certain components are relevant due to symmetry
        # for faces, x, z matters, for edges, y (which is theta) matters
        if self._meshType == "CYL":
            if projection_type == "E":
                model = model[:, 1]  # this is the action of a projection mat
            elif projection_type == "F":
                model = model[:, [0, 2]]
        V = sp.kron(sp.identity(n_elements), sdiag(self.cell_volumes))
        M = sdiag(Av.T * V * mkvc(model))
    else:
        return None
    if invert_matrix:
        return sdinv(M)
    else:
        return M
def _fastInnerProductDeriv(self, projection_type, model, invert_model=False, invert_matrix=False):
    """Derivative of the fast inner-product matrix with respect to the model.

    Parameters
    ----------
    projection_type : str
        'E' or 'F' (only the first letter is used)
    model : numpy.ndarray
        material property at cell centers; its size determines the
        TensorType (0: isotropic constant, 1: isotropic variable,
        2: anisotropic)
    invert_model : bool
        inverts the material property
    invert_matrix : bool
        inverts the matrix

    Returns
    -------
    function or None
        innerProductDeriv(v), the derivative of the inner product matrix
        around vector v; None when no derivative is defined for this
        tensor type
    """
    projection_type = projection_type[0].upper()
    if projection_type not in ["F", "E"]:
        raise ValueError("projection_type must be 'F' for faces or 'E' for edges")
    tensorType = TensorType(self, model)
    dMdprop = None
    if invert_matrix or invert_model:
        # the (inverted) inner-product matrix is needed for chain-rule terms
        MI = self._fastInnerProduct(
            projection_type, model, invert_model=invert_model, invert_matrix=invert_matrix
        )
    # number of elements we are averaging (equals dim for regular
    # meshes, but for cyl, where we use symmetry, it is 1 for edge
    # variables and 2 for face variables)
    if self._meshType == "CYL":
        shape = getattr(self, "vn" + projection_type)
        n_elements = sum([1 if x != 0 else 0 for x in shape])
    else:
        n_elements = self.dim
    if tensorType == 0:  # isotropic, constant
        Av = getattr(self, "ave" + projection_type + "2CC")
        V = sdiag(self.cell_volumes)
        # nC x 1 column of ones: spreads the single scalar to every cell
        ones = sp.csr_matrix(
            (np.ones(self.nC), (range(self.nC), np.zeros(self.nC))),
            shape=(self.nC, 1),
        )
        if not invert_matrix and not invert_model:
            dMdprop = n_elements * Av.T * V * ones
        elif invert_matrix and invert_model:
            dMdprop = n_elements * (
                sdiag(MI.diagonal() ** 2) * Av.T * V * ones * sdiag(1.0 / model ** 2)
            )
        elif invert_model:
            dMdprop = n_elements * Av.T * V * sdiag(-1.0 / model ** 2)
        elif invert_matrix:
            dMdprop = n_elements * (sdiag(-MI.diagonal() ** 2) * Av.T * V)
    elif tensorType == 1:  # isotropic, variable in space
        Av = getattr(self, "ave" + projection_type + "2CC")
        V = sdiag(self.cell_volumes)
        if not invert_matrix and not invert_model:
            dMdprop = n_elements * Av.T * V
        elif invert_matrix and invert_model:
            dMdprop = n_elements * (
                sdiag(MI.diagonal() ** 2) * Av.T * V * sdiag(1.0 / model ** 2)
            )
        elif invert_model:
            dMdprop = n_elements * Av.T * V * sdiag(-1.0 / model ** 2)
        elif invert_matrix:
            dMdprop = n_elements * (sdiag(-MI.diagonal() ** 2) * Av.T * V)
    elif tensorType == 2:  # anisotropic
        Av = getattr(self, "ave" + projection_type + "2CCV")
        V = sp.kron(sp.identity(self.dim), sdiag(self.cell_volumes))
        if self._meshType == "CYL":
            # for CYL symmetry, project out the components that do not
            # contribute (faces: x and z; edges: y/theta)
            Zero = sp.csr_matrix((self.nC, self.nC))
            Eye = sp.eye(self.nC)
            if projection_type == "E":
                P = sp.hstack([Zero, Eye, Zero])
                # print(P.todense())
            elif projection_type == "F":
                P = sp.vstack(
                    [sp.hstack([Eye, Zero, Zero]), sp.hstack([Zero, Zero, Eye])]
                )
                # print(P.todense())
        else:
            P = sp.eye(self.nC * self.dim)
        if not invert_matrix and not invert_model:
            dMdprop = Av.T * P * V
        elif invert_matrix and invert_model:
            dMdprop = (
                sdiag(MI.diagonal() ** 2) * Av.T * P * V * sdiag(1.0 / model ** 2)
            )
        elif invert_model:
            dMdprop = Av.T * P * V * sdiag(-1.0 / model ** 2)
        elif invert_matrix:
            dMdprop = sdiag(-MI.diagonal() ** 2) * Av.T * P * V
    if dMdprop is not None:
        def innerProductDeriv(v=None):
            if v is None:
                warnings.warn(
                    "Depreciation Warning: TensorMesh.innerProductDeriv."
                    " You should be supplying a vector. "
                    "Use: sdiag(u)*dMdprop",
                    DeprecationWarning,
                )
                return dMdprop
            return sdiag(v) * dMdprop
        return innerProductDeriv
    else:
        return None
# DEPRECATED
@property
def hx(self):
    """Width of cells in the x direction

    Returns
    -------
    numpy.ndarray

    .. deprecated:: 0.5.0
        `hx` will be removed in discretize 1.0.0 to reduce namespace clutter,
        please use `mesh.h[0]`.
    """
    warnings.warn(
        "hx has been deprecated, please access as mesh.h[0]", DeprecationWarning
    )
    return self.h[0]

@property
def hy(self):
    """Width of cells in the y direction

    Returns
    -------
    numpy.ndarray or None

    .. deprecated:: 0.5.0
        `hy` will be removed in discretize 1.0.0 to reduce namespace clutter,
        please use `mesh.h[1]`.
    """
    warnings.warn(
        "hy has been deprecated, please access as mesh.h[1]", DeprecationWarning
    )
    if self.dim < 2:
        return None
    return self.h[1]

@property
def hz(self):
    """Width of cells in the z direction

    Returns
    -------
    numpy.ndarray or None

    .. deprecated:: 0.5.0
        `hz` will be removed in discretize 1.0.0 to reduce namespace clutter,
        please use `mesh.h[2]`.
    """
    warnings.warn(
        "hz has been deprecated, please access as mesh.h[2]", DeprecationWarning
    )
    if self.dim < 3:
        return None
    return self.h[2]
# Deprecated aliases mapping the old camelCase API onto the new names;
# the wrapping behavior is defined in discretize.utils.code_utils.
vectorNx = deprecate_property("nodes_x", "vectorNx", removal_version="1.0.0")
vectorNy = deprecate_property("nodes_y", "vectorNy", removal_version="1.0.0")
vectorNz = deprecate_property("nodes_z", "vectorNz", removal_version="1.0.0")
vectorCCx = deprecate_property(
    "cell_centers_x", "vectorCCx", removal_version="1.0.0"
)
vectorCCy = deprecate_property(
    "cell_centers_y", "vectorCCy", removal_version="1.0.0"
)
vectorCCz = deprecate_property(
    "cell_centers_z", "vectorCCz", removal_version="1.0.0"
)
getInterpolationMat = deprecate_method(
    "get_interpolation_matrix", "getInterpolationMat", removal_version="1.0.0"
)
isInside = deprecate_method("is_inside", "isInside", removal_version="1.0.0")
getTensor = deprecate_method("get_tensor", "getTensor", removal_version="1.0.0")
| [
"discretize.utils.sdiag",
"discretize.utils.sdinv",
"numpy.array",
"properties.Array",
"discretize.utils.TensorType",
"discretize.utils.is_scalar",
"scipy.sparse.eye",
"discretize.utils.code_utils.deprecate_method",
"numpy.diff",
"warnings.warn",
"scipy.sparse.csr_matrix",
"discretize.utils.nd... | [((25004, 25070), 'discretize.utils.code_utils.deprecate_property', 'deprecate_property', (['"""nodes_x"""', '"""vectorNx"""'], {'removal_version': '"""1.0.0"""'}), "('nodes_x', 'vectorNx', removal_version='1.0.0')\n", (25022, 25070), False, 'from discretize.utils.code_utils import deprecate_method, deprecate_property\n'), ((25086, 25152), 'discretize.utils.code_utils.deprecate_property', 'deprecate_property', (['"""nodes_y"""', '"""vectorNy"""'], {'removal_version': '"""1.0.0"""'}), "('nodes_y', 'vectorNy', removal_version='1.0.0')\n", (25104, 25152), False, 'from discretize.utils.code_utils import deprecate_method, deprecate_property\n'), ((25168, 25234), 'discretize.utils.code_utils.deprecate_property', 'deprecate_property', (['"""nodes_z"""', '"""vectorNz"""'], {'removal_version': '"""1.0.0"""'}), "('nodes_z', 'vectorNz', removal_version='1.0.0')\n", (25186, 25234), False, 'from discretize.utils.code_utils import deprecate_method, deprecate_property\n'), ((25251, 25325), 'discretize.utils.code_utils.deprecate_property', 'deprecate_property', (['"""cell_centers_x"""', '"""vectorCCx"""'], {'removal_version': '"""1.0.0"""'}), "('cell_centers_x', 'vectorCCx', removal_version='1.0.0')\n", (25269, 25325), False, 'from discretize.utils.code_utils import deprecate_method, deprecate_property\n'), ((25356, 25430), 'discretize.utils.code_utils.deprecate_property', 'deprecate_property', (['"""cell_centers_y"""', '"""vectorCCy"""'], {'removal_version': '"""1.0.0"""'}), "('cell_centers_y', 'vectorCCy', removal_version='1.0.0')\n", (25374, 25430), False, 'from discretize.utils.code_utils import deprecate_method, deprecate_property\n'), ((25461, 25535), 'discretize.utils.code_utils.deprecate_property', 'deprecate_property', (['"""cell_centers_z"""', '"""vectorCCz"""'], {'removal_version': '"""1.0.0"""'}), "('cell_centers_z', 'vectorCCz', removal_version='1.0.0')\n", (25479, 25535), False, 'from discretize.utils.code_utils import deprecate_method, 
deprecate_property\n'), ((25576, 25672), 'discretize.utils.code_utils.deprecate_method', 'deprecate_method', (['"""get_interpolation_matrix"""', '"""getInterpolationMat"""'], {'removal_version': '"""1.0.0"""'}), "('get_interpolation_matrix', 'getInterpolationMat',\n removal_version='1.0.0')\n", (25592, 25672), False, 'from discretize.utils.code_utils import deprecate_method, deprecate_property\n'), ((25698, 25764), 'discretize.utils.code_utils.deprecate_method', 'deprecate_method', (['"""is_inside"""', '"""isInside"""'], {'removal_version': '"""1.0.0"""'}), "('is_inside', 'isInside', removal_version='1.0.0')\n", (25714, 25764), False, 'from discretize.utils.code_utils import deprecate_method, deprecate_property\n'), ((25781, 25849), 'discretize.utils.code_utils.deprecate_method', 'deprecate_method', (['"""get_tensor"""', '"""getTensor"""'], {'removal_version': '"""1.0.0"""'}), "('get_tensor', 'getTensor', removal_version='1.0.0')\n", (25797, 25849), False, 'from discretize.utils.code_utils import deprecate_method, deprecate_property\n'), ((1483, 1582), 'properties.Array', 'properties.Array', (['"""widths of the tensor mesh in a single dimension"""'], {'dtype': 'float', 'shape': "('*',)"}), "('widths of the tensor mesh in a single dimension', dtype=\n float, shape=('*',))\n", (1499, 1582), False, 'import properties\n'), ((5819, 5834), 'discretize.utils.ndgrid', 'ndgrid', (['*self.h'], {}), '(*self.h)\n', (5825, 5834), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((9968, 10000), 'discretize.utils.as_array_n_by_dim', 'as_array_n_by_dim', (['pts', 'self.dim'], {}), '(pts, self.dim)\n', (9985, 10000), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((10360, 10393), 'numpy.ones', 'np.ones', (['pts.shape[0]'], {'dtype': 'bool'}), '(pts.shape[0], 
dtype=bool)\n', (10367, 10393), True, 'import numpy as np\n'), ((12113, 12145), 'discretize.utils.as_array_n_by_dim', 'as_array_n_by_dim', (['loc', 'self.dim'], {}), '(loc, self.dim)\n', (12130, 12145), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((17463, 17479), 'discretize.utils.is_scalar', 'is_scalar', (['model'], {}), '(model)\n', (17472, 17479), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((19675, 19698), 'discretize.utils.TensorType', 'TensorType', (['self', 'model'], {}), '(self, model)\n', (19685, 19698), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((23919, 24010), 'warnings.warn', 'warnings.warn', (['"""hx has been deprecated, please access as mesh.h[0]"""', 'DeprecationWarning'], {}), "('hx has been deprecated, please access as mesh.h[0]',\n DeprecationWarning)\n", (23932, 24010), False, 'import warnings\n'), ((24360, 24451), 'warnings.warn', 'warnings.warn', (['"""hy has been deprecated, please access as mesh.h[1]"""', 'DeprecationWarning'], {}), "('hy has been deprecated, please access as mesh.h[1]',\n DeprecationWarning)\n", (24373, 24451), False, 'import warnings\n'), ((24827, 24918), 'warnings.warn', 'warnings.warn', (['"""hz has been deprecated, please access as mesh.h[2]"""', 'DeprecationWarning'], {}), "('hz has been deprecated, please access as mesh.h[2]',\n DeprecationWarning)\n", (24840, 24918), False, 'import warnings\n'), ((4012, 4041), 'numpy.array', 'np.array', (['[x.size for x in h]'], {}), '([x.size for x in h])\n', (4020, 4041), True, 'import numpy as np\n'), ((9688, 9851), 'warnings.warn', 'warnings.warn', (['"""The locType keyword argument has been deprecated, please use location_type. 
This will be removed in discretize 1.0.0"""', 'DeprecationWarning'], {}), "(\n 'The locType keyword argument has been deprecated, please use location_type. This will be removed in discretize 1.0.0'\n , DeprecationWarning)\n", (9701, 9851), False, 'import warnings\n'), ((13268, 13289), 'scipy.sparse.hstack', 'sp.hstack', (['components'], {}), '(components)\n', (13277, 13289), True, 'import scipy.sparse as sp\n'), ((15696, 15859), 'warnings.warn', 'warnings.warn', (['"""The locType keyword argument has been deprecated, please use location_type. This will be removed in discretize 1.0.0"""', 'DeprecationWarning'], {}), "(\n 'The locType keyword argument has been deprecated, please use location_type. This will be removed in discretize 1.0.0'\n , DeprecationWarning)\n", (15709, 15859), False, 'import warnings\n'), ((16011, 16179), 'warnings.warn', 'warnings.warn', (['"""The zerosOutside keyword argument has been deprecated, please use zeros_outside. This will be removed in discretize 1.0.0"""', 'DeprecationWarning'], {}), "(\n 'The zerosOutside keyword argument has been deprecated, please use zeros_outside. 
This will be removed in discretize 1.0.0'\n , DeprecationWarning)\n", (16024, 16179), False, 'import warnings\n'), ((17376, 17392), 'numpy.ones', 'np.ones', (['self.nC'], {}), '(self.nC)\n', (17383, 17392), True, 'import numpy as np\n'), ((18891, 18899), 'discretize.utils.sdinv', 'sdinv', (['M'], {}), '(M)\n', (18896, 18899), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((20444, 20468), 'discretize.utils.sdiag', 'sdiag', (['self.cell_volumes'], {}), '(self.cell_volumes)\n', (20449, 20468), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((2308, 2322), 'discretize.utils.is_scalar', 'is_scalar', (['h_i'], {}), '(h_i)\n', (2317, 2322), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((3187, 3201), 'discretize.utils.is_scalar', 'is_scalar', (['x_i'], {}), '(x_i)\n', (3196, 3201), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((12990, 13014), 'discretize.utils.spzeros', 'spzeros', (['loc.shape[0]', 'n'], {}), '(loc.shape[0], n)\n', (12997, 13014), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((17509, 17525), 'numpy.ones', 'np.ones', (['self.nC'], {}), '(self.nC)\n', (17516, 17525), True, 'import numpy as np\n'), ((18098, 18109), 'discretize.utils.mkvc', 'mkvc', (['model'], {}), '(model)\n', (18102, 18109), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((18139, 18158), 'discretize.utils.sdiag', 'sdiag', (['(Av.T * 
Vprop)'], {}), '(Av.T * Vprop)\n', (18144, 18158), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((21294, 21318), 'discretize.utils.sdiag', 'sdiag', (['self.cell_volumes'], {}), '(self.cell_volumes)\n', (21299, 21318), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((2571, 2589), 'discretize.utils.unpack_widths', 'unpack_widths', (['h_i'], {}), '(h_i)\n', (2584, 2589), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((3869, 3898), 'numpy.array', 'np.array', (['[x.size for x in h]'], {}), '([x.size for x in h])\n', (3877, 3898), True, 'import numpy as np\n'), ((13592, 13622), 'discretize.utils.spzeros', 'spzeros', (['loc.shape[0]', 'self.nC'], {}), '(loc.shape[0], self.nC)\n', (13599, 13622), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((18710, 18733), 'scipy.sparse.identity', 'sp.identity', (['n_elements'], {}), '(n_elements)\n', (18721, 18733), True, 'import scipy.sparse as sp\n'), ((18735, 18759), 'discretize.utils.sdiag', 'sdiag', (['self.cell_volumes'], {}), '(self.cell_volumes)\n', (18740, 18759), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((20520, 20536), 'numpy.ones', 'np.ones', (['self.nC'], {}), '(self.nC)\n', (20527, 20536), True, 'import numpy as np\n'), ((23181, 23335), 'warnings.warn', 'warnings.warn', (['"""Depreciation Warning: TensorMesh.innerProductDeriv. You should be supplying a vector. Use: sdiag(u)*dMdprop"""', 'DeprecationWarning'], {}), "(\n 'Depreciation Warning: TensorMesh.innerProductDeriv. 
You should be supplying a vector. Use: sdiag(u)*dMdprop'\n , DeprecationWarning)\n", (23194, 23335), False, 'import warnings\n'), ((23509, 23517), 'discretize.utils.sdiag', 'sdiag', (['v'], {}), '(v)\n', (23514, 23517), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((10457, 10472), 'numpy.diff', 'np.diff', (['tensor'], {}), '(tensor)\n', (10464, 10472), True, 'import numpy as np\n'), ((13684, 13704), 'scipy.sparse.hstack', 'sp.hstack', (['[Q, Z, Z]'], {}), '([Q, Z, Z])\n', (13693, 13704), True, 'import scipy.sparse as sp\n'), ((18794, 18805), 'discretize.utils.mkvc', 'mkvc', (['model'], {}), '(model)\n', (18798, 18805), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((20555, 20572), 'numpy.zeros', 'np.zeros', (['self.nC'], {}), '(self.nC)\n', (20563, 20572), True, 'import numpy as np\n'), ((21965, 21986), 'scipy.sparse.identity', 'sp.identity', (['self.dim'], {}), '(self.dim)\n', (21976, 21986), True, 'import scipy.sparse as sp\n'), ((21988, 22012), 'discretize.utils.sdiag', 'sdiag', (['self.cell_volumes'], {}), '(self.cell_volumes)\n', (21993, 22012), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((22078, 22111), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['(self.nC, self.nC)'], {}), '((self.nC, self.nC))\n', (22091, 22111), True, 'import scipy.sparse as sp\n'), ((22134, 22149), 'scipy.sparse.eye', 'sp.eye', (['self.nC'], {}), '(self.nC)\n', (22140, 22149), True, 'import scipy.sparse as sp\n'), ((22553, 22579), 'scipy.sparse.eye', 'sp.eye', (['(self.nC * self.dim)'], {}), '(self.nC * self.dim)\n', (22559, 22579), True, 'import scipy.sparse as sp\n'), ((13768, 13788), 'scipy.sparse.hstack', 'sp.hstack', (['[Z, Q, Z]'], {}), '([Z, Q, 
Z])\n', (13777, 13788), True, 'import scipy.sparse as sp\n'), ((20892, 20915), 'discretize.utils.sdiag', 'sdiag', (['(1.0 / model ** 2)'], {}), '(1.0 / model ** 2)\n', (20897, 20915), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((21015, 21039), 'discretize.utils.sdiag', 'sdiag', (['(-1.0 / model ** 2)'], {}), '(-1.0 / model ** 2)\n', (21020, 21039), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((22217, 22245), 'scipy.sparse.hstack', 'sp.hstack', (['[Zero, Eye, Zero]'], {}), '([Zero, Eye, Zero])\n', (22226, 22245), True, 'import scipy.sparse as sp\n'), ((13852, 13872), 'scipy.sparse.hstack', 'sp.hstack', (['[Z, Z, Q]'], {}), '([Z, Z, Q])\n', (13861, 13872), True, 'import scipy.sparse as sp\n'), ((21571, 21594), 'discretize.utils.sdiag', 'sdiag', (['(1.0 / model ** 2)'], {}), '(1.0 / model ** 2)\n', (21576, 21594), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((21694, 21718), 'discretize.utils.sdiag', 'sdiag', (['(-1.0 / model ** 2)'], {}), '(-1.0 / model ** 2)\n', (21699, 21718), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((22815, 22838), 'discretize.utils.sdiag', 'sdiag', (['(1.0 / model ** 2)'], {}), '(1.0 / model ** 2)\n', (22820, 22838), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, sdiag, sdinv, TensorType, interpolation_matrix\n'), ((22929, 22953), 'discretize.utils.sdiag', 'sdiag', (['(-1.0 / model ** 2)'], {}), '(-1.0 / model ** 2)\n', (22934, 22953), False, 'from discretize.utils import is_scalar, as_array_n_by_dim, unpack_widths, mkvc, ndgrid, spzeros, 
sdiag, sdinv, TensorType, interpolation_matrix\n'), ((22392, 22420), 'scipy.sparse.hstack', 'sp.hstack', (['[Eye, Zero, Zero]'], {}), '([Eye, Zero, Zero])\n', (22401, 22420), True, 'import scipy.sparse as sp\n'), ((22422, 22450), 'scipy.sparse.hstack', 'sp.hstack', (['[Zero, Zero, Eye]'], {}), '([Zero, Zero, Eye])\n', (22431, 22450), True, 'import scipy.sparse as sp\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ProDy: A Python Package for Protein Dynamics Analysis
#
# Copyright (C) 2010-2012 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""This module contains unit tests for :mod:`~prody.ensemble`."""
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2010-2012 <NAME>'
from os.path import join
from prody.tests import TestCase
from numpy.testing import assert_equal, assert_allclose
from prody import DCDFile, writeDCD, parseDCD
from prody.tests import TEMPDIR
from prody.tests.test_ensemble import ALLATOMS, ENSEMBLE, RTOL, ATOL, DCD
class TestDCDFile(TestCase):
def setUp(self):
self.dcd = join(TEMPDIR, 'temp.dcd')
def testWriteDCD(self):
dcd = writeDCD(self.dcd, ALLATOMS)
self.assertEqual(dcd, self.dcd, 'failed to write DCD file')
def testParseDCD(self):
e = parseDCD(writeDCD(self.dcd, ALLATOMS))
assert_equal(e._getCoordsets(), DCD._getCoordsets(),
err_msg='failed to parse DCD file correctly')
def testWrite(self):
dcd = DCDFile(self.dcd, 'w')
dcd.write(ENSEMBLE.getCoordsets())
dcd.close()
e = parseDCD(self.dcd)
assert_allclose(e._getCoordsets(), ENSEMBLE._getCoordsets(),
rtol=RTOL, atol=ATOL,
err_msg='failed to parse DCD file correctly')
def testWriteModeAppend(self):
dcd = DCDFile(writeDCD(self.dcd, ENSEMBLE), 'a')
dcd.write(ENSEMBLE.getCoordsets())
dcd.close()
e = parseDCD(self.dcd)
n_csets = len(ENSEMBLE)
coordsets = e._getCoordsets()
assert_equal(coordsets, coordsets,
'failed to parse DCD file correctly')
assert_allclose(coordsets[:n_csets], ENSEMBLE._getCoordsets(),
rtol=RTOL, atol=ATOL,
err_msg='failed to parse DCD file correctly')
| [
"prody.writeDCD",
"numpy.testing.assert_equal",
"prody.tests.test_ensemble.ENSEMBLE.getCoordsets",
"prody.tests.test_ensemble.DCD._getCoordsets",
"os.path.join",
"prody.DCDFile",
"prody.tests.test_ensemble.ENSEMBLE._getCoordsets",
"prody.parseDCD"
] | [((1249, 1274), 'os.path.join', 'join', (['TEMPDIR', '"""temp.dcd"""'], {}), "(TEMPDIR, 'temp.dcd')\n", (1253, 1274), False, 'from os.path import join\n'), ((1318, 1346), 'prody.writeDCD', 'writeDCD', (['self.dcd', 'ALLATOMS'], {}), '(self.dcd, ALLATOMS)\n', (1326, 1346), False, 'from prody import DCDFile, writeDCD, parseDCD\n'), ((1663, 1685), 'prody.DCDFile', 'DCDFile', (['self.dcd', '"""w"""'], {}), "(self.dcd, 'w')\n", (1670, 1685), False, 'from prody import DCDFile, writeDCD, parseDCD\n'), ((1761, 1779), 'prody.parseDCD', 'parseDCD', (['self.dcd'], {}), '(self.dcd)\n', (1769, 1779), False, 'from prody import DCDFile, writeDCD, parseDCD\n'), ((2133, 2151), 'prody.parseDCD', 'parseDCD', (['self.dcd'], {}), '(self.dcd)\n', (2141, 2151), False, 'from prody import DCDFile, writeDCD, parseDCD\n'), ((2230, 2302), 'numpy.testing.assert_equal', 'assert_equal', (['coordsets', 'coordsets', '"""failed to parse DCD file correctly"""'], {}), "(coordsets, coordsets, 'failed to parse DCD file correctly')\n", (2242, 2302), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((1465, 1493), 'prody.writeDCD', 'writeDCD', (['self.dcd', 'ALLATOMS'], {}), '(self.dcd, ALLATOMS)\n', (1473, 1493), False, 'from prody import DCDFile, writeDCD, parseDCD\n'), ((1535, 1554), 'prody.tests.test_ensemble.DCD._getCoordsets', 'DCD._getCoordsets', ([], {}), '()\n', (1552, 1554), False, 'from prody.tests.test_ensemble import ALLATOMS, ENSEMBLE, RTOL, ATOL, DCD\n'), ((1704, 1727), 'prody.tests.test_ensemble.ENSEMBLE.getCoordsets', 'ENSEMBLE.getCoordsets', ([], {}), '()\n', (1725, 1727), False, 'from prody.tests.test_ensemble import ALLATOMS, ENSEMBLE, RTOL, ATOL, DCD\n'), ((1823, 1847), 'prody.tests.test_ensemble.ENSEMBLE._getCoordsets', 'ENSEMBLE._getCoordsets', ([], {}), '()\n', (1845, 1847), False, 'from prody.tests.test_ensemble import ALLATOMS, ENSEMBLE, RTOL, ATOL, DCD\n'), ((2023, 2051), 'prody.writeDCD', 'writeDCD', (['self.dcd', 'ENSEMBLE'], {}), '(self.dcd, 
ENSEMBLE)\n', (2031, 2051), False, 'from prody import DCDFile, writeDCD, parseDCD\n'), ((2076, 2099), 'prody.tests.test_ensemble.ENSEMBLE.getCoordsets', 'ENSEMBLE.getCoordsets', ([], {}), '()\n', (2097, 2099), False, 'from prody.tests.test_ensemble import ALLATOMS, ENSEMBLE, RTOL, ATOL, DCD\n'), ((2369, 2393), 'prody.tests.test_ensemble.ENSEMBLE._getCoordsets', 'ENSEMBLE._getCoordsets', ([], {}), '()\n', (2391, 2393), False, 'from prody.tests.test_ensemble import ALLATOMS, ENSEMBLE, RTOL, ATOL, DCD\n')] |
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to conditions.
#
# Author: <NAME>
# Date Created: 2017-08-15
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import h5py
import numpy as np
import matplotlib.pyplot as plt
from datasets import imagenet
# ImageNet mapping class_index => class_name
imagenet_classnames = imagenet.create_readable_names_for_imagenet_labels()
def find_files(paths, extensions, sort=True):
'''
Returns a list of files in one or multiple directories.
:param paths: str or list, paths to search in for files
:param extensions: str or list, extensions to match
:param sort: bool, whether to sort the list of found files
:return: list of (sorted) files that are found
'''
if type(paths) is str:
paths = [paths]
files = []
for path in paths:
for file in os.listdir(path):
if file.endswith(extensions):
files.append(os.path.join(path, file))
if sort:
files.sort()
return files
def fill_last_batch(image_list, batch_size):
'''
Fill up the last batch with the last example for the list.
Operation is performed in-place.
:param image_list: list of str, image list to fill up
:param batch_size: int, batch_size
:return:
'''
num_examples = len(image_list)
num_batches = int(np.ceil(num_examples/batch_size))
for i in range((num_batches*batch_size)-num_examples):
image_list.append(image_list[-1])
def sort_feature_dataset(feature_dataset):
'''
When more than one preprocessing thread is used the feature_dataset is
not sorted according to alphabetical order of filenames. This function
sorts the dataset in place so that filenames and corresponding fetaures
are sorted by its filename. Note: sorting is in-place.
:param feature_dataset: dict, containting filenames and all features
:return:
'''
indices = np.argsort(feature_dataset['filenames'])
feature_dataset['filenames'].sort()
# Apply sorting to features for each image
for key in feature_dataset.keys():
if key == 'filenames': continue
feature_dataset[key] = feature_dataset[key][indices]
def write_hdf5(filename, layer_names, feature_dataset):
'''
Writes features to HDF5 file.
:param filename: str, filename to output
:param layer_names: list of str, layer names
:param feature_dataset: dict, containing features[layer_names] = vals
:return:
'''
with h5py.File(filename, 'w') as hf:
hf.create_dataset("filenames", data=feature_dataset['filenames'])
for layer_name in layer_names:
hf.create_dataset(layer_name, data=feature_dataset[layer_name], dtype=np.float32)
def display_imagenet_prediction(image, class_index):
class_label = imagenet_classnames[class_index]
print("Prediction: {} (class_index={})".format(class_label, class_index))
plt.figure()
plt.imshow(image)
plt.axis('off')
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.ceil",
"os.listdir",
"os.path.join",
"h5py.File",
"numpy.argsort",
"datasets.imagenet.create_readable_names_for_imagenet_labels",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show"
] | [((807, 859), 'datasets.imagenet.create_readable_names_for_imagenet_labels', 'imagenet.create_readable_names_for_imagenet_labels', ([], {}), '()\n', (857, 859), False, 'from datasets import imagenet\n'), ((2400, 2440), 'numpy.argsort', 'np.argsort', (["feature_dataset['filenames']"], {}), "(feature_dataset['filenames'])\n", (2410, 2440), True, 'import numpy as np\n'), ((3391, 3403), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3401, 3403), True, 'import matplotlib.pyplot as plt\n'), ((3408, 3425), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (3418, 3425), True, 'import matplotlib.pyplot as plt\n'), ((3430, 3445), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3438, 3445), True, 'import matplotlib.pyplot as plt\n'), ((3450, 3460), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3458, 3460), True, 'import matplotlib.pyplot as plt\n'), ((1323, 1339), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1333, 1339), False, 'import os\n'), ((1819, 1853), 'numpy.ceil', 'np.ceil', (['(num_examples / batch_size)'], {}), '(num_examples / batch_size)\n', (1826, 1853), True, 'import numpy as np\n'), ((2965, 2989), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (2974, 2989), False, 'import h5py\n'), ((1412, 1436), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (1424, 1436), False, 'import os\n')] |
#!/usr/bin/python
# x_{t+1} = a x_t + v_t
# y_t = x_t + e_t
import numpy as np
class lgss_bs():
r""" Example of the bootstrap formalism for sequential inference in a
simple linear Gaussian Model.
"""
def __init__(self, a, varV, varE, y):
self.dim = 1
self.a = a
self.varV = varV
self.varE = varE
self.y = y
def evalLogG(self, t, xCur, xPrev, logV):
return -0.5*(xCur[:,0] - self.y[t])**2/self.varE
def simM(self, t, xPrev, ancestors):
return self.a*xPrev + np.sqrt(self.varV)*np.random.standard_normal(size=xPrev.shape)
def evalAuxLogV(self, t, xPrev):
return np.zeros(xPrev.shape[0]) | [
"numpy.random.standard_normal",
"numpy.zeros",
"numpy.sqrt"
] | [((681, 705), 'numpy.zeros', 'np.zeros', (['xPrev.shape[0]'], {}), '(xPrev.shape[0])\n', (689, 705), True, 'import numpy as np\n'), ((557, 575), 'numpy.sqrt', 'np.sqrt', (['self.varV'], {}), '(self.varV)\n', (564, 575), True, 'import numpy as np\n'), ((576, 619), 'numpy.random.standard_normal', 'np.random.standard_normal', ([], {'size': 'xPrev.shape'}), '(size=xPrev.shape)\n', (601, 619), True, 'import numpy as np\n')] |
import os
import time
from typing import Tuple, List
import numpy as np
from cnocr import CnOcr
from cv2 import cv2
import simplerpa.aircv as ac
# 话说网易游戏家也有个aircv,功能类似, 还提供了find_sift方法,使用sift算法查找,以后可以试试
# https://github.com/NetEaseGame/aircv
from simplerpa.core.data.ScreenRect import ScreenRect, Vector
from simplerpa.objtyping.objtyping import DataObject
class ActionImage:
"""
图像处理操作类
"""
cnocr = CnOcr()
@classmethod
def pil_to_cv(cls, pil_image):
"""
把Pillow(PIL)格式的图片,转换成opencv格式的图片。
两者的差别在于,首先PIL支持更丰富的图片表达方式,不一定使用RGB表达,还可以用HSV或者其他形式;
但opencv一定用红绿蓝三原色组合,而且还别出心裁地使用了BGR这个顺序,而不是通常的RGB。
Args:
pil_image: PIL格式的图片
Returns:
opencv格式的图片
"""
img_tmp = pil_image.convert('RGB')
cv_rgb = np.array(img_tmp)
return cv2.cvtColor(cv_rgb, cv2.COLOR_RGB2BGR)
@classmethod
def load_from_file(cls, image_path):
"""
从文件中读取图片,返回opencv格式的变量
Args:
image_path (str): 图片路径,暂时仅支持8位颜色深度
Returns:
opencv格式的图片变量
"""
return cv2.imdecode(np.fromfile(image_path, dtype=np.uint8), -1)
@classmethod
def ocr(cls, cv_image, rect=None, debug=False):
"""
从指定图片的特定位置中提取文本字符串
Args:
cv_image (numpy): 图片变量,要求是opencv格式的
rect: 图片中的位置
Returns:
"""
if not isinstance(cv_image, np.ndarray):
raise TypeError('cv_image should be a ndarray from numpy, but got a {}'.format(type(cv_image)))
if rect is not None:
cv_image = cv_image[rect.top:rect.bottom, rect.left:rect.right]
if len(cv_image.shape) == 2:
cv_image_gray = cv_image
else:
cv_image_gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
img_high_contrast = cls.grayscale_linear_transformation(cv_image_gray, 0, 255)
cls.log_image('ocr', img_high_contrast, debug)
res_chars = cls.cnocr.ocr_for_single_line(img_high_contrast)
if len(res_chars) == 0:
result = ''
else:
result = ''.join(list(map(str, res_chars[0])))
if debug:
print('ocr result: {}'.format(result))
return result
@classmethod
def grayscale_linear_transformation(cls, img_gray, new_min=0, new_max=255):
if img_gray is None:
return None
old_max = img_gray.max()
old_min = img_gray.min()
if old_min == old_max:
return img_gray
scale_ratio = (new_max - new_min) / (old_max - old_min)
img_gray_new = (img_gray - old_min) * scale_ratio + new_min
return img_gray_new.astype(np.uint8)
@classmethod
def find_all_template(cls, image_current, image_template, min_confidence, auto_scale: Tuple[float, float] = None,
scale: float = 1, bgremove=False):
width = image_template.shape[1]
height = image_template.shape[0]
if scale == 1:
resized = image_template
else:
resized = cv2.resize(image_template, (int(width * scale), int(height * scale)),
interpolation=cv2.INTER_CUBIC)
match_results = ac.find_all_template(image_current, resized, min_confidence, bgremove=bgremove)
if match_results is None or len(match_results) == 0:
if auto_scale is None:
return []
else:
scale_min = auto_scale[0]
scale_max = auto_scale[1]
for scale in np.arange(scale_min, scale_max, 0.1):
resized = cv2.resize(image_template, (int(width * scale), int(height * scale)),
interpolation=cv2.INTER_CUBIC)
match_results = ac.find_all_template(image_current, resized, min_confidence, bgremove=bgremove)
# print("try resize template to match: {}".format(scale))
if match_results is not None and len(match_results) > 0:
break
res_list = []
for match_result in match_results:
res = cls._change_result(match_result, scale if auto_scale else None)
res_list.append(res)
return res_list
# 这个方法没用了,因为实际上ac.find_template方法,其实也是调用了find_all_template,然后返回第一个结果而已
# @classmethod
# def find_one_template(cls, image_source, image_template, min_confidence=0.5,
# auto_scale: Tuple[float, float] = None):
# match_result = cls.find_all_template(image_source, image_template, min_confidence, auto_scale)
# return match_result[0] if match_result else None
@classmethod
def _change_result(cls, match_result, scale=None):
if match_result is None:
return None
rect_array = match_result['rectangle']
res = DataObject()
res.confidence = match_result['confidence']
res.rect = ScreenRect(rect_array[0][0], rect_array[3][0], rect_array[0][1], rect_array[3][1])
res.scale = scale
return res
@classmethod
def log_image(cls, name, image, debug=True):
if not debug:
return
path_root = 'log'
if not os.path.exists(path_root):
os.makedirs(path_root)
timestamp = time.time()
cv2.imwrite('{}/{}_{}.png'.format(path_root, name, timestamp), image)
@classmethod
def find_rect(cls, image_source, rect, color, find_all=True, debug=False):
if isinstance(color, Tuple):
return cls.sliding_window(image_source, rect,
lambda image_block, top, left: cls._match_color(image_block, color),
find_all=find_all,
debug=debug)
else:
return cls.sliding_window(image_source, rect,
lambda image_block, top, left: cls._match_bin_bright(image_block, color),
find_all=find_all,
debug=debug)
@staticmethod
def sliding_window(image_source, win_rect, handler, find_all=True, step_x=1, step_y=1, debug=False, overlap=False):
rows = image_source.shape[0]
cols = image_source.shape[1]
win_width = win_rect.x
win_height = win_rect.y
results = []
row = 0
skip_y = 0
while row < rows:
top = row
bottom = row + win_height
if bottom > rows:
break
col = 0
skip_x = 0
while col < cols:
left = col
right = col + win_width
if right > cols:
break
image_block = image_source[top:bottom, left:right]
if debug:
ActionImage.log_image('block-row{}-col{}'.format(row, col), image_block, debug=debug)
passed, res = handler(image_block, top, left)
if passed:
result = DataObject()
result.handle_res = res
res_rect = ScreenRect(left, right, top, bottom)
result.rect = res_rect
results.append(result)
if find_all:
skip_x = win_width
skip_y = win_height
else:
return results
if skip_x > 0:
col = col + skip_x
skip_x = 0
else:
col = col + step_x
if skip_y > 0:
row = row + skip_y
skip_y = 0
else:
row = row + step_y
return results
@staticmethod
def _match_bin_bright(image_bin, bright):
passed = np.all(image_bin == bright)
return passed, None
@staticmethod
def _match_color(image, color):
img_sum = np.sum(image, axis=2)
r, g, b = color
color_sum = r + g + b
if np.all(img_sum == color_sum):
# 求和通过,说明大致匹配,再详细考察具体内容
image_r = image[:, :, 2]
image_g = image[:, :, 1]
image_b = image[:, :, 0]
r_match = np.all(image_r == r)
g_match = np.all(image_g == g)
b_match = np.all(image_b == b)
passed = r_match and g_match and b_match
return passed, None
else:
return False, None
@classmethod
def get_color(cls, image, point: Vector):
pixel = image[point.y, point.x]
b, g, r = pixel
return r, g, b
@classmethod
def get_color_sim(cls, image, color, point: Vector):
r, g, b = cls.get_color(image, point)
diff = abs(color[0] - r) + abs(color[1] - g) + abs(color[2] - b)
similarity = 1 - diff / (255 + 255 + 255)
return similarity
@classmethod
def to_grayscale(cls, image, high_contrast=False, keep3channel=False):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
if high_contrast:
gray = cls.grayscale_linear_transformation(gray)
gray = cv2.Canny(gray, 100, 200)
if keep3channel:
return cv2.merge((gray, gray, gray))
else:
return gray
@classmethod
def to_binary(cls, image, foreground=None, background=None, tolerance=0.1, single_channel=False):
img = image.copy()
if foreground is not None:
color_bgr = np.array([foreground[2], foreground[1], foreground[0]])
elif background is not None:
color_bgr = np.array([background[2], background[1], background[0]])
else:
raise RuntimeError("either foreground or background should be not None!")
channel = img.shape[2]
if channel == 4:
img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
diff = int(255 * tolerance)
color_min = color_bgr - diff
color_min[color_min < 0] = 0
color_max = color_bgr + diff
color_max[color_max > 255] = 255
mask = cv2.inRange(img, color_min, color_max)
if foreground is not None:
img[mask > 0] = (0, 0, 0)
img[mask == 0] = (255, 255, 255)
else:
img[mask > 0] = (255, 255, 255)
img[mask == 0] = (0, 0, 0)
if single_channel:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# ActionImage.log_image('to_binary', img)
return img
@classmethod
def find_content_parts(cls, image, foreground, tolerance) -> List[np.ndarray]:
# ActionImage.log_image('1.color', image)
img_bin = cls.to_binary(image, foreground=foreground, tolerance=tolerance, single_channel=True)
# ActionImage.log_image('2.binary', img_bin)
img_erode = cls.erode(img_bin)
# ActionImage.log_image('3.erode', img_erode)
rect_list = cls.get_connected_area(img_erode)
blocks = cls.get_blocks(image, rect_list)
return blocks
@classmethod
def find_main_part(cls, image, foreground, tolerance, debug=False) -> (ScreenRect, np.ndarray):
ActionImage.log_image('1.color', image, debug)
img_bin = cls.to_binary(image, foreground=foreground, tolerance=tolerance, single_channel=True)
ActionImage.log_image('2.binary', img_bin, debug)
img_erode = cls.erode(img_bin)
ActionImage.log_image('3.erode', img_erode, debug)
rect_list = cls.get_connected_area(img_erode)
main_rect = max(rect_list, key=lambda rect: rect.area)
main_part = image[main_rect.top:main_rect.bottom, main_rect.left:main_rect.right]
ActionImage.log_image('4.main_part', main_part, debug)
main_part_bin = img_bin[main_rect.top:main_rect.bottom, main_rect.left:main_rect.right]
ActionImage.log_image('5.main_part_bin', main_part_bin, debug)
return main_part, main_part_bin
@classmethod
def split_rows(cls, img_gray, background):
space_height = 2
result_list = cls.find_rect(img_gray, Vector(img_gray.shape[1], space_height), background, find_all=True)
space = None
spaces = []
for result in result_list:
# 合并相邻的空白
rect = result.rect
if space is None:
space = [rect.top, rect.bottom]
spaces.append(space)
continue
if rect.top <= space[1]:
space[1] = rect.bottom
else:
space = [rect.top, rect.bottom]
spaces.append(space)
rows = []
pre_space = None
height = img_gray.shape[0]
for space in spaces:
# 获取有内容的行坐标
if pre_space is None:
if space[0] != 0:
rows.append([0, space[0] + space_height])
else:
t = pre_space[1] - space_height
b = space[0] + space_height
rows.append([t if t > 0 else 0, b if b < height else height])
pre_space = space
if space is not None and space[1] < height - 1:
t = space[1] - space_height
rows.append([t if t > 0 else 0, height])
if space is None:
rows.append([0, height])
return rows
#
@classmethod
def erode(cls, img):
return cv2.erode(img, None, iterations=5)
@classmethod
def get_connected_area(cls, img):
bb_img = cv2.bitwise_not(img)
# 传入的图片是白色背景的,但connectedComponentsWithStats查找连通域要求必须是黑色背景,所以这里做反色转换
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(bb_img)
rect_list = []
for i, box in enumerate(stats):
pos = np.where(labels == i)
if bb_img[pos[0][0], pos[1][0]] == 0:
# 把背景组成的连通域剔除
continue
x, y, width, height, area = box
rect = ScreenRect(x, x + width, y, y + height)
rect_list.append(rect)
rect_list.sort(key=lambda r: r.top, reverse=False)
return rect_list
@classmethod
def get_blocks(cls, img, rect_list):
blocks = []
for rect in rect_list:
part = img[rect.top:rect.bottom, rect.left:rect.right]
blocks.append(part)
return blocks
@classmethod
def sub_image(cls, img, rect):
width = img.shape[1]
height = img.shape[0]
l = rect.left
r = rect.right
t = rect.top
b = rect.bottom
return img[t if t > 0 else 0:b if b < height else height, l if l > 0 else 0:r if r < width else width]
@classmethod
def diff(cls, img1, img2):
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
return cv2.absdiff(gray1, gray2)
| [
"numpy.fromfile",
"numpy.array",
"cv2.cv2.bitwise_not",
"simplerpa.core.data.ScreenRect.ScreenRect",
"cnocr.CnOcr",
"numpy.arange",
"simplerpa.core.data.ScreenRect.Vector",
"os.path.exists",
"cv2.cv2.connectedComponentsWithStats",
"numpy.where",
"cv2.cv2.merge",
"cv2.cv2.Canny",
"simplerpa.a... | [((420, 427), 'cnocr.CnOcr', 'CnOcr', ([], {}), '()\n', (425, 427), False, 'from cnocr import CnOcr\n'), ((813, 830), 'numpy.array', 'np.array', (['img_tmp'], {}), '(img_tmp)\n', (821, 830), True, 'import numpy as np\n'), ((846, 885), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['cv_rgb', 'cv2.COLOR_RGB2BGR'], {}), '(cv_rgb, cv2.COLOR_RGB2BGR)\n', (858, 885), False, 'from cv2 import cv2\n'), ((3246, 3325), 'simplerpa.aircv.find_all_template', 'ac.find_all_template', (['image_current', 'resized', 'min_confidence'], {'bgremove': 'bgremove'}), '(image_current, resized, min_confidence, bgremove=bgremove)\n', (3266, 3325), True, 'import simplerpa.aircv as ac\n'), ((4897, 4909), 'simplerpa.objtyping.objtyping.DataObject', 'DataObject', ([], {}), '()\n', (4907, 4909), False, 'from simplerpa.objtyping.objtyping import DataObject\n'), ((4981, 5068), 'simplerpa.core.data.ScreenRect.ScreenRect', 'ScreenRect', (['rect_array[0][0]', 'rect_array[3][0]', 'rect_array[0][1]', 'rect_array[3][1]'], {}), '(rect_array[0][0], rect_array[3][0], rect_array[0][1], rect_array\n [3][1])\n', (4991, 5068), False, 'from simplerpa.core.data.ScreenRect import ScreenRect, Vector\n'), ((5340, 5351), 'time.time', 'time.time', ([], {}), '()\n', (5349, 5351), False, 'import time\n'), ((7922, 7949), 'numpy.all', 'np.all', (['(image_bin == bright)'], {}), '(image_bin == bright)\n', (7928, 7949), True, 'import numpy as np\n'), ((8051, 8072), 'numpy.sum', 'np.sum', (['image'], {'axis': '(2)'}), '(image, axis=2)\n', (8057, 8072), True, 'import numpy as np\n'), ((8139, 8167), 'numpy.all', 'np.all', (['(img_sum == color_sum)'], {}), '(img_sum == color_sum)\n', (8145, 8167), True, 'import numpy as np\n'), ((9104, 9143), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (9116, 9143), False, 'from cv2 import cv2\n'), ((9246, 9271), 'cv2.cv2.Canny', 'cv2.Canny', (['gray', '(100)', '(200)'], {}), '(gray, 100, 200)\n', (9255, 9271), False, 'from 
cv2 import cv2\n'), ((10181, 10219), 'cv2.cv2.inRange', 'cv2.inRange', (['img', 'color_min', 'color_max'], {}), '(img, color_min, color_max)\n', (10192, 10219), False, 'from cv2 import cv2\n'), ((13455, 13489), 'cv2.cv2.erode', 'cv2.erode', (['img', 'None'], {'iterations': '(5)'}), '(img, None, iterations=5)\n', (13464, 13489), False, 'from cv2 import cv2\n'), ((13563, 13583), 'cv2.cv2.bitwise_not', 'cv2.bitwise_not', (['img'], {}), '(img)\n', (13578, 13583), False, 'from cv2 import cv2\n'), ((13707, 13747), 'cv2.cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['bb_img'], {}), '(bb_img)\n', (13739, 13747), False, 'from cv2 import cv2\n'), ((14787, 14825), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['img1', 'cv2.COLOR_BGR2GRAY'], {}), '(img1, cv2.COLOR_BGR2GRAY)\n', (14799, 14825), False, 'from cv2 import cv2\n'), ((14842, 14880), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['img2', 'cv2.COLOR_BGR2GRAY'], {}), '(img2, cv2.COLOR_BGR2GRAY)\n', (14854, 14880), False, 'from cv2 import cv2\n'), ((14897, 14922), 'cv2.cv2.absdiff', 'cv2.absdiff', (['gray1', 'gray2'], {}), '(gray1, gray2)\n', (14908, 14922), False, 'from cv2 import cv2\n'), ((1133, 1172), 'numpy.fromfile', 'np.fromfile', (['image_path'], {'dtype': 'np.uint8'}), '(image_path, dtype=np.uint8)\n', (1144, 1172), True, 'import numpy as np\n'), ((1784, 1826), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['cv_image', 'cv2.COLOR_BGR2GRAY'], {}), '(cv_image, cv2.COLOR_BGR2GRAY)\n', (1796, 1826), False, 'from cv2 import cv2\n'), ((5258, 5283), 'os.path.exists', 'os.path.exists', (['path_root'], {}), '(path_root)\n', (5272, 5283), False, 'import os\n'), ((5297, 5319), 'os.makedirs', 'os.makedirs', (['path_root'], {}), '(path_root)\n', (5308, 5319), False, 'import os\n'), ((8338, 8358), 'numpy.all', 'np.all', (['(image_r == r)'], {}), '(image_r == r)\n', (8344, 8358), True, 'import numpy as np\n'), ((8381, 8401), 'numpy.all', 'np.all', (['(image_g == g)'], {}), '(image_g == g)\n', (8387, 8401), True, 'import numpy 
as np\n'), ((8424, 8444), 'numpy.all', 'np.all', (['(image_b == b)'], {}), '(image_b == b)\n', (8430, 8444), True, 'import numpy as np\n'), ((9317, 9346), 'cv2.cv2.merge', 'cv2.merge', (['(gray, gray, gray)'], {}), '((gray, gray, gray))\n', (9326, 9346), False, 'from cv2 import cv2\n'), ((9591, 9646), 'numpy.array', 'np.array', (['[foreground[2], foreground[1], foreground[0]]'], {}), '([foreground[2], foreground[1], foreground[0]])\n', (9599, 9646), True, 'import numpy as np\n'), ((9939, 9976), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGRA2BGR'], {}), '(img, cv2.COLOR_BGRA2BGR)\n', (9951, 9976), False, 'from cv2 import cv2\n'), ((10481, 10518), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (10493, 10518), False, 'from cv2 import cv2\n'), ((12162, 12201), 'simplerpa.core.data.ScreenRect.Vector', 'Vector', (['img_gray.shape[1]', 'space_height'], {}), '(img_gray.shape[1], space_height)\n', (12168, 12201), False, 'from simplerpa.core.data.ScreenRect import ScreenRect, Vector\n'), ((13829, 13850), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (13837, 13850), True, 'import numpy as np\n'), ((14019, 14058), 'simplerpa.core.data.ScreenRect.ScreenRect', 'ScreenRect', (['x', '(x + width)', 'y', '(y + height)'], {}), '(x, x + width, y, y + height)\n', (14029, 14058), False, 'from simplerpa.core.data.ScreenRect import ScreenRect, Vector\n'), ((3580, 3616), 'numpy.arange', 'np.arange', (['scale_min', 'scale_max', '(0.1)'], {}), '(scale_min, scale_max, 0.1)\n', (3589, 3616), True, 'import numpy as np\n'), ((9708, 9763), 'numpy.array', 'np.array', (['[background[2], background[1], background[0]]'], {}), '([background[2], background[1], background[0]])\n', (9716, 9763), True, 'import numpy as np\n'), ((3826, 3905), 'simplerpa.aircv.find_all_template', 'ac.find_all_template', (['image_current', 'resized', 'min_confidence'], {'bgremove': 'bgremove'}), '(image_current, resized, 
min_confidence, bgremove=bgremove)\n', (3846, 3905), True, 'import simplerpa.aircv as ac\n'), ((7116, 7128), 'simplerpa.objtyping.objtyping.DataObject', 'DataObject', ([], {}), '()\n', (7126, 7128), False, 'from simplerpa.objtyping.objtyping import DataObject\n'), ((7204, 7240), 'simplerpa.core.data.ScreenRect.ScreenRect', 'ScreenRect', (['left', 'right', 'top', 'bottom'], {}), '(left, right, top, bottom)\n', (7214, 7240), False, 'from simplerpa.core.data.ScreenRect import ScreenRect, Vector\n')] |
from .utils import VectorAndNumbers
from .algorithm import Algorithm
from .individual import Individual
from .job import Job
from .problem import Problem
from SALib.sample.saltelli import sample as sobol_sample
from SALib.sample.morris import sample as morris_sample
from SALib.sample.fast_sampler import sample as fast_sample
from SALib.sample.latin import sample as latin_sample
from SALib.sample.ff import sample as ff_sample
from SALib.analyze import sobol
from SALib.analyze import ff
from SALib.analyze import morris
from SALib.analyze import fast
from SALib.analyze import rbd_fast
from SALib.analyze import delta
import time
import numpy as np
_method = ['rbd_fast', 'fast', 'morris', 'sobol', 'delta', 'ff']
class SALibAlgorithm(Algorithm):
"""
SALib Analysis
"""
def __init__(self, problem: Problem, name='SALibAlgorithm'):
super().__init__(problem, name)
self.sa_problem = {}
self.samples_x = []
self.samples_y = []
self.job = Job(self.problem)
self.options.declare(name='method', default='sobol', values=_method,
desc='Method')
self.options.declare(name='print_to_console', default=False,
desc='Print to console')
self.options.declare(name='samples', default=10, lower=1,
desc='Samples')
def run(self):
t_s = time.time()
# set SALib problem
names = []
bounds = []
for parameter in self.problem.parameters:
names.append(parameter['name'])
bounds.append(parameter['bounds'])
self.sa_problem = {'num_vars': len(self.problem.parameters),
'names': names,
'bounds': bounds}
# generate samples
if self.options["method"] == "rbd_fast":
self.samples_x = latin_sample(self.sa_problem, self.options["samples"])
elif self.options["method"] == "fast":
self.samples_x = fast_sample(self.sa_problem, self.options["samples"])
elif self.options["method"] == "morris":
self.samples_x = morris_sample(self.sa_problem, self.options["samples"], num_levels=4)
elif self.options["method"] == "sobol":
self.samples_x = sobol_sample(self.sa_problem, self.options["samples"])
elif self.options["method"] == "delta":
self.samples_x = latin_sample(self.sa_problem, self.options["samples"])
elif self.options["method"] == "ff":
self.samples_x = ff_sample(self.sa_problem, self.options["samples"])
individuals = []
for vector in self.samples_x:
individuals.append(Individual(vector))
# append to problem
for individual in individuals:
self.problem.individuals.append(individual)
# evaluate individuals
self.evaluate(individuals)
for individual in individuals:
self.samples_y.append(individual.costs[0]) # TODO: fix index [0]
self.samples_y = np.array(self.samples_y)
t = time.time() - t_s
self.problem.logger.info("Sensitivity: elapsed time: {} s".format(t))
# sync changed individual informations
self.problem.data_store.sync_all()
def analyze(self):
if self.options["method"] == "rbd_fast":
return self.analyze_rbd_fast()
elif self.options["method"] == "fast":
return self.analyze_sobol()
elif self.options["method"] == "morris":
return self.analyze_morris()
elif self.options["method"] == "sobol":
return self.analyze_sobol()
elif self.options["method"] == "delta":
return self.analyze_delta()
elif self.options["method"] == "ff":
return self.analyze_ff()
def analyze_rbd_fast(self):
# RBD-FAST - Random Balance Designs Fourier Amplitude Sensitivity Test
return rbd_fast.analyze(self.sa_problem, self.samples_x, self.samples_y, print_to_console=self.options["print_to_console"])
def analyze_fast(self):
# FAST - Fourier Amplitude Sensitivity Test
return fast.analyze(self.sa_problem, self.samples_y, print_to_console=self.options["print_to_console"])
def analyze_morris(self):
# Method of Morris
return morris.analyze(self.sa_problem, self.samples_x, self.samples_y, conf_level=0.95, num_levels=4, print_to_console=self.options["print_to_console"])
def analyze_sobol(self):
# Sobol Sensitivity Analysis
return sobol.analyze(self.sa_problem, self.samples_y, print_to_console=self.options["print_to_console"])
def analyze_delta(self):
# Delta Moment-Independent Measure
return delta.analyze(self.sa_problem, self.samples_x, self.samples_y, print_to_console=self.options["print_to_console"])
def analyze_ff(self):
# Fractional Factorial
return ff.analyze(self.sa_problem, self.samples_x, self.samples_y, second_order=True, print_to_console=self.options["print_to_console"])
class Sensitivity(Algorithm):
def __init__(self, problem, parameters, name='Sensitivity analysis'):
self.parameters = parameters
super().__init__(problem, name)
self.options.declare(name='max_population_size', default=100, lower=1,
desc='Maximal number of individuals in population')
def run(self):
parameters = []
for parameter in self.problem.parameters:
parameters.append(float(parameter['initial_value']))
for parameter_name in self.parameters:
parameter_values = []
index = 0
selected_parameter = None
for parameter in self.parameters:
if parameter['name'] == parameter_name['name']:
selected_parameter = parameter
break
index += 1
individuals = []
for i in range(self.options['max_population_size']):
value = VectorAndNumbers.gen_number(selected_parameter['bounds'], selected_parameter['precision'], 'normal')
parameters[index] = value
parameter_values.append(value)
individual = Individual(parameters.copy())
individuals.append(individual)
self.evaluate(individuals)
# costs = []
# # TODO: Make also for multi-objective
# for individual in individuals:
# costs.append(individual.costs)
# append individuals
for individual in individuals:
self.problem.individuals.append(individual)
# sync changed individual informations
self.problem.data_store.sync_all()
| [
"SALib.analyze.ff.analyze",
"SALib.sample.ff.sample",
"SALib.analyze.delta.analyze",
"SALib.sample.fast_sampler.sample",
"SALib.analyze.fast.analyze",
"SALib.analyze.sobol.analyze",
"SALib.sample.latin.sample",
"numpy.array",
"SALib.analyze.rbd_fast.analyze",
"SALib.sample.morris.sample",
"SALib... | [((1411, 1422), 'time.time', 'time.time', ([], {}), '()\n', (1420, 1422), False, 'import time\n'), ((3067, 3091), 'numpy.array', 'np.array', (['self.samples_y'], {}), '(self.samples_y)\n', (3075, 3091), True, 'import numpy as np\n'), ((3972, 4092), 'SALib.analyze.rbd_fast.analyze', 'rbd_fast.analyze', (['self.sa_problem', 'self.samples_x', 'self.samples_y'], {'print_to_console': "self.options['print_to_console']"}), "(self.sa_problem, self.samples_x, self.samples_y,\n print_to_console=self.options['print_to_console'])\n", (3988, 4092), False, 'from SALib.analyze import rbd_fast\n'), ((4185, 4286), 'SALib.analyze.fast.analyze', 'fast.analyze', (['self.sa_problem', 'self.samples_y'], {'print_to_console': "self.options['print_to_console']"}), "(self.sa_problem, self.samples_y, print_to_console=self.options\n ['print_to_console'])\n", (4197, 4286), False, 'from SALib.analyze import fast\n'), ((4355, 4505), 'SALib.analyze.morris.analyze', 'morris.analyze', (['self.sa_problem', 'self.samples_x', 'self.samples_y'], {'conf_level': '(0.95)', 'num_levels': '(4)', 'print_to_console': "self.options['print_to_console']"}), "(self.sa_problem, self.samples_x, self.samples_y, conf_level=\n 0.95, num_levels=4, print_to_console=self.options['print_to_console'])\n", (4369, 4505), False, 'from SALib.analyze import morris\n'), ((4583, 4685), 'SALib.analyze.sobol.analyze', 'sobol.analyze', (['self.sa_problem', 'self.samples_y'], {'print_to_console': "self.options['print_to_console']"}), "(self.sa_problem, self.samples_y, print_to_console=self.\n options['print_to_console'])\n", (4596, 4685), False, 'from SALib.analyze import sobol\n'), ((4769, 4886), 'SALib.analyze.delta.analyze', 'delta.analyze', (['self.sa_problem', 'self.samples_x', 'self.samples_y'], {'print_to_console': "self.options['print_to_console']"}), "(self.sa_problem, self.samples_x, self.samples_y,\n print_to_console=self.options['print_to_console'])\n", (4782, 4886), False, 'from SALib.analyze import 
delta\n'), ((4956, 5090), 'SALib.analyze.ff.analyze', 'ff.analyze', (['self.sa_problem', 'self.samples_x', 'self.samples_y'], {'second_order': '(True)', 'print_to_console': "self.options['print_to_console']"}), "(self.sa_problem, self.samples_x, self.samples_y, second_order=\n True, print_to_console=self.options['print_to_console'])\n", (4966, 5090), False, 'from SALib.analyze import ff\n'), ((1896, 1950), 'SALib.sample.latin.sample', 'latin_sample', (['self.sa_problem', "self.options['samples']"], {}), "(self.sa_problem, self.options['samples'])\n", (1908, 1950), True, 'from SALib.sample.latin import sample as latin_sample\n'), ((3105, 3116), 'time.time', 'time.time', ([], {}), '()\n', (3114, 3116), False, 'import time\n'), ((2027, 2080), 'SALib.sample.fast_sampler.sample', 'fast_sample', (['self.sa_problem', "self.options['samples']"], {}), "(self.sa_problem, self.options['samples'])\n", (2038, 2080), True, 'from SALib.sample.fast_sampler import sample as fast_sample\n'), ((2159, 2228), 'SALib.sample.morris.sample', 'morris_sample', (['self.sa_problem', "self.options['samples']"], {'num_levels': '(4)'}), "(self.sa_problem, self.options['samples'], num_levels=4)\n", (2172, 2228), True, 'from SALib.sample.morris import sample as morris_sample\n'), ((2306, 2360), 'SALib.sample.saltelli.sample', 'sobol_sample', (['self.sa_problem', "self.options['samples']"], {}), "(self.sa_problem, self.options['samples'])\n", (2318, 2360), True, 'from SALib.sample.saltelli import sample as sobol_sample\n'), ((2438, 2492), 'SALib.sample.latin.sample', 'latin_sample', (['self.sa_problem', "self.options['samples']"], {}), "(self.sa_problem, self.options['samples'])\n", (2450, 2492), True, 'from SALib.sample.latin import sample as latin_sample\n'), ((2567, 2618), 'SALib.sample.ff.sample', 'ff_sample', (['self.sa_problem', "self.options['samples']"], {}), "(self.sa_problem, self.options['samples'])\n", (2576, 2618), True, 'from SALib.sample.ff import sample as ff_sample\n')] |
import SimpleITK as sitk
import numpy as np
import os
import paths
import csv
import math
from scipy.io import loadmat
from skimage.measure import regionprops, marching_cubes_classic, mesh_surface_area
def divide_hcp(connectivity_matrix, hcp_connectivity):
''' divide the connectivity matrix by the hcp matrix'''
assert(connectivity_matrix.shape == hcp_connectivity.shape)
output_matrix = np.zeros(connectivity_matrix.shape)
for i in range(connectivity_matrix.shape[0]):
for j in range(connectivity_matrix.shape[1]):
if hcp_connectivity[i,j] != 0:
output_matrix[i,j] = connectivity_matrix[i,j]/hcp_connectivity[i,j]
return output_matrix
def get_hcp_connectivity_matrice(hcp_connectivity_matrices_path = paths.hcp_connectivity_matrices_path):
'''Get the pass-type and end-type connectivity matrices from HCP1021 subjects'''
end_matrix_path = os.path.join(hcp_connectivity_matrices_path, 'HCP1021.1mm.fib.gz.aal.count.end.connectivity.mat')
pass_matrix_path = os.path.join(hcp_connectivity_matrices_path, 'HCP1021.1mm.fib.gz.aal.count.pass.connectivity.mat')
end_obj = loadmat(end_matrix_path)
end_matrix = end_obj['connectivity']
pass_obj = loadmat(pass_matrix_path)
pass_matrix = pass_obj['connectivity']
return pass_matrix, end_matrix
def ReadImage(path):
''' This code returns the numpy nd array for a MR image at path'''
return sitk.GetArrayFromImage(sitk.ReadImage(path)).astype(np.float32)
def find_list(subject_id, list):
''' this is used to find the stroke lesion for a subject name '''
files = [file for file in list if subject_id in file]
return files[0]
def find_3d_surface(mask, voxel_spacing=(1.0,1.0,1.0)):
''' find the surface for a 3D object '''
verts, faces = marching_cubes_classic(volume=mask, spacing=voxel_spacing)
return mesh_surface_area(verts, faces)
def find_3d_roundness(mask):
''' find the roundess of a 3D object '''
mask_region_props = regionprops(mask.astype(int))
mask_area = mask_region_props[0].area
mask_equivDiameter = (6.0*mask_area/math.pi)**(1.0/3.0)
mask_major_axis_length = mask_region_props[0].major_axis_length
return mask_equivDiameter**2/mask_major_axis_length**2
def reshape_by_padding_upper_coords(image, new_shape, pad_value=None):
''' reshape the 3d matrix '''
shape = tuple(list(image.shape))
new_shape = tuple(np.max(np.concatenate((shape, new_shape)).reshape((2,len(shape))), axis=0))
if pad_value is None:
if len(shape)==2:
pad_value = image[0,0]
elif len(shape)==3:
pad_value = image[0, 0, 0]
else:
raise ValueError("Image must be either 2 or 3 dimensional")
res = np.ones(list(new_shape), dtype=image.dtype) * pad_value
if len(shape) == 2:
res[0:0+int(shape[0]), 0:0+int(shape[1])] = image
elif len(shape) == 3:
res[0:0+int(shape[0]), 0:0+int(shape[1]), 0:0+int(shape[2])] = image
return res
# ======================= Tools for connectivity matrix ============================================= #
def threshold_connectivity_matrix(connectivity_matrix, threshold=0.01):
''' threshold the connectiivty matrix in order to remove the noise'''
thresholded_connectivity_matrix= np.copy(connectivity_matrix)
thresholded_connectivity_matrix[connectivity_matrix <= threshold*np.amax(connectivity_matrix)] = 0.0
return thresholded_connectivity_matrix
def weight_conversion(W):
''' convert to the normalized version and binary version'''
W_bin = np.copy(W)
W_bin[W!=0]=1
W_nrm = np.copy(W)
W_nrm = W_nrm/np.amax(np.absolute(W))
return W_nrm, W_bin
def get_lesion_weights(stroke_mni_path):
''' get the weight vector(workshop paper)'''
aal_path = os.path.join(paths.dsi_studio_path, 'atlas', 'aal.nii.gz')
aal_nda = ReadImage(aal_path)
aal_182_218_182 = reshape_by_padding_upper_coords(aal_nda, (182,218,182), 0)
stroke_mni_nda = ReadImage(stroke_mni_path)
weights = np.zeros(int(np.amax(aal_182_218_182)), dtype=float)
for bp_number in range(int(np.amax(aal_182_218_182))):
mask = np.zeros(aal_182_218_182.shape, aal_182_218_182.dtype)
mask[aal_182_218_182==(bp_number+1)]=1
bp_size = float(np.count_nonzero(mask))
stroke_in_bp = np.multiply(mask, stroke_mni_nda)
stroke_in_bp_size = float(np.count_nonzero(stroke_in_bp))
weights[bp_number] = stroke_in_bp_size/bp_size
#weights[bp_number] = stroke_in_bp_size
return weights
def get_modified_lesion_weights(stroke_mni_path):
''' get the modified weight vector'''
aal_path = os.path.join(paths.dsi_studio_path, 'atlas', 'aal.nii.gz')
aal_nda = ReadImage(aal_path)
aal_182_218_182 = reshape_by_padding_upper_coords(aal_nda, (182,218,182), 0)
stroke_mni_nda = ReadImage(stroke_mni_path)
stroke_volume = float(np.count_nonzero(stroke_mni_nda))
weights = np.zeros(int(np.amax(aal_182_218_182)), dtype=float)
for bp_number in range(int(np.amax(aal_182_218_182))):
mask = np.zeros(aal_182_218_182.shape, aal_182_218_182.dtype)
mask[aal_182_218_182==(bp_number+1)]=1
#bp_size = float(np.count_nonzero(mask))
stroke_in_bp = np.multiply(mask, stroke_mni_nda)
stroke_volume_in_bp = float(np.count_nonzero(stroke_in_bp))
#weights[bp_number] = 1.0 + stroke_volume_in_bp/stroke_volume
weights[bp_number] = stroke_volume_in_bp/stroke_volume
#remaining_volume = stroke_volume - np.sum(weights)
#print(remaining_volume)
return weights
def get_train_dataset():
'''Give you the training dataset'''
gt_subject_paths = [os.path.join(root, name) for root, dirs, files in os.walk(paths.isles2017_training_dir) for name in files if '.OT.' in name and '__MACOSX' not in root and name.endswith('.nii')]
gt_subject_paths.sort()
# The CSV file for train dataset
train_mRS_file = "ISLES2017_Training.csv"
train_mRS_path = os.path.join(paths.isles2017_dir, train_mRS_file)
assert(os.path.isfile(train_mRS_path))
# Read CSV file for Train dataset
train_dataset = {}
with open(train_mRS_path, 'rt') as csv_file:
csv_reader = csv.reader(csv_file)
for line in csv_reader:
if line[2] == '90' or line[2] == '88' or line[2] == '96' or line[2] == '97': # 90 days
subject_name = line[0]
gt_file = [file for file in gt_subject_paths if '/'+subject_name+'/' in file]
if gt_file:
train_dataset[subject_name]={}
train_dataset[subject_name]['mRS'] = line[1]
train_dataset[line[0]]['TICI'] = line[3]
train_dataset[line[0]]['TSS'] = line[4]
train_dataset[line[0]]['TTT'] = line[5]
train_dataset[line[0]]['ID'] = gt_file[0][-10:-4]
train_dataset[line[0]]['tracts'] = line[6]
return train_dataset
# Get the mRS for training subject from training_1 to training_48
def extract_gt_mRS():
'''extract the mRS for training subjects from training_1 to training_48'''
mRS_gt = np.zeros((40, ))
train_dataset = get_train_dataset()
for idx, subject_name in enumerate(train_dataset.keys()):
mRS_gt[idx] = train_dataset[subject_name]['mRS']
return mRS_gt
def extract_tract_features():
''' extract number of tracts'''
train_dataset = get_train_dataset()
tracts = np.zeros((40, 1))
for idx, subject_name in enumerate(train_dataset.keys()):
tracts[idx] = train_dataset[subject_name]['tracts']
return tracts, ['tracts']
# Extract the volume of stroke in MNI152 space
def extract_volumetric_features():
# The ground truth lesions in MNI space
volumetric_list = ["volume"]
stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
stroke_mni_paths.sort()
assert(len(stroke_mni_paths) == 43)
# Volumetric Features
volumetric_features = np.zeros((40,1))
train_dataset = get_train_dataset()
for idx, subject_name in enumerate(train_dataset.keys()):
subject_id = train_dataset[subject_name]['ID']
stroke_mni_path = find_list(subject_id, stroke_mni_paths)
#volumetric features
stroke_mni_nda = ReadImage(stroke_mni_path)
volumetric_features[idx] = np.count_nonzero(stroke_mni_nda)
return volumetric_features, volumetric_list
def extract_spatial_features():
# The ground truth lesions in MNI space
stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
stroke_mni_paths.sort()
assert(len(stroke_mni_paths) == 43)
spatial_list = ["centroid_z", "centroid_y", "centroid_x"]
# Volumetric Features
spatial_features = np.zeros((40,3))
train_dataset = get_train_dataset()
for idx, subject_name in enumerate(train_dataset.keys()):
subject_id = train_dataset[subject_name]['ID']
stroke_mni_path = find_list(subject_id, stroke_mni_paths)
stroke_mni_nda = ReadImage(stroke_mni_path)
stroke_regions = regionprops(stroke_mni_nda.astype(int))
stroke_centroid = stroke_regions[0].centroid
spatial_features[idx, :] = stroke_centroid
return spatial_features, spatial_list
def extract_morphological_features():
# The ground truth lesions in MNI space
stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
stroke_mni_paths.sort()
assert(len(stroke_mni_paths) == 43)
morphological_list = ["major", "minor", "major/minor", "surface", "solidity", "roundness"]
# Volumetric Features
morphological_features = np.zeros((40,6), dtype=np.float32)
train_dataset = get_train_dataset()
for idx, subject_name in enumerate(train_dataset.keys()):
subject_id = train_dataset[subject_name]['ID']
stroke_mni_path = find_list(subject_id, stroke_mni_paths)
stroke_mni_nda = ReadImage(stroke_mni_path)
stroke_regions = regionprops(stroke_mni_nda.astype(int))
stroke_major_axis_length = stroke_regions[0].major_axis_length
stroke_minor_axis_length = stroke_regions[0].minor_axis_length
stroke_surface = find_3d_surface(stroke_mni_nda.astype(int))
stroke_roundness = find_3d_roundness(stroke_mni_nda.astype(int))
morphological_features[idx, :] = stroke_major_axis_length, stroke_minor_axis_length, stroke_major_axis_length/stroke_minor_axis_length, stroke_surface, stroke_regions[0].solidity, stroke_roundness
return morphological_features, morphological_list
def extract_tractographic_features(weight_type, aal_regions=116):
# The ground truth lesion in subject space
gt_subject_paths = [os.path.join(root, name) for root, dirs, files in os.walk(paths.isles2017_training_dir) for name in files if '.OT.' in name and '__MACOSX' not in root and name.endswith('.nii')]
# New connectivity matrices location
connectivity_train_dir = os.path.join(paths.dsi_studio_path, 'connectivity', 'gt_stroke')
# pass type locations
connectivity_pass_files = [os.path.join(root, name) for root, dirs, files in os.walk(connectivity_train_dir) for name in files if 'count' in name and 'ncount' not in name and 'connectivity' in name and 'pass' in name and name.endswith('.mat')]
connectivity_pass_files.sort()
# end type locations
connectivity_end_files = [os.path.join(root, name) for root, dirs, files in os.walk(connectivity_train_dir) for name in files if 'count' in name and 'ncount' not in name and 'connectivity' in name and 'end' in name and name.endswith('.mat')]
connectivity_end_files.sort()
# The ground truth lesions in MNI space
stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
stroke_mni_paths.sort()
tractographic_list = ["tract_aal_"+str(i) for i in range(1, aal_regions+1)]
assert(len(connectivity_pass_files) == len(connectivity_end_files) == len(stroke_mni_paths) == 43)
train_dataset = get_train_dataset()
# Tractographic Features
W_dsi_pass_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
W_nrm_pass_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
W_bin_pass_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
W_dsi_end_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
W_nrm_end_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
W_bin_end_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
for idx, subject_name in enumerate(train_dataset.keys()):
subject_id = train_dataset[subject_name]['ID']
connectivity_pass_file = find_list(subject_id, connectivity_pass_files)
connectivity_pass_obj = loadmat(connectivity_pass_file)
thresholded_connectivity_pass = threshold_connectivity_matrix(connectivity_pass_obj['connectivity'], 0)
W_nrm_pass, W_bin_pass = weight_conversion(thresholded_connectivity_pass)
connectivity_end_file = find_list(subject_id, connectivity_end_files)
connectivity_end_obj = loadmat(connectivity_end_file)
thresholded_connectivity_end = threshold_connectivity_matrix(connectivity_end_obj['connectivity'], 0)
W_nrm_end, W_bin_end = weight_conversion(thresholded_connectivity_end)
stroke_mni_path = find_list(subject_id, stroke_mni_paths)
# =================================== Weight Vector ========================================== #
# Get the lesion weights
if 'ori' in weight_type:
lesion_weights = get_lesion_weights(stroke_mni_path)
# Get the modified lesion weights
if 'mod' in weight_type:
lesion_weights = get_modified_lesion_weights(stroke_mni_path)
# No weight
if 'one' in weight_type:
lesion_weights = np.ones((1,aal_regions), dtype=np.float32)
# weighted connectivity histogram
W_dsi_pass_histogram_features[idx, :] = np.multiply(np.sum(thresholded_connectivity_pass, axis=0), lesion_weights)
W_nrm_pass_histogram_features[idx, :] = np.multiply(np.sum(W_nrm_pass, axis=0), lesion_weights)
W_bin_pass_histogram_features[idx, :] = np.multiply(np.sum(W_bin_pass, axis=0), lesion_weights)
W_dsi_end_histogram_features[idx, :] = np.multiply(np.sum(thresholded_connectivity_end, axis=0), lesion_weights)
W_nrm_end_histogram_features[idx, :] = np.multiply(np.sum(W_nrm_end, axis=0), lesion_weights)
W_bin_end_histogram_features[idx, :] = np.multiply(np.sum(W_bin_end, axis=0), lesion_weights)
return W_dsi_pass_histogram_features, W_nrm_pass_histogram_features, W_bin_pass_histogram_features, W_dsi_end_histogram_features, W_nrm_end_histogram_features, W_bin_end_histogram_features, tractographic_list
def extract_volumetric_spatial_features(atlas_name):
'''extract volumetric spatial features'''
stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
stroke_mni_paths.sort()
train_dataset = get_train_dataset()
atlas_path = os.path.join(paths.dsi_studio_path, 'atlas', atlas_name+'.nii.gz')
atlas_nda = ReadImage(atlas_path)
if atlas_name == 'aal':
atlas_nda = reshape_by_padding_upper_coords(atlas_nda, (182,218,182), 0)
volumetric_spatial_features = np.zeros((40, int(np.amax(atlas_nda))+1), dtype=float)
for idx, subject_name in enumerate(train_dataset.keys()):
subject_id = train_dataset[subject_name]['ID']
stroke_mni_path = find_list(subject_id, stroke_mni_paths)
stroke_mni_nda = ReadImage(stroke_mni_path)
whole_stroke_volume = float(np.count_nonzero(stroke_mni_nda))
for bp_number in range(1, int(np.amax(atlas_nda)+1)):
mask = np.zeros(atlas_nda.shape, atlas_nda.dtype)
mask[atlas_nda==(bp_number)]=1
stroke_in_bp = np.multiply(mask, stroke_mni_nda)
stroke_in_bp_volume = np.count_nonzero(stroke_in_bp)
volumetric_spatial_features[idx, bp_number] = stroke_in_bp_volume
total_stroke_volume_bp = np.sum(volumetric_spatial_features[idx, :])
volumetric_spatial_features[idx, 0] = whole_stroke_volume - total_stroke_volume_bp
volumetric_spatial_list =['volume_'+atlas_name+'_'+str(i) for i in range(0, int(np.amax(atlas_nda)+1))]
return volumetric_spatial_features, volumetric_spatial_list
def extract_modified_volumetric_spatial_features(atlas_name):
'''extract volumetric spatial features considering the total volume of the stroke lesion'''
stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
stroke_mni_paths.sort()
train_dataset = get_train_dataset()
atlas_path = os.path.join(paths.dsi_studio_path, 'atlas', atlas_name+'.nii.gz')
atlas_nda = ReadImage(atlas_path)
if atlas_name == 'aal':
atlas_nda = reshape_by_padding_upper_coords(atlas_nda, (182,218,182), 0)
modified_volumetric_spatial_features = np.zeros((40, int(np.amax(atlas_nda))), dtype=float)
for idx, subject_name in enumerate(train_dataset.keys()):
subject_id = train_dataset[subject_name]['ID']
stroke_mni_path = find_list(subject_id, stroke_mni_paths)
stroke_mni_nda = ReadImage(stroke_mni_path)
whole_stroke_volume = float(np.count_nonzero(stroke_mni_nda))
for bp_number in range(1, int(np.amax(atlas_nda))+1):
mask = np.zeros(atlas_nda.shape, atlas_nda.dtype)
mask[atlas_nda==(bp_number)]=1
stroke_in_bp = np.multiply(mask, stroke_mni_nda)
stroke_in_bp_volume = float(np.count_nonzero(stroke_in_bp))
modified_volumetric_spatial_features[idx, bp_number-1] = stroke_in_bp_volume / whole_stroke_volume
volumetric_spatial_list =['volume_'+atlas_name+'_'+str(i) for i in range(1, int(np.amax(atlas_nda))+1)]
assert((len(volumetric_spatial_list))==modified_volumetric_spatial_features.shape[1])
return modified_volumetric_spatial_features, volumetric_spatial_list
def extract_new_tractographic_features(weight_type, aal_regions=116):
# The ground truth lesion in subject space
gt_subject_paths = [os.path.join(root, name) for root, dirs, files in os.walk(paths.isles2017_training_dir) for name in files if '.OT.' in name and '__MACOSX' not in root and name.endswith('.nii')]
# New connectivity matrices location
connectivity_train_dir = os.path.join(paths.dsi_studio_path, 'connectivity', 'gt_stroke')
# pass type locations
connectivity_pass_files = [os.path.join(root, name) for root, dirs, files in os.walk(connectivity_train_dir) for name in files if 'count' in name and 'ncount' not in name and 'connectivity' in name and 'pass' in name and name.endswith('.mat')]
connectivity_pass_files.sort()
# end type locations
connectivity_end_files = [os.path.join(root, name) for root, dirs, files in os.walk(connectivity_train_dir) for name in files if 'count' in name and 'ncount' not in name and 'connectivity' in name and 'end' in name and name.endswith('.mat')]
connectivity_end_files.sort()
# The ground truth lesions in MNI space
stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
stroke_mni_paths.sort()
tractographic_list = ["tract_aal_"+str(i) for i in range(1, aal_regions+1)]
assert(len(connectivity_pass_files) == len(connectivity_end_files) == len(stroke_mni_paths) == 43)
train_dataset = get_train_dataset()
# Tractographic Features
W_pass_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
W_end_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
for idx, subject_name in enumerate(train_dataset.keys()):
HCP_pass, HCP_end = get_hcp_connectivity_matrice()
subject_id = train_dataset[subject_name]['ID']
connectivity_pass_file = find_list(subject_id, connectivity_pass_files)
connectivity_pass_obj = loadmat(connectivity_pass_file)
connectivity_pass_matrix = connectivity_pass_obj['connectivity']
#normalized_pass_matrix = divide_hcp(connectivity_pass_matrix, HCP_pass)
connectivity_end_file = find_list(subject_id, connectivity_end_files)
connectivity_end_obj = loadmat(connectivity_end_file)
connectivity_end_matrix = connectivity_end_obj['connectivity']
#normalized_end_matrix = divide_hcp(connectivity_pass_matrix, HCP_end)
stroke_mni_path = find_list(subject_id, stroke_mni_paths)
# =================================== Weight Vector ========================================== #
# Get the lesion weights
if 'ori' in weight_type:
lesion_weights = get_lesion_weights(stroke_mni_path)
# Get the modified lesion weights
if 'mod' in weight_type:
lesion_weights = get_modified_lesion_weights(stroke_mni_path)
# No weight
if 'one' in weight_type:
lesion_weights = np.ones((1,aal_regions), dtype=np.float32)
normalized_pass_matrix = np.divide(np.sum(connectivity_pass_matrix, axis=0), np.sum(HCP_pass, axis=0))
normalized_end_matrix = np.divide(np.sum(connectivity_end_matrix, axis=0), np.sum(HCP_end, axis=0))
# weighted connectivity histogram
W_pass_histogram_features[idx, :] = np.multiply(normalized_pass_matrix, lesion_weights)
W_end_histogram_features[idx, :] = np.multiply(normalized_end_matrix, lesion_weights)
return W_pass_histogram_features, W_end_histogram_features, tractographic_list
| [
"numpy.copy",
"numpy.multiply",
"numpy.amax",
"numpy.ones",
"numpy.absolute",
"scipy.io.loadmat",
"os.path.join",
"os.path.isfile",
"numpy.count_nonzero",
"numpy.zeros",
"numpy.sum",
"skimage.measure.marching_cubes_classic",
"skimage.measure.mesh_surface_area",
"numpy.concatenate",
"Simp... | [((403, 438), 'numpy.zeros', 'np.zeros', (['connectivity_matrix.shape'], {}), '(connectivity_matrix.shape)\n', (411, 438), True, 'import numpy as np\n'), ((909, 1010), 'os.path.join', 'os.path.join', (['hcp_connectivity_matrices_path', '"""HCP1021.1mm.fib.gz.aal.count.end.connectivity.mat"""'], {}), "(hcp_connectivity_matrices_path,\n 'HCP1021.1mm.fib.gz.aal.count.end.connectivity.mat')\n", (921, 1010), False, 'import os\n'), ((1035, 1137), 'os.path.join', 'os.path.join', (['hcp_connectivity_matrices_path', '"""HCP1021.1mm.fib.gz.aal.count.pass.connectivity.mat"""'], {}), "(hcp_connectivity_matrices_path,\n 'HCP1021.1mm.fib.gz.aal.count.pass.connectivity.mat')\n", (1047, 1137), False, 'import os\n'), ((1149, 1173), 'scipy.io.loadmat', 'loadmat', (['end_matrix_path'], {}), '(end_matrix_path)\n', (1156, 1173), False, 'from scipy.io import loadmat\n'), ((1232, 1257), 'scipy.io.loadmat', 'loadmat', (['pass_matrix_path'], {}), '(pass_matrix_path)\n', (1239, 1257), False, 'from scipy.io import loadmat\n'), ((1809, 1867), 'skimage.measure.marching_cubes_classic', 'marching_cubes_classic', ([], {'volume': 'mask', 'spacing': 'voxel_spacing'}), '(volume=mask, spacing=voxel_spacing)\n', (1831, 1867), False, 'from skimage.measure import regionprops, marching_cubes_classic, mesh_surface_area\n'), ((1879, 1910), 'skimage.measure.mesh_surface_area', 'mesh_surface_area', (['verts', 'faces'], {}), '(verts, faces)\n', (1896, 1910), False, 'from skimage.measure import regionprops, marching_cubes_classic, mesh_surface_area\n'), ((3305, 3333), 'numpy.copy', 'np.copy', (['connectivity_matrix'], {}), '(connectivity_matrix)\n', (3312, 3333), True, 'import numpy as np\n'), ((3586, 3596), 'numpy.copy', 'np.copy', (['W'], {}), '(W)\n', (3593, 3596), True, 'import numpy as np\n'), ((3627, 3637), 'numpy.copy', 'np.copy', (['W'], {}), '(W)\n', (3634, 3637), True, 'import numpy as np\n'), ((3810, 3868), 'os.path.join', 'os.path.join', (['paths.dsi_studio_path', '"""atlas"""', 
'"""aal.nii.gz"""'], {}), "(paths.dsi_studio_path, 'atlas', 'aal.nii.gz')\n", (3822, 3868), False, 'import os\n'), ((4676, 4734), 'os.path.join', 'os.path.join', (['paths.dsi_studio_path', '"""atlas"""', '"""aal.nii.gz"""'], {}), "(paths.dsi_studio_path, 'atlas', 'aal.nii.gz')\n", (4688, 4734), False, 'import os\n'), ((6016, 6065), 'os.path.join', 'os.path.join', (['paths.isles2017_dir', 'train_mRS_file'], {}), '(paths.isles2017_dir, train_mRS_file)\n', (6028, 6065), False, 'import os\n'), ((6077, 6107), 'os.path.isfile', 'os.path.isfile', (['train_mRS_path'], {}), '(train_mRS_path)\n', (6091, 6107), False, 'import os\n'), ((7192, 7207), 'numpy.zeros', 'np.zeros', (['(40,)'], {}), '((40,))\n', (7200, 7207), True, 'import numpy as np\n'), ((7497, 7514), 'numpy.zeros', 'np.zeros', (['(40, 1)'], {}), '((40, 1))\n', (7505, 7514), True, 'import numpy as np\n'), ((7837, 7885), 'os.path.join', 'os.path.join', (['paths.dsi_studio_path', '"""gt_stroke"""'], {}), "(paths.dsi_studio_path, 'gt_stroke')\n", (7849, 7885), False, 'import os\n'), ((8150, 8167), 'numpy.zeros', 'np.zeros', (['(40, 1)'], {}), '((40, 1))\n', (8158, 8167), True, 'import numpy as np\n'), ((8686, 8734), 'os.path.join', 'os.path.join', (['paths.dsi_studio_path', '"""gt_stroke"""'], {}), "(paths.dsi_studio_path, 'gt_stroke')\n", (8698, 8734), False, 'import os\n'), ((9058, 9075), 'numpy.zeros', 'np.zeros', (['(40, 3)'], {}), '((40, 3))\n', (9066, 9075), True, 'import numpy as np\n'), ((9665, 9713), 'os.path.join', 'os.path.join', (['paths.dsi_studio_path', '"""gt_stroke"""'], {}), "(paths.dsi_studio_path, 'gt_stroke')\n", (9677, 9713), False, 'import os\n'), ((10076, 10111), 'numpy.zeros', 'np.zeros', (['(40, 6)'], {'dtype': 'np.float32'}), '((40, 6), dtype=np.float32)\n', (10084, 10111), True, 'import numpy as np\n'), ((11380, 11444), 'os.path.join', 'os.path.join', (['paths.dsi_studio_path', '"""connectivity"""', '"""gt_stroke"""'], {}), "(paths.dsi_studio_path, 'connectivity', 'gt_stroke')\n", (11392, 
11444), False, 'import os\n'), ((12131, 12179), 'os.path.join', 'os.path.join', (['paths.dsi_studio_path', '"""gt_stroke"""'], {}), "(paths.dsi_studio_path, 'gt_stroke')\n", (12143, 12179), False, 'import os\n'), ((12640, 12685), 'numpy.zeros', 'np.zeros', (['(40, aal_regions)'], {'dtype': 'np.float32'}), '((40, aal_regions), dtype=np.float32)\n', (12648, 12685), True, 'import numpy as np\n'), ((12722, 12767), 'numpy.zeros', 'np.zeros', (['(40, aal_regions)'], {'dtype': 'np.float32'}), '((40, aal_regions), dtype=np.float32)\n', (12730, 12767), True, 'import numpy as np\n'), ((12804, 12849), 'numpy.zeros', 'np.zeros', (['(40, aal_regions)'], {'dtype': 'np.float32'}), '((40, aal_regions), dtype=np.float32)\n', (12812, 12849), True, 'import numpy as np\n'), ((12886, 12931), 'numpy.zeros', 'np.zeros', (['(40, aal_regions)'], {'dtype': 'np.float32'}), '((40, aal_regions), dtype=np.float32)\n', (12894, 12931), True, 'import numpy as np\n'), ((12967, 13012), 'numpy.zeros', 'np.zeros', (['(40, aal_regions)'], {'dtype': 'np.float32'}), '((40, aal_regions), dtype=np.float32)\n', (12975, 13012), True, 'import numpy as np\n'), ((13048, 13093), 'numpy.zeros', 'np.zeros', (['(40, aal_regions)'], {'dtype': 'np.float32'}), '((40, aal_regions), dtype=np.float32)\n', (13056, 13093), True, 'import numpy as np\n'), ((15495, 15543), 'os.path.join', 'os.path.join', (['paths.dsi_studio_path', '"""gt_stroke"""'], {}), "(paths.dsi_studio_path, 'gt_stroke')\n", (15507, 15543), False, 'import os\n'), ((15773, 15841), 'os.path.join', 'os.path.join', (['paths.dsi_studio_path', '"""atlas"""', "(atlas_name + '.nii.gz')"], {}), "(paths.dsi_studio_path, 'atlas', atlas_name + '.nii.gz')\n", (15785, 15841), False, 'import os\n'), ((17272, 17320), 'os.path.join', 'os.path.join', (['paths.dsi_studio_path', '"""gt_stroke"""'], {}), "(paths.dsi_studio_path, 'gt_stroke')\n", (17284, 17320), False, 'import os\n'), ((17550, 17618), 'os.path.join', 'os.path.join', (['paths.dsi_studio_path', '"""atlas"""', 
"(atlas_name + '.nii.gz')"], {}), "(paths.dsi_studio_path, 'atlas', atlas_name + '.nii.gz')\n", (17562, 17618), False, 'import os\n'), ((19238, 19302), 'os.path.join', 'os.path.join', (['paths.dsi_studio_path', '"""connectivity"""', '"""gt_stroke"""'], {}), "(paths.dsi_studio_path, 'connectivity', 'gt_stroke')\n", (19250, 19302), False, 'import os\n'), ((19989, 20037), 'os.path.join', 'os.path.join', (['paths.dsi_studio_path', '"""gt_stroke"""'], {}), "(paths.dsi_studio_path, 'gt_stroke')\n", (20001, 20037), False, 'import os\n'), ((20494, 20539), 'numpy.zeros', 'np.zeros', (['(40, aal_regions)'], {'dtype': 'np.float32'}), '((40, aal_regions), dtype=np.float32)\n', (20502, 20539), True, 'import numpy as np\n'), ((20571, 20616), 'numpy.zeros', 'np.zeros', (['(40, aal_regions)'], {'dtype': 'np.float32'}), '((40, aal_regions), dtype=np.float32)\n', (20579, 20616), True, 'import numpy as np\n'), ((4173, 4227), 'numpy.zeros', 'np.zeros', (['aal_182_218_182.shape', 'aal_182_218_182.dtype'], {}), '(aal_182_218_182.shape, aal_182_218_182.dtype)\n', (4181, 4227), True, 'import numpy as np\n'), ((4346, 4379), 'numpy.multiply', 'np.multiply', (['mask', 'stroke_mni_nda'], {}), '(mask, stroke_mni_nda)\n', (4357, 4379), True, 'import numpy as np\n'), ((4924, 4956), 'numpy.count_nonzero', 'np.count_nonzero', (['stroke_mni_nda'], {}), '(stroke_mni_nda)\n', (4940, 4956), True, 'import numpy as np\n'), ((5099, 5153), 'numpy.zeros', 'np.zeros', (['aal_182_218_182.shape', 'aal_182_218_182.dtype'], {}), '(aal_182_218_182.shape, aal_182_218_182.dtype)\n', (5107, 5153), True, 'import numpy as np\n'), ((5273, 5306), 'numpy.multiply', 'np.multiply', (['mask', 'stroke_mni_nda'], {}), '(mask, stroke_mni_nda)\n', (5284, 5306), True, 'import numpy as np\n'), ((5706, 5730), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (5718, 5730), False, 'import os\n'), ((6240, 6260), 'csv.reader', 'csv.reader', (['csv_file'], {}), '(csv_file)\n', (6250, 6260), False, 'import 
csv\n'), ((7910, 7934), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (7922, 7934), False, 'import os\n'), ((8507, 8539), 'numpy.count_nonzero', 'np.count_nonzero', (['stroke_mni_nda'], {}), '(stroke_mni_nda)\n', (8523, 8539), True, 'import numpy as np\n'), ((8759, 8783), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (8771, 8783), False, 'import os\n'), ((9738, 9762), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (9750, 9762), False, 'import os\n'), ((11132, 11156), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (11144, 11156), False, 'import os\n'), ((11507, 11531), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (11519, 11531), False, 'import os\n'), ((11815, 11839), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (11827, 11839), False, 'import os\n'), ((12204, 12228), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (12216, 12228), False, 'import os\n'), ((13324, 13355), 'scipy.io.loadmat', 'loadmat', (['connectivity_pass_file'], {}), '(connectivity_pass_file)\n', (13331, 13355), False, 'from scipy.io import loadmat\n'), ((13660, 13690), 'scipy.io.loadmat', 'loadmat', (['connectivity_end_file'], {}), '(connectivity_end_file)\n', (13667, 13690), False, 'from scipy.io import loadmat\n'), ((15568, 15592), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (15580, 15592), False, 'import os\n'), ((16785, 16828), 'numpy.sum', 'np.sum', (['volumetric_spatial_features[idx, :]'], {}), '(volumetric_spatial_features[idx, :])\n', (16791, 16828), True, 'import numpy as np\n'), ((17345, 17369), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (17357, 17369), False, 'import os\n'), ((18990, 19014), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (19002, 19014), False, 'import os\n'), ((19365, 19389), 'os.path.join', 
'os.path.join', (['root', 'name'], {}), '(root, name)\n', (19377, 19389), False, 'import os\n'), ((19673, 19697), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (19685, 19697), False, 'import os\n'), ((20062, 20086), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (20074, 20086), False, 'import os\n'), ((20906, 20937), 'scipy.io.loadmat', 'loadmat', (['connectivity_pass_file'], {}), '(connectivity_pass_file)\n', (20913, 20937), False, 'from scipy.io import loadmat\n'), ((21202, 21232), 'scipy.io.loadmat', 'loadmat', (['connectivity_end_file'], {}), '(connectivity_end_file)\n', (21209, 21232), False, 'from scipy.io import loadmat\n'), ((22277, 22328), 'numpy.multiply', 'np.multiply', (['normalized_pass_matrix', 'lesion_weights'], {}), '(normalized_pass_matrix, lesion_weights)\n', (22288, 22328), True, 'import numpy as np\n'), ((22381, 22431), 'numpy.multiply', 'np.multiply', (['normalized_end_matrix', 'lesion_weights'], {}), '(normalized_end_matrix, lesion_weights)\n', (22392, 22431), True, 'import numpy as np\n'), ((3664, 3678), 'numpy.absolute', 'np.absolute', (['W'], {}), '(W)\n', (3675, 3678), True, 'import numpy as np\n'), ((4059, 4083), 'numpy.amax', 'np.amax', (['aal_182_218_182'], {}), '(aal_182_218_182)\n', (4066, 4083), True, 'import numpy as np\n'), ((4130, 4154), 'numpy.amax', 'np.amax', (['aal_182_218_182'], {}), '(aal_182_218_182)\n', (4137, 4154), True, 'import numpy as np\n'), ((4299, 4321), 'numpy.count_nonzero', 'np.count_nonzero', (['mask'], {}), '(mask)\n', (4315, 4321), True, 'import numpy as np\n'), ((4414, 4444), 'numpy.count_nonzero', 'np.count_nonzero', (['stroke_in_bp'], {}), '(stroke_in_bp)\n', (4430, 4444), True, 'import numpy as np\n'), ((4985, 5009), 'numpy.amax', 'np.amax', (['aal_182_218_182'], {}), '(aal_182_218_182)\n', (4992, 5009), True, 'import numpy as np\n'), ((5056, 5080), 'numpy.amax', 'np.amax', (['aal_182_218_182'], {}), '(aal_182_218_182)\n', (5063, 5080), True, 'import 
numpy as np\n'), ((5343, 5373), 'numpy.count_nonzero', 'np.count_nonzero', (['stroke_in_bp'], {}), '(stroke_in_bp)\n', (5359, 5373), True, 'import numpy as np\n'), ((5756, 5793), 'os.walk', 'os.walk', (['paths.isles2017_training_dir'], {}), '(paths.isles2017_training_dir)\n', (5763, 5793), False, 'import os\n'), ((7960, 7983), 'os.walk', 'os.walk', (['stroke_mni_dir'], {}), '(stroke_mni_dir)\n', (7967, 7983), False, 'import os\n'), ((8809, 8832), 'os.walk', 'os.walk', (['stroke_mni_dir'], {}), '(stroke_mni_dir)\n', (8816, 8832), False, 'import os\n'), ((9788, 9811), 'os.walk', 'os.walk', (['stroke_mni_dir'], {}), '(stroke_mni_dir)\n', (9795, 9811), False, 'import os\n'), ((11182, 11219), 'os.walk', 'os.walk', (['paths.isles2017_training_dir'], {}), '(paths.isles2017_training_dir)\n', (11189, 11219), False, 'import os\n'), ((11557, 11588), 'os.walk', 'os.walk', (['connectivity_train_dir'], {}), '(connectivity_train_dir)\n', (11564, 11588), False, 'import os\n'), ((11865, 11896), 'os.walk', 'os.walk', (['connectivity_train_dir'], {}), '(connectivity_train_dir)\n', (11872, 11896), False, 'import os\n'), ((12254, 12277), 'os.walk', 'os.walk', (['stroke_mni_dir'], {}), '(stroke_mni_dir)\n', (12261, 12277), False, 'import os\n'), ((14415, 14458), 'numpy.ones', 'np.ones', (['(1, aal_regions)'], {'dtype': 'np.float32'}), '((1, aal_regions), dtype=np.float32)\n', (14422, 14458), True, 'import numpy as np\n'), ((14562, 14607), 'numpy.sum', 'np.sum', (['thresholded_connectivity_pass'], {'axis': '(0)'}), '(thresholded_connectivity_pass, axis=0)\n', (14568, 14607), True, 'import numpy as np\n'), ((14685, 14711), 'numpy.sum', 'np.sum', (['W_nrm_pass'], {'axis': '(0)'}), '(W_nrm_pass, axis=0)\n', (14691, 14711), True, 'import numpy as np\n'), ((14789, 14815), 'numpy.sum', 'np.sum', (['W_bin_pass'], {'axis': '(0)'}), '(W_bin_pass, axis=0)\n', (14795, 14815), True, 'import numpy as np\n'), ((14893, 14937), 'numpy.sum', 'np.sum', (['thresholded_connectivity_end'], {'axis': '(0)'}), 
'(thresholded_connectivity_end, axis=0)\n', (14899, 14937), True, 'import numpy as np\n'), ((15014, 15039), 'numpy.sum', 'np.sum', (['W_nrm_end'], {'axis': '(0)'}), '(W_nrm_end, axis=0)\n', (15020, 15039), True, 'import numpy as np\n'), ((15116, 15141), 'numpy.sum', 'np.sum', (['W_bin_end'], {'axis': '(0)'}), '(W_bin_end, axis=0)\n', (15122, 15141), True, 'import numpy as np\n'), ((15618, 15641), 'os.walk', 'os.walk', (['stroke_mni_dir'], {}), '(stroke_mni_dir)\n', (15625, 15641), False, 'import os\n'), ((16347, 16379), 'numpy.count_nonzero', 'np.count_nonzero', (['stroke_mni_nda'], {}), '(stroke_mni_nda)\n', (16363, 16379), True, 'import numpy as np\n'), ((16462, 16504), 'numpy.zeros', 'np.zeros', (['atlas_nda.shape', 'atlas_nda.dtype'], {}), '(atlas_nda.shape, atlas_nda.dtype)\n', (16470, 16504), True, 'import numpy as np\n'), ((16575, 16608), 'numpy.multiply', 'np.multiply', (['mask', 'stroke_mni_nda'], {}), '(mask, stroke_mni_nda)\n', (16586, 16608), True, 'import numpy as np\n'), ((16643, 16673), 'numpy.count_nonzero', 'np.count_nonzero', (['stroke_in_bp'], {}), '(stroke_in_bp)\n', (16659, 16673), True, 'import numpy as np\n'), ((17395, 17418), 'os.walk', 'os.walk', (['stroke_mni_dir'], {}), '(stroke_mni_dir)\n', (17402, 17418), False, 'import os\n'), ((18131, 18163), 'numpy.count_nonzero', 'np.count_nonzero', (['stroke_mni_nda'], {}), '(stroke_mni_nda)\n', (18147, 18163), True, 'import numpy as np\n'), ((18246, 18288), 'numpy.zeros', 'np.zeros', (['atlas_nda.shape', 'atlas_nda.dtype'], {}), '(atlas_nda.shape, atlas_nda.dtype)\n', (18254, 18288), True, 'import numpy as np\n'), ((18359, 18392), 'numpy.multiply', 'np.multiply', (['mask', 'stroke_mni_nda'], {}), '(mask, stroke_mni_nda)\n', (18370, 18392), True, 'import numpy as np\n'), ((19040, 19077), 'os.walk', 'os.walk', (['paths.isles2017_training_dir'], {}), '(paths.isles2017_training_dir)\n', (19047, 19077), False, 'import os\n'), ((19415, 19446), 'os.walk', 'os.walk', (['connectivity_train_dir'], {}), 
'(connectivity_train_dir)\n', (19422, 19446), False, 'import os\n'), ((19723, 19754), 'os.walk', 'os.walk', (['connectivity_train_dir'], {}), '(connectivity_train_dir)\n', (19730, 19754), False, 'import os\n'), ((20112, 20135), 'os.walk', 'os.walk', (['stroke_mni_dir'], {}), '(stroke_mni_dir)\n', (20119, 20135), False, 'import os\n'), ((21919, 21962), 'numpy.ones', 'np.ones', (['(1, aal_regions)'], {'dtype': 'np.float32'}), '((1, aal_regions), dtype=np.float32)\n', (21926, 21962), True, 'import numpy as np\n'), ((22014, 22054), 'numpy.sum', 'np.sum', (['connectivity_pass_matrix'], {'axis': '(0)'}), '(connectivity_pass_matrix, axis=0)\n', (22020, 22054), True, 'import numpy as np\n'), ((22056, 22080), 'numpy.sum', 'np.sum', (['HCP_pass'], {'axis': '(0)'}), '(HCP_pass, axis=0)\n', (22062, 22080), True, 'import numpy as np\n'), ((22124, 22163), 'numpy.sum', 'np.sum', (['connectivity_end_matrix'], {'axis': '(0)'}), '(connectivity_end_matrix, axis=0)\n', (22130, 22163), True, 'import numpy as np\n'), ((22165, 22188), 'numpy.sum', 'np.sum', (['HCP_end'], {'axis': '(0)'}), '(HCP_end, axis=0)\n', (22171, 22188), True, 'import numpy as np\n'), ((1465, 1485), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['path'], {}), '(path)\n', (1479, 1485), True, 'import SimpleITK as sitk\n'), ((3403, 3431), 'numpy.amax', 'np.amax', (['connectivity_matrix'], {}), '(connectivity_matrix)\n', (3410, 3431), True, 'import numpy as np\n'), ((17825, 17843), 'numpy.amax', 'np.amax', (['atlas_nda'], {}), '(atlas_nda)\n', (17832, 17843), True, 'import numpy as np\n'), ((18433, 18463), 'numpy.count_nonzero', 'np.count_nonzero', (['stroke_in_bp'], {}), '(stroke_in_bp)\n', (18449, 18463), True, 'import numpy as np\n'), ((2441, 2475), 'numpy.concatenate', 'np.concatenate', (['(shape, new_shape)'], {}), '((shape, new_shape))\n', (2455, 2475), True, 'import numpy as np\n'), ((16039, 16057), 'numpy.amax', 'np.amax', (['atlas_nda'], {}), '(atlas_nda)\n', (16046, 16057), True, 'import numpy as np\n'), ((16419, 
16437), 'numpy.amax', 'np.amax', (['atlas_nda'], {}), '(atlas_nda)\n', (16426, 16437), True, 'import numpy as np\n'), ((18203, 18221), 'numpy.amax', 'np.amax', (['atlas_nda'], {}), '(atlas_nda)\n', (18210, 18221), True, 'import numpy as np\n'), ((17004, 17022), 'numpy.amax', 'np.amax', (['atlas_nda'], {}), '(atlas_nda)\n', (17011, 17022), True, 'import numpy as np\n'), ((18660, 18678), 'numpy.amax', 'np.amax', (['atlas_nda'], {}), '(atlas_nda)\n', (18667, 18678), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from glob import glob
import csv
import os
import random
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from numpy import genfromtxt
matplotlib.use('Agg')
def make_batches(size, batch_size):
    """Return a list of (start, end) index tuples that partition ``size`` items.

    Adapted from keras.engine.training_utils (keras-team/keras).

    # Arguments
        size: Integer, total size of the data to slice into batches.
        batch_size: Integer, batch size.
    # Returns
        A list of tuples of array indices; the last batch may be shorter.
    """
    return [(start, min(size, start + batch_size))
            for start in range(0, size, batch_size)]
def export_emb(emb, info=None, folder='', prefix='prefix', info_header=None):
    """Write embeddings (and optional per-embedding info) to csv files.

    Input:
        emb: 2D float array (num_emb, emb_size): embeddings
        info: list of string 1D arrays of size (num_emb,): extra information
            for each embedding: label, filename, class_name
        folder: string, directory for the output files (created if missing)
        prefix: string prepended to each output filename
        info_header: list of strings, column headers for the info arrays
    """
    if folder != '' and not os.path.exists(folder):
        os.makedirs(folder)
    # Embeddings csv: one 'emb_<i>' column per embedding dimension.
    emb_cols = ','.join('emb_' + str(d) for d in range(emb.shape[1]))
    emb_path = os.path.join(folder, prefix + '_emb.csv')
    np.savetxt(emb_path, emb, fmt='%s', delimiter=',', header=emb_cols,
               comments='')
    print('Embeddings and are saved to file: {}'.format(emb_path))
    if info is None:
        return
    # Info csv: stack the 1D info arrays side by side as columns.
    if info_header is None:
        info_header = ['info_' + str(d) for d in range(len(info))]
    info_cols = ','.join(str(h) for h in info_header[: len(info)])
    info_path = os.path.join(folder, prefix + '_lbl.csv')
    np.savetxt(
        info_path,
        np.stack(info, axis=-1),
        fmt='%s',
        delimiter=',',
        header=info_cols,
        comments='',
    )
    print('Info is saved to file: {}'.format(info_path))
def plot_some(imgs, k=5, random_seed=None, same_order=False, labels=None):
    """Displays k random images from list
    Input:
        imgs: list of images (2D gray or HxWxC arrays)
        k: integer, number of images to display (clipped to len(imgs))
        random_seed: integer, number to initialise random generation
        same_order: boolean, if True, displays k first images
        labels: optional sequence of per-image titles
    """
    if len(imgs) < k:
        k = len(imgs)
    # Show some images
    fig, ax = plt.subplots(ncols=k, figsize=(12, 12 * k))
    # Bug fix: plt.subplots returns a bare Axes (not an array) when ncols == 1,
    # so ax[i] below raised for k == 1. Normalize to a 1D array of Axes.
    ax = np.atleast_1d(ax)
    if same_order:
        idx = range(k)
    else:
        if random_seed is not None:
            random.seed(random_seed)
        idx = random.sample(range(len(imgs)), k)
    if len(imgs[0].shape) > 2:
        if imgs[0].shape[-1] > 1:
            # Multi-channel (RGB) images: let imshow pick the colormap.
            for i in range(k):
                ax[i].imshow(imgs[idx[i]])
        else:
            # Single-channel HxWx1: squeeze the channel axis for gray display.
            for i in range(k):
                ax[i].imshow(np.squeeze(imgs[idx[i]]), cmap='gray')
    else:
        # Plain 2D arrays.
        for i in range(k):
            ax[i].imshow(imgs[idx[i]], cmap='gray')
    if labels is not None:
        for i in range(k):
            ax[i].set_title(labels[idx[i]])
    for i in range(k):
        ax[i].axis('off')
    plt.tight_layout()
def plot_pairs(arr1, arr2, labels, class1=[], class2=[], offset=0):
    """Prints four pairs of images in two rows.
    arr1 - 4D array of images
    arr2 - 4D array of images
    labels - 1D array where 0 - positive pair, 1 - negative pair
    class1 - 1D array of classes for first array
    class2 - 1D array of classes for second array
    offset - starting index to display images from array

    Note: the mutable default args class1=[]/class2=[] are read-only here,
    so the shared default is safe.
    """
    fig, ax = plt.subplots(ncols=4, nrows=2, figsize=(16, 8))
    for i in range(4):
        ax[0, i].imshow(arr1[i + offset])
        ax[1, i].imshow(arr2[i + offset])
        title = 'Positive' if (labels[i + offset] == 0) else 'Negative'
        if len(class1) > 0:
            # Bug fix: index the class arrays with the same offset as the
            # images and labels (previously class1[i]/class2[i] ignored it).
            title += ' / ' + str(class1[i + offset])
        ax[0, i].set_title(title)
        if len(class2) > 0:
            ax[1, i].set_title(class2[i + offset])
    plt.tight_layout()
def print_nested(val, nesting=-5):
    """Pretty-print a nested dict, indenting 5 spaces per level.

    Copied from https://www.quora.com/How-do-I-nicely-print-a-nested-dictionary-in-Python
    """
    # Leaf values (anything that is not exactly a dict) are printed as-is.
    if type(val) != dict:
        print(val)
        return
    print('')
    nesting += 5
    for key in val:
        print(nesting * ' ', end='')
        print(key, end=':')
        print_nested(val[key], nesting)
def create_subfolders(src, dest):
    """Replicate the one-level subfolder structure of src under dest.

    src - source directory with 1 level of subfolders
    dest - destination directory
    """
    entries = glob(src + '/*')
    print('Found %d subfolders' % len(entries))
    if not os.path.exists(dest):
        os.makedirs(dest)
    n_created = 0
    for entry in entries:
        tail = os.path.split(entry)[1]
        target = os.path.join(dest, tail)
        # Only create folders that do not already exist in dest.
        if not os.path.exists(target):
            os.makedirs(target)
            n_created += 1
    print('Created %d subfolders' % n_created)
def read_dir(dir):
    """List files of a dataset laid out as one subfolder per class."""
    file_paths = glob(dir + '/*/*')
    print('Found {} files'.format(len(file_paths)))
    return file_paths
def plot_model_loss_csv(
    file, from_epoch=0, showFig=True, saveFig=False, figName='plot.png'
):
    """Plot train/validation loss from a keras CSVLogger-style history file.

    file: path to a csv whose columns 2 and 3 hold train and val loss
        (column 0 is assumed to be the epoch index)
    from_epoch: integer, skip this many leading epochs before plotting
    showFig: boolean, call plt.show() on the figure
    saveFig: boolean, save the figure to figName
    figName: string, output image path used when saveFig is True
    """
    model_history = genfromtxt(file, delimiter=',')
    fig, axs = plt.subplots(1, 1, figsize=(6, 4))
    # summarize history for loss
    # Column 1 = training loss, column 2 = validation loss; x starts at 1.
    axs.plot(
        range(1, len(model_history[from_epoch:, 1]) + 1), model_history[from_epoch:, 1]
    )
    axs.plot(
        range(1, len(model_history[from_epoch:, 2]) + 1), model_history[from_epoch:, 2]
    )
    axs.set_title('Model Loss')
    axs.set_ylabel('Loss')
    axs.set_xlabel('Epoch')
    # NOTE(review): the second positional argument to set_xticks is a float;
    # in matplotlib it is interpreted as `labels` (or `minor` in very old
    # versions), neither of which matches the apparent intent of a tick step.
    # Confirm against the matplotlib version in use.
    axs.set_xticks(
        np.arange(1, len(model_history[from_epoch:, 1]) + 1),
        len(model_history[from_epoch:, 1]) / 10,
    )
    axs.legend(['train', 'val'], loc='best')
    if showFig:
        plt.show()
    if saveFig:
        fig.savefig(figName)
    plt.close(fig)
def plot_model_loss_acc_csv(
    file, from_epoch=0, showFig=True, saveFig=False, figName='plot.png'
):
    """Plot train/val accuracy and loss side by side from a history csv.

    file: path to a csv where columns 1/3 hold train/val accuracy and
        columns 2/4 hold train/val loss (column 0 assumed epoch index)
    from_epoch: integer, skip this many leading epochs before plotting
    showFig: boolean, call plt.show() on the figure
    saveFig: boolean, save the figure to figName
    figName: string, output image path used when saveFig is True
    """
    model_history = genfromtxt(file, delimiter=',')
    fig, axs = plt.subplots(1, 2, figsize=(15, 5))
    # summarize history for accuracy
    axs[0].plot(
        range(1, len(model_history[from_epoch:, 1]) + 1), model_history[from_epoch:, 1]
    )
    axs[0].plot(
        range(1, len(model_history[from_epoch:, 3]) + 1), model_history[from_epoch:, 3]
    )
    axs[0].set_title('Model Accuracy')
    axs[0].set_ylabel('Accuracy')
    axs[0].set_xlabel('Epoch')
    # NOTE(review): the float second positional argument to set_xticks is
    # interpreted as `labels` (or `minor` in old matplotlib), not a tick
    # step as the expression suggests -- verify against the matplotlib
    # version in use. Same applies to the loss subplot below.
    axs[0].set_xticks(
        np.arange(1, len(model_history[from_epoch:, 1]) + 1),
        len(model_history[from_epoch:, 1]) / 10,
    )
    axs[0].legend(['train', 'val'], loc='best')
    # summarize history for loss
    axs[1].plot(
        range(1, len(model_history[from_epoch:, 2]) + 1), model_history[from_epoch:, 2]
    )
    axs[1].plot(
        range(1, len(model_history[from_epoch:, 4]) + 1), model_history[from_epoch:, 4]
    )
    axs[1].set_title('Model Loss')
    axs[1].set_ylabel('Loss')
    axs[1].set_xlabel('Epoch')
    axs[1].set_xticks(
        np.arange(1, len(model_history[from_epoch:, 2]) + 1),
        len(model_history[from_epoch:, 2]) / 10,
    )
    axs[1].legend(['train', 'val'], loc='best')
    if showFig:
        plt.show()
    if saveFig:
        fig.savefig(figName)
    plt.close(fig)
def save_res_csv(results, filename):
    """Append a dictionary with results as one row of a csv file.

    Input:
        results: dictionary, keys will be headers for the csv file,
            values - rows
        filename: string, name for csv file (eg. results.csv)
    """
    header_row = list(results)
    data_row = list(results.values())
    # Log iteration results. If file does not exist yet, create file with header
    if not os.path.isfile(filename):
        with open(filename, 'w') as output:
            csv.writer(output, lineterminator='\n').writerow(header_row)
        print('File {} is created'.format(filename))
    # TODO add check if file exists, that header row is the same as header from data
    with open(filename, 'a') as output:
        csv.writer(output, lineterminator='\n').writerow(data_row)
def sort2(x, y):
    """Sort one array based on another array.

    Input:
        x - 1D array, array to sort
        y - 1D array, elements in x are sorted based on y
    Returns:
        tuple of numpy arrays: (x reordered by ascending y, y sorted)
    """
    # Sort (y, x) pairs once instead of sorting twice.
    ordered_pairs = sorted(zip(y, x))
    x_by_y = np.array([x_val for _, x_val in ordered_pairs])
    y_sorted = np.array([y_val for y_val, _ in ordered_pairs])
    return x_by_y, y_sorted
def rem_dupl(seq, seq2=None):
    """Remove duplicates from a sequence and keep the order of elements.

    When seq2 is given, also return the seq2 entries aligned with the
    first occurrences kept from seq.
    """
    seen = set()
    if seq2 is None:
        unique = []
        for item in seq:
            if item not in seen:
                seen.add(item)
                unique.append(item)
        return unique
    unique, aligned = [], []
    for pos, item in enumerate(seq):
        if item not in seen:
            seen.add(item)
            unique.append(item)
            aligned.append(seq2[pos])
    return unique, aligned
def str2bool(v):
    """Parse a human-friendly boolean string; raise ValueError otherwise."""
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise ValueError('Boolean value expected.')
def rgb2gray(rgb, data_type='uint8'):
    """Convert from RBG to gray-scale image (kept 3-channel).

    rgb: 4d or 3d ndarray of RGB image/images
    data_type: string, desired data type of output
    """
    # Standard luma weights for R, G, B.
    luma_weights = [0.2989, 0.5870, 0.1140]
    gray = np.dot(rgb[..., :3], luma_weights)
    # Replicate the single gray channel three times so the output keeps
    # an RGB-shaped last axis.
    return np.stack((gray,) * 3, -1).astype(data_type)
| [
"os.path.exists",
"os.makedirs",
"matplotlib.use",
"csv.writer",
"os.path.join",
"random.seed",
"os.path.split",
"matplotlib.pyplot.close",
"numpy.stack",
"numpy.dot",
"os.path.isfile",
"numpy.squeeze",
"matplotlib.pyplot.tight_layout",
"numpy.savetxt",
"numpy.genfromtxt",
"matplotlib.... | [((182, 203), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (196, 203), False, 'import matplotlib\n'), ((1492, 1533), 'os.path.join', 'os.path.join', (['folder', "(prefix + '_emb.csv')"], {}), "(folder, prefix + '_emb.csv')\n", (1504, 1533), False, 'import os\n'), ((1538, 1628), 'numpy.savetxt', 'np.savetxt', (['filename_emb', 'emb'], {'fmt': '"""%s"""', 'delimiter': '""","""', 'header': 'emb_header', 'comments': '""""""'}), "(filename_emb, emb, fmt='%s', delimiter=',', header=emb_header,\n comments='')\n", (1548, 1628), True, 'import numpy as np\n'), ((2722, 2765), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': 'k', 'figsize': '(12, 12 * k)'}), '(ncols=k, figsize=(12, 12 * k))\n', (2734, 2765), True, 'import matplotlib.pyplot as plt\n'), ((3435, 3453), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3451, 3453), True, 'import matplotlib.pyplot as plt\n'), ((3875, 3922), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(4)', 'nrows': '(2)', 'figsize': '(16, 8)'}), '(ncols=4, nrows=2, figsize=(16, 8))\n', (3887, 3922), True, 'import matplotlib.pyplot as plt\n'), ((4313, 4331), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4329, 4331), True, 'import matplotlib.pyplot as plt\n'), ((4898, 4914), 'glob.glob', 'glob', (["(src + '/*')"], {}), "(src + '/*')\n", (4902, 4914), False, 'from glob import glob\n'), ((5405, 5423), 'glob.glob', 'glob', (["(dir + '/*/*')"], {}), "(dir + '/*/*')\n", (5409, 5423), False, 'from glob import glob\n'), ((5602, 5633), 'numpy.genfromtxt', 'genfromtxt', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (5612, 5633), False, 'from numpy import genfromtxt\n'), ((5649, 5683), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(6, 4)'}), '(1, 1, figsize=(6, 4))\n', (5661, 5683), True, 'import matplotlib.pyplot as plt\n'), ((6286, 6300), 'matplotlib.pyplot.close', 'plt.close', (['fig'], 
{}), '(fig)\n', (6295, 6300), True, 'import matplotlib.pyplot as plt\n'), ((6427, 6458), 'numpy.genfromtxt', 'genfromtxt', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (6437, 6458), False, 'from numpy import genfromtxt\n'), ((6474, 6509), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(15, 5)'}), '(1, 2, figsize=(15, 5))\n', (6486, 6509), True, 'import matplotlib.pyplot as plt\n'), ((7684, 7698), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (7693, 7698), True, 'import matplotlib.pyplot as plt\n'), ((9833, 9877), 'numpy.dot', 'np.dot', (['rgb[..., :3]', '[0.2989, 0.587, 0.114]'], {}), '(rgb[..., :3], [0.2989, 0.587, 0.114])\n', (9839, 9877), True, 'import numpy as np\n'), ((1311, 1330), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (1322, 1330), False, 'import os\n'), ((1965, 1988), 'numpy.stack', 'np.stack', (['info'], {'axis': '(-1)'}), '(info, axis=-1)\n', (1973, 1988), True, 'import numpy as np\n'), ((2014, 2055), 'os.path.join', 'os.path.join', (['folder', "(prefix + '_lbl.csv')"], {}), "(folder, prefix + '_lbl.csv')\n", (2026, 2055), False, 'import os\n'), ((2064, 2166), 'numpy.savetxt', 'np.savetxt', (['filename_info', 'info_to_file'], {'fmt': '"""%s"""', 'delimiter': '""","""', 'header': 'info_header', 'comments': '""""""'}), "(filename_info, info_to_file, fmt='%s', delimiter=',', header=\n info_header, comments='')\n", (2074, 2166), True, 'import numpy as np\n'), ((4979, 4999), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (4993, 4999), False, 'import os\n'), ((5009, 5026), 'os.makedirs', 'os.makedirs', (['dest'], {}), '(dest)\n', (5020, 5026), False, 'import os\n'), ((5095, 5114), 'os.path.split', 'os.path.split', (['g[i]'], {}), '(g[i])\n', (5108, 5114), False, 'import os\n'), ((6226, 6236), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6234, 6236), True, 'import matplotlib.pyplot as plt\n'), ((7624, 7634), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (7632, 7634), True, 'import matplotlib.pyplot as plt\n'), ((8132, 8156), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (8146, 8156), False, 'import os\n'), ((8503, 8542), 'csv.writer', 'csv.writer', (['output'], {'lineterminator': '"""\n"""'}), "(output, lineterminator='\\n')\n", (8513, 8542), False, 'import csv\n'), ((1279, 1301), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (1293, 1301), False, 'import os\n'), ((2866, 2890), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (2877, 2890), False, 'import random\n'), ((8223, 8262), 'csv.writer', 'csv.writer', (['output'], {'lineterminator': '"""\n"""'}), "(output, lineterminator='\\n')\n", (8233, 8262), False, 'import csv\n'), ((9891, 9923), 'numpy.stack', 'np.stack', (['(gray, gray, gray)', '(-1)'], {}), '((gray, gray, gray), -1)\n', (9899, 9923), True, 'import numpy as np\n'), ((5145, 5169), 'os.path.join', 'os.path.join', (['dest', 'tail'], {}), '(dest, tail)\n', (5157, 5169), False, 'import os\n'), ((5196, 5220), 'os.path.join', 'os.path.join', (['dest', 'tail'], {}), '(dest, tail)\n', (5208, 5220), False, 'import os\n'), ((3154, 3178), 'numpy.squeeze', 'np.squeeze', (['imgs[idx[i]]'], {}), '(imgs[idx[i]])\n', (3164, 3178), True, 'import numpy as np\n')] |
"""
分析签名算法的碰撞程度
1. 每个桶的模板数目:最多、最少、平均、方差
"""
import numpy as np
from logparser.utils.dataset import *
import collections
from logparser.ADC import log_signature, log_split
from logparser.ADC.ADC_New import log_signature, log_split
class BinEntry:
    """One signature bucket: the signature plus every template hashed into it."""

    def __init__(self):
        # Fresh containers per instance so buckets never share state.
        self.sig, self.templates = None, []
if __name__ == '__main__':
    # Collect one row of bucket statistics per dataset; keys become csv columns.
    result_dict = collections.defaultdict(list)
    for dataset in DATASET:
        print(dataset)
        # if dataset != DATASET.Mac:
        #     continue
        df = pd.read_csv(log_path_structured(dataset))
        # df.drop(['Date', 'Time', 'Pid', 'Level', 'Component'], axis=1, inplace=True, errors='ignore')
        # Keep one row per unique template (EventId) so bucket sizes count
        # distinct templates, not raw log lines.
        df.drop_duplicates(subset=['EventId'], keep='first', inplace=True)
        # Hash every template content into its signature bucket.
        bin_dict = collections.defaultdict(BinEntry)
        for idx, row in df.iterrows():
            log_content = row['Content']
            log_token_list = log_split(log_content)
            log_sig = log_signature(log_token_list, log_token_list)
            bin_dict[log_sig].sig = log_sig
            bin_dict[log_sig].templates.append(log_content)
        # Ascending bucket sizes: count_list[0] is the smallest bucket,
        # count_list[-1] the most collided one.
        count_list = [len(bin_dict[k].templates) for k in bin_dict]
        count_list.sort()
        result_dict['dataset'].append(dataset.value)
        result_dict['template_count'].append(len(df))
        result_dict['bin_count'].append(len(count_list))
        result_dict['min'].append(count_list[0])
        # NOTE(review): top2/top3 raise IndexError when a dataset produces
        # fewer than 3 buckets -- confirm every dataset yields >= 3 signatures.
        result_dict['top1'].append(count_list[-1])
        result_dict['top2'].append(count_list[-2])
        result_dict['top3'].append(count_list[-3])
        result_dict['mean'].append(np.mean(count_list))
        # NOTE(review): 'medium' holds the (upper) median bucket size; the key
        # name looks like a typo for 'median' but is kept as the csv header.
        result_dict['medium'].append(count_list[len(count_list) // 2])
        result_dict['std'].append(np.std(count_list))
    # Write the summary csv into the working directory, named after this script.
    pd.DataFrame(result_dict).to_csv('%s.csv' % os.path.basename(__file__), index=False)
| [
"numpy.mean",
"collections.defaultdict",
"logparser.ADC.ADC_New.log_signature",
"numpy.std",
"logparser.ADC.ADC_New.log_split"
] | [((372, 401), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (395, 401), False, 'import collections\n'), ((766, 799), 'collections.defaultdict', 'collections.defaultdict', (['BinEntry'], {}), '(BinEntry)\n', (789, 799), False, 'import collections\n'), ((909, 931), 'logparser.ADC.ADC_New.log_split', 'log_split', (['log_content'], {}), '(log_content)\n', (918, 931), False, 'from logparser.ADC.ADC_New import log_signature, log_split\n'), ((954, 999), 'logparser.ADC.ADC_New.log_signature', 'log_signature', (['log_token_list', 'log_token_list'], {}), '(log_token_list, log_token_list)\n', (967, 999), False, 'from logparser.ADC.ADC_New import log_signature, log_split\n'), ((1599, 1618), 'numpy.mean', 'np.mean', (['count_list'], {}), '(count_list)\n', (1606, 1618), True, 'import numpy as np\n'), ((1725, 1743), 'numpy.std', 'np.std', (['count_list'], {}), '(count_list)\n', (1731, 1743), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Calculate permutation p-values for enrichment.
:Author: <NAME> <<EMAIL>>
:Date: 2018-09-13
:Copyright: 2018, <NAME>
:License: CC BY-SA
"""
import copy
import glob
import re
import gc
from datetime import datetime
import csv
import pandas as pd
import numpy as np
from .enrichment import Enrich
from .utils import prepare_directory
class PermuteEnrich(Enrich):
    """Permute inputs to enrichment functions.

    Repeatedly shuffles the RNAseq sample IDs, re-runs the enrichment
    pipeline on each shuffled dataset, and finally compares the observed
    enrichment statistics against the permuted ones to obtain empirical
    p-values.
    """
    def __init__(self, joined_df, expr_outlier_df, output_prefix,
                 distribution, anno_list, obs_enrich_loc,
                 loop_enrich_args, write_rv_args, n_perms=1):
        """Initialize a permuted enrichment object.

        Args:
            joined_df: variant/expression table joinable on
                ('gene', 'blinded_id').
            expr_outlier_df: expression outlier table whose 'blinded_id'
                index level gets permuted each round.
            output_prefix: prefix for the per-permutation output directory.
            distribution: stored for downstream use by parent-class methods.
            anno_list: stored for downstream use by parent-class methods.
            obs_enrich_loc: location of observed enrichment results; the
                '.txt' suffix is rewritten to '_gene.txt' before reading.
            loop_enrich_args: kwargs forwarded to ``loop_enrichment``.
            write_rv_args: kwargs forwarded to ``write_rvs_w_outs_to_file``.
            n_perms: number of permutations to run.
        """
        self.joined_df = joined_df
        # Drop expression columns up front; permuted ones are re-joined later.
        self.drop_relevant_expression_columns()
        self.expr_outlier_df = expr_outlier_df
        # other inputs
        self.distribution = distribution
        self.anno_list = anno_list
        self.obs_enrich_loc = re.sub(".txt$", "_gene.txt", obs_enrich_loc)
        perm_dir = output_prefix + '_per_chrom/perms'
        prepare_directory(perm_dir)  # , clean_run=True
        # Template filled with (permutation number, timestamp) per run.
        self.generic_enrich_loc = perm_dir + '/perm_{}_{}.txt'
        self.loop_enrich_args = loop_enrich_args
        self.write_rv_args = write_rv_args
        # loop over permutations
        for n_perm in range(n_perms):
            print('=========== Permutation {} ==========='.format(str(n_perm)))
            self.run_permutation(n_perm)
            # Free per-permutation DataFrames before the next round.
            gc.collect()
        self.compare_observed_to_permuted()
    def drop_relevant_expression_columns(self):
        """Remove expression-relevant columns that will be permuted."""
        cols_to_drop = ['z_expr', 'expr_rank', 'z_abs', 'expr_outlier_neg',
                        'expr_outlier_pos', 'expr_outlier']
        # Only drop columns that are actually present in the table.
        cols_to_drop = [i for i in cols_to_drop if i in self.joined_df.columns]
        self.joined_df.drop(cols_to_drop, axis=1, inplace=True)
    def run_permutation(self, n_perm):
        """Run a single permutation.

        Shuffles the sample IDs, joins the permuted expression table back
        onto ``joined_df``, then re-runs the parent class's enrichment and
        rare-variant/outlier export with the stored argument dictionaries.

        Args:
            n_perm: index of this permutation (used in output file names).
        """
        # randomly re-assign IDs in outlier_df
        self.permute_ids()
        # join outlier_df_permute with all_data
        self.joined_df.set_index(['gene', 'blinded_id'], inplace=True)
        self.joined_df = self.joined_df.join(self.permute_expr_df, how='inner')
        # declare enrichment output file name (timestamp keeps runs unique)
        ts = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
        self.enrich_loc = self.generic_enrich_loc.format(str(n_perm), ts)
        self.rv_outlier_loc = re.sub('.txt$', '_rv_outs.txt', self.enrich_loc)
        # run loop_enrichment
        self.joined_df.reset_index(inplace=True)
        print(self.joined_df.head())
        super(PermuteEnrich, self).write_rvs_w_outs_to_file(
            **self.write_rv_args)
        super(PermuteEnrich, self).loop_enrichment(**self.loop_enrich_args)
        # (possibly just a faster method to identify the number of
        # outliers with rare variants)
        self.drop_relevant_expression_columns()
    def permute_ids(self):
        """Permute the RNAseq IDs.

        Builds ``self.id_dict`` (old ID -> shuffled ID) and
        ``self.permute_expr_df``, a deep copy of the outlier table with the
        'blinded_id' index level remapped through that dictionary.
        """
        uniq_ids = self.expr_outlier_df.index.get_level_values(
            'blinded_id').unique().tolist()
        # using permutation instead of shuffle:
        # https://stackoverflow.com/a/15474335
        perm_ids = np.random.permutation(uniq_ids).tolist()
        self.id_dict = dict(zip(uniq_ids, perm_ids))
        print(self.id_dict)
        # Deep copy so the original outlier table is never modified.
        self.permute_expr_df = copy.deepcopy(self.expr_outlier_df)
        self.permute_expr_df.reset_index(inplace=True)
        # print(self.permute_expr_df.head())
        self.permute_expr_df = self.permute_expr_df.assign(
            blinded_id=self.permute_expr_df['blinded_id'].map(self.id_dict))
        self.permute_expr_df.set_index(['gene', 'blinded_id'], inplace=True)
        # print(self.permute_expr_df.head())
    def compare_observed_to_permuted(self):
        """Compare results from observed and permuted data to get p-values.

        Counts, across all permutation result files on disk, how often each
        permuted statistic is at least as extreme as the observed one, and
        writes the per-permutation statistics to a tab-delimited summary.
        """
        obs_df = pd.read_table(self.obs_enrich_loc)
        print("Observed:")
        (n_nom_sig_obs, max_or_obs, min_p_obs,
         rv_outs_obs) = self.get_sig_metrics(obs_df)
        print(n_nom_sig_obs, max_or_obs, min_p_obs, rv_outs_obs)
        # load permutation enrichments 1 at a time
        perm_f_iter = glob.iglob(self.generic_enrich_loc.format('*', '*_gene'))
        (total_perms, total_nom_sig, total_max_or, total_min_p,
         total_rv_outs) = 0, 0, 0, 0, 0
        perm_dict = {'perm_ct': [], 'nom_sig_list': [],
                     'max_or_list': [], 'min_p_list': [],
                     'rv_outs_list': []}
        print("Reviewing permutations:")
        for perm_f in perm_f_iter:
            # print(perm_f)
            perm_df = pd.read_table(perm_f)
            n_nom_sig, max_or, min_p, rv_outs = self.get_sig_metrics(perm_df)
            # One-sided counting: permutation "wins" when at least as extreme.
            total_nom_sig += int(n_nom_sig >= n_nom_sig_obs)
            total_max_or += int(max_or >= max_or_obs)
            total_min_p += int(min_p <= min_p_obs)
            total_rv_outs += int(rv_outs >= rv_outs_obs)
            total_perms += 1
            perm_dict['perm_ct'].append(total_perms)
            perm_dict['nom_sig_list'].append(n_nom_sig)
            perm_dict['max_or_list'].append(max_or)
            perm_dict['min_p_list'].append(min_p)
            perm_dict['rv_outs_list'].append(rv_outs)
            if total_perms % 10 == 0:
                print(total_perms, total_nom_sig, total_max_or, total_min_p,
                      total_rv_outs)
            # if total_perms > 999:
            #     break
        # Empirical p-value = fraction of permutations at least as extreme.
        print("Permutation p-values are:")
        print("Nominally sig: " + str(total_nom_sig/total_perms))
        print("Max OR: " + str(total_max_or/total_perms))
        print("Min p: " + str(total_min_p/total_perms))
        print("RV out count: " + str(total_rv_outs/total_perms))
        print(n_nom_sig_obs, max_or_obs, min_p_obs, rv_outs_obs)
        ts = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
        perm_stats_f = self.generic_enrich_loc.format(
            'summary', 'stats_' + ts)
        with open(perm_stats_f, 'w') as f:
            writer = csv.writer(f, delimiter='\t')
            writer.writerow(perm_dict.keys())
            writer.writerows(zip(*perm_dict.values()))
        # run check_permutation_significance
        # count number of examples as or more extreme
        # print a histogram of the results
    def get_sig_metrics(self, df):
        """Test if a permutation is more extreme for multiple metrics.

        Args:
            df: enrichment result table with columns 'tss_cut_off',
                'af_cut_off', 'rare_out', 'or' and 'p'.

        Returns:
            Tuple ``(n_nom_sig, max_or, min_p, rv_outs)``.
        """
        # identify if permutation is more extreme based on:
        # if total number of RV-outlier pairs more than observed. (Gabriel)
        # if number of nominally significant associations (P<0.05, OR>1)
        df = df[df.tss_cut_off == 1e4]
        rv_outs = df[df.af_cut_off == 1e-5]['rare_out'].values[0]
        # df_nom_sig = df[(df.p < 0.05) & (df['or'] > 1)]
        # just using those with OR>1 for 1-sided tests
        df_nom_sig = df[df['or'] > 1]
        # 2-sided use:
        # df_nom_sig = df
        n_nom_sig = df_nom_sig[df_nom_sig.p < 0.05].shape[0]
        if df_nom_sig.shape[0] == 0:
            # No OR>1 rows at all: fall back to least-extreme values.
            max_or = 0
            min_p = 1
        else:
            # if most extreme permutation is more than most extreme observed
            max_or = df_nom_sig['or'].max()
            min_p = df_nom_sig.p.min()
        # number of RV outs is more for most significant observed
        # max_or_line = df_nom_sig['or'] == max_or
        return n_nom_sig, max_or, min_p, rv_outs
#
#
#
#
| [
"copy.deepcopy",
"csv.writer",
"datetime.datetime.now",
"gc.collect",
"pandas.read_table",
"re.sub",
"numpy.random.permutation"
] | [((968, 1012), 're.sub', 're.sub', (['""".txt$"""', '"""_gene.txt"""', 'obs_enrich_loc'], {}), "('.txt$', '_gene.txt', obs_enrich_loc)\n", (974, 1012), False, 'import re\n'), ((2501, 2549), 're.sub', 're.sub', (['""".txt$"""', '"""_rv_outs.txt"""', 'self.enrich_loc'], {}), "('.txt$', '_rv_outs.txt', self.enrich_loc)\n", (2507, 2549), False, 'import re\n'), ((3433, 3468), 'copy.deepcopy', 'copy.deepcopy', (['self.expr_outlier_df'], {}), '(self.expr_outlier_df)\n', (3446, 3468), False, 'import copy\n'), ((3969, 4003), 'pandas.read_table', 'pd.read_table', (['self.obs_enrich_loc'], {}), '(self.obs_enrich_loc)\n', (3982, 4003), True, 'import pandas as pd\n'), ((1482, 1494), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1492, 1494), False, 'import gc\n'), ((4718, 4739), 'pandas.read_table', 'pd.read_table', (['perm_f'], {}), '(perm_f)\n', (4731, 4739), True, 'import pandas as pd\n'), ((6115, 6144), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (6125, 6144), False, 'import csv\n'), ((2352, 2366), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2364, 2366), False, 'from datetime import datetime\n'), ((3280, 3311), 'numpy.random.permutation', 'np.random.permutation', (['uniq_ids'], {}), '(uniq_ids)\n', (3301, 3311), True, 'import numpy as np\n'), ((5913, 5927), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5925, 5927), False, 'from datetime import datetime\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from tensorflow.keras.models import load_model
from konlpy.tag import Okt
# Load the trained sentiment model and the Korean morphological analyzer.
model = load_model('./result_model.mod')
okt = Okt()
# Vocabulary for the bag-of-words features, one token per line.
# (Idiomatic rewrite: strip each line as it is read instead of mutating the
# list by index after the fact.)
with open('selected_words.list', 'r') as file:
    selected_words = [line.rstrip('\n') for line in file]
def tokenize(doc):
    """Morph-analyze *doc* with Okt and return 'token/POS' strings."""
    tagged = okt.pos(doc, norm=True, stem=True)
    return ['/'.join(pair) for pair in tagged]
def term_frequency(doc):
    """Count occurrences of each vocabulary word in the token list *doc*."""
    return list(map(doc.count, selected_words))
def run_review(review):
    """Classify *review* and print the predicted sentiment with its probability."""
    features = term_frequency(tokenize(review))
    # Shape the single sample as a (1, vocab_size) float32 batch.
    batch = np.asarray(features).astype('float32')[np.newaxis, :]
    score = float(model.predict(batch))
    if score > 0.5:
        print("{:.2f}% 확률, 긍정 리뷰\n".format(score * 100))
    else:
        print("{:.2f}% 확률, 부정 리뷰\n".format((1 - score) * 100))
# Interactive loop: keep classifying input until an empty line is entered.
while True:
    txt = input('감정 분석을 위한 문장을 입력 하세요: ')
    if not txt:
        break
run_review(txt) | [
"konlpy.tag.Okt",
"numpy.asarray",
"tensorflow.keras.models.load_model"
] | [((126, 158), 'tensorflow.keras.models.load_model', 'load_model', (['"""./result_model.mod"""'], {}), "('./result_model.mod')\n", (136, 158), False, 'from tensorflow.keras.models import load_model\n'), ((165, 170), 'konlpy.tag.Okt', 'Okt', ([], {}), '()\n', (168, 170), False, 'from konlpy.tag import Okt\n'), ((669, 683), 'numpy.asarray', 'np.asarray', (['tf'], {}), '(tf)\n', (679, 683), True, 'import numpy as np\n')] |
"""
We provide an implementation and pretrained weights for Pooling-based Vision
Transformers (PiT).
Paper: Rethinking Spatial Dimensions of Vision Transformers.
`[arXiv:2103.16302] <https://arxiv.org/abs/2103.16302>`_.
Original pytorch code and weights from
`NAVER AI <https://github.com/naver-ai/pit>`_.
This code has been ported from the
`timm <https://github.com/rwightman/pytorch-image-models>`_ implementation.
The following models are available.
* Models trained on ImageNet-1k
* ``pit_ti_224``
* ``pit_xs_224``
* ``pit_s_224``
* ``pit_b_224``
* Models trained on ImageNet-1k, using knowledge distillation
* ``pit_ti_distilled_224``
* ``pit_xs_distilled_224``
* ``pit_s_distilled_224``
* ``pit_b_distilled_224``
"""
# PiT
# Copyright 2021-present NAVER Corp.
# Apache License v2.0
# Modifications for timm by / Copyright 2020 <NAME>
# Copyright 2022 <NAME>
from collections import OrderedDict
from dataclasses import dataclass
from typing import List, Tuple, Union
import numpy as np
import tensorflow as tf
from tfimm.architectures.vit import ViTBlock
from tfimm.layers import interpolate_pos_embeddings_grid, norm_layer_factory
from tfimm.models import ModelConfig, keras_serializable, register_model
from tfimm.utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
# Model registry will add each entrypoint fn to this
__all__ = ["PoolingVisionTransformerConfig", "PoolingVisionTransformer"]
@dataclass
class PoolingVisionTransformerConfig(ModelConfig):
    """
    Configuration class for PiT (Pooling-based Vision Transformer) models.
    Parameters:
        name: Name of the model.
        url: URL for pretrained weights.
        nb_classes: Number of classes for classification head.
        in_channels: Number of input image channels.
        input_size: Input image size (height, width)
        patch_size: Patchifying the image is implemented via a convolutional layer with
            kernel size ``patch_size`` and stride given by ``stride``.
        stride: Stride in patch embedding layer.
        embed_dim: Feature dimensions at each stage.
        nb_blocks: Number of blocks at each stage.
        nb_heads: Number of self-attention heads at each stage.
        mlp_ratio: Ratio of mlp hidden dim to embedding dim
        distilled: If ``True``, we add a distillation head in addition to classification
            head.
        drop_rate: Dropout rate.
        attn_drop_rate: Attention dropout rate.
        drop_path_rate: Dropout rate for stochastic depth.
        norm_layer: Normalization layer. See :function:``norm_layer_factory`` for
            possible values.
        act_layer: Activation function. See :function:``act_layer_factory`` for possible
            values.
        interpolate_input: If ``True``, we interpolate position embeddings to given
            input size, so inference can be done for arbitrary input shape. If ``False``
            inference can only be performed at ``input_size``.
        crop_pct: Crop percentage for ImageNet evaluation.
        interpolation: Interpolation method for ImageNet evaluation.
        mean: Defines preprocessing function. If ``x`` is an image with pixel values
            in (0, 1), the preprocessing function is ``(x - mean) / std``.
        std: Defines preprocessing function.
        first_conv: Name of first convolutional layer. Used by
            :function:``create_model`` to adapt the number of input channels when
            loading pretrained weights.
        classifier: Name of classifier layer. Used by :function:``create_model`` to
            adapt the classifier when loading pretrained weights. Some models have
            two classifier heads, one for distillation.
    """
    nb_classes: int = 1000
    in_channels: int = 3
    input_size: Tuple[int, int] = (224, 224)
    patch_size: int = 16
    stride: int = 8
    embed_dim: Tuple = (64, 128, 256)
    nb_blocks: Tuple = (2, 6, 4)
    nb_heads: Tuple = (2, 4, 8)
    mlp_ratio: float = 4.0
    distilled: bool = False
    # Regularization
    drop_rate: float = 0.0
    attn_drop_rate: float = 0.0
    drop_path_rate: float = 0.0
    # Other parameters
    norm_layer: str = "layer_norm_eps_1e-6"
    act_layer: str = "gelu"
    # Parameters for inference
    interpolate_input: bool = False
    crop_pct: float = 0.9
    interpolation: str = "bicubic"
    # Preprocessing
    mean: Tuple[float, float, float] = IMAGENET_DEFAULT_MEAN
    std: Tuple[float, float, float] = IMAGENET_DEFAULT_STD
    # Weight transfer
    first_conv: str = "patch_embed/conv"
    classifier: Union[str, Tuple[str, str]] = "head"
    @property
    def nb_tokens(self) -> int:
        """Number of special tokens. Equals 2 if distillation is used, otherwise 1."""
        return 2 if self.distilled else 1
    @property
    def grid_size(self) -> Tuple[int, int]:
        """Grid size for patch embeddings."""
        # Standard conv output-size formula with the patch-embedding kernel.
        height = (self.input_size[0] - self.patch_size) // self.stride + 1
        width = (self.input_size[1] - self.patch_size) // self.stride + 1
        return height, width
    @property
    def transform_weights(self):
        """
        Dictionary of functions to transform weights when loading them in models with
        different configs.
        """
        return {"pos_embed": PoolingVisionTransformer.transform_pos_embed}
class ConvHeadPooling(tf.keras.layers.Layer):
    """
    Pooling layer between PiT stages.

    Spatially downsamples the patch tokens with a strided grouped convolution
    and projects the leading special tokens to the new embedding dimension
    with a dense layer. Input and output are tuples ``(x, input_size)`` where
    ``x`` has shape ``(N, nb_tokens + H*W, C)`` and ``input_size = (H, W)``.
    """
    def __init__(
        self,
        nb_tokens: int,
        in_channels: int,
        out_channels: int,
        stride: int,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.nb_tokens = nb_tokens  # number of leading special tokens
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        # Grouped conv (groups=in_channels) downsamples patch tokens spatially.
        self.pad = tf.keras.layers.ZeroPadding2D(padding=stride // 2)
        self.conv = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=stride + 1,
            strides=stride,
            groups=in_channels,
            name="conv",
        )
        # Special tokens bypass the conv and are projected linearly instead.
        self.fc = tf.keras.layers.Dense(units=out_channels, name="fc")
    def call(self, x):
        """Pool ``(x, input_size)`` and return ``(x, output_size)``."""
        x, input_size = x
        batch_size, _, nb_channels = tf.unstack(tf.shape(x))
        # Split off the special tokens from the patch tokens.
        tokens = x[:, : self.nb_tokens]
        x = x[:, self.nb_tokens :]  # (N, L, C)
        x = tf.reshape(x, (batch_size, *input_size, nb_channels))  # (N, H, W, C)
        x = self.pad(x)
        x = self.conv(x)
        tokens = self.fc(tokens)
        # New spatial size after the strided convolution.
        output_size = tf.unstack(tf.shape(x)[1:3])
        x = tf.reshape(x, (batch_size, -1, self.out_channels))
        x = tf.concat([tokens, x], axis=1)
        return x, output_size
@keras_serializable
class PoolingVisionTransformer(tf.keras.Model):
    """
    Class implementing a Pooling-based Vision Transformer (PiT).
    Paper: Rethinking Spatial Dimensions of Vision Transformers.
    `[arXiv:2103.16302] <https://arxiv.org/abs/2103.16302>`_
    Parameters:
        cfg: Configuration class for the model.
        **kwargs: Arguments are passed to ``tf.keras.Model``
    """
    cfg_class = PoolingVisionTransformerConfig
    def __init__(self, cfg: PoolingVisionTransformerConfig, **kwargs):
        super().__init__(**kwargs)
        self.cfg = cfg
        norm_layer = norm_layer_factory(cfg.norm_layer)
        # Patchify via strided convolution.
        self.patch_embed = tf.keras.layers.Conv2D(
            filters=cfg.embed_dim[0],
            kernel_size=cfg.patch_size,
            strides=cfg.stride,
            name="patch_embed/conv",
        )
        # pos_embed and cls_token are created in build(), once shapes are known.
        self.pos_embed = None
        self.pos_drop = tf.keras.layers.Dropout(rate=cfg.drop_rate)
        self.cls_token = None
        # Stochastic depth: linearly increasing rate, split per stage.
        dpr = np.linspace(0.0, cfg.drop_path_rate, sum(cfg.nb_blocks))
        dpr = np.split(dpr, np.cumsum(cfg.nb_blocks))
        # Transformer blocks interleaved with pooling layers between stages.
        self.blocks = OrderedDict()
        for j in range(len(cfg.nb_blocks)):
            for k in range(cfg.nb_blocks[j]):
                self.blocks[f"stage_{j}/block_{k}"] = ViTBlock(
                    embed_dim=cfg.embed_dim[j],
                    nb_heads=cfg.nb_heads[j],
                    mlp_ratio=cfg.mlp_ratio,
                    qkv_bias=True,
                    drop_rate=cfg.drop_rate,
                    attn_drop_rate=cfg.attn_drop_rate,
                    drop_path_rate=dpr[j][k],
                    norm_layer=cfg.norm_layer,
                    act_layer=cfg.act_layer,
                    name=f"transformers/{j}/blocks/{k}",
                )
            if j < len(cfg.nb_blocks) - 1:
                # Pooling between stages (not after the last stage).
                self.blocks[f"stage_{j}/pool"] = ConvHeadPooling(
                    nb_tokens=cfg.nb_tokens,
                    in_channels=cfg.embed_dim[j],
                    out_channels=cfg.embed_dim[j + 1],
                    stride=2,
                    name=f"transformers/{j}/pool",
                )
        self.norm = norm_layer(name="norm")
        self.head = (
            tf.keras.layers.Dense(units=cfg.nb_classes, name="head")
            if cfg.nb_classes > 0
            else tf.keras.layers.Activation("linear")  # Identity layer
        )
        if cfg.distilled:
            self.head_dist = (
                tf.keras.layers.Dense(units=cfg.nb_classes, name="head_dist")
                if cfg.nb_classes > 0
                else tf.keras.layers.Activation("linear")  # Identity layer
            )
        else:
            self.head_dist = None
    def build(self, input_shape):
        """Create position embedding and class token for the given input shape."""
        height = (input_shape[1] - self.cfg.patch_size) // self.cfg.stride + 1
        width = (input_shape[2] - self.cfg.patch_size) // self.cfg.stride + 1
        self.pos_embed = self.add_weight(
            # We keep PT style NCHW order to make weight translation easier
            shape=(1, self.cfg.embed_dim[0], height, width),
            initializer=tf.keras.initializers.TruncatedNormal(mean=0.0, stddev=0.02),
            trainable=True,
            name="pos_embed",
        )
        self.cls_token = self.add_weight(
            shape=(1, self.cfg.nb_tokens, self.cfg.embed_dim[0]),
            initializer=tf.keras.initializers.TruncatedNormal(mean=0.0, stddev=0.02),
            trainable=True,
            name="cls_token",
        )
    @property
    def dummy_inputs(self) -> tf.Tensor:
        """Returns a tensor of the correct shape for inference."""
        return tf.zeros((1, *self.cfg.input_size, self.cfg.in_channels))
    @property
    def feature_names(self) -> List[str]:
        """
        Names of features, returned when calling ``call`` with ``return_features=True``.
        """
        _, features = self(self.dummy_inputs, return_features=True)
        return list(features.keys())
    def transform_pos_embed(self, target_cfg: PoolingVisionTransformerConfig):
        """
        Transforms the position embedding weights in accordance with `target_cfg` and
        returns them.
        """
        # Interpolation happens in NHWC; convert back to stored NCHW afterwards.
        pos_embed = interpolate_pos_embeddings_grid(
            pos_embed=tf.transpose(self.pos_embed, [0, 2, 3, 1]),
            tgt_grid_size=target_cfg.grid_size,
        )
        pos_embed = tf.transpose(pos_embed, [0, 3, 1, 2])
        return pos_embed
    def forward_features(
        self, x, training: bool = False, return_features: bool = False
    ):
        """
        Forward pass through model, excluding the classifier layer. This function is
        useful if the model is used as input for downstream tasks such as object
        detection.
        Arguments:
            x: Input to model
            training: Training or inference phase?
            return_features: If ``True``, we return not only the model output, but a
                dictionary with intermediate features.
        Returns:
            If ``return_features=True``, we return a tuple ``(y, features)``, where
            ``y`` is the model output and ``features`` is a dictionary with
            intermediate features.
            If ``return_features=False``, we return only ``y``.
        """
        features = OrderedDict()
        x = self.patch_embed(x)
        # Stored NCHW; transpose to NHWC to add to the conv output.
        pos_embed = tf.transpose(self.pos_embed, [0, 2, 3, 1])
        if not self.cfg.interpolate_input:
            x = x + pos_embed
        else:
            # Resize position embeddings to the actual patch grid.
            grid_size = tf.unstack(tf.shape(x)[1:3])
            pos_embed = interpolate_pos_embeddings_grid(
                pos_embed,
                tgt_grid_size=grid_size,
            )
            x = x + pos_embed
        x = self.pos_drop(x, training=training)
        batch_size, height, width, nb_channels = tf.unstack(tf.shape(x))
        input_size = (height, width)
        # Flatten the grid to a token sequence and prepend the special tokens.
        cls_token = tf.repeat(self.cls_token, repeats=batch_size, axis=0)
        x = tf.reshape(x, (batch_size, -1, nb_channels))
        x = tf.concat([cls_token, x], axis=1)
        features["patch_embedding"] = x
        for key, block in self.blocks.items():
            if key.endswith("pool"):
                # Pooling layers also track the current spatial size.
                x, input_size = block((x, input_size), training=training)
            else:
                x = block(x, training=training)
            features[key] = x
        features["features_all"] = x
        # Keep only the special tokens for the classification head(s).
        x = x[:, : self.cfg.nb_tokens]
        x = self.norm(x, training=training)
        x = x if self.cfg.distilled else x[:, 0]
        features["features"] = x
        return (x, features) if return_features else x
    def call(self, x, training: bool = False, return_features: bool = False):
        """
        Forward pass through the full model.
        Arguments:
            x: Input to model
            training: Training or inference phase?
            return_features: If ``True``, we return not only the model output, but a
                dictionary with intermediate features.
        Returns:
            If ``return_features=True``, we return a tuple ``(y, features)``, where
            ``y`` is the model output and ``features`` is a dictionary with
            intermediate features.
            If ``return_features=False``, we return only ``y``.
        """
        features = OrderedDict()
        x = self.forward_features(x, training, return_features)
        if return_features:
            x, features = x
        if not self.cfg.distilled:
            x = self.head(x, training=training)
        else:
            # Separate logits for class and distillation tokens, stacked on axis 1.
            y = self.head(x[:, 0], training=training)
            y_dist = self.head_dist(x[:, 1], training=training)
            x = tf.stack((y, y_dist), axis=1)
        features["logits"] = x
        return (x, features) if return_features else x
@register_model
def pit_ti_224():
    """PiT-Ti at 224x224 resolution, trained on ImageNet-1k (weights via timm)."""
    settings = {
        "name": "pit_ti_224",
        "url": "[timm]",
        "patch_size": 16,
        "stride": 8,
        "embed_dim": (64, 128, 256),
        "nb_blocks": (2, 6, 4),
        "nb_heads": (2, 4, 8),
        "mlp_ratio": 4.0,
    }
    return PoolingVisionTransformer, PoolingVisionTransformerConfig(**settings)
@register_model
def pit_xs_224():
    """PiT-XS at 224x224 resolution, trained on ImageNet-1k (weights via timm)."""
    settings = {
        "name": "pit_xs_224",
        "url": "[timm]",
        "patch_size": 16,
        "stride": 8,
        "embed_dim": (96, 192, 384),
        "nb_blocks": (2, 6, 4),
        "nb_heads": (2, 4, 8),
        "mlp_ratio": 4.0,
    }
    return PoolingVisionTransformer, PoolingVisionTransformerConfig(**settings)
@register_model
def pit_s_224():
    """PiT-S at 224x224 resolution, trained on ImageNet-1k (weights via timm)."""
    settings = {
        "name": "pit_s_224",
        "url": "[timm]",
        "patch_size": 16,
        "stride": 8,
        "embed_dim": (144, 288, 576),
        "nb_blocks": (2, 6, 4),
        "nb_heads": (3, 6, 12),
        "mlp_ratio": 4.0,
    }
    return PoolingVisionTransformer, PoolingVisionTransformerConfig(**settings)
@register_model
def pit_b_224():
    """PiT-B at 224x224 resolution, trained on ImageNet-1k (weights via timm)."""
    settings = {
        "name": "pit_b_224",
        "url": "[timm]",
        "patch_size": 14,
        "stride": 7,
        "embed_dim": (256, 512, 1024),
        "nb_blocks": (3, 6, 4),
        "nb_heads": (4, 8, 16),
        "mlp_ratio": 4.0,
    }
    return PoolingVisionTransformer, PoolingVisionTransformerConfig(**settings)
@register_model
def pit_ti_distilled_224():
    """PiT-Ti at 224x224, ImageNet-1k with knowledge distillation (weights via timm)."""
    settings = {
        "name": "pit_ti_distilled_224",
        "url": "[timm]",
        "patch_size": 16,
        "stride": 8,
        "embed_dim": (64, 128, 256),
        "nb_blocks": (2, 6, 4),
        "nb_heads": (2, 4, 8),
        "mlp_ratio": 4.0,
        "distilled": True,
        "classifier": ("head", "head_dist"),
    }
    return PoolingVisionTransformer, PoolingVisionTransformerConfig(**settings)
@register_model
def pit_xs_distilled_224():
    """PiT-XS at 224x224, ImageNet-1k with knowledge distillation (weights via timm)."""
    settings = {
        "name": "pit_xs_distilled_224",
        "url": "[timm]",
        "patch_size": 16,
        "stride": 8,
        "embed_dim": (96, 192, 384),
        "nb_blocks": (2, 6, 4),
        "nb_heads": (2, 4, 8),
        "mlp_ratio": 4.0,
        "distilled": True,
        "classifier": ("head", "head_dist"),
    }
    return PoolingVisionTransformer, PoolingVisionTransformerConfig(**settings)
@register_model
def pit_s_distilled_224():
    """PiT-S at 224x224, ImageNet-1k with knowledge distillation (weights via timm)."""
    settings = {
        "name": "pit_s_distilled_224",
        "url": "[timm]",
        "patch_size": 16,
        "stride": 8,
        "embed_dim": (144, 288, 576),
        "nb_blocks": (2, 6, 4),
        "nb_heads": (3, 6, 12),
        "mlp_ratio": 4.0,
        "distilled": True,
        "classifier": ("head", "head_dist"),
    }
    return PoolingVisionTransformer, PoolingVisionTransformerConfig(**settings)
@register_model
def pit_b_distilled_224():
    """PiT-B at 224x224, ImageNet-1k with knowledge distillation (weights via timm)."""
    settings = {
        "name": "pit_b_distilled_224",
        "url": "[timm]",
        "patch_size": 14,
        "stride": 7,
        "embed_dim": (256, 512, 1024),
        "nb_blocks": (3, 6, 4),
        "nb_heads": (4, 8, 16),
        "mlp_ratio": 4.0,
        "distilled": True,
        "classifier": ("head", "head_dist"),
    }
    return PoolingVisionTransformer, PoolingVisionTransformerConfig(**settings)
| [
"collections.OrderedDict",
"tfimm.architectures.vit.ViTBlock",
"tensorflow.shape",
"tensorflow.keras.layers.Conv2D",
"tfimm.layers.norm_layer_factory",
"tensorflow.transpose",
"tfimm.layers.interpolate_pos_embeddings_grid",
"tensorflow.keras.layers.Dropout",
"tensorflow.stack",
"tensorflow.concat"... | [((5715, 5765), 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(stride // 2)'}), '(padding=stride // 2)\n', (5744, 5765), True, 'import tensorflow as tf\n'), ((5786, 5907), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'out_channels', 'kernel_size': '(stride + 1)', 'strides': 'stride', 'groups': 'in_channels', 'name': '"""conv"""'}), "(filters=out_channels, kernel_size=stride + 1,\n strides=stride, groups=in_channels, name='conv')\n", (5808, 5907), True, 'import tensorflow as tf\n'), ((5993, 6045), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'out_channels', 'name': '"""fc"""'}), "(units=out_channels, name='fc')\n", (6014, 6045), True, 'import tensorflow as tf\n'), ((6258, 6311), 'tensorflow.reshape', 'tf.reshape', (['x', '(batch_size, *input_size, nb_channels)'], {}), '(x, (batch_size, *input_size, nb_channels))\n', (6268, 6311), True, 'import tensorflow as tf\n'), ((6475, 6525), 'tensorflow.reshape', 'tf.reshape', (['x', '(batch_size, -1, self.out_channels)'], {}), '(x, (batch_size, -1, self.out_channels))\n', (6485, 6525), True, 'import tensorflow as tf\n'), ((6538, 6568), 'tensorflow.concat', 'tf.concat', (['[tokens, x]'], {'axis': '(1)'}), '([tokens, x], axis=1)\n', (6547, 6568), True, 'import tensorflow as tf\n'), ((7203, 7237), 'tfimm.layers.norm_layer_factory', 'norm_layer_factory', (['cfg.norm_layer'], {}), '(cfg.norm_layer)\n', (7221, 7237), False, 'from tfimm.layers import interpolate_pos_embeddings_grid, norm_layer_factory\n'), ((7266, 7391), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'cfg.embed_dim[0]', 'kernel_size': 'cfg.patch_size', 'strides': 'cfg.stride', 'name': '"""patch_embed/conv"""'}), "(filters=cfg.embed_dim[0], kernel_size=cfg.patch_size,\n strides=cfg.stride, name='patch_embed/conv')\n", (7288, 7391), True, 'import tensorflow as tf\n'), ((7501, 7544), 
'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'cfg.drop_rate'}), '(rate=cfg.drop_rate)\n', (7524, 7544), True, 'import tensorflow as tf\n'), ((7751, 7764), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7762, 7764), False, 'from collections import OrderedDict\n'), ((10253, 10310), 'tensorflow.zeros', 'tf.zeros', (['(1, *self.cfg.input_size, self.cfg.in_channels)'], {}), '((1, *self.cfg.input_size, self.cfg.in_channels))\n', (10261, 10310), True, 'import tensorflow as tf\n'), ((10995, 11032), 'tensorflow.transpose', 'tf.transpose', (['pos_embed', '[0, 3, 1, 2]'], {}), '(pos_embed, [0, 3, 1, 2])\n', (11007, 11032), True, 'import tensorflow as tf\n'), ((11913, 11926), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11924, 11926), False, 'from collections import OrderedDict\n'), ((11979, 12021), 'tensorflow.transpose', 'tf.transpose', (['self.pos_embed', '[0, 2, 3, 1]'], {}), '(self.pos_embed, [0, 2, 3, 1])\n', (11991, 12021), True, 'import tensorflow as tf\n'), ((12510, 12563), 'tensorflow.repeat', 'tf.repeat', (['self.cls_token'], {'repeats': 'batch_size', 'axis': '(0)'}), '(self.cls_token, repeats=batch_size, axis=0)\n', (12519, 12563), True, 'import tensorflow as tf\n'), ((12576, 12620), 'tensorflow.reshape', 'tf.reshape', (['x', '(batch_size, -1, nb_channels)'], {}), '(x, (batch_size, -1, nb_channels))\n', (12586, 12620), True, 'import tensorflow as tf\n'), ((12633, 12666), 'tensorflow.concat', 'tf.concat', (['[cls_token, x]'], {'axis': '(1)'}), '([cls_token, x], axis=1)\n', (12642, 12666), True, 'import tensorflow as tf\n'), ((13909, 13922), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13920, 13922), False, 'from collections import OrderedDict\n'), ((6144, 6155), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (6152, 6155), True, 'import tensorflow as tf\n'), ((7702, 7726), 'numpy.cumsum', 'np.cumsum', (['cfg.nb_blocks'], {}), '(cfg.nb_blocks)\n', (7711, 7726), True, 'import numpy as 
np\n'), ((8844, 8900), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'cfg.nb_classes', 'name': '"""head"""'}), "(units=cfg.nb_classes, name='head')\n", (8865, 8900), True, 'import tensorflow as tf\n'), ((8952, 8988), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""linear"""'], {}), "('linear')\n", (8978, 8988), True, 'import tensorflow as tf\n'), ((12186, 12253), 'tfimm.layers.interpolate_pos_embeddings_grid', 'interpolate_pos_embeddings_grid', (['pos_embed'], {'tgt_grid_size': 'grid_size'}), '(pos_embed, tgt_grid_size=grid_size)\n', (12217, 12253), False, 'from tfimm.layers import interpolate_pos_embeddings_grid, norm_layer_factory\n'), ((12440, 12451), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (12448, 12451), True, 'import tensorflow as tf\n'), ((14275, 14304), 'tensorflow.stack', 'tf.stack', (['(y, y_dist)'], {'axis': '(1)'}), '((y, y_dist), axis=1)\n', (14283, 14304), True, 'import tensorflow as tf\n'), ((6444, 6455), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (6452, 6455), True, 'import tensorflow as tf\n'), ((7909, 8200), 'tfimm.architectures.vit.ViTBlock', 'ViTBlock', ([], {'embed_dim': 'cfg.embed_dim[j]', 'nb_heads': 'cfg.nb_heads[j]', 'mlp_ratio': 'cfg.mlp_ratio', 'qkv_bias': '(True)', 'drop_rate': 'cfg.drop_rate', 'attn_drop_rate': 'cfg.attn_drop_rate', 'drop_path_rate': 'dpr[j][k]', 'norm_layer': 'cfg.norm_layer', 'act_layer': 'cfg.act_layer', 'name': 'f"""transformers/{j}/blocks/{k}"""'}), "(embed_dim=cfg.embed_dim[j], nb_heads=cfg.nb_heads[j], mlp_ratio=\n cfg.mlp_ratio, qkv_bias=True, drop_rate=cfg.drop_rate, attn_drop_rate=\n cfg.attn_drop_rate, drop_path_rate=dpr[j][k], norm_layer=cfg.norm_layer,\n act_layer=cfg.act_layer, name=f'transformers/{j}/blocks/{k}')\n", (7917, 8200), False, 'from tfimm.architectures.vit import ViTBlock\n'), ((9090, 9151), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'cfg.nb_classes', 'name': '"""head_dist"""'}), 
"(units=cfg.nb_classes, name='head_dist')\n", (9111, 9151), True, 'import tensorflow as tf\n'), ((9211, 9247), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""linear"""'], {}), "('linear')\n", (9237, 9247), True, 'import tensorflow as tf\n'), ((9723, 9783), 'tensorflow.keras.initializers.TruncatedNormal', 'tf.keras.initializers.TruncatedNormal', ([], {'mean': '(0.0)', 'stddev': '(0.02)'}), '(mean=0.0, stddev=0.02)\n', (9760, 9783), True, 'import tensorflow as tf\n'), ((9985, 10045), 'tensorflow.keras.initializers.TruncatedNormal', 'tf.keras.initializers.TruncatedNormal', ([], {'mean': '(0.0)', 'stddev': '(0.02)'}), '(mean=0.0, stddev=0.02)\n', (10022, 10045), True, 'import tensorflow as tf\n'), ((10873, 10915), 'tensorflow.transpose', 'tf.transpose', (['self.pos_embed', '[0, 2, 3, 1]'], {}), '(self.pos_embed, [0, 2, 3, 1])\n', (10885, 10915), True, 'import tensorflow as tf\n'), ((12144, 12155), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (12152, 12155), True, 'import tensorflow as tf\n')] |
"""
Various methods to manipulate images
"""
from __future__ import print_function
from builtins import object
import numpy as np
class IntensityNormalizeImage(object):
    """Linear intensity normalization for images stored as numpy arrays.

    All methods shift the image to be non-negative first and return a new
    array; the caller's input is not modified.
    """
    def __init__(self):
        # Method used by default_intensity_normalization(); one of
        # 'percentile_normalization' or 'max_normalization'.
        self.default_normalization_mode = 'percentile_normalization'
    def max_normalization(self, I):
        """
        Shifts the image to be non-negative and scales so its maximum becomes 1.
        :param I: input image
        :return: returns the normalized image
        """
        # first zero out negative values (subtraction creates a new array)
        I = I - I.min()
        np.clip(I, 0, None, out=I)
        max_I = np.max(I)
        if max_I == 0:
            # Constant image: nothing to scale by. Mirror the percentile
            # method's behavior of warning and returning the image unchanged
            # instead of dividing by zero.
            print('Cannot normalize based on maximum; as maximum is 0. Ignoring normalization')
            return I
        return I / max_I
    def percentile_normalization(self, I, perc=99.):
        """
        Linearly normalizes image intensities so that the perc-th percentile gets
        mapped to perc/100 (e.g. the 99-th percentile to 0.99); 0 stays 0.
        :param I: input image
        :param perc: desired percentile
        :return: returns the normalized image
        """
        # first zero out negative values (subtraction creates a new array)
        I = I - I.min()
        np.clip(I, 0, None, out=I)
        # then scale so the perc-th percentile maps to perc/100
        percI = np.percentile(I, perc)
        if percI == 0:
            # Error message now reports the actual percentile requested
            # (was hard-coded to "99-th").
            print('Cannot normalize based on percentile; as %g-th percentile is 0. Ignoring normalization' % perc)
            return I
        return I / percI * perc / 100.
    def default_intensity_normalization(self, I):
        """
        Intensity normalizes an image using the default intensity normalization method
        :param I: input image
        :return: intensity normalized image
        """
        mode = self.default_normalization_mode
        if mode == 'percentile_normalization':
            return self.percentile_normalization(I)
        elif mode == 'max_normalization':
            return self.max_normalization(I)
        # Unknown mode: warn and hand the image back untouched.
        print('ERROR: unknown normalization mode: ' + mode)
        print('ERROR: returning un-normalized image')
        return I
| [
"numpy.clip",
"numpy.percentile",
"numpy.max"
] | [((470, 496), 'numpy.clip', 'np.clip', (['I', '(0)', 'None'], {'out': 'I'}), '(I, 0, None, out=I)\n', (477, 496), True, 'import numpy as np\n'), ((908, 934), 'numpy.clip', 'np.clip', (['I', '(0)', 'None'], {'out': 'I'}), '(I, 0, None, out=I)\n', (915, 934), True, 'import numpy as np\n'), ((996, 1018), 'numpy.percentile', 'np.percentile', (['I', 'perc'], {}), '(I, perc)\n', (1009, 1018), True, 'import numpy as np\n'), ((511, 520), 'numpy.max', 'np.max', (['I'], {}), '(I)\n', (517, 520), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy
import time
import os
import magdynlab.instruments
import magdynlab.controllers
import magdynlab.data_types
import threading_decorators as ThD
import matplotlib.pyplot as plt
def Plot_IxV(Data):
    """Plot the I-V curve in Data (keys 'I' and 'V') in the 'IxV Semi' figure.

    Reuses the existing figure, axes and line when present, so repeated
    calls update the same window instead of creating new ones.
    """
    f = plt.figure('IxV Semi', (5, 4))
    if not(f.axes):
        plt.subplot()
    ax = f.axes[0]
    #check Y scale
    ymax = numpy.nanmax(Data['V'])
    ymin = numpy.nanmin(Data['V'])
    # Enforce a minimum span of 1 uV so a flat curve still gets visible limits.
    dy = numpy.max([ymax - ymin, 1E-6])
    if not(ax.lines):
        ax.plot([],[],'b.-')
        ax.set_xlim([Data['I'].min(), Data['I'].max()])
        # NOTE(review): limits are passed as [high, low] here, which inverts
        # the y axis on the first draw -- confirm this is intentional.
        ax.set_ylim([ymax+dy, ymin-dy])
    line = ax.lines[-1]
    line.set_data(Data['I'], Data['V'])
    ax.set_xlabel('Current (A)')
    ax.set_ylabel('Voltage (V)')
    ax.grid(True)
    # Re-center the y limits on the data, widening by 10% of dy if needed.
    yc = (ymax + ymin)/2
    ymin, ymax = ax.get_ylim()
    ymax = numpy.max([yc + dy*1.1/2, ymax])
    ymin = numpy.min([yc - dy*1.1/2, ymin])
    ax.set_ylim([ymin, ymax])
    f.tight_layout()
    f.canvas.draw()
def resistance(Data):
    """Estimate the resistance (Ohms) as the slope of a linear fit of V over I.

    :param Data: mapping with 'I' (current) and 'V' (voltage) arrays
    :return: fitted slope dV/dI
    """
    slope, _intercept = numpy.polyfit(Data['I'], Data['V'], 1)
    return slope
class IxV(object):
    """Current-voltage measurement driver for a Keysight B1500A analyzer.

    Connects to the instrument over VISA/TCP-IP, fetches measurement data,
    optionally saves it to disk and plots the resulting I-V curve.
    """
    def __init__(self, ResouceNames={}):
        # Instrument traffic is logged to this file.
        logFile = os.path.expanduser('~/MagDynLab.log')
        # Default VISA resource name; entries in ResouceNames override it.
        # NOTE(review): mutable default argument -- harmless only while the
        # dict is never mutated here (it is only read).
        defaultRN = dict(RN_SCA = 'TCPIP::192.168.13.7::5025::SOCKET')
        defaultRN.update(ResouceNames)
        RN_SCA = defaultRN['RN_SCA']
        self.SCA = magdynlab.instruments.KEYSIGHT_B1500A(ResourceName=RN_SCA,
                                                         logFile=logFile)
        #Experimental/plot data
        self.Data = magdynlab.data_types.DataContainer()
        self.Data.file_id = '.IxV_Semi'
        self.Info = ''
    def Measure(self, file_name=None):
        """Fetch one measurement, optionally save it, then report and plot.

        :param file_name: base path for saved output; nothing is saved when None.
        """
        self.Data.info = self.Info
        print('Measuring : %s' %file_name)
        # Get one measurement to get the data shape and dictionaries
        m_data = self.SCA.getResultDictionary(new=True, delete=True)
        for key in m_data.keys():
            self.Data[key] = m_data[key]
        if file_name is not None:
            self.Data.save(file_name)
            self.Data.savetxt(file_name + '.IxV',
                              keys=[k for k in self.Data.keys()])
        print('DONE')
        print('Resistance : %0.3E Ohms' % resistance(self.Data))
        Plot_IxV(self.Data)
| [
"numpy.polyfit",
"numpy.max",
"matplotlib.pyplot.figure",
"numpy.nanmax",
"numpy.min",
"numpy.nanmin",
"matplotlib.pyplot.subplot",
"os.path.expanduser"
] | [((254, 284), 'matplotlib.pyplot.figure', 'plt.figure', (['"""IxV Semi"""', '(5, 4)'], {}), "('IxV Semi', (5, 4))\n", (264, 284), True, 'import matplotlib.pyplot as plt\n'), ((385, 408), 'numpy.nanmax', 'numpy.nanmax', (["Data['V']"], {}), "(Data['V'])\n", (397, 408), False, 'import numpy\n'), ((421, 444), 'numpy.nanmin', 'numpy.nanmin', (["Data['V']"], {}), "(Data['V'])\n", (433, 444), False, 'import numpy\n'), ((456, 487), 'numpy.max', 'numpy.max', (['[ymax - ymin, 1e-06]'], {}), '([ymax - ymin, 1e-06])\n', (465, 487), False, 'import numpy\n'), ((875, 911), 'numpy.max', 'numpy.max', (['[yc + dy * 1.1 / 2, ymax]'], {}), '([yc + dy * 1.1 / 2, ymax])\n', (884, 911), False, 'import numpy\n'), ((920, 956), 'numpy.min', 'numpy.min', (['[yc - dy * 1.1 / 2, ymin]'], {}), '([yc - dy * 1.1 / 2, ymin])\n', (929, 956), False, 'import numpy\n'), ((317, 330), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (328, 330), True, 'import matplotlib.pyplot as plt\n'), ((1101, 1123), 'numpy.polyfit', 'numpy.polyfit', (['I', 'V', '(1)'], {}), '(I, V, 1)\n', (1114, 1123), False, 'import numpy\n'), ((1226, 1263), 'os.path.expanduser', 'os.path.expanduser', (['"""~/MagDynLab.log"""'], {}), "('~/MagDynLab.log')\n", (1244, 1263), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from functools import reduce
from itertools import zip_longest
from math import ceil
from math import floor
from math import log
from scipy import ndimage
import numpy as np
def morton_array(shape):
    """
    Return array with Morton numbers.

    The Morton (Z-order) number of a cell interleaves the bits of its
    indices, so sorting cells by Morton number keeps spatially close cells
    close together in the ordering.

    Inspired by:
    https://graphics.stanford.edu/%7Eseander/bithacks.html#InterleaveBMN
    """
    # determine the number of dimensions
    ndims = len(shape)
    # 1d compatibility: with a single axis the Morton number is the index itself
    if ndims == 1:
        return np.arange(shape[0])
    def bitcount(number):
        """ Return amount of bits used for in number """
        return int(ceil(log(number + 1, 2)))
    # feasbility check: the fully interleaved indices must fit in 64 bits
    for i, j in enumerate(shape):
        # bit number assessment
        count = bitcount(j)  # in the number
        count += (ndims - 1) * (count - 1)  # after spacing
        count += (ndims - 1) - i  # after shifting
        # numpy does not go higher than 64 bits currently
        if count > 64:
            raise ValueError('Too many bits needed for the computation')
    # generate list of zeros and masks, one per doubling round of the
    # bit-spreading loop below (classic interleave-by-binary-magic-numbers)
    ones = 1
    masks = []
    shifts = []
    pos = range(63, -1, -1)
    bmax = max(map(bitcount, shape))
    while ones < bmax:
        zeros = (ndims - 1) * ones
        shifts.append(zeros)
        period = ones + zeros
        # mask keeping blocks of `ones` bits every `period` bits
        masks.append(
            int(''.join('1' if i % period < ones else '0' for i in pos), 2),
        )
        ones *= 2
    # make indices and space them
    indices = [np.uint64(k) for k in np.ogrid[tuple(map(slice, shape))]]
    for i, (j, k) in enumerate(zip(shape, indices)):
        if j < 2:
            continue
        if j > 2:
            # first round needed for this axis, given its bit width
            start = int(floor(log(bitcount(j) - 1, 2)))
        else:
            start = 0
        # spread the index bits apart, widest spacing first
        for n in range(start, -1, -1):
            k[:] = (k | k << shifts[n]) & masks[n]
        # per-axis offset so the spread bit patterns interleave without collision
        k <<= (ndims - 1) - i
    return reduce(np.bitwise_or, indices)
def get_morton_lut(array, no_data_value):
    """
    Return a lookup table that renumbers the ints in *array* in morton order.

    :param array: 2D int array with a range of integers from 0 to no_data_value
    :param no_data_value: no data value that is excluded from rearrangement.

    Whether or not no_data_value occurs in the array, the returned lut maps
    it onto itself: lut[no_data_value] == no_data_value.
    """
    # morton code of every cell position (underscored names in the original
    # denoted morton-space variables; here they are named explicitly)
    morton_codes = morton_array(array.shape)
    morton_sentinel = morton_codes.max().item() + 1
    # for each value: the smallest morton code among the cells holding it
    values = np.arange(no_data_value + 1)
    value_to_code = ndimage.minimum(morton_codes, labels=array, index=values)
    value_to_code[no_data_value] = morton_sentinel
    # invert the mapping: rank the morton codes by sorting them
    code_to_rank = np.empty(morton_sentinel + 1, dtype='i8')
    code_to_rank[np.sort(value_to_code)] = values
    code_to_rank[morton_sentinel] = no_data_value
    # combined lookup: value -> morton code -> morton rank
    return code_to_rank[value_to_code]
def group(array):
    """
    Yield one array of indices into *array* per distinct value, in
    ascending value order.
    """
    sorted_idx = array.argsort()
    # positions in the sorted sequence where a new value starts
    starts = np.unique(array[sorted_idx], return_index=True)[1]
    for lo, hi in zip_longest(starts, starts[1:]):
        yield sorted_idx[lo:hi]
def analyze(x, y):
    """ Return (x_step, y_step) tuple.

    Return the smallest separation between points in the x-direction for
    points sharing a y-coordinate, and vice versa. That reveals the highest
    refinement level of the quadtree structure. A step is None when no two
    points share the other coordinate.
    """
    assert x.dtype == float
    assert y.dtype == float

    def smallest_gap(values, partition):
        # minimum gap between sorted values within each equal-partition group
        gaps = (np.diff(np.sort(values[idx])).min(initial=np.inf)
                for idx in group(partition))
        return min(gaps)

    x_step = smallest_gap(x, y)
    y_step = smallest_gap(y, x)
    return (None if np.isinf(x_step) else x_step,
            None if np.isinf(y_step) else y_step)
def rasterize(points):
    """ Return (array, no_data_value) tuple.

    Rasterize the indices of the points in an array at the highest quadtree
    resolution. Points representing larger quadtree squares still occupy a
    single cell; all remaining cells hold the no_data_value.
    """
    pts = np.asarray(points, dtype=float)
    x, y = pts.transpose()
    x_step, y_step = analyze(x, y)
    left, top = x.min(), y.max()
    # column/row index per point; degenerate axes collapse to index 0
    if x_step is None:
        cols = np.int64(np.zeros_like(x))
    else:
        cols = np.int64((x - left) / x_step)
    if y_step is None:
        rows = np.int64(np.zeros_like(y))
    else:
        rows = np.int64((top - y) / y_step)
    no_data_value = len(pts)
    grid = np.full((rows.max() + 1, cols.max() + 1), no_data_value)
    grid[rows, cols] = np.arange(no_data_value)
    return grid, no_data_value
def reorder(points, s1):
    """
    Return (points, s1) rearranged into morton order.
    """
    raster, no_data_value = rasterize(points)
    # raster[lut] would hold the point ids in morton order
    lut = get_morton_lut(array=raster, no_data_value=no_data_value)
    # For 'index value' a in the raster to become 'index value' lut[a], the
    # point at old index a must land at new index lut[a]; build that inverse
    # permutation explicitly (lut[:no_data_value] is a permutation of ids).
    inv = np.arange(no_data_value)
    inv[lut[:no_data_value]] = np.arange(no_data_value)
    return points[inv], s1[inv]
| [
"scipy.ndimage.minimum",
"numpy.unique",
"functools.reduce",
"numpy.sort",
"itertools.zip_longest",
"numpy.asarray",
"math.log",
"numpy.uint64",
"numpy.empty",
"numpy.isinf",
"numpy.zeros_like",
"numpy.arange"
] | [((1902, 1932), 'functools.reduce', 'reduce', (['np.bitwise_or', 'indices'], {}), '(np.bitwise_or, indices)\n', (1908, 1932), False, 'from functools import reduce\n'), ((2580, 2608), 'numpy.arange', 'np.arange', (['(no_data_value + 1)'], {}), '(no_data_value + 1)\n', (2589, 2608), True, 'import numpy as np\n'), ((2620, 2670), 'scipy.ndimage.minimum', 'ndimage.minimum', (['_array'], {'labels': 'array', 'index': 'index'}), '(_array, labels=array, index=index)\n', (2635, 2670), False, 'from scipy import ndimage\n'), ((2782, 2822), 'numpy.empty', 'np.empty', (['(_no_data_value + 1)'], {'dtype': '"""i8"""'}), "(_no_data_value + 1, dtype='i8')\n", (2790, 2822), True, 'import numpy as np\n'), ((3096, 3138), 'numpy.unique', 'np.unique', (['array[order]'], {'return_index': '(True)'}), '(array[order], return_index=True)\n', (3105, 3138), True, 'import numpy as np\n'), ((3162, 3191), 'itertools.zip_longest', 'zip_longest', (['index', 'index[1:]'], {}), '(index, index[1:])\n', (3173, 3191), False, 'from itertools import zip_longest\n'), ((4127, 4158), 'numpy.asarray', 'np.asarray', (['points'], {'dtype': 'float'}), '(points, dtype=float)\n', (4137, 4158), True, 'import numpy as np\n'), ((4508, 4532), 'numpy.arange', 'np.arange', (['no_data_value'], {}), '(no_data_value)\n', (4517, 4532), True, 'import numpy as np\n'), ((5272, 5296), 'numpy.arange', 'np.arange', (['no_data_value'], {}), '(no_data_value)\n', (5281, 5296), True, 'import numpy as np\n'), ((494, 513), 'numpy.arange', 'np.arange', (['shape[0]'], {}), '(shape[0])\n', (503, 513), True, 'import numpy as np\n'), ((1511, 1523), 'numpy.uint64', 'np.uint64', (['k'], {}), '(k)\n', (1520, 1523), True, 'import numpy as np\n'), ((2832, 2845), 'numpy.sort', 'np.sort', (['lut1'], {}), '(lut1)\n', (2839, 2845), True, 'import numpy as np\n'), ((3738, 3750), 'numpy.isinf', 'np.isinf', (['xs'], {}), '(xs)\n', (3746, 3750), True, 'import numpy as np\n'), ((3768, 3780), 'numpy.isinf', 'np.isinf', (['ys'], {}), '(ys)\n', (3776, 
3780), True, 'import numpy as np\n'), ((4329, 4345), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (4342, 4345), True, 'import numpy as np\n'), ((4397, 4413), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (4410, 4413), True, 'import numpy as np\n'), ((622, 640), 'math.log', 'log', (['(number + 1)', '(2)'], {}), '(number + 1, 2)\n', (625, 640), False, 'from math import log\n'), ((3605, 3618), 'numpy.sort', 'np.sort', (['x[i]'], {}), '(x[i])\n', (3612, 3618), True, 'import numpy as np\n'), ((3672, 3685), 'numpy.sort', 'np.sort', (['y[i]'], {}), '(y[i])\n', (3679, 3685), True, 'import numpy as np\n')] |
import tensorflow as tf
from PIL import Image, ImageFilter
import matplotlib.pyplot as plt
import os
import numpy as np
import cv2
class numberGuesser:
    """Predicts the digit drawn in 'number.png' using an MNIST-trained classifier."""

    # Location of the persisted model weights.
    checkpointpath = "training.ckpt"

    def buildModel(self):
        """Return a compiled MNIST classifier.

        Restores weights from the checkpoint when one exists; otherwise
        trains from scratch on MNIST and saves the weights for next time.
        """
        (train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
        # Scale pixel values from [0, 255] to [0, 1].
        train_x, test_x = train_x / 255.0, test_x / 255.0
        model = tf.keras.models.Sequential([
            tf.keras.layers.Flatten(input_shape=(28, 28)),
            tf.keras.layers.Dense(512, activation=tf.nn.relu),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(10, activation=tf.nn.softmax)
        ])
        model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        try:
            model.load_weights(self.checkpointpath)
        except Exception:
            # No usable checkpoint: train and persist the weights.
            # (The original also built a ModelCheckpoint callback here but
            # never passed it to fit(); that dead code has been removed.)
            model.fit(train_x, train_y, batch_size=32, epochs=100)
            model.save_weights(self.checkpointpath)
        model.evaluate(test_x, test_y)
        return model

    def guessNumber(self):
        """Predict the digit drawn in 'number.png'.

        The original duplicated the whole pipeline inside a bare
        ``except:`` to retry once on any failure; that behaviour is kept,
        but the duplicated code now lives in a single helper.
        """
        try:
            return self._predict_once()
        except Exception:
            return self._predict_once()

    def _predict_once(self):
        """One prediction pass: load, downscale, grayscale, normalize, predict."""
        model = self.buildModel()
        # Load the drawing and crop to the canvas area.
        image = Image.open("number.png")
        image = image.crop((0, 0, 500, 500))
        # Downscale to the 28x28 MNIST input size.
        image.thumbnail((28, 28), Image.ANTIALIAS)
        image.save("Converted.png")
        # Re-open with OpenCV and convert to grayscale.
        image = cv2.imread('Converted.png')
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Add the batch dimension expected by the model.
        image.shape = (1, 28, 28)
        im = np.asarray(image)
        # Pure-white (255) background pixels become empty (0) to match MNIST.
        for x in range(28):
            for y in range(28):
                if im[0][x][y] == 255:
                    im[0][x][y] = 0
        im = im / 255.0
        prediction = model.predict(im)
        return np.argmax(prediction)
| [
"PIL.Image.open",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.keras.layers.Dropout",
"numpy.asarray",
"numpy.argmax",
"os.path.dirname",
"tensorflow.keras.layers.Dense",
"cv2.cvtColor",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.layers.Flatten",
"cv2.imread"
] | [((273, 308), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), '()\n', (306, 308), True, 'import tensorflow as tf\n'), ((1449, 1473), 'PIL.Image.open', 'Image.open', (['"""number.png"""'], {}), "('number.png')\n", (1459, 1473), False, 'from PIL import Image, ImageFilter\n'), ((1744, 1771), 'cv2.imread', 'cv2.imread', (['"""Converted.png"""'], {}), "('Converted.png')\n", (1754, 1771), False, 'import cv2\n'), ((1793, 1832), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1805, 1832), False, 'import cv2\n'), ((1937, 1954), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1947, 1954), True, 'import numpy as np\n'), ((2267, 2288), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (2276, 2288), True, 'import numpy as np\n'), ((439, 484), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'input_shape': '(28, 28)'}), '(input_shape=(28, 28))\n', (462, 484), True, 'import tensorflow as tf\n'), ((499, 548), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(512)'], {'activation': 'tf.nn.relu'}), '(512, activation=tf.nn.relu)\n', (520, 548), True, 'import tensorflow as tf\n'), ((562, 590), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (585, 590), True, 'import tensorflow as tf\n'), ((605, 656), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': 'tf.nn.softmax'}), '(10, activation=tf.nn.softmax)\n', (626, 656), True, 'import tensorflow as tf\n'), ((1009, 1040), 'os.path.dirname', 'os.path.dirname', (['checkpointpath'], {}), '(checkpointpath)\n', (1024, 1040), False, 'import os\n'), ((1070, 1159), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['checkpointpath'], {'save_weights_only': '(True)', 'verbose': '(1)'}), '(checkpointpath, save_weights_only=True,\n verbose=1)\n', (1104, 1159), True, 'import 
tensorflow as tf\n'), ((2437, 2461), 'PIL.Image.open', 'Image.open', (['"""number.png"""'], {}), "('number.png')\n", (2447, 2461), False, 'from PIL import Image, ImageFilter\n'), ((2732, 2759), 'cv2.imread', 'cv2.imread', (['"""Converted.png"""'], {}), "('Converted.png')\n", (2742, 2759), False, 'import cv2\n'), ((2781, 2820), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (2793, 2820), False, 'import cv2\n'), ((2925, 2942), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (2935, 2942), True, 'import numpy as np\n'), ((3255, 3276), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (3264, 3276), True, 'import numpy as np\n')] |
from typing import List
import gluonnlp as nlp
import numpy as np
from gluonnlp.data import BERTSentenceTransform
__all__ = ['TextDataTransform', 'BERTDataTransform']
class TextDataTransform(object):
    """
    Utility class performing data pre-processing on a text dataset:
    tokenization, optional sentence-pair truncation, user-supplied
    transforms, and vocabulary lookup.
    """

    def __init__(self, vocab, tokenizer=None,
                 transforms: List = None, pair=False, max_sequence_length=100):
        """
        Init method for TextDataTransform.

        :param vocab: vocabulary used to map token lists to index lists
        :param tokenizer: callable splitting a string into tokens. Defaults to
            ``nlp.data.SpacyTokenizer('en')``, created lazily here: the
            original instantiated it as a default argument, which runs the
            expensive constructor at class-definition time and shares one
            instance across all objects.
        :param transforms: A List of transforms from gluonnlp.data.transforms,
            e.g. ClipSequence, PadSequence. May be None for no transforms.
        :param pair: whether samples are (text_a, text_b, label) pairs
        :param max_sequence_length: combined token budget for sentence pairs
        """
        self._vocab = vocab
        self._tokenizer = tokenizer if tokenizer is not None else nlp.data.SpacyTokenizer('en')
        # BUG FIX: the original stored None and then iterated it in __call__,
        # raising TypeError whenever transforms was left at its default.
        self._transforms = transforms if transforms is not None else []
        self._pair = pair
        self._max_sequence_length = max_sequence_length

    def __call__(self, sample):
        """Tokenize one sample and return (token_indices, label)."""
        if self._pair:
            text_a, text_b, label = sample
        else:
            text_a, label = sample
        tokens_a = self._tokenizer(text_a)
        tokens_b = None
        if self._pair:
            tokens_b = self._tokenizer(text_b)
        if tokens_b:
            self._truncate_seq_pair(tokens_a, tokens_b,
                                    max_length=self._max_sequence_length)
        tokens = []
        tokens.extend(tokens_a)
        if tokens_b:
            tokens.extend(tokens_b)
        for rule in self._transforms:
            tokens = rule(tokens)
        tokens = self._vocab[tokens]
        return tokens, label

    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length.

        A simple heuristic that always truncates the longer sequence one
        token at a time: if one sequence is very short, each truncated token
        likely carries more information than one from a longer sequence.
        """
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()
class BERTDataTransform(object):
    """Dataset transformation for BERT-style sentence classification or regression.

    Adapted from
    https://github.com/dmlc/gluon-nlp/blob/0f5170baca2cfa6d4dfef2df71b28c568c6ee03a/scripts/tests/test_bert_dataset_transform.py
    because it could not be imported directly from gluonnlp.

    Parameters
    ----------
    tokenizer : BERTTokenizer.
        Tokenizer for the sentences.
    max_seq_length : int.
        Maximum sequence length of the sentences.
    class_labels : list of int , float or None. defaults None
        List of all label ids for the classification task.
        If class_labels is None, the task is treated as regression.
    label_alias : dict or None
        Mapping from alternative label spellings to canonical class labels.
    pad : bool, default True
        Whether to pad the sentences to maximum length.
    pair : bool, default True
        Whether to transform sentences or sentence pairs.
    has_label : bool, default True
        Whether each input line carries a trailing label field.
    """

    def __init__(self,
                 tokenizer,
                 max_seq_length,
                 class_labels=None,
                 label_alias=None,
                 pad=True,
                 pair=True,
                 has_label=True):
        # BUG FIX: the original overwrote self.class_labels and
        # self.label_alias with None right after assigning them, so the
        # label map built below was never used by __call__ and re_init()
        # (which reads these attributes) could not work.
        self.class_labels = class_labels
        self.label_alias = label_alias
        self.tokenizer = tokenizer
        self.max_seq_length = max_seq_length
        self.has_label = has_label
        self.pair = pair
        self.pad = pad
        # classification labels are ints, regression targets are floats
        self._label_dtype = 'int32' if class_labels else 'float32'
        if has_label and class_labels:
            self._label_map = {}
            for (i, label) in enumerate(class_labels):
                self._label_map[label] = i
            if label_alias:
                for key in label_alias:
                    self._label_map[key] = self._label_map[label_alias[key]]
        self._bert_xform = BERTSentenceTransform(
            tokenizer, max_seq_length, pad=pad, pair=pair)

    def __call__(self, line):
        """Perform transformation for sequence pairs or single sequences.

        The transformation is processed in the following steps:
        - tokenize the input sequences
        - insert [CLS], [SEP] as necessary
        - generate type ids to indicate whether a token belongs to the first
          sequence or the second sequence.
        - generate valid length

        For sequence pairs, the input is a tuple of 3 strings:
        text_a, text_b and label; e.g.
            tokens: '[CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]'
            type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
            valid_length: 14
        For single sequences, the input is a tuple of 2 strings:
        (text_a, label); e.g.
            text_a: '[CLS] the dog is hairy . [SEP]'
            type_ids: 0 0 0 0 0 0 0
            valid_length: 7

        Parameters
        ----------
        line: tuple of str
            (text_a, text_b, label) for pairs, (text_a, label) otherwise.

        Returns
        -------
        np.array: input token ids in 'int32', shape (batch_size, seq_length)
        np.array: valid length in 'int32', shape (batch_size,)
        np.array: input token type ids in 'int32', shape (batch_size, seq_length)
        np.array: classification task: label id in 'int32', shape (batch_size, 1),
            regression task: label in 'float32', shape (batch_size, 1)
        """
        if self.has_label:
            input_ids, valid_length, segment_ids = self._bert_xform(line[:-1])
            label = line[-1]
            # map to int if class labels are available
            if self.class_labels:
                label = self._label_map[label]
            label = np.array([label], dtype=self._label_dtype)
            return input_ids, valid_length, segment_ids, label
        else:
            return self._bert_xform(line)

    def re_init(self):
        """Rebuild the label map and the underlying BERTSentenceTransform
        from the current attribute values (e.g. after attributes changed)."""
        self._label_dtype = 'int32' if self.class_labels else 'float32'
        if self.has_label and self.class_labels:
            self._label_map = {}
            for (i, label) in enumerate(self.class_labels):
                self._label_map[label] = i
            if self.label_alias:
                for key in self.label_alias:
                    self._label_map[key] = self._label_map[self.label_alias[key]]
        self._bert_xform = BERTSentenceTransform(
            self.tokenizer, self.max_seq_length, pad=self.pad, pair=self.pair)
| [
"gluonnlp.data.BERTSentenceTransform",
"numpy.array",
"gluonnlp.data.SpacyTokenizer"
] | [((370, 399), 'gluonnlp.data.SpacyTokenizer', 'nlp.data.SpacyTokenizer', (['"""en"""'], {}), "('en')\n", (393, 399), True, 'import gluonnlp as nlp\n'), ((4459, 4527), 'gluonnlp.data.BERTSentenceTransform', 'BERTSentenceTransform', (['tokenizer', 'max_seq_length'], {'pad': 'pad', 'pair': 'pair'}), '(tokenizer, max_seq_length, pad=pad, pair=pair)\n', (4480, 4527), False, 'from gluonnlp.data import BERTSentenceTransform\n'), ((7518, 7610), 'gluonnlp.data.BERTSentenceTransform', 'BERTSentenceTransform', (['self.tokenizer', 'self.max_seq_length'], {'pad': 'self.pad', 'pair': 'self.pair'}), '(self.tokenizer, self.max_seq_length, pad=self.pad,\n pair=self.pair)\n', (7539, 7610), False, 'from gluonnlp.data import BERTSentenceTransform\n'), ((6888, 6930), 'numpy.array', 'np.array', (['[label]'], {'dtype': 'self._label_dtype'}), '([label], dtype=self._label_dtype)\n', (6896, 6930), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
"""
词向量测试 6M
词向量:
- 规模: 6115353 x 64D
- 来源: [自然语言处理中句子相似度计算的几种方法](https://cuiqingcai.com/6101.html)提供的[news_12g_baidubaike_20g_novel_90g_embedding_64.bin](https://pan.baidu.com/s/1TZ8GII0CEX32ydjsfMc0zw)
使用(下面代码可能有错误, 请自行修改):
```
# 编辑Dockerfile
echo 'FROM frkhit/benchmark-word2vec:latest
COPY news_12g_baidubaike_20g_novel_90g_embedding_64.bin ./word_vec_1m.bin
COPY benchmark_1M.py benchmark.py ./
ENTRYPOINT ["python"]
CMD ["-u", "benchmark_1M.py"]
' > Dockerfile
# docker build
docker build -t benchmark:latest .
# docker run
docker logs -f $(docker run -d benchmark:latest)
```
测试结果:
- faiss[Flat]: load index, 31.92s; search 100 times by word, 209.59s; search 100 times by vec, 215.94s
- faiss[IMI2x10,Flat; nprobe=8192]: load index, 53.94s; search 100 times by word, 4.36s; search 100 times by vec, 4.22s
- gensim: load index, 208.36s; search 100 times by word, 394.81s; search 100 times by vec, 423.10s
"""
from __future__ import absolute_import
import pickle
import time
import gensim
import numpy as np
import os
from pyxtools import global_init_logger
from pyxtools.faiss_tools import faiss
from benchmark import FaissBenchmark, GensimBenchmark
class Mixin(object):
    """Shared helpers for the 6M-word benchmark classes."""

    def load_pre_trained_model(self):
        """Return the pre-trained word2vec model loaded from disk."""
        return gensim.models.KeyedVectors.load_word2vec_format(
            self.word_vec_model_file, binary=True)

    def _global_prepare(self):
        """Hook for one-off global setup; nothing to do for this benchmark."""
        pass

    @staticmethod
    def get_word_list() -> [str]:
        """Return the fixed list of query words used for benchmarking."""
        return ["计算机", "中国", "人工智能", "自然语言", "语言", "科学", "哲学", "未来", "人类", "地球"]
class GensimBenchmark1M(Mixin, GensimBenchmark):
    """Gensim similarity-search benchmark over the large (6M-word) model."""

    def __init__(self):
        super(GensimBenchmark1M, self).__init__()
        # Pre-trained binary word2vec model used by this benchmark.
        self.word_vec_model_file = "word_vec_1m.bin"
class FaissBenchmark1M(Mixin, FaissBenchmark):
    """Faiss similarity-search benchmark over the large (6M-word) model."""

    def __init__(self):
        super(FaissBenchmark1M, self).__init__()
        self.word_vec_model_file = "word_vec_1m.bin"
        self.faiss_index_file = "./faiss_1m.index"
        self.faiss_index_detail_pkl = "./faiss_1m.pkl"
        # faiss settings: inverted multi-index factory string and probe count
        self._faiss_factory = "IMI2x10,Flat"
        self.n_probe = 8192

    def prepare(self):
        """Convert the Gensim-format model into a Faiss index (built once, then cached on disk)."""
        # NOTE(review): super(FaissBenchmark, self) deliberately skips
        # FaissBenchmark.prepare in the MRO -- confirm this is intentional.
        super(FaissBenchmark, self).prepare()
        # turn model from gensim to faiss index
        if os.path.exists(self.faiss_index_file) and os.path.exists(self.faiss_index_detail_pkl):
            return
        # load model vectors into one big float32 matrix
        self.logger.info("loading model...")
        time_start = time.time()
        gensim_model = self.load_pre_trained_model()
        model_size = len(gensim_model.vocab)
        self.dimension = gensim_model.vector_size
        feature = np.zeros(shape=(model_size, self.dimension), dtype=np.float32)
        word_list = [word for word in gensim_model.vocab]
        for i, word in enumerate(word_list):
            feature[i] = gensim_model.get_vector(word)  # not normed
        self.logger.info("success to load index! Cost {} seconds!".format(time.time() - time_start))
        # train faiss index on L2-normalized vectors (cosine similarity)
        normed_feature = feature / np.linalg.norm(feature, axis=1, keepdims=True)
        faiss_index = faiss.index_factory(self.dimension, self._faiss_factory)
        self.logger.info("training index...")
        time_start = time.time()
        faiss_index.train(normed_feature)  # nb * d
        faiss_index.add(normed_feature)
        self.logger.info("success to train index! Cost {} seconds!".format(time.time() - time_start))
        # save index and the (word list, raw feature) detail in files
        faiss.write_index(faiss_index, self.faiss_index_file)
        with open(self.faiss_index_detail_pkl, "wb") as f:
            pickle.dump((word_list, feature), f)

    def search(self):
        """Search for similar words by word, timing the queries."""
        self._model.nprobe = self.n_probe
        super(FaissBenchmark1M, self).search()

    def vec_search(self):
        """Search directly with raw word vectors, timing the queries."""
        self._model.nprobe = self.n_probe
        super(FaissBenchmark1M, self).vec_search()
if __name__ == '__main__':
    # global logger
    global_init_logger()
    # Run each benchmark (Faiss first, then Gensim) against the same model.
    for method_cls in [FaissBenchmark1M, GensimBenchmark1M, ]:
        method_cls().run()
| [
"os.path.exists",
"pickle.dump",
"pyxtools.faiss_tools.faiss.index_factory",
"numpy.linalg.norm",
"gensim.models.KeyedVectors.load_word2vec_format",
"numpy.zeros",
"pyxtools.faiss_tools.faiss.write_index",
"pyxtools.global_init_logger",
"time.time"
] | [((4099, 4119), 'pyxtools.global_init_logger', 'global_init_logger', ([], {}), '()\n', (4117, 4119), False, 'from pyxtools import global_init_logger\n'), ((1293, 1383), 'gensim.models.KeyedVectors.load_word2vec_format', 'gensim.models.KeyedVectors.load_word2vec_format', (['self.word_vec_model_file'], {'binary': '(True)'}), '(self.word_vec_model_file,\n binary=True)\n', (1340, 1383), False, 'import gensim\n'), ((2580, 2591), 'time.time', 'time.time', ([], {}), '()\n', (2589, 2591), False, 'import time\n'), ((2758, 2820), 'numpy.zeros', 'np.zeros', ([], {'shape': '(model_size, self.dimension)', 'dtype': 'np.float32'}), '(shape=(model_size, self.dimension), dtype=np.float32)\n', (2766, 2820), True, 'import numpy as np\n'), ((3227, 3283), 'pyxtools.faiss_tools.faiss.index_factory', 'faiss.index_factory', (['self.dimension', 'self._faiss_factory'], {}), '(self.dimension, self._faiss_factory)\n', (3246, 3283), False, 'from pyxtools.faiss_tools import faiss\n'), ((3351, 3362), 'time.time', 'time.time', ([], {}), '()\n', (3360, 3362), False, 'import time\n'), ((3589, 3642), 'pyxtools.faiss_tools.faiss.write_index', 'faiss.write_index', (['faiss_index', 'self.faiss_index_file'], {}), '(faiss_index, self.faiss_index_file)\n', (3606, 3642), False, 'from pyxtools.faiss_tools import faiss\n'), ((2378, 2415), 'os.path.exists', 'os.path.exists', (['self.faiss_index_file'], {}), '(self.faiss_index_file)\n', (2392, 2415), False, 'import os\n'), ((2420, 2463), 'os.path.exists', 'os.path.exists', (['self.faiss_index_detail_pkl'], {}), '(self.faiss_index_detail_pkl)\n', (2434, 2463), False, 'import os\n'), ((3158, 3204), 'numpy.linalg.norm', 'np.linalg.norm', (['feature'], {'axis': '(1)', 'keepdims': '(True)'}), '(feature, axis=1, keepdims=True)\n', (3172, 3204), True, 'import numpy as np\n'), ((3714, 3750), 'pickle.dump', 'pickle.dump', (['(word_list, feature)', 'f'], {}), '((word_list, feature), f)\n', (3725, 3750), False, 'import pickle\n'), ((3067, 3078), 'time.time', 
'time.time', ([], {}), '()\n', (3076, 3078), False, 'import time\n'), ((3530, 3541), 'time.time', 'time.time', ([], {}), '()\n', (3539, 3541), False, 'import time\n')] |
from openvino.inference_engine import IENetwork, IEPlugin
from argparse import ArgumentParser
from PIL import Image, ImageDraw
import logging as log
import numpy as np
import time
import cv2
import sys
import os
def build_argparser():
    """Create the command-line argument parser for the inference demo."""
    ap = ArgumentParser()
    ap.add_argument("-m", "--model", required=True, type=str,
                    help="Path to an .xml file with a trained model.")
    ap.add_argument("-i", "--input", required=True, type=str,
                    help="Path to a folder with images")
    ap.add_argument("-l", "--cpu_extension", type=str, default=None,
                    help="MKLDNN (CPU)-targeted custom layers.Absolute path to a shared library with the kernels impl.")
    ap.add_argument("-d", "--device", default="CPU", type=str,
                    help="Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
                         "will look for a suitable plugin for device specified (CPU by default)")
    ap.add_argument("-pp", "--plugin_dir", type=str, default=None,
                    help="Path to a plugin folder")
    ap.add_argument("-pt", "--prob_threshold", default=0.5, type=float,
                    help="Probability threshold for detections filtering")
    ap.add_argument("-o", "--output", required=True, type=str,
                    help="Path to a folder to save inferred images")
    ap.add_argument("--labels", default=None, type=str, help="Labels mapping file")
    return ap
def load_labels(labelsPath):
    """Read a labels file and return one label per line, trailing whitespace stripped."""
    with open(labelsPath, "r") as handle:
        return [line.rstrip() for line in handle]
def pre_process_image_opencv(imagePath, shapes):
    """Load an image with OpenCV and reshape it to the network input layout.

    :param imagePath: path of the image file
    :param shapes: (n, c, h, w) input layout of the network
    :return: (original image, (n, c, h, w) blob, imagePath)
    """
    n, c, h, w = shapes
    original = cv2.imread(imagePath)
    resized = cv2.resize(original, (w, h))
    # HWC -> CHW, then add the batch dimension.
    blob = resized.transpose((2, 0, 1)).reshape((n, c, h, w))
    return original, blob, imagePath
def pre_process_image(imagePath, shapes):
    """Load an image with PIL and reshape it to the network input layout.

    :param imagePath: path of the image file
    :param shapes: (n, c, h, w) input layout of the network
    :return: (original image as array, (n, c, h, w) blob, imagePath)
    """
    n, c, h, w = shapes
    pil_image = Image.open(imagePath)
    resized = np.array(pil_image.resize((w, h), resample=Image.BILINEAR))
    # HWC -> CHW, then add the batch dimension.
    blob = resized.transpose((2, 0, 1)).reshape((n, c, h, w))
    return np.array(pil_image), blob, imagePath
def readInputs(imagesFolder):
    """Return the paths of all files found recursively under imagesFolder."""
    return [os.path.join(root, name)
            for root, _dirs, names in os.walk(imagesFolder)
            for name in names]
def run_single_inference(fileName, exec_net, input_blob, out_blob, shapes):
    """Run one inference on the image at fileName and log the elapsed time.

    :param fileName: path of the image to infer on
    :param exec_net: loaded OpenVINO executable network
    :param input_blob: name of the network input blob
    :param out_blob: name of the network output blob (kept for interface parity)
    :param shapes: (n, c, h, w) input layout of the network
    """
    infer_time = []
    # BUG FIX: pre_process_image returns (image, processedImg, imagePath);
    # the original unpacked only two values, which raised ValueError.
    image, processed_img, image_path = pre_process_image(fileName, shapes)
    t0 = time.time()
    # Feed the pre-processed (n, c, h, w) blob that matches the network
    # input layout (the original fed the raw full-size image).
    res = exec_net.infer(inputs={input_blob: processed_img})
    infer_time.append((time.time() - t0) * 1000)
    log.info("Average running time of one iteration: {} ms".format(np.average(np.asarray(infer_time))))
def drawBox(image, fileName, text):
    """Draw a thick red border and a caption on a CHW image array, then save it
    under the basename of fileName in the working directory.

    :param image: normalized image array in (c, h, w) (or (1, c, h, w)) layout
    :param fileName: original file path; only its basename is used for saving
    :param text: caption drawn in the top-left corner
    """
    # BUG FIX: ImageFont was used but never imported at module level
    # (the file imports only Image and ImageDraw), raising NameError.
    from PIL import ImageFont

    weight = 5
    # Undo normalization: CHW float image in [0, 1] -> HWC uint8 PIL image.
    image = np.squeeze(image)
    image = image.transpose((1, 2, 0))
    print(image.shape)
    image = Image.fromarray((image * 255).astype('uint8'))
    width, height = image.size
    padding = 5
    x_min = 0 + padding
    y_min = 0 + padding
    x_max = x_min + width - 2 * padding
    y_max = y_min + height - 2 * padding
    color = (255, 0, 0)
    draw = ImageDraw.Draw(image)
    # Stack several 1-px rectangles to fake a thick border.
    for i in range(weight):
        draw.rectangle([(x_min - i, y_min - i), (x_max + i, y_max + i)], outline=color)
    # NOTE(review): this font path only exists in a Pillow source checkout --
    # confirm it is valid in the deployment environment.
    fnt = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 40)
    draw.text((10, 10), text, font=fnt, fill=(255, 255, 255, 255))
    image.save(os.path.basename(fileName))
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
model_xml = args.model
model_bin = os.path.splitext(model_xml)[0] + ".bin"
if args.labels:
labels = load_labels(args.labels)
imagesFolder = args.input
# Main sync point:
# in the truly Async mode we start the NEXT infer request, while waiting for the CURRENT to complete
# in the regular mode we start the CURRENT request and immediately wait for it's completion
is_async_mode = False
# Get images from input folder
listDir = readInputs(imagesFolder)
# Read IR
log.info("Reading IR...")
net = IENetwork.from_ir(model=model_xml, weights=model_bin)
# Plugin initialization for specified device and load extensions library if specified
log.info("Initializing plugin for {} device...".format(args.device))
plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
if args.cpu_extension and 'CPU' in args.device:
plugin.add_cpu_extension(args.cpu_extension)
assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
assert len(net.outputs) == 1, "Sample supports only single output topologies"
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
shapes = net.inputs[input_blob]
# Load network to the plugin
exec_net = plugin.load(network=net)
del net
cur_request_id = 0
next_request_id = 1
for fileName in listDir:
image, processedImg, imagePath = pre_process_image(fileName, shapes)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
initial_w = image.shape[1]
initial_h = image.shape[0]
inf_start = time.time()
if is_async_mode:
exec_net.start_async(request_id=next_request_id, inputs={input_blob: processedImg})
else:
exec_net.start_async(request_id=cur_request_id, inputs={input_blob: processedImg})
if exec_net.requests[cur_request_id].wait(-1) == 0:
inf_end = time.time()
det_time = inf_end - inf_start
res = exec_net.requests[cur_request_id].outputs[out_blob]
for obj in res[0][0]:
# Draw only objects when probability more than specified threshold
if obj[2] > args.prob_threshold:
xmin = int(obj[3] * initial_w)
ymin = int(obj[4] * initial_h)
xmax = int(obj[5] * initial_w)
ymax = int(obj[6] * initial_h)
class_id = int(obj[1]) -1
# Draw box and label\class_id
color = (min(class_id * 12.5, 255), min(class_id * 7, 255), min(class_id * 5, 255))
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)
det_label = labels[class_id] if labels else str(class_id)
cv2.putText(image, det_label + ' ' + str(round(obj[2] * 100, 1)) + ' %', (xmin, ymin - 7),
cv2.FONT_HERSHEY_COMPLEX, 0.6, color, 1)
cv2.imwrite(os.path.join(args.output, os.path.basename(fileName)), image)
drawBox()
# Draw performance stats
inf_time_message = "Inference time: N\A for async mode" if is_async_mode else \
"Inference time: {:.3f} ms".format(det_time * 1000)
# render_time_message = "OpenCV rendering time: {:.3f} ms".format(render_time * 1000)
async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
"Async mode is off. Processing request {}".format(cur_request_id)
print(inf_time_message)
# cv2.putText(image, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
# # cv2.putText(image, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
# cv2.putText(image, async_mode_message, (10, int(initial_h - 20)), cv2.FONT_HERSHEY_COMPLEX, 0.5,
# (10, 10, 200), 1)
if is_async_mode:
cur_request_id, next_request_id = next_request_id, cur_request_id
del exec_net
del plugin
main() | [
"cv2.rectangle",
"numpy.array",
"PIL.ImageDraw.Draw",
"logging.info",
"os.walk",
"argparse.ArgumentParser",
"numpy.asarray",
"openvino.inference_engine.IENetwork.from_ir",
"openvino.inference_engine.IEPlugin",
"os.path.splitext",
"numpy.squeeze",
"cv2.cvtColor",
"cv2.resize",
"time.time",
... | [((246, 262), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (260, 262), False, 'from argparse import ArgumentParser\n'), ((1612, 1633), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (1622, 1633), False, 'import cv2\n'), ((1650, 1675), 'cv2.resize', 'cv2.resize', (['image', '(w, h)'], {}), '(image, (w, h))\n', (1660, 1675), False, 'import cv2\n'), ((1927, 1948), 'PIL.Image.open', 'Image.open', (['imagePath'], {}), '(imagePath)\n', (1937, 1948), False, 'from PIL import Image, ImageDraw\n'), ((2287, 2308), 'os.walk', 'os.walk', (['imagesFolder'], {}), '(imagesFolder)\n', (2294, 2308), False, 'import os\n'), ((2568, 2579), 'time.time', 'time.time', ([], {}), '()\n', (2577, 2579), False, 'import time\n'), ((2863, 2880), 'numpy.squeeze', 'np.squeeze', (['image'], {}), '(image)\n', (2873, 2880), True, 'import numpy as np\n'), ((3201, 3222), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (3215, 3222), False, 'from PIL import Image, ImageDraw\n'), ((3520, 3614), 'logging.basicConfig', 'log.basicConfig', ([], {'format': '"""[ %(levelname)s ] %(message)s"""', 'level': 'log.INFO', 'stream': 'sys.stdout'}), "(format='[ %(levelname)s ] %(message)s', level=log.INFO,\n stream=sys.stdout)\n", (3535, 3614), True, 'import logging as log\n'), ((4128, 4153), 'logging.info', 'log.info', (['"""Reading IR..."""'], {}), "('Reading IR...')\n", (4136, 4153), True, 'import logging as log\n'), ((4161, 4214), 'openvino.inference_engine.IENetwork.from_ir', 'IENetwork.from_ir', ([], {'model': 'model_xml', 'weights': 'model_bin'}), '(model=model_xml, weights=model_bin)\n', (4178, 4214), False, 'from openvino.inference_engine import IENetwork, IEPlugin\n'), ((4383, 4440), 'openvino.inference_engine.IEPlugin', 'IEPlugin', ([], {'device': 'args.device', 'plugin_dirs': 'args.plugin_dir'}), '(device=args.device, plugin_dirs=args.plugin_dir)\n', (4391, 4440), False, 'from openvino.inference_engine import IENetwork, IEPlugin\n'), ((2177, 
2192), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2185, 2192), True, 'import numpy as np\n'), ((3474, 3500), 'os.path.basename', 'os.path.basename', (['fileName'], {}), '(fileName)\n', (3490, 3500), False, 'import os\n'), ((5038, 5076), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (5050, 5076), False, 'import cv2\n'), ((5150, 5161), 'time.time', 'time.time', ([], {}), '()\n', (5159, 5161), False, 'import time\n'), ((3687, 3714), 'os.path.splitext', 'os.path.splitext', (['model_xml'], {}), '(model_xml)\n', (3703, 3714), False, 'import os\n'), ((5430, 5441), 'time.time', 'time.time', ([], {}), '()\n', (5439, 5441), False, 'import time\n'), ((2349, 2373), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (2361, 2373), False, 'import os\n'), ((2622, 2637), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2630, 2637), True, 'import numpy as np\n'), ((2660, 2671), 'time.time', 'time.time', ([], {}), '()\n', (2669, 2671), False, 'import time\n'), ((2757, 2779), 'numpy.asarray', 'np.asarray', (['infer_time'], {}), '(infer_time)\n', (2767, 2779), True, 'import numpy as np\n'), ((5964, 6022), 'cv2.rectangle', 'cv2.rectangle', (['image', '(xmin, ymin)', '(xmax, ymax)', 'color', '(2)'], {}), '(image, (xmin, ymin), (xmax, ymax), color, 2)\n', (5977, 6022), False, 'import cv2\n'), ((6269, 6295), 'os.path.basename', 'os.path.basename', (['fileName'], {}), '(fileName)\n', (6285, 6295), False, 'import os\n')] |
import numpy as np
gauss_len = 100
gaussian_amp = 0.2
def gauss(amplitude, mu, sigma, delf, length):
t = np.linspace(-length / 2, length / 2, length)
gauss_wave = amplitude * np.exp(-((t - mu) ** 2) / (2 * sigma ** 2))
# Detuning correction Eqn. (4) in Chen et al. PRL, 116, 020501 (2016)
gauss_wave = gauss_wave * np.exp(2 * np.pi * delf * t)
return [float(x) for x in gauss_wave]
def gauss_der(amplitude, mu, sigma, delf, length):
t = np.linspace(-length / 2, length / 2, length)
gauss_der_wave = (
amplitude * (-2 * (t - mu)) * np.exp(-((t - mu) ** 2) / (2 * sigma ** 2))
)
# Detuning correction Eqn. (4) in Chen et al. PRL, 116, 020501 (2016)
gauss_der_wave = gauss_der_wave * np.exp(2 * np.pi * delf * t)
return [float(x) for x in gauss_der_wave]
def IQ_imbalance(g, phi):
c = np.cos(phi)
s = np.sin(phi)
N = 1 / ((1 - g ** 2) * (2 * c ** 2 - 1))
return [float(N * x) for x in [(1 - g) * c, (1 + g) * s, (1 - g) * s, (1 + g) * c]]
delf = 0.0 # Detuning frequency e.g. [-25,-10] MHz
gauss_pulse = gauss(gaussian_amp, 0, 6, delf, gauss_len)
drag_gauss_pulse = gauss(gaussian_amp, 0, 6, delf, gauss_len)
alpha = 0.05
delta = 0.8 - 2 * np.pi * delf # Below Eqn. (4) in Chen et al.
drag_gauss_der_pulse = gauss_der(alpha / delta * gaussian_amp, 0, 6, delf, gauss_len)
readout_len = 400
qubit_IF = 0
rr_IF = 0
qubit_LO = 6.345e9
rr_LO = 4.755e9
config = {
"version": 1,
"controllers": {
"con1": {
"type": "opx1",
"analog_outputs": {
1: {"offset": +0.0}, # qubit 1-I
2: {"offset": +0.0}, # qubit 1-Q
3: {"offset": +0.0}, # Readout resonator
4: {"offset": +0.0}, # Readout resonator
},
"digital_outputs": {
1: {},
},
"analog_inputs": {
1: {"offset": +0.0},
2: {"offset": +0.0},
},
}
},
"elements": {
"qubit": {
"mixInputs": {
"I": ("con1", 1),
"Q": ("con1", 2),
"lo_frequency": qubit_LO,
"mixer": "mixer_qubit",
},
"intermediate_frequency": qubit_IF,
"operations": {
"X/2": "DRAG_PULSE",
"X": "DRAG_PULSE",
"-X/2": "DRAG_PULSE",
"Y/2": "DRAG_PULSE",
"Y": "DRAG_PULSE",
"-Y/2": "DRAG_PULSE",
},
},
"rr": {
"mixInputs": {
"I": ("con1", 3),
"Q": ("con1", 4),
"lo_frequency": rr_LO,
"mixer": "mixer_RR",
},
"intermediate_frequency": rr_IF,
"operations": {
"readout": "readout_pulse",
},
"outputs": {"out1": ("con1", 1)},
"time_of_flight": 28,
"smearing": 0,
},
},
"pulses": {
"XPulse": {
"operation": "control",
"length": gauss_len,
"waveforms": {"I": "gauss_wf", "Q": "zero_wf"},
},
"YPulse": {
"operation": "control",
"length": gauss_len,
"waveforms": {"I": "zero_wf", "Q": "gauss_wf"},
},
"DRAG_PULSE": {
"operation": "control",
"length": gauss_len,
"waveforms": {"I": "DRAG_gauss_wf", "Q": "DRAG_gauss_der_wf"},
},
"readout_pulse": {
"operation": "measurement",
"length": gauss_len,
"waveforms": {"I": "gauss_wf", "Q": "zero_wf"},
"integration_weights": {
"integW1": "integW1",
"integW2": "integW2",
},
"digital_marker": "ON",
},
},
"waveforms": {
"zero_wf": {"type": "constant", "sample": 0.0},
"gauss_wf": {"type": "arbitrary", "samples": gauss_pulse},
"DRAG_gauss_wf": {"type": "arbitrary", "samples": drag_gauss_pulse},
"DRAG_gauss_der_wf": {"type": "arbitrary", "samples": drag_gauss_der_pulse},
"readout_wf": {"type": "constant", "sample": 0.3},
},
"digital_waveforms": {
"ON": {"samples": [(1, 0)]},
},
"integration_weights": {
"integW1": {
"cosine": [1.0] * int(readout_len / 4),
"sine": [0.0] * int(readout_len / 4),
},
"integW2": {
"cosine": [0.0] * int(readout_len / 4),
"sine": [1.0] * int(readout_len / 4),
},
},
"mixers": {
"mixer_qubit": [
{
"intermediate_frequency": qubit_IF,
"lo_frequency": qubit_LO,
"correction": IQ_imbalance(0.0, 0.0),
}
],
"mixer_RR": [
{
"intermediate_frequency": rr_IF,
"lo_frequency": rr_LO,
"correction": IQ_imbalance(0.0, 0.0),
}
],
},
}
| [
"numpy.sin",
"numpy.linspace",
"numpy.exp",
"numpy.cos"
] | [((119, 163), 'numpy.linspace', 'np.linspace', (['(-length / 2)', '(length / 2)', 'length'], {}), '(-length / 2, length / 2, length)\n', (130, 163), True, 'import numpy as np\n'), ((481, 525), 'numpy.linspace', 'np.linspace', (['(-length / 2)', '(length / 2)', 'length'], {}), '(-length / 2, length / 2, length)\n', (492, 525), True, 'import numpy as np\n'), ((870, 881), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (876, 881), True, 'import numpy as np\n'), ((891, 902), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (897, 902), True, 'import numpy as np\n'), ((194, 235), 'numpy.exp', 'np.exp', (['(-(t - mu) ** 2 / (2 * sigma ** 2))'], {}), '(-(t - mu) ** 2 / (2 * sigma ** 2))\n', (200, 235), True, 'import numpy as np\n'), ((344, 372), 'numpy.exp', 'np.exp', (['(2 * np.pi * delf * t)'], {}), '(2 * np.pi * delf * t)\n', (350, 372), True, 'import numpy as np\n'), ((589, 630), 'numpy.exp', 'np.exp', (['(-(t - mu) ** 2 / (2 * sigma ** 2))'], {}), '(-(t - mu) ** 2 / (2 * sigma ** 2))\n', (595, 630), True, 'import numpy as np\n'), ((754, 782), 'numpy.exp', 'np.exp', (['(2 * np.pi * delf * t)'], {}), '(2 * np.pi * delf * t)\n', (760, 782), True, 'import numpy as np\n')] |
import numpy
import pandas
import scipy
import sklearn.metrics as metrics
from sklearn.model_selection import train_test_split
import statsmodels.api as stats
# The SWEEP Operator
def SWEEPOperator (pDim, inputM, tol):
# pDim: dimension of matrix inputM, positive integer
# inputM: a square and symmetric matrix, numpy array
# tol: singularity tolerance, positive real
aliasParam = []
nonAliasParam = []
A = numpy.array(inputM, copy = True, dtype = numpy.float)
diagA = numpy.diagonal(A)
for k in range(pDim):
akk = A[k,k]
if (akk >= (tol * diagA[k])):
nonAliasParam.append(k)
for i in range(pDim):
if (i != k):
for j in range(pDim):
if (j != k):
A[i,j] = A[i,j] - A[i,k] * (A[k,j] / akk)
A[j,i] = A[i,j]
A[i,k] = A[i,k] / akk
A[k,i] = A[i,k]
A[k,k] = - 1.0 / akk
else:
aliasParam.append(k)
for i in range(pDim):
A[i,k] = 0.0
A[k,i] = 0.0
return A, aliasParam, nonAliasParam
# A function that find the non-aliased columns, fit a logistic model, and return the full parameter estimates
def build_mnlogit (fullX, y):
# Find the non-redundant columns in the design matrix fullX
nFullParam = fullX.shape[1]
XtX = numpy.transpose(fullX).dot(fullX)
invXtX, aliasParam, nonAliasParam = SWEEPOperator(pDim = nFullParam, inputM = XtX, tol = 1e-13)
# Build a multinomial logistic model
X = fullX.iloc[:, list(nonAliasParam)]
logit = stats.MNLogit(y, X)
thisFit = logit.fit(method='ncg', maxiter = 1000, xtol = 1e-8,
full_output = True, disp = True)
thisParameter = thisFit.params
thisLLK = thisFit.llf
# The number of free parameters
y_category = y.cat.categories
nYCat = len(y_category)
thisDF = len(nonAliasParam) * (nYCat - 1)
# Return model statistics
return (thisLLK, thisDF, thisParameter, thisFit)
inputData = pandas.read_csv('C:\\Users\\minlam\\Documents\\IIT\\Machine Learning\\Data\\policy_2001.csv',
delimiter=',',
usecols = ['CLAIM_FLAG', 'CREDIT_SCORE_BAND', 'BLUEBOOK_1000', 'CUST_LOYALTY', 'MVR_PTS', 'TIF', 'TRAVTIME'])
# Print number of missing values per variable
print('Number of Missing Values:')
print(pandas.Series.sort_index(inputData.isna().sum()))
# Specify CLAIM_FLAG as a categorical variable
inputData['CLAIM_FLAG'] = inputData['CLAIM_FLAG'].astype('category')
y_category = inputData['CLAIM_FLAG'].cat.categories
nYCat = len(y_category)
# Specify CREDIT_SCORE_BAND as a categorical variable
inputData['CREDIT_SCORE_BAND'] = inputData['CREDIT_SCORE_BAND'].astype('category')
# Create Training and Test partitions
policy_train, policy_test = train_test_split(inputData, test_size = 0.33, random_state = 20201014, stratify = inputData['CLAIM_FLAG'])
nObs_train = policy_train.shape[0]
nObs_test = policy_test.shape[0]
# Build the logistic model
y = policy_train['CLAIM_FLAG']
# Train a Logistic Regression model using the Forward Selection method
devianceTable = pandas.DataFrame()
u = pandas.DataFrame()
# Step 0: Intercept only model
u = y.isnull()
designX = pandas.DataFrame(u.where(u, 1)).rename(columns = {'CLAIM_FLAG': 'const'})
LLK0, DF0, fullParams0, thisFit = build_mnlogit (designX, y)
devianceTable = devianceTable.append([[0, 'Intercept', DF0, LLK0, None, None, None]])
# Consider Model 1 is CLAIM_FLAG = Intercept + <predictor>
predList = ['CREDIT_SCORE_BAND', 'BLUEBOOK_1000', 'CUST_LOYALTY', 'MVR_PTS', 'TIF', 'TRAVTIME']
step = 1.0
for pred in predList:
step += 0.1
thisVar = policy_train[pred]
dType = thisVar.dtypes.name
if (dType == 'category'):
designX = pandas.get_dummies(thisVar)
else:
designX = thisVar
designX = stats.add_constant(designX, prepend=True)
LLK1, DF1, fullParams1, thisFit = build_mnlogit (designX, y)
testDev = 2.0 * (LLK1 - LLK0)
testDF = DF1 - DF0
testPValue = scipy.stats.chi2.sf(testDev, testDF)
devianceTable = devianceTable.append([[step, 'Intercept + ' + pred,
DF1, LLK1, testDev, testDF, testPValue]])
# Step 1: Model is CLAIM_FLAG = Intercept + MVR_PTS
designX = policy_train[['MVR_PTS']]
designX = stats.add_constant(designX, prepend=True)
LLK0, DF0, fullParams0, thisFit = build_mnlogit (designX, y)
devianceTable = devianceTable.append([[1, 'Intercept + MVR_PTS',
DF0, LLK0, None, None, None]])
# Consider Model 2 is CLAIM_FLAG = Intercept + MVR_PTS + <predictor>
predList = ['CREDIT_SCORE_BAND', 'BLUEBOOK_1000', 'CUST_LOYALTY', 'TIF', 'TRAVTIME']
step = 2.0
for pred in predList:
step += 0.1
designX = policy_train[['MVR_PTS']]
thisVar = policy_train[pred]
dType = thisVar.dtypes.name
if (dType == 'category'):
designX = designX.join(pandas.get_dummies(thisVar))
else:
designX = designX.join(thisVar)
designX = stats.add_constant(designX, prepend=True)
LLK1, DF1, fullParams1, thisFit = build_mnlogit (designX, y)
testDev = 2.0 * (LLK1 - LLK0)
testDF = DF1 - DF0
testPValue = scipy.stats.chi2.sf(testDev, testDF)
devianceTable = devianceTable.append([[step, 'Intercept + MVR_PTS + ' + pred,
DF1, LLK1, testDev, testDF, testPValue]])
# Step 2: Model is CLAIM_FLAG = Intercept + MVR_PTS + BLUEBOOK_1000
designX = policy_train[['MVR_PTS','BLUEBOOK_1000']]
designX = stats.add_constant(designX, prepend=True)
LLK0, DF0, fullParams0, thisFit = build_mnlogit (designX, y)
devianceTable = devianceTable.append([[2, 'Intercept + MVR_PTS + BLUEBOOK_1000',
DF0, LLK0, None, None, None]])
# Consider Model 2 is CLAIM_FLAG = Intercept + MVR_PTS + BLUEBOOK_1000 + <predictor>
predList = ['CREDIT_SCORE_BAND', 'CUST_LOYALTY', 'TIF', 'TRAVTIME']
step = 3.0
for pred in predList:
step += 0.1
designX = policy_train[['MVR_PTS','BLUEBOOK_1000']]
thisVar = policy_train[pred]
dType = thisVar.dtypes.name
if (dType == 'category'):
designX = designX.join(pandas.get_dummies(thisVar))
else:
designX = designX.join(thisVar)
designX = stats.add_constant(designX, prepend=True)
LLK1, DF1, fullParams1, thisFit = build_mnlogit (designX, y)
testDev = 2.0 * (LLK1 - LLK0)
testDF = DF1 - DF0
testPValue = scipy.stats.chi2.sf(testDev, testDF)
devianceTable = devianceTable.append([[step, 'Intercept + MVR_PTS + BLUEBOOK_1000 + ' + pred,
DF1, LLK1, testDev, testDF, testPValue]])
# Step 3: Model is CLAIM_FLAG = Intercept + MVR_PTS + BLUEBOOK_1000 + TRAVTIME
designX = policy_train[['MVR_PTS','BLUEBOOK_1000','TRAVTIME']]
designX = stats.add_constant(designX, prepend=True)
LLK0, DF0, fullParams0, thisFit = build_mnlogit (designX, y)
devianceTable = devianceTable.append([[3, 'Intercept + MVR_PTS + BLUEBOOK_1000 + TRAVTIME',
DF0, LLK0, None, None, None]])
# Consider Model 2 is CLAIM_FLAG = Intercept + MVR_PTS + BLUEBOOK_1000 + TRAVTIME + <predictor>
predList = ['CREDIT_SCORE_BAND', 'CUST_LOYALTY', 'TIF']
step = 4.0
for pred in predList:
step += 0.1
designX = policy_train[['MVR_PTS','BLUEBOOK_1000','TRAVTIME']]
thisVar = policy_train[pred]
dType = thisVar.dtypes.name
if (dType == 'category'):
designX = designX.join(pandas.get_dummies(thisVar))
else:
designX = designX.join(thisVar)
designX = stats.add_constant(designX, prepend=True)
LLK1, DF1, fullParams1, thisFit = build_mnlogit (designX, y)
testDev = 2.0 * (LLK1 - LLK0)
testDF = DF1 - DF0
testPValue = scipy.stats.chi2.sf(testDev, testDF)
devianceTable = devianceTable.append([[step, 'Intercept + MVR_PTS + BLUEBOOK_1000 + TRAVTIME + ' + pred,
DF1, LLK1, testDev, testDF, testPValue]])
# Final Model is CLAIM_FLAG = Intercept + MVR_PTS + BLUEBOOK_1000 + TRAVTIME
y = policy_train['CLAIM_FLAG']
designX = policy_train[['MVR_PTS','BLUEBOOK_1000','TRAVTIME']]
designX = stats.add_constant(designX, prepend=True)
LLK0, DF0, fullParams0, thisFit = build_mnlogit (designX, y)
# Apply the Final Model to the Testing partition
X = policy_test[['MVR_PTS','BLUEBOOK_1000','TRAVTIME']]
X = stats.add_constant(X, prepend=True)
yPredProb = thisFit.predict(X)
y = policy_test['CLAIM_FLAG']
# Calculate the Area Under Curve value for the Testing partition
testAUC = metrics.roc_auc_score(y, yPredProb[1]) | [
"numpy.diagonal",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"statsmodels.api.MNLogit",
"sklearn.metrics.roc_auc_score",
"pandas.get_dummies",
"numpy.array",
"scipy.stats.chi2.sf",
"statsmodels.api.add_constant",
"pandas.DataFrame",
"numpy.transpose"
] | [((2177, 2407), 'pandas.read_csv', 'pandas.read_csv', (['"""C:\\\\Users\\\\minlam\\\\Documents\\\\IIT\\\\Machine Learning\\\\Data\\\\policy_2001.csv"""'], {'delimiter': '""","""', 'usecols': "['CLAIM_FLAG', 'CREDIT_SCORE_BAND', 'BLUEBOOK_1000', 'CUST_LOYALTY',\n 'MVR_PTS', 'TIF', 'TRAVTIME']"}), "(\n 'C:\\\\Users\\\\minlam\\\\Documents\\\\IIT\\\\Machine Learning\\\\Data\\\\policy_2001.csv'\n , delimiter=',', usecols=['CLAIM_FLAG', 'CREDIT_SCORE_BAND',\n 'BLUEBOOK_1000', 'CUST_LOYALTY', 'MVR_PTS', 'TIF', 'TRAVTIME'])\n", (2192, 2407), False, 'import pandas\n'), ((3005, 3110), 'sklearn.model_selection.train_test_split', 'train_test_split', (['inputData'], {'test_size': '(0.33)', 'random_state': '(20201014)', 'stratify': "inputData['CLAIM_FLAG']"}), "(inputData, test_size=0.33, random_state=20201014, stratify\n =inputData['CLAIM_FLAG'])\n", (3021, 3110), False, 'from sklearn.model_selection import train_test_split\n'), ((3337, 3355), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (3353, 3355), False, 'import pandas\n'), ((3363, 3381), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (3379, 3381), False, 'import pandas\n'), ((4547, 4588), 'statsmodels.api.add_constant', 'stats.add_constant', (['designX'], {'prepend': '(True)'}), '(designX, prepend=True)\n', (4565, 4588), True, 'import statsmodels.api as stats\n'), ((5778, 5819), 'statsmodels.api.add_constant', 'stats.add_constant', (['designX'], {'prepend': '(True)'}), '(designX, prepend=True)\n', (5796, 5819), True, 'import statsmodels.api as stats\n'), ((7078, 7119), 'statsmodels.api.add_constant', 'stats.add_constant', (['designX'], {'prepend': '(True)'}), '(designX, prepend=True)\n', (7096, 7119), True, 'import statsmodels.api as stats\n'), ((8443, 8484), 'statsmodels.api.add_constant', 'stats.add_constant', (['designX'], {'prepend': '(True)'}), '(designX, prepend=True)\n', (8461, 8484), True, 'import statsmodels.api as stats\n'), ((8661, 8696), 'statsmodels.api.add_constant', 
'stats.add_constant', (['X'], {'prepend': '(True)'}), '(X, prepend=True)\n', (8679, 8696), True, 'import statsmodels.api as stats\n'), ((8841, 8879), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['y', 'yPredProb[1]'], {}), '(y, yPredProb[1])\n', (8862, 8879), True, 'import sklearn.metrics as metrics\n'), ((453, 502), 'numpy.array', 'numpy.array', (['inputM'], {'copy': '(True)', 'dtype': 'numpy.float'}), '(inputM, copy=True, dtype=numpy.float)\n', (464, 502), False, 'import numpy\n'), ((520, 537), 'numpy.diagonal', 'numpy.diagonal', (['A'], {}), '(A)\n', (534, 537), False, 'import numpy\n'), ((1710, 1729), 'statsmodels.api.MNLogit', 'stats.MNLogit', (['y', 'X'], {}), '(y, X)\n', (1723, 1729), True, 'import statsmodels.api as stats\n'), ((4067, 4108), 'statsmodels.api.add_constant', 'stats.add_constant', (['designX'], {'prepend': '(True)'}), '(designX, prepend=True)\n', (4085, 4108), True, 'import statsmodels.api as stats\n'), ((4250, 4286), 'scipy.stats.chi2.sf', 'scipy.stats.chi2.sf', (['testDev', 'testDF'], {}), '(testDev, testDF)\n', (4269, 4286), False, 'import scipy\n'), ((5256, 5297), 'statsmodels.api.add_constant', 'stats.add_constant', (['designX'], {'prepend': '(True)'}), '(designX, prepend=True)\n', (5274, 5297), True, 'import statsmodels.api as stats\n'), ((5439, 5475), 'scipy.stats.chi2.sf', 'scipy.stats.chi2.sf', (['testDev', 'testDF'], {}), '(testDev, testDF)\n', (5458, 5475), False, 'import scipy\n'), ((6518, 6559), 'statsmodels.api.add_constant', 'stats.add_constant', (['designX'], {'prepend': '(True)'}), '(designX, prepend=True)\n', (6536, 6559), True, 'import statsmodels.api as stats\n'), ((6701, 6737), 'scipy.stats.chi2.sf', 'scipy.stats.chi2.sf', (['testDev', 'testDF'], {}), '(testDev, testDF)\n', (6720, 6737), False, 'import scipy\n'), ((7839, 7880), 'statsmodels.api.add_constant', 'stats.add_constant', (['designX'], {'prepend': '(True)'}), '(designX, prepend=True)\n', (7857, 7880), True, 'import statsmodels.api as stats\n'), 
((8022, 8058), 'scipy.stats.chi2.sf', 'scipy.stats.chi2.sf', (['testDev', 'testDF'], {}), '(testDev, testDF)\n', (8041, 8058), False, 'import scipy\n'), ((3990, 4017), 'pandas.get_dummies', 'pandas.get_dummies', (['thisVar'], {}), '(thisVar)\n', (4008, 4017), False, 'import pandas\n'), ((1474, 1496), 'numpy.transpose', 'numpy.transpose', (['fullX'], {}), '(fullX)\n', (1489, 1496), False, 'import numpy\n'), ((5164, 5191), 'pandas.get_dummies', 'pandas.get_dummies', (['thisVar'], {}), '(thisVar)\n', (5182, 5191), False, 'import pandas\n'), ((6426, 6453), 'pandas.get_dummies', 'pandas.get_dummies', (['thisVar'], {}), '(thisVar)\n', (6444, 6453), False, 'import pandas\n'), ((7747, 7774), 'pandas.get_dummies', 'pandas.get_dummies', (['thisVar'], {}), '(thisVar)\n', (7765, 7774), False, 'import pandas\n')] |
#
# @Author: kuroitu (2020)
# @email: <EMAIL>
#
import numpy as np
from ..dual import *
##########
# 指数関数 exponential functions
#######
def power(obj, n):
obj = to_dual(obj)
return obj ** n
def square(obj):
obj = to_dual(obj)
return obj ** 2
def sqrt(obj):
obj = to_dual(obj)
return obj ** 0.5
def cbrt(obj):
obj = to_dual(obj)
return obj ** (1 / 3)
def exp(obj):
obj = to_dual(obj)
return Dual(np.exp(obj.re), obj.im * np.exp(obj.re))
def exp2(obj):
obj = to_dual(obj)
return Dual(np.exp2(obj.re), obj.im * 2 ** obj.re * np.log(2))
def expm1(obj):
obj = to_dual(obj)
return Dual(np.expm1(obj.re), obj.im * np.exp(obj.re))
| [
"numpy.exp",
"numpy.log",
"numpy.expm1",
"numpy.exp2"
] | [((449, 463), 'numpy.exp', 'np.exp', (['obj.re'], {}), '(obj.re)\n', (455, 463), True, 'import numpy as np\n'), ((546, 561), 'numpy.exp2', 'np.exp2', (['obj.re'], {}), '(obj.re)\n', (553, 561), True, 'import numpy as np\n'), ((654, 670), 'numpy.expm1', 'np.expm1', (['obj.re'], {}), '(obj.re)\n', (662, 670), True, 'import numpy as np\n'), ((474, 488), 'numpy.exp', 'np.exp', (['obj.re'], {}), '(obj.re)\n', (480, 488), True, 'import numpy as np\n'), ((586, 595), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (592, 595), True, 'import numpy as np\n'), ((681, 695), 'numpy.exp', 'np.exp', (['obj.re'], {}), '(obj.re)\n', (687, 695), True, 'import numpy as np\n')] |
import cv2
from PIL import Image
from matplotlib import pyplot as plt
#******************************************#
from sklearn.metrics import precision_score,recall_score,f1_score
from sklearn.metrics import accuracy_score,jaccard_score
#******************************************************************#
import os
import math
import time
import datetime
import shutil
import random
from random import shuffle
#***********************************#
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
#************************************#
from torch.utils.data import DataLoader
from MyDataset import MyDataset
#********************************************#
# from networks.fcn import VGGNet,FCNs,FCN8s
# from networks.network import U_Net
# from networks.Nested_UNet import Nested_UNet
# from networks.tiramisu import FCDenseNet103
# from networks.segnet import SegNet
# from networks.utlis import SegNet,PSPSegNet
# from networks.selfcorrection import SelfCorrection
#*****************************************************************#
num_classes=2
batch_size=2
num_epoch=10
factor=2
#******************* Model ******************************#
# Modelname='FCNs.model'
# Modelname='FCN8s.model'
Modelname='FCN8sResNet.model'
# Modelname='UNet.model'
# Modelname='N_UNet.model'
# Modelname='FCDenseNet103.model'
# Modelname='SegNetOrigin.model'
# Modelname='SegNetResNet.model'
# Modelname='PSPSegNet.model'
# Modelname='SelfCorrection.model'
# vgg_model = VGGNet(requires_grad=True, remove_fc=True)
# model = FCNs(pretrained_net=vgg_model, n_class=num_classes)
# model = FCN8s(pretrained_net=vgg_model, n_class=num_classes)
model = torch.hub.load('pytorch/vision:v0.6.0', 'fcn_resnet101', pretrained=False,num_classes=num_classes)
# model = U_Net(output_ch=num_classes)
# model = Nested_UNet(out_ch=num_classes)
# model = FCDenseNet103(n_classes=num_classes)
# model = SegNet(num_classes=2)
# model = PSPSegNet(num_classes=num_classes)
# model = SelfCorrection(num_classes=num_classes)
model.cuda()
weight_decay=1e-8
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=1e-1,momentum=0.99, weight_decay=weight_decay)
#****************************************************************#
#***************** Data ****************************#
# datapath='Data/Original_Data'
datapath='Data/Normalized_Data'
Train_ImageMainPath=datapath+'/train'
Test_ImageMainPath=datapath+'/test'
# img_size=(512,512) # for FCNs & SegNets
# ann_size=(512,512)
img_size=(256,256) # for DenseNet103 & UNet
ann_size=(256,256)
# img_size=(512,512) # for SelfCorrection
# ann_size=(128,128)
train_Dataset = MyDataset(Train_ImageMainPath,img_size=img_size,ann_siz=ann_size)
test_Dataset = MyDataset(Test_ImageMainPath,img_size=img_size,ann_siz=ann_size)
train_loader = DataLoader(train_Dataset,batch_size=batch_size,shuffle=True)
test_loader = DataLoader(test_Dataset,batch_size=1,shuffle=True)
#****************************************************************#
#***************************** Prepair for Training *************************#
print('')
print('No. of Training Data: ',len(train_Dataset))
print('Model: ',Modelname.split('.')[0])
if os.path.isfile('models/'+Modelname):
model.load_state_dict(torch.load('models/'+Modelname))
print('exist')
print('********************************')
#***************************************************************#
#**==================== Helper Functions ======================*#
def GetTestList(model,Test_loader):
model.eval()
All_AnnList=[]
All_PredList=[]
All_Acc = 0
All_F1 = 0
for _ , img, ann in test_loader :
AnnList=[]
PredList=[]
ann=np.asarray(ann.data.cpu(),dtype=np.int8)
All_AnnList.append(ann)
AnnList.append(ann)
# out=model(img)
# _,out2=torch.max(out,1)
out = model(img)['out'][0] # for FCN with ResNet101
out2 = out.argmax(0)
out2=out2.data.cpu().numpy()
out2=np.array(out2,dtype=np.uint8)
All_PredList.append(out2)
PredList.append(out2)
PredList=np.array(PredList)
AnnList=np.array(AnnList)
PredList=PredList.reshape((-1,))
AnnList=AnnList.reshape((-1,))
Acc = accuracy_score(AnnList, PredList)
All_Acc+=Acc
F1=f1_score(AnnList, PredList, average='macro')
All_F1 += F1
Avg_Acc = All_Acc/len(test_loader)
Avg_F1 = All_F1/len(test_loader)
print('======================================')
print('Avg_F1_score=', Avg_F1 )
print('Avg_accuracy_score=', Avg_Acc)
return All_AnnList,All_PredList
def calc_Accuracy(model,test_loader,Best_IoU,Best_F1):
AnnList,PredList = GetTestList(model,test_loader)
PredList= np.array(PredList)
AnnList= np.array(AnnList)
PredList= PredList.reshape((-1,))
AnnList= AnnList.reshape((-1,))
Acc = accuracy_score(AnnList, PredList)
F1 = f1_score(AnnList,PredList,average='macro')
av_iou = jaccard_score(AnnList,PredList,average='macro')
print('======================================')
print('av_iou=',av_iou,',Best_IoU=',Best_IoU)
print('precision_score=',precision_score(AnnList, PredList, average='macro') )
print('recall_score=',recall_score(AnnList, PredList, average='macro') )
print('F1_score=', F1 )
print('accuracy_score=', Acc)
if av_iou>Best_IoU:
Best_IoU=av_iou
# torch.save(model.state_dict(), 'models/'+Modelname)
# print('model has been changed',datetime.datetime.now())
# print('*********************************')
if F1>Best_F1:
Best_F1=F1
torch.save(model.state_dict(), 'models/'+Modelname)
print('model has been changed',datetime.datetime.now())
print('*********************************')
return Best_IoU,Best_F1
#***************************************************************#
print('')
Best_IoU=-1000
Best_F1=-1000
print('Before of Training')
print('-----------------')
print('')
Best_IoU,Best_F1= calc_Accuracy(model,test_loader,Best_IoU,Best_F1)
#*********************************************************************#
#*********************** Training ************************************#
Loss=[]
IoU_List=[]
F1_List=[]
print('')
print('Start Training')
print('-----------------')
for epoch in range(num_epoch):
total=0
counter=0
step_counter=0
model.train()
for _ , img, ann in train_loader :
loss=0
optimizer.zero_grad()
ann=ann.type(torch.int64)
# out=model(img)
out = model(img)['out'] # for FCN8s with ResNet101
loss=(5/1000.0)*criterion(out , ann)
loss.backward(retain_graph=True)
optimizer.step()
total=total+loss.data
step_counter=step_counter+1
if (step_counter) % 324==0 and step_counter >0:
print ('Epoch:', epoch+1,'step',step_counter,'Last Batch loss %.4f:' %loss.data )
Loss.append(1000*loss.data.cpu().numpy())
# if (step_counter) % 290 ==0 and step_counter >0:
# print('')
# print('Start Validating')
# print('-------------')
# Best_IoU,Best_F1= calc_Accuracy(model,test_loader,Best_IoU,Best_F1)
# model.train()
print('End of epoch')
print('')
print('Start Validating after end of epoch: ', epoch+1)
print('-------------')
Best_IoU,Best_F1= calc_Accuracy(model,test_loader,Best_IoU,Best_F1)
Loss.append(total/len(train_loader))
F1_List.append(Best_F1)
IoU_List.append(Best_IoU)
plt.subplot(3,1,1)
plt.title('Loss')
plt.plot(Loss,color='red')
plt.subplot(3,1,2)
plt.title('Dice')
plt.plot(F1_List,color='orange')
plt.subplot(3,1,3)
plt.title('Jaccard')
plt.plot(IoU_List,color='blue')
plt.savefig('training_plots'+'/'+Modelname.split('.')[0]+'.png')
plt.close() | [
"torch.hub.load",
"sklearn.metrics.f1_score",
"torch.nn.CrossEntropyLoss",
"torch.load",
"matplotlib.pyplot.plot",
"MyDataset.MyDataset",
"sklearn.metrics.precision_score",
"os.path.isfile",
"matplotlib.pyplot.close",
"numpy.array",
"sklearn.metrics.recall_score",
"sklearn.metrics.jaccard_scor... | [((1738, 1841), 'torch.hub.load', 'torch.hub.load', (['"""pytorch/vision:v0.6.0"""', '"""fcn_resnet101"""'], {'pretrained': '(False)', 'num_classes': 'num_classes'}), "('pytorch/vision:v0.6.0', 'fcn_resnet101', pretrained=False,\n num_classes=num_classes)\n", (1752, 1841), False, 'import torch\n'), ((2136, 2157), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2155, 2157), True, 'import torch.nn as nn\n'), ((2719, 2786), 'MyDataset.MyDataset', 'MyDataset', (['Train_ImageMainPath'], {'img_size': 'img_size', 'ann_siz': 'ann_size'}), '(Train_ImageMainPath, img_size=img_size, ann_siz=ann_size)\n', (2728, 2786), False, 'from MyDataset import MyDataset\n'), ((2800, 2866), 'MyDataset.MyDataset', 'MyDataset', (['Test_ImageMainPath'], {'img_size': 'img_size', 'ann_siz': 'ann_size'}), '(Test_ImageMainPath, img_size=img_size, ann_siz=ann_size)\n', (2809, 2866), False, 'from MyDataset import MyDataset\n'), ((2881, 2943), 'torch.utils.data.DataLoader', 'DataLoader', (['train_Dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(train_Dataset, batch_size=batch_size, shuffle=True)\n', (2891, 2943), False, 'from torch.utils.data import DataLoader\n'), ((2956, 3008), 'torch.utils.data.DataLoader', 'DataLoader', (['test_Dataset'], {'batch_size': '(1)', 'shuffle': '(True)'}), '(test_Dataset, batch_size=1, shuffle=True)\n', (2966, 3008), False, 'from torch.utils.data import DataLoader\n'), ((3258, 3295), 'os.path.isfile', 'os.path.isfile', (["('models/' + Modelname)"], {}), "('models/' + Modelname)\n", (3272, 3295), False, 'import os\n'), ((7759, 7779), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (7770, 7779), True, 'from matplotlib import pyplot as plt\n'), ((7778, 7795), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss"""'], {}), "('Loss')\n", (7787, 7795), True, 'from matplotlib import pyplot as plt\n'), ((7796, 7823), 'matplotlib.pyplot.plot', 'plt.plot', (['Loss'], 
{'color': '"""red"""'}), "(Loss, color='red')\n", (7804, 7823), True, 'from matplotlib import pyplot as plt\n'), ((7824, 7844), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (7835, 7844), True, 'from matplotlib import pyplot as plt\n'), ((7843, 7860), 'matplotlib.pyplot.title', 'plt.title', (['"""Dice"""'], {}), "('Dice')\n", (7852, 7860), True, 'from matplotlib import pyplot as plt\n'), ((7861, 7894), 'matplotlib.pyplot.plot', 'plt.plot', (['F1_List'], {'color': '"""orange"""'}), "(F1_List, color='orange')\n", (7869, 7894), True, 'from matplotlib import pyplot as plt\n'), ((7895, 7915), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (7906, 7915), True, 'from matplotlib import pyplot as plt\n'), ((7914, 7934), 'matplotlib.pyplot.title', 'plt.title', (['"""Jaccard"""'], {}), "('Jaccard')\n", (7923, 7934), True, 'from matplotlib import pyplot as plt\n'), ((7935, 7967), 'matplotlib.pyplot.plot', 'plt.plot', (['IoU_List'], {'color': '"""blue"""'}), "(IoU_List, color='blue')\n", (7943, 7967), True, 'from matplotlib import pyplot as plt\n'), ((8034, 8045), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8043, 8045), True, 'from matplotlib import pyplot as plt\n'), ((4862, 4880), 'numpy.array', 'np.array', (['PredList'], {}), '(PredList)\n', (4870, 4880), True, 'import numpy as np\n'), ((4894, 4911), 'numpy.array', 'np.array', (['AnnList'], {}), '(AnnList)\n', (4902, 4911), True, 'import numpy as np\n'), ((4998, 5031), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['AnnList', 'PredList'], {}), '(AnnList, PredList)\n', (5012, 5031), False, 'from sklearn.metrics import accuracy_score, jaccard_score\n'), ((5041, 5085), 'sklearn.metrics.f1_score', 'f1_score', (['AnnList', 'PredList'], {'average': '"""macro"""'}), "(AnnList, PredList, average='macro')\n", (5049, 5085), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((5097, 5146), 
'sklearn.metrics.jaccard_score', 'jaccard_score', (['AnnList', 'PredList'], {'average': '"""macro"""'}), "(AnnList, PredList, average='macro')\n", (5110, 5146), False, 'from sklearn.metrics import accuracy_score, jaccard_score\n'), ((3321, 3354), 'torch.load', 'torch.load', (["('models/' + Modelname)"], {}), "('models/' + Modelname)\n", (3331, 3354), False, 'import torch\n'), ((4087, 4117), 'numpy.array', 'np.array', (['out2'], {'dtype': 'np.uint8'}), '(out2, dtype=np.uint8)\n', (4095, 4117), True, 'import numpy as np\n'), ((4199, 4217), 'numpy.array', 'np.array', (['PredList'], {}), '(PredList)\n', (4207, 4217), True, 'import numpy as np\n'), ((4234, 4251), 'numpy.array', 'np.array', (['AnnList'], {}), '(AnnList)\n', (4242, 4251), True, 'import numpy as np\n'), ((4347, 4380), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['AnnList', 'PredList'], {}), '(AnnList, PredList)\n', (4361, 4380), False, 'from sklearn.metrics import accuracy_score, jaccard_score\n'), ((4413, 4457), 'sklearn.metrics.f1_score', 'f1_score', (['AnnList', 'PredList'], {'average': '"""macro"""'}), "(AnnList, PredList, average='macro')\n", (4421, 4457), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((5277, 5328), 'sklearn.metrics.precision_score', 'precision_score', (['AnnList', 'PredList'], {'average': '"""macro"""'}), "(AnnList, PredList, average='macro')\n", (5292, 5328), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((5358, 5406), 'sklearn.metrics.recall_score', 'recall_score', (['AnnList', 'PredList'], {'average': '"""macro"""'}), "(AnnList, PredList, average='macro')\n", (5370, 5406), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((5837, 5860), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5858, 5860), False, 'import datetime\n')] |
import cv2
import torch
from model.model import MobileHairNet
from config.config import get_config
import os
import numpy as np
from glob import glob
def get_mask(image, net, size = 224):
image_h, image_w = image.shape[0], image.shape[1]
down_size_image = cv2.resize(image, (size, size))
b, g, r = cv2.split(down_size_image)
down_size_image = cv2.merge([r,g,b])
down_size_image = torch.from_numpy(down_size_image).float().div(255.0).unsqueeze(0)
down_size_image = np.transpose(down_size_image, (0, 3, 1, 2)).to(device)
mask = net(down_size_image)
mask = torch.squeeze(mask).argmax(0)
mask_cv2 = mask.data.cpu().numpy().astype(np.uint8) * 255
mask_cv2 = cv2.resize(mask_cv2, (image_w, image_h))
return mask_cv2
def alpha_image(image, mask, alpha=0.1):
color = np.zeros((mask.shape[0], mask.shape[1], 3))
color[np.where(mask != 0)] = [0, 130, 255]
alpha_hand = ((1 - alpha) * image + alpha * color).astype(np.uint8)
alpha_hand = cv2.bitwise_and(alpha_hand, alpha_hand, mask=mask)
return cv2.add(alpha_hand, image)
if __name__ == "__main__":
config = get_config()
pretrained = glob(os.path.join(config.checkpoint_dir, f"MobileHairNet_epoch-{config.epoch-1}.pth"))[-1]
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = MobileHairNet().to(device)
net.load_state_dict(torch.load(pretrained, map_location=device))
cam = cv2.VideoCapture(0)
if not cam.isOpened():
raise Exception("webcam is not detected")
while (True):
# ret : frame capture결과(boolean)
# frame : Capture한 frame
ret, image = cam.read()
if (ret):
mask = get_mask(image, net)
add = alpha_image(image, mask)
cv2.imshow('frame', add)
if cv2.waitKey(1) & 0xFF == ord(chr(27)):
break
cam.release()
cv2.destroyAllWindows()
| [
"model.model.MobileHairNet",
"cv2.imshow",
"torch.from_numpy",
"torch.cuda.is_available",
"cv2.destroyAllWindows",
"torch.squeeze",
"numpy.where",
"cv2.waitKey",
"cv2.add",
"cv2.merge",
"cv2.split",
"cv2.resize",
"numpy.transpose",
"config.config.get_config",
"torch.load",
"cv2.bitwise... | [((267, 298), 'cv2.resize', 'cv2.resize', (['image', '(size, size)'], {}), '(image, (size, size))\n', (277, 298), False, 'import cv2\n'), ((313, 339), 'cv2.split', 'cv2.split', (['down_size_image'], {}), '(down_size_image)\n', (322, 339), False, 'import cv2\n'), ((362, 382), 'cv2.merge', 'cv2.merge', (['[r, g, b]'], {}), '([r, g, b])\n', (371, 382), False, 'import cv2\n'), ((697, 737), 'cv2.resize', 'cv2.resize', (['mask_cv2', '(image_w, image_h)'], {}), '(mask_cv2, (image_w, image_h))\n', (707, 737), False, 'import cv2\n'), ((814, 857), 'numpy.zeros', 'np.zeros', (['(mask.shape[0], mask.shape[1], 3)'], {}), '((mask.shape[0], mask.shape[1], 3))\n', (822, 857), True, 'import numpy as np\n'), ((994, 1044), 'cv2.bitwise_and', 'cv2.bitwise_and', (['alpha_hand', 'alpha_hand'], {'mask': 'mask'}), '(alpha_hand, alpha_hand, mask=mask)\n', (1009, 1044), False, 'import cv2\n'), ((1057, 1083), 'cv2.add', 'cv2.add', (['alpha_hand', 'image'], {}), '(alpha_hand, image)\n', (1064, 1083), False, 'import cv2\n'), ((1126, 1138), 'config.config.get_config', 'get_config', ([], {}), '()\n', (1136, 1138), False, 'from config.config import get_config\n'), ((1439, 1458), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1455, 1458), False, 'import cv2\n'), ((1900, 1923), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1921, 1923), False, 'import cv2\n'), ((868, 887), 'numpy.where', 'np.where', (['(mask != 0)'], {}), '(mask != 0)\n', (876, 887), True, 'import numpy as np\n'), ((1384, 1427), 'torch.load', 'torch.load', (['pretrained'], {'map_location': 'device'}), '(pretrained, map_location=device)\n', (1394, 1427), False, 'import torch\n'), ((491, 534), 'numpy.transpose', 'np.transpose', (['down_size_image', '(0, 3, 1, 2)'], {}), '(down_size_image, (0, 3, 1, 2))\n', (503, 534), True, 'import numpy as np\n'), ((590, 609), 'torch.squeeze', 'torch.squeeze', (['mask'], {}), '(mask)\n', (603, 609), False, 'import torch\n'), ((1161, 1247), 
'os.path.join', 'os.path.join', (['config.checkpoint_dir', 'f"""MobileHairNet_epoch-{config.epoch - 1}.pth"""'], {}), "(config.checkpoint_dir,\n f'MobileHairNet_epoch-{config.epoch - 1}.pth')\n", (1173, 1247), False, 'import os\n'), ((1285, 1310), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1308, 1310), False, 'import torch\n'), ((1333, 1348), 'model.model.MobileHairNet', 'MobileHairNet', ([], {}), '()\n', (1346, 1348), False, 'from model.model import MobileHairNet\n'), ((1776, 1800), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'add'], {}), "('frame', add)\n", (1786, 1800), False, 'import cv2\n'), ((1816, 1830), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1827, 1830), False, 'import cv2\n'), ((403, 436), 'torch.from_numpy', 'torch.from_numpy', (['down_size_image'], {}), '(down_size_image)\n', (419, 436), False, 'import torch\n')] |
import os
import gc
import utils
import pandas as pd
import numpy as np
import pickle as pkl
from datetime import date
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential, Model
from keras.layers import Input, Embedding, Dense, Flatten, Concatenate, Dot, Reshape, Add, Subtract
from keras import backend as K
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from keras.regularizers import l2
utils.start(__file__)
#==============================================================================
PREF = 'f503'
KEY = 'card_id'
SEED = 18
np.random.seed(SEED)
# =============================================================================
# def
# =============================================================================
def get_embed(x_input, x_size, k_latent):
if x_size > 0:
embed = Embedding(x_size, k_latent, input_length=1,
embeddings_regularizer=l2(embedding_reg))(x_input)
embed = Flatten()(embed)
else:
embed = Dense(k_latent, kernel_regularizer=l2(embedding_reg))(x_input)
return embed
def build_model_1(X, fsize):
dim_input = len(fsize)
input_x = [Input(shape=(1,)) for i in range(dim_input)]
biases = [get_embed(x, size, 1) for (x, size) in zip(input_x, fsize)]
factors = [get_embed(x, size, k_latent)
for (x, size) in zip(input_x, fsize)]
s = Add()(factors)
diffs = [Subtract()([s, x]) for x in factors]
dots = [Dot(axes=1)([d, x]) for d, x in zip(diffs, factors)]
x = Concatenate()(biases + dots)
x = BatchNormalization()(x)
output = Dense(1, activation='relu', kernel_regularizer=l2(kernel_reg))(x)
model = Model(inputs=input_x, outputs=[output])
opt = Adam(clipnorm=0.5)
model.compile(optimizer=opt, loss='mean_squared_error')
output_f = factors + biases
model_features = Model(inputs=input_x, outputs=output_f)
return model, model_features
# =============================================================================
# features
# =============================================================================
# features = []
# features += [f'f10{i}.pkl' for i in (2, )]
# features += [f'f11{i}_{j}.pkl' for i in (1, 2)
# for j in ('Y', 'N')]
# features += [f'f12{i}.pkl' for i in (1, 2)]
# features += [f'f13{i}.pkl' for i in (1, 2)]
# features += [f'f20{i}.pkl' for i in (2, 3)]
# features += [f'f21{i}_{j}.pkl' for i in (1, 2)
# for j in ('Y', 'N')]
# features += [f'f23{i}.pkl' for i in (1, 2)]
# features += [f'f40{i}.pkl' for i in (2, 3)]
# features += [f'f41{i}_{j}.pkl' for i in (1, 2)
# for j in ('Y', 'N')]
# features += [f'f42{i}.pkl' for i in (1, 2)]
# features += [f'f50{i}.pkl' for i in (2, )]
# features = os.listdir('../feature')
# =============================================================================
# read data and features
# =============================================================================
train = pd.read_csv(os.path.join(PATH, 'train.csv'))
test = pd.read_csv(os.path.join(PATH, 'test.csv'))
for f in tqdm(features):
t = pd.read_pickle(os.path.join('..', 'feature', f))
train = pd.merge(train, t, on=KEY, how='left')
test = pd.merge(test, t, on=KEY, how='left')
# =============================================================================
# change date to int
# =============================================================================
cols = train.columns.values
for f in [
'new_purchase_date_max', 'new_purchase_date_min',
'hist_purchase_date_max', 'hist_purchase_date_min',
'Y_hist_auth_purchase_date_max', 'Y_hist_auth_purchase_date_min',
'N_hist_auth_purchase_date_max', 'N_hist_auth_purchase_date_min',
'Y_new_auth_purchase_date_max', 'Y_new_auth_purchase_date_min',
'N_new_auth_purchase_date_max', 'N_new_auth_purchase_date_min',
]:
if f in cols:
train[f] = train[f].astype(np.int64) * 1e-9
test[f] = test[f].astype(np.int64) * 1e-9
# =============================================================================
# concat train and test
# =============================================================================
df = pd.concat([train, test], axis=0, sort=False)
del train, test
gc.collect()
# =============================================================================
# main
# =============================================================================
features = ['feature_1', 'feature_2', 'feature_3']
fsize = [int(df[f].max()) + 1 for f in features]
X = df.groupby(features)['card_id'].count()
X = X.unstack().fillna(0)
X = X.stack().astype('float32')
X = np.log1p(X).reset_index()
X.columns = features + ['num']
X_train = [X[f].values for f in features]
y_train = (X[['num']].values).astype('float32')
k_latent = 1
embedding_reg = 0.0002
kernel_reg = 0.1
model, model_features = build_model_1(X_train, fsize)
n_epochs = 1000
batch_size = 2 ** 17
model, model_features = build_model_1(X_train, fsize)
earlystopper = EarlyStopping(patience=0, verbose=50)
history = model.fit(
X_train, y_train,
epochs=n_epochs, batch_size=batch_size, verbose=1, shuffle=True,
validation_data=(X_train, y_train),
callbacks=[earlystopper],
)
model.save('weights/{}_weights.h5'.format(str(date.today()).replace('-', '')))
X_pred = model_features.predict(X_train, batch_size=batch_size)
factors = X_pred[:len(features)]
biases = X_pred[len(features):2*len(features)]
for f, X_p in zip(features, factors):
for i in range(k_latent):
X['%s_fm_factor_%d' % (f, i)] = X_p[:, i]
for f, X_p in zip(features, biases):
X['%s_fm_bias' % (f)] = X_p[:, 0]
df = pd.merge(df, X, on=features, how='left')
df = df.drop(features, axis=1)
df.to_pickle(f'../feature/{PREF}.pkl')
#==============================================================================
utils.end(__file__)
| [
"utils.start",
"keras.layers.Subtract",
"keras.layers.Dot",
"numpy.random.seed",
"keras.callbacks.EarlyStopping",
"keras.models.Model",
"keras.layers.Add",
"keras.optimizers.Adam",
"keras.layers.Flatten",
"keras.layers.normalization.BatchNormalization",
"keras.layers.Concatenate",
"pandas.merg... | [((464, 485), 'utils.start', 'utils.start', (['__file__'], {}), '(__file__)\n', (475, 485), False, 'import utils\n'), ((608, 628), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (622, 628), True, 'import numpy as np\n'), ((4248, 4292), 'pandas.concat', 'pd.concat', (['[train, test]'], {'axis': '(0)', 'sort': '(False)'}), '([train, test], axis=0, sort=False)\n', (4257, 4292), True, 'import pandas as pd\n'), ((4309, 4321), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4319, 4321), False, 'import gc\n'), ((5064, 5101), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(0)', 'verbose': '(50)'}), '(patience=0, verbose=50)\n', (5077, 5101), False, 'from keras.callbacks import EarlyStopping\n'), ((5715, 5755), 'pandas.merge', 'pd.merge', (['df', 'X'], {'on': 'features', 'how': '"""left"""'}), "(df, X, on=features, how='left')\n", (5723, 5755), True, 'import pandas as pd\n'), ((5907, 5926), 'utils.end', 'utils.end', (['__file__'], {}), '(__file__)\n', (5916, 5926), False, 'import utils\n'), ((1729, 1768), 'keras.models.Model', 'Model', ([], {'inputs': 'input_x', 'outputs': '[output]'}), '(inputs=input_x, outputs=[output])\n', (1734, 1768), False, 'from keras.models import Sequential, Model\n'), ((1779, 1797), 'keras.optimizers.Adam', 'Adam', ([], {'clipnorm': '(0.5)'}), '(clipnorm=0.5)\n', (1783, 1797), False, 'from keras.optimizers import Adam\n'), ((1911, 1950), 'keras.models.Model', 'Model', ([], {'inputs': 'input_x', 'outputs': 'output_f'}), '(inputs=input_x, outputs=output_f)\n', (1916, 1950), False, 'from keras.models import Sequential, Model\n'), ((3061, 3092), 'os.path.join', 'os.path.join', (['PATH', '"""train.csv"""'], {}), "(PATH, 'train.csv')\n", (3073, 3092), False, 'import os\n'), ((3113, 3143), 'os.path.join', 'os.path.join', (['PATH', '"""test.csv"""'], {}), "(PATH, 'test.csv')\n", (3125, 3143), False, 'import os\n'), ((3240, 3278), 'pandas.merge', 'pd.merge', (['train', 't'], {'on': 'KEY', 'how': 
'"""left"""'}), "(train, t, on=KEY, how='left')\n", (3248, 3278), True, 'import pandas as pd\n'), ((3290, 3327), 'pandas.merge', 'pd.merge', (['test', 't'], {'on': 'KEY', 'how': '"""left"""'}), "(test, t, on=KEY, how='left')\n", (3298, 3327), True, 'import pandas as pd\n'), ((1209, 1226), 'keras.layers.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (1214, 1226), False, 'from keras.layers import Input, Embedding, Dense, Flatten, Concatenate, Dot, Reshape, Add, Subtract\n'), ((1436, 1441), 'keras.layers.Add', 'Add', ([], {}), '()\n', (1439, 1441), False, 'from keras.layers import Input, Embedding, Dense, Flatten, Concatenate, Dot, Reshape, Add, Subtract\n'), ((1577, 1590), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (1588, 1590), False, 'from keras.layers import Input, Embedding, Dense, Flatten, Concatenate, Dot, Reshape, Add, Subtract\n'), ((1614, 1634), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1632, 1634), False, 'from keras.layers.normalization import BatchNormalization\n'), ((3194, 3226), 'os.path.join', 'os.path.join', (['""".."""', '"""feature"""', 'f'], {}), "('..', 'feature', f)\n", (3206, 3226), False, 'import os\n'), ((4699, 4710), 'numpy.log1p', 'np.log1p', (['X'], {}), '(X)\n', (4707, 4710), True, 'import numpy as np\n'), ((1012, 1021), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1019, 1021), False, 'from keras.layers import Input, Embedding, Dense, Flatten, Concatenate, Dot, Reshape, Add, Subtract\n'), ((1465, 1475), 'keras.layers.Subtract', 'Subtract', ([], {}), '()\n', (1473, 1475), False, 'from keras.layers import Input, Embedding, Dense, Flatten, Concatenate, Dot, Reshape, Add, Subtract\n'), ((1515, 1526), 'keras.layers.Dot', 'Dot', ([], {'axes': '(1)'}), '(axes=1)\n', (1518, 1526), False, 'from keras.layers import Input, Embedding, Dense, Flatten, Concatenate, Dot, Reshape, Add, Subtract\n'), ((1698, 1712), 'keras.regularizers.l2', 'l2', (['kernel_reg'], {}), 
'(kernel_reg)\n', (1700, 1712), False, 'from keras.regularizers import l2\n'), ((968, 985), 'keras.regularizers.l2', 'l2', (['embedding_reg'], {}), '(embedding_reg)\n', (970, 985), False, 'from keras.regularizers import l2\n'), ((1090, 1107), 'keras.regularizers.l2', 'l2', (['embedding_reg'], {}), '(embedding_reg)\n', (1092, 1107), False, 'from keras.regularizers import l2\n'), ((5334, 5346), 'datetime.date.today', 'date.today', ([], {}), '()\n', (5344, 5346), False, 'from datetime import date\n')] |
"""This where implementations of individual operations live"""
from ..coreOperation import *
from ..coreNode import broadcast_shape, reduce_shape
import numpy as np
class FlattenFeaturesOperation(SingleInputOperation):
"""Flatten the axis greater than 0 to turn
dim > 2 tensors into 2d arrays
Attributes
----------
name : str
Name of the operation
result : np.array
Output of the operation
gradA : np.array
gradient with respect to inputA
inputA : ga.Operation
Operation feeding data A into this operation
nExamples : int
Number of examples in current batch
shape : tuple
shape of the output
"""
name = "FlattenFeaturesOperation"
def setShape(self):
"""Set the output shape"""
inpShapeSize = len(self.inputA.shape)
if (inpShapeSize >= 2):
self.nExamples = self.inputA.shape[0]
numFeatures = 1
for index in range(inpShapeSize - 1):
numFeatures *= self.inputA.shape[index + 1]
self.shape = (self.nExamples, numFeatures)
else:
self.nExamples = 1
self.shape = (self.nExamples, self.inputA.shape[0])
def perform(self, a):
"""Perform the flattening
Parameters
----------
a : np.array
Input data
Returns
-------
np.array
Output data
"""
return a.reshape(self.shape)
def performGradient(self, input=None):
"""Find out the gradient with respect to the parameter
Parameters
----------
input : int
placeholder variable since this operation has only one input
Returns
-------
np.array
Gradient propagated through this operation
"""
if (self.endNode):
grad = np.ones(self.inputA.shape)
else:
grad = np.zeros(self.inputA.shape)
for out in self.outputs:
grad += out.getGradient(self).reshape(self.inputA.shape)
return grad
class ReshapeFeaturesOperation(SingleInputOperation):
"""Gather features and reshape them, transform a 2d array
(nExamples, nFeatures) into a multidim array of
(nExamples, shape)
Attributes
----------
name : str
Name of the operation
result : np.array
Output of the operation
gradA : np.array
gradient with respect to inputA
inputA : ga.Operation
Operation feeding data A into this operation
nExamples : int
Number of examples in current batch
shape : tuple
shape of the output
exampleShape : tuple
shape of each example, Result of this operation is a matrix
with shape (nExamples, nFeatures in each examle)
"""
name = "ReshapeFeaturesOperation"
def __init__(self, inputA=None, exampleShape=0):
self.exampleShape = exampleShape
super().__init__(inputA)
self.setShape()
def setShape(self):
"""Set the output shape"""
inpShapeSize = len(self.inputA.shape)
if (inpShapeSize >= 2):
self.nExamples = self.inputA.shape[0]
self.shape = (self.nExamples, ) + self.exampleShape
else:
self.nExamples = 1
self.shape = (self.nExamples, ) + self.exampleShape
def perform(self, a):
"""Reshape the flatend array to desired shape
Parameters
----------
a : np.array
Input data
Returns
-------
np.array
Output data
"""
return a.reshape(self.shape)
def performGradient(self, input=None):
"""Find out the gradient with respect to the parameter
Parameters
----------
input : int
placeholder variable since this operation has only one input
Returns
-------
np.array
Gradient propagated through this operation
"""
if (self.endNode):
grad = np.ones(self.inputA.shape)
else:
grad = np.zeros(self.inputA.shape)
for out in self.outputs:
grad += out.getGradient(self).reshape(self.inputA.shape)
return grad
class SliceOperation(SingleInputOperation):
"""Performs array slicing using numpy index expressions
example for index_exp:
>>> x = np.arange(4).reshape(2, 2)
>>> indexExp = np.index_exp[0, :]
>>> x[indexExp]
array([0, 1])
see https://docs.scipy.org/doc/numpy/reference/generated/numpy.s_.html#numpy-s
for more information
Attributes
----------
name : str
Name of the operation
result : np.array
Output of the operation
gradA : np.array
gradient with respect to inputA
inputA : ga.Operation
Operation feeding data A into this operation
indexExp : np.index_exp
Index expression for slicing
shape : tuple
shape of the output
"""
name = "SliceOperation"
def __init__(self, inputA=None, indexExp=None):
if indexExp is None:
raise ValueError("Must provide index Expression as numpy.index_exp!")
self.indexExp = indexExp
super().__init__(inputA)
def setShape(self):
"""Set the output shape"""
testMat = np.zeros(self.inputA.shape)
result = testMat[self.indexExp]
self.shape = result.shape
def perform(self, a):
"""Reshape the flatend array to desired shape
Parameters
----------
a : np.array
Input data
Returns
-------
np.array
Output data
"""
return a[self.indexExp]
def performGradient(self, input=None):
"""Find out the gradient with respect to the parameter
Parameters
----------
input : int
placeholder variable since this operation has only one input
Returns
-------
np.array
Gradient propagated through this operation
"""
if (self.endNode):
grad = np.ones(self.inputA.shape)
else:
gradGather = np.zeros(self.shape)
for out in self.outputs:
gradGather += out.getGradient(self)
grad = np.zeros(self.inputA.shape)
grad[self.indexExp] = gradGather
return grad
| [
"numpy.zeros",
"numpy.ones"
] | [((5385, 5412), 'numpy.zeros', 'np.zeros', (['self.inputA.shape'], {}), '(self.inputA.shape)\n', (5393, 5412), True, 'import numpy as np\n'), ((1890, 1916), 'numpy.ones', 'np.ones', (['self.inputA.shape'], {}), '(self.inputA.shape)\n', (1897, 1916), True, 'import numpy as np\n'), ((1950, 1977), 'numpy.zeros', 'np.zeros', (['self.inputA.shape'], {}), '(self.inputA.shape)\n', (1958, 1977), True, 'import numpy as np\n'), ((4081, 4107), 'numpy.ones', 'np.ones', (['self.inputA.shape'], {}), '(self.inputA.shape)\n', (4088, 4107), True, 'import numpy as np\n'), ((4141, 4168), 'numpy.zeros', 'np.zeros', (['self.inputA.shape'], {}), '(self.inputA.shape)\n', (4149, 4168), True, 'import numpy as np\n'), ((6171, 6197), 'numpy.ones', 'np.ones', (['self.inputA.shape'], {}), '(self.inputA.shape)\n', (6178, 6197), True, 'import numpy as np\n'), ((6237, 6257), 'numpy.zeros', 'np.zeros', (['self.shape'], {}), '(self.shape)\n', (6245, 6257), True, 'import numpy as np\n'), ((6366, 6393), 'numpy.zeros', 'np.zeros', (['self.inputA.shape'], {}), '(self.inputA.shape)\n', (6374, 6393), True, 'import numpy as np\n')] |
#functions to help with running fiberassign, using SV3 parameters/targets
import fitsio
import numpy as np
from astropy.table import Table,join
# system
import os
import subprocess
import sys
import tempfile
import shutil
import re
# time
from time import time
from datetime import datetime, timedelta
#import some functions from fiberassign
#from fiberassign.assign import minimal_target_columns
#from fiberassign.fba_launch_io import (
# mv_temp2final,
# force_finite_pm,
# force_nonzero_refepoch,
# gaia_ref_epochs,
# mv_write_targets_out
#)
#from desitarget
import desitarget
from desitarget import io
from desitarget.mtl import inflate_ledger
#hardcode target directories; these are fixed
# DR9 sky-target catalog used for sky fiber assignment
skydir = '/global/cfs/cdirs/desi/target/catalogs/dr9/0.57.0/skies'
# DR9 SV3 resolved science-target catalogs
tdir = '/global/cfs/cdirs/desi/target/catalogs/dr9/0.57.0/targets/sv3/resolve/'
# AR default REF_EPOCH for PMRA=PMDEC=REF_EPOCH=0 objects
gaia_ref_epochs = {"dr2": 2015.5}
# minimal set of target-catalog columns to carry through when re-reading/
# inflating target files for fiberassign reruns (photometry, astrometry,
# target bits, and bookkeeping IDs)
minimal_target_columns= ['RELEASE','BRICKNAME','BRICKID','BRICK_OBJID','MORPHTYPE','RA',\
'DEC','EBV','FLUX_G','FLUX_R','FLUX_Z','FLUX_W1','FLUX_W2','FLUX_IVAR_G','FLUX_IVAR_R',\
'FLUX_IVAR_Z','FLUX_IVAR_W1','FLUX_IVAR_W2','FIBERFLUX_G','FIBERFLUX_R','FIBERFLUX_Z',\
'FIBERTOTFLUX_G','FIBERTOTFLUX_R','FIBERTOTFLUX_Z','REF_EPOCH','MASKBITS','SERSIC',\
'SHAPE_R','SHAPE_E1','SHAPE_E2','REF_ID','REF_CAT','GAIA_PHOT_G_MEAN_MAG',\
'GAIA_PHOT_BP_MEAN_MAG','GAIA_PHOT_RP_MEAN_MAG','PARALLAX','PMRA','PMDEC','PHOTSYS',\
'TARGETID','SUBPRIORITY','OBSCONDITIONS','PRIORITY_INIT','NUMOBS_INIT','SV3_DESI_TARGET',\
'SV3_BGS_TARGET','SV3_MWS_TARGET','SV3_SCND_TARGET']
def comp_neworig(tileid,dirn='/global/cfs/cdirs/desi/survey/catalogs/testfiberassign/SV3rerun/orig/'):
    """
    Check that a rerun fba file matches the original fiberassign output.

    Compares the TARGETID sets of the original fiberassign file (positioner
    rows with valid TARGETID) against the rerun fba file in dirn, printing
    the counts along the way.

    Args:
        tileid: tile number (int)
        dirn (optional): directory holding the rerun fba-*.fits files

    Returns:
        True if the two sets of assigned TARGETIDs agree, False otherwise
    """
    ts = str(tileid).zfill(6)
    fa = fitsio.read('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz')
    fn = fitsio.read(dirn+'fba-'+ts+'.fits')
    # rerun file: keep positioner rows with a real target assigned
    fn = fn[fn['DEVICE_TYPE'] == 'POS']
    fn = fn[fn['TARGETID'] >= 0]
    print(len(fn))
    # original file: keep rows with a real target assigned
    fa = fa[fa['TARGETID'] >= 0]
    print(len(fa))
    matched = np.isin(fn['TARGETID'], fa['TARGETID'])
    print(np.sum(matched))
    return bool(np.sum(matched) == len(fa) and len(fa) == len(fn))
def comp_neworig_tgt(tileid,dirn='/global/cfs/cdirs/desi/survey/catalogs/testfiberassign/SV3rerun/orig/'):
    """
    Check that the rerun matches the original, just comparing targets.

    Args:
        tileid: tile number (int)
        dirn (optional): directory holding the rerun fba-*.fits files;
            defaults to the path that used to be hard-coded, so existing
            callers are unaffected (now consistent with comp_neworig and
            comp_neworig_fba)

    Returns:
        True if the assigned-target TARGETIDs agree, False otherwise
    """
    ts = str(tileid).zfill(6)
    fa = fitsio.read('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz')
    fn = fitsio.read(dirn+'fba-'+ts+'.fits')
    # drop FA_TYPE 0 and 4 rows -- presumably unassigned/non-target
    # placements; confirm against the fiberassign FA_TYPE bit definitions
    wn = fn['FA_TYPE'] != 0
    wn &= fn['FA_TYPE'] != 4
    fn = fn[wn]
    print(len(fn))
    wa = fa['FA_TYPE'] != 0
    wa &= fa['FA_TYPE'] != 4
    fa = fa[wa]
    print(len(fa))
    ws = np.isin(fn['TARGETID'],fa['TARGETID'])
    print(np.sum(ws))
    if np.sum(ws) == len(fa) and len(fa) == len(fn):
        return True
    else:
        return False
def comp_neworig_fba(tileid,dirn = '/global/cfs/cdirs/desi/survey/catalogs/testfiberassign/SV3rerun/orig/'):
    """
    Check that the rerun matches the original, comparing fba files directly.

    Args:
        tileid: tile number (int)
        dirn (optional): directory holding the rerun fba-*.fits files

    Returns:
        True if the TARGETID columns of the original and rerun fba files
        are identical (same values, same order), False otherwise
    """
    ts = str(tileid).zfill(6)
    #get info from origin fiberassign file
    fht = fitsio.read_header('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz')
    indir = fht['OUTDIR']
    # tiles run through the data systems recorded a /data/datasystems root;
    # remap to the cfs mirror keyed by the PMTIME date
    if fht['DESIROOT'] == '/data/datasystems':
        indir = '/global/cfs/cdirs/desi/survey/fiberassign/SV3/' +fht['PMTIME'][:10].translate({ord('-'): None}) +'/'
    # probe for the target file; if absent, fall back to the previous day's dir
    try:
        fitsio.read(indir+ts+'-targ.fits')
    except Exception:
        date = int(fht['PMTIME'][:10].translate({ord('-'): None}))-1
        indir = '/global/cfs/cdirs/desi/survey/fiberassign/SV3/'+str(date)+'/'
    fa = fitsio.read(indir+'fba-'+ts+'.fits')
    fn = fitsio.read(dirn+'fba-'+ts+'.fits')
    return np.array_equal(fa['TARGETID'],fn['TARGETID'])
def redo_fba_fromorig(tileid,outdir=None,faver=None):
    '''
    Simply try to reproduce fiberassign from the files in the fiberassign
    directory: locate the original input files for the tile and write a
    bash script fa-<tile>.sh in outdir that re-runs fba_run with them.

    Args:
        tileid: tile number (int)
        outdir (optional): where to write the script/outputs; defaults to
            the SV3rerun "orig" directory
        faver (optional): fiberassign version as a *string* (e.g. "2.3.0");
            if None it is read from the FA_VER header keyword

    Returns:
        None on success, or an error string if required inputs are missing
    '''
    ts = str(tileid).zfill(6)
    #get info from origin fiberassign file
    fht = fitsio.read_header('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz')
    indir = fht['OUTDIR']
    # remap /data/datasystems runs to the cfs mirror, keyed by PMTIME date
    if fht['DESIROOT'] == '/data/datasystems':
        indir = '/global/cfs/cdirs/desi/survey/fiberassign/SV3/' +fht['PMTIME'][:10].translate({ord('-'): None}) +'/'
    # probe for the target file; fall back to the previous day's directory
    try:
        f = fitsio.read(indir+ts+'-targ.fits')
    except:
        date = int(fht['PMTIME'][:10].translate({ord('-'): None}))-1
        indir = '/global/cfs/cdirs/desi/survey/fiberassign/SV3/'+str(date)+'/'
    print(indir)
    tarf = indir+ts+'-targ.fits'
    try:
        fitsio.read(tarf)
    except:
        # bug fix: the original message used tilef, which is not yet defined
        # at this point and raised UnboundLocalError instead of returning
        return('Error! target file does not appear to exist for tile '+ts+' '+tarf)
    tilef = indir+ts+'-tiles.fits'
    try:
        fitsio.read(tilef)
    except:
        return('Error! tile file does not appear to exist for tile '+ts+' '+tilef)
    skyf = indir+ts+'-sky.fits'
    try:
        fitsio.read(skyf)
    except:
        print('Error! sky file does not appear to exist')
    scndf = indir+ts+'-scnd.fits'
    scnd = True
    try:
        fitsio.read(scndf)
    except:
        print(' secondary file does not appear to exist')
        scnd = False
    gfaf = indir+ts+'-gfa.fits'
    try:
        fitsio.read(gfaf)
    except:
        print('Error! gfa file does not appear to exist')
    toof = indir+ts+'-too.fits'
    too = os.path.isfile(toof)
    if too:
        print('will be using too file '+toof)
    if outdir is None:
        outdir = '/global/cfs/cdirs/desi/survey/catalogs/testfiberassign/SV3rerun/orig/'
    # write the script that reproduces the original fba_run invocation
    fo = open(outdir+'fa-'+ts+'.sh','w')
    fo.write('#!/bin/bash\n\n')
    fo.write('source /global/project/projectdirs/desi/software/desi_environment.sh master\n')
    if faver == None:
        faver = float(fht['FA_VER'][:3])
        if faver == 2.4:
            fo.write('export SKYBRICKS_DIR=${DESI_ROOT}/target/skybricks/v2\n')
        if faver < 2.4:
            # tiles after 20210413 that recorded 2.2.dev* reproduce with 2.3.0
            if int(indir[-7:-1]) > 210413:
                fo.write("module swap fiberassign/2.3.0\n")
            else:
                fo.write("module swap fiberassign/"+fht['FA_VER'][:3]+'.0'+"\n")
        else:
            fo.write("module swap fiberassign/"+fht['FA_VER']+"\n")
    else:
        fo.write("module swap fiberassign/"+str(faver)+"\n")
        faver = float(faver[:3])
    fo.write("fba_run")
    fo.write(" --targets "+tarf)
    if scnd:
        fo.write(" "+scndf)
    if too:
        fo.write(" "+toof)
    fo.write(" --sky "+skyf)
    fo.write(" --footprint "+tilef)
    rundate= fht['RUNDATE']
    # known-bad recorded RUNDATE for this tile batch; shift to a usable time
    if rundate == '2021-04-10T21:28:37':
        rundate = '2021-04-10T20:00:00'
    fo.write(" --rundate "+rundate)
    fo.write(" --fieldrot "+str(fht['FIELDROT']))
    fo.write(" --dir "+outdir)
    fo.write(" --sky_per_petal 40 --standards_per_petal 10")
    #fo.write(" --by_tile true")
    if faver >= 2.4:
        fo.write(" --sky_per_slitblock 1")
    if faver >= 3:
        fo.write(" --ha "+str(fht['FA_HA']))
        fo.write(" --margin-gfa 0.4 --margin-petal 0.4 --margin-pos 0.05")
    fo.close()
def get_fba_fromnewmtl(tileid,mtldir=None,getosubp=False,outdir=None,faver=None):
    '''
    Prepare a rerun of fiberassign for one SV3 tile, regenerating the target
    file from (alternate) MTL ledgers and writing a bash script fa-<tile>.sh
    that re-runs fba_run with the original tile/sky/scnd/too inputs.

    Args:
        tileid: tile number (int)
        mtldir (optional): base directory of the MTL ledgers (program name is
            appended); if None, the original target file is re-used
        getosubp (optional): if True, copy SUBPRIORITY values from the
            original target file into the regenerated one
        outdir (optional): where the targ file and script are written;
            defaults to the SV3rerun directory ("orig/" sub-dir when
            getosubp is set or no mtldir is given)
        faver (optional): fiberassign version as a *string* (e.g. "2.3.0");
            if None it is read from the FA_VER header keyword
    '''
    ts = str(tileid).zfill(6)
    #get info from origin fiberassign file
    fht = fitsio.read_header('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz')
    indir = fht['OUTDIR']
    # tiles run through the data systems recorded a /data/datasystems root;
    # remap to the cfs mirror keyed by the PMTIME date
    if fht['DESIROOT'] == '/data/datasystems':
        indir = '/global/cfs/cdirs/desi/survey/fiberassign/SV3/' +fht['PMTIME'][:10].translate({ord('-'): None}) +'/'
    # probe for the target file; on failure fall back to the previous day
    try:
        f = fitsio.read(indir+ts+'-targ.fits')
    except:
        date = int(fht['PMTIME'][:10].translate({ord('-'): None}))-1
        indir = '/global/cfs/cdirs/desi/survey/fiberassign/SV3/'+str(date)+'/'
    print(indir)
    tilef = indir+ts+'-tiles.fits'
    try:
        fitsio.read(tilef)
    except:
        return('Error! tile file does not appear to exist for tile '+ts+' '+tilef)
    skyf = indir+ts+'-sky.fits'
    try:
        fitsio.read(skyf)
    except:
        print('Error! sky file does not appear to exist')
    scndf = indir+ts+'-scnd.fits'
    scnd = True
    try:
        fitsio.read(scndf)
    except:
        print(' secondary file does not appear to exist')
        scnd = False
    gfaf = indir+ts+'-gfa.fits'
    try:
        fitsio.read(gfaf)
    except:
        print('Error! gfa file does not appear to exist')
    toof = indir+ts+'-too.fits'
    too = os.path.isfile(toof)
    if too:
        print('will be using too file '+toof)
    if outdir is None:
        outdir = '/global/cfs/cdirs/desi/survey/catalogs/testfiberassign/SV3rerun/'
    if getosubp == True or mtldir == None:
        outdir += 'orig/'
    # without an mtldir, re-use the original target file; otherwise the targ
    # file is regenerated below into outdir
    if mtldir == None:
        tarfn = indir+ts+'-targ.fits'
    else:
        tarfn = outdir+ts+'-targ.fits'
    prog = fht['FAPRGRM'].lower()
    # recover which Gaia DR the original run used from the recorded FAARGS
    gaiadr = None
    if np.isin('gaiadr2',fht['FAARGS'].split()):
        gaiadr = 'dr2'
    if np.isin('gaiaedr3',fht['FAARGS'].split()):
        gaiadr = 'edr3'
    # regenerate the target file from the MTL ledgers
    if mtldir is not None:
        altcreate_mtl(tilef,
        mtldir+prog,
        gaiadr,
        fht['PMCORR'],
        tarfn,
        tdir+prog)
    # optionally restore the original SUBPRIORITY values so assignment
    # tie-breaking matches the original run
    if getosubp:
        otar = Table.read(indir+ts+'-targ.fits')
        otar.keep_columns(['TARGETID','SUBPRIORITY'])
        ntar = Table.read(tarfn)
        ntar.remove_columns(['SUBPRIORITY'])
        ntar = join(ntar,otar,keys=['TARGETID'])
        ntar.write(tarfn,format='fits', overwrite=True)
    # write the script that re-runs fba_run with the assembled inputs
    fo = open(outdir+'fa-'+ts+'.sh','w')
    fo.write('#!/bin/bash\n\n')
    fo.write('source /global/project/projectdirs/desi/software/desi_environment.sh master\n')
    if faver == None:
        faver = float(fht['FA_VER'][:3])
        if faver == 2.4:
            fo.write('export SKYBRICKS_DIR=${DESI_ROOT}/target/skybricks/v2\n')
        if faver < 2.4:
            if int(indir[-7:-1]) > 210413:
                fo.write("module swap fiberassign/2.3.0\n") #inspection of results revealed tiles that used 2.2.dev* after 20210413 are reproduced using 2.3.0 and those before using 2.2.0
            else:
                fo.write("module swap fiberassign/"+fht['FA_VER'][:3]+'.0'+"\n")
        else:
            fo.write("module swap fiberassign/"+fht['FA_VER']+"\n")
    else:
        # NOTE(review): faver[:3] assumes a string; a float argument would raise
        fo.write("module swap fiberassign/"+str(faver)+"\n")
        faver = float(faver[:3])
    fo.write("fba_run")
    fo.write(" --targets "+tarfn)
    if scnd:
        fo.write(" "+scndf)
    if too:
        fo.write(" "+toof)
    fo.write(" --sky "+skyf)
    fo.write(" --footprint "+tilef)
    rundate= fht['RUNDATE']
    # known-bad recorded RUNDATE; shift to a usable time
    if rundate == '2021-04-10T21:28:37':
        rundate = '2021-04-10T20:00:00'
    fo.write(" --rundate "+rundate)
    fo.write(" --fieldrot "+str(fht['FIELDROT']))
    fo.write(" --dir "+outdir)
    fo.write(" --sky_per_petal 40 --standards_per_petal 10")
    #fo.write(" --by_tile true")
    if faver >= 2.4:
        fo.write(" --sky_per_slitblock 1")
    if faver >= 3:
        fo.write(" --ha "+str(fht['FA_HA']))
        fo.write(" --margin-gfa 0.4 --margin-petal 0.4 --margin-pos 0.05")
    fo.close()
#     if float(fht['FA_VER'][:3]) < 2.4:
#         fo.write("module swap fiberassign/2.3.0\n")
#     else:
#         fo.write("module swap fiberassign/"+fht['FA_VER']+"\n")
#     fo.write("fba_run")
#     fo.write(" --targets "+tarfn+" "+scndf)
#     if too:
#         fo.write(" "+toof)
#     fo.write(" --sky "+skyf)
#     fo.write(" --footprint "+tilef)
#     fo.write(" --rundate "+fht['RUNDATE'])
#     fo.write(" --fieldrot "+str(fht['FIELDROT']))
#     fo.write(" --dir "+outdir)
#     #fo.write(" --by_tile true")
#     if float(fht['FA_VER'][:3]) >= 3:
#         fo.write(" --ha "+str(fht['FA_HA']))
#     fo.close()
def altcreate_mtl(
    tilesfn,
    mtldir,
    gaiadr,
    pmcorr,
    outfn,
    targdir,
    survey='sv3',
    mtltime=None,#I think we will just want this to be the latest for the re/alt runs
    pmtime_utc_str=None,
    add_plate_cols=True#,
    #tmpoutdir=tempfile.mkdtemp(),
):
    """
    Mostly copied from fiberassign.fba_launch_io.create_mtl
    Create a (primary or secondary) target fits file, based on MTL ledgers (and complementary columns from desitarget targets files).
    Args:
        tilesfn: path to a tiles fits file (string)
        mtldir: folder with ledger files
        targdir: desitarget targets folder (or file name if secondary) (string)
        gaiadr: Gaia dr ("dr2" or "edr3")
        pmcorr: apply proper-motion correction? ("y" or "n")
        outfn: fits file name to be written (string)
        survey: should just be sv3
        mtltime: MTL isodate (string formatted as yyyy-mm-ddThh:mm:ss+00:00); this needs be considered carefully for alt mtls
        tmpoutdir (optional, defaults to a temporary directory): temporary directory where
            write_targets will write (creating some sub-directories)
        pmtime_utc_str (optional, defaults to None): UTC time use to compute
            new coordinates after applying proper motion since REF_EPOCH
            (string formatted as "yyyy-mm-ddThh:mm:ss+00:00")
        add_plate_cols (optional, defaults to True): adds a PLATE_RA and PLATE_DEC columns (boolean)
        log (optional, defaults to Logger.get()): Logger object
        step (optional, defaults to ""): corresponding step, for fba_launch log recording
            (e.g. dotiles, dosky, dogfa, domtl, doscnd, dotoo)
        start(optional, defaults to time()): start time for log (in seconds; output of time.time()
    Notes:
        if pmcorr="y", then pmtime_utc_str needs to be set; will trigger an error otherwise.
        for sv3-backup, we remove BACKUP_BRIGHT targets.
        TBD : if secondary targets, we currently disable the inflate_ledger(), as it
            seems to not currently work.
            hence if secondary and pmcorr="y", the code will crash, as the
            GAIA_ASTROMETRIC_EXCESS_NOISE column will be missing; though we do not
            expect this configuration to happen, so it should be fine for now.
        TBD: the PLATE_{RA,DEC,REF_EPOCH} columns currently simply are copy of RA,DEC,REF_EPOCH
        TBD: but it prepares e.g. to add chromatic offsets.
        20210526 : implementation of using subpriority=False in write_targets
            to avoid an over-writting of the SUBPRIORITY; AJR changed to True reproduce SV3
    """
    tiles = fitsio.read(tilesfn)
    # AR mtl: read mtl
    d = io.read_targets_in_tiles(
        mtldir,
        tiles,
        quick=False,
        mtl=True,
        unique=True,
        isodate=mtltime,
    )
    # AR mtl: removing by hand BACKUP_BRIGHT for sv3/BACKUP
    # AR mtl: using an indirect way to find if program=backup,
    # AR mtl: to avoid the need of an extra program argument
    # AR mtl: for sv3, there is no secondary-backup, so no ambiguity
    if (survey == "sv3") & ("backup" in mtldir):
        from desitarget.sv3.sv3_targetmask import mws_mask
        keep = (d["SV3_MWS_TARGET"] & mws_mask["BACKUP_BRIGHT"]) == 0
        d = d[keep]
    #AJR added this in
    # columns present in the static target files but absent from the ledger rows
    columns = [key for key in minimal_target_columns if key not in d.dtype.names]
    #tcol = ['SV3_DESI_TARGET','SV3_BGS_TARGET','SV3_MWS_TARGET','SV3_SCND_TARGET']
    #for col in tcol:
    #    columns.append(col)
    d = inflate_ledger(
        d, targdir, columns=columns, header=False, strictcols=False, quick=True
    )    # AR adding PLATE_RA, PLATE_DEC, PLATE_REF_EPOCH ?
    if add_plate_cols:
        d = Table(d)
        d["PLATE_RA"] = d["RA"]
        d["PLATE_DEC"] = d["DEC"]
        d["PLATE_REF_EPOCH"] = d["REF_EPOCH"]
        d = d.as_array()
    # AR mtl: PMRA, PMDEC: convert NaN to zeros
    d = force_finite_pm(d)
    # AR mtl: update RA, DEC, REF_EPOCH using proper motion?
    if pmcorr == "y":
        if pmtime_utc_str is None:
            sys.exit(1)
        d = update_nowradec(d, gaiadr, pmtime_utc_str)
    else:
        d = force_nonzero_refepoch(
            d, gaia_ref_epochs[gaiadr]
        )
    d = Table(d)
    outfndir = '/'.join(outfn.split('/')[:-1])
    if not os.path.exists(outfndir):
        os.makedirs(outfndir, exist_ok=True)
    d.write(outfn,format='fits', overwrite=True)
    del d
    return True
    # NOTE(review): everything below the return above is unreachable -- the
    # pmcorr header-comment update never executes
    # AR mtl: write fits
    #n, tmpfn = io.write_targets(tmpoutdir, d, indir=mtldir, indir2=targdir, survey=survey, subpriority=True)
    #_ = mv_write_targets_out(tmpfn, tmpoutdir, outfn)
    # AR mtl: update header if pmcorr = "y"
    if pmcorr == "y":
        fd = fitsio.FITS(outfn, "rw")
        fd["TARGETS"].write_key("COMMENT", "RA,DEC updated with PM for AEN objects")
        fd["TARGETS"].write_key("COMMENT", "REF_EPOCH updated for all objects")
        fd.close()
"""
copying functions from fba_launch_io.py just so these are stable and in one place; don't
actually want to have to load proper version of fiberassign just for this
"""
def mv_write_targets_out(infn, targdir, outfn):
    """
    Rename the file created by desitarget.io.write_targets and remove the
    (now empty) sub-directory tree that write_targets created under targdir.

    Args:
        infn: filename output by desitarget.io.write_targets
        targdir: folder provided as desitarget.io.write_targets input
        outfn: desired renaming of infn
    """
    # move the output file to its final name
    _ = shutil.move(infn, outfn)
    # normalize targdir to end with a separator
    if targdir[-1] != "/":
        targdir = "{}/".format(targdir)
    # sub-directories that write_targets created below targdir, shallowest first
    subdirs = infn.replace(targdir, "").split("/")[:-1]
    # remove them deepest-first (each is empty after the move)
    while subdirs:
        os.rmdir(os.path.join(targdir, *subdirs))
        subdirs.pop()
def get_nowradec(ra, dec, pmra, pmdec, parallax, ref_year, pmtime_utc_str, scnd=False):
    """
    Apply proper motion correction
    Args:
        ra: numpy array of RAs (deg)
        dec: numpy array of DECs (deg)
        pmra: numpy array of projected proper-motion in RA (mas/year)
        pmdec: numpy array of projected proper-motion in DEC (mas/year)
        parallax: numpy array of parallax (mas)
        ref_year: reference epoch (e.g. 2015.5 for Gaia/DR2)
        pmtime_utc_str: date to update position to (format: YYYY-MM-DDThh:mm:ss+00:00)
        scnd (optional, defaults to False): secondary target? (boolean; if True, sets parallax=0)
    Returns:
        ra: numpy array of RAs updated to pmtime_utc_str (deg)
        dec: numpy array of DECs updated to pmtime_utc_str (deg)
    Notes:
        Courtesy of DL; adapted from legacypipe.survey
        Originally named radec_at_mjd()
        Time here is astropy.time.Time (imported at module level).
    """
    # AR pmtime_utc : UTC time of the new ref_epoch; "%Y-%m-%dT%H:%M:%S%z", e.g. "2021-04-21T00:00:00+00:00"
    # AR scnd=True -> parallax is set to 0, i.e. not used
    """
    Units:
        - matches Gaia DR1/DR2
        - pmra,pmdec are in mas/yr.
          pmra is in angular speed (ie, has a cos(dec) factor)
        - parallax is in mas.
    Returns: RA,Dec
    """
    equinox = 53084.28  # mjd of the spring equinox in 2004
    equinox_jyear = Time(equinox, format="mjd").jyear
    axistilt = 23.44  # degrees
    arcsecperrad = 3600.0 * 180.0 / np.pi
    # AR pmtime
    pmtime_utc = datetime.strptime(pmtime_utc_str, "%Y-%m-%dT%H:%M:%S%z")
    pmtime_utc_jyear = Time(pmtime_utc).jyear
    # NOTE(review): pmtime_utc_mjd is computed but never used below
    pmtime_utc_mjd = Time(pmtime_utc).mjd
    def xyztoradec(xyz):
        # convert unit-sphere cartesian coords back to (ra, dec) in degrees
        assert len(xyz.shape) == 2
        ra = np.arctan2(xyz[:, 1], xyz[:, 0])  # AR added "np." in front of arctan2...
        ra += 2 * np.pi * (ra < 0)
        norm = np.sqrt(np.sum(xyz ** 2, axis=1))
        dec = np.arcsin(xyz[:, 2] / norm)
        return np.rad2deg(ra), np.rad2deg(dec)
    def radectoxyz(ra_deg, dec_deg):  # AR changed inputs from ra,dec to ra_deg,dec_deg
        # convert (ra, dec) in degrees to cartesian coords on the unit sphere
        ra = np.deg2rad(ra_deg)
        dec = np.deg2rad(dec_deg)
        cosd = np.cos(dec)
        return np.vstack((cosd * np.cos(ra), cosd * np.sin(ra), np.sin(dec))).T
    # elapsed time (Julian years) between the reference epoch and pmtime
    dt = pmtime_utc_jyear - ref_year
    cosdec = np.cos(np.deg2rad(dec))
    # linear proper-motion update; pmra carries the cos(dec) factor,
    # hence the division when converting to a change in RA
    dec = dec + dt * pmdec / (3600.0 * 1000.0)
    ra = ra + (dt * pmra / (3600.0 * 1000.0)) / cosdec
    parallax = np.atleast_1d(parallax)
    # AR discards parallax for scnd=True
    if scnd == True:
        parallax *= 0.0
    I = np.flatnonzero(parallax)
    if len(I):
        # annual parallax correction for objects with a measured parallax
        suntheta = 2.0 * np.pi * np.fmod(pmtime_utc_jyear - equinox_jyear, 1.0)
        # Finite differences on the unit sphere -- xyztoradec handles
        # points that are not exactly on the surface of the sphere.
        axis = np.deg2rad(axistilt)
        scale = parallax[I] / 1000.0 / arcsecperrad
        xyz = radectoxyz(ra[I], dec[I])
        xyz[:, 0] += scale * np.cos(suntheta)
        xyz[:, 1] += scale * np.sin(suntheta) * np.cos(axis)
        xyz[:, 2] += scale * np.sin(suntheta) * np.sin(axis)
        r, d = xyztoradec(xyz)
        ra[I] = r
        dec[I] = d
    return ra, dec
def force_finite_pm(
    d, pmra_key="PMRA", pmdec_key="PMDEC"
):
    """
    Zero out non-finite (NaN/inf) proper motions, in place.

    Args:
        d: array with at least the proper-motion columns
        pmra_key (optional, defaults to PMRA): column name for PMRA
        pmdec_key (optional, defaults to PMDEC): column name for PMDEC

    Returns:
        d: the same array, with non-finite proper motions replaced by 0
    """
    for col in (pmra_key, pmdec_key):
        bad = ~np.isfinite(d[col])
        if bad.any():
            d[col][bad] = 0.0
    return d
def force_nonzero_refepoch(
    d,
    force_ref_epoch,
    ref_epoch_key="REF_EPOCH",
    pmra_key="PMRA",
    pmdec_key="PMDEC",
):
    """
    Replaces 0 by force_ref_epoch in ref_epoch
    Args:
        d: array with at least proper-motion columns
        force_ref_epoch: float, ref_epoch to replace 0 by
        ref_epoch_key (optional, defaults to REF_EPOCH): column name for the ref_epoch
        pmra_key (optional, defaults to PMRA): column name for PMRA
        pmdec_key (optional, defaults to PMDEC): column name for PMDEC
    Returns:
        d: same as input d, but 0 ref_epochs replaced by force_ref_epoch
    Notes:
        Will exit with error if ref_epoch=0, but pmra or pmdec != 0
    """
    keep = d[ref_epoch_key] == 0
    # a zero REF_EPOCH is only legal when both proper motions are zero;
    # bug fix: the original tested pmra_key twice and never checked pmdec_key,
    # contradicting the documented behavior
    n = ((d[pmra_key][keep] != 0) | (d[pmdec_key][keep] != 0)).sum()
    if n > 0:
        sys.exit(1)
    d[ref_epoch_key][keep] = force_ref_epoch
    return d
def update_nowradec(
    d,
    gaiadr,
    pmtime_utc_str,
    ra_key="RA",
    dec_key="DEC",
    pmra_key="PMRA",
    pmdec_key="PMDEC",
    parallax_key="PARALLAX",
    ref_epoch_key="REF_EPOCH",
    gaiag_key="GAIA_PHOT_G_MEAN_MAG",
    gaiaaen_key="GAIA_ASTROMETRIC_EXCESS_NOISE",
    scnd=False,
):
    """
    Update (RA, DEC, REF_EPOCH) using proper motion
    Args:
        d: array with at least proper-motion columns
        pmtime_utc_str: date to update position to (format: YYYY-MM-DDThh:mm:ss+00:00)
        gaiadr: Gaia dr ("dr2" or "edr3")
        ra_key (optional, defaults to RA): column name for RA
        dec_key (optional, defaults to DEC): column name for DEC
        pmra_key (optional, defaults to PMRA): column name for PMRA
        pmdec_key (optional, defaults to PMDEC): column name for PMDEC
        parallax_key (optional, defaults to PARALLAX): column name for PARALLAX
        ref_epoch_key (optional, defaults to REF_EPOCH): column name for the REF_EPOCH
        gaia_key (optional, defaults to GAIA_PHOT_G_MEAN_MAG): column name for Gaia g-mag
        gaiaaen_key (optional, defaults to GAIA_ASTROMETRIC_EXCESS_NOISE): column name for Gaia GAIA_ASTROMETRIC_EXCESS_NOISE
        scnd (optional, defaults to False): secondary target? (boolean);
            if False, update for REF_EPOCH>0 + AEN only
            if True, update for REF_EPOCH>0 + finite(PMRA,PMDEC) ; forces PARALLAX=0
    Returns:
        d: same as input, but with RA, DEC updated to pmtime_utc_str
    Notes:
        REF_EPOCH is updated for *all* objects
        Time is astropy.time.Time and gaia_psflike is desitarget.gaia.gaia_psflike
        (both imported at module level).
    """
    # AR
    pmtime_utc = datetime.strptime(pmtime_utc_str, "%Y-%m-%dT%H:%M:%S%z")
    pmtime_utc_jyear = Time(pmtime_utc).jyear
    # AR computing positions at pmtime_utc_str using Gaia PMRA, PMDEC
    nowra, nowdec = get_nowradec(
        d[ra_key],
        d[dec_key],
        d[pmra_key],
        d[pmdec_key],
        d[parallax_key],
        d[ref_epoch_key],
        pmtime_utc_str,
        scnd=scnd,
    )
    if scnd == True:
        # AR secondary: REF_EPOCH>0
        keep = d["REF_EPOCH"] > 0
    else:
        # AR targets with REF_EPOCH>0 and passing the AEN criterion
        keep = d["REF_EPOCH"] > 0
        # AR gaia_psflike arguments changed at desitarget-0.58.0
        if desitarget.__version__ < "0.58.0":
            keep &= gaia_psflike(d[gaiag_key], d[gaiaaen_key])
        else:
            keep &= gaia_psflike(d[gaiag_key], d[gaiaaen_key], dr=gaiadr)
    # AR storing changes to report extrema in the log
    # NOTE(review): dra/ddec are computed but never reported here
    dra = nowra - d[ra_key]
    ddec = nowdec - d[dec_key]
    # AR updating positions to pmtime_utc_str for targets passing the AEN criterion
    d[ra_key][keep] = nowra[keep]
    d[dec_key][keep] = nowdec[keep]
    # AR updating REF_EPOCH for *all* objects (for PlateMaker)
    d[ref_epoch_key] = pmtime_utc_jyear
    return d
| [
"astropy.table.Table",
"numpy.isin",
"desitarget.mtl.inflate_ledger",
"numpy.arctan2",
"numpy.isfinite",
"sys.exit",
"numpy.sin",
"os.path.exists",
"shutil.move",
"fitsio.read",
"desitarget.io.read_targets_in_tiles",
"numpy.flatnonzero",
"fitsio.FITS",
"fitsio.read_header",
"numpy.rad2de... | [((1821, 1940), 'fitsio.read', 'fitsio.read', (["('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/' + ts[:3] +\n '/fiberassign-' + ts + '.fits.gz')"], {}), "('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/' + ts[:\n 3] + '/fiberassign-' + ts + '.fits.gz')\n", (1832, 1940), False, 'import fitsio\n'), ((1938, 1979), 'fitsio.read', 'fitsio.read', (["(dirn + 'fba-' + ts + '.fits')"], {}), "(dirn + 'fba-' + ts + '.fits')\n", (1949, 1979), False, 'import fitsio\n'), ((2163, 2202), 'numpy.isin', 'np.isin', (["fn['TARGETID']", "fa['TARGETID']"], {}), "(fn['TARGETID'], fa['TARGETID'])\n", (2170, 2202), True, 'import numpy as np\n'), ((2472, 2591), 'fitsio.read', 'fitsio.read', (["('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/' + ts[:3] +\n '/fiberassign-' + ts + '.fits.gz')"], {}), "('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/' + ts[:\n 3] + '/fiberassign-' + ts + '.fits.gz')\n", (2483, 2591), False, 'import fitsio\n'), ((2672, 2713), 'fitsio.read', 'fitsio.read', (["(dirn + 'fba-' + ts + '.fits')"], {}), "(dirn + 'fba-' + ts + '.fits')\n", (2683, 2713), False, 'import fitsio\n'), ((2987, 3026), 'numpy.isin', 'np.isin', (["fn['TARGETID']", "fa['TARGETID']"], {}), "(fn['TARGETID'], fa['TARGETID'])\n", (2994, 3026), True, 'import numpy as np\n'), ((3461, 3586), 'fitsio.read_header', 'fitsio.read_header', (["('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/' + ts[:3] +\n '/fiberassign-' + ts + '.fits.gz')"], {}), "('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/' +\n ts[:3] + '/fiberassign-' + ts + '.fits.gz')\n", (3479, 3586), False, 'import fitsio\n'), ((4039, 4081), 'fitsio.read', 'fitsio.read', (["(indir + 'fba-' + ts + '.fits')"], {}), "(indir + 'fba-' + ts + '.fits')\n", (4050, 4081), False, 'import fitsio\n'), ((4090, 4131), 'fitsio.read', 'fitsio.read', (["(dirn + 'fba-' + ts + '.fits')"], {}), "(dirn + 'fba-' + ts + '.fits')\n", (4101, 4131), False, 'import fitsio\n'), ((4137, 4183), 
'numpy.array_equal', 'np.array_equal', (["fa['TARGETID']", "fn['TARGETID']"], {}), "(fa['TARGETID'], fn['TARGETID'])\n", (4151, 4183), True, 'import numpy as np\n'), ((4816, 4941), 'fitsio.read_header', 'fitsio.read_header', (["('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/' + ts[:3] +\n '/fiberassign-' + ts + '.fits.gz')"], {}), "('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/' +\n ts[:3] + '/fiberassign-' + ts + '.fits.gz')\n", (4834, 4941), False, 'import fitsio\n'), ((6252, 6272), 'os.path.isfile', 'os.path.isfile', (['toof'], {}), '(toof)\n', (6266, 6272), False, 'import os\n'), ((8468, 8593), 'fitsio.read_header', 'fitsio.read_header', (["('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/' + ts[:3] +\n '/fiberassign-' + ts + '.fits.gz')"], {}), "('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/' +\n ts[:3] + '/fiberassign-' + ts + '.fits.gz')\n", (8486, 8593), False, 'import fitsio\n'), ((9723, 9743), 'os.path.isfile', 'os.path.isfile', (['toof'], {}), '(toof)\n', (9737, 9743), False, 'import os\n'), ((15736, 15756), 'fitsio.read', 'fitsio.read', (['tilesfn'], {}), '(tilesfn)\n', (15747, 15756), False, 'import fitsio\n'), ((15789, 15885), 'desitarget.io.read_targets_in_tiles', 'io.read_targets_in_tiles', (['mtldir', 'tiles'], {'quick': '(False)', 'mtl': '(True)', 'unique': '(True)', 'isodate': 'mtltime'}), '(mtldir, tiles, quick=False, mtl=True, unique=True,\n isodate=mtltime)\n', (15813, 15885), False, 'from desitarget import io\n'), ((16644, 16735), 'desitarget.mtl.inflate_ledger', 'inflate_ledger', (['d', 'targdir'], {'columns': 'columns', 'header': '(False)', 'strictcols': '(False)', 'quick': '(True)'}), '(d, targdir, columns=columns, header=False, strictcols=False,\n quick=True)\n', (16658, 16735), False, 'from desitarget.mtl import inflate_ledger\n'), ((17374, 17382), 'astropy.table.Table', 'Table', (['d'], {}), '(d)\n', (17379, 17382), False, 'from astropy.table import Table, join\n'), ((18959, 18983), 'shutil.move', 
'shutil.move', (['infn', 'outfn'], {}), '(infn, outfn)\n', (18970, 18983), False, 'import shutil\n'), ((20728, 20784), 'datetime.datetime.strptime', 'datetime.strptime', (['pmtime_utc_str', '"""%Y-%m-%dT%H:%M:%S%z"""'], {}), "(pmtime_utc_str, '%Y-%m-%dT%H:%M:%S%z')\n", (20745, 20784), False, 'from datetime import datetime, timedelta\n'), ((21648, 21671), 'numpy.atleast_1d', 'np.atleast_1d', (['parallax'], {}), '(parallax)\n', (21661, 21671), True, 'import numpy as np\n'), ((21766, 21790), 'numpy.flatnonzero', 'np.flatnonzero', (['parallax'], {}), '(parallax)\n', (21780, 21790), True, 'import numpy as np\n'), ((26484, 26540), 'datetime.datetime.strptime', 'datetime.strptime', (['pmtime_utc_str', '"""%Y-%m-%dT%H:%M:%S%z"""'], {}), "(pmtime_utc_str, '%Y-%m-%dT%H:%M:%S%z')\n", (26501, 26540), False, 'from datetime import datetime, timedelta\n'), ((2212, 2222), 'numpy.sum', 'np.sum', (['ws'], {}), '(ws)\n', (2218, 2222), True, 'import numpy as np\n'), ((3036, 3046), 'numpy.sum', 'np.sum', (['ws'], {}), '(ws)\n', (3042, 3046), True, 'import numpy as np\n'), ((5448, 5465), 'fitsio.read', 'fitsio.read', (['tarf'], {}), '(tarf)\n', (5459, 5465), False, 'import fitsio\n'), ((5619, 5637), 'fitsio.read', 'fitsio.read', (['tilef'], {}), '(tilef)\n', (5630, 5637), False, 'import fitsio\n'), ((5786, 5803), 'fitsio.read', 'fitsio.read', (['skyf'], {}), '(skyf)\n', (5797, 5803), False, 'import fitsio\n'), ((5946, 5964), 'fitsio.read', 'fitsio.read', (['scndf'], {}), '(scndf)\n', (5957, 5964), False, 'import fitsio\n'), ((6118, 6135), 'fitsio.read', 'fitsio.read', (['gfaf'], {}), '(gfaf)\n', (6129, 6135), False, 'import fitsio\n'), ((9102, 9120), 'fitsio.read', 'fitsio.read', (['tilef'], {}), '(tilef)\n', (9113, 9120), False, 'import fitsio\n'), ((9269, 9286), 'fitsio.read', 'fitsio.read', (['skyf'], {}), '(skyf)\n', (9280, 9286), False, 'import fitsio\n'), ((9429, 9447), 'fitsio.read', 'fitsio.read', (['scndf'], {}), '(scndf)\n', (9440, 9447), False, 'import fitsio\n'), ((9589, 
9606), 'fitsio.read', 'fitsio.read', (['gfaf'], {}), '(gfaf)\n', (9600, 9606), False, 'import fitsio\n'), ((10487, 10524), 'astropy.table.Table.read', 'Table.read', (["(indir + ts + '-targ.fits')"], {}), "(indir + ts + '-targ.fits')\n", (10497, 10524), False, 'from astropy.table import Table, join\n'), ((10590, 10607), 'astropy.table.Table.read', 'Table.read', (['tarfn'], {}), '(tarfn)\n', (10600, 10607), False, 'from astropy.table import Table, join\n'), ((10668, 10703), 'astropy.table.join', 'join', (['ntar', 'otar'], {'keys': "['TARGETID']"}), "(ntar, otar, keys=['TARGETID'])\n", (10672, 10703), False, 'from astropy.table import Table, join\n'), ((16848, 16856), 'astropy.table.Table', 'Table', (['d'], {}), '(d)\n', (16853, 16856), False, 'from astropy.table import Table, join\n'), ((17441, 17465), 'os.path.exists', 'os.path.exists', (['outfndir'], {}), '(outfndir)\n', (17455, 17465), False, 'import os\n'), ((17475, 17511), 'os.makedirs', 'os.makedirs', (['outfndir'], {'exist_ok': '(True)'}), '(outfndir, exist_ok=True)\n', (17486, 17511), False, 'import os\n'), ((17857, 17881), 'fitsio.FITS', 'fitsio.FITS', (['outfn', '"""rw"""'], {}), "(outfn, 'rw')\n", (17868, 17881), False, 'import fitsio\n'), ((20947, 20979), 'numpy.arctan2', 'np.arctan2', (['xyz[:, 1]', 'xyz[:, 0]'], {}), '(xyz[:, 1], xyz[:, 0])\n', (20957, 20979), True, 'import numpy as np\n'), ((21119, 21146), 'numpy.arcsin', 'np.arcsin', (['(xyz[:, 2] / norm)'], {}), '(xyz[:, 2] / norm)\n', (21128, 21146), True, 'import numpy as np\n'), ((21296, 21314), 'numpy.deg2rad', 'np.deg2rad', (['ra_deg'], {}), '(ra_deg)\n', (21306, 21314), True, 'import numpy as np\n'), ((21329, 21348), 'numpy.deg2rad', 'np.deg2rad', (['dec_deg'], {}), '(dec_deg)\n', (21339, 21348), True, 'import numpy as np\n'), ((21364, 21375), 'numpy.cos', 'np.cos', (['dec'], {}), '(dec)\n', (21370, 21375), True, 'import numpy as np\n'), ((21514, 21529), 'numpy.deg2rad', 'np.deg2rad', (['dec'], {}), '(dec)\n', (21524, 21529), True, 'import 
numpy as np\n'), ((22039, 22059), 'numpy.deg2rad', 'np.deg2rad', (['axistilt'], {}), '(axistilt)\n', (22049, 22059), True, 'import numpy as np\n'), ((24474, 24485), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (24482, 24485), False, 'import sys\n'), ((2234, 2244), 'numpy.sum', 'np.sum', (['ws'], {}), '(ws)\n', (2240, 2244), True, 'import numpy as np\n'), ((3058, 3068), 'numpy.sum', 'np.sum', (['ws'], {}), '(ws)\n', (3064, 3068), True, 'import numpy as np\n'), ((3802, 3840), 'fitsio.read', 'fitsio.read', (["(indir + ts + '-targ.fits')"], {}), "(indir + ts + '-targ.fits')\n", (3813, 3840), False, 'import fitsio\n'), ((5157, 5195), 'fitsio.read', 'fitsio.read', (["(indir + ts + '-targ.fits')"], {}), "(indir + ts + '-targ.fits')\n", (5168, 5195), False, 'import fitsio\n'), ((8809, 8847), 'fitsio.read', 'fitsio.read', (["(indir + ts + '-targ.fits')"], {}), "(indir + ts + '-targ.fits')\n", (8820, 8847), False, 'import fitsio\n'), ((17204, 17215), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (17212, 17215), False, 'import sys\n'), ((19190, 19234), 'os.path.join', 'os.path.join', (['*([targdir] + tmpdirs[:i + 1])'], {}), '(*([targdir] + tmpdirs[:i + 1]))\n', (19202, 19234), False, 'import os\n'), ((21079, 21103), 'numpy.sum', 'np.sum', (['(xyz ** 2)'], {'axis': '(1)'}), '(xyz ** 2, axis=1)\n', (21085, 21103), True, 'import numpy as np\n'), ((21162, 21176), 'numpy.rad2deg', 'np.rad2deg', (['ra'], {}), '(ra)\n', (21172, 21176), True, 'import numpy as np\n'), ((21178, 21193), 'numpy.rad2deg', 'np.rad2deg', (['dec'], {}), '(dec)\n', (21188, 21193), True, 'import numpy as np\n'), ((21839, 21885), 'numpy.fmod', 'np.fmod', (['(pmtime_utc_jyear - equinox_jyear)', '(1.0)'], {}), '(pmtime_utc_jyear - equinox_jyear, 1.0)\n', (21846, 21885), True, 'import numpy as np\n'), ((22181, 22197), 'numpy.cos', 'np.cos', (['suntheta'], {}), '(suntheta)\n', (22187, 22197), True, 'import numpy as np\n'), ((22246, 22258), 'numpy.cos', 'np.cos', (['axis'], {}), '(axis)\n', (22252, 22258), 
True, 'import numpy as np\n'), ((22307, 22319), 'numpy.sin', 'np.sin', (['axis'], {}), '(axis)\n', (22313, 22319), True, 'import numpy as np\n'), ((23197, 23216), 'numpy.isfinite', 'np.isfinite', (['d[key]'], {}), '(d[key])\n', (23208, 23216), True, 'import numpy as np\n'), ((22227, 22243), 'numpy.sin', 'np.sin', (['suntheta'], {}), '(suntheta)\n', (22233, 22243), True, 'import numpy as np\n'), ((22288, 22304), 'numpy.sin', 'np.sin', (['suntheta'], {}), '(suntheta)\n', (22294, 22304), True, 'import numpy as np\n'), ((21440, 21451), 'numpy.sin', 'np.sin', (['dec'], {}), '(dec)\n', (21446, 21451), True, 'import numpy as np\n'), ((21409, 21419), 'numpy.cos', 'np.cos', (['ra'], {}), '(ra)\n', (21415, 21419), True, 'import numpy as np\n'), ((21428, 21438), 'numpy.sin', 'np.sin', (['ra'], {}), '(ra)\n', (21434, 21438), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision.models.resnet import Bottleneck
import numpy as np
class ResNet(torchvision.models.resnet.ResNet):
    """ResNet variant whose stage downsampling happens in conv1.

    Relative to the stock torchvision ResNet this swaps the initial
    max-pool for an unpadded ceil-mode variant and, for the first block
    of layers 2-4, moves the stride-2 downsampling from conv2 to conv1.
    """

    def __init__(self, block, layers, num_classes=1000):
        super(ResNet, self).__init__(block, layers, num_classes)
        # Replace the stock max-pool with a ceil-mode, zero-padding one.
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)
        # In stages 2-4, downsample in conv1 instead of conv2.
        for stage in ('layer2', 'layer3', 'layer4'):
            first_block = getattr(self, stage)[0]
            first_block.conv1.stride = (2, 2)
            first_block.conv2.stride = (1, 1)
class Encoder(nn.Module):
    """ResNet-101 image encoder yielding a global (fc) and spatial (att) feature.

    Loads pretrained ResNet-101 weights from ``resnet101_file`` and normalizes
    inputs with the standard ImageNet channel statistics.
    """

    def __init__(self, resnet101_file):
        super(Encoder, self).__init__()
        backbone = ResNet(Bottleneck, [3, 4, 23, 3])
        state_dict = torch.load(resnet101_file, map_location=lambda s, l: s)
        backbone.load_state_dict(state_dict)
        self.resnet = backbone
        self.transforms = torchvision.transforms.Compose([
            torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

    def preprocess(self, image):
        """Convert an HxW[xC] uint8 array into a normalized float CHW tensor."""
        if len(image.shape) == 3 and image.shape[-1] == 4:
            # Drop the alpha channel of RGBA input.
            image = image[:, :, :3]
        elif len(image.shape) == 2:
            # Replicate a grayscale image into three identical channels.
            image = image[:, :, np.newaxis]
            image = np.concatenate((image, image, image), axis=2)
        image = image.astype('float32') / 255.0
        image = torch.from_numpy(image.transpose(2, 0, 1))
        return self.transforms(image)

    def forward(self, img, att_size=14):
        x = self.resnet.conv1(img.unsqueeze(0))
        x = self.resnet.bn1(x)
        x = self.resnet.relu(x)
        x = self.resnet.maxpool(x)
        x = self.resnet.layer1(x)
        x = self.resnet.layer2(x)
        x = self.resnet.layer3(x)
        x = self.resnet.layer4(x)
        # Global descriptor: spatial average over the final feature map.
        fc = x.mean(3).mean(2).squeeze()
        # Spatial descriptor: att_size x att_size grid, channels last.
        att = F.adaptive_avg_pool2d(x, [att_size, att_size]).squeeze().permute(1, 2, 0)
        return fc, att
| [
"torch.nn.functional.adaptive_avg_pool2d",
"torch.load",
"torch.nn.MaxPool2d",
"numpy.concatenate",
"torchvision.transforms.Normalize"
] | [((349, 413), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(0)', 'ceil_mode': '(True)'}), '(kernel_size=3, stride=2, padding=0, ceil_mode=True)\n', (361, 413), True, 'import torch.nn as nn\n'), ((753, 808), 'torch.load', 'torch.load', (['resnet101_file'], {'map_location': '(lambda s, l: s)'}), '(resnet101_file, map_location=lambda s, l: s)\n', (763, 808), False, 'import torch\n'), ((946, 1024), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (978, 1024), False, 'import torchvision\n'), ((1289, 1334), 'numpy.concatenate', 'np.concatenate', (['(image, image, image)'], {'axis': '(2)'}), '((image, image, image), axis=2)\n', (1303, 1334), True, 'import numpy as np\n'), ((1899, 1945), 'torch.nn.functional.adaptive_avg_pool2d', 'F.adaptive_avg_pool2d', (['x', '[att_size, att_size]'], {}), '(x, [att_size, att_size])\n', (1920, 1945), True, 'import torch.nn.functional as F\n')] |
from astropy.io import fits
import pandas as pd
import numpy as np
from lightkurve import KeplerLightCurve
def find_flares(flux, error, N1=4, N2=4, N3=3):
    '''
    Flag local positive excursions (flare candidates) following the
    criteria of Eqn. 3a-d in http://arxiv.org/abs/1510.01005 (2015).

    Note: those equations were originally stated in magnitude units
    (smaller = brighter). The signs are flipped here, but the
    coefficients were not re-tuned for flux units.

    Parameters:
    ----------
    flux : numpy array
        data to search over
    error : numpy array
        errors corresponding to data.
    N1 : int, optional
        How many times above the stddev is required (paper default 3, 4 here).
    N2 : int, optional
        How many times above the stddev and uncertainty is required
        (paper default 1, 4 here).
    N3 : int, optional
        The number of consecutive points required to flag as a flare (default 3).

    Return:
    ------------
    isflare : numpy array
        datapoints are flagged with 1 if they belong to a flare candidate
    '''
    baseline = np.nanmedian(flux)
    scatter = np.nanstd(flux)

    excursion = flux - baseline                        # "N0": excursion must be positive
    t1 = np.abs(flux - baseline) / scatter             # N1 criterion
    t2 = np.abs(flux - baseline - error) / scatter     # N2 criterion

    # Mark every point that passes all three thresholds.
    passed = np.zeros_like(flux)
    passed[np.where((excursion > 0) & (t1 > N1) & (t2 > N2))] = 1

    # Scanning from the end of the series, count how many consecutive
    # points (including the current one) pass the thresholds, e.g.
    #   passed     = [0 0 1 1 1 0]
    #   run_length = [0 0 3 2 1 0]
    run_length = np.zeros_like(flux, dtype='int')
    for k in range(2, len(flux)):
        run_length[-k] = passed[-k] * (run_length[-(k - 1)] + passed[-k])

    # A flare starts where run_length jumps from 0 to at least N3.
    starts = np.where((run_length[1:] >= N3)
                      & (run_length[:-1] - run_length[1:] < 0))[0] + 1
    # run_length at the start tells how far away the stop index is.
    stops = starts + run_length[starts]

    isflare = np.zeros_like(flux, dtype='int')
    for lo, hi in zip(starts, stops):
        isflare[lo:hi + 1] = 1
    return isflare
def wrapper(lc, minsep=3):
    '''
    Main wrapper to obtain and process a light curve.

    Parameters:
    -------------
    lc : light curve
        FlareLightCurve object
    minsep : int, optional (default 3)
        minimum distance between two candidate start times in datapoints

    Return:
    ----------
    numpy arrays of start and stop cadence numbers of flare candidates
    '''
    # find continuous observing periods
    lc.find_gaps()
    istart = np.array([], dtype='int')
    istop = np.array([], dtype='int')
    # Now work on periods of continuous observation with no gaps
    for (le, ri) in lc.gaps:
        error = lc.flux_error[le:ri]
        flux = lc.flux[le:ri]
        # quiescent model: the median flux of the segment
        flux_model_i = np.nanmedian(flux) * np.ones_like(flux)
        flux_diff = flux - flux_model_i
        # run final flare-find on DATA - MODEL
        isflare = find_flares(flux_diff, error)
        # now pick out final flare candidate indices
        candidates = np.where(isflare > 0)[0]
        if len(candidates) < 1:  # no candidates = no indices
            # BUGFIX: use an integer dtype so these can index lc.cadenceno below
            # (a float empty array would raise on integer-array indexing).
            istart_gap = np.array([], dtype='int')
            istop_gap = np.array([], dtype='int')
        else:
            # find start and stop index, combine neighboring candidates
            # in to same events
            separated_candidates = np.where(np.diff(candidates) > minsep)[0]
            istart_gap = candidates[np.append([0], separated_candidates + 1)]
            istop_gap = candidates[np.append(separated_candidates,
                                              [len(candidates) - 1])]
        # stitch indices back into the original light curve
        istart = np.array(np.append(istart, istart_gap + le), dtype='int')
        istop = np.array(np.append(istop, istop_gap + le), dtype='int')
        # BUGFIX: the second tuple element previously repeated the start
        # cadences; record (start, stop) cadence numbers instead.
        lc.flares.append((lc.cadenceno[istart_gap + le],
                          lc.cadenceno[istop_gap + le]))
    return lc.cadenceno[istart], lc.cadenceno[istop]
#lc = get_k2sc_lc('examples/hlsp_k2sc_k2_llc_211117077-c04_kepler_v2_lc.fits')
#lc = get_k2sc_lc('examples/hlsp_k2sc_k2_llc_210951703-c04_kepler_v2_lc.fits')
| [
"numpy.abs",
"numpy.ones_like",
"numpy.nanstd",
"numpy.nanmedian",
"numpy.where",
"numpy.diff",
"numpy.append",
"numpy.array",
"numpy.zeros_like"
] | [((1288, 1306), 'numpy.nanmedian', 'np.nanmedian', (['flux'], {}), '(flux)\n', (1300, 1306), True, 'import numpy as np\n'), ((1319, 1334), 'numpy.nanstd', 'np.nanstd', (['flux'], {}), '(flux)\n', (1328, 1334), True, 'import numpy as np\n'), ((1541, 1583), 'numpy.where', 'np.where', (['((T0 > 0) & (T1 > N1) & (T2 > N2))'], {}), '((T0 > 0) & (T1 > N1) & (T2 > N2))\n', (1549, 1583), True, 'import numpy as np\n'), ((1662, 1681), 'numpy.zeros_like', 'np.zeros_like', (['flux'], {}), '(flux)\n', (1675, 1681), True, 'import numpy as np\n'), ((1999, 2031), 'numpy.zeros_like', 'np.zeros_like', (['flux'], {'dtype': '"""int"""'}), "(flux, dtype='int')\n", (2012, 2031), True, 'import numpy as np\n'), ((2588, 2620), 'numpy.zeros_like', 'np.zeros_like', (['flux'], {'dtype': '"""int"""'}), "(flux, dtype='int')\n", (2601, 2620), True, 'import numpy as np\n'), ((3165, 3190), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int"""'}), "([], dtype='int')\n", (3173, 3190), True, 'import numpy as np\n'), ((3203, 3228), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""int"""'}), "([], dtype='int')\n", (3211, 3228), True, 'import numpy as np\n'), ((1404, 1425), 'numpy.abs', 'np.abs', (['(flux - median)'], {}), '(flux - median)\n', (1410, 1425), True, 'import numpy as np\n'), ((1447, 1476), 'numpy.abs', 'np.abs', (['(flux - median - error)'], {}), '(flux - median - error)\n', (1453, 1476), True, 'import numpy as np\n'), ((2325, 2415), 'numpy.where', 'np.where', (['((reverse_counts[1:] >= N3) & (reverse_counts[:-1] - reverse_counts[1:] < 0))'], {}), '((reverse_counts[1:] >= N3) & (reverse_counts[:-1] - reverse_counts\n [1:] < 0))\n', (2333, 2415), True, 'import numpy as np\n'), ((3411, 3429), 'numpy.nanmedian', 'np.nanmedian', (['flux'], {}), '(flux)\n', (3423, 3429), True, 'import numpy as np\n'), ((3432, 3450), 'numpy.ones_like', 'np.ones_like', (['flux'], {}), '(flux)\n', (3444, 3450), True, 'import numpy as np\n'), ((3661, 3682), 'numpy.where', 'np.where', (['(isflare > 0)'], {}), 
'(isflare > 0)\n', (3669, 3682), True, 'import numpy as np\n'), ((3774, 3786), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3782, 3786), True, 'import numpy as np\n'), ((3811, 3823), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3819, 3823), True, 'import numpy as np\n'), ((4318, 4352), 'numpy.append', 'np.append', (['istart', '(istart_gap + le)'], {}), '(istart, istart_gap + le)\n', (4327, 4352), True, 'import numpy as np\n'), ((4392, 4424), 'numpy.append', 'np.append', (['istop', '(istop_gap + le)'], {}), '(istop, istop_gap + le)\n', (4401, 4424), True, 'import numpy as np\n'), ((4060, 4100), 'numpy.append', 'np.append', (['[0]', '(separated_candidates + 1)'], {}), '([0], separated_candidates + 1)\n', (4069, 4100), True, 'import numpy as np\n'), ((3988, 4007), 'numpy.diff', 'np.diff', (['candidates'], {}), '(candidates)\n', (3995, 4007), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt

# Altitude grid in kilometres (0 .. 31 km, 1 km steps).
alt_km = np.arange(0, 32.0, 1.0)

# Standard-atmosphere style temperature profile [K]:
#   below 11 km:  linear lapse of -6.5 K/km starting at 288.15 K
#   11-20 km:     isothermal at 216.65 K
#   above 20 km:  +1.0 K/km above 216.65 K
tropo = 288.15 - 6.5 * alt_km
pause = 216.65
strato = 216.65 + 1.0 * (alt_km - 20.0)
temp_k = (alt_km < 11) * tropo + (alt_km >= 11) * pause + (alt_km >= 20) * (strato - pause)

plt.plot(alt_km, temp_k)
plt.show()
"matplotlib.pyplot.plot",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((56, 79), 'numpy.arange', 'np.arange', (['(0)', '(32.0)', '(1.0)'], {}), '(0, 32.0, 1.0)\n', (65, 79), True, 'import numpy as np\n'), ((224, 241), 'matplotlib.pyplot.plot', 'plt.plot', (['h', 'temp'], {}), '(h, temp)\n', (232, 241), True, 'import matplotlib.pyplot as plt\n'), ((242, 252), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (250, 252), True, 'import matplotlib.pyplot as plt\n')] |
'''
Function:
复现论文"Combining Sketch and Tone for Pencil Drawing Production"
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import os
import cv2
import math
import numpy as np
from PIL import Image
from scipy import signal
from ..base import BaseBeautifier
from scipy.ndimage import interpolation
from scipy.sparse.linalg import spsolve
from scipy.sparse import csr_matrix, spdiags
'''图像处理工具'''
class ImageProcessor():
    '''Static image-processing helpers used by the pencil-drawing pipeline.'''
    @staticmethod
    def im2double(img):
        '''Rescale pixel values into [0, 1].'''
        if len(img.shape) == 2:
            return (img - img.min()) / (img.max() - img.min())
        return cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
    @staticmethod
    def Laplace(x, sigma=9):
        '''Laplace-like distribution peaking at bright pixel values.'''
        return (1. / sigma) * math.exp(-(256 - x) / sigma) * (256 - x)
    @staticmethod
    def Uniform(x, ua=105, ub=225):
        '''Uniform distribution over the mid-tone range [ua, ub].'''
        return (1. / (ub - ua)) * (max(x - ua, 0) - max(x - ub, 0))
    @staticmethod
    def Gaussian(x, u=90, sigma=11):
        '''Gaussian distribution centered on the dark tones.'''
        return (1. / math.sqrt(2 * math.pi * sigma)) * math.exp(-((x - u) ** 2) / (2 * (sigma ** 2)))
    @staticmethod
    def horizontalStitch(img, width):
        '''Tile img horizontally, cross-fading the seams, until it is `width` wide.'''
        stitched = img.copy()
        while stitched.shape[1] < width:
            ws = int(round(img.shape[1] / 4.))
            left = img[:, (img.shape[1] - ws): img.shape[1]]
            right = img[:, :ws]
            faded_left = np.zeros((left.shape[0], ws))
            faded_right = np.zeros((left.shape[0], ws))
            # Linear cross-fade across the overlap window.
            for col in range(ws):
                faded_left[:, col] = left[:, col] * (1 - (col + 1.) / ws)
                faded_right[:, col] = right[:, col] * (col + 1.) / ws
            stitched = np.column_stack((
                stitched[:, :(stitched.shape[1] - ws)],
                faded_left + faded_right,
                stitched[:, ws: stitched.shape[1]],
            ))
        return stitched[:, :width]
    @staticmethod
    def verticalStitch(img, height):
        '''Tile img vertically, cross-fading the seams, until it is `height` tall.'''
        stitched = img.copy()
        while stitched.shape[0] < height:
            ws = int(round(img.shape[0] / 4.))
            up = img[(img.shape[0] - ws): img.shape[0], :]
            down = img[0:ws, :]
            faded_up = np.zeros((ws, up.shape[1]))
            faded_down = np.zeros((ws, up.shape[1]))
            # Linear cross-fade across the overlap window.
            for row in range(ws):
                faded_up[row, :] = up[row, :] * (1 - (row + 1.) / ws)
                faded_down[row, :] = down[row, :] * (row + 1.) / ws
            stitched = np.row_stack((
                stitched[:stitched.shape[0] - ws, :],
                faded_up + faded_down,
                stitched[ws: stitched.shape[0], :],
            ))
        return stitched[:height, :]
'''复现论文"Combining Sketch and Tone for Pencil Drawing Production"'''
class PencilDrawingBeautifier(BaseBeautifier):
    '''Reproduce the paper "Combining Sketch and Tone for Pencil Drawing Production".'''
    def __init__(self, mode='gray', kernel_size_scale=1/40, stroke_width=1, color_depth=1, weights_color=None, weights_gray=None, texture_path=None, **kwargs):
        '''
        Args:
            mode: 'gray' produces a pencil sketch, 'color' a colored pencil drawing.
            kernel_size_scale: stroke kernel size as a fraction of the short image side.
            stroke_width: exponent shaping the darkness of the strokes.
            color_depth: exponent applied to the tone map.
            weights_color: weights of the (Laplace, Uniform, Gaussian) tone terms
                in color mode; defaults to [62, 30, 5].
            weights_gray: same weights for gray mode; defaults to [76, 22, 2].
            texture_path: path of the pencil texture image; the bundled default
                texture is used when the path is None or does not exist.
        '''
        super(PencilDrawingBeautifier, self).__init__(**kwargs)
        assert mode in ['gray', 'color']
        self.rootdir = os.path.split(os.path.abspath(__file__))[0]
        self.image_processor = ImageProcessor()
        self.mode = mode
        # pencil stroke parameters
        self.kernel_size_scale, self.stroke_width = kernel_size_scale, stroke_width
        # pencil tone parameters (None defaults avoid shared mutable default arguments)
        self.weights_color = [62, 30, 5] if weights_color is None else weights_color
        self.weights_gray = [76, 22, 2] if weights_gray is None else weights_gray
        self.color_depth = color_depth
        if (texture_path is None) or (not os.path.exists(texture_path)):
            self.texture_path = os.path.join(self.rootdir, 'textures/default.jpg')
        else:
            # BUGFIX: previously a valid user-supplied texture_path was ignored and
            # self.texture_path was left unset, raising AttributeError later in
            # __toneGeneration.
            self.texture_path = texture_path
    '''process one image'''
    def iterimage(self, image):
        if self.mode == 'color':
            img = Image.fromarray(image)
            img_ycbcr = img.convert('YCbCr')
            img = np.ndarray((img.size[1], img.size[0], 3), 'u1', img_ycbcr.tobytes())
            img_out = img.copy()
            img_out.flags.writeable = True
            # draw on the luminance channel only, then convert back to BGR
            img_out[:, :, 0] = self.__strokeGeneration(img[:, :, 0]) * self.__toneGeneration(img[:, :, 0]) * 255
            img_out = cv2.cvtColor(img_out, cv2.COLOR_YCR_CB2BGR)
        else:
            img = image
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img_s = self.__strokeGeneration(img)
            img_t = self.__toneGeneration(img)
            img_out = img_s * img_t * 255
        return img_out
    '''pencil stroke generation'''
    def __strokeGeneration(self, img):
        h, w = img.shape
        kernel_size = int(min(w, h) * self.kernel_size_scale)
        kernel_size += kernel_size % 2
        # compute gradients and their magnitude
        img_double = self.image_processor.im2double(img)
        dx = np.concatenate((np.abs(img_double[:, :-1]-img_double[:, 1:]), np.zeros((h, 1))), 1)
        dy = np.concatenate((np.abs(img_double[:-1, :]-img_double[1:, :]), np.zeros((1, w))), 0)
        img_gradient = np.sqrt(np.power(dx, 2) + np.power(dy, 2))
        # build line-segment kernels for eight reference directions
        line_segments = np.zeros((kernel_size, kernel_size, 8))
        for i in [0, 1, 2, 7]:
            for x in range(kernel_size):
                y = round((x + 1 - kernel_size / 2) * math.tan(math.pi / 8 * i))
                y = kernel_size / 2 - y
                if y > 0 and y <= kernel_size:
                    line_segments[int(y-1), x, i] = 1
            if i == 7:
                line_segments[:, :, 3] = np.rot90(line_segments[:, :, 7], -1)
            else:
                line_segments[:, :, i+4] = np.rot90(line_segments[:, :, i], 1)
        # response maps of the reference directions
        response_maps = np.zeros((h, w, 8))
        for i in range(8):
            response_maps[:, :, i] = signal.convolve2d(img_gradient, line_segments[:, :, i], 'same')
        response_maps_maxvalueidx = response_maps.argmax(axis=-1)
        # classify each pixel by picking the direction with the maximal response
        magnitude_maps = np.zeros_like(response_maps)
        for i in range(8):
            magnitude_maps[:, :, i] = img_gradient * (response_maps_maxvalueidx == i).astype('float')
        # line shaping
        stroke_maps = np.zeros_like(response_maps)
        for i in range(8):
            stroke_maps[:, :, i] = signal.convolve2d(magnitude_maps[:, :, i], line_segments[:, :, i], 'same')
        stroke_maps = stroke_maps.sum(axis=-1)
        stroke_maps = (stroke_maps - stroke_maps.min()) / (stroke_maps.max() - stroke_maps.min())
        stroke_maps = (1 - stroke_maps) ** self.stroke_width
        return stroke_maps
    '''pencil tone generation'''
    def __toneGeneration(self, img, mode=None):
        # histogram matching toward the "natural" pencil-drawing histogram
        # NOTE(review): iterimage never forwards mode='color' here, so the gray
        # weights are always used -- confirm whether that is intended.
        img_hist_match = self.__histogramMatching(img, mode) ** self.color_depth
        # load and tile the pencil texture to the image size
        texture = cv2.imread(self.texture_path)
        texture = cv2.cvtColor(texture, cv2.COLOR_BGR2GRAY)[99: texture.shape[0]-100, 99: texture.shape[1]-100]
        ratio = 0.2 * min(img.shape[0], img.shape[1]) / float(1024)
        texture = interpolation.zoom(texture, (ratio, ratio))
        texture = self.image_processor.im2double(texture)
        texture = self.image_processor.horizontalStitch(texture, img.shape[1])
        texture = self.image_processor.verticalStitch(texture, img.shape[0])
        # solve for the per-pixel exponent beta so that texture ** beta matches
        # the tone-mapped image, with smoothness regularization (sparse system)
        size = img.size
        nzmax = 2 * (size-1)
        i = np.zeros((nzmax, 1))
        j = np.zeros((nzmax, 1))
        s = np.zeros((nzmax, 1))
        for m in range(1, nzmax+1):
            i[m-1] = int(math.ceil((m + 0.1) / 2)) - 1
            j[m-1] = int(math.ceil((m - 0.1) / 2)) - 1
            s[m-1] = -2 * (m % 2) + 1
        dx = csr_matrix((s.T[0], (i.T[0], j.T[0])), shape=(size, size))
        nzmax = 2 * (size - img.shape[1])
        i = np.zeros((nzmax, 1))
        j = np.zeros((nzmax, 1))
        s = np.zeros((nzmax, 1))
        for m in range(1, nzmax+1):
            i[m-1, :] = int(math.ceil((m - 1 + 0.1) / 2) + img.shape[1] * (m % 2)) - 1
            j[m-1, :] = math.ceil((m - 0.1) / 2) - 1
            s[m-1, :] = -2 * (m % 2) + 1
        dy = csr_matrix((s.T[0], (i.T[0], j.T[0])), shape=(size, size))
        texture_sparse = spdiags(np.log(np.reshape(texture.T, (1, texture.size), order="f") + 0.01), 0, size, size)
        img_hist_match1d = np.log(np.reshape(img_hist_match.T, (1, img_hist_match.size), order="f").T + 0.01)
        nat = texture_sparse.T.dot(img_hist_match1d)
        a = np.dot(texture_sparse.T, texture_sparse)
        b = dx.T.dot(dx)
        c = dy.T.dot(dy)
        mat = a + 0.2 * (b + c)
        beta1d = spsolve(mat, nat)
        beta = np.reshape(beta1d, (img.shape[0], img.shape[1]), order="c")
        # apply the exponent map and normalize into [0, 1]
        tone = texture ** beta
        tone = (tone - tone.min()) / (tone.max() - tone.min())
        return tone
    '''histogram matching'''
    def __histogramMatching(self, img, mode=None):
        weights = self.weights_color if mode == 'color' else self.weights_gray
        # CDF of the input image histogram
        histogram_img = cv2.calcHist([img], [0], None, [256], [0, 256])
        histogram_img.resize(histogram_img.size)
        histogram_img /= histogram_img.sum()
        histogram_img_cdf = np.cumsum(histogram_img)
        # "natural" pencil-drawing histogram: weighted Laplace/Uniform/Gaussian mix
        histogram_natural = np.zeros_like(histogram_img)
        for x in range(256):
            histogram_natural[x] = weights[0] * self.image_processor.Laplace(x) + weights[1] * self.image_processor.Uniform(x) + weights[2] * self.image_processor.Gaussian(x)
        histogram_natural /= histogram_natural.sum()
        histogram_natural_cdf = np.cumsum(histogram_natural)
        # histogram matching via nearest inverse-CDF lookup
        img_hist_match = np.zeros_like(img)
        for x in range(img.shape[0]):
            for y in range(img.shape[1]):
                value = histogram_img_cdf[img[x, y]]
                img_hist_match[x, y] = (np.abs(histogram_natural_cdf-value)).argmin()
        img_hist_match = np.true_divide(img_hist_match, 255)
        return img_hist_match
return img_hist_match | [
"math.sqrt",
"numpy.column_stack",
"numpy.row_stack",
"numpy.rot90",
"scipy.ndimage.interpolation.zoom",
"math.exp",
"os.path.exists",
"cv2.calcHist",
"numpy.reshape",
"math.tan",
"numpy.dot",
"scipy.sparse.csr_matrix",
"scipy.sparse.linalg.spsolve",
"scipy.signal.convolve2d",
"numpy.abs... | [((5062, 5101), 'numpy.zeros', 'np.zeros', (['(kernel_size, kernel_size, 8)'], {}), '((kernel_size, kernel_size, 8))\n', (5070, 5101), True, 'import numpy as np\n'), ((5655, 5674), 'numpy.zeros', 'np.zeros', (['(h, w, 8)'], {}), '((h, w, 8))\n', (5663, 5674), True, 'import numpy as np\n'), ((5926, 5954), 'numpy.zeros_like', 'np.zeros_like', (['response_maps'], {}), '(response_maps)\n', (5939, 5954), True, 'import numpy as np\n'), ((6121, 6149), 'numpy.zeros_like', 'np.zeros_like', (['response_maps'], {}), '(response_maps)\n', (6134, 6149), True, 'import numpy as np\n'), ((6749, 6778), 'cv2.imread', 'cv2.imread', (['self.texture_path'], {}), '(self.texture_path)\n', (6759, 6778), False, 'import cv2\n'), ((6977, 7020), 'scipy.ndimage.interpolation.zoom', 'interpolation.zoom', (['texture', '(ratio, ratio)'], {}), '(texture, (ratio, ratio))\n', (6995, 7020), False, 'from scipy.ndimage import interpolation\n'), ((7300, 7320), 'numpy.zeros', 'np.zeros', (['(nzmax, 1)'], {}), '((nzmax, 1))\n', (7308, 7320), True, 'import numpy as np\n'), ((7333, 7353), 'numpy.zeros', 'np.zeros', (['(nzmax, 1)'], {}), '((nzmax, 1))\n', (7341, 7353), True, 'import numpy as np\n'), ((7366, 7386), 'numpy.zeros', 'np.zeros', (['(nzmax, 1)'], {}), '((nzmax, 1))\n', (7374, 7386), True, 'import numpy as np\n'), ((7584, 7642), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(s.T[0], (i.T[0], j.T[0]))'], {'shape': '(size, size)'}), '((s.T[0], (i.T[0], j.T[0])), shape=(size, size))\n', (7594, 7642), False, 'from scipy.sparse import csr_matrix, spdiags\n'), ((7697, 7717), 'numpy.zeros', 'np.zeros', (['(nzmax, 1)'], {}), '((nzmax, 1))\n', (7705, 7717), True, 'import numpy as np\n'), ((7730, 7750), 'numpy.zeros', 'np.zeros', (['(nzmax, 1)'], {}), '((nzmax, 1))\n', (7738, 7750), True, 'import numpy as np\n'), ((7763, 7783), 'numpy.zeros', 'np.zeros', (['(nzmax, 1)'], {}), '((nzmax, 1))\n', (7771, 7783), True, 'import numpy as np\n'), ((8014, 8072), 'scipy.sparse.csr_matrix', 'csr_matrix', 
(['(s.T[0], (i.T[0], j.T[0]))'], {'shape': '(size, size)'}), '((s.T[0], (i.T[0], j.T[0])), shape=(size, size))\n', (8024, 8072), False, 'from scipy.sparse import csr_matrix, spdiags\n'), ((8364, 8404), 'numpy.dot', 'np.dot', (['texture_sparse.T', 'texture_sparse'], {}), '(texture_sparse.T, texture_sparse)\n', (8370, 8404), True, 'import numpy as np\n'), ((8504, 8521), 'scipy.sparse.linalg.spsolve', 'spsolve', (['mat', 'nat'], {}), '(mat, nat)\n', (8511, 8521), False, 'from scipy.sparse.linalg import spsolve\n'), ((8537, 8596), 'numpy.reshape', 'np.reshape', (['beta1d', '(img.shape[0], img.shape[1])'], {'order': '"""c"""'}), "(beta1d, (img.shape[0], img.shape[1]), order='c')\n", (8547, 8596), True, 'import numpy as np\n'), ((8894, 8941), 'cv2.calcHist', 'cv2.calcHist', (['[img]', '[0]', 'None', '[256]', '[0, 256]'], {}), '([img], [0], None, [256], [0, 256])\n', (8906, 8941), False, 'import cv2\n'), ((9064, 9088), 'numpy.cumsum', 'np.cumsum', (['histogram_img'], {}), '(histogram_img)\n', (9073, 9088), True, 'import numpy as np\n'), ((9132, 9160), 'numpy.zeros_like', 'np.zeros_like', (['histogram_img'], {}), '(histogram_img)\n', (9145, 9160), True, 'import numpy as np\n'), ((9450, 9478), 'numpy.cumsum', 'np.cumsum', (['histogram_natural'], {}), '(histogram_natural)\n', (9459, 9478), True, 'import numpy as np\n'), ((9521, 9539), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (9534, 9539), True, 'import numpy as np\n'), ((9784, 9819), 'numpy.true_divide', 'np.true_divide', (['img_hist_match', '(255)'], {}), '(img_hist_match, 255)\n', (9798, 9819), True, 'import numpy as np\n'), ((1104, 1146), 'math.exp', 'math.exp', (['(-(x - u) ** 2 / (2 * sigma ** 2))'], {}), '(-(x - u) ** 2 / (2 * sigma ** 2))\n', (1112, 1146), False, 'import math\n'), ((1505, 1543), 'numpy.zeros', 'np.zeros', (['(left.shape[0], window_size)'], {}), '((left.shape[0], window_size))\n', (1513, 1543), True, 'import numpy as np\n'), ((1565, 1603), 'numpy.zeros', 'np.zeros', 
(['(left.shape[0], window_size)'], {}), '((left.shape[0], window_size))\n', (1573, 1603), True, 'import numpy as np\n'), ((1810, 1946), 'numpy.column_stack', 'np.column_stack', (['(img_stitch[:, :img_stitch.shape[1] - window_size], aleft + aright,\n img_stitch[:, window_size:img_stitch.shape[1]])'], {}), '((img_stitch[:, :img_stitch.shape[1] - window_size], aleft +\n aright, img_stitch[:, window_size:img_stitch.shape[1]]))\n', (1825, 1946), True, 'import numpy as np\n'), ((2340, 2376), 'numpy.zeros', 'np.zeros', (['(window_size, up.shape[1])'], {}), '((window_size, up.shape[1]))\n', (2348, 2376), True, 'import numpy as np\n'), ((2397, 2433), 'numpy.zeros', 'np.zeros', (['(window_size, up.shape[1])'], {}), '((window_size, up.shape[1]))\n', (2405, 2433), True, 'import numpy as np\n'), ((2634, 2764), 'numpy.row_stack', 'np.row_stack', (['(img_stitch[:img_stitch.shape[0] - window_size, :], aup + adown, img_stitch\n [window_size:img_stitch.shape[0], :])'], {}), '((img_stitch[:img_stitch.shape[0] - window_size, :], aup +\n adown, img_stitch[window_size:img_stitch.shape[0], :]))\n', (2646, 2764), True, 'import numpy as np\n'), ((3686, 3736), 'os.path.join', 'os.path.join', (['self.rootdir', '"""textures/default.jpg"""'], {}), "(self.rootdir, 'textures/default.jpg')\n", (3698, 3736), False, 'import os\n'), ((3835, 3857), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (3850, 3857), False, 'from PIL import Image\n'), ((4201, 4244), 'cv2.cvtColor', 'cv2.cvtColor', (['img_out', 'cv2.COLOR_YCR_CB2BGR'], {}), '(img_out, cv2.COLOR_YCR_CB2BGR)\n', (4213, 4244), False, 'import cv2\n'), ((4301, 4338), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (4313, 4338), False, 'import cv2\n'), ((5739, 5802), 'scipy.signal.convolve2d', 'signal.convolve2d', (['img_gradient', 'line_segments[:, :, i]', '"""same"""'], {}), "(img_gradient, line_segments[:, :, i], 'same')\n", (5756, 5802), False, 'from scipy import 
signal\n'), ((6212, 6286), 'scipy.signal.convolve2d', 'signal.convolve2d', (['magnitude_maps[:, :, i]', 'line_segments[:, :, i]', '"""same"""'], {}), "(magnitude_maps[:, :, i], line_segments[:, :, i], 'same')\n", (6229, 6286), False, 'from scipy import signal\n'), ((6797, 6838), 'cv2.cvtColor', 'cv2.cvtColor', (['texture', 'cv2.COLOR_BGR2GRAY'], {}), '(texture, cv2.COLOR_BGR2GRAY)\n', (6809, 6838), False, 'import cv2\n'), ((757, 785), 'math.exp', 'math.exp', (['(-(256 - x) / sigma)'], {}), '(-(256 - x) / sigma)\n', (765, 785), False, 'import math\n'), ((1070, 1100), 'math.sqrt', 'math.sqrt', (['(2 * math.pi * sigma)'], {}), '(2 * math.pi * sigma)\n', (1079, 1100), False, 'import math\n'), ((3261, 3286), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (3276, 3286), False, 'import os\n'), ((3635, 3663), 'os.path.exists', 'os.path.exists', (['texture_path'], {}), '(texture_path)\n', (3649, 3663), False, 'import os\n'), ((4788, 4834), 'numpy.abs', 'np.abs', (['(img_double[:, :-1] - img_double[:, 1:])'], {}), '(img_double[:, :-1] - img_double[:, 1:])\n', (4794, 4834), True, 'import numpy as np\n'), ((4834, 4850), 'numpy.zeros', 'np.zeros', (['(h, 1)'], {}), '((h, 1))\n', (4842, 4850), True, 'import numpy as np\n'), ((4885, 4931), 'numpy.abs', 'np.abs', (['(img_double[:-1, :] - img_double[1:, :])'], {}), '(img_double[:-1, :] - img_double[1:, :])\n', (4891, 4931), True, 'import numpy as np\n'), ((4931, 4947), 'numpy.zeros', 'np.zeros', (['(1, w)'], {}), '((1, w))\n', (4939, 4947), True, 'import numpy as np\n'), ((4984, 4999), 'numpy.power', 'np.power', (['dx', '(2)'], {}), '(dx, 2)\n', (4992, 4999), True, 'import numpy as np\n'), ((5002, 5017), 'numpy.power', 'np.power', (['dy', '(2)'], {}), '(dy, 2)\n', (5010, 5017), True, 'import numpy as np\n'), ((7931, 7955), 'math.ceil', 'math.ceil', (['((m - 0.1) / 2)'], {}), '((m - 0.1) / 2)\n', (7940, 7955), False, 'import math\n'), ((5468, 5504), 'numpy.rot90', 'np.rot90', (['line_segments[:, :, 7]', 
'(-1)'], {}), '(line_segments[:, :, 7], -1)\n', (5476, 5504), True, 'import numpy as np\n'), ((5574, 5609), 'numpy.rot90', 'np.rot90', (['line_segments[:, :, i]', '(1)'], {}), '(line_segments[:, :, i], 1)\n', (5582, 5609), True, 'import numpy as np\n'), ((7448, 7472), 'math.ceil', 'math.ceil', (['((m + 0.1) / 2)'], {}), '((m + 0.1) / 2)\n', (7457, 7472), False, 'import math\n'), ((7503, 7527), 'math.ceil', 'math.ceil', (['((m - 0.1) / 2)'], {}), '((m - 0.1) / 2)\n', (7512, 7527), False, 'import math\n'), ((8113, 8164), 'numpy.reshape', 'np.reshape', (['texture.T', '(1, texture.size)'], {'order': '"""f"""'}), "(texture.T, (1, texture.size), order='f')\n", (8123, 8164), True, 'import numpy as np\n'), ((8223, 8288), 'numpy.reshape', 'np.reshape', (['img_hist_match.T', '(1, img_hist_match.size)'], {'order': '"""f"""'}), "(img_hist_match.T, (1, img_hist_match.size), order='f')\n", (8233, 8288), True, 'import numpy as np\n'), ((5228, 5253), 'math.tan', 'math.tan', (['(math.pi / 8 * i)'], {}), '(math.pi / 8 * i)\n', (5236, 5253), False, 'import math\n'), ((7848, 7876), 'math.ceil', 'math.ceil', (['((m - 1 + 0.1) / 2)'], {}), '((m - 1 + 0.1) / 2)\n', (7857, 7876), False, 'import math\n'), ((9713, 9750), 'numpy.abs', 'np.abs', (['(histogram_natural_cdf - value)'], {}), '(histogram_natural_cdf - value)\n', (9719, 9750), True, 'import numpy as np\n')] |
from typing import Callable, Sequence, Optional, Tuple
import math
import attr
import cv2 as cv
import numpy as np
import numpy.typing as npt
from vkit.label.type import VPoint, VPointList
from .grid_rendering.type import VImageGrid
from .grid_rendering.grid_creator import create_src_image_grid
from .grid_rendering.interface import PointProjector
from .interface import GeometricDistortionImageGridBased, StateImageGridBased
class Point2dTo3dStrategy:
    '''Strategy interface: lift 2D image-grid points into 3D space for camera projection.'''
    def generate_np_3d_points(self, points: VPointList) -> npt.NDArray:
        '''Return a numpy array of 3D points corresponding to *points*; must be overridden.'''
        raise NotImplementedError()
@attr.define
class CameraModelConfig:
    # Unit vector of the rotation axis (Rodrigues axis-angle representation).
    rotation_unit_vec: Sequence[float]
    # Rotation angle in degrees; CameraModel clips it to [-89, 89] before use.
    rotation_theta: float
    # Principal point as (x, y) or (x, y, z); a trailing 0 is appended when
    # only two components are given. None means "derive later".
    principal_point: Optional[Sequence[float]] = None
    # Focal length in pixels; None means "derive later".
    focal_length: Optional[float] = None
    # Distance from the camera to the principal point; None means "derive later".
    camera_distance: Optional[float] = None
class CameraModel:
    '''Pinhole camera model built from a Rodrigues rotation and a camera-to-principal-point distance.'''

    @staticmethod
    def generate_rotation_vec(rotation_unit_vec, rotation_theta):
        # Rodrigues rotation vector: axis scaled by the angle.
        return rotation_unit_vec * rotation_theta

    @staticmethod
    def generate_translation_vec(
        rotation_vec,
        camera_distance,
        principal_point,
        return_rotation_mat=False,
    ):
        # Place the camera at the given distance along the (rotated) optical
        # axis from the principal point, then express the world origin offset
        # in camera coordinates.
        rot_mat, _ = cv.Rodrigues(rotation_vec)
        cam_to_pp = np.array([0, 0, camera_distance], dtype=np.float32).reshape(-1, 1)
        cam_to_pp_world = np.matmul(rot_mat.transpose(), cam_to_pp)
        cam_to_origin_world = cam_to_pp_world - principal_point
        trans_vec = np.matmul(rot_mat, cam_to_origin_world.reshape(-1, 1))
        if return_rotation_mat:
            return trans_vec, rot_mat
        return trans_vec

    @staticmethod
    def generate_extrinsic_mat(rotation_unit_vec, rotation_theta, camera_distance, principal_point):
        # Extrinsic matrix [R | t].
        rot_vec = CameraModel.generate_rotation_vec(rotation_unit_vec, rotation_theta)
        trans_vec, rot_mat = CameraModel.generate_translation_vec(
            rot_vec,
            camera_distance,
            principal_point,
            return_rotation_mat=True,
        )
        return np.hstack((rot_mat, trans_vec.reshape((-1, 1))))

    @staticmethod
    def generate_intrinsic_mat(focal_length):
        # Intrinsic matrix with the principal point at the origin.
        return np.array(
            [
                [focal_length, 0, 0],
                [0, focal_length, 0],
                [0, 0, 1],
            ],
            dtype=np.float32,
        )

    @staticmethod
    def prep_rotation_unit_vec(rotation_unit_vec):
        return np.asarray(rotation_unit_vec, dtype=np.float32)

    @staticmethod
    def prep_rotation_theta(rotation_theta):
        # Degrees -> radians, clipped to [-89, 89] degrees.
        return np.clip(rotation_theta, -89, 89) / 180 * np.pi

    @staticmethod
    def prep_principal_point(principal_point):
        # Append z = 0 when only (x, y) is given, then shape as a column vector.
        if len(principal_point) == 2:
            principal_point.append(0)
        return np.asarray(principal_point, dtype=np.float32).reshape(-1, 1)

    def __init__(self, config: CameraModelConfig):
        assert config.focal_length
        assert config.camera_distance
        assert config.principal_point

        axis = self.prep_rotation_unit_vec(config.rotation_unit_vec)
        theta = self.prep_rotation_theta(config.rotation_theta)
        self.rotation_vec = self.generate_rotation_vec(axis, theta)

        pp = self.prep_principal_point(list(config.principal_point))
        self.translation_vec = self.generate_translation_vec(
            self.rotation_vec,
            config.camera_distance,
            pp,
        )
        self.intrinsic_mat = self.generate_intrinsic_mat(config.focal_length)

    def project_np_points_from_3d_to_2d(self, np_3d_points):
        # Lens distortion is not modelled: pass zero distortion coefficients.
        projected, _ = cv.projectPoints(
            np_3d_points,
            self.rotation_vec,
            self.translation_vec,
            self.intrinsic_mat,
            np.zeros(5),
        )
        return projected.reshape(-1, 2)
class CameraPointProjector(PointProjector):
    '''Project image-grid points by lifting them to 3D and rendering through a CameraModel.'''

    def __init__(self, point_2d_to_3d_strategy, camera_model_config):
        self.point_2d_to_3d_strategy = point_2d_to_3d_strategy
        self.camera_model = CameraModel(camera_model_config)

    def project_points(self, src_points: VPointList):
        # Lift the 2D points into 3D, then project them back through the camera.
        lifted = self.point_2d_to_3d_strategy.generate_np_3d_points(src_points)
        projected = self.camera_model.project_np_points_from_3d_to_2d(lifted)
        return VPointList.from_np_array(projected)

    def project_point(self, src_point: VPoint):
        # Reuse the batched path for a single point.
        return self.project_points(VPointList.from_point(src_point))[0]
class CameraOperationState(StateImageGridBased):
    """Image-grid state whose point projector is a camera model."""

    @staticmethod
    def complete_camera_model_config(
        height: int,
        width: int,
        src_image_grid: VImageGrid,
        point_2d_to_3d_strategy: Point2dTo3dStrategy,
        camera_model_config: CameraModelConfig,
    ):
        """Fill in unset camera parameters (principal point, focal length,
        camera distance) with sensible defaults derived from the image size
        and the lifted grid points. The caller's config is never mutated.
        """
        config = camera_model_config
        if config.principal_point and config.focal_length and config.camera_distance:
            return config
        # Work on a copy so the caller's config stays untouched.
        config = attr.evolve(config)
        if not config.principal_point:
            config.principal_point = [height // 2, width // 2]
        if not config.focal_length:
            config.focal_length = max(height, width)
        if not config.camera_distance:
            # Initial guess; then push the camera back until every lifted
            # grid point lies in front of the image plane.
            camera_distance = config.focal_length
            extrinsic_mat = CameraModel.generate_extrinsic_mat(
                CameraModel.prep_rotation_unit_vec(config.rotation_unit_vec),
                CameraModel.prep_rotation_theta(config.rotation_theta),
                camera_distance,
                CameraModel.prep_principal_point(list(config.principal_point)),
            )
            intrinsic_mat = CameraModel.generate_intrinsic_mat(config.focal_length)
            np_3d_points = point_2d_to_3d_strategy.generate_np_3d_points(
                src_image_grid.flatten_points
            )
            homogeneous = np.hstack((np_3d_points, np.ones((np_3d_points.shape[0], 1))))
            camera_points = np.matmul(
                intrinsic_mat,
                np.matmul(extrinsic_mat, homogeneous.transpose()),
            )
            # Adjust the camera distance from the depth of the nearest point;
            # add one so at least one point touches the plane.
            pos_zs = camera_points[2]
            delta = pos_zs.min() - camera_distance
            config.camera_distance = camera_distance - delta + 1
        return config

    def __init__(
        self,
        height,
        width,
        grid_size,
        point_2d_to_3d_strategy,
        camera_model_config,
    ):
        src_image_grid = create_src_image_grid(height, width, grid_size)
        camera_model_config = self.complete_camera_model_config(
            height,
            width,
            src_image_grid,
            point_2d_to_3d_strategy,
            camera_model_config,
        )
        point_projector = CameraPointProjector(
            point_2d_to_3d_strategy,
            camera_model_config,
        )
        super().__init__(src_image_grid, point_projector)
@attr.define
class CameraCubicCurveConfig:
    """Parameters for the cubic-curve camera distortion."""
    # Endpoint slopes of the cubic bend, in degrees.
    curve_alpha: float
    curve_beta: float
    # Clockwise, [0, 180]
    curve_direction: float
    # Multiplier applied to the generated z offsets.
    curve_scale: float
    camera_model_config: CameraModelConfig
    # Spacing of the source image grid used for rendering.
    grid_size: int
class CameraCubicCurvePoint2dTo3dStrategy(Point2dTo3dStrategy):
    """Lift 2D points onto a cubic-curved surface.

    The plane is projected along *curve_direction*; the projected coordinate,
    normalized to [0, 1], is fed through a cubic p(t) with p(0) = p(1) = 0,
    p'(0) = tan(curve_alpha) and p'(1) = tan(curve_beta); the (scaled) value
    becomes the z coordinate.
    """

    def __init__(self, height, width, curve_alpha, curve_beta, curve_direction, curve_scale):
        # Plane area.
        self.height = height
        self.width = width
        # Curve endpoint slopes, with angles clamped to +/-80 degrees.
        self.curve_alpha = math.tan(np.clip(curve_alpha, -80, 80) / 180 * np.pi)
        self.curve_beta = math.tan(np.clip(curve_beta, -80, 80) / 180 * np.pi)
        # Plane projection direction (clockwise rotation of the plane).
        self.curve_direction = (curve_direction % 180) / 180 * np.pi
        cos_d = math.cos(self.curve_direction)
        sin_d = math.sin(self.curve_direction)
        self.rotation_mat = np.array(
            [
                [cos_d, sin_d],
                [-sin_d, cos_d],
            ],
            dtype=np.float32,
        )
        # Span of the rotated x coordinate over the plane's four corners,
        # used to normalize projections into [0, 1].
        corners = np.array(
            [
                [0, 0],
                [self.width - 1, 0],
                [self.width - 1, self.height - 1],
                [0, self.height - 1],
            ],
            dtype=np.float32,
        )
        rotated = np.matmul(self.rotation_mat, corners.transpose())
        self.plane_projection_min = rotated[0].min()
        self.plane_projection_range = rotated[0].max() - self.plane_projection_min
        self.curve_scale = curve_scale

    def generate_np_3d_points(self, points: VPointList) -> npt.NDArray:
        np_2d_points = points.to_np_array().astype(np.float32)
        # Project onto the curve direction and normalize to [0, 1].
        rotated = np.matmul(self.rotation_mat, np_2d_points.transpose())
        ratios = (rotated[0] - self.plane_projection_min) / self.plane_projection_range
        # Cubic coefficients for p(t) = (a+b)t^3 - (2a+b)t^2 + a*t.
        poly = np.array([
            self.curve_alpha + self.curve_beta,
            -2 * self.curve_alpha - self.curve_beta,
            self.curve_alpha,
            0,
        ])
        pos_zs = np.polyval(poly, ratios) * self.plane_projection_range * self.curve_scale
        return np.hstack((np_2d_points, pos_zs.reshape((-1, 1))))
class CameraCubicCurveState(CameraOperationState):
    """Image-grid state wiring the cubic-curve strategy into a camera."""

    def __init__(self, config: CameraCubicCurveConfig, shape: Tuple[int, int]):
        height, width = shape
        strategy = CameraCubicCurvePoint2dTo3dStrategy(
            height,
            width,
            config.curve_alpha,
            config.curve_beta,
            config.curve_direction,
            config.curve_scale,
        )
        super().__init__(height, width, config.grid_size, strategy, config.camera_model_config)
# Public distortion entry point: cubic-curve camera warp.
camera_cubic_curve = GeometricDistortionImageGridBased(
    config_cls=CameraCubicCurveConfig,
    state_cls=CameraCubicCurveState,
)
class CameraPlaneLinePoint2dTo3dStrategy(Point2dTo3dStrategy):
    """Lift 2D points by adding a weighted 3D perturbation vector.

    The weight of each point is a function (supplied by the caller) of its
    normalized distance to a reference line defined by *point* and
    *direction*.
    """

    def __init__(
        self,
        height,
        width,
        point: Tuple[float, float],
        direction: float,
        perturb_vec: Tuple[float, float, float],
        alpha: float,
        weights_func: Callable[[npt.NDArray, float], npt.NDArray],
    ):
        # Plane area.
        self.height = height
        self.width = width
        # Line through `point`: a*u + b*v + c = 0 with (a, b) = (sin, -cos).
        self.point = np.array(point, dtype=np.float32)
        direction = (direction % 180) / 180 * np.pi
        cos_theta = np.cos(direction)
        sin_theta = np.sin(direction)
        self.line_params_a_b = np.array([sin_theta, -cos_theta], dtype=np.float32)
        self.line_param_c = -self.point[0] * sin_theta + self.point[1] * cos_theta
        # Diagonal length, used to normalize distances into [0, 1].
        self.distance_max = np.sqrt(height**2 + width**2)
        self.alpha = alpha
        self.weights_func = weights_func
        # Deformation vector applied per point, scaled by its weight.
        self.perturb_vec = np.array(perturb_vec, dtype=np.float32)

    def generate_np_3d_points(self, points: VPointList) -> npt.NDArray:
        np_2d_points = points.to_np_array().astype(np.float32)
        # Point-to-line distances, normalized by the plane diagonal.
        raw = (np_2d_points * self.line_params_a_b).sum(axis=1) + self.line_param_c
        norm_distances = np.abs(raw) / self.distance_max
        weights = self.weights_func(norm_distances, self.alpha)
        # Start on the z = 0 plane, then add the weighted perturbation.
        zeros = np.zeros((np_2d_points.shape[0], 1), dtype=np.float32)
        np_3d_points = np.hstack((np_2d_points, zeros))
        np_3d_points += weights.reshape(-1, 1) * self.perturb_vec
        return np_3d_points
@attr.define
class CameraPlaneLineFoldConfig:
    """Parameters for folding the image plane along a line."""
    # Anchor point of the fold line (coordinate order matches VPointList.to_np_array).
    fold_point: Tuple[float, float]
    # Clockwise, [0, 180]
    fold_direction: float
    # 3D offset applied to points, strongest at the fold line.
    fold_perturb_vec: Tuple[float, float, float]
    # Falloff parameter passed to the weighting function.
    fold_alpha: float
    camera_model_config: CameraModelConfig
    # Spacing of the source image grid used for rendering.
    grid_size: int
class CameraPlaneLineFoldState(CameraOperationState):
    """Image-grid state for folding the plane along a line."""

    @staticmethod
    def weights_func(norm_distances: npt.NDArray, alpha: float):
        # Hyperbolic falloff: 1 at the fold line, decaying with distance.
        return alpha / (norm_distances + alpha)  # type: ignore

    def __init__(self, config: CameraPlaneLineFoldConfig, shape: Tuple[int, int]):
        height, width = shape
        strategy = CameraPlaneLinePoint2dTo3dStrategy(
            height=height,
            width=width,
            point=config.fold_point,
            direction=config.fold_direction,
            perturb_vec=config.fold_perturb_vec,
            alpha=config.fold_alpha,
            weights_func=self.weights_func,
        )
        super().__init__(height, width, config.grid_size, strategy, config.camera_model_config)
# Public distortion entry point: fold the plane along a line.
camera_plane_line_fold = GeometricDistortionImageGridBased(
    config_cls=CameraPlaneLineFoldConfig,
    state_cls=CameraPlaneLineFoldState,
)
@attr.define
class CameraPlaneLineCurveConfig:
    """Parameters for smoothly curving the image plane around a line."""
    # Anchor point of the curve line (coordinate order matches VPointList.to_np_array).
    curve_point: Tuple[float, float]
    # Clockwise, [0, 180]
    curve_direction: float
    # 3D offset applied to points, strongest at the curve line.
    curve_perturb_vec: Tuple[float, float, float]
    # Falloff exponent passed to the weighting function.
    curve_alpha: float
    camera_model_config: CameraModelConfig
    # Spacing of the source image grid used for rendering.
    grid_size: int
class CameraPlaneLineCurveState(CameraOperationState):
    """Image-grid state for curving the plane around a line."""

    @staticmethod
    def weights_func(norm_distances: npt.NDArray, alpha: float):
        # Polynomial falloff: 1 at the line, 0 at the farthest corner.
        return 1 - norm_distances**alpha  # type: ignore

    def __init__(self, config: CameraPlaneLineCurveConfig, shape: Tuple[int, int]):
        height, width = shape
        strategy = CameraPlaneLinePoint2dTo3dStrategy(
            height=height,
            width=width,
            point=config.curve_point,
            direction=config.curve_direction,
            perturb_vec=config.curve_perturb_vec,
            alpha=config.curve_alpha,
            weights_func=self.weights_func,
        )
        super().__init__(height, width, config.grid_size, strategy, config.camera_model_config)
# Public distortion entry point: curve the plane around a line.
camera_plane_line_curve = GeometricDistortionImageGridBased(
    config_cls=CameraPlaneLineCurveConfig,
    state_cls=CameraPlaneLineCurveState,
)
def debug_cubic_curve():
    """Render the cubic-curve distortion on Lenna and dump debug grids."""
    from vkit.opt import get_data_folder
    folder = get_data_folder(__file__)

    from vkit.label.type import VPolygon, VPoint
    from .interface import debug_geometric_distortion

    config = CameraCubicCurveConfig(
        curve_alpha=60,
        curve_beta=-60,
        curve_direction=45,
        curve_scale=1.0,
        camera_model_config=CameraModelConfig(
            # focal_length / camera_distance / principal_point are derived
            # by complete_camera_model_config.
            rotation_unit_vec=[1.0, 0.0, 0.0],
            rotation_theta=0,
        ),
        grid_size=10,
    )
    corner_yxs = [(100, 100), (100, 300), (300, 300), (300, 100)]
    src_polygon = VPolygon(VPointList([VPoint(y=y, x=x) for y, x in corner_yxs]))
    state = debug_geometric_distortion(
        'camera_cubic_curve',
        camera_cubic_curve,
        config,
        src_polygon,
        folder,
        'Lenna.png',
    )
    assert state

    from .grid_rendering.visualization import visualize_image_grid
    for tag, grid in (('src', state.src_image_grid), ('dst', state.dst_image_grid)):
        visualize_image_grid(grid).to_file(f'{folder}/camera_cubic_curve-{tag}-grid.png')
def debug_plane_line_fold():
    """Render the plane-line-fold distortion on Lenna and dump debug grids."""
    from vkit.opt import get_data_folder
    folder = get_data_folder(__file__)

    from vkit.label.type import VPolygon, VPoint
    from .interface import debug_geometric_distortion

    config = CameraPlaneLineFoldConfig(
        fold_point=(300, 300),
        fold_direction=30,
        fold_perturb_vec=(50, 0, 200),
        fold_alpha=0.5,
        camera_model_config=CameraModelConfig(
            rotation_unit_vec=[1.0, 0.0, 0.0],
            rotation_theta=30,
        ),
        grid_size=10,
    )
    corner_yxs = [(100, 100), (100, 300), (300, 300), (300, 100)]
    src_polygon = VPolygon(VPointList([VPoint(y=y, x=x) for y, x in corner_yxs]))
    state = debug_geometric_distortion(
        'camera_plane_line_fold',
        camera_plane_line_fold,
        config,
        src_polygon,
        folder,
        'Lenna.png',
    )
    assert state

    from .grid_rendering.visualization import visualize_image_grid
    for tag, grid in (('src', state.src_image_grid), ('dst', state.dst_image_grid)):
        visualize_image_grid(grid).to_file(f'{folder}/camera_plane_line_fold-{tag}-grid.png')
def debug_plane_line_curve():
    """Render the plane-line-curve distortion on Lenna and dump debug grids."""
    from vkit.opt import get_data_folder
    folder = get_data_folder(__file__)

    from vkit.label.type import VPolygon, VPoint
    from .interface import debug_geometric_distortion

    config = CameraPlaneLineCurveConfig(
        curve_point=(300, 300),
        curve_direction=0,
        curve_perturb_vec=(0, 0, 300),
        curve_alpha=2,
        camera_model_config=CameraModelConfig(
            rotation_unit_vec=[0.0, 1.0, 0.0],
            rotation_theta=85,
        ),
        grid_size=10,
    )
    corner_yxs = [(100, 100), (100, 300), (300, 300), (300, 100)]
    src_polygon = VPolygon(VPointList([VPoint(y=y, x=x) for y, x in corner_yxs]))
    state = debug_geometric_distortion(
        'camera_plane_line_curve',
        camera_plane_line_curve,
        config,
        src_polygon,
        folder,
        'Lenna.png',
    )
    assert state

    from .grid_rendering.visualization import visualize_image_grid
    for tag, grid in (('src', state.src_image_grid), ('dst', state.dst_image_grid)):
        visualize_image_grid(grid).to_file(f'{folder}/camera_plane_line_curve-{tag}-grid.png')
| [
"numpy.clip",
"numpy.sqrt",
"vkit.label.type.VPoint",
"numpy.ones",
"math.cos",
"numpy.asarray",
"attr.evolve",
"numpy.array",
"cv2.Rodrigues",
"numpy.zeros",
"numpy.matmul",
"numpy.polyval",
"vkit.label.type.VPointList.from_np_array",
"numpy.cos",
"numpy.sin",
"vkit.label.type.VPointL... | [((15135, 15160), 'vkit.opt.get_data_folder', 'get_data_folder', (['__file__'], {}), '(__file__)\n', (15150, 15160), False, 'from vkit.opt import get_data_folder\n'), ((16429, 16454), 'vkit.opt.get_data_folder', 'get_data_folder', (['__file__'], {}), '(__file__)\n', (16444, 16454), False, 'from vkit.opt import get_data_folder\n'), ((17707, 17732), 'vkit.opt.get_data_folder', 'get_data_folder', (['__file__'], {}), '(__file__)\n', (17722, 17732), False, 'from vkit.opt import get_data_folder\n'), ((1160, 1186), 'cv2.Rodrigues', 'cv.Rodrigues', (['rotation_vec'], {}), '(rotation_vec)\n', (1172, 1186), True, 'import cv2 as cv\n'), ((1361, 1398), 'numpy.matmul', 'np.matmul', (['rotation_mat_inv', 'c2pp_vec'], {}), '(rotation_mat_inv, c2pp_vec)\n', (1370, 1398), True, 'import numpy as np\n'), ((2382, 2470), 'numpy.array', 'np.array', (['[[focal_length, 0, 0], [0, focal_length, 0], [0, 0, 1]]'], {'dtype': 'np.float32'}), '([[focal_length, 0, 0], [0, focal_length, 0], [0, 0, 1]], dtype=np.\n float32)\n', (2390, 2470), True, 'import numpy as np\n'), ((2649, 2696), 'numpy.asarray', 'np.asarray', (['rotation_unit_vec'], {'dtype': 'np.float32'}), '(rotation_unit_vec, dtype=np.float32)\n', (2659, 2696), True, 'import numpy as np\n'), ((4625, 4667), 'vkit.label.type.VPointList.from_np_array', 'VPointList.from_np_array', (['camera_2d_points'], {}), '(camera_2d_points)\n', (4649, 4667), False, 'from vkit.label.type import VPoint, VPointList\n'), ((5337, 5369), 'attr.evolve', 'attr.evolve', (['camera_model_config'], {}), '(camera_model_config)\n', (5348, 5369), False, 'import attr\n'), ((8804, 8923), 'numpy.array', 'np.array', (['[[0, 0], [self.width - 1, 0], [self.width - 1, self.height - 1], [0, self.\n height - 1]]'], {'dtype': 'np.float32'}), '([[0, 0], [self.width - 1, 0], [self.width - 1, self.height - 1], [\n 0, self.height - 1]], dtype=np.float32)\n', (8812, 8923), True, 'import numpy as np\n'), ((9783, 9896), 'numpy.array', 'np.array', 
(['[self.curve_alpha + self.curve_beta, -2 * self.curve_alpha - self.\n curve_beta, self.curve_alpha, 0]'], {}), '([self.curve_alpha + self.curve_beta, -2 * self.curve_alpha - self.\n curve_beta, self.curve_alpha, 0])\n', (9791, 9896), True, 'import numpy as np\n'), ((9968, 10008), 'numpy.polyval', 'np.polyval', (['poly', 'plane_projected_ratios'], {}), '(poly, plane_projected_ratios)\n', (9978, 10008), True, 'import numpy as np\n'), ((11350, 11383), 'numpy.array', 'np.array', (['point'], {'dtype': 'np.float32'}), '(point, dtype=np.float32)\n', (11358, 11383), True, 'import numpy as np\n'), ((11456, 11473), 'numpy.cos', 'np.cos', (['direction'], {}), '(direction)\n', (11462, 11473), True, 'import numpy as np\n'), ((11494, 11511), 'numpy.sin', 'np.sin', (['direction'], {}), '(direction)\n', (11500, 11511), True, 'import numpy as np\n'), ((11543, 11594), 'numpy.array', 'np.array', (['[sin_theta, -cos_theta]'], {'dtype': 'np.float32'}), '([sin_theta, -cos_theta], dtype=np.float32)\n', (11551, 11594), True, 'import numpy as np\n'), ((11742, 11775), 'numpy.sqrt', 'np.sqrt', (['(height ** 2 + width ** 2)'], {}), '(height ** 2 + width ** 2)\n', (11749, 11775), True, 'import numpy as np\n'), ((11898, 11937), 'numpy.array', 'np.array', (['perturb_vec'], {'dtype': 'np.float32'}), '(perturb_vec, dtype=np.float32)\n', (11906, 11937), True, 'import numpy as np\n'), ((4067, 4078), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (4075, 4078), True, 'import numpy as np\n'), ((6611, 6649), 'numpy.matmul', 'np.matmul', (['intrinsic_mat', 'np_3d_points'], {}), '(intrinsic_mat, np_3d_points)\n', (6620, 6649), True, 'import numpy as np\n'), ((1259, 1310), 'numpy.array', 'np.array', (['[0, 0, camera_distance]'], {'dtype': 'np.float32'}), '([0, 0, camera_distance], dtype=np.float32)\n', (1267, 1310), True, 'import numpy as np\n'), ((2776, 2808), 'numpy.clip', 'np.clip', (['rotation_theta', '(-89)', '(89)'], {}), '(rotation_theta, -89, 89)\n', (2783, 2808), True, 'import numpy as 
np\n'), ((2991, 3036), 'numpy.asarray', 'np.asarray', (['principal_point'], {'dtype': 'np.float32'}), '(principal_point, dtype=np.float32)\n', (3001, 3036), True, 'import numpy as np\n'), ((4752, 4784), 'vkit.label.type.VPointList.from_point', 'VPointList.from_point', (['src_point'], {}), '(src_point)\n', (4773, 4784), False, 'from vkit.label.type import VPoint, VPointList\n'), ((12419, 12473), 'numpy.zeros', 'np.zeros', (['(np_2d_points.shape[0], 1)'], {'dtype': 'np.float32'}), '((np_2d_points.shape[0], 1), dtype=np.float32)\n', (12427, 12473), True, 'import numpy as np\n'), ((15738, 15758), 'vkit.label.type.VPoint', 'VPoint', ([], {'y': '(100)', 'x': '(100)'}), '(y=100, x=100)\n', (15744, 15758), False, 'from vkit.label.type import VPolygon, VPoint\n'), ((15772, 15792), 'vkit.label.type.VPoint', 'VPoint', ([], {'y': '(100)', 'x': '(300)'}), '(y=100, x=300)\n', (15778, 15792), False, 'from vkit.label.type import VPolygon, VPoint\n'), ((15806, 15826), 'vkit.label.type.VPoint', 'VPoint', ([], {'y': '(300)', 'x': '(300)'}), '(y=300, x=300)\n', (15812, 15826), False, 'from vkit.label.type import VPolygon, VPoint\n'), ((15840, 15860), 'vkit.label.type.VPoint', 'VPoint', ([], {'y': '(300)', 'x': '(100)'}), '(y=300, x=100)\n', (15846, 15860), False, 'from vkit.label.type import VPolygon, VPoint\n'), ((16947, 16967), 'vkit.label.type.VPoint', 'VPoint', ([], {'y': '(100)', 'x': '(100)'}), '(y=100, x=100)\n', (16953, 16967), False, 'from vkit.label.type import VPolygon, VPoint\n'), ((16981, 17001), 'vkit.label.type.VPoint', 'VPoint', ([], {'y': '(100)', 'x': '(300)'}), '(y=100, x=300)\n', (16987, 17001), False, 'from vkit.label.type import VPolygon, VPoint\n'), ((17015, 17035), 'vkit.label.type.VPoint', 'VPoint', ([], {'y': '(300)', 'x': '(300)'}), '(y=300, x=300)\n', (17021, 17035), False, 'from vkit.label.type import VPolygon, VPoint\n'), ((17049, 17069), 'vkit.label.type.VPoint', 'VPoint', ([], {'y': '(300)', 'x': '(100)'}), '(y=300, x=100)\n', (17055, 17069), False, 
'from vkit.label.type import VPolygon, VPoint\n'), ((18226, 18246), 'vkit.label.type.VPoint', 'VPoint', ([], {'y': '(100)', 'x': '(100)'}), '(y=100, x=100)\n', (18232, 18246), False, 'from vkit.label.type import VPolygon, VPoint\n'), ((18260, 18280), 'vkit.label.type.VPoint', 'VPoint', ([], {'y': '(100)', 'x': '(300)'}), '(y=100, x=300)\n', (18266, 18280), False, 'from vkit.label.type import VPolygon, VPoint\n'), ((18294, 18314), 'vkit.label.type.VPoint', 'VPoint', ([], {'y': '(300)', 'x': '(300)'}), '(y=300, x=300)\n', (18300, 18314), False, 'from vkit.label.type import VPolygon, VPoint\n'), ((18328, 18348), 'vkit.label.type.VPoint', 'VPoint', ([], {'y': '(300)', 'x': '(100)'}), '(y=300, x=100)\n', (18334, 18348), False, 'from vkit.label.type import VPolygon, VPoint\n'), ((8162, 8191), 'numpy.clip', 'np.clip', (['curve_alpha', '(-80)', '(80)'], {}), '(curve_alpha, -80, 80)\n', (8169, 8191), True, 'import numpy as np\n'), ((8242, 8270), 'numpy.clip', 'np.clip', (['curve_beta', '(-80)', '(80)'], {}), '(curve_beta, -80, 80)\n', (8249, 8270), True, 'import numpy as np\n'), ((8485, 8515), 'math.cos', 'math.cos', (['self.curve_direction'], {}), '(self.curve_direction)\n', (8493, 8515), False, 'import math\n'), ((8537, 8567), 'math.sin', 'math.sin', (['self.curve_direction'], {}), '(self.curve_direction)\n', (8545, 8567), False, 'import math\n'), ((8679, 8709), 'math.cos', 'math.cos', (['self.curve_direction'], {}), '(self.curve_direction)\n', (8687, 8709), False, 'import math\n'), ((8627, 8657), 'math.sin', 'math.sin', (['self.curve_direction'], {}), '(self.curve_direction)\n', (8635, 8657), False, 'import math\n'), ((6519, 6554), 'numpy.ones', 'np.ones', (['(np_3d_points.shape[0], 1)'], {}), '((np_3d_points.shape[0], 1))\n', (6526, 6554), True, 'import numpy as np\n')] |
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
# Find zero standard deviation
def find_zerostd(pos, num_minority):
    """Split off zero-standard-deviation (constant) attributes of *pos*.

    Constant columns are dropped from *pos* (in place) and returned as a
    separate DataFrame of *num_minority* rows filled with the column mean.

    Bug fixed: the original wrapped the whole body in
    ``for i in tqdm(range(100))``; after the first pass dropped the constant
    columns, every later pass found none and overwrote ``df`` with ``None``,
    so the zero-std frame could never be returned. The body now runs once.

    Returns:
        (pos, df): *pos* without constant columns, and *df* with the constant
        columns (or None when there are none).
    """
    std = pos.std()
    mean = pos.mean()
    zero_list = [col for col in pos.columns if std[col] == 0]
    if not zero_list:
        print("Not found zero std.")
        return pos, None
    print("Found zero std! {}".format(zero_list))
    df = pd.DataFrame(np.zeros(shape=(num_minority, len(zero_list))), columns=zero_list)
    # Fill each generated column with the original column's mean value.
    for col in zero_list:
        pos.drop(col, axis=1, inplace=True)
        df[col] = mean[col]
    return pos, df
# Find no correlation and univariate sampling
def no_corr(pos, num_minority):
    """Split off attributes that correlate with nothing else and sample them
    univariately.

    A column qualifies when its strongest absolute correlation with any
    *other* column is below 0.2. Qualifying columns are dropped from *pos*
    (in place) and replaced by *num_minority* draws from a normal
    distribution parameterized per column.

    Bug fixed: the original wrapped the body in ``for i in tqdm(range(100))``;
    after the first pass dropped the no-correlation columns, later passes
    recomputed on the reduced frame and overwrote ``df``, losing the sampled
    columns. The body now runs once.

    Returns:
        (pos, df): *pos* without the independent columns, and *df* with their
        sampled replacements (or None when there are none).
    """
    corr = abs(pos.corr())
    mean_list, var_list, col_list = [], [], []
    for i in range(len(corr.columns)):
        ranked = corr.iloc[:, [i]].sort_values(by=corr.columns[i], ascending=False)
        # ranked row 0 is the self-correlation (1.0); row 1 is the strongest
        # cross-correlation.
        if ranked.iloc[1, 0] < 0.2:
            col = corr.columns[i]
            mean_list.append(pos[col].mean())
            var_list.append(pos[col].var())
            col_list.append(col)
    if not col_list:
        print("Not found no correlation.")
        return pos, None
    print("Found no corr! {}".format(col_list))
    # Univariate normal over-sampling (NOTE: as in the original, the variance
    # is passed as np.random.normal's scale, i.e. treated as a std dev).
    np.random.seed(seed=6)
    samples = [np.random.normal(mean, var, num_minority)
               for mean, var in zip(mean_list, var_list)]
    df = pd.DataFrame(samples).T
    df.columns = col_list
    for col in col_list:
        pos.drop(col, axis=1, inplace=True)
    return pos, df
# Multivariate sampling
def mnd_os(pos, num_minority):
    """Multivariate-normal-distribution over-sampling.

    For every column, find its most strongly correlated partner column, fit a
    bivariate normal (sample means + covariance) to the pair, draw
    *num_minority* samples with a fixed seed, and keep the first marginal as
    the synthetic values for that column.

    Fix: the original wrapped the body in ``for i in tqdm(range(100))`` while
    resetting the RNG seed every pass, so all 100 passes recomputed the exact
    same DataFrame. The redundant loop is removed; the result is identical.

    Returns:
        DataFrame of shape (num_minority, len(pos.columns)) with the same
        column names as *pos*.
    """
    # Absolute correlations; row 0 after sorting is the self-correlation,
    # row 1 is the strongest partner.
    corr = abs(pos.corr())
    pair_col, pair_ind = [], []
    for i in range(len(corr.columns)):
        ranked = corr.iloc[:, [i]].sort_values(by=corr.columns[i], ascending=False)
        pair_col.append(ranked.columns[0])
        pair_ind.append(ranked.index[1])
    # Per-pair sample means and covariance matrices.
    mean_list, cov_list = [], []
    for col, ind in zip(pair_col, pair_ind):
        mean_list.append([pos[col].mean(), pos[ind].mean()])
        cov_list.append(pd.concat([pos[col], pos[ind]], axis=1).cov())
    # Draw synthetic samples (deterministic via the fixed seed).
    np.random.seed(seed=6)
    samples = []
    for mean, cov in zip(mean_list, cov_list):
        mul_x, _ = np.random.multivariate_normal(mean, cov, num_minority).T
        samples.append(mul_x)
    df = pd.DataFrame(samples).T
    df.columns = pos.columns
    return df
def append_data(pos, zero_std, no_corr, name):
    """Merge the sampled frames, tag them as minority (Label=1) and save.

    Writes ./pos_data/<name>_mndo.csv and returns the merged DataFrame.
    """
    merged = pd.concat([pos, zero_std, no_corr], axis=1)
    merged['Label'] = 1
    os.makedirs('./pos_data', exist_ok=True)
    merged.to_csv('./pos_data/{}_mndo.csv'.format(name), index=False)
    print('Generated data is saved in ./pos_data/{}_mndo.csv'.format(name))
    return merged
| [
"numpy.random.normal",
"os.makedirs",
"numpy.random.multivariate_normal",
"numpy.random.seed",
"pandas.DataFrame",
"pandas.concat"
] | [((3809, 3852), 'pandas.concat', 'pd.concat', (['[pos, zero_std, no_corr]'], {'axis': '(1)'}), '([pos, zero_std, no_corr], axis=1)\n', (3818, 3852), True, 'import pandas as pd\n'), ((3879, 3919), 'os.makedirs', 'os.makedirs', (['"""./pos_data"""'], {'exist_ok': '(True)'}), "('./pos_data', exist_ok=True)\n", (3890, 3919), False, 'import os\n'), ((730, 771), 'pandas.DataFrame', 'pd.DataFrame', (['df_index'], {'columns': 'zero_list'}), '(df_index, columns=zero_list)\n', (742, 771), True, 'import pandas as pd\n'), ((1181, 1215), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '[]', 'columns': '[]'}), '(index=[], columns=[])\n', (1193, 1215), True, 'import pandas as pd\n'), ((1994, 2016), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(6)'}), '(seed=6)\n', (2008, 2016), True, 'import numpy as np\n'), ((3438, 3460), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(6)'}), '(seed=6)\n', (3452, 3460), True, 'import numpy as np\n'), ((2088, 2129), 'numpy.random.normal', 'np.random.normal', (['mean', 'var', 'num_minority'], {}), '(mean, var, num_minority)\n', (2104, 2129), True, 'import numpy as np\n'), ((2209, 2226), 'pandas.DataFrame', 'pd.DataFrame', (['tmp'], {}), '(tmp)\n', (2221, 2226), True, 'import pandas as pd\n'), ((3679, 3696), 'pandas.DataFrame', 'pd.DataFrame', (['tmp'], {}), '(tmp)\n', (3691, 3696), True, 'import pandas as pd\n'), ((3539, 3593), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', 'num_minority'], {}), '(mean, cov, num_minority)\n', (3568, 3593), True, 'import numpy as np\n'), ((3307, 3362), 'pandas.concat', 'pd.concat', (['[pos[corr_col[i]], pos[corr_ind[i]]]'], {'axis': '(1)'}), '([pos[corr_col[i]], pos[corr_ind[i]]], axis=1)\n', (3316, 3362), True, 'import pandas as pd\n')] |
import os
import numpy as np
import dataprovider3.emio as emio
# Focused annotation
# Root of the focused-annotation ground truth, relative to the dataset base dir.
data_dir = 'flyem/ground_truth/focused_annotation'

# Per-volume metadata consumed by load_dataset. Common keys:
#   img / msk / seg / seg_d3_b0 / glia: HDF5 file names inside 'dir'.
#   dir: volume subdirectory (mip level and padding encoded in the path).
#   loc: flag copied verbatim into the loaded dataset.
# Optional keys list segment IDs with special handling in load_dataset:
#   lamellae: segment IDs zeroed out of the segmentation.
#   rosetta / esophagus / glia_msk: segment IDs masked out of 'msk'.
data_info = {
    'vol001':{
        'img': 'img.h5',
        'msk': 'msk.h5',
        'seg': 'seg.h5',
        'glia': 'glia.h5',
        'seg_d3_b0': 'seg_d3_b0.h5',
        'dir': 'vol001/mip1/padded_x512_y512_z20',
        'lamellae': [287],
        'loc': True,
    },
    'vol002':{
        'img': 'img.h5',
        'msk': 'msk.h5',
        'seg': 'seg.h5',
        'seg_d3_b0': 'seg_d3_b0.h5',
        'dir': 'vol002/mip1/padded_x512_y512_z20',
        'loc': True,
    },
    'vol003':{
        'img': 'img.h5',
        'msk': 'msk.h5',
        'seg': 'seg.h5',
        'glia': 'glia.h5',
        'seg_d3_b0': 'seg_d3_b0.h5',
        'dir': 'vol003/mip1/padded_x512_y512_z20',
        'loc': True,
    },
    'vol004':{
        'img': 'img.h5',
        'msk': 'msk.h5',
        'seg': 'seg.h5',
        'glia': 'glia.h5',
        'seg_d3_b0': 'seg_d3_b0.h5',
        'dir': 'vol004/mip1/padded_x512_y512_z20',
        'rosetta': [1],
        'loc': True,
    },
    'vol005':{
        'img': 'img.h5',
        'msk': 'msk.h5',
        'seg': 'seg.h5',
        'glia': 'glia.h5',
        'seg_d3_b0': 'seg_d3_b0.h5',
        'dir': 'vol005/mip1/padded_x512_y512_z20',
        'esophagus': [1],
        'loc': True,
    },
    'vol006':{
        'img': 'img.h5',
        'msk': 'msk.h5',
        'seg': 'seg.h5',
        'seg_d3_b0': 'seg_d3_b0.h5',
        'dir': 'vol006/mip1/padded_x512_y512_z20',
        'loc': True,
    },
    'vol007':{
        'img': 'img.h5',
        'msk': 'msk.h5',
        'seg': 'seg.h5',
        'glia': 'glia.h5',
        'seg_d3_b0': 'seg_d3_b0.h5',
        'dir': 'vol007/mip1/padded_x512_y512_z20',
        'loc': True,
    },
    'vol008':{
        'img': 'img.h5',
        'msk': 'msk.h5',
        'seg': 'seg.h5',
        'glia': 'glia.h5',
        'seg_d3_b0': 'seg_d3_b0.h5',
        'dir': 'vol008/mip1/padded_x512_y512_z20',
        'loc': True,
    },
    'vol009':{
        'img': 'img.h5',
        'msk': 'msk.h5',
        'seg': 'seg.h5',
        'glia': 'glia.h5',
        'seg_d3_b0': 'seg_d3_b0.h5',
        'dir': 'vol009/mip1/padded_x512_y512_z20',
        'glia_msk': [52],
        'loc': True,
    },
    'vol010':{
        'img': 'img.h5',
        'msk': 'msk.h5',
        'seg': 'seg.h5',
        'glia': 'glia.h5',
        'seg_d3_b0': 'seg_d3_b0.h5',
        'dir': 'vol010/mip1/padded_x512_y512_z20',
        'loc': True,
    },
    'vol011':{
        'img': 'img.h5',
        'msk': 'msk.h5',
        'seg': 'seg.h5',
        'glia': 'glia.h5',
        'seg_d3_b0': 'seg_d3_b0.h5',
        'dir': 'vol011/mip1/padded_x512_y512_z20',
        'loc': True,
        'lamellae': [52],
    },
}
def load_data(base_dir, data_ids=None, **kwargs):
    """Load the requested volumes (all known volumes when data_ids is None).

    Unknown ids are silently skipped. Extra kwargs go to load_dataset.
    Returns a dict mapping volume id -> dataset dict.
    """
    if data_ids is None:
        data_ids = data_info.keys()
    root = os.path.join(os.path.expanduser(base_dir), data_dir)
    data = {}
    for data_id in data_ids:
        info = data_info.get(data_id)
        if info is not None:
            data[data_id] = load_dataset(root, data_id, info, **kwargs)
    return data
def load_dataset(dpath, tag, info, class_keys=[], **kwargs):
    """Load one volume described by *info* under *dpath*.

    Reads image (scaled to [0, 1] float32), mask (uint8) and segmentation
    (uint32), then applies the volume's special-case fixups. *class_keys*
    must be non-empty; when it contains 'glia' a glia channel is loaded (or
    zero-filled when the volume has none).
    """
    assert len(class_keys) > 0
    vol_dir = os.path.join(dpath, info['dir'])

    def _read(key):
        # Read one HDF5 volume, echoing its path like the original loader.
        fpath = os.path.join(vol_dir, info[key])
        print(fpath)
        return emio.imread(fpath)

    dset = dict()
    dset['img'] = _read('img').astype(np.float32)
    dset['img'] /= 255.0
    dset['msk'] = _read('msk').astype(np.uint8)
    dset['seg'] = _read('seg_d3_b0').astype(np.uint32)
    # Special case: remove lamellae segments from the segmentation.
    if 'lamellae' in info:
        dset['seg'][np.isin(dset['seg'], info['lamellae'])] = 0
    # Optional glia channel.
    if 'glia' in class_keys:
        if 'glia' in info:
            dset['glia'] = _read('glia').astype(np.uint8)
        else:
            dset['glia'] = np.zeros_like(dset['msk'])
    # Mask out listed segments (rosetta / esophagus / glia regions).
    for key in ('rosetta', 'esophagus', 'glia_msk'):
        if key in info:
            dset['msk'][np.isin(dset['seg'], info[key])] = 0
    # Additional info.
    dset['loc'] = info['loc']
    return dset
| [
"os.path.join",
"numpy.isin",
"dataprovider3.emio.imread",
"numpy.zeros_like",
"os.path.expanduser"
] | [((2934, 2962), 'os.path.expanduser', 'os.path.expanduser', (['base_dir'], {}), '(base_dir)\n', (2952, 2962), False, 'import os\n'), ((2975, 3003), 'os.path.join', 'os.path.join', (['base', 'data_dir'], {}), '(base, data_dir)\n', (2987, 3003), False, 'import os\n'), ((3330, 3375), 'os.path.join', 'os.path.join', (['dpath', "info['dir']", "info['img']"], {}), "(dpath, info['dir'], info['img'])\n", (3342, 3375), False, 'import os\n'), ((3498, 3543), 'os.path.join', 'os.path.join', (['dpath', "info['dir']", "info['msk']"], {}), "(dpath, info['dir'], info['msk'])\n", (3510, 3543), False, 'import os\n'), ((3647, 3698), 'os.path.join', 'os.path.join', (['dpath', "info['dir']", "info['seg_d3_b0']"], {}), "(dpath, info['dir'], info['seg_d3_b0'])\n", (3659, 3698), False, 'import os\n'), ((3832, 3870), 'numpy.isin', 'np.isin', (["dset['seg']", "info['lamellae']"], {}), "(dset['seg'], info['lamellae'])\n", (3839, 3870), True, 'import numpy as np\n'), ((4247, 4284), 'numpy.isin', 'np.isin', (["dset['seg']", "info['rosetta']"], {}), "(dset['seg'], info['rosetta'])\n", (4254, 4284), True, 'import numpy as np\n'), ((4357, 4396), 'numpy.isin', 'np.isin', (["dset['seg']", "info['esophagus']"], {}), "(dset['seg'], info['esophagus'])\n", (4364, 4396), True, 'import numpy as np\n'), ((4468, 4506), 'numpy.isin', 'np.isin', (["dset['seg']", "info['glia_msk']"], {}), "(dset['seg'], info['glia_msk'])\n", (4475, 4506), True, 'import numpy as np\n'), ((3411, 3429), 'dataprovider3.emio.imread', 'emio.imread', (['fpath'], {}), '(fpath)\n', (3422, 3429), True, 'import dataprovider3.emio as emio\n'), ((3579, 3597), 'dataprovider3.emio.imread', 'emio.imread', (['fpath'], {}), '(fpath)\n', (3590, 3597), True, 'import dataprovider3.emio as emio\n'), ((3734, 3752), 'dataprovider3.emio.imread', 'emio.imread', (['fpath'], {}), '(fpath)\n', (3745, 3752), True, 'import dataprovider3.emio as emio\n'), ((3988, 4034), 'os.path.join', 'os.path.join', (['dpath', "info['dir']", "info['glia']"], {}), 
"(dpath, info['dir'], info['glia'])\n", (4000, 4034), False, 'import os\n'), ((4164, 4190), 'numpy.zeros_like', 'np.zeros_like', (["dset['msk']"], {}), "(dset['msk'])\n", (4177, 4190), True, 'import numpy as np\n'), ((4087, 4105), 'dataprovider3.emio.imread', 'emio.imread', (['fpath'], {}), '(fpath)\n', (4098, 4105), True, 'import dataprovider3.emio as emio\n')] |
#
# KTH Royal Institute of Technology
# DD2424: Deep Learning in Data Science
# Assignment 3
#
# <NAME> (<EMAIL>)
#
import numpy as np
import pickle
from timeit import default_timer as timer
class Net:
    """Fully-connected feed-forward classifier trained with mini-batch
    gradient descent (momentum, optional batch normalization).

    Data matrices are column-major: one sample per column. ``Ws`` and
    ``bs`` hold one weight matrix / bias vector per layer.
    """
    # ==================== Initialization ====================
    def __init__(self, network_sizes, descent_params, init_theta=True):
        """Build the network.

        network_sizes: layer widths [d_in, h_1, ..., d_out].
        descent_params: dict of training hyper-parameters (see train()).
        init_theta: when True, draw the initial parameters immediately.
        """
        self.network_sizes = network_sizes
        self.n_layers = len(network_sizes)-1
        self.descent_params = descent_params
        # L2 regularization strength (0.0 disables regularization)
        self.lamb = descent_params.get('lambda', 0.0)
        if init_theta:
            self.Ws, self.bs = self._initial_theta()
    def _initial_theta(self):
        """Return (Ws, bs): Gaussian-initialized weights, zero biases."""
        mu = 0.0
        sigma = 0.001
        Ws, bs = [], []
        for i in range(self.n_layers):
            # Layer i maps lhs inputs to rhs outputs
            lhs, rhs = self.network_sizes[i], self.network_sizes[i+1]
            Wi = sigma * np.random.randn(rhs,lhs) + mu
            bi = np.zeros([rhs,1])
            Ws.append(Wi)
            bs.append(bi)
        return Ws, bs
    # ==================== Import/Export ====================
    @classmethod
    def import_model(cls, filepath):
        """Load a pickled Net from *filepath*; raise TypeError otherwise."""
        with open(filepath, 'rb') as f:
            res = pickle.load(f, encoding='bytes')
        if not isinstance(res, Net):
            raise TypeError('File does not exist or is corrupted')
        return res
    def export_model(self, filepath):
        """Pickle this network to *filepath*."""
        with open(filepath, 'wb') as f:
            pickle.dump(self, f)
    # ==================== FW/BW passes ====================
    def _forward(self, X, s_means=None, s_vars=None):
        """Forward pass; returns (Hs, P) where Hs are the layer inputs
        and P the softmax class probabilities.

        Delegates to the batch-normalized pass when enabled; s_means /
        s_vars are only used in that case (running estimates).
        """
        if self._should_batch_normalize():
            _, _, _, Hs, P = self._forward_bn(X, s_means, s_vars)
            return Hs, P
        Hi = X
        si = None
        Hs = []
        for i in range(self.n_layers):
            # Hs[i] is the INPUT of layer i (needed by the backward pass)
            Hs.append(Hi)
            Wi, bi = self.Ws[i], self.bs[i]
            si = Wi @ Hi + bi
            Hi = np.maximum(0.0, si)
        # Softmax on the last pre-activation (no ReLU on the output layer)
        P = self._softmax(si)
        return Hs, P
    def _backward(self, X, Y, P, Hs):
        """Backward pass; returns (grads_W, grads_b) ordered first layer
        to last. Includes the L2 regularization term in grads_W."""
        N = X.shape[1]
        # Gradient of cross-entropy w.r.t. the softmax input
        G = (P - Y)
        grads_W, grads_b = [], []
        for j in range(self.n_layers):
            i = self.n_layers -1 -j # Reversed
            Hi, Wi = Hs[i], self.Ws[i]
            grad_bi = np.mean(G, axis=1).reshape(-1, 1)
            grad_Wi = (G @ Hi.T) / N + (2 * self.lamb * Wi)
            grads_b.append(grad_bi)
            grads_W.append(grad_Wi)
            # Propagate through the weights, then the ReLU mask
            G = (G.T @ Wi).T
            G[Hi <= 0] = 0.0
        # Gradients were collected last-to-first; restore layer order
        grads_W.reverse()
        grads_b.reverse()
        return grads_W, grads_b
    # ==================== FW/BW passes with Batch Normalization ====================
    def _should_batch_normalize(self):
        # Batch normalization is ON by default
        return self.descent_params.get('batch_normalize', True)
    @staticmethod
    def _batch_normalize_fw(s, mean=None, var=None):
        """Normalize *s* per feature; compute mean/var from the batch
        when not provided. Returns (s_norm, mean, var)."""
        if mean is None or var is None:
            eps = 1e-8
            mean = np.mean(s, axis=1).reshape(-1,1)
            var = np.var(s, axis=1).reshape(-1,1) + eps
        s_norm = (s - mean) / (var ** 0.5)
        return s_norm, mean, var
    def _forward_bn(self, X, s_means=None, s_vars=None):
        """Batch-normalized forward pass.

        Returns (ss, s_means, s_vars, Hs, P): raw pre-activations, the
        per-layer means/variances used, layer inputs, and probabilities.
        When s_means/s_vars are given they are used as fixed estimates
        (inference mode); otherwise batch statistics are computed.
        """
        Hi = X
        Hs = []
        ss = []
        use_estimates = True
        if s_means is None or s_vars is None:
            s_means = []
            s_vars = []
            use_estimates = False
        for i in range(self.n_layers):
            Hs.append(Hi)
            Wi, bi = self.Ws[i], self.bs[i]
            si = Wi @ Hi + bi
            # Keep the raw (un-normalized) pre-activation for backprop
            ss.append(si)
            if use_estimates:
                si, mean, var = self._batch_normalize_fw(si, s_means[i], s_vars[i])
            else:
                si, mean, var = self._batch_normalize_fw(si)
                s_means.append(mean)
                s_vars.append(var)
            Hi = np.maximum(0.0, si)
        # NOTE: softmax uses the raw last pre-activation, not its BN form
        P = self._softmax(ss[-1])
        assert np.isfinite(P).all()
        return ss, s_means, s_vars, Hs, P
    @staticmethod
    def _batch_normalize_bw(G, si, mean, var):
        """Backpropagate *G* through the batch-normalization transform
        applied to pre-activation *si* (given its mean/var)."""
        eps = 1e-8
        N = G.shape[1]
        var_eps = var + eps
        si_zero_mean = si - mean
        # Partial derivatives w.r.t. the batch variance and mean
        dVar_f = -0.5 * np.sum(G * (var_eps ** (-3 / 2.0)) * si_zero_mean, axis=1).reshape(-1, 1)
        dMean_f = -np.sum(G * (var_eps ** (-1 / 2.0)), axis=1).reshape(-1, 1)
        return G * (var_eps ** (-1 / 2.0)) + (2.0 / N * dVar_f * si_zero_mean) + dMean_f / N
    def _backward_bn(self, X, Y, P, Hs, ss, s_means, s_vars):
        """Backward pass matching _forward_bn; returns (grads_W, grads_b)
        ordered first layer to last."""
        N = X.shape[1]
        G = (P - Y)
        grads_W, grads_b = [], []
        for j in range(self.n_layers):
            i = self.n_layers - 1 - j # Reversed
            Hi, Wi = Hs[i], self.Ws[i]
            grad_bi = np.mean(G, axis=1).reshape(-1, 1)
            grad_Wi = (G @ Hi.T) / N + (2 * self.lamb * Wi)
            assert np.isfinite(grad_bi).all()
            assert np.isfinite(grad_Wi).all()
            grads_b.append(grad_bi)
            grads_W.append(grad_Wi)
            G = (G.T @ Wi).T
            G[Hi <= 0] = 0.0
            if i > 0:
                # Undo the normalization of the previous layer's output
                si, mean, var = ss[i - 1], s_means[i - 1], s_vars[i - 1]
                assert si.shape == G.shape
                G = self._batch_normalize_bw(G, si, mean, var)
        grads_W.reverse()
        grads_b.reverse()
        return grads_W, grads_b
    # ==================== Cost, Accuracy, Utilities ====================
    @staticmethod
    def _softmax(s, axis=0):
        # NOTE(review): no max-subtraction; can overflow for large |s|
        exp_s = np.exp(s)
        exp_sum = np.sum(exp_s, axis=axis)
        return exp_s / exp_sum
    def _cross_entropy_loss(self, X, Y, s_means=None, s_vars=None):
        """Mean cross-entropy of the predictions on (X, Y one-hot)."""
        N = X.shape[1]
        _, P = self._forward(X, s_means, s_vars)
        loss = -Y * np.log(P)
        return np.sum(loss) / N
    def compute_cost(self, X, Y, s_means=None, s_vars=None):
        """Cross-entropy loss plus the L2 weight penalty."""
        # Regularization term
        L_2 = np.sum([np.sum(Wi ** 2) for Wi in self.Ws])
        # Cross-entropy loss
        ce_loss = self._cross_entropy_loss(X, Y, s_means, s_vars)
        # Sum of both contributions
        return ce_loss + self.lamb * L_2
    def classify(self, X, s_means=None, s_vars=None):
        """Return the predicted class label for each column of X."""
        _, P = self._forward(X, s_means, s_vars)
        return np.argmax(P, axis=0)
    def compute_accuracy(self, X, y, s_means=None, s_vars=None):
        """Fraction of columns of X whose prediction matches label y."""
        y_star = self.classify(X, s_means, s_vars)
        correct = np.sum([y_star == y])
        N = X.shape[1]
        return float(correct) / N
    # ==================== Gradient descent ====================
    def train(self, X, Y, X_test, Y_test, silent=False):
        """Train with mini-batch gradient descent and momentum.

        X, Y: training data / one-hot targets (one sample per column).
        X_test, Y_test: held-out data evaluated every epoch.
        silent: suppress per-epoch console output when True.
        Returns a dict of per-epoch cost/loss/accuracy histories.
        All hyper-parameters come from self.descent_params.
        """
        tick_t = timer()
        params = self.descent_params
        batch_size = params.get('batch_size', 100)
        epochs = params.get('epochs', 40)
        eta = params.get('eta', 0.01)
        gamma = params.get('gamma', 0.0)
        decay_rate = params.get('decay_rate', 1.0)
        plateau_guard = params.get('plateau_guard', None)
        overfitting_guard = params.get('overfitting_guard', None)
        output_folder = params.get('output_folder', None)
        batch_normalize = self._should_batch_normalize()
        N = X.shape[1]
        # Trailing samples beyond the last full batch are dropped
        batches = N // batch_size
        Ws, bs = self.Ws, self.bs
        # Prepare the momentum vectors
        v_W = [np.zeros(a.shape) for a in Ws]
        v_b = [np.zeros(a.shape) for a in bs]
        # Convert Y (one-hot) into a normal label representation
        y = np.argmax(Y, axis=0)
        # Keep track of the performance at each epoch
        costs = [self.compute_cost(X, Y)]
        losses = [self._cross_entropy_loss(X, Y)]
        accuracies = [self.compute_accuracy(X, y)]
        times = []
        speed = []
        test_speed = []
        y_test = np.argmax(Y_test, axis=0)
        test_costs = [self.compute_cost(X_test, Y_test)]
        test_losses = [self._cross_entropy_loss(X_test, Y_test)]
        test_accuracies = [self.compute_accuracy(X_test, y_test)]
        # Exponential moving averages of the BN statistics (for inference)
        s_means_est = None
        s_vars_est = None
        alpha = 0.99
        # For each epoch
        for e in range(1, epochs + 1):
            tick_e = timer()
            # For each mini batch
            for i in range(batches):
                # Extract batch
                i_beg = i * batch_size
                i_end = (i + 1) * batch_size
                X_batch = X[:, i_beg:i_end]
                Y_batch = Y[:, i_beg:i_end]
                # Compute gradients
                if batch_normalize:
                    ss, s_means, s_vars, Hs, P = self._forward_bn(X_batch)
                    grads_W, grads_b = self._backward_bn(X_batch, Y_batch, P, Hs, ss, s_means, s_vars)
                    if s_means_est is None:
                        s_means_est = s_means
                        s_vars_est = s_vars
                    else:
                        s_means_est = [alpha * s_means_est[l] + (1 - alpha) * s_means[l] for l in range(len(s_means))]
                        s_vars_est = [alpha * s_vars_est[l] + (1 - alpha) * s_vars[l] for l in range(len(s_vars))]
                else:
                    Hs, P = self._forward(X_batch)
                    grads_W, grads_b = self._backward(X_batch, Y_batch, P, Hs)
                # Update W and b
                for j in range(len(Ws)):
                    v_W[j] = gamma * v_W[j] + eta * grads_W[j]
                    v_b[j] = gamma * v_b[j] + eta * grads_b[j]
                    Ws[j] -= v_W[j]
                    bs[j] -= v_b[j]
            # Apply the decay rate to eta
            eta *= decay_rate
            # Keep track of the performance at each epoch
            costs.append(self.compute_cost(X, Y, s_means_est, s_vars_est))
            losses.append(self._cross_entropy_loss(X, Y, s_means_est, s_vars_est))
            accuracies.append(self.compute_accuracy(X, y, s_means_est, s_vars_est))
            test_costs.append(self.compute_cost(X_test, Y_test, s_means_est, s_vars_est))
            test_losses.append(self._cross_entropy_loss(X_test, Y_test, s_means_est, s_vars_est))
            test_accuracies.append(self.compute_accuracy(X_test, y_test, s_means_est, s_vars_est))
            # Cost deltas drive the plateau / overfitting heuristics
            dJ = costs[-1] - costs[-2]
            dJ_star = test_costs[-1] - test_costs[-2]
            speed.append(dJ)
            test_speed.append(dJ_star)
            mean_dJ_star = np.mean(test_speed[-2:])
            if output_folder is not None:
                filepath = "{}/model_epoch_{}.pkl".format(output_folder, e)
                self.export_model(filepath)
            if not silent:
                tock_e = timer()
                interval = tock_e - tick_e
                times.append(interval)
                # Remaining time estimated from the last few epoch durations
                rem = (epochs - e) * np.mean(times[-3:])
                print('===> Epoch[{}]: {}s remaining, {} dJ, {} dJ*, {} J, {} J*, acc_v: {}%'.format(e, int(round(rem)), round(dJ, 5), round(dJ_star, 5), round(costs[-1], 5), round(test_costs[-1], 5), round(100.0*test_accuracies[-1], 5)))
            if overfitting_guard is not None and mean_dJ_star >= overfitting_guard:
                print('Overfitting detected, aborting training...')
                break
            if eta > 0.001 and plateau_guard is not None and mean_dJ_star >= plateau_guard:
                if not silent:
                    print('Plateau reached, adjusting eta...')
                eta /= 10.0
        if not silent:
            tock_t = timer()
            print("Done. Took ~{}s".format(round(tock_t - tick_t)))
            best_epoch = np.argmax(test_accuracies)
            best_test_acc = test_accuracies[best_epoch]
            print("Best test accuracy reached at epoch {} ({}%)".format(best_epoch, round(best_test_acc*100.0, 4)))
        return {
            'costs': costs,
            'test_costs': test_costs,
            'losses': losses,
            'test_losses': test_losses,
            'accuracies': accuracies,
            'test_accuracies': test_accuracies,
            'speed': speed,
            'test_speed': test_speed,
            'params': params
        }
| [
"numpy.mean",
"pickle.dump",
"timeit.default_timer",
"numpy.log",
"pickle.load",
"numpy.argmax",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.isfinite",
"numpy.maximum",
"numpy.random.randn",
"numpy.var"
] | [((5369, 5378), 'numpy.exp', 'np.exp', (['s'], {}), '(s)\n', (5375, 5378), True, 'import numpy as np\n'), ((5397, 5421), 'numpy.sum', 'np.sum', (['exp_s'], {'axis': 'axis'}), '(exp_s, axis=axis)\n', (5403, 5421), True, 'import numpy as np\n'), ((6097, 6117), 'numpy.argmax', 'np.argmax', (['P'], {'axis': '(0)'}), '(P, axis=0)\n', (6106, 6117), True, 'import numpy as np\n'), ((6253, 6274), 'numpy.sum', 'np.sum', (['[y_star == y]'], {}), '([y_star == y])\n', (6259, 6274), True, 'import numpy as np\n'), ((6475, 6482), 'timeit.default_timer', 'timer', ([], {}), '()\n', (6480, 6482), True, 'from timeit import default_timer as timer\n'), ((7285, 7305), 'numpy.argmax', 'np.argmax', (['Y'], {'axis': '(0)'}), '(Y, axis=0)\n', (7294, 7305), True, 'import numpy as np\n'), ((7584, 7609), 'numpy.argmax', 'np.argmax', (['Y_test'], {'axis': '(0)'}), '(Y_test, axis=0)\n', (7593, 7609), True, 'import numpy as np\n'), ((879, 897), 'numpy.zeros', 'np.zeros', (['[rhs, 1]'], {}), '([rhs, 1])\n', (887, 897), True, 'import numpy as np\n'), ((1148, 1180), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (1159, 1180), False, 'import pickle\n'), ((1395, 1415), 'pickle.dump', 'pickle.dump', (['self', 'f'], {}), '(self, f)\n', (1406, 1415), False, 'import pickle\n'), ((1876, 1895), 'numpy.maximum', 'np.maximum', (['(0.0)', 'si'], {}), '(0.0, si)\n', (1886, 1895), True, 'import numpy as np\n'), ((3755, 3774), 'numpy.maximum', 'np.maximum', (['(0.0)', 'si'], {}), '(0.0, si)\n', (3765, 3774), True, 'import numpy as np\n'), ((5614, 5623), 'numpy.log', 'np.log', (['P'], {}), '(P)\n', (5620, 5623), True, 'import numpy as np\n'), ((5639, 5651), 'numpy.sum', 'np.sum', (['loss'], {}), '(loss)\n', (5645, 5651), True, 'import numpy as np\n'), ((7130, 7147), 'numpy.zeros', 'np.zeros', (['a.shape'], {}), '(a.shape)\n', (7138, 7147), True, 'import numpy as np\n'), ((7176, 7193), 'numpy.zeros', 'np.zeros', (['a.shape'], {}), '(a.shape)\n', (7184, 7193), True, 
'import numpy as np\n'), ((7960, 7967), 'timeit.default_timer', 'timer', ([], {}), '()\n', (7965, 7967), True, 'from timeit import default_timer as timer\n'), ((10167, 10191), 'numpy.mean', 'np.mean', (['test_speed[-2:]'], {}), '(test_speed[-2:])\n', (10174, 10191), True, 'import numpy as np\n'), ((11229, 11236), 'timeit.default_timer', 'timer', ([], {}), '()\n', (11234, 11236), True, 'from timeit import default_timer as timer\n'), ((11330, 11356), 'numpy.argmax', 'np.argmax', (['test_accuracies'], {}), '(test_accuracies)\n', (11339, 11356), True, 'import numpy as np\n'), ((3825, 3839), 'numpy.isfinite', 'np.isfinite', (['P'], {}), '(P)\n', (3836, 3839), True, 'import numpy as np\n'), ((5770, 5785), 'numpy.sum', 'np.sum', (['(Wi ** 2)'], {}), '(Wi ** 2)\n', (5776, 5785), True, 'import numpy as np\n'), ((10408, 10415), 'timeit.default_timer', 'timer', ([], {}), '()\n', (10413, 10415), True, 'from timeit import default_timer as timer\n'), ((832, 857), 'numpy.random.randn', 'np.random.randn', (['rhs', 'lhs'], {}), '(rhs, lhs)\n', (847, 857), True, 'import numpy as np\n'), ((2215, 2233), 'numpy.mean', 'np.mean', (['G'], {'axis': '(1)'}), '(G, axis=1)\n', (2222, 2233), True, 'import numpy as np\n'), ((2873, 2891), 'numpy.mean', 'np.mean', (['s'], {'axis': '(1)'}), '(s, axis=1)\n', (2880, 2891), True, 'import numpy as np\n'), ((4083, 4139), 'numpy.sum', 'np.sum', (['(G * var_eps ** (-3 / 2.0) * si_zero_mean)'], {'axis': '(1)'}), '(G * var_eps ** (-3 / 2.0) * si_zero_mean, axis=1)\n', (4089, 4139), True, 'import numpy as np\n'), ((4176, 4217), 'numpy.sum', 'np.sum', (['(G * var_eps ** (-1 / 2.0))'], {'axis': '(1)'}), '(G * var_eps ** (-1 / 2.0), axis=1)\n', (4182, 4217), True, 'import numpy as np\n'), ((4622, 4640), 'numpy.mean', 'np.mean', (['G'], {'axis': '(1)'}), '(G, axis=1)\n', (4629, 4640), True, 'import numpy as np\n'), ((4736, 4756), 'numpy.isfinite', 'np.isfinite', (['grad_bi'], {}), '(grad_bi)\n', (4747, 4756), True, 'import numpy as np\n'), ((4782, 4802), 
'numpy.isfinite', 'np.isfinite', (['grad_Wi'], {}), '(grad_Wi)\n', (4793, 4802), True, 'import numpy as np\n'), ((10535, 10554), 'numpy.mean', 'np.mean', (['times[-3:]'], {}), '(times[-3:])\n', (10542, 10554), True, 'import numpy as np\n'), ((2924, 2941), 'numpy.var', 'np.var', (['s'], {'axis': '(1)'}), '(s, axis=1)\n', (2930, 2941), True, 'import numpy as np\n')] |
import numpy as np
import pytest
import pygeos
from pygeos import Geometry, GEOSException
from pygeos.testing import assert_geometries_equal
from .common import (
all_types,
empty,
empty_line_string,
empty_point,
empty_polygon,
line_string,
multi_point,
point,
point_z,
)
CONSTRUCTIVE_NO_ARGS = (
pygeos.boundary,
pygeos.centroid,
pygeos.convex_hull,
pygeos.envelope,
pygeos.extract_unique_points,
pygeos.normalize,
pygeos.point_on_surface,
)
CONSTRUCTIVE_FLOAT_ARG = (
pygeos.buffer,
pygeos.offset_curve,
pygeos.delaunay_triangles,
pygeos.simplify,
pygeos.voronoi_polygons,
)
@pytest.mark.parametrize("geometry", all_types)
@pytest.mark.parametrize("func", CONSTRUCTIVE_NO_ARGS)
def test_no_args_array(geometry, func):
    """No-argument constructive ops map elementwise over geometry arrays."""
    result = func([geometry, geometry])
    assert result.shape == (2,)
    assert result[0] is None or isinstance(result[0], Geometry)
@pytest.mark.parametrize("geometry", all_types)
@pytest.mark.parametrize("func", CONSTRUCTIVE_FLOAT_ARG)
def test_float_arg_array(geometry, func):
    """Constructive ops with a float parameter map elementwise over arrays."""
    is_linear = pygeos.get_type_id(geometry) in [1, 2]
    if func is pygeos.offset_curve and not is_linear:
        # offset_curve only supports (linear) linestring-like input
        with pytest.raises(GEOSException, match="only accept linestrings"):
            func([geometry, geometry], 0.0)
        return
    result = func([geometry, geometry], 0.0)
    assert result.shape == (2,)
    assert isinstance(result[0], Geometry)
@pytest.mark.parametrize("geometry", all_types)
@pytest.mark.parametrize("reference", all_types)
def test_snap_array(geometry, reference):
    """snap broadcasts over parallel arrays of geometries and references."""
    result = pygeos.snap([geometry] * 2, [reference] * 2, tolerance=1.0)
    assert result.shape == (2,)
    assert isinstance(result[0], Geometry)
@pytest.mark.parametrize("func", CONSTRUCTIVE_NO_ARGS)
def test_no_args_missing(func):
    """Missing (None) input propagates to a None result."""
    assert func(None) is None
@pytest.mark.parametrize("func", CONSTRUCTIVE_FLOAT_ARG)
def test_float_arg_missing(func):
    """Missing (None) geometry propagates to a None result."""
    assert func(None, 1.0) is None
@pytest.mark.parametrize("geometry", all_types)
@pytest.mark.parametrize("func", CONSTRUCTIVE_FLOAT_ARG)
def test_float_arg_nan(geometry, func):
    """A NaN float parameter yields a missing (None) result."""
    assert func(geometry, float("nan")) is None
def test_buffer_cap_style_invalid():
    """An unknown cap_style name is rejected with ValueError."""
    msg = "'invalid' is not a valid option"
    with pytest.raises(ValueError, match=msg):
        pygeos.buffer(point, 1, cap_style="invalid")
def test_buffer_join_style_invalid():
    """An unknown join_style name is rejected with ValueError."""
    msg = "'invalid' is not a valid option"
    with pytest.raises(ValueError, match=msg):
        pygeos.buffer(point, 1, join_style="invalid")
def test_snap_none():
    """Snapping a missing geometry returns None."""
    assert pygeos.snap(None, point, tolerance=1.0) is None
@pytest.mark.parametrize("geometry", all_types)
def test_snap_nan_float(geometry):
    """A NaN tolerance yields a missing (None) result."""
    assert pygeos.snap(geometry, point, tolerance=np.nan) is None
@pytest.mark.skipif(pygeos.geos_version < (3, 8, 0), reason="GEOS < 3.8")
def test_build_area_none():
    """build_area propagates missing (None) input."""
    assert pygeos.build_area(None) is None
@pytest.mark.skipif(pygeos.geos_version < (3, 8, 0), reason="GEOS < 3.8")
@pytest.mark.parametrize(
    "geom,expected",
    [
        (point, empty),  # a point has no area
        (line_string, empty),  # a line string has no area
        # geometry collection of two polygons are combined into one
        (
            Geometry(
                "GEOMETRYCOLLECTION(POLYGON((0 0, 3 0, 3 3, 0 3, 0 0)), POLYGON((1 1, 1 2, 2 2, 1 1)))"
            ),
            Geometry("POLYGON ((0 0, 0 3, 3 3, 3 0, 0 0), (1 1, 2 2, 1 2, 1 1))"),
        ),
        (empty, empty),
        ([empty], [empty]),
    ],
)
def test_build_area(geom, expected):
    """build_area collapses the input into the areal geometry it covers."""
    actual = pygeos.build_area(geom)
    assert actual is not expected  # a new object is returned, never the input
    assert actual == expected
@pytest.mark.skipif(pygeos.geos_version < (3, 8, 0), reason="GEOS < 3.8")
def test_make_valid_none():
    """make_valid propagates missing (None) input."""
    assert pygeos.make_valid(None) is None
@pytest.mark.skipif(pygeos.geos_version < (3, 8, 0), reason="GEOS < 3.8")
@pytest.mark.parametrize(
    "geom,expected",
    [
        (point, point),  # a valid geometry stays the same (but is copied)
        # an L shaped polygon without area is converted to a multilinestring
        (
            Geometry("POLYGON((0 0, 1 1, 1 2, 1 1, 0 0))"),
            Geometry("MULTILINESTRING ((1 1, 1 2), (0 0, 1 1))"),
        ),
        # a polygon with self-intersection (bowtie) is converted into polygons
        (
            Geometry("POLYGON((0 0, 2 2, 2 0, 0 2, 0 0))"),
            Geometry("MULTIPOLYGON (((1 1, 2 2, 2 0, 1 1)), ((0 0, 0 2, 1 1, 0 0)))"),
        ),
        (empty, empty),
        ([empty], [empty]),
    ],
)
def test_make_valid(geom, expected):
    """make_valid repairs invalid geometries into valid equivalents."""
    actual = pygeos.make_valid(geom)
    assert actual is not expected  # a new object is returned, never the input
    # normalize needed to handle variation in output across GEOS versions
    assert pygeos.normalize(actual) == expected
@pytest.mark.skipif(pygeos.geos_version < (3, 8, 0), reason="GEOS < 3.8")
@pytest.mark.parametrize(
    "geom,expected",
    [
        (all_types, all_types),
        # first polygon is valid, second polygon has self-intersection
        (
            [
                Geometry("POLYGON((0 0, 2 2, 0 2, 0 0))"),
                Geometry("POLYGON((0 0, 2 2, 2 0, 0 2, 0 0))"),
            ],
            [
                Geometry("POLYGON((0 0, 2 2, 0 2, 0 0))"),
                Geometry(
                    "MULTIPOLYGON (((1 1, 0 0, 0 2, 1 1)), ((1 1, 2 2, 2 0, 1 1)))"
                ),
            ],
        ),
        ([point, None, empty], [point, None, empty]),
    ],
)
def test_make_valid_1d(geom, expected):
    """make_valid maps elementwise over 1-D arrays, keeping None/empty."""
    actual = pygeos.make_valid(geom)
    # normalize needed to handle variation in output across GEOS versions
    assert np.all(pygeos.normalize(actual) == pygeos.normalize(expected))
@pytest.mark.parametrize(
    "geom,expected",
    [
        (point, point),  # a point is always in normalized form
        # order coordinates of linestrings and parts of multi-linestring
        (
            Geometry("MULTILINESTRING ((1 1, 0 0), (1 1, 1 2))"),
            Geometry("MULTILINESTRING ((1 1, 1 2), (0 0, 1 1))"),
        ),
    ],
)
def test_normalize(geom, expected):
    """normalize puts geometries into their canonical GEOS form."""
    actual = pygeos.normalize(geom)
    assert actual == expected
def test_offset_curve_empty():
    """Offsetting an empty linestring yields an empty result."""
    assert pygeos.is_empty(pygeos.offset_curve(empty_line_string, 2.0))
def test_offset_curve_distance_array():
    """A distance array is applied element-by-element."""
    distances = [-2.0, -3.0]
    result = pygeos.offset_curve([line_string, line_string], distances)
    for got, dist in zip(result, distances):
        assert got == pygeos.offset_curve(line_string, dist)
def test_offset_curve_kwargs():
    """Styling kwargs actually influence the generated curve."""
    styled = pygeos.offset_curve(
        line_string, -2.0, quadsegs=2, join_style="mitre", mitre_limit=2.0
    )
    default = pygeos.offset_curve(line_string, -2.0)
    assert styled != default
def test_offset_curve_non_scalar_kwargs():
    """Keyword parameters must be scalars, not arrays or sequences."""
    msg = "only accepts scalar values"
    lines = [line_string, line_string]
    with pytest.raises(TypeError, match=msg):
        pygeos.offset_curve(lines, 1, quadsegs=np.array([8, 9]))
    with pytest.raises(TypeError, match=msg):
        pygeos.offset_curve(lines, 1, join_style=["round", "bevel"])
    with pytest.raises(TypeError, match=msg):
        pygeos.offset_curve(lines, 1, mitre_limit=[5.0, 6.0])
def test_offset_curve_join_style_invalid():
    """An unknown join_style name is rejected with ValueError."""
    msg = "'invalid' is not a valid option"
    with pytest.raises(ValueError, match=msg):
        pygeos.offset_curve(line_string, 1.0, join_style="invalid")
@pytest.mark.skipif(pygeos.geos_version < (3, 7, 0), reason="GEOS < 3.7")
@pytest.mark.parametrize(
    "geom,expected",
    [
        (
            pygeos.Geometry("LINESTRING (0 0, 1 2)"),
            pygeos.Geometry("LINESTRING (1 2, 0 0)"),
        ),
        (
            pygeos.Geometry("LINEARRING (0 0, 1 2, 1 3, 0 0)"),
            pygeos.Geometry("LINEARRING (0 0, 1 3, 1 2, 0 0)"),
        ),
        (
            pygeos.Geometry("POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))"),
            pygeos.Geometry("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))"),
        ),
        (
            pygeos.Geometry(
                "POLYGON((0 0, 10 0, 10 10, 0 10, 0 0), (2 2, 2 4, 4 4, 4 2, 2 2))"
            ),
            pygeos.Geometry(
                "POLYGON((0 0, 0 10, 10 10, 10 0, 0 0), (2 2, 4 2, 4 4, 2 4, 2 2))"
            ),
        ),
        pytest.param(
            pygeos.Geometry("MULTILINESTRING ((0 0, 1 2), (3 3, 4 4))"),
            pygeos.Geometry("MULTILINESTRING ((1 2, 0 0), (4 4, 3 3))"),
            marks=pytest.mark.skipif(
                pygeos.geos_version < (3, 8, 1), reason="GEOS < 3.8.1"
            ),
        ),
        (
            pygeos.Geometry(
                "MULTIPOLYGON (((0 0, 1 0, 1 1, 0 1, 0 0)), ((2 2, 2 3, 3 3, 3 2, 2 2)))"
            ),
            pygeos.Geometry(
                "MULTIPOLYGON (((0 0, 0 1, 1 1, 1 0, 0 0)), ((2 2, 3 2, 3 3, 2 3, 2 2)))"
            ),
        ),
        # points are unchanged
        (point, point),
        (point_z, point_z),
        (multi_point, multi_point),
        # empty geometries are unchanged
        (empty_point, empty_point),
        (empty_line_string, empty_line_string),
        (empty, empty),
        (empty_polygon, empty_polygon),
    ],
)
def test_reverse(geom, expected):
    """reverse flips coordinate order; points and empties are unchanged."""
    assert_geometries_equal(pygeos.reverse(geom), expected)
@pytest.mark.skipif(pygeos.geos_version < (3, 7, 0), reason="GEOS < 3.7")
def test_reverse_none():
    """reverse propagates missing geometries, scalar and in arrays."""
    assert pygeos.reverse(None) is None
    assert pygeos.reverse([None]).tolist() == [None]
    square_cw = pygeos.Geometry("POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))")
    square_ccw = pygeos.Geometry("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))")
    result = pygeos.reverse([None, square_cw])
    assert result[0] is None
    assert_geometries_equal(result[1], square_ccw)
@pytest.mark.skipif(pygeos.geos_version < (3, 7, 0), reason="GEOS < 3.7")
@pytest.mark.parametrize("geom", ["Not a geometry", 1])
def test_reverse_invalid_type(geom):
    """Non-geometry input raises TypeError."""
    msg = "One of the arguments is of incorrect type"
    with pytest.raises(TypeError, match=msg):
        pygeos.reverse(geom)
@pytest.mark.parametrize(
    "geom,expected",
    [
        # Point outside
        ("POINT (0 0)", "GEOMETRYCOLLECTION EMPTY"),
        # Point inside
        ("POINT (15 15)", "POINT (15 15)"),
        # Point on boundary
        ("POINT (15 10)", "GEOMETRYCOLLECTION EMPTY"),
        # Line outside
        ("LINESTRING (0 0, -5 5)", "GEOMETRYCOLLECTION EMPTY"),
        # Line inside
        ("LINESTRING (15 15, 16 15)", "LINESTRING (15 15, 16 15)"),
        # Line on boundary
        ("LINESTRING (10 15, 10 10, 15 10)", "GEOMETRYCOLLECTION EMPTY"),
        # Line splitting rectangle
        ("LINESTRING (10 5, 25 20)", "LINESTRING (15 10, 20 15)"),
    ],
)
def test_clip_by_rect(geom, expected):
    """clip_by_rect keeps only the part strictly inside the rectangle."""
    geom, expected = pygeos.Geometry(geom), pygeos.Geometry(expected)
    actual = pygeos.clip_by_rect(geom, 10, 10, 20, 20)
    assert_geometries_equal(actual, expected)
@pytest.mark.parametrize(
    "geom, rect, expected",
    [
        # Polygon hole (CCW) fully on rectangle boundary"""
        (
            "POLYGON ((0 0, 0 30, 30 30, 30 0, 0 0), (10 10, 20 10, 20 20, 10 20, 10 10))",
            (10, 10, 20, 20),
            "GEOMETRYCOLLECTION EMPTY",
        ),
        # Polygon hole (CW) fully on rectangle boundary"""
        (
            "POLYGON ((0 0, 0 30, 30 30, 30 0, 0 0), (10 10, 10 20, 20 20, 20 10, 10 10))",
            (10, 10, 20, 20),
            "GEOMETRYCOLLECTION EMPTY",
        ),
        # Polygon fully within rectangle"""
        (
            "POLYGON ((1 1, 1 30, 30 30, 30 1, 1 1), (10 10, 20 10, 20 20, 10 20, 10 10))",
            (0, 0, 40, 40),
            "POLYGON ((1 1, 1 30, 30 30, 30 1, 1 1), (10 10, 20 10, 20 20, 10 20, 10 10))",
        ),
        # Polygon overlapping rectangle
        (
            "POLYGON ((0 0, 0 30, 30 30, 30 0, 0 0), (10 10, 20 10, 20 20, 10 20, 10 10))",
            (5, 5, 15, 15),
            "POLYGON ((5 5, 5 15, 10 15, 10 10, 15 10, 15 5, 5 5))",
        ),
    ],
)
def test_clip_by_rect_polygon(geom, rect, expected):
    """clip_by_rect handles polygons with holes relative to the rectangle."""
    geom, expected = pygeos.Geometry(geom), pygeos.Geometry(expected)
    actual = pygeos.clip_by_rect(geom, *rect)
    assert_geometries_equal(actual, expected)
@pytest.mark.parametrize("geometry", all_types)
def test_clip_by_rect_array(geometry):
    """clip_by_rect maps elementwise over an array of geometries."""
    result = pygeos.clip_by_rect([geometry, geometry], 0.0, 0.0, 1.0, 1.0)
    assert result.shape == (2,)
    assert result[0] is None or isinstance(result[0], Geometry)
def test_clip_by_rect_missing():
    """clip_by_rect propagates missing (None) input."""
    assert pygeos.clip_by_rect(None, 0, 0, 1, 1) is None
@pytest.mark.parametrize("geom", [empty, empty_line_string, empty_polygon])
def test_clip_by_rect_empty(geom):
    """Clipping an empty geometry yields an empty collection."""
    # TODO empty point
    result = pygeos.clip_by_rect(geom, 0, 0, 1, 1)
    assert result == Geometry("GEOMETRYCOLLECTION EMPTY")
def test_clip_by_rect_non_scalar_kwargs():
    """The rectangle bounds must be scalars, not arrays."""
    msg = "only accepts scalar values"
    with pytest.raises(TypeError, match=msg):
        pygeos.clip_by_rect([line_string, line_string], 0, 0, 1, np.array([0, 1]))
def test_polygonize():
    """polygonize forms polygons from noded lines, ignoring points/None."""
    inputs = [
        pygeos.Geometry("LINESTRING (0 0, 1 1)"),
        pygeos.Geometry("LINESTRING (0 0, 0 1)"),
        pygeos.Geometry("LINESTRING (0 1, 1 1)"),
        pygeos.Geometry("LINESTRING (1 1, 1 0)"),
        pygeos.Geometry("LINESTRING (1 0, 0 0)"),
        pygeos.Geometry("LINESTRING (5 5, 6 6)"),
        pygeos.Geometry("POINT (0 0)"),
        None,
    ]
    result = pygeos.polygonize(inputs)
    # The result is a GeometryCollection (type id 7)
    assert pygeos.get_type_id(result) == 7
    assert result == pygeos.Geometry(
        "GEOMETRYCOLLECTION (POLYGON ((0 0, 1 1, 1 0, 0 0)), POLYGON ((1 1, 0 0, 0 1, 1 1)))"
    )
def test_polygonize_array():
    """polygonize reduces over the last axis of geometry arrays."""
    lines = [
        pygeos.Geometry("LINESTRING (0 0, 1 1)"),
        pygeos.Geometry("LINESTRING (0 0, 0 1)"),
        pygeos.Geometry("LINESTRING (0 1, 1 1)"),
    ]
    expected = pygeos.Geometry("GEOMETRYCOLLECTION (POLYGON ((1 1, 0 0, 0 1, 1 1)))")
    # 1-D input collapses to a single scalar geometry
    scalar = pygeos.polygonize(np.array(lines))
    assert isinstance(scalar, pygeos.Geometry)
    assert scalar == expected
    # 2-D input collapses to a 1-D array
    one_d = pygeos.polygonize(np.array([lines]))
    assert isinstance(one_d, np.ndarray)
    assert one_d.shape == (1,)
    assert one_d[0] == expected
    two_rows = np.array([lines, lines])
    assert two_rows.shape == (2, 3)
    result = pygeos.polygonize(two_rows)
    assert isinstance(result, np.ndarray)
    assert result.shape == (2,)
    assert result[0] == expected
    assert result[1] == expected
    # 3-D input collapses to a 2-D array
    cube = np.array([[lines, lines]] * 3)
    assert cube.shape == (3, 2, 3)
    result = pygeos.polygonize(cube)
    assert isinstance(result, np.ndarray)
    assert result.shape == (3, 2)
    for collection in result.flatten():
        assert collection == expected
@pytest.mark.skipif(
    np.__version__ < "1.15",
    reason="axis keyword for generalized ufunc introduced in np 1.15",
)
def test_polygonize_array_axis():
    """The axis keyword selects which dimension is reduced."""
    lines = [
        pygeos.Geometry("LINESTRING (0 0, 1 1)"),
        pygeos.Geometry("LINESTRING (0 0, 0 1)"),
        pygeos.Geometry("LINESTRING (0 1, 1 1)"),
    ]
    arr = np.array([lines, lines])  # shape (2, 3)
    assert pygeos.polygonize(arr, axis=1).shape == (2,)
    assert pygeos.polygonize(arr, axis=0).shape == (3,)
def test_polygonize_missing():
    """An all-missing input polygonizes to an empty collection."""
    result = pygeos.polygonize([None, None])
    assert result == pygeos.Geometry("GEOMETRYCOLLECTION EMPTY")
def test_polygonize_full():
    """polygonize_full returns (polygons, cuts, dangles, invalid)."""
    inputs = [
        None,
        pygeos.Geometry("LINESTRING (0 0, 1 1)"),
        pygeos.Geometry("LINESTRING (0 0, 0 1)"),
        pygeos.Geometry("LINESTRING (0 1, 1 1)"),
        pygeos.Geometry("LINESTRING (1 1, 1 0)"),
        None,
        pygeos.Geometry("LINESTRING (1 0, 0 0)"),
        pygeos.Geometry("LINESTRING (5 5, 6 6)"),
        pygeos.Geometry("LINESTRING (1 1, 100 100)"),
        pygeos.Geometry("POINT (0 0)"),
        None,
    ]
    result = pygeos.polygonize_full(inputs)
    assert len(result) == 4
    # Every output is a GeometryCollection (type id 7)
    for collection in result:
        assert pygeos.get_type_id(collection) == 7
    polygons, cuts, dangles, invalid = result
    assert polygons == pygeos.Geometry(
        "GEOMETRYCOLLECTION (POLYGON ((0 0, 1 1, 1 0, 0 0)), POLYGON ((1 1, 0 0, 0 1, 1 1)))"
    )
    assert cuts == pygeos.Geometry("GEOMETRYCOLLECTION EMPTY")
    assert dangles == pygeos.Geometry(
        "GEOMETRYCOLLECTION (LINESTRING (1 1, 100 100), LINESTRING (5 5, 6 6))"
    )
    assert invalid == pygeos.Geometry("GEOMETRYCOLLECTION EMPTY")
def test_polygonize_full_array():
    """polygonize_full reduces over the last axis, yielding four outputs."""
    lines = [
        pygeos.Geometry("LINESTRING (0 0, 1 1)"),
        pygeos.Geometry("LINESTRING (0 0, 0 1)"),
        pygeos.Geometry("LINESTRING (0 1, 1 1)"),
    ]
    expected = pygeos.Geometry("GEOMETRYCOLLECTION (POLYGON ((1 1, 0 0, 0 1, 1 1)))")
    empty_coll = pygeos.Geometry("GEOMETRYCOLLECTION EMPTY")
    # 1-D input -> four scalar geometries
    result = pygeos.polygonize_full(np.array(lines))
    assert len(result) == 4
    assert all(isinstance(geom, pygeos.Geometry) for geom in result)
    assert result[0] == expected
    assert all(geom == empty_coll for geom in result[1:])
    # 2-D input -> four 1-D arrays
    result = pygeos.polygonize_full(np.array([lines]))
    assert len(result) == 4
    assert all(isinstance(geom, np.ndarray) for geom in result)
    assert all(geom.shape == (1,) for geom in result)
    assert result[0][0] == expected
    assert all(geom[0] == empty_coll for geom in result[1:])
    two_rows = np.array([lines, lines])
    assert two_rows.shape == (2, 3)
    result = pygeos.polygonize_full(two_rows)
    assert len(result) == 4
    assert all(isinstance(arr, np.ndarray) for arr in result)
    assert all(arr.shape == (2,) for arr in result)
    assert result[0][0] == expected
    assert result[0][1] == expected
    assert all(g == empty_coll for geom in result[1:] for g in geom)
    # 3-D input -> four 2-D arrays
    cube = np.array([[lines, lines]] * 3)
    assert cube.shape == (3, 2, 3)
    result = pygeos.polygonize_full(cube)
    assert len(result) == 4
    assert all(isinstance(arr, np.ndarray) for arr in result)
    assert all(arr.shape == (3, 2) for arr in result)
    for res in result[0].flatten():
        assert res == expected
    for arr in result[1:]:
        for res in arr.flatten():
            assert res == empty_coll
@pytest.mark.skipif(
    np.__version__ < "1.15",
    reason="axis keyword for generalized ufunc introduced in np 1.15",
)
def test_polygonize_full_array_axis():
    """The axis keyword selects which dimension polygonize_full reduces."""
    lines = [
        pygeos.Geometry("LINESTRING (0 0, 1 1)"),
        pygeos.Geometry("LINESTRING (0 0, 0 1)"),
        pygeos.Geometry("LINESTRING (0 1, 1 1)"),
    ]
    arr = np.array([lines, lines])  # shape (2, 3)
    result = pygeos.polygonize_full(arr, axis=1)
    assert len(result) == 4
    assert all(part.shape == (2,) for part in result)
    result = pygeos.polygonize_full(arr, axis=0)
    assert len(result) == 4
    assert all(part.shape == (3,) for part in result)
def test_polygonize_full_missing():
    """All-missing input yields four empty collections."""
    result = pygeos.polygonize_full([None, None])
    assert len(result) == 4
    empty_coll = pygeos.Geometry("GEOMETRYCOLLECTION EMPTY")
    assert all(geom == empty_coll for geom in result)
@pytest.mark.skipif(pygeos.geos_version < (3, 10, 0), reason="GEOS < 3.10")
@pytest.mark.parametrize("geometry", all_types)
@pytest.mark.parametrize("tolerance", [-1, 0])
def test_segmentize_invalid_tolerance(geometry, tolerance):
    """Non-positive tolerances are rejected by GEOS."""
    with pytest.raises(GEOSException, match="IllegalArgumentException"):
        pygeos.segmentize(geometry, tolerance=tolerance)
@pytest.mark.skipif(pygeos.geos_version < (3, 10, 0), reason="GEOS < 3.10")
@pytest.mark.parametrize("geometry", all_types)
def test_segmentize_tolerance_nan(geometry):
    """A NaN tolerance yields a missing (None) result."""
    assert pygeos.segmentize(geometry, tolerance=np.nan) is None
@pytest.mark.skipif(pygeos.geos_version < (3, 10, 0), reason="GEOS < 3.10")
@pytest.mark.parametrize(
    "geometry", [empty, empty_point, empty_line_string, empty_polygon]
)
def test_segmentize_empty(geometry):
    """Empty geometries pass through segmentize unchanged."""
    assert_geometries_equal(pygeos.segmentize(geometry, tolerance=5), geometry)
@pytest.mark.skipif(pygeos.geos_version < (3, 10, 0), reason="GEOS < 3.10")
@pytest.mark.parametrize("geometry", [point, point_z, multi_point])
def test_segmentize_no_change(geometry):
    """Point-like geometries have no edges, so they are returned as-is."""
    assert_geometries_equal(pygeos.segmentize(geometry, tolerance=5), geometry)
@pytest.mark.skipif(pygeos.geos_version < (3, 10, 0), reason="GEOS < 3.10")
def test_segmentize_none():
    """segmentize propagates missing (None) input."""
    result = pygeos.segmentize(None, tolerance=5)
    assert result is None
@pytest.mark.skipif(pygeos.geos_version < (3, 10, 0), reason="GEOS < 3.10")
@pytest.mark.parametrize(
    "geometry,tolerance, expected",
    [
        # tolerance greater than max edge length, no change
        (
            pygeos.Geometry("LINESTRING (0 0, 0 10)"),
            20,
            pygeos.Geometry("LINESTRING (0 0, 0 10)"),
        ),
        (
            pygeos.Geometry("POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))"),
            20,
            pygeos.Geometry("POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))"),
        ),
        # tolerance causes one vertex per segment
        (
            pygeos.Geometry("LINESTRING (0 0, 0 10)"),
            5,
            pygeos.Geometry("LINESTRING (0 0, 0 5, 0 10)"),
        ),
        (
            Geometry("POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))"),
            5,
            pygeos.Geometry(
                "POLYGON ((0 0, 5 0, 10 0, 10 5, 10 10, 5 10, 0 10, 0 5, 0 0))"
            ),
        ),
        # ensure input arrays are broadcast correctly
        (
            [
                pygeos.Geometry("LINESTRING (0 0, 0 10)"),
                pygeos.Geometry("LINESTRING (0 0, 0 2)"),
            ],
            5,
            [
                pygeos.Geometry("LINESTRING (0 0, 0 5, 0 10)"),
                pygeos.Geometry("LINESTRING (0 0, 0 2)"),
            ],
        ),
        (
            [
                pygeos.Geometry("LINESTRING (0 0, 0 10)"),
                pygeos.Geometry("LINESTRING (0 0, 0 2)"),
            ],
            [5],
            [
                pygeos.Geometry("LINESTRING (0 0, 0 5, 0 10)"),
                pygeos.Geometry("LINESTRING (0 0, 0 2)"),
            ],
        ),
        (
            [
                pygeos.Geometry("LINESTRING (0 0, 0 10)"),
                pygeos.Geometry("LINESTRING (0 0, 0 2)"),
            ],
            [5, 1.5],
            [
                pygeos.Geometry("LINESTRING (0 0, 0 5, 0 10)"),
                pygeos.Geometry("LINESTRING (0 0, 0 1, 0 2)"),
            ],
        ),
    ],
)
def test_segmentize(geometry, tolerance, expected):
    """segmentize adds vertices so no edge exceeds the tolerance."""
    actual = pygeos.segmentize(geometry, tolerance)
    assert_geometries_equal(actual, expected)
@pytest.mark.skipif(pygeos.geos_version < (3, 8, 0), reason="GEOS < 3.8")
@pytest.mark.parametrize("geometry", all_types)
def test_minimum_bounding_circle_all_types(geometry):
    """minimum_bounding_circle maps over arrays and propagates None."""
    result = pygeos.minimum_bounding_circle([geometry, geometry])
    assert result.shape == (2,)
    assert result[0] is None or isinstance(result[0], Geometry)
    assert pygeos.minimum_bounding_circle(None) is None
@pytest.mark.skipif(pygeos.geos_version < (3, 8, 0), reason="GEOS < 3.8")
@pytest.mark.parametrize(
    "geometry, expected",
    [
        # diamond -> circle of radius 5 centred on (5 5)
        (pygeos.Geometry("POLYGON ((0 5, 5 10, 10 5, 5 0, 0 5))"),
         pygeos.buffer(pygeos.Geometry("POINT (5 5)"), 5)),
        # vertical line -> circle around its midpoint
        (pygeos.Geometry("LINESTRING (1 0, 1 10)"),
         pygeos.buffer(pygeos.Geometry("POINT (1 5)"), 5)),
        # two points -> circle through both
        (pygeos.Geometry("MULTIPOINT (2 2, 4 2)"),
         pygeos.buffer(pygeos.Geometry("POINT (3 2)"), 1)),
        # a single point is its own bounding circle
        (pygeos.Geometry("POINT (2 2)"),
         pygeos.Geometry("POINT (2 2)")),
        # empty input -> empty polygon
        (pygeos.Geometry("GEOMETRYCOLLECTION EMPTY"),
         pygeos.Geometry("POLYGON EMPTY")),
    ],
)
def test_minimum_bounding_circle(geometry, expected):
    """minimum_bounding_circle returns the smallest circle containing the input."""
    result = pygeos.minimum_bounding_circle(geometry)
    assert_geometries_equal(result, expected)
@pytest.mark.skipif(pygeos.geos_version < (3, 6, 0), reason="GEOS < 3.6")
@pytest.mark.parametrize("geometry", all_types)
def test_oriented_envelope_all_types(geometry):
    """oriented_envelope accepts arrays of every geometry type and None."""
    result = pygeos.oriented_envelope([geometry, geometry])
    assert result.shape == (2,)
    first = result[0]
    assert first is None or isinstance(first, Geometry)

    # a missing geometry yields a missing result
    assert pygeos.oriented_envelope(None) is None
@pytest.mark.skipif(pygeos.geos_version < (3, 6, 0), reason="GEOS < 3.6")
@pytest.mark.parametrize(
    "geometry, expected",
    [
        # scattered points -> rotated rectangle around them
        (pygeos.Geometry("MULTIPOINT (0 0, 10 0, 10 10)"),
         pygeos.Geometry("POLYGON ((0 0, 5 -5, 15 5, 10 10, 0 0))")),
        # bent linestring -> rotated rectangle
        (pygeos.Geometry("LINESTRING (1 1, 5 1, 10 10)"),
         pygeos.Geometry("POLYGON ((1 1, 3 -1, 12 8, 10 10, 1 1))")),
        # triangle -> axis-aligned rectangle here
        (pygeos.Geometry("POLYGON ((1 1, 15 1, 5 10, 1 1))"),
         pygeos.Geometry("POLYGON ((15 1, 15 10, 1 10, 1 1, 15 1))")),
        # degenerate cases are returned unchanged
        (pygeos.Geometry("LINESTRING (1 1, 10 1)"),
         pygeos.Geometry("LINESTRING (1 1, 10 1)")),
        (pygeos.Geometry("POINT (2 2)"),
         pygeos.Geometry("POINT (2 2)")),
        (pygeos.Geometry("GEOMETRYCOLLECTION EMPTY"),
         pygeos.Geometry("POLYGON EMPTY")),
    ],
)
def test_oriented_envelope(geometry, expected):
    """oriented_envelope returns the minimum-area rotated rectangle."""
    result = pygeos.oriented_envelope(geometry)
    assert pygeos.equals(result, expected).all()
@pytest.mark.skipif(pygeos.geos_version < (3, 6, 0), reason="GEOS < 3.6")
@pytest.mark.parametrize(
    "geometry, expected",
    [
        # scattered points -> rotated rectangle around them
        (pygeos.Geometry("MULTIPOINT (0 0, 10 0, 10 10)"),
         pygeos.Geometry("POLYGON ((0 0, 5 -5, 15 5, 10 10, 0 0))")),
        # bent linestring -> rotated rectangle
        (pygeos.Geometry("LINESTRING (1 1, 5 1, 10 10)"),
         pygeos.Geometry("POLYGON ((1 1, 3 -1, 12 8, 10 10, 1 1))")),
        # triangle -> axis-aligned rectangle here
        (pygeos.Geometry("POLYGON ((1 1, 15 1, 5 10, 1 1))"),
         pygeos.Geometry("POLYGON ((15 1, 15 10, 1 10, 1 1, 15 1))")),
        # degenerate cases are returned unchanged
        (pygeos.Geometry("LINESTRING (1 1, 10 1)"),
         pygeos.Geometry("LINESTRING (1 1, 10 1)")),
        (pygeos.Geometry("POINT (2 2)"),
         pygeos.Geometry("POINT (2 2)")),
        (pygeos.Geometry("GEOMETRYCOLLECTION EMPTY"),
         pygeos.Geometry("POLYGON EMPTY")),
    ],
)
def test_minimum_rotated_rectangle(geometry, expected):
    """minimum_rotated_rectangle (alias of oriented_envelope) matches expectations."""
    result = pygeos.minimum_rotated_rectangle(geometry)
    assert pygeos.equals(result, expected).all()
| [
"pygeos.equals",
"pygeos.reverse",
"pygeos.polygonize_full",
"pygeos.offset_curve",
"numpy.array",
"pygeos.testing.assert_geometries_equal",
"pygeos.Geometry",
"pygeos.build_area",
"pytest.mark.skipif",
"pygeos.clip_by_rect",
"pygeos.is_empty",
"pygeos.segmentize",
"pygeos.normalize",
"pyt... | [((668, 714), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geometry"""', 'all_types'], {}), "('geometry', all_types)\n", (691, 714), False, 'import pytest\n'), ((716, 769), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', 'CONSTRUCTIVE_NO_ARGS'], {}), "('func', CONSTRUCTIVE_NO_ARGS)\n", (739, 769), False, 'import pytest\n'), ((949, 995), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geometry"""', 'all_types'], {}), "('geometry', all_types)\n", (972, 995), False, 'import pytest\n'), ((997, 1052), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', 'CONSTRUCTIVE_FLOAT_ARG'], {}), "('func', CONSTRUCTIVE_FLOAT_ARG)\n", (1020, 1052), False, 'import pytest\n'), ((1436, 1482), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geometry"""', 'all_types'], {}), "('geometry', all_types)\n", (1459, 1482), False, 'import pytest\n'), ((1484, 1531), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""reference"""', 'all_types'], {}), "('reference', all_types)\n", (1507, 1531), False, 'import pytest\n'), ((1738, 1791), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', 'CONSTRUCTIVE_NO_ARGS'], {}), "('func', CONSTRUCTIVE_NO_ARGS)\n", (1761, 1791), False, 'import pytest\n'), ((1877, 1932), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', 'CONSTRUCTIVE_FLOAT_ARG'], {}), "('func', CONSTRUCTIVE_FLOAT_ARG)\n", (1900, 1932), False, 'import pytest\n'), ((2025, 2071), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geometry"""', 'all_types'], {}), "('geometry', all_types)\n", (2048, 2071), False, 'import pytest\n'), ((2073, 2128), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', 'CONSTRUCTIVE_FLOAT_ARG'], {}), "('func', CONSTRUCTIVE_FLOAT_ARG)\n", (2096, 2128), False, 'import pytest\n'), ((2683, 2729), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geometry"""', 'all_types'], {}), "('geometry', all_types)\n", 
(2706, 2729), False, 'import pytest\n'), ((2854, 2926), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 8, 0))'], {'reason': '"""GEOS < 3.8"""'}), "(pygeos.geos_version < (3, 8, 0), reason='GEOS < 3.8')\n", (2872, 2926), False, 'import pytest\n'), ((3021, 3093), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 8, 0))'], {'reason': '"""GEOS < 3.8"""'}), "(pygeos.geos_version < (3, 8, 0), reason='GEOS < 3.8')\n", (3039, 3093), False, 'import pytest\n'), ((3768, 3840), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 8, 0))'], {'reason': '"""GEOS < 3.8"""'}), "(pygeos.geos_version < (3, 8, 0), reason='GEOS < 3.8')\n", (3786, 3840), False, 'import pytest\n'), ((3935, 4007), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 8, 0))'], {'reason': '"""GEOS < 3.8"""'}), "(pygeos.geos_version < (3, 8, 0), reason='GEOS < 3.8')\n", (3953, 4007), False, 'import pytest\n'), ((4901, 4973), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 8, 0))'], {'reason': '"""GEOS < 3.8"""'}), "(pygeos.geos_version < (3, 8, 0), reason='GEOS < 3.8')\n", (4919, 4973), False, 'import pytest\n'), ((7650, 7722), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 7, 0))'], {'reason': '"""GEOS < 3.7"""'}), "(pygeos.geos_version < (3, 7, 0), reason='GEOS < 3.7')\n", (7668, 7722), False, 'import pytest\n'), ((9494, 9566), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 7, 0))'], {'reason': '"""GEOS < 3.7"""'}), "(pygeos.geos_version < (3, 7, 0), reason='GEOS < 3.7')\n", (9512, 9566), False, 'import pytest\n'), ((9954, 10026), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 7, 0))'], {'reason': '"""GEOS < 3.7"""'}), "(pygeos.geos_version < (3, 7, 0), reason='GEOS < 3.7')\n", (9972, 10026), False, 'import pytest\n'), ((10028, 10082), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geom"""', 
"['Not a geometry', 1]"], {}), "('geom', ['Not a geometry', 1])\n", (10051, 10082), False, 'import pytest\n'), ((10238, 10674), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geom,expected"""', "[('POINT (0 0)', 'GEOMETRYCOLLECTION EMPTY'), ('POINT (15 15)',\n 'POINT (15 15)'), ('POINT (15 10)', 'GEOMETRYCOLLECTION EMPTY'), (\n 'LINESTRING (0 0, -5 5)', 'GEOMETRYCOLLECTION EMPTY'), (\n 'LINESTRING (15 15, 16 15)', 'LINESTRING (15 15, 16 15)'), (\n 'LINESTRING (10 15, 10 10, 15 10)', 'GEOMETRYCOLLECTION EMPTY'), (\n 'LINESTRING (10 5, 25 20)', 'LINESTRING (15 10, 20 15)')]"], {}), "('geom,expected', [('POINT (0 0)',\n 'GEOMETRYCOLLECTION EMPTY'), ('POINT (15 15)', 'POINT (15 15)'), (\n 'POINT (15 10)', 'GEOMETRYCOLLECTION EMPTY'), ('LINESTRING (0 0, -5 5)',\n 'GEOMETRYCOLLECTION EMPTY'), ('LINESTRING (15 15, 16 15)',\n 'LINESTRING (15 15, 16 15)'), ('LINESTRING (10 15, 10 10, 15 10)',\n 'GEOMETRYCOLLECTION EMPTY'), ('LINESTRING (10 5, 25 20)',\n 'LINESTRING (15 10, 20 15)')])\n", (10261, 10674), False, 'import pytest\n'), ((11119, 11811), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geom, rect, expected"""', "[(\n 'POLYGON ((0 0, 0 30, 30 30, 30 0, 0 0), (10 10, 20 10, 20 20, 10 20, 10 10))'\n , (10, 10, 20, 20), 'GEOMETRYCOLLECTION EMPTY'), (\n 'POLYGON ((0 0, 0 30, 30 30, 30 0, 0 0), (10 10, 10 20, 20 20, 20 10, 10 10))'\n , (10, 10, 20, 20), 'GEOMETRYCOLLECTION EMPTY'), (\n 'POLYGON ((1 1, 1 30, 30 30, 30 1, 1 1), (10 10, 20 10, 20 20, 10 20, 10 10))'\n , (0, 0, 40, 40),\n 'POLYGON ((1 1, 1 30, 30 30, 30 1, 1 1), (10 10, 20 10, 20 20, 10 20, 10 10))'\n ), (\n 'POLYGON ((0 0, 0 30, 30 30, 30 0, 0 0), (10 10, 20 10, 20 20, 10 20, 10 10))'\n , (5, 5, 15, 15), 'POLYGON ((5 5, 5 15, 10 15, 10 10, 15 10, 15 5, 5 5))')]"], {}), "('geom, rect, expected', [(\n 'POLYGON ((0 0, 0 30, 30 30, 30 0, 0 0), (10 10, 20 10, 20 20, 10 20, 10 10))'\n , (10, 10, 20, 20), 'GEOMETRYCOLLECTION EMPTY'), (\n 'POLYGON ((0 0, 0 30, 30 30, 30 0, 0 0), (10 10, 10 
20, 20 20, 20 10, 10 10))'\n , (10, 10, 20, 20), 'GEOMETRYCOLLECTION EMPTY'), (\n 'POLYGON ((1 1, 1 30, 30 30, 30 1, 1 1), (10 10, 20 10, 20 20, 10 20, 10 10))'\n , (0, 0, 40, 40),\n 'POLYGON ((1 1, 1 30, 30 30, 30 1, 1 1), (10 10, 20 10, 20 20, 10 20, 10 10))'\n ), (\n 'POLYGON ((0 0, 0 30, 30 30, 30 0, 0 0), (10 10, 20 10, 20 20, 10 20, 10 10))'\n , (5, 5, 15, 15), 'POLYGON ((5 5, 5 15, 10 15, 10 10, 15 10, 15 5, 5 5))')]\n )\n", (11142, 11811), False, 'import pytest\n'), ((12417, 12463), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geometry"""', 'all_types'], {}), "('geometry', all_types)\n", (12440, 12463), False, 'import pytest\n'), ((12789, 12863), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geom"""', '[empty, empty_line_string, empty_polygon]'], {}), "('geom', [empty, empty_line_string, empty_polygon])\n", (12812, 12863), False, 'import pytest\n'), ((15004, 15119), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(np.__version__ < '1.15')"], {'reason': '"""axis keyword for generalized ufunc introduced in np 1.15"""'}), "(np.__version__ < '1.15', reason=\n 'axis keyword for generalized ufunc introduced in np 1.15')\n", (15022, 15119), False, 'import pytest\n'), ((18756, 18871), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(np.__version__ < '1.15')"], {'reason': '"""axis keyword for generalized ufunc introduced in np 1.15"""'}), "(np.__version__ < '1.15', reason=\n 'axis keyword for generalized ufunc introduced in np 1.15')\n", (18774, 18871), False, 'import pytest\n'), ((19646, 19720), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 10, 0))'], {'reason': '"""GEOS < 3.10"""'}), "(pygeos.geos_version < (3, 10, 0), reason='GEOS < 3.10')\n", (19664, 19720), False, 'import pytest\n'), ((19722, 19768), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geometry"""', 'all_types'], {}), "('geometry', all_types)\n", (19745, 19768), False, 'import pytest\n'), ((19770, 19815), 'pytest.mark.parametrize', 
'pytest.mark.parametrize', (['"""tolerance"""', '[-1, 0]'], {}), "('tolerance', [-1, 0])\n", (19793, 19815), False, 'import pytest\n'), ((20009, 20083), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 10, 0))'], {'reason': '"""GEOS < 3.10"""'}), "(pygeos.geos_version < (3, 10, 0), reason='GEOS < 3.10')\n", (20027, 20083), False, 'import pytest\n'), ((20085, 20131), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geometry"""', 'all_types'], {}), "('geometry', all_types)\n", (20108, 20131), False, 'import pytest\n'), ((20265, 20339), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 10, 0))'], {'reason': '"""GEOS < 3.10"""'}), "(pygeos.geos_version < (3, 10, 0), reason='GEOS < 3.10')\n", (20283, 20339), False, 'import pytest\n'), ((20341, 20436), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geometry"""', '[empty, empty_point, empty_line_string, empty_polygon]'], {}), "('geometry', [empty, empty_point, empty_line_string,\n empty_polygon])\n", (20364, 20436), False, 'import pytest\n'), ((20579, 20653), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 10, 0))'], {'reason': '"""GEOS < 3.10"""'}), "(pygeos.geos_version < (3, 10, 0), reason='GEOS < 3.10')\n", (20597, 20653), False, 'import pytest\n'), ((20655, 20721), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geometry"""', '[point, point_z, multi_point]'], {}), "('geometry', [point, point_z, multi_point])\n", (20678, 20721), False, 'import pytest\n'), ((20866, 20940), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 10, 0))'], {'reason': '"""GEOS < 3.10"""'}), "(pygeos.geos_version < (3, 10, 0), reason='GEOS < 3.10')\n", (20884, 20940), False, 'import pytest\n'), ((21028, 21102), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 10, 0))'], {'reason': '"""GEOS < 3.10"""'}), "(pygeos.geos_version < (3, 10, 0), reason='GEOS < 3.10')\n", (21046, 21102), False, 
'import pytest\n'), ((23214, 23286), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 8, 0))'], {'reason': '"""GEOS < 3.8"""'}), "(pygeos.geos_version < (3, 8, 0), reason='GEOS < 3.8')\n", (23232, 23286), False, 'import pytest\n'), ((23288, 23334), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geometry"""', 'all_types'], {}), "('geometry', all_types)\n", (23311, 23334), False, 'import pytest\n'), ((23631, 23703), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 8, 0))'], {'reason': '"""GEOS < 3.8"""'}), "(pygeos.geos_version < (3, 8, 0), reason='GEOS < 3.8')\n", (23649, 23703), False, 'import pytest\n'), ((24589, 24661), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 6, 0))'], {'reason': '"""GEOS < 3.6"""'}), "(pygeos.geos_version < (3, 6, 0), reason='GEOS < 3.6')\n", (24607, 24661), False, 'import pytest\n'), ((24663, 24709), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geometry"""', 'all_types'], {}), "('geometry', all_types)\n", (24686, 24709), False, 'import pytest\n'), ((24988, 25060), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 6, 0))'], {'reason': '"""GEOS < 3.6"""'}), "(pygeos.geos_version < (3, 6, 0), reason='GEOS < 3.6')\n", (25006, 25060), False, 'import pytest\n'), ((26108, 26180), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 6, 0))'], {'reason': '"""GEOS < 3.6"""'}), "(pygeos.geos_version < (3, 6, 0), reason='GEOS < 3.6')\n", (26126, 26180), False, 'import pytest\n'), ((1587, 1659), 'pygeos.snap', 'pygeos.snap', (['[geometry, geometry]', '[reference, reference]'], {'tolerance': '(1.0)'}), '([geometry, geometry], [reference, reference], tolerance=1.0)\n', (1598, 1659), False, 'import pygeos\n'), ((2614, 2653), 'pygeos.snap', 'pygeos.snap', (['None', 'point'], {'tolerance': '(1.0)'}), '(None, point, tolerance=1.0)\n', (2625, 2653), False, 'import pygeos\n'), ((2778, 2824), 'pygeos.snap', 
'pygeos.snap', (['geometry', 'point'], {'tolerance': 'np.nan'}), '(geometry, point, tolerance=np.nan)\n', (2789, 2824), False, 'import pygeos\n'), ((2968, 2991), 'pygeos.build_area', 'pygeos.build_area', (['None'], {}), '(None)\n', (2985, 2991), False, 'import pygeos\n'), ((3677, 3700), 'pygeos.build_area', 'pygeos.build_area', (['geom'], {}), '(geom)\n', (3694, 3700), False, 'import pygeos\n'), ((3882, 3905), 'pygeos.make_valid', 'pygeos.make_valid', (['None'], {}), '(None)\n', (3899, 3905), False, 'import pygeos\n'), ((4718, 4741), 'pygeos.make_valid', 'pygeos.make_valid', (['geom'], {}), '(geom)\n', (4735, 4741), False, 'import pygeos\n'), ((5636, 5659), 'pygeos.make_valid', 'pygeos.make_valid', (['geom'], {}), '(geom)\n', (5653, 5659), False, 'import pygeos\n'), ((6211, 6233), 'pygeos.normalize', 'pygeos.normalize', (['geom'], {}), '(geom)\n', (6227, 6233), False, 'import pygeos\n'), ((6310, 6353), 'pygeos.offset_curve', 'pygeos.offset_curve', (['empty_line_string', '(2.0)'], {}), '(empty_line_string, 2.0)\n', (6329, 6353), False, 'import pygeos\n'), ((6365, 6388), 'pygeos.is_empty', 'pygeos.is_empty', (['actual'], {}), '(actual)\n', (6380, 6388), False, 'import pygeos\n'), ((6487, 6548), 'pygeos.offset_curve', 'pygeos.offset_curve', (['[line_string, line_string]', '[-2.0, -3.0]'], {}), '([line_string, line_string], [-2.0, -3.0])\n', (6506, 6548), False, 'import pygeos\n'), ((6766, 6857), 'pygeos.offset_curve', 'pygeos.offset_curve', (['line_string', '(-2.0)'], {'quadsegs': '(2)', 'join_style': '"""mitre"""', 'mitre_limit': '(2.0)'}), "(line_string, -2.0, quadsegs=2, join_style='mitre',\n mitre_limit=2.0)\n", (6785, 6857), False, 'import pygeos\n'), ((6882, 6920), 'pygeos.offset_curve', 'pygeos.offset_curve', (['line_string', '(-2.0)'], {}), '(line_string, -2.0)\n', (6901, 6920), False, 'import pygeos\n'), ((9701, 9755), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))"""'], {}), "('POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))')\n", (9716, 
9755), False, 'import pygeos\n'), ((9771, 9826), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))"""'], {}), "('POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))')\n", (9786, 9826), False, 'import pygeos\n'), ((9840, 9872), 'pygeos.reverse', 'pygeos.reverse', (['[None, geometry]'], {}), '([None, geometry])\n', (9854, 9872), False, 'import pygeos\n'), ((9906, 9950), 'pygeos.testing.assert_geometries_equal', 'assert_geometries_equal', (['result[1]', 'expected'], {}), '(result[1], expected)\n', (9929, 9950), False, 'from pygeos.testing import assert_geometries_equal\n'), ((11028, 11069), 'pygeos.clip_by_rect', 'pygeos.clip_by_rect', (['geom', '(10)', '(10)', '(20)', '(20)'], {}), '(geom, 10, 10, 20, 20)\n', (11047, 11069), False, 'import pygeos\n'), ((11074, 11115), 'pygeos.testing.assert_geometries_equal', 'assert_geometries_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (11097, 11115), False, 'from pygeos.testing import assert_geometries_equal\n'), ((12335, 12367), 'pygeos.clip_by_rect', 'pygeos.clip_by_rect', (['geom', '*rect'], {}), '(geom, *rect)\n', (12354, 12367), False, 'import pygeos\n'), ((12372, 12413), 'pygeos.testing.assert_geometries_equal', 'assert_geometries_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (12395, 12413), False, 'from pygeos.testing import assert_geometries_equal\n'), ((12516, 12577), 'pygeos.clip_by_rect', 'pygeos.clip_by_rect', (['[geometry, geometry]', '(0.0)', '(0.0)', '(1.0)', '(1.0)'], {}), '([geometry, geometry], 0.0, 0.0, 1.0, 1.0)\n', (12535, 12577), False, 'import pygeos\n'), ((12722, 12759), 'pygeos.clip_by_rect', 'pygeos.clip_by_rect', (['None', '(0)', '(0)', '(1)', '(1)'], {}), '(None, 0, 0, 1, 1)\n', (12741, 12759), False, 'import pygeos\n'), ((12935, 12972), 'pygeos.clip_by_rect', 'pygeos.clip_by_rect', (['geom', '(0)', '(0)', '(1)', '(1)'], {}), '(geom, 0, 0, 1, 1)\n', (12954, 12972), False, 'import pygeos\n'), ((13656, 13680), 'pygeos.polygonize', 'pygeos.polygonize', 
(['lines'], {}), '(lines)\n', (13673, 13680), False, 'import pygeos\n'), ((13761, 13873), 'pygeos.Geometry', 'pygeos.Geometry', (['"""GEOMETRYCOLLECTION (POLYGON ((0 0, 1 1, 1 0, 0 0)), POLYGON ((1 1, 0 0, 0 1, 1 1)))"""'], {}), "(\n 'GEOMETRYCOLLECTION (POLYGON ((0 0, 1 1, 1 0, 0 0)), POLYGON ((1 1, 0 0, 0 1, 1 1)))'\n )\n", (13776, 13873), False, 'import pygeos\n'), ((14124, 14194), 'pygeos.Geometry', 'pygeos.Geometry', (['"""GEOMETRYCOLLECTION (POLYGON ((1 1, 0 0, 0 1, 1 1)))"""'], {}), "('GEOMETRYCOLLECTION (POLYGON ((1 1, 0 0, 0 1, 1 1)))')\n", (14139, 14194), False, 'import pygeos\n'), ((14489, 14513), 'numpy.array', 'np.array', (['[lines, lines]'], {}), '([lines, lines])\n', (14497, 14513), True, 'import numpy as np\n'), ((14558, 14580), 'pygeos.polygonize', 'pygeos.polygonize', (['arr'], {}), '(arr)\n', (14575, 14580), False, 'import pygeos\n'), ((14732, 14790), 'numpy.array', 'np.array', (['[[lines, lines], [lines, lines], [lines, lines]]'], {}), '([[lines, lines], [lines, lines], [lines, lines]])\n', (14740, 14790), True, 'import numpy as np\n'), ((14838, 14860), 'pygeos.polygonize', 'pygeos.polygonize', (['arr'], {}), '(arr)\n', (14855, 14860), False, 'import pygeos\n'), ((15340, 15364), 'numpy.array', 'np.array', (['[lines, lines]'], {}), '([lines, lines])\n', (15348, 15364), True, 'import numpy as np\n'), ((15394, 15424), 'pygeos.polygonize', 'pygeos.polygonize', (['arr'], {'axis': '(1)'}), '(arr, axis=1)\n', (15411, 15424), False, 'import pygeos\n'), ((15470, 15500), 'pygeos.polygonize', 'pygeos.polygonize', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (15487, 15500), False, 'import pygeos\n'), ((15623, 15654), 'pygeos.polygonize', 'pygeos.polygonize', (['[None, None]'], {}), '([None, None])\n', (15640, 15654), False, 'import pygeos\n'), ((16219, 16248), 'pygeos.polygonize_full', 'pygeos.polygonize_full', (['lines'], {}), '(lines)\n', (16241, 16248), False, 'import pygeos\n'), ((16434, 16546), 'pygeos.Geometry', 'pygeos.Geometry', 
(['"""GEOMETRYCOLLECTION (POLYGON ((0 0, 1 1, 1 0, 0 0)), POLYGON ((1 1, 0 0, 0 1, 1 1)))"""'], {}), "(\n 'GEOMETRYCOLLECTION (POLYGON ((0 0, 1 1, 1 0, 0 0)), POLYGON ((1 1, 0 0, 0 1, 1 1)))'\n )\n", (16449, 16546), False, 'import pygeos\n'), ((16678, 16771), 'pygeos.Geometry', 'pygeos.Geometry', (['"""GEOMETRYCOLLECTION (LINESTRING (1 1, 100 100), LINESTRING (5 5, 6 6))"""'], {}), "(\n 'GEOMETRYCOLLECTION (LINESTRING (1 1, 100 100), LINESTRING (5 5, 6 6))')\n", (16693, 16771), False, 'import pygeos\n'), ((17107, 17177), 'pygeos.Geometry', 'pygeos.Geometry', (['"""GEOMETRYCOLLECTION (POLYGON ((1 1, 0 0, 0 1, 1 1)))"""'], {}), "('GEOMETRYCOLLECTION (POLYGON ((1 1, 0 0, 0 1, 1 1)))')\n", (17122, 17177), False, 'import pygeos\n'), ((17823, 17847), 'numpy.array', 'np.array', (['[lines, lines]'], {}), '([lines, lines])\n', (17831, 17847), True, 'import numpy as np\n'), ((17892, 17919), 'pygeos.polygonize_full', 'pygeos.polygonize_full', (['arr'], {}), '(arr)\n', (17914, 17919), False, 'import pygeos\n'), ((18277, 18335), 'numpy.array', 'np.array', (['[[lines, lines], [lines, lines], [lines, lines]]'], {}), '([[lines, lines], [lines, lines], [lines, lines]])\n', (18285, 18335), True, 'import numpy as np\n'), ((18383, 18410), 'pygeos.polygonize_full', 'pygeos.polygonize_full', (['arr'], {}), '(arr)\n', (18405, 18410), False, 'import pygeos\n'), ((19097, 19121), 'numpy.array', 'np.array', (['[lines, lines]'], {}), '([lines, lines])\n', (19105, 19121), True, 'import numpy as np\n'), ((19151, 19186), 'pygeos.polygonize_full', 'pygeos.polygonize_full', (['arr'], {'axis': '(1)'}), '(arr, axis=1)\n', (19173, 19186), False, 'import pygeos\n'), ((19280, 19315), 'pygeos.polygonize_full', 'pygeos.polygonize_full', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (19302, 19315), False, 'import pygeos\n'), ((19491, 19527), 'pygeos.polygonize_full', 'pygeos.polygonize_full', (['[None, None]'], {}), '([None, None])\n', (19513, 19527), False, 'import pygeos\n'), ((20190, 20235), 
'pygeos.segmentize', 'pygeos.segmentize', (['geometry'], {'tolerance': 'np.nan'}), '(geometry, tolerance=np.nan)\n', (20207, 20235), False, 'import pygeos\n'), ((20489, 20529), 'pygeos.segmentize', 'pygeos.segmentize', (['geometry'], {'tolerance': '(5)'}), '(geometry, tolerance=5)\n', (20506, 20529), False, 'import pygeos\n'), ((20534, 20575), 'pygeos.testing.assert_geometries_equal', 'assert_geometries_equal', (['actual', 'geometry'], {}), '(actual, geometry)\n', (20557, 20575), False, 'from pygeos.testing import assert_geometries_equal\n'), ((20776, 20816), 'pygeos.segmentize', 'pygeos.segmentize', (['geometry'], {'tolerance': '(5)'}), '(geometry, tolerance=5)\n', (20793, 20816), False, 'import pygeos\n'), ((20821, 20862), 'pygeos.testing.assert_geometries_equal', 'assert_geometries_equal', (['actual', 'geometry'], {}), '(actual, geometry)\n', (20844, 20862), False, 'from pygeos.testing import assert_geometries_equal\n'), ((23126, 23164), 'pygeos.segmentize', 'pygeos.segmentize', (['geometry', 'tolerance'], {}), '(geometry, tolerance)\n', (23143, 23164), False, 'import pygeos\n'), ((23169, 23210), 'pygeos.testing.assert_geometries_equal', 'assert_geometries_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (23192, 23210), False, 'from pygeos.testing import assert_geometries_equal\n'), ((23402, 23454), 'pygeos.minimum_bounding_circle', 'pygeos.minimum_bounding_circle', (['[geometry, geometry]'], {}), '([geometry, geometry])\n', (23432, 23454), False, 'import pygeos\n'), ((23565, 23601), 'pygeos.minimum_bounding_circle', 'pygeos.minimum_bounding_circle', (['None'], {}), '(None)\n', (23595, 23601), False, 'import pygeos\n'), ((24499, 24539), 'pygeos.minimum_bounding_circle', 'pygeos.minimum_bounding_circle', (['geometry'], {}), '(geometry)\n', (24529, 24539), False, 'import pygeos\n'), ((24544, 24585), 'pygeos.testing.assert_geometries_equal', 'assert_geometries_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (24567, 24585), False, 'from 
pygeos.testing import assert_geometries_equal\n'), ((24771, 24817), 'pygeos.oriented_envelope', 'pygeos.oriented_envelope', (['[geometry, geometry]'], {}), '([geometry, geometry])\n', (24795, 24817), False, 'import pygeos\n'), ((24928, 24958), 'pygeos.oriented_envelope', 'pygeos.oriented_envelope', (['None'], {}), '(None)\n', (24952, 24958), False, 'import pygeos\n'), ((26021, 26055), 'pygeos.oriented_envelope', 'pygeos.oriented_envelope', (['geometry'], {}), '(geometry)\n', (26045, 26055), False, 'import pygeos\n'), ((27149, 27191), 'pygeos.minimum_rotated_rectangle', 'pygeos.minimum_rotated_rectangle', (['geometry'], {}), '(geometry)\n', (27181, 27191), False, 'import pygeos\n'), ((2285, 2351), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""\'invalid\' is not a valid option"""'}), '(ValueError, match="\'invalid\' is not a valid option")\n', (2298, 2351), False, 'import pytest\n'), ((2361, 2405), 'pygeos.buffer', 'pygeos.buffer', (['point', '(1)'], {'cap_style': '"""invalid"""'}), "(point, 1, cap_style='invalid')\n", (2374, 2405), False, 'import pygeos\n'), ((2455, 2521), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""\'invalid\' is not a valid option"""'}), '(ValueError, match="\'invalid\' is not a valid option")\n', (2468, 2521), False, 'import pytest\n'), ((2531, 2576), 'pygeos.buffer', 'pygeos.buffer', (['point', '(1)'], {'join_style': '"""invalid"""'}), "(point, 1, join_style='invalid')\n", (2544, 2576), False, 'import pygeos\n'), ((4861, 4885), 'pygeos.normalize', 'pygeos.normalize', (['actual'], {}), '(actual)\n', (4877, 4885), False, 'import pygeos\n'), ((6573, 6611), 'pygeos.offset_curve', 'pygeos.offset_curve', (['line_string', '(-2.0)'], {}), '(line_string, -2.0)\n', (6592, 6611), False, 'import pygeos\n'), ((6636, 6674), 'pygeos.offset_curve', 'pygeos.offset_curve', (['line_string', '(-3.0)'], {}), '(line_string, -3.0)\n', (6655, 6674), False, 'import pygeos\n'), ((7044, 7079), 'pytest.raises', 'pytest.raises', 
(['TypeError'], {'match': 'msg'}), '(TypeError, match=msg)\n', (7057, 7079), False, 'import pytest\n'), ((7177, 7212), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'msg'}), '(TypeError, match=msg)\n', (7190, 7212), False, 'import pytest\n'), ((7222, 7307), 'pygeos.offset_curve', 'pygeos.offset_curve', (['[line_string, line_string]', '(1)'], {'join_style': "['round', 'bevel']"}), "([line_string, line_string], 1, join_style=['round',\n 'bevel'])\n", (7241, 7307), False, 'import pygeos\n'), ((7336, 7371), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'msg'}), '(TypeError, match=msg)\n', (7349, 7371), False, 'import pytest\n'), ((7381, 7455), 'pygeos.offset_curve', 'pygeos.offset_curve', (['[line_string, line_string]', '(1)'], {'mitre_limit': '[5.0, 6.0]'}), '([line_string, line_string], 1, mitre_limit=[5.0, 6.0])\n', (7400, 7455), False, 'import pygeos\n'), ((7511, 7577), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""\'invalid\' is not a valid option"""'}), '(ValueError, match="\'invalid\' is not a valid option")\n', (7524, 7577), False, 'import pytest\n'), ((7587, 7646), 'pygeos.offset_curve', 'pygeos.offset_curve', (['line_string', '(1.0)'], {'join_style': '"""invalid"""'}), "(line_string, 1.0, join_style='invalid')\n", (7606, 7646), False, 'import pygeos\n'), ((9459, 9479), 'pygeos.reverse', 'pygeos.reverse', (['geom'], {}), '(geom)\n', (9473, 9479), False, 'import pygeos\n'), ((9603, 9623), 'pygeos.reverse', 'pygeos.reverse', (['None'], {}), '(None)\n', (9617, 9623), False, 'import pygeos\n'), ((10129, 10204), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""One of the arguments is of incorrect type"""'}), "(TypeError, match='One of the arguments is of incorrect type')\n", (10142, 10204), False, 'import pytest\n'), ((10214, 10234), 'pygeos.reverse', 'pygeos.reverse', (['geom'], {}), '(geom)\n', (10228, 10234), False, 'import pygeos\n'), ((10966, 10987), 'pygeos.Geometry', 'pygeos.Geometry', (['geom'], 
{}), '(geom)\n', (10981, 10987), False, 'import pygeos\n'), ((10989, 11014), 'pygeos.Geometry', 'pygeos.Geometry', (['expected'], {}), '(expected)\n', (11004, 11014), False, 'import pygeos\n'), ((12273, 12294), 'pygeos.Geometry', 'pygeos.Geometry', (['geom'], {}), '(geom)\n', (12288, 12294), False, 'import pygeos\n'), ((12296, 12321), 'pygeos.Geometry', 'pygeos.Geometry', (['expected'], {}), '(expected)\n', (12311, 12321), False, 'import pygeos\n'), ((12994, 13030), 'pygeos.Geometry', 'Geometry', (['"""GEOMETRYCOLLECTION EMPTY"""'], {}), "('GEOMETRYCOLLECTION EMPTY')\n", (13002, 13030), False, 'from pygeos import Geometry, GEOSException\n'), ((13124, 13159), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'msg'}), '(TypeError, match=msg)\n', (13137, 13159), False, 'import pytest\n'), ((13291, 13331), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 1 1)"""'], {}), "('LINESTRING (0 0, 1 1)')\n", (13306, 13331), False, 'import pygeos\n'), ((13341, 13381), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 1)"""'], {}), "('LINESTRING (0 0, 0 1)')\n", (13356, 13381), False, 'import pygeos\n'), ((13391, 13431), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 1, 1 1)"""'], {}), "('LINESTRING (0 1, 1 1)')\n", (13406, 13431), False, 'import pygeos\n'), ((13441, 13481), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (1 1, 1 0)"""'], {}), "('LINESTRING (1 1, 1 0)')\n", (13456, 13481), False, 'import pygeos\n'), ((13491, 13531), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (1 0, 0 0)"""'], {}), "('LINESTRING (1 0, 0 0)')\n", (13506, 13531), False, 'import pygeos\n'), ((13541, 13581), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (5 5, 6 6)"""'], {}), "('LINESTRING (5 5, 6 6)')\n", (13556, 13581), False, 'import pygeos\n'), ((13591, 13621), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POINT (0 0)"""'], {}), "('POINT (0 0)')\n", (13606, 13621), False, 'import pygeos\n'), ((13692, 13718), 
'pygeos.get_type_id', 'pygeos.get_type_id', (['result'], {}), '(result)\n', (13710, 13718), False, 'import pygeos\n'), ((13961, 14001), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 1 1)"""'], {}), "('LINESTRING (0 0, 1 1)')\n", (13976, 14001), False, 'import pygeos\n'), ((14011, 14051), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 1)"""'], {}), "('LINESTRING (0 0, 0 1)')\n", (14026, 14051), False, 'import pygeos\n'), ((14061, 14101), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 1, 1 1)"""'], {}), "('LINESTRING (0 1, 1 1)')\n", (14076, 14101), False, 'import pygeos\n'), ((14226, 14241), 'numpy.array', 'np.array', (['lines'], {}), '(lines)\n', (14234, 14241), True, 'import numpy as np\n'), ((14352, 14369), 'numpy.array', 'np.array', (['[lines]'], {}), '([lines])\n', (14360, 14369), True, 'import numpy as np\n'), ((15182, 15222), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 1 1)"""'], {}), "('LINESTRING (0 0, 1 1)')\n", (15197, 15222), False, 'import pygeos\n'), ((15232, 15272), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 1)"""'], {}), "('LINESTRING (0 0, 0 1)')\n", (15247, 15272), False, 'import pygeos\n'), ((15282, 15322), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 1, 1 1)"""'], {}), "('LINESTRING (0 1, 1 1)')\n", (15297, 15322), False, 'import pygeos\n'), ((15676, 15719), 'pygeos.Geometry', 'pygeos.Geometry', (['"""GEOMETRYCOLLECTION EMPTY"""'], {}), "('GEOMETRYCOLLECTION EMPTY')\n", (15691, 15719), False, 'import pygeos\n'), ((15786, 15826), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 1 1)"""'], {}), "('LINESTRING (0 0, 1 1)')\n", (15801, 15826), False, 'import pygeos\n'), ((15836, 15876), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 1)"""'], {}), "('LINESTRING (0 0, 0 1)')\n", (15851, 15876), False, 'import pygeos\n'), ((15886, 15926), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 1, 1 1)"""'], {}), "('LINESTRING (0 1, 1 
1)')\n", (15901, 15926), False, 'import pygeos\n'), ((15936, 15976), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (1 1, 1 0)"""'], {}), "('LINESTRING (1 1, 1 0)')\n", (15951, 15976), False, 'import pygeos\n'), ((16000, 16040), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (1 0, 0 0)"""'], {}), "('LINESTRING (1 0, 0 0)')\n", (16015, 16040), False, 'import pygeos\n'), ((16050, 16090), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (5 5, 6 6)"""'], {}), "('LINESTRING (5 5, 6 6)')\n", (16065, 16090), False, 'import pygeos\n'), ((16100, 16144), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (1 1, 100 100)"""'], {}), "('LINESTRING (1 1, 100 100)')\n", (16115, 16144), False, 'import pygeos\n'), ((16154, 16184), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POINT (0 0)"""'], {}), "('POINT (0 0)')\n", (16169, 16184), False, 'import pygeos\n'), ((16611, 16654), 'pygeos.Geometry', 'pygeos.Geometry', (['"""GEOMETRYCOLLECTION EMPTY"""'], {}), "('GEOMETRYCOLLECTION EMPTY')\n", (16626, 16654), False, 'import pygeos\n'), ((16842, 16885), 'pygeos.Geometry', 'pygeos.Geometry', (['"""GEOMETRYCOLLECTION EMPTY"""'], {}), "('GEOMETRYCOLLECTION EMPTY')\n", (16857, 16885), False, 'import pygeos\n'), ((16944, 16984), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 1 1)"""'], {}), "('LINESTRING (0 0, 1 1)')\n", (16959, 16984), False, 'import pygeos\n'), ((16994, 17034), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 1)"""'], {}), "('LINESTRING (0 0, 0 1)')\n", (17009, 17034), False, 'import pygeos\n'), ((17044, 17084), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 1, 1 1)"""'], {}), "('LINESTRING (0 1, 1 1)')\n", (17059, 17084), False, 'import pygeos\n'), ((17214, 17229), 'numpy.array', 'np.array', (['lines'], {}), '(lines)\n', (17222, 17229), True, 'import numpy as np\n'), ((17503, 17520), 'numpy.array', 'np.array', (['[lines]'], {}), '([lines])\n', (17511, 17520), True, 'import numpy as np\n'), ((18939, 18979), 
'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 1 1)"""'], {}), "('LINESTRING (0 0, 1 1)')\n", (18954, 18979), False, 'import pygeos\n'), ((18989, 19029), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 1)"""'], {}), "('LINESTRING (0 0, 0 1)')\n", (19004, 19029), False, 'import pygeos\n'), ((19039, 19079), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 1, 1 1)"""'], {}), "('LINESTRING (0 1, 1 1)')\n", (19054, 19079), False, 'import pygeos\n'), ((19885, 19947), 'pytest.raises', 'pytest.raises', (['GEOSException'], {'match': '"""IllegalArgumentException"""'}), "(GEOSException, match='IllegalArgumentException')\n", (19898, 19947), False, 'import pytest\n'), ((19957, 20005), 'pygeos.segmentize', 'pygeos.segmentize', (['geometry'], {'tolerance': 'tolerance'}), '(geometry, tolerance=tolerance)\n', (19974, 20005), False, 'import pygeos\n'), ((20980, 21016), 'pygeos.segmentize', 'pygeos.segmentize', (['None'], {'tolerance': '(5)'}), '(None, tolerance=5)\n', (20997, 21016), False, 'import pygeos\n'), ((1134, 1162), 'pygeos.get_type_id', 'pygeos.get_type_id', (['geometry'], {}), '(geometry)\n', (1152, 1162), False, 'import pygeos\n'), ((1191, 1252), 'pytest.raises', 'pytest.raises', (['GEOSException'], {'match': '"""only accept linestrings"""'}), "(GEOSException, match='only accept linestrings')\n", (1204, 1252), False, 'import pytest\n'), ((3343, 3450), 'pygeos.Geometry', 'Geometry', (['"""GEOMETRYCOLLECTION(POLYGON((0 0, 3 0, 3 3, 0 3, 0 0)), POLYGON((1 1, 1 2, 2 2, 1 1)))"""'], {}), "(\n 'GEOMETRYCOLLECTION(POLYGON((0 0, 3 0, 3 3, 0 3, 0 0)), POLYGON((1 1, 1 2, 2 2, 1 1)))'\n )\n", (3351, 3450), False, 'from pygeos import Geometry, GEOSException\n'), ((3484, 3553), 'pygeos.Geometry', 'Geometry', (['"""POLYGON ((0 0, 0 3, 3 3, 3 0, 0 0), (1 1, 2 2, 1 2, 1 1))"""'], {}), "('POLYGON ((0 0, 0 3, 3 3, 3 0, 0 0), (1 1, 2 2, 1 2, 1 1))')\n", (3492, 3553), False, 'from pygeos import Geometry, GEOSException\n'), ((4235, 4281), 
'pygeos.Geometry', 'Geometry', (['"""POLYGON((0 0, 1 1, 1 2, 1 1, 0 0))"""'], {}), "('POLYGON((0 0, 1 1, 1 2, 1 1, 0 0))')\n", (4243, 4281), False, 'from pygeos import Geometry, GEOSException\n'), ((4295, 4347), 'pygeos.Geometry', 'Geometry', (['"""MULTILINESTRING ((1 1, 1 2), (0 0, 1 1))"""'], {}), "('MULTILINESTRING ((1 1, 1 2), (0 0, 1 1))')\n", (4303, 4347), False, 'from pygeos import Geometry, GEOSException\n'), ((4461, 4507), 'pygeos.Geometry', 'Geometry', (['"""POLYGON((0 0, 2 2, 2 0, 0 2, 0 0))"""'], {}), "('POLYGON((0 0, 2 2, 2 0, 0 2, 0 0))')\n", (4469, 4507), False, 'from pygeos import Geometry, GEOSException\n'), ((4521, 4594), 'pygeos.Geometry', 'Geometry', (['"""MULTIPOLYGON (((1 1, 2 2, 2 0, 1 1)), ((0 0, 0 2, 1 1, 0 0)))"""'], {}), "('MULTIPOLYGON (((1 1, 2 2, 2 0, 1 1)), ((0 0, 0 2, 1 1, 0 0)))')\n", (4529, 4594), False, 'from pygeos import Geometry, GEOSException\n'), ((5752, 5776), 'pygeos.normalize', 'pygeos.normalize', (['actual'], {}), '(actual)\n', (5768, 5776), False, 'import pygeos\n'), ((5780, 5806), 'pygeos.normalize', 'pygeos.normalize', (['expected'], {}), '(expected)\n', (5796, 5806), False, 'import pygeos\n'), ((6022, 6074), 'pygeos.Geometry', 'Geometry', (['"""MULTILINESTRING ((1 1, 0 0), (1 1, 1 2))"""'], {}), "('MULTILINESTRING ((1 1, 0 0), (1 1, 1 2))')\n", (6030, 6074), False, 'from pygeos import Geometry, GEOSException\n'), ((6088, 6140), 'pygeos.Geometry', 'Geometry', (['"""MULTILINESTRING ((1 1, 1 2), (0 0, 1 1))"""'], {}), "('MULTILINESTRING ((1 1, 1 2), (0 0, 1 1))')\n", (6096, 6140), False, 'from pygeos import Geometry, GEOSException\n'), ((7798, 7838), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 1 2)"""'], {}), "('LINESTRING (0 0, 1 2)')\n", (7813, 7838), False, 'import pygeos\n'), ((7852, 7892), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (1 2, 0 0)"""'], {}), "('LINESTRING (1 2, 0 0)')\n", (7867, 7892), False, 'import pygeos\n'), ((7927, 7977), 'pygeos.Geometry', 'pygeos.Geometry', 
(['"""LINEARRING (0 0, 1 2, 1 3, 0 0)"""'], {}), "('LINEARRING (0 0, 1 2, 1 3, 0 0)')\n", (7942, 7977), False, 'import pygeos\n'), ((7991, 8041), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINEARRING (0 0, 1 3, 1 2, 0 0)"""'], {}), "('LINEARRING (0 0, 1 3, 1 2, 0 0)')\n", (8006, 8041), False, 'import pygeos\n'), ((8076, 8130), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))"""'], {}), "('POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))')\n", (8091, 8130), False, 'import pygeos\n'), ((8144, 8198), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))"""'], {}), "('POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))')\n", (8159, 8198), False, 'import pygeos\n'), ((8233, 8322), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON((0 0, 10 0, 10 10, 0 10, 0 0), (2 2, 2 4, 4 4, 4 2, 2 2))"""'], {}), "(\n 'POLYGON((0 0, 10 0, 10 10, 0 10, 0 0), (2 2, 2 4, 4 4, 4 2, 2 2))')\n", (8248, 8322), False, 'import pygeos\n'), ((8361, 8450), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON((0 0, 0 10, 10 10, 10 0, 0 0), (2 2, 4 2, 4 4, 2 4, 2 2))"""'], {}), "(\n 'POLYGON((0 0, 0 10, 10 10, 10 0, 0 0), (2 2, 4 2, 4 4, 2 4, 2 2))')\n", (8376, 8450), False, 'import pygeos\n'), ((8522, 8581), 'pygeos.Geometry', 'pygeos.Geometry', (['"""MULTILINESTRING ((0 0, 1 2), (3 3, 4 4))"""'], {}), "('MULTILINESTRING ((0 0, 1 2), (3 3, 4 4))')\n", (8537, 8581), False, 'import pygeos\n'), ((8595, 8654), 'pygeos.Geometry', 'pygeos.Geometry', (['"""MULTILINESTRING ((1 2, 0 0), (4 4, 3 3))"""'], {}), "('MULTILINESTRING ((1 2, 0 0), (4 4, 3 3))')\n", (8610, 8654), False, 'import pygeos\n'), ((8813, 8908), 'pygeos.Geometry', 'pygeos.Geometry', (['"""MULTIPOLYGON (((0 0, 1 0, 1 1, 0 1, 0 0)), ((2 2, 2 3, 3 3, 3 2, 2 2)))"""'], {}), "(\n 'MULTIPOLYGON (((0 0, 1 0, 1 1, 0 1, 0 0)), ((2 2, 2 3, 3 3, 3 2, 2 2)))')\n", (8828, 8908), False, 'import pygeos\n'), ((8947, 9042), 'pygeos.Geometry', 'pygeos.Geometry', (['"""MULTIPOLYGON (((0 0, 0 1, 1 1, 1 0, 0 0)), ((2 2, 3 2, 3 3, 
2 3, 2 2)))"""'], {}), "(\n 'MULTIPOLYGON (((0 0, 0 1, 1 1, 1 0, 0 0)), ((2 2, 3 2, 3 3, 2 3, 2 2)))')\n", (8962, 9042), False, 'import pygeos\n'), ((13226, 13242), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (13234, 13242), True, 'import numpy as np\n'), ((21253, 21294), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 10)"""'], {}), "('LINESTRING (0 0, 0 10)')\n", (21268, 21294), False, 'import pygeos\n'), ((21324, 21365), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 10)"""'], {}), "('LINESTRING (0 0, 0 10)')\n", (21339, 21365), False, 'import pygeos\n'), ((21400, 21458), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))"""'], {}), "('POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))')\n", (21415, 21458), False, 'import pygeos\n'), ((21488, 21546), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))"""'], {}), "('POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))')\n", (21503, 21546), False, 'import pygeos\n'), ((21631, 21672), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 10)"""'], {}), "('LINESTRING (0 0, 0 10)')\n", (21646, 21672), False, 'import pygeos\n'), ((21701, 21747), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 5, 0 10)"""'], {}), "('LINESTRING (0 0, 0 5, 0 10)')\n", (21716, 21747), False, 'import pygeos\n'), ((21782, 21833), 'pygeos.Geometry', 'Geometry', (['"""POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))"""'], {}), "('POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))')\n", (21790, 21833), False, 'from pygeos import Geometry, GEOSException\n'), ((21862, 21947), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON ((0 0, 5 0, 10 0, 10 5, 10 10, 5 10, 0 10, 0 5, 0 0))"""'], {}), "('POLYGON ((0 0, 5 0, 10 0, 10 5, 10 10, 5 10, 0 10, 0 5, 0 0))'\n )\n", (21877, 21947), False, 'import pygeos\n'), ((23784, 23840), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON ((0 5, 5 10, 10 5, 5 0, 0 5))"""'], {}), "('POLYGON ((0 5, 5 10, 10 5, 5 0, 0 
5))')\n", (23799, 23840), False, 'import pygeos\n'), ((23937, 23978), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (1 0, 1 10)"""'], {}), "('LINESTRING (1 0, 1 10)')\n", (23952, 23978), False, 'import pygeos\n'), ((24075, 24115), 'pygeos.Geometry', 'pygeos.Geometry', (['"""MULTIPOINT (2 2, 4 2)"""'], {}), "('MULTIPOINT (2 2, 4 2)')\n", (24090, 24115), False, 'import pygeos\n'), ((24212, 24242), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POINT (2 2)"""'], {}), "('POINT (2 2)')\n", (24227, 24242), False, 'import pygeos\n'), ((24256, 24286), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POINT (2 2)"""'], {}), "('POINT (2 2)')\n", (24271, 24286), False, 'import pygeos\n'), ((24321, 24364), 'pygeos.Geometry', 'pygeos.Geometry', (['"""GEOMETRYCOLLECTION EMPTY"""'], {}), "('GEOMETRYCOLLECTION EMPTY')\n", (24336, 24364), False, 'import pygeos\n'), ((24378, 24410), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON EMPTY"""'], {}), "('POLYGON EMPTY')\n", (24393, 24410), False, 'import pygeos\n'), ((26067, 26098), 'pygeos.equals', 'pygeos.equals', (['actual', 'expected'], {}), '(actual, expected)\n', (26080, 26098), False, 'import pygeos\n'), ((25141, 25189), 'pygeos.Geometry', 'pygeos.Geometry', (['"""MULTIPOINT (0 0, 10 0, 10 10)"""'], {}), "('MULTIPOINT (0 0, 10 0, 10 10)')\n", (25156, 25189), False, 'import pygeos\n'), ((25203, 25261), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON ((0 0, 5 -5, 15 5, 10 10, 0 0))"""'], {}), "('POLYGON ((0 0, 5 -5, 15 5, 10 10, 0 0))')\n", (25218, 25261), False, 'import pygeos\n'), ((25296, 25343), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (1 1, 5 1, 10 10)"""'], {}), "('LINESTRING (1 1, 5 1, 10 10)')\n", (25311, 25343), False, 'import pygeos\n'), ((25357, 25415), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON ((1 1, 3 -1, 12 8, 10 10, 1 1))"""'], {}), "('POLYGON ((1 1, 3 -1, 12 8, 10 10, 1 1))')\n", (25372, 25415), False, 'import pygeos\n'), ((25450, 25501), 'pygeos.Geometry', 'pygeos.Geometry', 
(['"""POLYGON ((1 1, 15 1, 5 10, 1 1))"""'], {}), "('POLYGON ((1 1, 15 1, 5 10, 1 1))')\n", (25465, 25501), False, 'import pygeos\n'), ((25515, 25574), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON ((15 1, 15 10, 1 10, 1 1, 15 1))"""'], {}), "('POLYGON ((15 1, 15 10, 1 10, 1 1, 15 1))')\n", (25530, 25574), False, 'import pygeos\n'), ((25609, 25650), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (1 1, 10 1)"""'], {}), "('LINESTRING (1 1, 10 1)')\n", (25624, 25650), False, 'import pygeos\n'), ((25664, 25705), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (1 1, 10 1)"""'], {}), "('LINESTRING (1 1, 10 1)')\n", (25679, 25705), False, 'import pygeos\n'), ((25740, 25770), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POINT (2 2)"""'], {}), "('POINT (2 2)')\n", (25755, 25770), False, 'import pygeos\n'), ((25784, 25814), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POINT (2 2)"""'], {}), "('POINT (2 2)')\n", (25799, 25814), False, 'import pygeos\n'), ((25849, 25892), 'pygeos.Geometry', 'pygeos.Geometry', (['"""GEOMETRYCOLLECTION EMPTY"""'], {}), "('GEOMETRYCOLLECTION EMPTY')\n", (25864, 25892), False, 'import pygeos\n'), ((25906, 25938), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON EMPTY"""'], {}), "('POLYGON EMPTY')\n", (25921, 25938), False, 'import pygeos\n'), ((27203, 27234), 'pygeos.equals', 'pygeos.equals', (['actual', 'expected'], {}), '(actual, expected)\n', (27216, 27234), False, 'import pygeos\n'), ((26261, 26309), 'pygeos.Geometry', 'pygeos.Geometry', (['"""MULTIPOINT (0 0, 10 0, 10 10)"""'], {}), "('MULTIPOINT (0 0, 10 0, 10 10)')\n", (26276, 26309), False, 'import pygeos\n'), ((26323, 26381), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON ((0 0, 5 -5, 15 5, 10 10, 0 0))"""'], {}), "('POLYGON ((0 0, 5 -5, 15 5, 10 10, 0 0))')\n", (26338, 26381), False, 'import pygeos\n'), ((26416, 26463), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (1 1, 5 1, 10 10)"""'], {}), "('LINESTRING (1 1, 5 1, 10 10)')\n", (26431, 26463), 
False, 'import pygeos\n'), ((26477, 26535), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON ((1 1, 3 -1, 12 8, 10 10, 1 1))"""'], {}), "('POLYGON ((1 1, 3 -1, 12 8, 10 10, 1 1))')\n", (26492, 26535), False, 'import pygeos\n'), ((26570, 26621), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON ((1 1, 15 1, 5 10, 1 1))"""'], {}), "('POLYGON ((1 1, 15 1, 5 10, 1 1))')\n", (26585, 26621), False, 'import pygeos\n'), ((26635, 26694), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON ((15 1, 15 10, 1 10, 1 1, 15 1))"""'], {}), "('POLYGON ((15 1, 15 10, 1 10, 1 1, 15 1))')\n", (26650, 26694), False, 'import pygeos\n'), ((26729, 26770), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (1 1, 10 1)"""'], {}), "('LINESTRING (1 1, 10 1)')\n", (26744, 26770), False, 'import pygeos\n'), ((26784, 26825), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (1 1, 10 1)"""'], {}), "('LINESTRING (1 1, 10 1)')\n", (26799, 26825), False, 'import pygeos\n'), ((26860, 26890), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POINT (2 2)"""'], {}), "('POINT (2 2)')\n", (26875, 26890), False, 'import pygeos\n'), ((26904, 26934), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POINT (2 2)"""'], {}), "('POINT (2 2)')\n", (26919, 26934), False, 'import pygeos\n'), ((26969, 27012), 'pygeos.Geometry', 'pygeos.Geometry', (['"""GEOMETRYCOLLECTION EMPTY"""'], {}), "('GEOMETRYCOLLECTION EMPTY')\n", (26984, 27012), False, 'import pygeos\n'), ((27026, 27058), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POLYGON EMPTY"""'], {}), "('POLYGON EMPTY')\n", (27041, 27058), False, 'import pygeos\n'), ((5170, 5211), 'pygeos.Geometry', 'Geometry', (['"""POLYGON((0 0, 2 2, 0 2, 0 0))"""'], {}), "('POLYGON((0 0, 2 2, 0 2, 0 0))')\n", (5178, 5211), False, 'from pygeos import Geometry, GEOSException\n'), ((5229, 5275), 'pygeos.Geometry', 'Geometry', (['"""POLYGON((0 0, 2 2, 2 0, 0 2, 0 0))"""'], {}), "('POLYGON((0 0, 2 2, 2 0, 0 2, 0 0))')\n", (5237, 5275), False, 'from pygeos import Geometry, 
GEOSException\n'), ((5322, 5363), 'pygeos.Geometry', 'Geometry', (['"""POLYGON((0 0, 2 2, 0 2, 0 0))"""'], {}), "('POLYGON((0 0, 2 2, 0 2, 0 0))')\n", (5330, 5363), False, 'from pygeos import Geometry, GEOSException\n'), ((5381, 5454), 'pygeos.Geometry', 'Geometry', (['"""MULTIPOLYGON (((1 1, 0 0, 0 2, 1 1)), ((1 1, 2 2, 2 0, 1 1)))"""'], {}), "('MULTIPOLYGON (((1 1, 0 0, 0 2, 1 1)), ((1 1, 2 2, 2 0, 1 1)))')\n", (5389, 5454), False, 'from pygeos import Geometry, GEOSException\n'), ((7149, 7165), 'numpy.array', 'np.array', (['[8, 9]'], {}), '([8, 9])\n', (7157, 7165), True, 'import numpy as np\n'), ((8674, 8748), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pygeos.geos_version < (3, 8, 1))'], {'reason': '"""GEOS < 3.8.1"""'}), "(pygeos.geos_version < (3, 8, 1), reason='GEOS < 3.8.1')\n", (8692, 8748), False, 'import pytest\n'), ((9643, 9665), 'pygeos.reverse', 'pygeos.reverse', (['[None]'], {}), '([None])\n', (9657, 9665), False, 'import pygeos\n'), ((16292, 16316), 'pygeos.get_type_id', 'pygeos.get_type_id', (['geom'], {}), '(geom)\n', (16310, 16316), False, 'import pygeos\n'), ((17393, 17436), 'pygeos.Geometry', 'pygeos.Geometry', (['"""GEOMETRYCOLLECTION EMPTY"""'], {}), "('GEOMETRYCOLLECTION EMPTY')\n", (17408, 17436), False, 'import pygeos\n'), ((17739, 17782), 'pygeos.Geometry', 'pygeos.Geometry', (['"""GEOMETRYCOLLECTION EMPTY"""'], {}), "('GEOMETRYCOLLECTION EMPTY')\n", (17754, 17782), False, 'import pygeos\n'), ((18163, 18206), 'pygeos.Geometry', 'pygeos.Geometry', (['"""GEOMETRYCOLLECTION EMPTY"""'], {}), "('GEOMETRYCOLLECTION EMPTY')\n", (18178, 18206), False, 'import pygeos\n'), ((18709, 18752), 'pygeos.Geometry', 'pygeos.Geometry', (['"""GEOMETRYCOLLECTION EMPTY"""'], {}), "('GEOMETRYCOLLECTION EMPTY')\n", (18724, 18752), False, 'import pygeos\n'), ((19579, 19622), 'pygeos.Geometry', 'pygeos.Geometry', (['"""GEOMETRYCOLLECTION EMPTY"""'], {}), "('GEOMETRYCOLLECTION EMPTY')\n", (19594, 19622), False, 'import pygeos\n'), ((22079, 22120), 
'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 10)"""'], {}), "('LINESTRING (0 0, 0 10)')\n", (22094, 22120), False, 'import pygeos\n'), ((22138, 22178), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 2)"""'], {}), "('LINESTRING (0 0, 0 2)')\n", (22153, 22178), False, 'import pygeos\n'), ((22240, 22286), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 5, 0 10)"""'], {}), "('LINESTRING (0 0, 0 5, 0 10)')\n", (22255, 22286), False, 'import pygeos\n'), ((22304, 22344), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 2)"""'], {}), "('LINESTRING (0 0, 0 2)')\n", (22319, 22344), False, 'import pygeos\n'), ((22412, 22453), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 10)"""'], {}), "('LINESTRING (0 0, 0 10)')\n", (22427, 22453), False, 'import pygeos\n'), ((22471, 22511), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 2)"""'], {}), "('LINESTRING (0 0, 0 2)')\n", (22486, 22511), False, 'import pygeos\n'), ((22575, 22621), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 5, 0 10)"""'], {}), "('LINESTRING (0 0, 0 5, 0 10)')\n", (22590, 22621), False, 'import pygeos\n'), ((22639, 22679), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 2)"""'], {}), "('LINESTRING (0 0, 0 2)')\n", (22654, 22679), False, 'import pygeos\n'), ((22747, 22788), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 10)"""'], {}), "('LINESTRING (0 0, 0 10)')\n", (22762, 22788), False, 'import pygeos\n'), ((22806, 22846), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 2)"""'], {}), "('LINESTRING (0 0, 0 2)')\n", (22821, 22846), False, 'import pygeos\n'), ((22915, 22961), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 5, 0 10)"""'], {}), "('LINESTRING (0 0, 0 5, 0 10)')\n", (22930, 22961), False, 'import pygeos\n'), ((22979, 23024), 'pygeos.Geometry', 'pygeos.Geometry', (['"""LINESTRING (0 0, 0 1, 0 2)"""'], {}), "('LINESTRING (0 0, 0 1, 0 
2)')\n", (22994, 23024), False, 'import pygeos\n'), ((23868, 23898), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POINT (5 5)"""'], {}), "('POINT (5 5)')\n", (23883, 23898), False, 'import pygeos\n'), ((24006, 24036), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POINT (1 5)"""'], {}), "('POINT (1 5)')\n", (24021, 24036), False, 'import pygeos\n'), ((24143, 24173), 'pygeos.Geometry', 'pygeos.Geometry', (['"""POINT (3 2)"""'], {}), "('POINT (3 2)')\n", (24158, 24173), False, 'import pygeos\n')] |
import numpy as np
import copy
def calcbadness(xvals, validcolumns, stimix, results, sessionindicator):
    """
    badness = calcbadness(xvals,validcolumns,stimix,results,sessionindicator)

    Arguments:
    __________

    <xvals>:
        is a list vector of vectors of run indices

    <validcolumns>:
        is a list vector, each element is the vector of trial indices
        associated with the run

    <stimix>:
        is a list vector, each element is the vector of actual condition
        numbers occurring with a given run

    <results>:
        is a 1 x n with results. the first one is SPECIAL and is unregularized.

    <sessionindicator>
        is 1 x RUNS with positive integers indicating run groupings for sessions.
        this is used only to perform the session-wise z-scoring for the purposes
        of hyperparameter evaluation.

    Returns
    __________

    <badness>:
        voxels x hyperparameters with the sum of the squared error from
        cross-validation.
        the testing data consists of the beta weights from results[0],
        i.e. unregularized beta weights.
        note that the squared error is expressed in the z-score units
        (given that we z-score the single-trial beta weights prior to evaluation
        of the different hyperparameters).

    note:
    the unregularized betas set the stage for the session-wise normalization:
    for each session, we determine a fixed mu and sigma that are applied to
    the session under all of the various regularization levels.
    """
    # initialize the voxels x hyperparameters error accumulator
    badness = np.zeros(
        (results[0].shape[0], len(results))
    )

    # indices of all runs
    alltheruns = np.arange(len(validcolumns))

    # z-score transform the single-trial beta weights, one session at a time.
    # BUGFIX: make the deep copy ONCE, before the session loop. previously the
    # copy was re-created inside the loop, which silently discarded the
    # normalization of every session except the last whenever the data
    # contained more than one session. (single-session behavior is unchanged.)
    resultsdm = copy.deepcopy(results)
    if np.max(sessionindicator) == 1:
        sessions = [1]
    else:
        sessions = range(1, np.max(sessionindicator) + 1)
    for sess in sessions:
        # runs belonging to this session, and their associated trial columns
        wh = np.flatnonzero(np.array(sessionindicator) == sess)
        whcol = np.concatenate(np.asarray(validcolumns)[wh])
        # the unregularized case (results[0]) fixes the mu/sigma that are
        # applied to this session under ALL regularization levels
        mn = np.mean(results[0][:, whcol], axis=1)
        sd = np.std(results[0][:, whcol], axis=1)
        for runis in range(len(resultsdm)):
            rundemean = results[runis][:, whcol]-mn[:, np.newaxis]
            # voxels with zero variance produce inf/nan; suppress the warning
            # (the invalid values propagate exactly as before)
            with np.errstate(divide="ignore", invalid="ignore"):
                resultsdm[runis][:, whcol] = rundemean / sd[:, np.newaxis]

    # do cross-validation
    for xx in range(len(xvals)):
        # which run(s) are held out for testing,
        # e.g. training runs might be [1 2 5 6 7 8 9 10 11 12]
        testix = xvals[xx]
        trainix = np.setdiff1d(alltheruns, testix)
        # vector of trial indices in the testing data
        testcols = np.asarray(validcolumns[testix])
        # vector of trial indices in the training data
        traincols = np.concatenate(
            np.asarray([validcolumns[tx] for tx in trainix])
        )
        # vector of condition-ids in the testing data
        testids = stimix[testix]
        # vector of condition-ids in the training data
        trainids = np.concatenate(
            np.asarray([stimix[tx] for tx in trainix])
        )
        # calculate cross-validation performance for every hyperparameter
        for pcr in range(len(results)):
            for trial in range(len(testids)):
                # which training trials match the current condition-id?
                haveix = np.flatnonzero(trainids == testids[trial])
                if haveix.size > 0:
                    # compute squared error of all matching training betas
                    # against the current testing beta, and accumulate!!
                    # NOTICE the use of resultsdm[0] (unregularized betas)
                    # as the testing data.
                    betas_1 = resultsdm[pcr][:, traincols[haveix]]
                    betas_2 = resultsdm[0][:, testcols[trial]]
                    badness[:, pcr] = badness[:, pcr] + np.sum(
                        (betas_1-betas_2[:, np.newaxis])**2, axis=1)
    return badness
"""
# if isempty(hashrec{testids(ttt)})
# hashrec{testids(ttt)} = \\
# mean(results(ll).modelmd{2}(:,traincols(haveix)),2); # voxels x 1
# hashrec{testids(ttt)} = results(ll).modelmd{2}(:,traincols(haveix));
# voxels x instances
# end
"""
| [
"numpy.mean",
"copy.deepcopy",
"numpy.flatnonzero",
"numpy.asarray",
"numpy.max",
"numpy.array",
"numpy.errstate",
"numpy.sum",
"numpy.setdiff1d",
"numpy.std"
] | [((1706, 1730), 'numpy.max', 'np.max', (['sessionindicator'], {}), '(sessionindicator)\n', (1712, 1730), True, 'import numpy as np\n'), ((2033, 2070), 'numpy.mean', 'np.mean', (['results[0][:, whcol]'], {'axis': '(1)'}), '(results[0][:, whcol], axis=1)\n', (2040, 2070), True, 'import numpy as np\n'), ((2125, 2161), 'numpy.std', 'np.std', (['results[0][:, whcol]'], {'axis': '(1)'}), '(results[0][:, whcol], axis=1)\n', (2131, 2161), True, 'import numpy as np\n'), ((2183, 2205), 'copy.deepcopy', 'copy.deepcopy', (['results'], {}), '(results)\n', (2196, 2205), False, 'import copy\n'), ((2644, 2676), 'numpy.setdiff1d', 'np.setdiff1d', (['alltheruns', 'testix'], {}), '(alltheruns, testix)\n', (2656, 2676), True, 'import numpy as np\n'), ((2766, 2798), 'numpy.asarray', 'np.asarray', (['validcolumns[testix]'], {}), '(validcolumns[testix])\n', (2776, 2798), True, 'import numpy as np\n'), ((2903, 2951), 'numpy.asarray', 'np.asarray', (['[validcolumns[tx] for tx in trainix]'], {}), '([validcolumns[tx] for tx in trainix])\n', (2913, 2951), True, 'import numpy as np\n'), ((3157, 3199), 'numpy.asarray', 'np.asarray', (['[stimix[tx] for tx in trainix]'], {}), '([stimix[tx] for tx in trainix])\n', (3167, 3199), True, 'import numpy as np\n'), ((1798, 1822), 'numpy.max', 'np.max', (['sessionindicator'], {}), '(sessionindicator)\n', (1804, 1822), True, 'import numpy as np\n'), ((1884, 1910), 'numpy.array', 'np.array', (['sessionindicator'], {}), '(sessionindicator)\n', (1892, 1910), True, 'import numpy as np\n'), ((1952, 1976), 'numpy.asarray', 'np.asarray', (['validcolumns'], {}), '(validcolumns)\n', (1962, 1976), True, 'import numpy as np\n'), ((2335, 2381), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (2346, 2381), True, 'import numpy as np\n'), ((3524, 3566), 'numpy.flatnonzero', 'np.flatnonzero', (['(trainids == testids[trial])'], {}), '(trainids == testids[trial])\n', (3538, 3566), 
True, 'import numpy as np\n'), ((4319, 4374), 'numpy.sum', 'np.sum', (['((betas_1 - betas_2[:, np.newaxis]) ** 2)'], {'axis': '(1)'}), '((betas_1 - betas_2[:, np.newaxis]) ** 2, axis=1)\n', (4325, 4374), True, 'import numpy as np\n')] |
import numpy as np
import cv2
from matplotlib import pyplot as plt
from PIL import Image
# Load the pre-trained YOLOv3 object-detection network
# (weights and config files are expected relative to the working directory).
net = cv2.dnn.readNet("yolov3.weights", "cfg/yolov3.cfg")
# COCO class names, one per line.
classes = []
with open("data/coco.names", "r") as f:
    classes = [line.strip() for line in f.readlines()]
# Names of the unconnected output layers (the YOLO detection heads).
# NOTE(review): the 'i[0] - 1' indexing assumes getUnconnectedOutLayers()
# returns Nx1 arrays (older OpenCV); newer OpenCV (>= 4.5.4) returns a flat
# array, which would require 'i - 1' instead -- verify against the
# installed version.
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# One random color per class, used when drawing detection boxes.
colors = np.random.uniform(0, 255, size=(len(classes), 3))
# Haar cascade used to locate faces before running YOLO on the crop.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
img = cv2.imread('face.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect faces in grayscale (scaleFactor=1.3, minNeighbors=5).
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
    # Draw a blue rectangle around each detected face.
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
    #roi_gray = gray[y:y+h, x:x+w]
    #roi_color = img[y:y+h, x:x+w]
    #eyes = eye_cascade.detectMultiScale(roi_gray)
    #for (ex,ey,ew,eh) in eyes:
        #cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
# NOTE(review): x, y, w, h leak out of the loop above, so only the LAST
# detected face is cropped (with a 10-pixel margin); if no face is found
# this raises NameError. The script appears to assume one face per image.
img_crop = img[y-10: y+10+h, x-10: x+10+w]
# Run YOLO on the face crop: 416x416 blob, scale 1/255 (~0.00392),
# BGR->RGB swap enabled, no cropping.
blob = cv2.dnn.blobFromImage(img_crop, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
# Collect detections above a 0.5 confidence threshold.
# NOTE(review): box coordinates are scaled by the face width/height (w, h)
# left over from the loop above, not by the crop's own dimensions --
# confirm this is intended.
class_ids = []
confidences = []
boxes = []
for out in outs:
    for detection in out:
        # detection layout: [cx, cy, w, h, objectness, class scores...]
        scores = detection[5:]
        class_id = np.argmax(scores)
        confidence = scores[class_id]
        if confidence > 0.5:
            # YOLO outputs box centers/sizes normalized to [0, 1]
            center_x = int(detection[0] * w)
            center_y = int(detection[1] * h)
            w1 = int(detection[2] * w)
            h1 = int(detection[3] * h)
            x1 = int(center_x - w1 / 2)
            y1 = int(center_y - h1 / 2)
            boxes.append([x1, y1, w1, h1])
            confidences.append(float(confidence))
            class_ids.append(class_id)
# Non-maximum suppression to drop overlapping boxes
# (score threshold 0.5, NMS threshold 0.4).
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
font = cv2.FONT_HERSHEY_PLAIN
# Draw the surviving boxes and labels on the crop.
for i in range(len(boxes)):
    if i in indexes:
        x1, y1, w1, h1 = boxes[i]
        label = str(classes[class_ids[i]])
        color = colors[i]
        cv2.rectangle(img_crop, (x1, y1), (x1 + w1, y1 + h1), color, 2)
        cv2.putText(img_crop, label, (x1, y1 + 30), font, 3, color, 3)
#cv2.imshow('img',img)
cv2.imshow('img_crop',img_crop)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"cv2.dnn.blobFromImage",
"numpy.argmax",
"cv2.imshow",
"cv2.putText",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"cv2.dnn.NMSBoxes",
"cv2.imread",
"cv2.dnn.readNet"
] | [((101, 152), 'cv2.dnn.readNet', 'cv2.dnn.readNet', (['"""yolov3.weights"""', '"""cfg/yolov3.cfg"""'], {}), "('yolov3.weights', 'cfg/yolov3.cfg')\n", (116, 152), False, 'import cv2\n'), ((459, 519), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_default.xml"""'], {}), "('haarcascade_frontalface_default.xml')\n", (480, 519), False, 'import cv2\n'), ((590, 612), 'cv2.imread', 'cv2.imread', (['"""face.jpg"""'], {}), "('face.jpg')\n", (600, 612), False, 'import cv2\n'), ((621, 658), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (633, 658), False, 'import cv2\n'), ((2409, 2441), 'cv2.imshow', 'cv2.imshow', (['"""img_crop"""', 'img_crop'], {}), "('img_crop', img_crop)\n", (2419, 2441), False, 'import cv2\n'), ((2442, 2456), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2453, 2456), False, 'import cv2\n'), ((2458, 2481), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2479, 2481), False, 'import cv2\n'), ((748, 806), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (761, 806), False, 'import cv2\n'), ((1083, 1169), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['img_crop', '(0.00392)', '(416, 416)', '(0, 0, 0)', '(True)'], {'crop': '(False)'}), '(img_crop, 0.00392, (416, 416), (0, 0, 0), True, crop=\n False)\n', (1104, 1169), False, 'import cv2\n'), ((1403, 1420), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (1412, 1420), True, 'import numpy as np\n'), ((2238, 2301), 'cv2.rectangle', 'cv2.rectangle', (['img_crop', '(x1, y1)', '(x1 + w1, y1 + h1)', 'color', '(2)'], {}), '(img_crop, (x1, y1), (x1 + w1, y1 + h1), color, 2)\n', (2251, 2301), False, 'import cv2\n'), ((2315, 2377), 'cv2.putText', 'cv2.putText', (['img_crop', 'label', '(x1, y1 + 30)', 'font', '(3)', 'color', '(3)'], {}), '(img_crop, label, (x1, y1 + 30), font, 3, color, 
3)\n', (2326, 2377), False, 'import cv2\n'), ((1952, 1998), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['boxes', 'confidences', '(0.5)', '(0.4)'], {}), '(boxes, confidences, 0.5, 0.4)\n', (1968, 1998), False, 'import cv2\n')] |
from pathlib import Path
import argparse
import json
import glob
import sys
from matplotlib import pyplot as plt
import numpy as np
def roc_graphs(fprs, tprs, names, aucs, savename, minx=0.85):
    """Plot ROC curves for several models on one log-scale figure.

    Args:
        fprs: list of false-positive-rate arrays, one per model.
        tprs: list of true-positive-rate arrays, one per model.
        names: list of model names, used in the legend.
        aucs: list of AUC values in [0, 1], one per model.
        savename: output path prefix; the figure is written to
            '<savename>_roc.pdf'.
        minx: controls the lower y-axis limit (ylim starts at minx - 0.05).
    """
    # one distinct color per curve (supports up to 7 models)
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    plt.figure()  # figsize=(10, 10)
    ax = plt.axes(xscale='log', xlim=[1e-4, 1.0], ylim=[minx-0.05, 1])
    for i, (fpr, tpr, name, auc) in enumerate(zip(fprs, tprs, names, aucs)):
        # flip so fpr/tpr run in ascending order
        # (inputs are presumably stored descending -- TODO confirm)
        fpr = np.flipud(fpr)
        tpr = np.flipud(tpr)
        auc *= 100  # display AUC as a percentage
        ax.plot(fpr, tpr, color=colors[i], lw=2,
                label=f'{name} (AUC: {auc:.2f}%)')
    # BUGFIX: pass the grid visibility flag positionally -- the 'b=' keyword
    # was deprecated in Matplotlib 3.5 and removed in 3.8 (renamed 'visible'),
    # so 'b=True' raises TypeError on recent Matplotlib.
    plt.grid(True, which='major', axis='x',
             color='#666666', linestyle='dashed', alpha=0.6)
    plt.grid(True, which='minor', axis='x',
             color='#666666', linestyle='dotted', alpha=0.4)
    plt.grid(True, which='major', axis='y',
             color='#999999', linestyle='solid', alpha=0.1)
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    ax.legend(loc='lower right', fontsize=10,
              fancybox=True).get_frame().set_alpha(0.5)
    plt.savefig(savename + '_roc.pdf', bbox_inches='tight')
def acc_plot(accs, names, savename):
    """Horizontal bar chart of thresholded accuracies (%), best bar green.

    Saves the figure to ``<savename>_acc.pdf``.
    """
    plt.figure()  # figsize=(8, 3)
    percents = np.array(accs) * 100
    ax = plt.axes(xlim=[min(percents) - 0.5, max(percents) + 0.5])
    bars = ax.barh(names, percents)
    plt.grid(b=True, which='major', axis='x',
             color='#666666', linestyle='dashed', alpha=0.6)
    plt.title('Thresholded Accurracy')
    plt.xlabel('Accuracy (%)\nHigher is better')
    # Annotate every bar with its exact value.
    for label, value in zip(names, percents):
        plt.text(s=f'{value:.2f}%', x=value - 0.175, y=label, color="r",
                 verticalalignment="center", size=9)
    bars[np.argmax(percents)].set_color('green')
    plt.tight_layout()
    plt.savefig(savename + '_acc.pdf')
def inftime_plot(inftimes, names, savename):
    """Horizontal bar chart of inference times (ms); fastest bar green.

    Saves the figure to ``<savename>_time.pdf``.
    """
    plt.figure()  # figsize=(8, 3)
    times_ms = np.array(inftimes) * 1000
    ax = plt.axes(xlim=[np.min(times_ms) - 5, np.max(times_ms) + 1])
    bars = ax.barh(names, times_ms)
    plt.grid(b=True, which='major', axis='x',
             color='#666666', linestyle='dashed', alpha=0.6)
    plt.title('Inference time')
    plt.xlabel('Inference time (ms)\nLower is better')
    # Annotate every bar with its exact value.
    for label, value in zip(names, times_ms):
        plt.text(s=f'{value:.2f}ms', x=value - 1.75, y=label, color="r",
                 verticalalignment="center", size=9)
    bars[np.argmin(times_ms)].set_color('green')
    plt.tight_layout()
    plt.savefig(savename + '_time.pdf')
def tpr_at_fpr_plot(tpr_at_fprs, names, savename):
    """Horizontal bar chart of TAR at FAR=1e-4; best bar green.

    Saves the figure to ``<savename>_TAR.pdf``.
    """
    plt.figure()  # figsize=(8, 3)
    tars = np.array(tpr_at_fprs)
    ax = plt.axes(xlim=[np.min(tars) - 0.05, np.max(tars) + 0.05])
    bars = ax.barh(names, tars)
    plt.grid(b=True, which='major', axis='x',
             color='#666666', linestyle='dashed', alpha=0.6)
    plt.title('Verification TAR (@FAR=1e-4)')
    plt.xlabel('TAR (@FAR=1e-4)')
    # Annotate every bar with its exact value.
    for label, value in zip(names, tars):
        plt.text(s=f'{value:.4f}', x=value - 0.015, y=label, color="r",
                 verticalalignment="center", size=9)
    bars[np.argmax(tars)].set_color('green')
    plt.tight_layout()
    plt.savefig(savename + '_TAR.pdf')
def main():
    """Load per-model evaluation JSONs and render all comparison plots."""
    parser = argparse.ArgumentParser(
        description="Compare evaluation results",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--src", "-s", type=str, required=True,
                        help="path to dir with evaluation .json results")
    parser.add_argument("--name", "-n", type=str, required=True,
                        help="name for output file")
    args = parser.parse_args()

    fprs, tprs, accs, theshs = [], [], [], []
    aucs, inftimes, names, tpr_at_fprs = [], [], [], []
    min_tpr = 1.
    jpaths = glob.glob(str(Path(args.src) / '*.json'))
    # Only 7 plot colors are available, so cap the number of compared models.
    if len(jpaths) > 7:
        print('Cannot compare more than 7!')
        sys.exit()
    for jpath in jpaths:
        with open(jpath, 'r') as jfile:
            jsondata = json.load(jfile)
        names.append(Path(jpath).stem)
        fprs.append(jsondata['roc']['fpr'])
        tprs.append(jsondata['roc']['tpr'])
        kfold_acc = jsondata['kfold']['acc']
        roc_acc = jsondata['roc']['acc']
        # Keep whichever protocol (k-fold vs. ROC sweep) scored better.
        if kfold_acc > roc_acc:
            accs.append(kfold_acc)
            theshs.append(jsondata['kfold']['th'])
        else:
            accs.append(roc_acc)
            theshs.append(jsondata['roc']['th'])
        aucs.append(jsondata['roc']['auc'])
        inftimes.append(jsondata['inference_time'])
        tpr_at_fprs.append(jsondata['roc']['tpr_at_fpr']['0.0001'])
        min_tpr = min(jsondata['roc']['tpr_at_fpr']['1e-06'], min_tpr)
    Path(args.name).parent.mkdir(parents=True, exist_ok=True)
    roc_graphs(fprs, tprs, names, aucs, args.name, min_tpr)
    acc_plot(accs, names, args.name)
    tpr_at_fpr_plot(tpr_at_fprs, names, args.name)
    inftime_plot(inftimes, names, args.name)


if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.array",
"sys.exit",
"argparse.ArgumentParser",
"pathlib.Path",
"matplotlib.pyplot.xlabel",
"numpy.max",
"numpy.min",
"numpy.argmin",
"matplotlib.pyplot.savefig",
"numpy.flipud",
"numpy.argmax",
"matplotlib.pyplot.axes",
"matplo... | [((251, 263), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (261, 263), True, 'from matplotlib import pyplot as plt\n'), ((294, 359), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'xscale': '"""log"""', 'xlim': '[0.0001, 1.0]', 'ylim': '[minx - 0.05, 1]'}), "(xscale='log', xlim=[0.0001, 1.0], ylim=[minx - 0.05, 1])\n", (302, 359), True, 'from matplotlib import pyplot as plt\n'), ((616, 710), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""major"""', 'axis': '"""x"""', 'color': '"""#666666"""', 'linestyle': '"""dashed"""', 'alpha': '(0.6)'}), "(b=True, which='major', axis='x', color='#666666', linestyle=\n 'dashed', alpha=0.6)\n", (624, 710), True, 'from matplotlib import pyplot as plt\n'), ((723, 817), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""minor"""', 'axis': '"""x"""', 'color': '"""#666666"""', 'linestyle': '"""dotted"""', 'alpha': '(0.4)'}), "(b=True, which='minor', axis='x', color='#666666', linestyle=\n 'dotted', alpha=0.4)\n", (731, 817), True, 'from matplotlib import pyplot as plt\n'), ((830, 923), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""major"""', 'axis': '"""y"""', 'color': '"""#999999"""', 'linestyle': '"""solid"""', 'alpha': '(0.1)'}), "(b=True, which='major', axis='y', color='#999999', linestyle=\n 'solid', alpha=0.1)\n", (838, 923), True, 'from matplotlib import pyplot as plt\n'), ((937, 970), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (947, 970), True, 'from matplotlib import pyplot as plt\n'), ((975, 1007), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (985, 1007), True, 'from matplotlib import pyplot as plt\n'), ((1115, 1170), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savename + '_roc.pdf')"], {'bbox_inches': '"""tight"""'}), "(savename + '_roc.pdf', bbox_inches='tight')\n", (1126, 1170), True, 'from 
matplotlib import pyplot as plt\n'), ((1214, 1226), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1224, 1226), True, 'from matplotlib import pyplot as plt\n'), ((1333, 1376), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'xlim': '[minacc - 0.5, maxacc + 0.5]'}), '(xlim=[minacc - 0.5, maxacc + 0.5])\n', (1341, 1376), True, 'from matplotlib import pyplot as plt\n'), ((1409, 1503), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""major"""', 'axis': '"""x"""', 'color': '"""#666666"""', 'linestyle': '"""dashed"""', 'alpha': '(0.6)'}), "(b=True, which='major', axis='x', color='#666666', linestyle=\n 'dashed', alpha=0.6)\n", (1417, 1503), True, 'from matplotlib import pyplot as plt\n'), ((1516, 1550), 'matplotlib.pyplot.title', 'plt.title', (['"""Thresholded Accurracy"""'], {}), "('Thresholded Accurracy')\n", (1525, 1550), True, 'from matplotlib import pyplot as plt\n'), ((1555, 1602), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Accuracy (%)\nHigher is better"""'], {}), '("""Accuracy (%)\nHigher is better""")\n', (1565, 1602), True, 'from matplotlib import pyplot as plt\n'), ((1810, 1828), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1826, 1828), True, 'from matplotlib import pyplot as plt\n'), ((1833, 1867), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savename + '_acc.pdf')"], {}), "(savename + '_acc.pdf')\n", (1844, 1867), True, 'from matplotlib import pyplot as plt\n'), ((1919, 1931), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1929, 1931), True, 'from matplotlib import pyplot as plt\n'), ((2005, 2021), 'numpy.min', 'np.min', (['inftimes'], {}), '(inftimes)\n', (2011, 2021), True, 'import numpy as np\n'), ((2036, 2052), 'numpy.max', 'np.max', (['inftimes'], {}), '(inftimes)\n', (2042, 2052), True, 'import numpy as np\n'), ((2063, 2104), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'xlim': '[mintime - 5, maxtime + 1]'}), '(xlim=[mintime - 5, maxtime + 1])\n', (2071, 2104), True, 
'from matplotlib import pyplot as plt\n'), ((2141, 2235), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""major"""', 'axis': '"""x"""', 'color': '"""#666666"""', 'linestyle': '"""dashed"""', 'alpha': '(0.6)'}), "(b=True, which='major', axis='x', color='#666666', linestyle=\n 'dashed', alpha=0.6)\n", (2149, 2235), True, 'from matplotlib import pyplot as plt\n'), ((2248, 2275), 'matplotlib.pyplot.title', 'plt.title', (['"""Inference time"""'], {}), "('Inference time')\n", (2257, 2275), True, 'from matplotlib import pyplot as plt\n'), ((2280, 2333), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Inference time (ms)\nLower is better"""'], {}), '("""Inference time (ms)\nLower is better""")\n', (2290, 2333), True, 'from matplotlib import pyplot as plt\n'), ((2552, 2570), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2568, 2570), True, 'from matplotlib import pyplot as plt\n'), ((2575, 2610), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savename + '_time.pdf')"], {}), "(savename + '_time.pdf')\n", (2586, 2610), True, 'from matplotlib import pyplot as plt\n'), ((2668, 2680), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2678, 2680), True, 'from matplotlib import pyplot as plt\n'), ((2717, 2738), 'numpy.array', 'np.array', (['tpr_at_fprs'], {}), '(tpr_at_fprs)\n', (2725, 2738), True, 'import numpy as np\n'), ((2752, 2771), 'numpy.min', 'np.min', (['tpr_at_fprs'], {}), '(tpr_at_fprs)\n', (2758, 2771), True, 'import numpy as np\n'), ((2785, 2804), 'numpy.max', 'np.max', (['tpr_at_fprs'], {}), '(tpr_at_fprs)\n', (2791, 2804), True, 'import numpy as np\n'), ((2815, 2860), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'xlim': '[minval - 0.05, maxval + 0.05]'}), '(xlim=[minval - 0.05, maxval + 0.05])\n', (2823, 2860), True, 'from matplotlib import pyplot as plt\n'), ((2900, 2994), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""major"""', 'axis': '"""x"""', 'color': '"""#666666"""', 
'linestyle': '"""dashed"""', 'alpha': '(0.6)'}), "(b=True, which='major', axis='x', color='#666666', linestyle=\n 'dashed', alpha=0.6)\n", (2908, 2994), True, 'from matplotlib import pyplot as plt\n'), ((3007, 3048), 'matplotlib.pyplot.title', 'plt.title', (['"""Verification TAR (@FAR=1e-4)"""'], {}), "('Verification TAR (@FAR=1e-4)')\n", (3016, 3048), True, 'from matplotlib import pyplot as plt\n'), ((3053, 3082), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""TAR (@FAR=1e-4)"""'], {}), "('TAR (@FAR=1e-4)')\n", (3063, 3082), True, 'from matplotlib import pyplot as plt\n'), ((3306, 3324), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3322, 3324), True, 'from matplotlib import pyplot as plt\n'), ((3329, 3363), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savename + '_TAR.pdf')"], {}), "(savename + '_TAR.pdf')\n", (3340, 3363), True, 'from matplotlib import pyplot as plt\n'), ((3391, 3516), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compare evaluation results"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Compare evaluation results',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (3414, 3516), False, 'import argparse\n'), ((3955, 3969), 'pathlib.Path', 'Path', (['args.src'], {}), '(args.src)\n', (3959, 3969), False, 'from pathlib import Path\n'), ((448, 462), 'numpy.flipud', 'np.flipud', (['fpr'], {}), '(fpr)\n', (457, 462), True, 'import numpy as np\n'), ((477, 491), 'numpy.flipud', 'np.flipud', (['tpr'], {}), '(tpr)\n', (486, 491), True, 'import numpy as np\n'), ((1256, 1270), 'numpy.array', 'np.array', (['accs'], {}), '(accs)\n', (1264, 1270), True, 'import numpy as np\n'), ((1648, 1747), 'matplotlib.pyplot.text', 'plt.text', ([], {'s': 'f"""{acc:.2f}%"""', 'x': '(acc - 0.175)', 'y': 'name', 'color': '"""r"""', 'verticalalignment': '"""center"""', 'size': '(9)'}), "(s=f'{acc:.2f}%', x=acc - 0.175, y=name, color='r',\n verticalalignment='center', 
size=9)\n", (1656, 1747), True, 'from matplotlib import pyplot as plt\n'), ((1965, 1983), 'numpy.array', 'np.array', (['inftimes'], {}), '(inftimes)\n', (1973, 1983), True, 'import numpy as np\n'), ((2384, 2485), 'matplotlib.pyplot.text', 'plt.text', ([], {'s': 'f"""{time:.2f}ms"""', 'x': '(time - 1.75)', 'y': 'name', 'color': '"""r"""', 'verticalalignment': '"""center"""', 'size': '(9)'}), "(s=f'{time:.2f}ms', x=time - 1.75, y=name, color='r',\n verticalalignment='center', size=9)\n", (2392, 2485), True, 'from matplotlib import pyplot as plt\n'), ((3138, 3236), 'matplotlib.pyplot.text', 'plt.text', ([], {'s': 'f"""{val:.4f}"""', 'x': '(val - 0.015)', 'y': 'name', 'color': '"""r"""', 'verticalalignment': '"""center"""', 'size': '(9)'}), "(s=f'{val:.4f}', x=val - 0.015, y=name, color='r',\n verticalalignment='center', size=9)\n", (3146, 3236), True, 'from matplotlib import pyplot as plt\n'), ((4097, 4107), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4105, 4107), False, 'import sys\n'), ((4197, 4213), 'json.load', 'json.load', (['jfile'], {}), '(jfile)\n', (4206, 4213), False, 'import json\n'), ((4230, 4241), 'pathlib.Path', 'Path', (['jpath'], {}), '(jpath)\n', (4234, 4241), False, 'from pathlib import Path\n'), ((1769, 1784), 'numpy.argmax', 'np.argmax', (['accs'], {}), '(accs)\n', (1778, 1784), True, 'import numpy as np\n'), ((2507, 2526), 'numpy.argmin', 'np.argmin', (['inftimes'], {}), '(inftimes)\n', (2516, 2526), True, 'import numpy as np\n'), ((3258, 3280), 'numpy.argmax', 'np.argmax', (['tpr_at_fprs'], {}), '(tpr_at_fprs)\n', (3267, 3280), True, 'import numpy as np\n'), ((4906, 4921), 'pathlib.Path', 'Path', (['args.name'], {}), '(args.name)\n', (4910, 4921), False, 'from pathlib import Path\n')] |
import types
import math
import numpy as np
import cvxpy as cp
import scipy.sparse as sp
from gurobipy import GRB, Model, LinExpr, abs_, max_
from copy import copy
from evanqp import Polytope
from evanqp.problems import CvxpyProblem, MPCProblem
from evanqp.layers import BoundArithmetic, InputLayer, QPLayer, SeqLayer
class Verifier:
    """MILP-based verifier for (MPC) QPs and neural-network approximations.

    Wraps a parameter set as an input layer and each problem as either a
    QP layer (CVXPY problems) or a sequential layer (PyTorch modules),
    then encodes everything into a single Gurobi model to answer
    verification queries (maximum output difference, closed-loop
    stability, output containment in a polytope).
    """

    def __init__(self, parameter_set, *problems):
        self.input_layer = InputLayer(parameter_set)
        self.problems = []
        for problem in problems:
            if isinstance(problem, CvxpyProblem):
                self.problems.append(QPLayer(problem, 1))
            else:
                # Anything that is not a CVXPY problem is assumed to be a
                # PyTorch module and wrapped as a sequential layer.
                self.problems.append(SeqLayer.from_pytorch(problem))
        self.bounds_calculated = False

    def compute_bounds(self, method=BoundArithmetic.ZONO_ARITHMETIC, **kwargs):
        """Propagate bounds through the input layer and every problem.

        Returns a list with the output bounds of each problem.
        """
        self.input_layer.compute_bounds(method, **kwargs)
        for p in self.problems:
            p.compute_bounds(method, self.input_layer, **kwargs)
        self.bounds_calculated = True
        return [p.bounds['out'] for p in self.problems]

    def compute_ideal_cuts(self, model):
        """Collect ideal-cut inequalities from all problems for `model`."""
        ineqs = []
        for p in self.problems:
            ineqs += p.compute_ideal_cuts(model, self.input_layer, None)
        return ineqs

    def setup_milp(self, model):
        """Add the variables and constraints of all layers to `model`."""
        if not self.bounds_calculated:
            self.compute_bounds()

        self.input_layer.add_vars(model)
        for p in self.problems:
            p.add_vars(model)
        model.update()

        self.input_layer.add_constr(model)
        for p in self.problems:
            p.add_constr(model, self.input_layer)
        model.update()

    def ideal_cuts_callback(self):
        """Return a Gurobi callback that adds ideal cuts at MIP nodes."""
        def _callback(model, where):
            if where == GRB.Callback.MIPNODE:
                if model.cbGet(GRB.Callback.MIPNODE_STATUS) == GRB.Status.OPTIMAL:
                    # decrease cut freq with number of nodes explored
                    freq = model.cbGet(GRB.Callback.MIPNODE_NODCNT) + 1
                    if np.random.randint(0, freq, 1) == 0:
                        ineqs = self.compute_ideal_cuts(model)
                        for (lhs, rhs) in ineqs:
                            model.cbCut(lhs <= rhs)
        return _callback

    def warm_start(self, guess=None):
        """Warm-start all problems from `guess` (or the input-layer default)."""
        if guess is None:
            guess = self.input_layer.forward(None)
        for problem in self.problems:
            problem.forward(guess, warm_start=True)

    def find_max_abs_diff(self, threads=0, output_flag=1, ideal_cuts=False, warm_start=True, guess=None):
        """Maximize the infinity-norm difference between two problems' outputs.

        Returns (objective bound, parameter values attaining the maximum).
        """
        model = Model()
        model.setParam('OutputFlag', output_flag)
        model.setParam('Threads', threads)

        if len(self.problems) != 2:
            raise Exception('Number of problems must be 2.')
        # Bug fix: compare the output sizes of the two *different* problems
        # (the original compared problems[0] against itself, so the guard
        # could never trigger).
        if self.problems[0].out_size != self.problems[1].out_size:
            raise Exception('Problems do not have the same output size')

        self.setup_milp(model)

        if warm_start:
            self.warm_start(guess)

        diff = [model.addVar(vtype=GRB.CONTINUOUS, lb=-GRB.INFINITY, ub=GRB.INFINITY) for _ in range(self.problems[0].out_size)]
        abs_diff = [model.addVar(vtype=GRB.CONTINUOUS, lb=0, ub=GRB.INFINITY) for _ in range(self.problems[0].out_size)]
        for i in range(self.problems[0].out_size):
            model.addConstr(diff[i] == self.problems[0].vars['out'][i] - self.problems[1].vars['out'][i])
            model.addConstr(abs_diff[i] == abs_(diff[i]))
        max_abs_diff = model.addVar(vtype=GRB.CONTINUOUS)
        model.addConstr(max_abs_diff == max_(abs_diff))

        model.setObjective(max_abs_diff, GRB.MAXIMIZE)
        model.update()

        if ideal_cuts:
            model.optimize(self.ideal_cuts_callback())
        else:
            model.optimize()

        return model.objBound, [p.x for p in self.input_layer.vars['out']]

    def verify_stability(self, threads=0, output_flag=1, ideal_cuts=False, warm_start=True, guess=None):
        """Check closed-loop stability of an approximated MPC controller.

        Problem 0 must be an MPCProblem; problem 1 is its approximation.
        Minimizes the cost difference between the MPC solution and the
        reduced-objective solution constrained to the approximator output.
        Returns (objective bound, parameter values).
        """
        if len(self.problems) != 2:
            raise Exception('Number of problems must be 2.')
        if not isinstance(self.problems[0].problem, MPCProblem):
            raise Exception('The first problem must be of type MPCProblem.')
        mpc_problem = self.problems[0].problem

        model = Model()
        model.setParam('OutputFlag', output_flag)
        model.setParam('Threads', threads)

        if not self.bounds_calculated:
            self.compute_bounds()

        reduced_objective_problem = copy(mpc_problem)
        # monkey patch original mpc problem with reduced objective function
        def problem_patch(_self):
            return cp.Problem(_self.reduced_objective(), _self.original_problem().constraints)
        reduced_objective_problem.original_problem = reduced_objective_problem.problem
        reduced_objective_problem.problem = types.MethodType(problem_patch, reduced_objective_problem)
        reduced_objective_mpc_layer = QPLayer(reduced_objective_problem, 1)
        reduced_objective_mpc_layer.compute_bounds(BoundArithmetic.INT_ARITHMETIC, self.input_layer)

        self.input_layer.add_vars(model)
        self.problems[0].add_vars(model, only_primal=True)
        reduced_objective_mpc_layer.add_vars(model)
        self.problems[1].add_vars(model)
        model.update()

        self.input_layer.add_constr(model)
        self.problems[0].add_constr(model, self.input_layer, only_primal=True)
        reduced_objective_mpc_layer.add_constr(model, self.input_layer)
        self.problems[1].add_constr(model, self.input_layer)
        model.update()

        # Tie the reduced-objective MPC output to the approximator output.
        for i in range(reduced_objective_mpc_layer.out_size):
            model.addConstr(reduced_objective_mpc_layer.vars['out'][i] == self.problems[1].vars['out'][i])
        model.update()

        if warm_start:
            self.warm_start(guess)

        x = self.problems[0].vars['x']
        x_t = reduced_objective_mpc_layer.vars['x']
        # Objective: 0.5 x'Px + q'x  -  (0.5 x_t'P_t x_t + q_t'x_t)
        obj = 0
        P_row_idx, P_col_idx, P_col_coef = sp.find(self.problems[0].P)
        for i, j, Pij in zip(P_row_idx, P_col_idx, P_col_coef):
            obj += 0.5 * x[i] * Pij * x[j]
        obj += LinExpr(self.problems[0].q, x)
        P_t_row_idx, P_t_col_idx, P_t_col_coef = sp.find(reduced_objective_mpc_layer.P)
        for i, j, Pij in zip(P_t_row_idx, P_t_col_idx, P_t_col_coef):
            obj -= 0.5 * x_t[i] * Pij * x_t[j]
        obj -= LinExpr(reduced_objective_mpc_layer.q, x_t)

        # allow non-convex MIQP formulation
        if len(P_t_col_coef) > 0:
            model.setParam('NonConvex', 2)

        model.setObjective(obj, GRB.MINIMIZE)
        model.update()

        if ideal_cuts:
            model.optimize(self.ideal_cuts_callback())
        else:
            model.optimize()

        return model.objBound, [p.x for p in self.input_layer.vars['out']]

    def variables_in_polytope(self, poly, eps=1e-6, threads=0, output_flag=1, warm_start=True, guess=None):
        """Check whether the single problem's output always lies in `poly`.

        Returns True iff every facet inequality A_i x <= b_i holds (up to
        `eps`) for all admissible parameters.
        """
        if len(self.problems) != 1:
            raise Exception('Number of problems must be 1.')
        if not isinstance(poly, Polytope):
            raise Exception('poly must be of type Polytope.')
        if self.problems[0].out_size != poly.A.shape[1]:
            raise Exception('poly shape does not match problem output size.')

        model = Model()
        model.setParam('OutputFlag', output_flag)
        model.setParam('Threads', threads)

        self.setup_milp(model)

        if warm_start:
            self.warm_start(guess)

        # Maximize violation of each facet separately; any positive bound
        # above eps means the output can leave the polytope.
        for i in range(self.problems[0].out_size):
            model.setObjective(LinExpr(poly.A[i, :], self.problems[0].vars['out']) - poly.b[i], GRB.MAXIMIZE)
            model.update()
            model.optimize()
            if model.objBound > eps:
                return False
        return True

    @staticmethod
    def min_optimal_mpc_horizon(parameter_set, mpc_factory, poly, eps=1e-6, threads=0, warm_start=True, guess=None):
        """Find the smallest horizon N whose MPC output stays inside `poly`.

        Doubles N until feasible, then binary-searches for the minimum.
        """
        N = 1
        mpc_problem = mpc_factory(N)
        print(f'Checking N = {N}')
        verifier = Verifier(parameter_set, mpc_problem)
        res = verifier.variables_in_polytope(poly, eps=eps, threads=threads, output_flag=0, warm_start=warm_start, guess=guess)
        if res:
            return N
        lb = N + 1
        ub = float('inf')
        while lb < ub:
            if ub == float('inf'):
                N *= 2  # exponential growth until an upper bound is found
            else:
                N = math.floor((lb + ub) / 2)
            mpc_problem = mpc_factory(N)
            print(f'Checking N = {N}')
            verifier = Verifier(parameter_set, mpc_problem)
            res = verifier.variables_in_polytope(poly, eps=eps, threads=threads, output_flag=0)
            if res:
                ub = N
            else:
                lb = N + 1
        return lb
| [
"scipy.sparse.find",
"gurobipy.max_",
"gurobipy.abs_",
"math.floor",
"evanqp.layers.QPLayer",
"evanqp.layers.SeqLayer.from_pytorch",
"evanqp.layers.InputLayer",
"numpy.random.randint",
"gurobipy.LinExpr",
"gurobipy.Model",
"copy.copy",
"types.MethodType"
] | [((415, 440), 'evanqp.layers.InputLayer', 'InputLayer', (['parameter_set'], {}), '(parameter_set)\n', (425, 440), False, 'from evanqp.layers import BoundArithmetic, InputLayer, QPLayer, SeqLayer\n'), ((2560, 2567), 'gurobipy.Model', 'Model', ([], {}), '()\n', (2565, 2567), False, 'from gurobipy import GRB, Model, LinExpr, abs_, max_\n'), ((4255, 4262), 'gurobipy.Model', 'Model', ([], {}), '()\n', (4260, 4262), False, 'from gurobipy import GRB, Model, LinExpr, abs_, max_\n'), ((4467, 4484), 'copy.copy', 'copy', (['mpc_problem'], {}), '(mpc_problem)\n', (4471, 4484), False, 'from copy import copy\n'), ((4822, 4880), 'types.MethodType', 'types.MethodType', (['problem_patch', 'reduced_objective_problem'], {}), '(problem_patch, reduced_objective_problem)\n', (4838, 4880), False, 'import types\n'), ((4920, 4957), 'evanqp.layers.QPLayer', 'QPLayer', (['reduced_objective_problem', '(1)'], {}), '(reduced_objective_problem, 1)\n', (4927, 4957), False, 'from evanqp.layers import BoundArithmetic, InputLayer, QPLayer, SeqLayer\n'), ((5959, 5986), 'scipy.sparse.find', 'sp.find', (['self.problems[0].P'], {}), '(self.problems[0].P)\n', (5966, 5986), True, 'import scipy.sparse as sp\n'), ((6109, 6139), 'gurobipy.LinExpr', 'LinExpr', (['self.problems[0].q', 'x'], {}), '(self.problems[0].q, x)\n', (6116, 6139), False, 'from gurobipy import GRB, Model, LinExpr, abs_, max_\n'), ((6189, 6227), 'scipy.sparse.find', 'sp.find', (['reduced_objective_mpc_layer.P'], {}), '(reduced_objective_mpc_layer.P)\n', (6196, 6227), True, 'import scipy.sparse as sp\n'), ((6360, 6403), 'gurobipy.LinExpr', 'LinExpr', (['reduced_objective_mpc_layer.q', 'x_t'], {}), '(reduced_objective_mpc_layer.q, x_t)\n', (6367, 6403), False, 'from gurobipy import GRB, Model, LinExpr, abs_, max_\n'), ((7256, 7263), 'gurobipy.Model', 'Model', ([], {}), '()\n', (7261, 7263), False, 'from gurobipy import GRB, Model, LinExpr, abs_, max_\n'), ((3553, 3567), 'gurobipy.max_', 'max_', (['abs_diff'], {}), '(abs_diff)\n', (3557, 
3567), False, 'from gurobipy import GRB, Model, LinExpr, abs_, max_\n'), ((8361, 8386), 'math.floor', 'math.floor', (['((lb + ub) / 2)'], {}), '((lb + ub) / 2)\n', (8371, 8386), False, 'import math\n'), ((589, 608), 'evanqp.layers.QPLayer', 'QPLayer', (['problem', '(1)'], {}), '(problem, 1)\n', (596, 608), False, 'from evanqp.layers import BoundArithmetic, InputLayer, QPLayer, SeqLayer\n'), ((665, 695), 'evanqp.layers.SeqLayer.from_pytorch', 'SeqLayer.from_pytorch', (['problem'], {}), '(problem)\n', (686, 695), False, 'from evanqp.layers import BoundArithmetic, InputLayer, QPLayer, SeqLayer\n'), ((3440, 3453), 'gurobipy.abs_', 'abs_', (['diff[i]'], {}), '(diff[i])\n', (3444, 3453), False, 'from gurobipy import GRB, Model, LinExpr, abs_, max_\n'), ((7530, 7581), 'gurobipy.LinExpr', 'LinExpr', (['poly.A[i, :]', "self.problems[0].vars['out']"], {}), "(poly.A[i, :], self.problems[0].vars['out'])\n", (7537, 7581), False, 'from gurobipy import GRB, Model, LinExpr, abs_, max_\n'), ((2005, 2034), 'numpy.random.randint', 'np.random.randint', (['(0)', 'freq', '(1)'], {}), '(0, freq, 1)\n', (2022, 2034), True, 'import numpy as np\n')] |
import argparse
import base64
import json
import os
import os.path as osp
import imgviz
import PIL.Image
from labelme.logger import logger
from labelme import utils
import cv2
import numpy as np
def json2mask(json_file, jsonDir, imgDir, maskDir):
    '''Convert one labelme json annotation into an image and a label mask.

    Parameters
    ----------
    json_file: str
        the name of the json file
    jsonDir: str
        directory containing the json file
    imgDir: str
        directory where the extracted image is written
    maskDir: str
        directory where the label mask is written
    '''
    jsonID = json_file.split('.')[0]
    # Bug fix: read from the jsonDir parameter instead of the module-level
    # json_Dir global (which only existed when run as a script), and close
    # the file deterministically.
    with open(osp.join(jsonDir, json_file)) as jf:
        data = json.load(jf)
    imageData = data.get("imageData")
    if not imageData:
        # Fall back to loading the image file referenced by the annotation.
        imagePath = os.path.join(os.path.dirname(json_file), data["imagePath"])
        with open(imagePath, "rb") as f:
            imageData = f.read()
            imageData = base64.b64encode(imageData).decode("utf-8")
    img = utils.img_b64_to_arr(imageData)
    # Map each label name to an integer class value (background is 0).
    label_name_to_value = {"_background_": 0}
    for shape in sorted(data["shapes"], key=lambda x: x["label"]):
        label_name = shape["label"]
        if label_name not in label_name_to_value:
            label_name_to_value[label_name] = len(label_name_to_value)
    lbl, _ = utils.shapes_to_label(
        img.shape, data["shapes"], label_name_to_value
    )
    label_names = [None] * (max(label_name_to_value.values()) + 1)
    for name, value in label_name_to_value.items():
        label_names[value] = name
    # Visualization kept for parity with the original code (result unused).
    lbl_viz = imgviz.label2rgb(
        label=lbl, img=imgviz.asgray(img), label_names=label_names, loc="rb"
    )
    PIL.Image.fromarray(img).save(osp.join(imgDir, jsonID + ".png"))
    utils.lblsave(osp.join(maskDir, jsonID + ".png"), lbl)
def masks_to_graypic(path):
    '''Binarize every mask image in a directory and rewrite it as uint8.

    Parameters
    ----------
    path: str
        directory containing the mask images
    '''
    for fname in os.listdir(path):  # every mask file in the directory
        full_name = path + '/' + fname
        image = cv2.imread(full_name)
        # Pixels with any red-channel signal belong to the mask.
        binary = np.zeros((image.shape[0], image.shape[1]))
        binary[image[:, :, 2] > 0] = 1
        cv2.imwrite(full_name, (binary * 255).astype('uint8'))
if __name__ == "__main__":
    # Directory layout for the patient dataset.
    json_Dir = 'patients_dataset/json'
    imgDir = 'patients_dataset/image'
    maskDir = 'patients_dataset/mask'
    # Convert every annotation json into an image/mask pair.
    for annotation in os.listdir(json_Dir):
        json2mask(annotation, json_Dir, imgDir, maskDir)
masks_to_graypic(maskDir) | [
"labelme.utils.img_b64_to_arr",
"os.listdir",
"labelme.utils.shapes_to_label",
"imgviz.asgray",
"base64.b64encode",
"os.path.join",
"os.path.dirname",
"numpy.zeros",
"cv2.imread"
] | [((973, 1004), 'labelme.utils.img_b64_to_arr', 'utils.img_b64_to_arr', (['imageData'], {}), '(imageData)\n', (993, 1004), False, 'from labelme import utils\n'), ((1405, 1474), 'labelme.utils.shapes_to_label', 'utils.shapes_to_label', (['img.shape', "data['shapes']", 'label_name_to_value'], {}), "(img.shape, data['shapes'], label_name_to_value)\n", (1426, 1474), False, 'from labelme import utils\n'), ((2078, 2094), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2088, 2094), False, 'import os\n'), ((2580, 2600), 'os.listdir', 'os.listdir', (['json_Dir'], {}), '(json_Dir)\n', (2590, 2600), False, 'import os\n'), ((1806, 1839), 'os.path.join', 'osp.join', (['imgDir', "(jsonID + '.png')"], {}), "(imgDir, jsonID + '.png')\n", (1814, 1839), True, 'import os.path as osp\n'), ((1860, 1894), 'os.path.join', 'osp.join', (['maskDir', "(jsonID + '.png')"], {}), "(maskDir, jsonID + '.png')\n", (1868, 1894), True, 'import os.path as osp\n'), ((2200, 2222), 'cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (2210, 2222), False, 'import cv2\n'), ((2277, 2319), 'numpy.zeros', 'np.zeros', (['(image.shape[0], image.shape[1])'], {}), '((image.shape[0], image.shape[1]))\n', (2285, 2319), True, 'import numpy as np\n'), ((770, 796), 'os.path.dirname', 'os.path.dirname', (['json_file'], {}), '(json_file)\n', (785, 796), False, 'import os\n'), ((1708, 1726), 'imgviz.asgray', 'imgviz.asgray', (['img'], {}), '(img)\n', (1721, 1726), False, 'import imgviz\n'), ((918, 945), 'base64.b64encode', 'base64.b64encode', (['imageData'], {}), '(imageData)\n', (934, 945), False, 'import base64\n')] |
#!/usr/bin/env python
"""
:Abstract: Given an abundance table, calculate pairwise Spearman's Rho of the columns.
:Date: 02/15/2017
:Author: <NAME>
"""
import sys
import argparse
from traceback import format_exc
from time import localtime, strftime
from itertools import combinations
from phylotoast.util import parse_map_file, gather_categories
from phylotoast.biom_calc import arcsine_sqrt_transform as ast, relative_abundance
importerrors = set()
try:
import biom
except ImportError:
importerrors.add("biom")
try:
import numpy as np
except ImportError:
importerrors.add("numpy")
try:
from multiprocessing import Pool, current_process
except ImportError:
importerrors.add("multiprocessing")
try:
from statsmodels.sandbox.stats.multicomp import fdrcorrection0 as FDR
except ImportError:
importerrors.add("statsmodels")
try:
from scipy.stats import rankdata, spearmanr as Spearman, kendalltau as KendallTau
except ImportError:
importerrors.add("scipy")
if len(importerrors) > 0:
for err in importerrors:
print("Please install missing module: {}".format(err))
sys.exit()
def calc_corr(otu_combos, gather_categories, abd_data):
    """Calculate Spearman and Kendall correlations for each OTU pair.

    Parameters
    ----------
    otu_combos: list
        pairs (tuples) of OTU identifiers to correlate
    gather_categories: dict
        category name -> object with member SampleIDs in its ``sids`` attr
    abd_data: dict
        per-SampleID mapping of {otu: abundance}

    Returns
    -------
    list of [test_name, category, otu1, otu2, correlation, p_value] records
    (two records per pair: one Spearman, one Kendall).
    """
    # NOTE(review): "%x | %X".format(localtime) has no placeholders, so the
    # .format call is a no-op; strftime just stamps the current local time.
    starttime = strftime("%x | %X".format(localtime))
    # Report which worker process picked up this chunk of pairs.
    print("{0} | Core-{1}".format(starttime, current_process().name.split("-")[1]))
    result = []
    for combo in otu_combos:
        list1 = []
        list2 = []
        otu1 = combo[0]
        otu2 = combo[1]
        # Abundances are pooled across ALL categories for each OTU pair.
        for cat in gather_categories.keys():
            for sid in gather_categories[cat].sids:
                list1.append(abd_data[sid][otu1])
                list2.append(abd_data[sid][otu2])
        try:
            assert len(list1) == len(list2)
        except AssertionError:
            sys.exit("Abundance array lengths do not match.")
        # NOTE(review): `cat` below is whatever category the loop above ended
        # on. Since abundances were pooled over every category, the recorded
        # category label may be misleading when more than one category
        # exists -- confirm intended behavior with the authors.
        rho_corr, rho_p_val = Spearman(rankdata(list1, "ordinal"),
                                       rankdata(list2, "ordinal"))
        result.append(["Spearman", cat, otu1, otu2, rho_corr, rho_p_val])
        kendall_corr, kendall_p_val = KendallTau(rankdata(list1, "ordinal"),
                                                 rankdata(list2, "ordinal"))
        result.append(["Kendall", cat, otu1, otu2, kendall_corr, kendall_p_val])
    return result
def calc_corr_helper(args):
    """Unpack one argument tuple so Pool.map() can drive calc_corr()."""
    otu_combos, sid_categories, abundances = args
    return calc_corr(otu_combos, sid_categories, abundances)
def run_fdr(result_list):
    """Apply Benjamini-Hochberg FDR correction and keep q < 0.05 records.

    Parameters
    ----------
    result_list: list of list
        correlation records whose LAST element is the raw p-value.

    Returns
    -------
    list of tuples mirroring the input records, with the raw p-value
    replaced by the FDR-corrected one; records with corrected p >= 0.05
    are dropped.
    """
    # Bug fix: the original validated input with `assert`, which is
    # silently stripped when Python runs with -O; use an explicit check.
    if not all(isinstance(entry, list) for entry in result_list):
        sys.exit("Data format for FDR Correction not compatible. Please check.")
    raw_pvals = [entry[-1] for entry in result_list]
    _, pvals_corrected = FDR(raw_pvals)
    # Indices of records that survive the 0.05 corrected-p threshold.
    idx_to_retain = [i for i, q in enumerate(pvals_corrected) if q < 0.05]
    print("{} correlations removed through multiple test correction."
          .format(len(result_list) - len(idx_to_retain)))
    return [(result_list[i][0], result_list[i][1], result_list[i][2],
             result_list[i][3], result_list[i][4], pvals_corrected[i])
            for i in idx_to_retain]
def program_options():
    """Build the command-line interface and parse sys.argv."""
    parser = argparse.ArgumentParser(
        description=("Given an abundance table, calculate and return FDR corrected "
                     "significant pairwise Spearman's Rho."))
    # (argument name, help text) in positional order.
    positionals = [
        ("in_biomf", "Input abundance file containing OTU names as "
         "columns and SampleIDs as rows. Ideally, this is the output from "
         "biom_relative_abundance.py script."),
        ("map_fnh", "Mapping file associated with input BIOM file."),
        ("category_column", "Column name from mapping file which is "
         "associated with categories."),
        ("out_fnh", "Path and name to output correlation matrix file."
         " The format for the tab-separated file will be: Category -> "
         "Variable -> by Variable -> Correlation"),
    ]
    for arg_name, arg_help in positionals:
        parser.add_argument(arg_name, help=arg_help)
    return parser.parse_args()
def main():
    """Drive the whole workflow: load the BIOM table and mapping file, fan
    correlation jobs out across a process pool, FDR-correct both statistics,
    and write the OTU pairs significant under *both* Spearman and Kendall
    to ``args.out_fnh``.
    """
    args = program_options()
    try:
        biomf = biom.load_table(args.in_biomf)
    except IOError as ioe:
        sys.exit("Error with input BIOM format file: {}".format(ioe))
    else:
        # Relative abundance, then arcsine-sqrt transform (variance stabilization).
        rel_abd = relative_abundance(biomf)
        ast_rel_abd = ast(rel_abd)
        # Get pairwise combinations of OTUs
        otu_combos = list(combinations(biomf.ids("observation"), 2))
    try:
        mheader, mdata = parse_map_file(args.map_fnh)
    except IOError as ioe:
        sys.exit("Error with input mapping file: {}".format(ioe))
    else:
        # Gather sampleID categories
        sid_cat = gather_categories(mdata, mheader, [args.category_column])
    # Create arguments for helper function to be supplied to multiprocessing pool.map()
    # NOTE(review): `xrange` is Python 2 only — this function will not run on Python 3.
    chunksize = 10000
    jobs = [(otu_combos[x:x+chunksize], sid_cat, ast_rel_abd,)
            for x in xrange(0, len(otu_combos), chunksize)]
    print("{0} jobs created.".format(len(jobs)))
    # Start multiprocessing jobs
    try:
        print("Starting map_async()...")
        pool = Pool()
        res = pool.map_async(calc_corr_helper, jobs)
        pool.close()
        pool.join()
    except Exception:
        sys.exit("Error while calculating correlations\n{}".format(format_exc()))
    else:
        # Partition pooled results by statistic type (entry[0] is the method name).
        s_rho_calc = []
        k_tau_calc = []
        for r in res.get():
            for s in r:
                if s[0] == "Spearman":
                    s_rho_calc.append(s)
                else:
                    k_tau_calc.append(s)
        # Get FDR corrected correlation results
        print("Running FDR correction on {} Spearman's Rho.".format(len(s_rho_calc)))
        fdr_corr_s_rho = run_fdr(s_rho_calc)
        print("Running FDR correction on {} Kendall Tau.".format(len(k_tau_calc)))
        fdr_corr_k_tau = run_fdr(k_tau_calc)
        # Consolidate correlation results: keep only the OTU pairs that survive
        # FDR correction under BOTH statistics.
        k_kos = {(e[2], e[3],) for e in fdr_corr_k_tau}
        s_kos = {(f[2], f[3],) for f in fdr_corr_s_rho}
        final_kos = s_kos & k_kos
        print("{0} elements from KendallTau\n{1} elements from SpearmanRho\n{2} elements are "
              "common to both.".format(len(k_kos), len(s_kos), len(final_kos)))
        final_fdr_corr_results = [cdata[1:] for cdata in fdr_corr_s_rho
                                  if (cdata[2], cdata[3],) in final_kos]
        # Write our results to file
        with open(args.out_fnh, "w") as outf:
            outf.write("Category\tVariable\tby Variable\tCorrelation\tp value\n")
            for k in final_fdr_corr_results:
                outf.write("{0}\t{1}\t{2}\t{3}\t{4}\n".format(k[0], k[1], k[2], k[3], k[4]))
if __name__ == "__main__":
    # Script entry point; propagate main()'s return value as the process exit status.
    sys.exit(main())
| [
"phylotoast.util.gather_categories.keys",
"biom.load_table",
"traceback.format_exc",
"argparse.ArgumentParser",
"scipy.stats.rankdata",
"phylotoast.biom_calc.arcsine_sqrt_transform",
"statsmodels.sandbox.stats.multicomp.fdrcorrection0",
"numpy.array",
"phylotoast.biom_calc.relative_abundance",
"ph... | [((1115, 1125), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1123, 1125), False, 'import sys\n'), ((3501, 3647), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Given an abundance table, calculate and return FDR corrected significant pairwise Spearman\'s Rho."""'}), '(description=\n "Given an abundance table, calculate and return FDR corrected significant pairwise Spearman\'s Rho."\n )\n', (3524, 3647), False, 'import argparse\n'), ((1557, 1581), 'phylotoast.util.gather_categories.keys', 'gather_categories.keys', ([], {}), '()\n', (1579, 1581), False, 'from phylotoast.util import parse_map_file, gather_categories\n'), ((2851, 2862), 'statsmodels.sandbox.stats.multicomp.fdrcorrection0', 'FDR', (['p_vals'], {}), '(p_vals)\n', (2854, 2862), True, 'from statsmodels.sandbox.stats.multicomp import fdrcorrection0 as FDR\n'), ((4493, 4523), 'biom.load_table', 'biom.load_table', (['args.in_biomf'], {}), '(args.in_biomf)\n', (4508, 4523), False, 'import biom\n'), ((4649, 4674), 'phylotoast.biom_calc.relative_abundance', 'relative_abundance', (['biomf'], {}), '(biomf)\n', (4667, 4674), False, 'from phylotoast.biom_calc import arcsine_sqrt_transform as ast, relative_abundance\n'), ((4697, 4709), 'phylotoast.biom_calc.arcsine_sqrt_transform', 'ast', (['rel_abd'], {}), '(rel_abd)\n', (4700, 4709), True, 'from phylotoast.biom_calc import arcsine_sqrt_transform as ast, relative_abundance\n'), ((4858, 4886), 'phylotoast.util.parse_map_file', 'parse_map_file', (['args.map_fnh'], {}), '(args.map_fnh)\n', (4872, 4886), False, 'from phylotoast.util import parse_map_file, gather_categories\n'), ((5045, 5102), 'phylotoast.util.gather_categories', 'gather_categories', (['mdata', 'mheader', '[args.category_column]'], {}), '(mdata, mheader, [args.category_column])\n', (5062, 5102), False, 'from phylotoast.util import parse_map_file, gather_categories\n'), ((5485, 5491), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (5489, 5491), False, 'from 
multiprocessing import Pool, current_process\n'), ((1924, 1950), 'scipy.stats.rankdata', 'rankdata', (['list1', '"""ordinal"""'], {}), "(list1, 'ordinal')\n", (1932, 1950), False, 'from scipy.stats import rankdata, spearmanr as Spearman, kendalltau as KendallTau\n'), ((1991, 2017), 'scipy.stats.rankdata', 'rankdata', (['list2', '"""ordinal"""'], {}), "(list2, 'ordinal')\n", (1999, 2017), False, 'from scipy.stats import rankdata, spearmanr as Spearman, kendalltau as KendallTau\n'), ((2142, 2168), 'scipy.stats.rankdata', 'rankdata', (['list1', '"""ordinal"""'], {}), "(list1, 'ordinal')\n", (2150, 2168), False, 'from scipy.stats import rankdata, spearmanr as Spearman, kendalltau as KendallTau\n'), ((2219, 2245), 'scipy.stats.rankdata', 'rankdata', (['list2', '"""ordinal"""'], {}), "(list2, 'ordinal')\n", (2227, 2245), False, 'from scipy.stats import rankdata, spearmanr as Spearman, kendalltau as KendallTau\n'), ((2676, 2748), 'sys.exit', 'sys.exit', (['"""Data format for FDR Correction not compatible. Please check."""'], {}), "('Data format for FDR Correction not compatible. Please check.')\n", (2684, 2748), False, 'import sys\n'), ((1835, 1884), 'sys.exit', 'sys.exit', (['"""Abundance array lengths do not match."""'], {}), "('Abundance array lengths do not match.')\n", (1843, 1884), False, 'import sys\n'), ((5675, 5687), 'traceback.format_exc', 'format_exc', ([], {}), '()\n', (5685, 5687), False, 'from traceback import format_exc\n'), ((1368, 1385), 'multiprocessing.current_process', 'current_process', ([], {}), '()\n', (1383, 1385), False, 'from multiprocessing import Pool, current_process\n'), ((2896, 2921), 'numpy.array', 'np.array', (['pvals_corrected'], {}), '(pvals_corrected)\n', (2904, 2921), True, 'import numpy as np\n')] |
import pickle
import os
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
import multiprocessing
# from sklearn.utils.random import sample_without_replacement
# from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
# from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
# from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.decomposition import PCA
# from sklearn.gaussian_process import GaussianProcessClassifier
# from sklearn.svm import LinearSVR
# from sklearn.neural_network import MLPClassifier
# from sklearn.feature_selection import RFECV
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import make_scorer
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt
from framework.data_portals.data_portal import DataPortal
from pyESN import ESN
# Universe of tradable symbols: column 'A' of a locally stored ticker list.
all_tickers = pd.read_csv('C:\\Users\\kohle\\Documents\\Machine Learning\\Echo State Networks\\Stock_Data\\list.csv')[
    'A'].tolist()
# Chunk index for this process; also used as the pickle-file suffix.
pknum = 0
ticker_range = (pknum * 7000, (pknum + 1) * 7000)
# NOTE(review): this reassignment overrides the 7000-ticker chunk above with
# the full universe — the chunked range is effectively dead code.
ticker_range = (0, len(all_tickers))
# Minutes to sleep before starting work.
delay_minutes = 0
tasks = ['new', 'continue', 'loop_new'] # choose from: ['new', 'predict_all', 'continue', 'combine', 'loop_new']
# tasks = ['continue']
# tasks = ['combine']
# NOTE(review): this final reassignment disables every task; remove it to run
# the list selected above.
tasks = []
new_env = False # instruction to keep specified model_env (instead of loading old one, when available)
end_int = -1 # condition to set number of iterations to be run
# Hyper-parameters and bookkeeping for the current model run.
model_env = {
    'all_tickers': all_tickers,
    # Random 500-symbol training subset (resampled per loop in 'loop_new').
    'tickers': np.random.choice(all_tickers, 500, replace=False),
    # Sizes and sparsities of the sub-reservoirs merged into one ESN.
    'n_res_list': [30, 30, 30, 30, 30, 30, 50, 80],
    'sparsity_list': [0.5, 0.75, 0.8, 0.85, 0.9, 0.925, 0.95, 0.96],
    'train_len': 4000,
    'drop_len': 200,
    'n_trees': 200,
    'n_comp': 10,
    'vol': False,
    'individual': False,
    # 'model_ui': '0145_SGD',
    'model_ui': '0041_SGD',
    'folder_path': 'models/SGD_hinge_loss'
}
class LinDetrend(object):
    """Sklearn-style transformer that removes a linear trend fit on index order."""

    # Degree-1 polynomial coefficients; populated by fit().
    lin_trend = None

    def fit(self, X, y, **fit_params):
        """Fit a straight line of X against its positional index; return self."""
        positions = range(len(X))
        self.lin_trend = np.polyfit(positions, X, 1)
        return self

    def transform(self, X):
        """Subtract the fitted trend, returning a column vector of residuals."""
        positions = range(len(X))
        trend = np.polyval(self.lin_trend, positions)
        return X - trend.reshape((len(X), 1))
def individual_fit_results(tickers, model, prep, net, pca=None, new_fit=True, drop_len=200, train_len=4000,
                           test_len=200, vol=True):
    """
    Evaluate (and optionally re-fit) the classifier per ticker and collect results.

    model is assumed to generate a 1,0 classification to either buy or sell.

    Relies on the module-level ``portal`` for data access and on
    ``model_env`` for reservoir sizes and window lengths.
    NOTE(review): several expressions below read ``model_env['drop_len']`` /
    ``model_env['train_len']`` instead of the ``drop_len``/``train_len``
    parameters — confirm callers keep the two in sync.

    Returns
    -------
    tuple
        (metrics DataFrame, coefficient array, trading-curve array,
        buy-and-hold array, prediction array) stacked over tickers.
    """
    gen = portal.iter_get_uids('daily_prices', 'default', tickers)
    df = pd.DataFrame() # Dataframe with tickers and performance metrics
    df1 = pd.DataFrame() # array of model coefficients
    df2 = pd.DataFrame() # array of trading results
    df3 = pd.DataFrame() # array of buy & hold results
    df4 = pd.DataFrame() # array of predictions from model
    i = 0
    for data in gen:
        print(i)
        # Empty accumulators sized to the merged reservoir state (+1 bias/input column).
        x_train, x_test = np.zeros((0, sum(model_env['n_res_list']) + 1)), \
                          np.zeros((0, sum(model_env['n_res_list']) + 1))
        y_train, y_test, y_cv, y_tcv = [], [], [], []
        w_train, w_test = [], []
        # Log-transform; +1 guards volume zeros before log10.
        log_vol = np.log10(np.array(data['volume'] + 1).reshape((len(data), 1)))
        log_prices = np.log10(np.array(data['adjusted_close']).reshape((len(data), 1)))
        # Skip tickers with too little history for a full train+test split.
        if len(log_prices) > train_len + test_len:
            # Fit the detrend/scale pipeline on the training span only.
            prep.fit(log_prices[:train_len])
            log_prices = prep.transform(log_prices)
            if vol:
                prep.fit(log_vol[:train_len])
                log_vol = prep.transform(log_vol)
            else:
                log_vol = np.ones((len(data), 1))
            states = net.get_states(log_vol, log_prices)
            # if pca:
            #     states = pca.transform(states)
            # Targets are 1 for an up day, 0 otherwise; weights are |return|.
            x_train = np.vstack((x_train, states[model_env['drop_len']:train_len]))
            y_train += np.sign((np.sign(
                log_prices[model_env['drop_len'] + 1:train_len + 1, 0] - log_prices[model_env['drop_len']:train_len,
                                                                                    0]) + 1) / 2).tolist()
            w_train += np.abs(
                log_prices[model_env['drop_len'] + 1:train_len + 1, 0] - log_prices[model_env['drop_len']:train_len,
                                                                                    0]).tolist()
            y_cv += (log_prices[model_env['drop_len'] + 1:train_len + 1, 0] - log_prices[
                                                                              model_env['drop_len']:train_len,
                                                                              0]).tolist()
            x_test = np.vstack((x_test, states[train_len:-1]))
            y_test += np.sign(
                (np.sign(log_prices[train_len + 1:, 0] - log_prices[train_len:-1, 0]) + 1) / 2).tolist()
            w_test += np.abs(log_prices[train_len + 1:, 0] - log_prices[train_len:-1, 0]).tolist()
            y_tcv += (log_prices[train_len + 1:, 0] - log_prices[train_len:-1, 0]).tolist()
            if pca:
                states = pca.transform(states)
                x_train = pca.transform(x_train)
                x_test = pca.transform(x_test)
            if new_fit:
                # Per-ticker re-fit on this ticker's training window.
                model.fit(x_train, y_train, sample_weight=w_train)
            acc = model.score(states[1:], np.sign((np.sign(log_prices[1:, 0] - log_prices[:-1, 0]) + 1) / 2).tolist())
            pred = model.predict(states[drop_len:])
            # Simulated equity: 2*pred-1 maps {0,1} signals to {-1,+1} positions.
            hold = np.array(np.log10(data['adjusted_close'])[drop_len:])
            trading = np.hstack((hold[0], (hold[0] + ((2 * pred[:-1] - 1) * (hold[1:] - hold[:-1])).cumsum())))
            # Log-return summaries over the full/train/test spans.
            all_hold_ret = hold[-1] - hold[0]
            all_trade_ret = trading[-1] - trading[0]
            all_inc_ret = all_trade_ret / abs(all_hold_ret) - 1
            train_hold_ret = hold[train_len - drop_len] - hold[0]
            train_trade_ret = trading[train_len - drop_len] - trading[0]
            train_inc_ret = train_trade_ret / abs(train_hold_ret) - 1
            test_hold_ret = hold[train_len + test_len - drop_len] - hold[train_len - drop_len]
            test_trade_ret = trading[train_len + test_len - drop_len] - trading[train_len - drop_len]
            test_inc_ret = test_trade_ret - test_hold_ret
            # First processed ticker: df2/df3/df4 are still empty DataFrames,
            # so initialize the arrays; afterwards vstack row-wise.
            if isinstance(df2, pd.DataFrame):
                df2 = np.pad(trading[:train_len + test_len],
                             [0, train_len + test_len - len(trading[:train_len + test_len])])
                df3 = np.pad(hold[:train_len + test_len],
                             [0, train_len + test_len - len(hold[:train_len + test_len])])
                # df1 = model._get_coef() #Support Vector Classifier (SVC)
                # df1 = model.feature_importances_ #Random Forest (RF)
                df1 = model.coef_  # SGDClassifier (SGD)
                df4 = np.pad(pred[:train_len + test_len],
                             [0, train_len + test_len - len(pred[:train_len + test_len])])
                df = df.append(pd.DataFrame([[tickers[i], acc, all_hold_ret, all_trade_ret, all_inc_ret,
                                              train_hold_ret, train_trade_ret, train_inc_ret,
                                              test_hold_ret, test_trade_ret, test_inc_ret]],
                                            columns=['ticker', 'accuracy', 'all_hold_ret', 'all_trade_ret',
                                                     'all_inc_ret', 'train_hold_ret', 'train_trade_ret',
                                                     'train_inc_ret', 'test_hold_ret', 'test_trade_ret',
                                                     'test_inc_ret']))
            else:
                df2 = np.vstack((df2, np.pad(trading[:train_len + test_len],
                                             [0, train_len + test_len - len(trading[:train_len + test_len])])))
                df3 = np.vstack((df3, np.pad(hold[:train_len + test_len],
                                             [0, train_len + test_len - len(hold[:train_len + test_len])])))
                df1 = np.vstack((df1, model.coef_))
                # df1 = np.vstack((df1, model._get_coef()))
                # df1 = np.vstack((df1, model.feature_importances_()))
                df4 = np.vstack((df4, np.pad(pred[:train_len + test_len],
                                             [0, train_len + test_len - len(pred[:train_len + test_len])])))
                df = df.append(pd.DataFrame([[tickers[i], acc, all_hold_ret, all_trade_ret, all_inc_ret,
                                              train_hold_ret, train_trade_ret, train_inc_ret,
                                              test_hold_ret, test_trade_ret, test_inc_ret]],
                                            columns=['ticker', 'accuracy', 'all_hold_ret', 'all_trade_ret',
                                                     'all_inc_ret', 'train_hold_ret', 'train_trade_ret',
                                                     'train_inc_ret', 'test_hold_ret', 'test_trade_ret',
                                                     'test_inc_ret']))
        i += 1
    df.reset_index(drop=True, inplace=True)
    return df, df1, df2, df3, df4
def inspect_ticker(ticker, model, prep, net, pca=None, vol=None, drop_len=200):
    """Run the fitted pipeline on one ticker and return its price curves.

    Parameters
    ----------
    ticker : str
        Symbol to fetch from the module-level ``portal``.
    model, prep, net, pca
        Fitted classifier, preprocessing pipeline, echo-state network and
        optional PCA projector (the same objects used during training).
    vol : bool, optional
        When truthy, feed log-volume into the ESN; otherwise a constant channel.
    drop_len : int
        Washout samples discarded from the start of the series.

    Returns
    -------
    tuple
        (buy-and-hold price series, simulated trading curve), both starting
        at index ``drop_len``.
    """
    data = portal.get('daily_prices', 'default', ticker)
    pp = np.log10(np.array(data['adjusted_close']).reshape((len(data), 1)))
    prep.fit(pp[:model_env['train_len']])
    pp = prep.transform(pp)
    if vol:
        # BUG FIX: previously indexed the ticker *string* (ticker['volume'] /
        # len(ticker)); use the fetched price frame, matching
        # individual_fit_results().
        log_vol = np.log10(np.array(data['volume'] + 1).reshape((len(data), 1)))
        prep.fit(log_vol[:model_env['train_len']])
        log_vol = prep.transform(log_vol)
    else:
        log_vol = np.ones((len(data), 1))
    states = net.get_states(log_vol, pp)
    if pca:
        states = pca.transform(states)
    pred = model.predict(states[drop_len:])
    # score = trading_score()
    # 2*pred-1 maps the {0,1} buy/sell signal to a {-1,+1} position.
    # NOTE(review): `hold` stays a pandas Series sliced from index drop_len;
    # hold[0]/hold[1:] rely on its integer labels — confirm against pandas
    # version in use.
    hold = data['adjusted_close'][drop_len:]
    trading = hold[0] + ((2 * pred[:-1] - 1) * (hold[1:] - hold[:-1])).cumsum()
    return hold, trading
def plot_ticker(ticker, model, prep, net, pca=None, vol=False):
    """Plot the buy-and-hold curve against the model-driven trading curve."""
    buy_hold, strategy = inspect_ticker(ticker, model, prep, net, pca=pca, vol=vol)
    for series, series_label in ((buy_hold, ticker), (strategy, ticker + '_ESN')):
        plt.plot(series, label=series_label)
    plt.legend()
def generate_plots(tickers, model, prep, net):
    """Lazily plot each ticker in turn, yielding after every figure update."""
    for symbol in tickers:
        print(symbol)
        result = plot_ticker(symbol, model, prep, net)
        yield result
def trading_score(y, y_pred):
    """Fraction of attainable return captured by the predicted trade directions.

    ``y`` holds realized per-step returns; ``y_pred`` only contributes its
    sign. The denominator is the return of perfect direction-calling.
    """
    captured = sum(y * np.sign(y_pred))
    attainable = sum(y * np.sign(y))
    return captured / attainable
def combine_pickles(model_uis, path, keys=('out', 'coefs', 'trading', 'hold', 'pred')):
    """Load per-chunk pickles for each model and merge them into one dict.

    Each model directory holds one base file per frame (``*_<frame>0.pkl``)
    plus optional chunk files suffixed 1..``pknum`` (module-level global).
    DataFrame frames are appended row-wise; array frames are vstacked.
    """
    merged = {}
    # Accept a single model id string as a convenience.
    uis = [model_uis] if isinstance(model_uis, str) else model_uis
    for ui in uis:
        per_model = {key: None for key in keys}
        for frame in keys:
            with open(f'{path}/{ui}/{ui}_{frame}0.pkl', 'rb') as fh:
                per_model[frame] = pickle.load(fh)
        for frame in keys:
            for chunk in range(1, pknum + 1):
                with open(f'{path}/{ui}/{ui}_{frame}{chunk}.pkl', 'rb') as fh:
                    piece = pickle.load(fh)
                if isinstance(piece, pd.DataFrame):
                    per_model[frame] = per_model[frame].append(piece)
                else:
                    per_model[frame] = np.vstack((per_model[frame], piece))
        merged[ui] = per_model
    return merged.copy()
def predict_all(model_env, ticker_range, all_tickers, pknum=0, new_env=True):
    """Run batch predictions for a saved model over a range of tickers.

    Loads the pickled model definition (and, unless ``new_env``, the saved
    model environment), evaluates tickers in batches of 25 via
    ``individual_fit_results``, and re-pickles the accumulated outputs after
    every batch so progress survives interruption.

    NOTE(review): the ``pknum`` parameter shadows the module-level global of
    the same name; here it only selects the output-file suffix.
    """
    path = model_env["folder_path"]
    with open(f'{path}/{model_env["model_ui"]}/{model_env["model_ui"]}_model_def.pkl', 'rb') as file:
        model_def = pickle.load(file)
    if not new_env:
        # Replace the caller-supplied environment with the one saved at training time.
        with open(f'{path}/{model_env["model_ui"]}/{model_env["model_ui"]}_model_env.pkl', 'rb') as file:
            model_env = pickle.load(file)
    out, coefs, trading, hold, pred = pd.DataFrame(), None, None, None, None
    for batch in range(ticker_range[0], ticker_range[-1], 25):
        df, df1, df2, df3, df4 = individual_fit_results(all_tickers[batch:batch + 25],
                                                        model_def['model'], model_def['prep'],
                                                        model_def['net'], pca=model_def['pca'],
                                                        new_fit=model_env['individual'],
                                                        train_len=model_env['train_len'],
                                                        vol=model_env['vol'], drop_len=model_env['drop_len'])
        out = out.append(df)
        # Checkpoint all accumulators to disk after each 25-ticker batch.
        with open(f'{path}/{model_env["model_ui"]}/{model_env["model_ui"]}_out{pknum}.pkl', 'wb+') as file:
            pickle.dump(out, file)
        if coefs is None:
            # First batch initializes the arrays; later batches stack row-wise.
            coefs, trading, hold, pred = df1, df2, df3, df4
        else:
            trading = np.vstack((trading, df2))
            coefs = np.vstack((coefs, df1))
            hold = np.vstack((hold, df3))
            pred = np.vstack((pred, df4))
        with open(f'{path}/{model_env["model_ui"]}/{model_env["model_ui"]}_coefs{pknum}.pkl', 'wb+') as file:
            pickle.dump(coefs, file)
        with open(f'{path}/{model_env["model_ui"]}/{model_env["model_ui"]}_trading{pknum}.pkl', 'wb+') as file:
            pickle.dump(trading, file)
        with open(f'{path}/{model_env["model_ui"]}/{model_env["model_ui"]}_hold{pknum}.pkl', 'wb+') as file:
            pickle.dump(hold, file)
        with open(f'{path}/{model_env["model_ui"]}/{model_env["model_ui"]}_pred{pknum}.pkl', 'wb+') as file:
            pickle.dump(pred, file)
def continue_predict(model_env, ticker_range, all_tickers, pknum=0, new_env=True):
    """Resume batch prediction for an existing model.

    This function previously carried a byte-for-byte copy of
    ``predict_all``; the duplicated body has been replaced with a direct
    delegation so the two entry points cannot drift apart. Parameters and
    side effects mirror ``predict_all`` exactly.
    """
    return predict_all(model_env, ticker_range, all_tickers, pknum=pknum, new_env=new_env)
def collect_model_outputs(path, start=None, end=None):
    """Combine the pickled outputs of every model directory under *path*.

    Parameters
    ----------
    path : str
        Directory whose sub-directories are model UIDs.
    start, end : str, optional
        Inclusive lexicographic bounds restricting which model UIDs to load.

    Returns
    -------
    dict
        Mapping of model UID -> combined output dict from ``combine_pickles``.
    """
    data_dict = {}
    model_list = os.listdir(path)
    # BUG FIX: the original called list(lambda, seq) — a TypeError at runtime;
    # filtering by the bound was clearly intended.
    if start:
        model_list = [m for m in model_list if m >= start]
    if end:
        model_list = [m for m in model_list if m <= end]
    for model_ui in model_list:
        data_dict[model_ui] = combine_pickles(model_ui, path)
        print(model_ui)
    return data_dict
def get_stats(data_dict, idx, analysis_period):
    """ calculates analysis metrics from dictionaries of outputs

    ``data_dict`` maps model UID -> {'out': DataFrame, 'pred'/'trading'/'hold':
    2-D arrays indexed [ticker_row, time_step]} as built by
    ``combine_pickles``/``collect_model_outputs``. For every time step in
    ``idx`` and every look-back window in ``analysis_period``, records the
    prediction mean/variance over the window plus the next-step trading and
    buy-and-hold returns, one row per (model, ticker, idx, period).
    """
    df = pd.DataFrame(
        columns=['model', 'ticker', 'idx', 'analysis_period', 'pred_var', 'pred_avg', 'next_trade', 'next_hold'])
    for period in analysis_period:
        for i in idx:
            for key in data_dict.keys():
                tickers = data_dict[key]['out'].ticker
                # Dispersion and mean of the last `period` predictions per ticker.
                pred_var = data_dict[key]['pred'][:, i - period:i].var(axis=1)
                pred_avg = data_dict[key]['pred'][:, i - period:i].mean(axis=1)
                # One-step-ahead returns of the strategy and of buy-and-hold.
                next_trade = (data_dict[key]['trading'][:, i + 1]) - (data_dict[key]['trading'][:, i])
                next_hold = (data_dict[key]['hold'][:, i + 1]) - (data_dict[key]['hold'][:, i])
                try:
                    df1 = pd.DataFrame(columns=['ticker', 'pred_var', 'pred_avg', 'next_trade', 'next_hold'],
                                       data=np.vstack(([tickers], [pred_var], [pred_avg], [next_trade], [next_hold])).T)
                    df1['model'] = key
                    df1['idx'] = i
                    df1['analysis_period'] = period
                    df = df.append(df1)
                except ValueError as ex:
                    # Shape mismatches (e.g. ticker count vs. array rows) are
                    # logged and skipped rather than aborting the sweep.
                    print(ex)
                    print(key)
                    print((period, i))
    return df
def compile_stats(path):
    """Concatenate every pickled stats frame found in *path*.

    Rows whose ``next_hold`` is +/-inf are unusable downstream and are
    filtered out of each frame before appending.
    """
    combined = pd.DataFrame()
    for fname in os.listdir(path):
        print(fname)
        with open(path + '/' + fname, 'rb') as fh:
            frame = pickle.load(fh)
        frame = frame.loc[frame.next_hold != np.inf]
        frame = frame.loc[frame.next_hold != -np.inf]
        combined = combined.append(frame)
    return combined
def calculate(model_ui, path):
    """Compute rolling prediction stats for one model and pickle the result."""
    outputs = combine_pickles(model_ui, path)
    eval_points = range(3999, 4199, 5)
    windows = [2, 3, 4, 5, 7, 10, 15, 25, 50, 75, 100, 150, 200]
    stats = get_stats(outputs, eval_points, windows)
    # Output file is keyed by the numeric prefix of the model UID.
    with open(f'{path}_stats/{model_ui[:4]}_stats.pkl', 'wb+') as fh:
        pickle.dump(stats, fh)
# items = Parallel(n_jobs=multiprocessing.cpu_count(), verbose=10)(delayed(calculate)(ui, model_env['folder_path']) for ui in os.listdir('models/SGD_hinge_loss'))
if __name__ == '__main__':
import time
import datetime
print(datetime.datetime.today())
print(f'pickle number: {str(pknum)}')
print(f'delay: {delay_minutes} min')
print(f'model_ui: {model_env["model_ui"]}')
print(f'tasks: {str(tasks)}')
time.sleep(delay_minutes * 60)
portal = DataPortal()
loop = 0
while loop != end_int:
if 'new' in tasks:
gen = portal.iter_get_uids('daily_prices', 'default', model_env['tickers'])
net = ESN(1, 1, n_reservoir=model_env['n_res_list'][0], sparsity=model_env['sparsity_list'][0], noise=0)
for i in range(1, len(model_env['n_res_list'])):
temp_net = ESN(1, 1, n_reservoir=model_env['n_res_list'][i], sparsity=model_env['sparsity_list'][i],
noise=0)
net.merge(temp_net)
x_train, x_test = np.zeros((0, sum(model_env['n_res_list']) + 1)), np.zeros(
(0, sum(model_env['n_res_list']) + 1))
y_train, y_test, y_cv, y_tcv = [], [], [], []
w_train, w_test = [], []
prep = Pipeline([('detrend', LinDetrend()), ('scaler', StandardScaler())])
for ticker in gen:
log_prices = np.log10(np.array(ticker['adjusted_close']).reshape((len(ticker), 1)))
if len(log_prices) > model_env['train_len']:
prep.fit(log_prices[:model_env['train_len']])
log_prices = prep.transform(log_prices)
if model_env['vol']:
log_vol = np.log10(np.array(ticker['volume'] + 1).reshape((len(ticker), 1)))
prep.fit(log_vol[:model_env['train_len']])
log_vol = prep.transform(log_vol)
else:
log_vol = np.ones((len(ticker), 1))
states = net.get_states(log_vol, log_prices)
x_train = np.vstack((x_train, states[model_env['drop_len']:model_env['train_len']]))
y_train += np.sign((np.sign(
log_prices[model_env['drop_len'] + 1:model_env['train_len'] + 1, 0] - log_prices[
model_env['drop_len']:
model_env['train_len'],
0]) + 1) / 2).tolist()
w_train += np.abs(log_prices[model_env['drop_len'] + 1:model_env['train_len'] + 1, 0] - log_prices[
model_env[
'drop_len']:
model_env[
'train_len'],
0]).tolist()
y_cv += (log_prices[model_env['drop_len'] + 1:model_env['train_len'] + 1, 0] - log_prices[model_env[
'drop_len']:
model_env[
'train_len'],
0]).tolist()
x_test = np.vstack((x_test, states[model_env['train_len']:-1]))
y_test += np.sign((np.sign(
log_prices[model_env['train_len'] + 1:, 0] - log_prices[model_env['train_len']:-1,
0]) + 1) / 2).tolist()
w_test += np.abs(
log_prices[model_env['train_len'] + 1:, 0] - log_prices[model_env['train_len']:-1, 0]).tolist()
y_tcv += (log_prices[model_env['train_len'] + 1:, 0] - log_prices[model_env['train_len']:-1,
0]).tolist()
# score_parameters = {'score_weights': w_train}
weighted_scorer = make_scorer(trading_score, greater_is_better=True)
# ToDo: add random scrambling to training data
# model = RandomForestClassifier(verbose=True, n_estimators=model_env['n_trees'], n_jobs=-1, criterion='entropy', max_features='sqrt', max_depth=8)
# model = RandomForestRegressor(n_estimators=2, verbose=10, n_jobs=8, criterion='mae', max_features='sqrt', max_depth=8)
# model = LinearSVR(max_iter=10000)
# model = MLPClassifier(verbose=True, hidden_layer_sizes=(50, 10, 30, 4), learning_rate='adaptive', max_iter=500,
# warm_start=True, tol=0.00000001, n_iter_no_change=20, activation='tanh')
# model = SVC(cache_size=3000, verbose=10, kernel='linear')
model = SGDClassifier(verbose=10, n_jobs=-1)
pca = PCA(n_components=model_env['n_comp'])
pc_x_train = pca.fit_transform(x_train)
pc_x_test = pca.transform(x_test)
model.fit(pc_x_train, y_train, sample_weight=w_train)
model_def = {'model': model,
'net': net,
'prep': prep,
'pca': pca,
}
if not os.path.isdir(f'{model_env["folder_path"]}/{model_env["model_ui"]}'):
os.mkdir(f'{model_env["folder_path"]}/{model_env["model_ui"]}')
with open(f'{model_env["folder_path"]}/{model_env["model_ui"]}/{model_env["model_ui"]}_model_env.pkl',
'wb+') as file:
pickle.dump(model_env, file)
with open(f'{model_env["folder_path"]}/{model_env["model_ui"]}/{model_env["model_ui"]}_model_def.pkl',
'wb+') as file:
pickle.dump(model_def, file)
# model.fit(x_train, y_cv)
# try:
# selector = RFECV(model, step=0.33, cv=5, scoring=weighted_scorer)
# selector.fit(x_train, y_cv)
# df = individual_results(all_tickers, selector, prep, net)
# except Exception as ex:
# print(ex)
# df = individual_results(all_tickers, model, prep, net)
# df = individual_results(all_tickers, model, prep, net)
if 'predict_all' in tasks:
predict_all(model_env, ticker_range, all_tickers, pknum=pknum, new_env=new_env)
if 'continue' in tasks:
continue_predict(model_env, ticker_range, all_tickers, pknum=pknum, new_env=new_env)
if 'combine' in tasks:
data_dict = combine_pickles(model_env['model_ui'], model_env['folder_path'])
if 'loop_new' in tasks:
loop += 1
num = int(model_env['model_ui'][:4]) + 1
mod = model_env['model_ui'][4:]
model_env['model_ui'] = f'{num :04d}' + mod
model_env['tickers'] = np.random.choice(all_tickers, 500, replace=False)
else:
break
if 'sandbox' in tasks:
df = compile_stats('models/SGD_hinge_loss_stats')
df['next_ideal'] = np.sign(df.next_hold).astype(int)
df.reset_index(inplace=True, drop=True)
train_idx = np.random.choice(df.index, 10000000, replace=False)
train_idx.sort()
df['pred'] = (np.sign(df.next_trade) / np.sign(df.next_hold.replace({0.0: 1.0}))).astype(int)
df_train = df.loc[train_idx, ['model', 'ticker', 'analysis_period', 'pred_var', 'pred_avg', 'pred']]
df_test = df.loc[:, ['model', 'ticker', 'analysis_period', 'pred_var', 'pred_avg', 'pred']].drop(train_idx)
y_train = df.loc[train_idx, 'next_ideal']
y_test = df.loc[:, 'next_ideal'].drop(train_idx)
df_train = df_train.astype(dict(zip(['model', 'ticker', 'analysis_period', 'pred_var', 'pred_avg', 'pred'],
[str, str, int, float, float, int])))
df_test = df_test.astype(dict(zip(['model', 'ticker', 'analysis_period', 'pred_var', 'pred_avg', 'pred'],
[str, str, int, float, float, int])))
from sklearn.preprocessing import OneHotEncoder
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.compose import make_column_transformer
from sklearn.compose import make_column_selector
from sklearn.pipeline import make_pipeline
one_hot_encoder = make_column_transformer(
(OneHotEncoder(sparse=False, handle_unknown='ignore'),
make_column_selector(dtype_include='object')),
remainder='passthrough')
gbc_one_hot = make_pipeline(one_hot_encoder,
HistGradientBoostingClassifier(verbose=10))
gbc_one_hot.fit(df_train.loc[:, ['model', 'analysis_period', 'pred_var', 'pred_avg', 'pred']], y_train)
# prep dataframe of predictions
df['next_ideal'] = np.sign(df.next_hold).astype(int)
df.reset_index(inplace=True, drop=True)
train_idx = df.loc[df.idx < 4120].index.tolist()
train_idx.sort()
df['pred'] = (np.sign(df.next_trade) / np.sign(df.next_hold.replace({0.0: 1.0}))).astype(int)
# split into train and test sets
df_train = df.loc[train_idx, ['model', 'pred_var', 'pred_avg', 'pred']]
df_test = df.loc[:, ['model', 'pred_var', 'pred_avg', 'pred']].drop(train_idx)
y_train = df.loc[train_idx, 'next_ideal']
y_test = df.loc[:, 'next_ideal'].drop(train_idx)
df_train = df_train.astype(dict(zip(['model', 'pred_var', 'pred_avg', 'pred'], [str, float, float, int])))
df_test = df_test.astype(dict(zip(['model', 'pred_var', 'pred_avg', 'pred'], [str, float, float, int])))
df_train.pred_avg = df_train.pred_avg.astype(float)
df_train.pred_var = df_train.pred_var.astype(float)
df_test.pred_avg = df_test.pred_avg.astype(float)
df_test.pred_var = df_test.pred_var.astype(float)
# try gradient boosted tree
gbc_one_hot.fit(df_train, y_train)
p = gbc_one_hot.predict(df_test)
act = df.loc[:, 'next_hold'].drop(train_idx)
res = p * act
p_t = gbc_one_hot.predict(df_train)
act_t = df.loc[train_idx, 'next_hold']
res_t = p_t * act_t
# try gmm clustering
from sklearn.mixture import GaussianMixture
mix = GaussianMixture(n_components=7)
mix.fit(df_train[[('pred', ''), ('pred_var', 2),
('pred_var', 4),
('pred_var', 7),
('pred_var', 25),
('pred_var', 100),
('pred_avg', 2),
('pred_avg', 4),
('pred_avg', 7),
('pred_avg', 25),
('pred_avg', 100)]])
labels = mix.predict(df_train[[('pred', ''), ('pred_var', 2),
('pred_var', 4),
('pred_var', 7),
('pred_var', 25),
('pred_var', 100),
('pred_avg', 2),
('pred_avg', 4),
('pred_avg', 7),
('pred_avg', 25),
('pred_avg', 100)]])
# Assign cluster labels to each training row so per-cluster trade/hold
# statistics can be computed below.
train_labels = pd.DataFrame(columns=['model', 'idx', 'ticker', 'cluster', 'trade', 'hold'],
                            data=np.vstack((df.loc[train_idx, ('model', "")],
                                       df.loc[train_idx, ('idx', "")],
                                       df.loc[train_idx, ('ticker', "")],
                                       labels,
                                       df.loc[train_idx, ('next_trade', "")],
                                       df.loc[train_idx, ('next_hold', "")])).T)
train_labels.hold = train_labels.hold.astype(float)
train_labels.trade = train_labels.trade.astype(float)
# Plot the per-cluster next-trade return distributions (kernel density),
# clipped to +/-15% so outliers do not dominate the KDE.
for i in range(mix.n_components):
    try:
        df.loc[train_idx].loc[(labels == i) & (df.loc[train_idx].next_trade > -0.15) & (
            df.loc[train_idx].next_trade < 0.15), 'next_trade'].plot(kind='kde', label=i)
    except Exception as ex:
        print(ex)
    print(
        f'Cluster {i}: count={df.loc[train_idx].loc[labels == i, "next_trade"].count()} sum={df.loc[train_idx].loc[labels == i, "next_trade"].sum()} avg={df.loc[train_idx].loc[labels == i, "next_trade"].mean()}')
print(
    f'Hold All: count={df.loc[train_idx].loc[:, "next_hold"].count()} sum={df.loc[train_idx].loc[:, "next_hold"].sum()} avg={df.loc[train_idx].loc[:, "next_hold"].mean()}')
df.loc[train_idx].loc[
    (df.loc[train_idx].next_trade > -0.15) & (df.loc[train_idx].next_trade < 0.15), 'next_hold'].plot(
    kind='kde', label='all', color='k')
plt.legend()
# Compare each cluster against a random selection of its trades.
# BUG FIX: the sample must be masked with the raw cluster assignments
# (labels == i); the original `train_labels == i` compared an entire
# DataFrame against the cluster id and always raised inside the try.
rand_res = np.zeros((mix.n_components, 100))
for i in range(mix.n_components):
    try:
        rand_res[i] = np.random.choice(df.loc[train_idx].loc[labels == i].next_trade.values, 100,
                                       replace=False)
    except Exception as ex:
        print(ex)
    print(
        f"cluster {i}: mean={rand_res[i].mean()} std={rand_res[i].std()} z-score={rand_res[i].mean() / rand_res[i].std()}")
# Statistical goodness-of-fit test (Kolmogorov-Smirnov): does cluster i's
# per-ticker mean trade return differ from the rest of the clusters?
trainer = train_labels.groupby(by=['cluster', 'ticker']).aggregate(
    {'hold': np.mean, 'trade': np.mean, 'idx': 'count', 'model': 'count'}).reset_index()
from scipy.stats import ks_2samp
for i in range(7):
    print(i)
    print((trainer.loc[trainer.cluster != i, 'trade'].mean(),
           trainer.loc[trainer.cluster == i, 'trade'].mean()))
    print(ks_2samp(trainer.loc[trainer.cluster != i, 'trade'], trainer.loc[trainer.cluster == i, 'trade']))
# Repeat the whole analysis on the held-out test data.
test_labels = mix.predict(df_test[[('pred', ''), ('pred_var', 2),
                                    ('pred_var', 4),
                                    ('pred_var', 7),
                                    ('pred_var', 25),
                                    ('pred_var', 100),
                                    ('pred_avg', 2),
                                    ('pred_avg', 4),
                                    ('pred_avg', 7),
                                    ('pred_avg', 25),
                                    ('pred_avg', 100)]])
test_idx = df.index.drop(train_idx).to_list()
for i in range(mix.n_components):
    try:
        df.loc[test_idx].loc[(test_labels == i) & (df.loc[test_idx].next_trade > -0.15) & (
            df.loc[test_idx].next_trade < 0.15), 'next_trade'].plot(kind='kde', label=i)
    except Exception as ex:
        print(ex)
    print(
        f'Cluster {i}: count={df.loc[test_idx].loc[test_labels == i, "next_trade"].count()} sum={df.loc[test_idx].loc[test_labels == i, "next_trade"].sum()} avg={round(100 * (np.exp(df.loc[test_idx].loc[test_labels == i, "next_trade"].mean()) - 1), 3)}%')
print(
    f'Hold All: count={df.loc[test_idx].loc[:, "next_hold"].count()} sum={df.loc[test_idx].loc[:, "next_hold"].sum()} avg={round(100 * (np.exp(df.loc[test_idx].loc[:, "next_hold"].mean()) - 1), 3)}%')
df.loc[test_idx].loc[
    (df.loc[test_idx].next_trade > -0.15) & (df.loc[test_idx].next_trade < 0.15), 'next_hold'].plot(
    kind='kde', label='all', color='k')
plt.legend()
# BUG FIX: the original assigned a 100-element sample to the scalar slot
# rand_res[i, j] (guaranteed ValueError, silently printed); draw one row
# of distinct samples per cluster instead, as in the training section.
rand_res = np.zeros((mix.n_components, 200))
for i in range(mix.n_components):
    try:
        rand_res[i] = np.random.choice(df.loc[test_idx].loc[test_labels == i].next_trade.values,
                                       rand_res.shape[1], replace=False)
    except Exception as ex:
        print(ex)
    print(
        f"cluster {i}: mean={round(100 * (np.exp(rand_res[i].mean()) - 1), 3)}% std={rand_res[i].std()} z-score={rand_res[i].mean() / rand_res[i].std()}")
# BUG FIX: `test_labels` is a bare numpy array from mix.predict and has no
# .groupby; build the same per-row label frame as for training before
# aggregating, mirroring the train_labels construction above.
test_label_df = pd.DataFrame(columns=['model', 'idx', 'ticker', 'cluster', 'trade', 'hold'],
                             data=np.vstack((df.loc[test_idx, ('model', "")],
                                        df.loc[test_idx, ('idx', "")],
                                        df.loc[test_idx, ('ticker', "")],
                                        test_labels,
                                        df.loc[test_idx, ('next_trade', "")],
                                        df.loc[test_idx, ('next_hold', "")])).T)
test_label_df.hold = test_label_df.hold.astype(float)
test_label_df.trade = test_label_df.trade.astype(float)
trainer = test_label_df.groupby(by=['cluster', 'ticker']).aggregate(
    {'hold': np.mean, 'trade': np.mean, 'idx': 'count', 'model': 'count'}).reset_index()
for i in range(7):
    print(i)
    print((trainer.loc[trainer.cluster != i, 'trade'].mean(),
           trainer.loc[trainer.cluster == i, 'trade'].mean()))
    print(ks_2samp(trainer.loc[trainer.cluster != i, 'trade'], trainer.loc[trainer.cluster == i, 'trade']))
| [
"pyESN.ESN",
"numpy.log10",
"pandas.read_csv",
"time.sleep",
"numpy.array",
"datetime.datetime.today",
"sklearn.linear_model.SGDClassifier",
"os.listdir",
"sklearn.ensemble.HistGradientBoostingClassifier",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.plot",
"os.path.isdir",
"numpy.vstack"... | [((1552, 1601), 'numpy.random.choice', 'np.random.choice', (['all_tickers', '(500)'], {'replace': '(False)'}), '(all_tickers, 500, replace=False)\n', (1568, 1601), True, 'import numpy as np\n'), ((2561, 2575), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2573, 2575), True, 'import pandas as pd\n'), ((2636, 2650), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2648, 2650), True, 'import pandas as pd\n'), ((2692, 2706), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2704, 2706), True, 'import pandas as pd\n'), ((2745, 2759), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2757, 2759), True, 'import pandas as pd\n'), ((2801, 2815), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2813, 2815), True, 'import pandas as pd\n'), ((10175, 10203), 'matplotlib.pyplot.plot', 'plt.plot', (['hold'], {'label': 'ticker'}), '(hold, label=ticker)\n', (10183, 10203), True, 'import matplotlib.pyplot as plt\n'), ((10208, 10248), 'matplotlib.pyplot.plot', 'plt.plot', (['trading'], {'label': "(ticker + '_ESN')"}), "(trading, label=ticker + '_ESN')\n", (10216, 10248), True, 'import matplotlib.pyplot as plt\n'), ((10253, 10265), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10263, 10265), True, 'import matplotlib.pyplot as plt\n'), ((15918, 15934), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (15928, 15934), False, 'import os\n'), ((16346, 16468), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['model', 'ticker', 'idx', 'analysis_period', 'pred_var', 'pred_avg',\n 'next_trade', 'next_hold']"}), "(columns=['model', 'ticker', 'idx', 'analysis_period',\n 'pred_var', 'pred_avg', 'next_trade', 'next_hold'])\n", (16358, 16468), True, 'import pandas as pd\n'), ((17655, 17669), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (17667, 17669), True, 'import pandas as pd\n'), ((17686, 17702), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (17696, 17702), False, 'import os\n'), ((18770, 18800), 
'time.sleep', 'time.sleep', (['(delay_minutes * 60)'], {}), '(delay_minutes * 60)\n', (18780, 18800), False, 'import time\n'), ((18814, 18826), 'framework.data_portals.data_portal.DataPortal', 'DataPortal', ([], {}), '()\n', (18824, 18826), False, 'from framework.data_portals.data_portal import DataPortal\n'), ((11780, 11797), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (11791, 11797), False, 'import pickle\n'), ((12004, 12018), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (12016, 12018), True, 'import pandas as pd\n'), ((13952, 13969), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (13963, 13969), False, 'import pickle\n'), ((14176, 14190), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14188, 14190), True, 'import pandas as pd\n'), ((18312, 18333), 'pickle.dump', 'pickle.dump', (['df', 'file'], {}), '(df, file)\n', (18323, 18333), False, 'import pickle\n'), ((18574, 18599), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (18597, 18599), False, 'import datetime\n'), ((915, 1028), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\kohle\\\\Documents\\\\Machine Learning\\\\Echo State Networks\\\\Stock_Data\\\\list.csv"""'], {}), "(\n 'C:\\\\Users\\\\kohle\\\\Documents\\\\Machine Learning\\\\Echo State Networks\\\\Stock_Data\\\\list.csv'\n )\n", (926, 1028), True, 'import pandas as pd\n'), ((3788, 3849), 'numpy.vstack', 'np.vstack', (["(x_train, states[model_env['drop_len']:train_len])"], {}), "((x_train, states[model_env['drop_len']:train_len]))\n", (3797, 3849), True, 'import numpy as np\n'), ((4651, 4692), 'numpy.vstack', 'np.vstack', (['(x_test, states[train_len:-1])'], {}), '((x_test, states[train_len:-1]))\n', (4660, 4692), True, 'import numpy as np\n'), ((11948, 11965), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (11959, 11965), False, 'import pickle\n'), ((12822, 12844), 'pickle.dump', 'pickle.dump', (['out', 'file'], {}), '(out, file)\n', (12833, 12844), False, 'import 
pickle\n'), ((12967, 12992), 'numpy.vstack', 'np.vstack', (['(trading, df2)'], {}), '((trading, df2))\n', (12976, 12992), True, 'import numpy as np\n'), ((13013, 13036), 'numpy.vstack', 'np.vstack', (['(coefs, df1)'], {}), '((coefs, df1))\n', (13022, 13036), True, 'import numpy as np\n'), ((13056, 13078), 'numpy.vstack', 'np.vstack', (['(hold, df3)'], {}), '((hold, df3))\n', (13065, 13078), True, 'import numpy as np\n'), ((13098, 13120), 'numpy.vstack', 'np.vstack', (['(pred, df4)'], {}), '((pred, df4))\n', (13107, 13120), True, 'import numpy as np\n'), ((13243, 13267), 'pickle.dump', 'pickle.dump', (['coefs', 'file'], {}), '(coefs, file)\n', (13254, 13267), False, 'import pickle\n'), ((13392, 13418), 'pickle.dump', 'pickle.dump', (['trading', 'file'], {}), '(trading, file)\n', (13403, 13418), False, 'import pickle\n'), ((13540, 13563), 'pickle.dump', 'pickle.dump', (['hold', 'file'], {}), '(hold, file)\n', (13551, 13563), False, 'import pickle\n'), ((13685, 13708), 'pickle.dump', 'pickle.dump', (['pred', 'file'], {}), '(pred, file)\n', (13696, 13708), False, 'import pickle\n'), ((14120, 14137), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (14131, 14137), False, 'import pickle\n'), ((14938, 14960), 'pickle.dump', 'pickle.dump', (['out', 'file'], {}), '(out, file)\n', (14949, 14960), False, 'import pickle\n'), ((15083, 15108), 'numpy.vstack', 'np.vstack', (['(trading, df2)'], {}), '((trading, df2))\n', (15092, 15108), True, 'import numpy as np\n'), ((15129, 15152), 'numpy.vstack', 'np.vstack', (['(coefs, df1)'], {}), '((coefs, df1))\n', (15138, 15152), True, 'import numpy as np\n'), ((15172, 15194), 'numpy.vstack', 'np.vstack', (['(hold, df3)'], {}), '((hold, df3))\n', (15181, 15194), True, 'import numpy as np\n'), ((15214, 15236), 'numpy.vstack', 'np.vstack', (['(pred, df4)'], {}), '((pred, df4))\n', (15223, 15236), True, 'import numpy as np\n'), ((15359, 15383), 'pickle.dump', 'pickle.dump', (['coefs', 'file'], {}), '(coefs, file)\n', (15370, 15383), 
False, 'import pickle\n'), ((15508, 15534), 'pickle.dump', 'pickle.dump', (['trading', 'file'], {}), '(trading, file)\n', (15519, 15534), False, 'import pickle\n'), ((15656, 15679), 'pickle.dump', 'pickle.dump', (['hold', 'file'], {}), '(hold, file)\n', (15667, 15679), False, 'import pickle\n'), ((15801, 15824), 'pickle.dump', 'pickle.dump', (['pred', 'file'], {}), '(pred, file)\n', (15812, 15824), False, 'import pickle\n'), ((17794, 17811), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (17805, 17811), False, 'import pickle\n'), ((19000, 19103), 'pyESN.ESN', 'ESN', (['(1)', '(1)'], {'n_reservoir': "model_env['n_res_list'][0]", 'sparsity': "model_env['sparsity_list'][0]", 'noise': '(0)'}), "(1, 1, n_reservoir=model_env['n_res_list'][0], sparsity=model_env[\n 'sparsity_list'][0], noise=0)\n", (19003, 19103), False, 'from pyESN import ESN\n'), ((23148, 23198), 'sklearn.metrics.make_scorer', 'make_scorer', (['trading_score'], {'greater_is_better': '(True)'}), '(trading_score, greater_is_better=True)\n', (23159, 23198), False, 'from sklearn.metrics import make_scorer\n'), ((23926, 23962), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'verbose': '(10)', 'n_jobs': '(-1)'}), '(verbose=10, n_jobs=-1)\n', (23939, 23962), False, 'from sklearn.linear_model import SGDClassifier\n'), ((23981, 24018), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': "model_env['n_comp']"}), "(n_components=model_env['n_comp'])\n", (23984, 24018), False, 'from sklearn.decomposition import PCA\n'), ((26019, 26068), 'numpy.random.choice', 'np.random.choice', (['all_tickers', '(500)'], {'replace': '(False)'}), '(all_tickers, 500, replace=False)\n', (26035, 26068), True, 'import numpy as np\n'), ((26335, 26386), 'numpy.random.choice', 'np.random.choice', (['df.index', '(10000000)'], {'replace': '(False)'}), '(df.index, 10000000, replace=False)\n', (26351, 26386), True, 'import numpy as np\n'), ((29790, 29821), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', 
([], {'n_components': '(7)'}), '(n_components=7)\n', (29805, 29821), False, 'from sklearn.mixture import GaussianMixture\n'), ((32856, 32868), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (32866, 32868), True, 'import matplotlib.pyplot as plt\n'), ((32957, 32990), 'numpy.zeros', 'np.zeros', (['(mix.n_components, 100)'], {}), '((mix.n_components, 100))\n', (32965, 32990), True, 'import numpy as np\n'), ((35976, 35988), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (35986, 35988), True, 'import matplotlib.pyplot as plt\n'), ((36012, 36045), 'numpy.zeros', 'np.zeros', (['(mix.n_components, 200)'], {}), '((mix.n_components, 200))\n', (36020, 36045), True, 'import numpy as np\n'), ((8065, 8094), 'numpy.vstack', 'np.vstack', (['(df1, model.coef_)'], {}), '((df1, model.coef_))\n', (8074, 8094), True, 'import numpy as np\n'), ((9340, 9372), 'numpy.array', 'np.array', (["data['adjusted_close']"], {}), "(data['adjusted_close'])\n", (9348, 9372), True, 'import numpy as np\n'), ((10467, 10482), 'numpy.sign', 'np.sign', (['y_pred'], {}), '(y_pred)\n', (10474, 10482), True, 'import numpy as np\n'), ((10494, 10504), 'numpy.sign', 'np.sign', (['y'], {}), '(y)\n', (10501, 10504), True, 'import numpy as np\n'), ((11044, 11061), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (11055, 11061), False, 'import pickle\n'), ((19187, 19290), 'pyESN.ESN', 'ESN', (['(1)', '(1)'], {'n_reservoir': "model_env['n_res_list'][i]", 'sparsity': "model_env['sparsity_list'][i]", 'noise': '(0)'}), "(1, 1, n_reservoir=model_env['n_res_list'][i], sparsity=model_env[\n 'sparsity_list'][i], noise=0)\n", (19190, 19290), False, 'from pyESN import ESN\n'), ((24383, 24451), 'os.path.isdir', 'os.path.isdir', (['f"""{model_env[\'folder_path\']}/{model_env[\'model_ui\']}"""'], {}), '(f"{model_env[\'folder_path\']}/{model_env[\'model_ui\']}")\n', (24396, 24451), False, 'import os\n'), ((24469, 24532), 'os.mkdir', 'os.mkdir', 
(['f"""{model_env[\'folder_path\']}/{model_env[\'model_ui\']}"""'], {}), '(f"{model_env[\'folder_path\']}/{model_env[\'model_ui\']}")\n', (24477, 24532), False, 'import os\n'), ((24702, 24730), 'pickle.dump', 'pickle.dump', (['model_env', 'file'], {}), '(model_env, file)\n', (24713, 24730), False, 'import pickle\n'), ((24900, 24928), 'pickle.dump', 'pickle.dump', (['model_def', 'file'], {}), '(model_def, file)\n', (24911, 24928), False, 'import pickle\n'), ((27995, 28037), 'sklearn.ensemble.HistGradientBoostingClassifier', 'HistGradientBoostingClassifier', ([], {'verbose': '(10)'}), '(verbose=10)\n', (28025, 28037), False, 'from sklearn.ensemble import HistGradientBoostingClassifier\n'), ((3164, 3192), 'numpy.array', 'np.array', (["(data['volume'] + 1)"], {}), "(data['volume'] + 1)\n", (3172, 3192), True, 'import numpy as np\n'), ((3248, 3280), 'numpy.array', 'np.array', (["data['adjusted_close']"], {}), "(data['adjusted_close'])\n", (3256, 3280), True, 'import numpy as np\n'), ((4127, 4243), 'numpy.abs', 'np.abs', (["(log_prices[model_env['drop_len'] + 1:train_len + 1, 0] - log_prices[\n model_env['drop_len']:train_len, 0])"], {}), "(log_prices[model_env['drop_len'] + 1:train_len + 1, 0] - log_prices[\n model_env['drop_len']:train_len, 0])\n", (4133, 4243), True, 'import numpy as np\n'), ((4851, 4918), 'numpy.abs', 'np.abs', (['(log_prices[train_len + 1:, 0] - log_prices[train_len:-1, 0])'], {}), '(log_prices[train_len + 1:, 0] - log_prices[train_len:-1, 0])\n', (4857, 4918), True, 'import numpy as np\n'), ((5473, 5505), 'numpy.log10', 'np.log10', (["data['adjusted_close']"], {}), "(data['adjusted_close'])\n", (5481, 5505), True, 'import numpy as np\n'), ((7003, 7376), 'pandas.DataFrame', 'pd.DataFrame', (['[[tickers[i], acc, all_hold_ret, all_trade_ret, all_inc_ret, train_hold_ret,\n train_trade_ret, train_inc_ret, test_hold_ret, test_trade_ret,\n test_inc_ret]]'], {'columns': "['ticker', 'accuracy', 'all_hold_ret', 'all_trade_ret', 'all_inc_ret',\n 
'train_hold_ret', 'train_trade_ret', 'train_inc_ret', 'test_hold_ret',\n 'test_trade_ret', 'test_inc_ret']"}), "([[tickers[i], acc, all_hold_ret, all_trade_ret, all_inc_ret,\n train_hold_ret, train_trade_ret, train_inc_ret, test_hold_ret,\n test_trade_ret, test_inc_ret]], columns=['ticker', 'accuracy',\n 'all_hold_ret', 'all_trade_ret', 'all_inc_ret', 'train_hold_ret',\n 'train_trade_ret', 'train_inc_ret', 'test_hold_ret', 'test_trade_ret',\n 'test_inc_ret'])\n", (7015, 7376), True, 'import pandas as pd\n'), ((8440, 8813), 'pandas.DataFrame', 'pd.DataFrame', (['[[tickers[i], acc, all_hold_ret, all_trade_ret, all_inc_ret, train_hold_ret,\n train_trade_ret, train_inc_ret, test_hold_ret, test_trade_ret,\n test_inc_ret]]'], {'columns': "['ticker', 'accuracy', 'all_hold_ret', 'all_trade_ret', 'all_inc_ret',\n 'train_hold_ret', 'train_trade_ret', 'train_inc_ret', 'test_hold_ret',\n 'test_trade_ret', 'test_inc_ret']"}), "([[tickers[i], acc, all_hold_ret, all_trade_ret, all_inc_ret,\n train_hold_ret, train_trade_ret, train_inc_ret, test_hold_ret,\n test_trade_ret, test_inc_ret]], columns=['ticker', 'accuracy',\n 'all_hold_ret', 'all_trade_ret', 'all_inc_ret', 'train_hold_ret',\n 'train_trade_ret', 'train_inc_ret', 'test_hold_ret', 'test_trade_ret',\n 'test_inc_ret'])\n", (8452, 8813), True, 'import pandas as pd\n'), ((9507, 9537), 'numpy.array', 'np.array', (["(ticker['volume'] + 1)"], {}), "(ticker['volume'] + 1)\n", (9515, 9537), True, 'import numpy as np\n'), ((11245, 11262), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (11256, 11262), False, 'import pickle\n'), ((11470, 11513), 'numpy.vstack', 'np.vstack', (['(data_dict[model_ui][frame], df)'], {}), '((data_dict[model_ui][frame], df))\n', (11479, 11513), True, 'import numpy as np\n'), ((20446, 20520), 'numpy.vstack', 'np.vstack', (["(x_train, states[model_env['drop_len']:model_env['train_len']])"], {}), "((x_train, states[model_env['drop_len']:model_env['train_len']]))\n", (20455, 20520), True, 'import 
numpy as np\n'), ((22396, 22450), 'numpy.vstack', 'np.vstack', (["(x_test, states[model_env['train_len']:-1])"], {}), "((x_test, states[model_env['train_len']:-1]))\n", (22405, 22450), True, 'import numpy as np\n'), ((26225, 26246), 'numpy.sign', 'np.sign', (['df.next_hold'], {}), '(df.next_hold)\n', (26232, 26246), True, 'import numpy as np\n'), ((27738, 27790), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)', 'handle_unknown': '"""ignore"""'}), "(sparse=False, handle_unknown='ignore')\n", (27751, 27790), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((27809, 27853), 'sklearn.compose.make_column_selector', 'make_column_selector', ([], {'dtype_include': '"""object"""'}), "(dtype_include='object')\n", (27829, 27853), False, 'from sklearn.compose import make_column_selector\n'), ((28231, 28252), 'numpy.sign', 'np.sign', (['df.next_hold'], {}), '(df.next_hold)\n', (28238, 28252), True, 'import numpy as np\n'), ((33092, 33192), 'numpy.random.choice', 'np.random.choice', (['df.loc[train_idx].loc[train_labels == i].next_trade.values', '(100)'], {'replace': '(False)'}), '(df.loc[train_idx].loc[train_labels == i].next_trade.values,\n 100, replace=False)\n', (33108, 33192), True, 'import numpy as np\n'), ((33999, 34100), 'scipy.stats.ks_2samp', 'ks_2samp', (["trainer.loc[trainer.cluster != i, 'trade']", "trainer.loc[trainer.cluster == i, 'trade']"], {}), "(trainer.loc[trainer.cluster != i, 'trade'], trainer.loc[trainer.\n cluster == i, 'trade'])\n", (34007, 34100), False, 'from scipy.stats import ks_2samp\n'), ((37031, 37132), 'scipy.stats.ks_2samp', 'ks_2samp', (["trainer.loc[trainer.cluster != i, 'trade']", "trainer.loc[trainer.cluster == i, 'trade']"], {}), "(trainer.loc[trainer.cluster != i, 'trade'], trainer.loc[trainer.\n cluster == i, 'trade'])\n", (37039, 37132), False, 'from scipy.stats import ks_2samp\n'), ((19659, 19675), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (19673, 19675), 
False, 'from sklearn.preprocessing import StandardScaler\n'), ((26442, 26464), 'numpy.sign', 'np.sign', (['df.next_trade'], {}), '(df.next_trade)\n', (26449, 26464), True, 'import numpy as np\n'), ((28433, 28455), 'numpy.sign', 'np.sign', (['df.next_trade'], {}), '(df.next_trade)\n', (28440, 28455), True, 'import numpy as np\n'), ((31150, 31356), 'numpy.vstack', 'np.vstack', (["(df.loc[train_idx, ('model', '')], df.loc[train_idx, ('idx', '')], df.loc[\n train_idx, ('ticker', '')], labels, df.loc[train_idx, ('next_trade', ''\n )], df.loc[train_idx, ('next_hold', '')])"], {}), "((df.loc[train_idx, ('model', '')], df.loc[train_idx, ('idx', '')],\n df.loc[train_idx, ('ticker', '')], labels, df.loc[train_idx, (\n 'next_trade', '')], df.loc[train_idx, ('next_hold', '')]))\n", (31159, 31356), True, 'import numpy as np\n'), ((36209, 36308), 'numpy.random.choice', 'np.random.choice', (['df.loc[test_idx].loc[test_labels == i].next_trade.values', '(100)'], {'replace': '(False)'}), '(df.loc[test_idx].loc[test_labels == i].next_trade.values, \n 100, replace=False)\n', (36225, 36308), True, 'import numpy as np\n'), ((19748, 19782), 'numpy.array', 'np.array', (["ticker['adjusted_close']"], {}), "(ticker['adjusted_close'])\n", (19756, 19782), True, 'import numpy as np\n'), ((21059, 21200), 'numpy.abs', 'np.abs', (["(log_prices[model_env['drop_len'] + 1:model_env['train_len'] + 1, 0] -\n log_prices[model_env['drop_len']:model_env['train_len'], 0])"], {}), "(log_prices[model_env['drop_len'] + 1:model_env['train_len'] + 1, 0] -\n log_prices[model_env['drop_len']:model_env['train_len'], 0])\n", (21065, 21200), True, 'import numpy as np\n'), ((22728, 22826), 'numpy.abs', 'np.abs', (["(log_prices[model_env['train_len'] + 1:, 0] - log_prices[model_env[\n 'train_len']:-1, 0])"], {}), "(log_prices[model_env['train_len'] + 1:, 0] - log_prices[model_env[\n 'train_len']:-1, 0])\n", (22734, 22826), True, 'import numpy as np\n'), ((3882, 3999), 'numpy.sign', 'np.sign', 
(["(log_prices[model_env['drop_len'] + 1:train_len + 1, 0] - log_prices[\n model_env['drop_len']:train_len, 0])"], {}), "(log_prices[model_env['drop_len'] + 1:train_len + 1, 0] - log_prices\n [model_env['drop_len']:train_len, 0])\n", (3889, 3999), True, 'import numpy as np\n'), ((4741, 4809), 'numpy.sign', 'np.sign', (['(log_prices[train_len + 1:, 0] - log_prices[train_len:-1, 0])'], {}), '(log_prices[train_len + 1:, 0] - log_prices[train_len:-1, 0])\n', (4748, 4809), True, 'import numpy as np\n'), ((17160, 17233), 'numpy.vstack', 'np.vstack', (['([tickers], [pred_var], [pred_avg], [next_trade], [next_hold])'], {}), '(([tickers], [pred_var], [pred_avg], [next_trade], [next_hold]))\n', (17169, 17233), True, 'import numpy as np\n'), ((5325, 5372), 'numpy.sign', 'np.sign', (['(log_prices[1:, 0] - log_prices[:-1, 0])'], {}), '(log_prices[1:, 0] - log_prices[:-1, 0])\n', (5332, 5372), True, 'import numpy as np\n'), ((20081, 20111), 'numpy.array', 'np.array', (["(ticker['volume'] + 1)"], {}), "(ticker['volume'] + 1)\n", (20089, 20111), True, 'import numpy as np\n'), ((20561, 20703), 'numpy.sign', 'np.sign', (["(log_prices[model_env['drop_len'] + 1:model_env['train_len'] + 1, 0] -\n log_prices[model_env['drop_len']:model_env['train_len'], 0])"], {}), "(log_prices[model_env['drop_len'] + 1:model_env['train_len'] + 1, 0] -\n log_prices[model_env['drop_len']:model_env['train_len'], 0])\n", (20568, 20703), True, 'import numpy as np\n'), ((22490, 22589), 'numpy.sign', 'np.sign', (["(log_prices[model_env['train_len'] + 1:, 0] - log_prices[model_env[\n 'train_len']:-1, 0])"], {}), "(log_prices[model_env['train_len'] + 1:, 0] - log_prices[model_env[\n 'train_len']:-1, 0])\n", (22497, 22589), True, 'import numpy as np\n')] |
from utils.misc import isnotebook
if isnotebook():
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm
import numpy as np
import torch
from torch_geometric.data import Data
from torch_geometric.utils import remove_self_loops
from utils.load_dataset import load_data, load_zinc_data, load_ogb_data, load_g6_graphs
from utils.utils_subgraphs import compute_degrees
import os
import torch_geometric.datasets as ptg_datasets
def unique_indices(num_unique, inverse):
    """Return, for each of the ``num_unique`` values, the index of its
    first occurrence in ``inverse``.

    ``inverse`` is the inverse-index tensor produced by ``torch.unique``.
    Scattering the positions in reverse order makes the earliest
    occurrence win, because later scatter writes overwrite earlier ones.
    """
    n = inverse.size(0)
    # Positions counted down from the end: [n-1, n-2, ..., 0].
    positions = torch.arange(n - 1, -1, -1, dtype=inverse.dtype, device=inverse.device)
    first_occurrence = inverse.new_empty(num_unique)
    first_occurrence.scatter_(0, inverse.flip([0]), positions)
    return first_occurrence
def generate_dataset(data_path,
                     dataset_name,
                     directed):
    """Load a graph dataset and convert every graph to a PyG ``Data`` object.

    The loader is chosen from the dataset "family", i.e. the name of the
    parent directory of ``data_path`` (layout assumed to be
    ``.../<family>/<dataset_name>`` -- TODO confirm against callers).

    Parameters
    ----------
    data_path : str
        Root directory of the dataset on disk.
    dataset_name : str
        Dataset identifier passed through to the underlying loader.
    directed : bool
        Whether edges are treated as directed (forwarded to ``_prepare``).

    Returns
    -------
    tuple
        ``(graphs_ptg, num_classes, num_node_type, num_edge_type)`` where
        ``graphs_ptg`` is a list of preprocessed ``Data`` objects; the two
        type counts are ``None`` for every family except ZINC.
    """
    ### load and preprocess dataset
    # Family = name of the parent directory of data_path.
    dataset_family = os.path.split(os.path.split(data_path)[0])[1]
    if dataset_family == 'PPI':
        # PPI ships pre-split; concatenate train/test/val and record each
        # split's index range as a fold-0 index file for downstream code.
        dataset_type = 'ptg'
        graphs = []
        start = 0
        if not os.path.exists(os.path.join(data_path, '10fold_idx')):
            os.makedirs(os.path.join(data_path, '10fold_idx'))
        for split in ['train', 'test', 'val']:
            graphs_temp = getattr(ptg_datasets, dataset_family)(data_path, split)
            graphs += [graphs_temp[i] for i in range(len(graphs_temp))]
            end = len(graphs)
            split_idx = list(range(start, end))
            filename = os.path.join(data_path, '10fold_idx', split+'_idx-{}.txt'.format(0))
            np.savetxt(filename, np.array(split_idx).astype(int), fmt='%d')
            start = end
        num_classes = graphs_temp.num_classes
        num_node_type, num_edge_type = None, None
    elif dataset_family == 'KarateClub':
        dataset_type = 'ptg'
        graphs = getattr(ptg_datasets, dataset_family)()
        num_classes = graphs.num_classes
        num_node_type, num_edge_type = None, None
    elif dataset_family == 'TUDataset':
        dataset_type = 'ptg'
        graphs = getattr(ptg_datasets, dataset_family)(data_path, dataset_name, cleaned=True)
        num_classes = graphs.num_classes
        num_node_type, num_edge_type = None, None
    elif dataset_family == 'Amazon':
        dataset_type = 'ptg'
        graphs = getattr(ptg_datasets, dataset_family)(data_path, dataset_name)
        num_classes = graphs.num_classes
        num_node_type, num_edge_type = None, None
    elif dataset_family == 'Planetoid':
        dataset_type = 'ptg'
        graphs = getattr(ptg_datasets, dataset_family)(data_path, dataset_name)
        num_classes = graphs.num_classes
        num_node_type, num_edge_type = None, None
    elif 'ogb' in data_path:
        # Non-PyG families use the project's own loaders ('general' layout).
        dataset_type = 'general'
        graphs, num_classes = load_ogb_data(data_path, dataset_name, False)
        num_node_type, num_edge_type = None, None
    elif dataset_name == 'ZINC':
        # ZINC is the only family that reports node/edge vocabulary sizes.
        dataset_type = 'general'
        graphs, num_classes, num_node_type, num_edge_type = load_zinc_data(data_path, dataset_name, False)
    elif os.path.split(data_path)[-1] in ['SR_graphs', 'all_graphs']:
        dataset_type = 'general'
        graphs, num_classes = load_g6_graphs(data_path, dataset_name)
        num_node_type, num_edge_type = None, None
    else:
        dataset_type = 'general'
        graphs, num_classes = load_data(data_path, dataset_name, False)
        num_node_type, num_edge_type = None, None
    # Normalize every loaded graph into the project's Data layout.
    graphs_ptg = list()
    for i, data in tqdm(enumerate(graphs)):
        new_data = _prepare(data, directed, dataset_type, dataset_name)
        graphs_ptg.append(new_data)
    return graphs_ptg, num_classes, num_node_type, num_edge_type
# ------------------------------------------------------------------------
def _prepare(data, directed, dataset_type='ptg', dataset_name=None):
    """Convert one raw graph into a fresh ``Data`` object.

    Copies node features, edge index and edge features from either a PyG
    graph (``dataset_type='ptg'``: attributes ``x``/``edge_index``/
    ``edge_attr``) or a project 'general' graph (attributes
    ``node_features``/``edge_mat``/``edge_features``), then deduplicates
    edges, strips self loops, and attaches node degrees.

    Also sets ``graph_size`` (float node count) and ``edge_size`` (edge
    count; halved when ``directed`` is False, i.e. each undirected edge is
    assumed to be stored twice -- TODO confirm for all loaders).
    """
    new_data = Data()
    # nodes
    if dataset_type == 'ptg':
        if hasattr(data, 'x') and data.x is not None:
            num_nodes = data.x.shape[0]
            x = data.x
        else:
            # Featureless graph: fall back to a constant 1-dim feature.
            num_nodes = data.num_nodes
            x = torch.ones((num_nodes,1))
        setattr(new_data, 'x', x)
    else:
        num_nodes = data.node_features.shape[0]
        setattr(new_data, 'x', data.node_features)
    setattr(new_data, 'graph_size', float(num_nodes))
    # edges
    if dataset_type == 'ptg':
        num_edges = float(data.edge_index.shape[1]) if directed else data.edge_index.shape[1]/2
        edge_index = data.edge_index
        if hasattr(data, 'edge_attr') and data.edge_attr is not None:
            edge_features = data.edge_attr
        else:
            edge_features = None
    else:
        num_edges = float(data.edge_mat.shape[1]) if directed else data.edge_mat.shape[1]/2
        edge_index = data.edge_mat
        if hasattr(data, 'edge_features') and data.edge_features is not None:
            edge_features = data.edge_features
        else:
            edge_features = None
    setattr(new_data, 'edge_size', num_edges)
    # adjacency
    if edge_index.numel()!=0:
        # multi-edge graphs not allowed: torch.unique over columns drops
        # duplicate edges; unique_indices recovers the first occurrence of
        # each kept edge so edge_features can be subset consistently.
        init_num_edges = edge_index.shape[1]
        edge_index, inverse = torch.unique(edge_index, dim=1, sorted=True, return_inverse=True)
        kept_inds = unique_indices(edge_index.shape[1], inverse)
        # warning messages
        if init_num_edges!=edge_index.shape[1]:
            print('Warning: detected duplicate edges')
        init_num_edges = edge_index.shape[1]
        if edge_features is not None:
            edge_features = edge_features[kept_inds]
            edge_index, edge_features = remove_self_loops(edge_index, edge_features)
        else:
            edge_index, _ = remove_self_loops(edge_index, None)
        # warning messages
        if init_num_edges!=edge_index.shape[1]:
            print('Warning: detected self loops')
    setattr(new_data, 'edge_index', edge_index)
    # edge features
    if edge_features is not None:
        setattr(new_data, 'edge_features', edge_features)
    # degrees
    degrees = compute_degrees(edge_index, num_nodes, directed)
    setattr(new_data, 'degrees', degrees)
    # NOTE(review): label copying is disabled; labels are presumably
    # attached elsewhere in the pipeline -- confirm before re-enabling.
    # if regression or dataset_name in {'ogbg-molpcba', 'ogbg-molhiv', 'ZINC'}:
    #     setattr(new_data, 'y', torch.tensor(data.label).unsqueeze(0).float())
    # else:
    #     setattr(new_data, 'y', torch.tensor(data.label).unsqueeze(0).long())
    return new_data
# -------------------------------------------------------------------------------------- | [
"utils.load_dataset.load_ogb_data",
"torch.unique",
"utils.load_dataset.load_data",
"os.path.join",
"utils.utils_subgraphs.compute_degrees",
"utils.misc.isnotebook",
"os.path.split",
"numpy.array",
"torch_geometric.utils.remove_self_loops",
"utils.load_dataset.load_zinc_data",
"utils.load_datase... | [((37, 49), 'utils.misc.isnotebook', 'isnotebook', ([], {}), '()\n', (47, 49), False, 'from utils.misc import isnotebook\n'), ((3833, 3839), 'torch_geometric.data.Data', 'Data', ([], {}), '()\n', (3837, 3839), False, 'from torch_geometric.data import Data\n'), ((6063, 6111), 'utils.utils_subgraphs.compute_degrees', 'compute_degrees', (['edge_index', 'num_nodes', 'directed'], {}), '(edge_index, num_nodes, directed)\n', (6078, 6111), False, 'from utils.utils_subgraphs import compute_degrees\n'), ((5147, 5212), 'torch.unique', 'torch.unique', (['edge_index'], {'dim': '(1)', 'sorted': '(True)', 'return_inverse': '(True)'}), '(edge_index, dim=1, sorted=True, return_inverse=True)\n', (5159, 5212), False, 'import torch\n'), ((4068, 4094), 'torch.ones', 'torch.ones', (['(num_nodes, 1)'], {}), '((num_nodes, 1))\n', (4078, 4094), False, 'import torch\n'), ((5602, 5646), 'torch_geometric.utils.remove_self_loops', 'remove_self_loops', (['edge_index', 'edge_features'], {}), '(edge_index, edge_features)\n', (5619, 5646), False, 'from torch_geometric.utils import remove_self_loops\n'), ((5689, 5724), 'torch_geometric.utils.remove_self_loops', 'remove_self_loops', (['edge_index', 'None'], {}), '(edge_index, None)\n', (5706, 5724), False, 'from torch_geometric.utils import remove_self_loops\n'), ((883, 907), 'os.path.split', 'os.path.split', (['data_path'], {}), '(data_path)\n', (896, 907), False, 'import os\n'), ((1044, 1081), 'os.path.join', 'os.path.join', (['data_path', '"""10fold_idx"""'], {}), "(data_path, '10fold_idx')\n", (1056, 1081), False, 'import os\n'), ((1108, 1145), 'os.path.join', 'os.path.join', (['data_path', '"""10fold_idx"""'], {}), "(data_path, '10fold_idx')\n", (1120, 1145), False, 'import os\n'), ((1551, 1570), 'numpy.array', 'np.array', (['split_idx'], {}), '(split_idx)\n', (1559, 1570), True, 'import numpy as np\n'), ((2756, 2801), 'utils.load_dataset.load_ogb_data', 'load_ogb_data', (['data_path', 'dataset_name', '(False)'], {}), 
'(data_path, dataset_name, False)\n', (2769, 2801), False, 'from utils.load_dataset import load_data, load_zinc_data, load_ogb_data, load_g6_graphs\n'), ((2978, 3024), 'utils.load_dataset.load_zinc_data', 'load_zinc_data', (['data_path', 'dataset_name', '(False)'], {}), '(data_path, dataset_name, False)\n', (2992, 3024), False, 'from utils.load_dataset import load_data, load_zinc_data, load_ogb_data, load_g6_graphs\n'), ((3158, 3197), 'utils.load_dataset.load_g6_graphs', 'load_g6_graphs', (['data_path', 'dataset_name'], {}), '(data_path, dataset_name)\n', (3172, 3197), False, 'from utils.load_dataset import load_data, load_zinc_data, load_ogb_data, load_g6_graphs\n'), ((3321, 3362), 'utils.load_dataset.load_data', 'load_data', (['data_path', 'dataset_name', '(False)'], {}), '(data_path, dataset_name, False)\n', (3330, 3362), False, 'from utils.load_dataset import load_data, load_zinc_data, load_ogb_data, load_g6_graphs\n'), ((3034, 3058), 'os.path.split', 'os.path.split', (['data_path'], {}), '(data_path)\n', (3047, 3058), False, 'import os\n')] |
"""This module contains unit tests for :mod:`~prody.select`."""
import os
import os.path
import inspect
import numpy as np
from numpy.testing import *
from prody import *
from prody import LOGGER
from prody.tests import unittest
from prody.tests.datafiles import *
from prody.atomic.atommap import DUMMY
try:
range = xrange
except NameError:
pass
prody.atomic.select.DEBUG = False
LOGGER.verbosity = 'none'
TESTS_PATH = os.path.abspath(os.path.split(inspect.getfile(
inspect.currentframe()))[0])
# If a selection string is paired with None, SelectionError is expected
# If two selection strings are paired, they must select exactly same of atoms
# Else, number must be the number atoms that the string is expected to select
pdb3mht = prody.parsePDB(pathDatafile('pdb3mht.pdb'), secondary=True)
SELECTION_TESTS = {'pdb3mht':
{'n_atoms': len(pdb3mht),
'ag': pdb3mht,
'all': pdb3mht.all,
'atommap': AtomMap(pdb3mht, [DUMMY] + list(range(1500)) + [DUMMY] +
list(range(1500, len(pdb3mht))) + [DUMMY]),
'test_flags': [('none', 0),
('all', 3211),
('acidic', 334),
('acyclic', 2040),
('aliphatic', 821),
('aromatic', 475),
('at', 0),
('basic', 450),
('buried', 944),
('cg', 0),
('charged', 784),
('cyclic', 566),
('heme', 0),
('hydrophobic', 999),
('ion', 0),
('large', 1629),
('lipid', 0),
('medium', 689),
('neutral', 1822),
('nucleic', 509),
('nucleotide', 509),
('nucleobase', 0),
('nucleoside', 0),
('polar', 1607),
('protein', 2606, 'aminoacid'),
('stdaa', 2606),
('nonstdaa', 0),
('purine', 0),
('pyrimidine', 0),
('small', 288),
('sugar', 0),
('surface', 1662),
('water', 70),
('hetero', 96),
('hetatm', 96),
('calpha', 327, 'ca'),
('backbone', 1308, 'bb'),
('backbonefull', 1309, 'bbfull'),
('sidechain', 1297, 'sc'),
('carbon', 1920),
('hydrogen', 0),
('noh', 3211),
('nitrogen', 542),
('oxygen', 711),
('sulfur', 14),
('extended', 503),
('helix', 763),
('helix310', 118),
('turn', 0),
('bridge', 0),
('bend', 0),
('coil', 1222),],
'test_without_and': [
('coil protein', 1222),
('sidechain sc protein', 1297),
('bbfull bb', 1308),
('(charged basic)', 450),
('(protein nucleic)', 0),
('noh hetero water', 70, 'water hetero noh'),
('ca occupancy > 0', 327, 'occupancy > 0 ca'),
('ca occupancy - 0 > 0', 327, 'occupancy - 0 > 0 ca'),
('ca occupancy - 0 > 0 + 0', 327,
'occupancy - 0 > 0 + 0 ca'),
('occupancy > ca 0', None),
('noh hetero (water)', 70),
('noh hetero not (water)', 26),
('(water) hetero', 70),
('ca abs(beta) = beta + abs(0)', 327,
'abs(beta) = beta + abs(0) ca'),],
'test_unintended': [
('abs beta = beta', 3211, 'abs (beta) = beta')],
'test_string': [
('name P', 24),
('name P CA', 352),
('name `A 1`', 0),
('chain C', 248),
('chain x', 0),
('chain x y', 0),
('chain x y z', 0),
('chain x y z C', 248),
('chain C D', 521),
('chain CD', 0),
('resname DG', 132),
('resname DG ALA', 212),
('altloc A', 0),
('altloc _', 3211),
('secondary H', 763, 'helix'),
('secondary H E', 1266),
('secondary _', 605),
('segment _', 3211),],
'test_integer': [
('index 10 20 10000', 2),
('serial 0', 0),
('serial 1 2', 2),
('resnum 0', 0),
('resnum 100 105', 13),
('resid 0', 0),
('resid 100 105', 13),
('resid 100 A 105', 13),
('fragindex 0', None),
('fragment 0', None),],
'test_range': [
('index 0:10', 10),
('index 0to10', 11, 'index 0 to 10'),
('serial 0:10:2', 4),
('serial 0:10:10', 0),
('resnum 10to15', 49),
('resnum 10:16:1', 49),
('resnum `-3:16:1`', 125),
('resid 10to15', 49),
('resid 10:16:1', 49),
('x `-10:20`', 673, 'x `-10 to 20`'),
('x 0:20:1', 0),
('beta 13.02:13.01', None)],
'test_float': [
('beta 5.0 41.15 11.85', 2),
('occupancy 1.0', 3211),
('x 6.665', 1),
('y 69.99 13.314', 2),
('z 115.246 45.784', 2),
('charge 0', 0),
('mass 1', 0),
('radius 0', None),
('beta "1."', 0),
('beta = "1."', None),],
'test_comparisons': [
('x = -51.659', 1),
('x != -51.659', 3210),
('z >= 82.813', 1670),
('z < 82.813', 1541),
('beta > 10', 2874),
('beta < 10', 336),
('occupancy > 0.999999', 3211),
('-10 <= x < 0', 557, '-10 <= x and x < 0'),
('11 > 10', None),
('radius > 10', None),
('chain = A', None),
('x x < 1', None),
('name < 1', None),],
'test_operation': [
('x ** 2 < 10', 238),
('x ** 2 ** 2 ** 2 < 10', 87),
('x ** (+2 ** (+2 ** +2)) < 10', 87),
('occupancy % 2 == 1', 3211),
('x**2 + y**2 + z**2 < 10000', 1975),],
'test_function': [
('sqrt(x**2 + y**2 + z**2) < 100', 1975,
'x**2 + y**2 + z**2 < 10000'),
('sqrt(x**2 + y**2 + z**2) == '
'(x**2 + y**2 + z**2) ** 0.5', 3211),
('beta % 3 < 1', 1070),
('beta % 4 % 3 < 1', 1530),
('ceil(beta) == 10', 60),
('floor(beta) == 10', 58),
('abs(x) == sqrt(sq(x))', 3211),
('sq(x-5)+sq(y+4)+sq(z) > sq(100)', 1444),
('1 > sq(occ)', None),
('sq(x x) > 1', None),],
'test_composite': [
('chain x y z C and x 10', 0),
('resnum `1` `2`', 16, 'resnum 1 2'),
('same residue as within 4 of resname SAH', 177),
('name CA and same residue as within 4 of resname SAH', 20),
('water and within 5 of not protein', 70),
('backbone and sqrt((x - 25)**2 + (y - 74)**2 + '
'(z - 13)**2) <= 500', 1308),
('(not resname SAH) and (protein and name CA) or '
'(nucleic and name P)', 351,
'(protein and name CA) or (nucleic and name P)'),
('protein and (backbone or name H)', 1308),
('same residue as within 4 of and resname SAH', None),
('protein and name CA CB and same residue as '
'((x+21.2)**2 + (y-35.9)**2 + (z-80.0)**2)**0.5 < 10',
78, 'protein and name CA CB and same residue as '
'within 10 of center', {'center': np.array([21.2, 35.9, 80.0])})],
'test_within': [
('within 10 of index 0', 72),
('exwithin 100 of index 0', 3210),
('exwithin 4 of resname SAH', 61),
('(within 4 of water) and not water', 534, 'exwithin 4 of water'),
('within 5 of within 5 of within 5 of index 0', 135),
('exwithin 5 of exwithin 5 of exwithin 5 of index 0', 99),
('within 1 of pdb', 3211, None, {'pdb': pdb3mht}),
('exwithin 1 of pdb', 0, None, {'pdb': pdb3mht}),
('exwithin 1 of ag', None, None, {'ag': AtomGroup()}),
('within 100 of index 10000', 0),],
'test_sameas': [
('same residue as index 0', 22),
('same chain as index 0', 248),
('same segment as index 0', 3211),
('same residue as resname DG ALA', 212),
('same chain as chain C', 248),
('same residue as chain X', 0),
('same none as chain C', None),
('same residue as same residue as same residue as index 0', 22,
'resindex 0')],
'test_regexp': [
('resname "S.."', 122),
('name "C.*"', 1920),
('name ".*\'"', 208),
('name "C(A|B)"', 628),
('name "C((A|B)"', None),],
'test_specialchar': [
('altloc ` `', 3211),
('name A` `CA`', 328),
('name `A``', 0),
('z `+100.291`', 1),],
'test_logical': [
('name or name', None),
('name and name', None),
('name CA and name CA', 328),
('name CA or name CA', 328),
('index 0 or index 1 ', 2),
('not not not not index 1', 1),
('index 0 or index 1 or index 2', 3, 'index 0 1 2'),
('index 0 or index 1 or index 2 or index 4', 4, 'index 0 1 2 4'),
('index 0 and index 1 ', 0),
('index < 50 and index < 5', 5, 'index < 5'),
('index < 50 and index < 25 and index < 5', 5),
('index < 5 and index < 25 and index < 50', 5),
('index 0 to 5 and index 0 to 25 and index 0 to 50', 6),
('index < 5 and index < 25 and index < 50 or index < 50 or index < 5',
50),],
'test_kwargs': [
('within 100 of origin', 1975, None, {'origin': np.zeros(3)}),
('within 100 of origin', 1975, None, {'origin': np.zeros((1, 3))}),
('within 100 of origin', 1975, None, {'origin': np.zeros((10, 3))}),
('within 100 of origin', 1975, None, {'origin': np.zeros((50, 3))}),
('within 100 of none', None, None, {'none': np.zeros((50, 3))}),],
'test_equivalent':[
('chain C', 248, 'not not chain C'),
('chain C', 248, 'not not not not chain C'),
('nucleic', 509, 'nucleoside or nucleotide or nucleobase'),],
'test_invalid':[
('chain C and and chain C', None),
('chain C or or chain D', None),
('chain C or not or chain D', None),
('chain C + 3', None),
('sqr(x-5)+sqr(y+4)+sqr(z) > sqr(100)', None),
('x > sq(calpha)', None),
('x > sq(name CA and resname ALA)', None),
('resname ALA and +1', None)],
'test_userdata':[
('temp < 10', 336, 'beta < 10'),
('temp < 10 and chain D', 37, 'temp < 10 and chain D'),
('oc10 - 9 == 1', 3211, 'occupancy 1'),
('temp < 10', 336, 'temp + oc10 < 20'),
('occ', 3211, 'occupancy != 0'),
('occ and occupancy == 1', 3211, 'occupancy != 0'),
('occ and occupancy == 1 and oc10 - 9 == 1', 3211),
('occ and occupancy == 1 and temp < 10', 336),
('occ > 0', None),],
'test_synonyms': [
('chain C', 248, 'chid C'),
('chain C D', 521, 'chid C D'),],
'test_sequence': [
('sequence al', 0),
('sequence A', 80, 'resname ALA'),
('sequence MIEIK', 42, 'resindex 25 to 29'),
('sequence VLNAL', 36, 'resindex 175 to 179'),
('sequence FKPY', 40, 'resindex 348 to 351'),
('sequence "SS."', 20, 'resindex 344 to 346'),
('sequence "S[A-Z]{2}G"', 79,
'resindex 109 to 112 267 to 270 276 to 279'),
('sequence "S.S.S"', 0),
('sequence "."', 2606),],
'test_docexamples': [
('serial 1 2 3', 3),
('serial 1 to 10', 10),
('serial 1:10:2', 5),
('serial < 10', 9),
('beta 555.55', 0),
('beta 1 to 500', 3211),
('beta 1:500', 3211),
('beta < 500', 3211),
('resnum 120A 120B', 0),
('icode A', 0),
('icode _', 3211),
('charge 1', 3211),
('abs(charge) == 1', 3211),
('charge < 0', 0),
('0 < mass < 500', 3211),
('abs(mass) <= mass <= 10', 337),],
}
}
subsets = []
for ch in pdb3mht.iterChains():
subsets.append((ch.getSelstr(), ch.numAtoms()))
for i, res in enumerate(pdb3mht.iterResidues()):
if i % 80 == 0:
subsets.append((res.getSelstr(), res.numAtoms()))
SELECTION_TESTS['pdb3mht']['subsets'] = subsets
ligand = fetchPDBLigand(pathDatafile('sti'))['ideal']
SELECTION_TESTS['imatinib'] = {
'n_atoms': len(ligand),
'ag': ligand,
'all': ligand.all,
'atommap': AtomMap(ligand, [DUMMY] + list(range(10)) + [DUMMY] +
list(range(10, len(ligand))) + [DUMMY]),
'test_bondedto': [
('bonded to index 0', ligand[0].numBonds() + 1),
('exbonded to index 0', ligand[0].numBonds()),
('bonded to index 67', ligand[67].numBonds() + 1),
('exbonded to index 67', ligand[67].numBonds()),
('bonded 2 to index 0', 8, 'bonded to bonded to index 0'),
('bonded 0 to index 0', 0),
('bonded 3 to index 0', 10,
'bonded to bonded to bonded to index 0'),
('bonded 4 to index 0', 13,
'bonded to bonded to bonded to bonded to index 0'),
('bonded 4 to index 0', 13,
'bonded 2 to bonded 2 to index 0'),
('bonded 4 to index 0', 13,
'bonded to bonded 3 to index 0'),
('exbonded 1 to index 0', 3, 'exbonded to index 0'),
('exbonded 2 to index 0', 5,
'exbonded to exbonded to index 0'),
('exbonded 3 to index 0', 5,
'exbonded to exbonded to exbonded to index 0'),
('bonded 20 to index 0', 64),
('bonded 20 to index 10', 68),
('bonded to index 1000', 0),
('fragment 0', len(ligand)),
('fragment 1', 0),
('fragment 0 1 2', len(ligand)),
('fragindex 0 1 2', len(ligand)),
('fragindex 0:2', len(ligand)),],
}
pdb3mht = SELECTION_TESTS['pdb3mht']['ag']
pdb3mht.setCharges(pdb3mht.getOccupancies())
pdb3mht.setMasses(pdb3mht.getBetas())
pdb3mht.setData('temp', pdb3mht.getBetas())
pdb3mht.setData('oc10', pdb3mht.getOccupancies() * 10)
pdb3mht.setFlags('occ', pdb3mht.getOccupancies().astype(bool))
SELECT = prody.Select()
EMPTYDICT = {}
class TestSelect(unittest.TestCase):
    """Test :class:`.Select`.

    This class body is intentionally empty: one test method per entry in
    ``SELECTION_TESTS`` is generated and attached dynamically by the
    module-level loop that follows this definition.
    """
    pass
for case, items in SELECTION_TESTS.items():
for key, tests in items.items():
if not key.startswith('test_'):
continue
type_ = key[5:]
count = 0
for test in tests:
def func(self, pdb=case, test=test, type_=type_, **kwargs):
atoms = SELECTION_TESTS[pdb]['ag']
selstr = test[0]
natoms = test[1]
selstr2 = None
kwargs = EMPTYDICT
if len(test) == 3:
selstr2 = test[2]
if len(test) == 4:
kwargs = test[3]
if natoms is None:
self.assertRaises(prody.select.SelectionError,
SELECT.getIndices, atoms, selstr, **kwargs)
elif selstr2 is None:
sel = SELECT.getIndices(atoms, selstr, **kwargs)
self.assertEqual(len(sel), natoms,
'selection {0} for {1} failed, expected '
'{2}, selected {3}'.format(repr(selstr),
str(atoms), natoms, len(sel)))
else:
sel = SELECT.getIndices(atoms, selstr, **kwargs)
sel2 = SELECT.getIndices(atoms, selstr2, **kwargs)
self.assertTrue(len(sel) == len(sel2) == natoms and
np.all(sel == sel2),
'selection strings {0} and {1} for '
'{2} failed to select same number of atoms, '
'expected ({3})'.format(repr(selstr),
repr(selstr2), str(atoms), natoms))
count += 1
func.__name__ = 'test{0}Selection{1}'.format(
type_.title(), count)
func.__doc__ = ('Test {0} selections {1} for '
'{2}').format(type_,
repr(test[0]), case)
setattr(TestSelect, func.__name__, func)
@dec.slow
def func(self, pdb=case, test=test, type_=type_, **kwargs):
atoms = SELECTION_TESTS[pdb]['all']
selstr = test[0]
natoms = test[1]
selstr2 = None
kwargs = EMPTYDICT
if len(test) == 3:
selstr2 = test[2]
if len(test) == 4:
kwargs = test[3]
if natoms is None:
self.assertRaises(prody.select.SelectionError,
SELECT.getIndices, atoms, selstr, **kwargs)
elif selstr2 is None:
sel = SELECT.getIndices(atoms, selstr, **kwargs)
self.assertEqual(len(sel), natoms,
'selection {0} for {1} failed, expected '
'{2}, selected {3}'.format(repr(selstr),
str(atoms), natoms, len(sel)))
else:
sel = SELECT.getIndices(atoms, selstr, **kwargs)
sel2 = SELECT.getIndices(atoms, selstr2, **kwargs)
self.assertTrue(len(sel) == len(sel2) == natoms and
np.all(sel == sel2),
'selection strings {0} and {1} for '
'{2} failed to select same number of atoms, '
'expected ({3})'.format(repr(selstr),
repr(selstr2), str(atoms), natoms))
count += 1
func.__name__ = 'test{0}Selection{1}'.format(type_.title(),
count)
func.__doc__ = 'Test {0} selections "{1}"'.format(type_,
test[0])
setattr(TestSelect, func.__name__, func)
del func
MACROS = [('cacb', 'name CA CB'),
('donors', '(protein) and (name N NE NH2 ND2 NE2 ND1 OG OH NH1 '
'SG OG1 NE2 NZ NE1 ND1 NE2)')]
class TestMacros(unittest.TestCase):
    """Test selection macros."""
    def testMacroFunctions(self):
        """Define, read back, and delete each macro in MACROS.

        The definition must round-trip unchanged through
        :func:`prody.defSelectionMacro` / :func:`prody.getSelectionMacro`.
        """
        for name, macro in MACROS:
            prody.defSelectionMacro(name, macro)
            self.assertEqual(prody.getSelectionMacro(name), macro,
                'failed to get correct macro definition')
            # Clean up so macros do not leak into other tests.
            prody.delSelectionMacro(name)
count = 0
for name, macro in MACROS:
def func(self, name=name, macro=macro):
prody.defSelectionMacro(name, macro)
for key, case in SELECTION_TESTS.items():
atoms = case['ag']
assert_equal(
SELECT.getBoolArray(atoms, macro),
SELECT.getBoolArray(atoms, name),
'failed to select correct selection using macro')
prody.delSelectionMacro(name)
count += 1
func.__name__ = 'testMacro{0}'.format(count)
func.__doc__ = 'Test macro *{0}*: {1}'.format(name, repr(macro))
setattr(TestMacros, func.__name__, func)
del func
def testGetBoolArray():
    """getBoolArray must return one boolean entry per atom of the subset."""
    ca = pdb3mht.ca
    assert_equal(len(ca), len(SELECT.getBoolArray(ca, 'index 510')))
| [
"numpy.all",
"inspect.currentframe",
"numpy.array",
"numpy.zeros"
] | [((531, 553), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (551, 553), False, 'import inspect\n'), ((6862, 6890), 'numpy.array', 'np.array', (['[21.2, 35.9, 80.0]'], {}), '([21.2, 35.9, 80.0])\n', (6870, 6890), True, 'import numpy as np\n'), ((9038, 9049), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (9046, 9049), True, 'import numpy as np\n'), ((9109, 9125), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (9117, 9125), True, 'import numpy as np\n'), ((9185, 9202), 'numpy.zeros', 'np.zeros', (['(10, 3)'], {}), '((10, 3))\n', (9193, 9202), True, 'import numpy as np\n'), ((9262, 9279), 'numpy.zeros', 'np.zeros', (['(50, 3)'], {}), '((50, 3))\n', (9270, 9279), True, 'import numpy as np\n'), ((9335, 9352), 'numpy.zeros', 'np.zeros', (['(50, 3)'], {}), '((50, 3))\n', (9343, 9352), True, 'import numpy as np\n'), ((15110, 15129), 'numpy.all', 'np.all', (['(sel == sel2)'], {}), '(sel == sel2)\n', (15116, 15129), True, 'import numpy as np\n'), ((16969, 16988), 'numpy.all', 'np.all', (['(sel == sel2)'], {}), '(sel == sel2)\n', (16975, 16988), True, 'import numpy as np\n')] |
import pytest
import numpy as np
from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image
def test_metadata_from_json():
    """ImageMetadata built from a confocal scan JSON exposes resolution and pixel time."""
    json = { 'cereal_class_version': 1,
             'fluorescence': True,
             'force': False,
             'scan count': 0,
             'scan volume': {'center point (um)': {'x': 58.075877109272604,
                                                   'y': 31.978375270573267,
                                                   'z': 0},
                             'cereal_class_version': 1,
                             'pixel time (ms)': 0.5,
                             'scan axes': [{'axis': 0,
                                            'cereal_class_version': 1,
                                            'num of pixels': 240,
                                            'pixel size (nm)': 150,
                                            'scan time (ms)': 0,
                                            'scan width (um)': 36.07468112612217}]}}
    image_metadata = ImageMetadata.from_dataset(json)
    res = image_metadata.resolution
    # Pixel size is 150 nm, so resolution in pixels/cm is 1e7 nm per cm / 150 nm.
    assert np.isclose(res[0], 1e7 / 150)
    assert np.isclose(res[1], 1e7 / 150)
    assert res[2] == 'CENTIMETER'
    # 0.5 ms pixel time in the JSON is reported as 0.0005 with unit seconds.
    assert np.isclose(image_metadata.metadata['PixelTime'], .0005)
    assert image_metadata.metadata['PixelTimeUnit'] == 's'
def test_timestamps_image():
    """Line timestamps are reconstructed from an infowave at two pixel widths."""
    wave = np.array([0, 1, 0, 1, 1, 0, 2, 1, 0, 1, 0, 0, 1, 2, 1, 1, 1, 2])
    stamps = np.arange(1, 19)

    # One line of 5 pixels: a single timestamp, taken at the line start.
    single = line_timestamps_image(stamps, wave, 5)
    assert single.shape == (1,)
    assert np.array_equal(single, [1])

    # Two lines of 2 pixels each.
    double = line_timestamps_image(stamps, wave, 2)
    assert double.shape == (2,)
    assert np.array_equal(double, [1, 15])
def test_reconstruct():
    """Photon counts are binned into image pixels according to the infowave."""
    wave = np.array([0, 1, 0, 1, 1, 0, 2, 1, 0, 1, 0, 0, 1, 2, 1, 1, 1, 2])
    counts = np.array([1] * 7 + [2] * 7 + [3] * 4)

    img = reconstruct_image(counts, wave, 5)
    assert img.shape == (1, 5)
    assert np.array_equal(img, [[4, 8, 12, 0, 0]])

    img = reconstruct_image(counts, wave, 2)
    assert img.shape == (2, 2)
    assert np.array_equal(img, [[4, 8], [12, 0]])
def test_reconstruct_multiframe():
    """Shape and frame-count bookkeeping for multi-frame reconstructions."""
    size = 100
    wave = np.ones(size)
    wave[9::10] = 2  # pixel boundary every 10 samples -> 10 pixels in total
    data = np.arange(size)

    # (extra reconstruct_image args) -> expected image shape
    shape_cases = [
        ((5,), (2, 5)),
        ((2,), (5, 2)),
        ((1,), (10, 1)),
        ((2, 2), (3, 2, 2)),
        ((2, 3), (2, 3, 2)),
        ((2, 5), (5, 2)),
    ]
    for extra_args, expected_shape in shape_cases:
        assert reconstruct_image(data, wave, *extra_args).shape == expected_shape

    # lines per frame -> expected number of frames (2 pixels per line)
    for lines_per_frame, expected_frames in [(2, 3), (3, 2), (5, 1)]:
        assert reconstruct_num_frames(wave, 2, lines_per_frame) == expected_frames
def test_int_tiff(tmpdir):
    """Exporting uint16 image data: dtype conversion rules and TIFF tag contents."""
    def grab_tags(file):
        # Read back all TIFF tags from the first page; tag values that are
        # Python-literal strings (e.g. the ImageDescription dict) are parsed.
        import tifffile
        from ast import literal_eval
        with tifffile.TiffFile(file) as tif:
            tiff_tags = {}
            for tag in tif.pages[0].tags.values():
                name, value = tag.name, tag.value
                try:
                    tiff_tags[name] = literal_eval(value)
                except (ValueError, SyntaxError):
                    tiff_tags[name] = value
        return tiff_tags
    # Full-scale uint16 image.
    image16 = np.ones(shape=(10, 10, 3)) * np.iinfo(np.uint16).max
    # Same-width and widening exports succeed; narrowing needs clip=True.
    save_tiff(image16, str(tmpdir.join("1")), dtype=np.uint16, metadata=ImageMetadata(pixel_size_x=1.0, pixel_time=1.0))
    save_tiff(image16, str(tmpdir.join("2")), dtype=np.float32, metadata=ImageMetadata(pixel_size_x=5.0, pixel_time=5.0))
    save_tiff(image16, str(tmpdir.join("3")), dtype=np.uint8, clip=True)
    # Narrowing conversions without clip must be refused.
    with pytest.raises(RuntimeError) as excinfo:
        save_tiff(image16, str(tmpdir.join("4")), dtype=np.uint8)
    assert "Can't safely export image with `dtype=uint8` channels" in str(excinfo.value)
    with pytest.raises(RuntimeError) as excinfo:
        save_tiff(image16, str(tmpdir.join("5")), dtype=np.float16)
    assert "Can't safely export image with `dtype=float16` channels" in str(excinfo.value)
    # Pixel size 1.0 um -> 1e7 pixels per cm; pixel time 1.0 ms -> 0.001 s.
    tags = grab_tags(str(tmpdir.join("1")))
    assert str(tags['ResolutionUnit']) == "RESUNIT.CENTIMETER"
    assert np.allclose(tags['ImageDescription']['PixelTime'], 0.001)
    assert tags['ImageDescription']['PixelTimeUnit'] == "s"
    assert np.allclose(tags['ImageDescription']['shape'], [10, 10, 3])
    assert np.allclose(tags['XResolution'][0], 10000000)
    assert np.allclose(tags['YResolution'][0], 10000000)
    # Pixel size 5.0 um -> 2e6 pixels per cm; pixel time 5.0 ms -> 0.005 s.
    tags = grab_tags(str(tmpdir.join("2")))
    assert str(tags['ResolutionUnit']) == "RESUNIT.CENTIMETER"
    assert np.allclose(tags['ImageDescription']['PixelTime'], 0.005)
    assert tags['ImageDescription']['PixelTimeUnit'] == "s"
    assert np.allclose(tags['ImageDescription']['shape'], [10, 10, 3])
    assert np.allclose(tags['XResolution'][0], 2000000)
    assert np.allclose(tags['YResolution'][0], 2000000)
def test_float_tiff(tmpdir):
    """Exporting float32 image data, including refusal of unsafe narrowing."""
    image32 = np.full((10, 10, 3), np.finfo(np.float32).max)

    # Lossless export and explicitly clipped downcast both succeed.
    save_tiff(image32, str(tmpdir.join("1")), dtype=np.float32)
    save_tiff(image32, str(tmpdir.join("2")), dtype=np.float16, clip=True)

    # Narrowing without clip=True must raise.
    with pytest.raises(RuntimeError) as excinfo:
        save_tiff(image32, str(tmpdir.join("3")), dtype=np.float16)
    assert "Can't safely export image with `dtype=float16` channels" in str(excinfo.value)

    with pytest.raises(RuntimeError) as excinfo:
        save_tiff(image32, str(tmpdir.join("1")), dtype=np.uint16)
    assert "Can't safely export image with `dtype=uint16` channels" in str(excinfo.value)
| [
"lumicks.pylake.detail.image.reconstruct_image",
"tifffile.TiffFile",
"numpy.allclose",
"numpy.isclose",
"numpy.ones",
"lumicks.pylake.detail.image.line_timestamps_image",
"numpy.iinfo",
"lumicks.pylake.detail.image.reconstruct_num_frames",
"ast.literal_eval",
"numpy.array",
"lumicks.pylake.deta... | [((1084, 1116), 'lumicks.pylake.detail.image.ImageMetadata.from_dataset', 'ImageMetadata.from_dataset', (['json'], {}), '(json)\n', (1110, 1116), False, 'from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image\n'), ((1165, 1201), 'numpy.isclose', 'np.isclose', (['res[0]', '(10000000.0 / 150)'], {}), '(res[0], 10000000.0 / 150)\n', (1175, 1201), True, 'import numpy as np\n'), ((1206, 1242), 'numpy.isclose', 'np.isclose', (['res[1]', '(10000000.0 / 150)'], {}), '(res[1], 10000000.0 / 150)\n', (1216, 1242), True, 'import numpy as np\n'), ((1282, 1338), 'numpy.isclose', 'np.isclose', (["image_metadata.metadata['PixelTime']", '(0.0005)'], {}), "(image_metadata.metadata['PixelTime'], 0.0005)\n", (1292, 1338), True, 'import numpy as np\n'), ((1443, 1507), 'numpy.array', 'np.array', (['[0, 1, 0, 1, 1, 0, 2, 1, 0, 1, 0, 0, 1, 2, 1, 1, 1, 2]'], {}), '([0, 1, 0, 1, 1, 0, 2, 1, 0, 1, 0, 0, 1, 2, 1, 1, 1, 2])\n', (1451, 1507), True, 'import numpy as np\n'), ((1526, 1599), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])\n', (1534, 1599), True, 'import numpy as np\n'), ((1619, 1659), 'lumicks.pylake.detail.image.line_timestamps_image', 'line_timestamps_image', (['time', 'infowave', '(5)'], {}), '(time, infowave, 5)\n', (1640, 1659), False, 'from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image\n'), ((1708, 1734), 'numpy.all', 'np.all', (['(line_stamps == [1])'], {}), '(line_stamps == [1])\n', (1714, 1734), True, 'import numpy as np\n'), ((1754, 1794), 'lumicks.pylake.detail.image.line_timestamps_image', 'line_timestamps_image', (['time', 'infowave', '(2)'], {}), '(time, infowave, 2)\n', (1775, 1794), False, 'from lumicks.pylake.detail.image import reconstruct_image, 
reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image\n'), ((1843, 1873), 'numpy.all', 'np.all', (['(line_stamps == [1, 15])'], {}), '(line_stamps == [1, 15])\n', (1849, 1873), True, 'import numpy as np\n'), ((1915, 1979), 'numpy.array', 'np.array', (['[0, 1, 0, 1, 1, 0, 2, 1, 0, 1, 0, 0, 1, 2, 1, 1, 1, 2]'], {}), '([0, 1, 0, 1, 1, 0, 2, 1, 0, 1, 0, 0, 1, 2, 1, 1, 1, 2])\n', (1923, 1979), True, 'import numpy as np\n'), ((1995, 2059), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3]'], {}), '([1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3])\n', (2003, 2059), True, 'import numpy as np\n'), ((2073, 2113), 'lumicks.pylake.detail.image.reconstruct_image', 'reconstruct_image', (['the_data', 'infowave', '(5)'], {}), '(the_data, infowave, 5)\n', (2090, 2113), False, 'from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image\n'), ((2158, 2191), 'numpy.all', 'np.all', (['(image == [4, 8, 12, 0, 0])'], {}), '(image == [4, 8, 12, 0, 0])\n', (2164, 2191), True, 'import numpy as np\n'), ((2205, 2245), 'lumicks.pylake.detail.image.reconstruct_image', 'reconstruct_image', (['the_data', 'infowave', '(2)'], {}), '(the_data, infowave, 2)\n', (2222, 2245), False, 'from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image\n'), ((2290, 2324), 'numpy.all', 'np.all', (['(image == [[4, 8], [12, 0]])'], {}), '(image == [[4, 8], [12, 0]])\n', (2296, 2324), True, 'import numpy as np\n'), ((2392, 2405), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (2399, 2405), True, 'import numpy as np\n'), ((2445, 2460), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (2454, 2460), True, 'import numpy as np\n'), ((4461, 4518), 'numpy.allclose', 'np.allclose', (["tags['ImageDescription']['PixelTime']", '(0.001)'], {}), "(tags['ImageDescription']['PixelTime'], 0.001)\n", (4472, 4518), True, 
'import numpy as np\n'), ((4590, 4649), 'numpy.allclose', 'np.allclose', (["tags['ImageDescription']['shape']", '[10, 10, 3]'], {}), "(tags['ImageDescription']['shape'], [10, 10, 3])\n", (4601, 4649), True, 'import numpy as np\n'), ((4661, 4706), 'numpy.allclose', 'np.allclose', (["tags['XResolution'][0]", '(10000000)'], {}), "(tags['XResolution'][0], 10000000)\n", (4672, 4706), True, 'import numpy as np\n'), ((4718, 4763), 'numpy.allclose', 'np.allclose', (["tags['YResolution'][0]", '(10000000)'], {}), "(tags['YResolution'][0], 10000000)\n", (4729, 4763), True, 'import numpy as np\n'), ((4883, 4940), 'numpy.allclose', 'np.allclose', (["tags['ImageDescription']['PixelTime']", '(0.005)'], {}), "(tags['ImageDescription']['PixelTime'], 0.005)\n", (4894, 4940), True, 'import numpy as np\n'), ((5012, 5071), 'numpy.allclose', 'np.allclose', (["tags['ImageDescription']['shape']", '[10, 10, 3]'], {}), "(tags['ImageDescription']['shape'], [10, 10, 3])\n", (5023, 5071), True, 'import numpy as np\n'), ((5083, 5127), 'numpy.allclose', 'np.allclose', (["tags['XResolution'][0]", '(2000000)'], {}), "(tags['XResolution'][0], 2000000)\n", (5094, 5127), True, 'import numpy as np\n'), ((5139, 5183), 'numpy.allclose', 'np.allclose', (["tags['YResolution'][0]", '(2000000)'], {}), "(tags['YResolution'][0], 2000000)\n", (5150, 5183), True, 'import numpy as np\n'), ((2898, 2936), 'lumicks.pylake.detail.image.reconstruct_num_frames', 'reconstruct_num_frames', (['infowave', '(2)', '(2)'], {}), '(infowave, 2, 2)\n', (2920, 2936), False, 'from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image\n'), ((2953, 2991), 'lumicks.pylake.detail.image.reconstruct_num_frames', 'reconstruct_num_frames', (['infowave', '(2)', '(3)'], {}), '(infowave, 2, 3)\n', (2975, 2991), False, 'from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image\n'), ((3008, 3046), 
'lumicks.pylake.detail.image.reconstruct_num_frames', 'reconstruct_num_frames', (['infowave', '(2)', '(5)'], {}), '(infowave, 2, 5)\n', (3030, 3046), False, 'from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image\n'), ((3559, 3585), 'numpy.ones', 'np.ones', ([], {'shape': '(10, 10, 3)'}), '(shape=(10, 10, 3))\n', (3566, 3585), True, 'import numpy as np\n'), ((3938, 3965), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3951, 3965), False, 'import pytest\n'), ((4143, 4170), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (4156, 4170), False, 'import pytest\n'), ((5229, 5255), 'numpy.ones', 'np.ones', ([], {'shape': '(10, 10, 3)'}), '(shape=(10, 10, 3))\n', (5236, 5255), True, 'import numpy as np\n'), ((5432, 5459), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (5445, 5459), False, 'import pytest\n'), ((5641, 5668), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (5654, 5668), False, 'import pytest\n'), ((2473, 2513), 'lumicks.pylake.detail.image.reconstruct_image', 'reconstruct_image', (['the_data', 'infowave', '(5)'], {}), '(the_data, infowave, 5)\n', (2490, 2513), False, 'from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image\n'), ((2541, 2581), 'lumicks.pylake.detail.image.reconstruct_image', 'reconstruct_image', (['the_data', 'infowave', '(2)'], {}), '(the_data, infowave, 2)\n', (2558, 2581), False, 'from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image\n'), ((2609, 2649), 'lumicks.pylake.detail.image.reconstruct_image', 'reconstruct_image', (['the_data', 'infowave', '(1)'], {}), '(the_data, infowave, 1)\n', (2626, 2649), False, 'from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, 
save_tiff, ImageMetadata, line_timestamps_image\n'), ((2678, 2721), 'lumicks.pylake.detail.image.reconstruct_image', 'reconstruct_image', (['the_data', 'infowave', '(2)', '(2)'], {}), '(the_data, infowave, 2, 2)\n', (2695, 2721), False, 'from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image\n'), ((2752, 2795), 'lumicks.pylake.detail.image.reconstruct_image', 'reconstruct_image', (['the_data', 'infowave', '(2)', '(3)'], {}), '(the_data, infowave, 2, 3)\n', (2769, 2795), False, 'from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image\n'), ((2826, 2869), 'lumicks.pylake.detail.image.reconstruct_image', 'reconstruct_image', (['the_data', 'infowave', '(2)', '(5)'], {}), '(the_data, infowave, 2, 5)\n', (2843, 2869), False, 'from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image\n'), ((3181, 3204), 'tifffile.TiffFile', 'tifffile.TiffFile', (['file'], {}), '(file)\n', (3198, 3204), False, 'import tifffile\n'), ((3588, 3607), 'numpy.iinfo', 'np.iinfo', (['np.uint16'], {}), '(np.uint16)\n', (3596, 3607), True, 'import numpy as np\n'), ((3684, 3731), 'lumicks.pylake.detail.image.ImageMetadata', 'ImageMetadata', ([], {'pixel_size_x': '(1.0)', 'pixel_time': '(1.0)'}), '(pixel_size_x=1.0, pixel_time=1.0)\n', (3697, 3731), False, 'from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image\n'), ((3806, 3853), 'lumicks.pylake.detail.image.ImageMetadata', 'ImageMetadata', ([], {'pixel_size_x': '(5.0)', 'pixel_time': '(5.0)'}), '(pixel_size_x=5.0, pixel_time=5.0)\n', (3819, 3853), False, 'from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image\n'), ((5258, 5278), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), 
'(np.float32)\n', (5266, 5278), True, 'import numpy as np\n'), ((3400, 3419), 'ast.literal_eval', 'literal_eval', (['value'], {}), '(value)\n', (3412, 3419), False, 'from ast import literal_eval\n')] |
"""
Dynamics-aware Adversarial Attack of 3D Sparse Convolution Network
@Author:
<NAME>,
<NAME>
@Contact:
<EMAIL>,
<EMAIL>
@Time:
2022/1/23 9:32 PM
"""
import os
from datetime import datetime
import numpy as np
np.seterr(divide='ignore',invalid='ignore')
import torch
from torch import nn
import torch.nn.functional as F
import utils
from config.scannet import parser, parameters
from models.res16unet import Res16UNet34C
import models.scannet_model as new_model
# Set labels for ScanNet dataset
VALID_CLASS_NAMES = utils.SCANNET_VALID_CLASS_NAMES
### Initialization ###
# Parse CLI arguments; `parser` comes from config.scannet.
config = parser.parse_args()
# Experiment name: reuse the resumed run's directory name, otherwise fall
# back to a timestamped name when none was supplied.
if config.resume_path is not None:
    config.exp_name = os.path.split(config.resume_path)[-1]
elif config.exp_name is None:
    dt = datetime.now()
    config.exp_name = 'Logs_' + dt.strftime('%Y-%m-%d_%H-%M-%S')
# All outputs are grouped by perturbation budget, then experiment name.
save_path = os.path.join('outputs/scannet', 'budget_' + str(config.budget), config.exp_name)
if not os.path.exists(save_path):
    os.makedirs(save_path)
# IOStream logs to both stdout and <save_path>/run.log (per utils.IOStream usage).
io = utils.IOStream(save_path + '/run.log')
# Load default attack hyper-parameters for the chosen budget, but only for
# options the user did not set explicitly on the command line.
if config.default_para == True:
    if config.budget not in [0.005, 0.01, 0.02, 0.05]:
        io.cprint('Cannot load default attack parameters for budget ' + str(config.budget))
    else:
        if config.iter_num is None:
            config.iter_num = parameters[config.budget][config.dynamics_aware]['iter_num']
        if config.step is None:
            config.step = parameters[config.budget][config.dynamics_aware]['step']
        if config.lamda_input is None:
            config.lamda_input = parameters[config.budget][config.dynamics_aware]['lamda_input']
        if config.lamda_conv is None:
            config.lamda_conv = parameters[config.budget][config.dynamics_aware]['lamda_conv']
        if config.lamda_output is None:
            config.lamda_output = parameters[config.budget][config.dynamics_aware]['lamda_output']
        io.cprint('Successfully load default attack parameters for budget {}'.format(config.budget))
# Abort with a usage hint if any required attack parameter is still missing
# (reports every missing one before exiting, rather than failing on the first).
lack_para = False
if config.iter_num is None:
    io.cprint('Please give iteration number with --iter_num')
    lack_para = True
if config.step is None:
    io.cprint('Please give step size with --step')
    lack_para = True
if config.lamda_input is None:
    io.cprint('Please give lamda for input with --lamda_input')
    lack_para = True
if config.lamda_conv is None:
    io.cprint('Please give lamda for convolution with --lamda_conv')
    lack_para = True
if config.lamda_output is None:
    io.cprint('Please give lamda for output with --lamda_output')
    lack_para = True
if lack_para:
    exit(1)
# Record the full configuration in the run log for reproducibility.
io.cprint(str(config))
# Primary device; the first GPU always hosts the original model and stage 1
# of the split dynamics-aware model.
device = torch.device('cuda:0')
num_devices = torch.cuda.device_count()
io.cprint('Use {} GPU cards'.format(num_devices))
# Define a model (the unmodified sparse-conv network used for evaluation)
# and load the pretrained weights.
model = Res16UNet34C(3, 20, config).to(device)
model_dict = torch.load(config.weights, map_location=device)
model.load_state_dict(model_dict['state_dict'])
model.eval()
# For the dynamics-aware attack the network is manually pipelined across
# GPUs: depending on how many cards are available, the architecture is
# instantiated as 8, 4, 2, or 1 sequential stage(s), each stage loaded with
# the same pretrained checkpoint.
# NOTE(review): each remote stage re-reads the checkpoint from disk via
# torch.load(..., map_location='cuda:N') even though `model_dict` is already
# in memory — presumably to place tensors directly on the target card;
# confirm before simplifying.
if config.dynamics_aware:
    if num_devices >= 8:
        num_devices_real = 8
        new_model_1 = new_model.NewRes16UNet34C_d8_1(3, 20, config).to(device)
        new_model_1.load_state_dict(model_dict['state_dict'])
        new_model_1.eval()
        with torch.cuda.device('cuda:1'):
            new_model_2 = new_model.NewRes16UNet34C_d8_2(3, 20, config).to('cuda:1')
            new_model_2.load_state_dict(torch.load(config.weights, map_location='cuda:1')['state_dict'])
            new_model_2.eval()
        with torch.cuda.device('cuda:2'):
            new_model_3 = new_model.NewRes16UNet34C_d8_3(3, 20, config).to('cuda:2')
            new_model_3.load_state_dict(torch.load(config.weights, map_location='cuda:2')['state_dict'])
            new_model_3.eval()
        with torch.cuda.device('cuda:3'):
            new_model_4 = new_model.NewRes16UNet34C_d8_4(3, 20, config).to('cuda:3')
            new_model_4.load_state_dict(torch.load(config.weights, map_location='cuda:3')['state_dict'])
            new_model_4.eval()
        with torch.cuda.device('cuda:4'):
            new_model_5 = new_model.NewRes16UNet34C_d8_5(3, 20, config).to('cuda:4')
            new_model_5.load_state_dict(torch.load(config.weights, map_location='cuda:4')['state_dict'])
            new_model_5.eval()
        with torch.cuda.device('cuda:5'):
            new_model_6 = new_model.NewRes16UNet34C_d8_6(3, 20, config).to('cuda:5')
            new_model_6.load_state_dict(torch.load(config.weights, map_location='cuda:5')['state_dict'])
            new_model_6.eval()
        with torch.cuda.device('cuda:6'):
            new_model_7 = new_model.NewRes16UNet34C_d8_7(3, 20, config).to('cuda:6')
            new_model_7.load_state_dict(torch.load(config.weights, map_location='cuda:6')['state_dict'])
            new_model_7.eval()
        with torch.cuda.device('cuda:7'):
            new_model_8 = new_model.NewRes16UNet34C_d8_8(3, 20, config).to('cuda:7')
            new_model_8.load_state_dict(torch.load(config.weights, map_location='cuda:7')['state_dict'])
            new_model_8.eval()
    elif num_devices >= 4:
        num_devices_real = 4
        new_model_1 = new_model.NewRes16UNet34C_d4_1(3, 20, config).to(device)
        new_model_1.load_state_dict(model_dict['state_dict'])
        new_model_1.eval()
        with torch.cuda.device('cuda:1'):
            new_model_2 = new_model.NewRes16UNet34C_d4_2(3, 20, config).to('cuda:1')
            new_model_2.load_state_dict(torch.load(config.weights, map_location='cuda:1')['state_dict'])
            new_model_2.eval()
        with torch.cuda.device('cuda:2'):
            new_model_3 = new_model.NewRes16UNet34C_d4_3(3, 20, config).to('cuda:2')
            new_model_3.load_state_dict(torch.load(config.weights, map_location='cuda:2')['state_dict'])
            new_model_3.eval()
        with torch.cuda.device('cuda:3'):
            new_model_4 = new_model.NewRes16UNet34C_d4_4(3, 20, config).to('cuda:3')
            new_model_4.load_state_dict(torch.load(config.weights, map_location='cuda:3')['state_dict'])
            new_model_4.eval()
    elif num_devices >= 2:
        num_devices_real = 2
        new_model_1 = new_model.NewRes16UNet34C_d2_1(3, 20, config).to(device)
        new_model_1.load_state_dict(model_dict['state_dict'])
        new_model_1.eval()
        with torch.cuda.device('cuda:1'):
            new_model_2 = new_model.NewRes16UNet34C_d2_2(3, 20, config).to('cuda:1')
            new_model_2.load_state_dict(torch.load(config.weights, map_location='cuda:1')['state_dict'])
            new_model_2.eval()
    else:
        num_devices_real = 1
        # NOTE(review): this rebinds the module name `new_model` (imported as
        # models.scannet_model) to a model instance; the module is no longer
        # reachable under that name afterwards. It works here because the
        # module is not used again, but it is fragile.
        new_model = new_model.NewRes16UNet34C(3, 20, config).to(device)
        new_model.load_state_dict(model_dict['state_dict'])
        new_model.eval()
### Attack ###
# Note:
# Because the perturbed point cloud finally needs to be tested in the original
# sparse convolution network, we evaluate the original network at each attack
# iteration and keep the perturbation that degrades it the most.
labels_pcl_all, preds_pcl_all = np.array([]), np.array([])
# Room list for the ScanNet validation split; strip the trailing newline of
# each line.
with open(os.path.join(config.data_path, 'scannetv2_val.txt'), 'r') as f:
    all_rooms = f.readlines()
all_rooms = [room[:-1] for room in all_rooms]
room_num = len(all_rooms)
num_classes = len(VALID_CLASS_NAMES)
io.cprint('ScanNet Class Number: {}'.format(num_classes))
# Start attack for each room
for i, room_name in enumerate(all_rooms):
    # Per-room state: coordinates/labels are (re)loaded on the first
    # iteration; *_best variables track the most damaging perturbation found.
    coords_pcl = None
    labels_pcl = None
    probs_pcl_orig_best = None
    coords_pcl_best = None
    mIoU_orig_best = 100
    load_attacked_coords = False
    # Resume support: if already-attacked coordinates exist for this room,
    # load them and only evaluate (the iteration loop breaks after the first
    # no-grad evaluation below).
    if config.resume_path is not None:
        attacked_coords_path = os.path.join(config.resume_path, 'coord', room_name + '.txt')
        if os.path.exists(attacked_coords_path):
            coords_pcl = np.loadtxt(attacked_coords_path)
            load_attacked_coords = True
    # NOTE(review): `iter` shadows the builtin of the same name inside this loop.
    for iter in range(config.iter_num):
        data = os.path.join(config.data_path, room_name)
        # Obtain performance on original sparse convolution network
        with torch.no_grad():
            data = os.path.join(config.data_path, room_name)
            idx, inverse_idx, coords_pcl, sinput_orig, labels_pcl = \
                utils.generate_input_sparse_tensor(
                    data,
                    config,
                    coords_pcl=coords_pcl,
                    labels_pcl=labels_pcl,
                    extend=False,
                    dataset='scannet')
            if iter == 0:
                # First iteration: convert raw labels once and remember the
                # unperturbed coordinates (budget is enforced relative to them).
                labels_pcl = utils.convert_label_scannet(labels_pcl)
                coords_pcl0 = coords_pcl.clone()
                coords_pcl_list = [coords_pcl0[1].unsqueeze(0)]
            sinput_orig = sinput_orig.to(device)
            soutput_orig = model(sinput_orig)
            # Voxel-level argmax predictions, mapped back to points via inverse_idx.
            preds_vox_orig = soutput_orig.F.max(1)[1].cpu().numpy()
            preds_pcl_orig = preds_vox_orig[inverse_idx]
            if config.save_probs:
                probs_vox_orig = torch.nn.functional.softmax(soutput_orig.F, dim=1).cpu().numpy()
                probs_pcl_orig = probs_vox_orig[inverse_idx]
            intersection, union, target = utils.intersectionAndUnion(
                preds_pcl_orig, labels_pcl, num_classes, 255)
            mIoU_orig = np.nanmean(intersection / union)
            if iter == 0:
                # Baseline (no-attack) performance for this room.
                mIoU_orig0 = mIoU_orig
                preds_pcl_orig0 = preds_pcl_orig
                preds_pcl_orig_best = preds_pcl_orig
                if config.save_probs:
                    probs_pcl_orig_best = probs_pcl_orig
                coords_pcl_best = coords_pcl.clone()
            if mIoU_orig < mIoU_orig_best:
                # Keep the perturbation giving the lowest original-network mIoU.
                mIoU_orig_best = mIoU_orig
                preds_pcl_orig_best = preds_pcl_orig
                if config.save_probs:
                    probs_pcl_orig_best = probs_pcl_orig
                coords_pcl_best = coords_pcl.clone()
                # The coordinates evaluated here were perturbed at the end of
                # the previous iteration, hence `iter - 1`.
                # NOTE(review): if mIoU_orig is NaN on every iteration this
                # branch never runs and `iter_best` below would be unbound.
                iter_best = iter - 1
            torch.cuda.empty_cache()
        if load_attacked_coords:
            break
        # Obtain performance on our modified dynamics-aware sparse convolution network
        idx, inverse_idx, coords_vox, coords_pcl, sinput, occupy_conv, valid = \
            utils.generate_input_sparse_tensor(
                data,
                config,
                coords_pcl=coords_pcl,
                coords_pcl0=coords_pcl0,
                labels_pcl=labels_pcl,
                dataset='scannet')
        sinput = sinput.to(device)
        if config.dynamics_aware:
            occupy_conv = occupy_conv.to(device)
            # Run the pipelined stages card-by-card; intermediate activations
            # are handed to the next stage under that stage's CUDA device.
            if num_devices >= 8:
                interm_1 = new_model_1(sinput, idx.shape[0], occupy_conv)
                with torch.cuda.device('cuda:1'):
                    interm_2 = new_model_2(interm_1)
                with torch.cuda.device('cuda:2'):
                    interm_3 = new_model_3(interm_2)
                with torch.cuda.device('cuda:3'):
                    interm_4 = new_model_4(interm_3)
                with torch.cuda.device('cuda:4'):
                    interm_5 = new_model_5(interm_4)
                with torch.cuda.device('cuda:5'):
                    interm_6 = new_model_6(interm_5)
                with torch.cuda.device('cuda:6'):
                    interm_7 = new_model_7(interm_6)
                with torch.cuda.device('cuda:7'):
                    soutput = new_model_8(interm_7)
            elif num_devices >= 4:
                interm_1 = new_model_1(sinput, idx.shape[0], occupy_conv)
                with torch.cuda.device('cuda:1'):
                    interm_2 = new_model_2(interm_1)
                with torch.cuda.device('cuda:2'):
                    interm_3 = new_model_3(interm_2)
                with torch.cuda.device('cuda:3'):
                    soutput = new_model_4(interm_3)
            elif num_devices >= 2:
                interm = new_model_1(sinput, idx.shape[0], occupy_conv)
                with torch.cuda.device('cuda:1'):
                    soutput = new_model_2(interm)
            else:
                # NOTE(review): the single-GPU path passes idx.shape[1] where
                # all multi-GPU paths pass idx.shape[0] — confirm whether this
                # is intentional or a latent bug.
                soutput = new_model(sinput, idx.shape[1], occupy_conv)
        else:
            soutput = model(sinput)
        outputs_pcl = utils.get_point_output(config, soutput, inverse_idx, coords_vox, coords_pcl, valid)
        preds_pcl = outputs_pcl.max(1)[1].cpu().numpy()
        # Labels must live on the same card as the final pipeline stage.
        if (num_devices > 1) and config.dynamics_aware:
            label_sparse = torch.LongTensor(labels_pcl).to('cuda:'+str(num_devices_real-1))
        else:
            label_sparse = torch.LongTensor(labels_pcl).to(device)
        # Backprop the segmentation loss to get coordinate gradients, except
        # on the last iteration (its perturbation would never be evaluated).
        if iter != (config.iter_num - 1):
            loss = F.cross_entropy(outputs_pcl, label_sparse.long(), ignore_index=255)
            loss.backward()
        intersection, union, target = utils.intersectionAndUnion(
            preds_pcl, labels_pcl, num_classes, 255)
        mIoU = np.nanmean(intersection / union)
        io.cprint('Room: {:>3}/{:>3} | Iter: {:>2}/{:>2} | mIoU: [Original Conv] {:.4F}, [Dyn-aware Conv] {:.4F}'\
                  .format(i, room_num, iter, config.iter_num, mIoU_orig, mIoU))
        # Perturb the point cloud: signed-gradient-like step (gradient scaled
        # by its per-point max magnitude), then clamp into the L-inf budget
        # box around the original coordinates.
        if iter != (config.iter_num - 1):
            coords_pcl = coords_pcl + config.step * (coords_pcl.grad / (torch.max(torch.abs(coords_pcl.grad), dim=-1)[0].unsqueeze(1).repeat(1, 3) + 1e-8))
            coords_pcl = torch.where(coords_pcl < (coords_pcl0 - config.budget), coords_pcl0 - config.budget, coords_pcl)
            coords_pcl = torch.where(coords_pcl > (coords_pcl0 + config.budget), coords_pcl0 + config.budget, coords_pcl)
            coords_pcl_list.append(coords_pcl[1].clone().unsqueeze(0))
        torch.cuda.empty_cache()
    # Attack finished
    if load_attacked_coords:
        io.cprint('=> Resume Room: {:>3}/{:>3} Attacked mIoU: [Original Conv] {:.4F}\n'.format(i, room_num, mIoU_orig_best))
    else:
        print(torch.cat(coords_pcl_list, dim=0))
        print('Best iter:' + str(iter_best))
        io.cprint('=> Attack Finished! mIoU: [Original Conv] {:.4F} -> {:.4F}\n'.format(mIoU_orig0, mIoU_orig_best))
    # Accumulate per-room best predictions/labels for the split-wide metrics.
    preds_pcl_all = np.hstack([preds_pcl_all, preds_pcl_orig_best]) if \
        preds_pcl_all.size else preds_pcl_orig_best
    labels_pcl_all = np.hstack([labels_pcl_all, labels_pcl]) if \
        labels_pcl_all.size else labels_pcl
    if config.save_preds or config.save_probs:
        utils.save_prediction(config, save_path, room_name, preds_pcl_orig_best, probs_pcl_orig_best, dataset='scannet')
    if config.save_coords and (load_attacked_coords == False):
        utils.save_attacked_coords(save_path, room_name, coords_pcl_best.numpy())
    # Visualization
    if config.visual:
        utils.visualize(config, room_name, coords_pcl0.detach().numpy(), labels_pcl, save_path, remark='gt')
        utils.visualize(config, room_name, coords_pcl0.detach().numpy(), preds_pcl_orig0, save_path, remark='noattack')
        utils.visualize(config, room_name, coords_pcl.detach().numpy(), preds_pcl_orig_best, save_path, remark='attack')
# Summarise the attack over the whole validation split: per-class IoU and
# accuracy, then the usual mIoU / mAcc / allAcc aggregates.
intersection, union, target = utils.intersectionAndUnion(
    preds_pcl_all, labels_pcl_all, num_classes, 255)
# The small epsilon keeps ratios finite for classes that never occur.
eps = 1e-10
iou_class = intersection / (union + eps)
accuracy_class = intersection / (target + eps)
mIoU = np.mean(iou_class)
mAcc = np.mean(accuracy_class)
allAcc = sum(intersection) / (sum(target) + eps)
io.cprint('Val result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
    mIoU, mAcc, allAcc))
for cls_idx in range(num_classes):
    io.cprint('Class_{} Result: Iou/Accuracy {:.4f}/{:.4f}, Name: {}.'.format(
        cls_idx, iou_class[cls_idx], accuracy_class[cls_idx], VALID_CLASS_NAMES[cls_idx]))
# Log the configuration one final time so the log file is self-contained.
io.cprint('\n' + str(config))
io.close() | [
"utils.IOStream",
"utils.get_point_output",
"models.scannet_model.NewRes16UNet34C_d8_4",
"models.scannet_model.NewRes16UNet34C_d2_1",
"numpy.hstack",
"torch.LongTensor",
"torch.cuda.device_count",
"models.scannet_model.NewRes16UNet34C_d2_2",
"numpy.array",
"numpy.nanmean",
"models.scannet_model.... | [((244, 288), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (253, 288), True, 'import numpy as np\n'), ((617, 636), 'config.scannet.parser.parse_args', 'parser.parse_args', ([], {}), '()\n', (634, 636), False, 'from config.scannet import parser, parameters\n'), ((1015, 1053), 'utils.IOStream', 'utils.IOStream', (["(save_path + '/run.log')"], {}), "(save_path + '/run.log')\n", (1029, 1053), False, 'import utils\n'), ((2706, 2728), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (2718, 2728), False, 'import torch\n'), ((2743, 2768), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2766, 2768), False, 'import torch\n'), ((2919, 2966), 'torch.load', 'torch.load', (['config.weights'], {'map_location': 'device'}), '(config.weights, map_location=device)\n', (2929, 2966), False, 'import torch\n'), ((15157, 15232), 'utils.intersectionAndUnion', 'utils.intersectionAndUnion', (['preds_pcl_all', 'labels_pcl_all', 'num_classes', '(255)'], {}), '(preds_pcl_all, labels_pcl_all, num_classes, 255)\n', (15183, 15232), False, 'import utils\n'), ((15341, 15359), 'numpy.mean', 'np.mean', (['iou_class'], {}), '(iou_class)\n', (15348, 15359), True, 'import numpy as np\n'), ((15367, 15390), 'numpy.mean', 'np.mean', (['accuracy_class'], {}), '(accuracy_class)\n', (15374, 15390), True, 'import numpy as np\n'), ((955, 980), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (969, 980), False, 'import os\n'), ((986, 1008), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (997, 1008), False, 'import os\n'), ((7127, 7139), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7135, 7139), True, 'import numpy as np\n'), ((7141, 7153), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7149, 7153), True, 'import numpy as np\n'), ((695, 728), 'os.path.split', 'os.path.split', 
(['config.resume_path'], {}), '(config.resume_path)\n', (708, 728), False, 'import os\n'), ((772, 786), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (784, 786), False, 'from datetime import datetime\n'), ((2867, 2894), 'models.res16unet.Res16UNet34C', 'Res16UNet34C', (['(3)', '(20)', 'config'], {}), '(3, 20, config)\n', (2879, 2894), False, 'from models.res16unet import Res16UNet34C\n'), ((7164, 7215), 'os.path.join', 'os.path.join', (['config.data_path', '"""scannetv2_val.txt"""'], {}), "(config.data_path, 'scannetv2_val.txt')\n", (7176, 7215), False, 'import os\n'), ((7728, 7789), 'os.path.join', 'os.path.join', (['config.resume_path', '"""coord"""', "(room_name + '.txt')"], {}), "(config.resume_path, 'coord', room_name + '.txt')\n", (7740, 7789), False, 'import os\n'), ((7801, 7837), 'os.path.exists', 'os.path.exists', (['attacked_coords_path'], {}), '(attacked_coords_path)\n', (7815, 7837), False, 'import os\n'), ((7993, 8034), 'os.path.join', 'os.path.join', (['config.data_path', 'room_name'], {}), '(config.data_path, room_name)\n', (8005, 8034), False, 'import os\n'), ((10264, 10406), 'utils.generate_input_sparse_tensor', 'utils.generate_input_sparse_tensor', (['data', 'config'], {'coords_pcl': 'coords_pcl', 'coords_pcl0': 'coords_pcl0', 'labels_pcl': 'labels_pcl', 'dataset': '"""scannet"""'}), "(data, config, coords_pcl=coords_pcl,\n coords_pcl0=coords_pcl0, labels_pcl=labels_pcl, dataset='scannet')\n", (10298, 10406), False, 'import utils\n'), ((12245, 12332), 'utils.get_point_output', 'utils.get_point_output', (['config', 'soutput', 'inverse_idx', 'coords_vox', 'coords_pcl', 'valid'], {}), '(config, soutput, inverse_idx, coords_vox, coords_pcl,\n valid)\n', (12267, 12332), False, 'import utils\n'), ((12812, 12879), 'utils.intersectionAndUnion', 'utils.intersectionAndUnion', (['preds_pcl', 'labels_pcl', 'num_classes', '(255)'], {}), '(preds_pcl, labels_pcl, num_classes, 255)\n', (12838, 12879), False, 'import utils\n'), ((12908, 12940), 
'numpy.nanmean', 'np.nanmean', (['(intersection / union)'], {}), '(intersection / union)\n', (12918, 12940), True, 'import numpy as np\n'), ((13697, 13721), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (13719, 13721), False, 'import torch\n'), ((14167, 14214), 'numpy.hstack', 'np.hstack', (['[preds_pcl_all, preds_pcl_orig_best]'], {}), '([preds_pcl_all, preds_pcl_orig_best])\n', (14176, 14214), True, 'import numpy as np\n'), ((14293, 14332), 'numpy.hstack', 'np.hstack', (['[labels_pcl_all, labels_pcl]'], {}), '([labels_pcl_all, labels_pcl])\n', (14302, 14332), True, 'import numpy as np\n'), ((14438, 14554), 'utils.save_prediction', 'utils.save_prediction', (['config', 'save_path', 'room_name', 'preds_pcl_orig_best', 'probs_pcl_orig_best'], {'dataset': '"""scannet"""'}), "(config, save_path, room_name, preds_pcl_orig_best,\n probs_pcl_orig_best, dataset='scannet')\n", (14459, 14554), False, 'import utils\n'), ((3337, 3364), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:1"""'], {}), "('cuda:1')\n", (3354, 3364), False, 'import torch\n'), ((3601, 3628), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:2"""'], {}), "('cuda:2')\n", (3618, 3628), False, 'import torch\n'), ((3865, 3892), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:3"""'], {}), "('cuda:3')\n", (3882, 3892), False, 'import torch\n'), ((4129, 4156), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:4"""'], {}), "('cuda:4')\n", (4146, 4156), False, 'import torch\n'), ((4393, 4420), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:5"""'], {}), "('cuda:5')\n", (4410, 4420), False, 'import torch\n'), ((4657, 4684), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:6"""'], {}), "('cuda:6')\n", (4674, 4684), False, 'import torch\n'), ((4921, 4948), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:7"""'], {}), "('cuda:7')\n", (4938, 4948), False, 'import torch\n'), ((7864, 7896), 'numpy.loadtxt', 'np.loadtxt', (['attacked_coords_path'], {}), 
'(attacked_coords_path)\n', (7874, 7896), True, 'import numpy as np\n'), ((8117, 8132), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8130, 8132), False, 'import torch\n'), ((8153, 8194), 'os.path.join', 'os.path.join', (['config.data_path', 'room_name'], {}), '(config.data_path, room_name)\n', (8165, 8194), False, 'import os\n'), ((8281, 8412), 'utils.generate_input_sparse_tensor', 'utils.generate_input_sparse_tensor', (['data', 'config'], {'coords_pcl': 'coords_pcl', 'labels_pcl': 'labels_pcl', 'extend': '(False)', 'dataset': '"""scannet"""'}), "(data, config, coords_pcl=coords_pcl,\n labels_pcl=labels_pcl, extend=False, dataset='scannet')\n", (8315, 8412), False, 'import utils\n'), ((9196, 9268), 'utils.intersectionAndUnion', 'utils.intersectionAndUnion', (['preds_pcl_orig', 'labels_pcl', 'num_classes', '(255)'], {}), '(preds_pcl_orig, labels_pcl, num_classes, 255)\n', (9222, 9268), False, 'import utils\n'), ((9310, 9342), 'numpy.nanmean', 'np.nanmean', (['(intersection / union)'], {}), '(intersection / union)\n', (9320, 9342), True, 'import numpy as np\n'), ((9997, 10021), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (10019, 10021), False, 'import torch\n'), ((13397, 13496), 'torch.where', 'torch.where', (['(coords_pcl < coords_pcl0 - config.budget)', '(coords_pcl0 - config.budget)', 'coords_pcl'], {}), '(coords_pcl < coords_pcl0 - config.budget, coords_pcl0 - config.\n budget, coords_pcl)\n', (13408, 13496), False, 'import torch\n'), ((13519, 13618), 'torch.where', 'torch.where', (['(coords_pcl > coords_pcl0 + config.budget)', '(coords_pcl0 + config.budget)', 'coords_pcl'], {}), '(coords_pcl > coords_pcl0 + config.budget, coords_pcl0 + config.\n budget, coords_pcl)\n', (13530, 13618), False, 'import torch\n'), ((13929, 13962), 'torch.cat', 'torch.cat', (['coords_pcl_list'], {'dim': '(0)'}), '(coords_pcl_list, dim=0)\n', (13938, 13962), False, 'import torch\n'), ((3177, 3222), 'models.scannet_model.NewRes16UNet34C_d8_1', 
'new_model.NewRes16UNet34C_d8_1', (['(3)', '(20)', 'config'], {}), '(3, 20, config)\n', (3207, 3222), True, 'import models.scannet_model as new_model\n'), ((5410, 5437), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:1"""'], {}), "('cuda:1')\n", (5427, 5437), False, 'import torch\n'), ((5674, 5701), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:2"""'], {}), "('cuda:2')\n", (5691, 5701), False, 'import torch\n'), ((5938, 5965), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:3"""'], {}), "('cuda:3')\n", (5955, 5965), False, 'import torch\n'), ((6797, 6848), 'models.scannet_model.load_state_dict', 'new_model.load_state_dict', (["model_dict['state_dict']"], {}), "(model_dict['state_dict'])\n", (6822, 6848), True, 'import models.scannet_model as new_model\n'), ((6857, 6873), 'models.scannet_model.eval', 'new_model.eval', ([], {}), '()\n', (6871, 6873), True, 'import models.scannet_model as new_model\n'), ((8586, 8625), 'utils.convert_label_scannet', 'utils.convert_label_scannet', (['labels_pcl'], {}), '(labels_pcl)\n', (8613, 8625), False, 'import utils\n'), ((3392, 3437), 'models.scannet_model.NewRes16UNet34C_d8_2', 'new_model.NewRes16UNet34C_d8_2', (['(3)', '(20)', 'config'], {}), '(3, 20, config)\n', (3422, 3437), True, 'import models.scannet_model as new_model\n'), ((3491, 3540), 'torch.load', 'torch.load', (['config.weights'], {'map_location': '"""cuda:1"""'}), "(config.weights, map_location='cuda:1')\n", (3501, 3540), False, 'import torch\n'), ((3656, 3701), 'models.scannet_model.NewRes16UNet34C_d8_3', 'new_model.NewRes16UNet34C_d8_3', (['(3)', '(20)', 'config'], {}), '(3, 20, config)\n', (3686, 3701), True, 'import models.scannet_model as new_model\n'), ((3755, 3804), 'torch.load', 'torch.load', (['config.weights'], {'map_location': '"""cuda:2"""'}), "(config.weights, map_location='cuda:2')\n", (3765, 3804), False, 'import torch\n'), ((3920, 3965), 'models.scannet_model.NewRes16UNet34C_d8_4', 'new_model.NewRes16UNet34C_d8_4', (['(3)', '(20)', 
'config'], {}), '(3, 20, config)\n', (3950, 3965), True, 'import models.scannet_model as new_model\n'), ((4019, 4068), 'torch.load', 'torch.load', (['config.weights'], {'map_location': '"""cuda:3"""'}), "(config.weights, map_location='cuda:3')\n", (4029, 4068), False, 'import torch\n'), ((4184, 4229), 'models.scannet_model.NewRes16UNet34C_d8_5', 'new_model.NewRes16UNet34C_d8_5', (['(3)', '(20)', 'config'], {}), '(3, 20, config)\n', (4214, 4229), True, 'import models.scannet_model as new_model\n'), ((4283, 4332), 'torch.load', 'torch.load', (['config.weights'], {'map_location': '"""cuda:4"""'}), "(config.weights, map_location='cuda:4')\n", (4293, 4332), False, 'import torch\n'), ((4448, 4493), 'models.scannet_model.NewRes16UNet34C_d8_6', 'new_model.NewRes16UNet34C_d8_6', (['(3)', '(20)', 'config'], {}), '(3, 20, config)\n', (4478, 4493), True, 'import models.scannet_model as new_model\n'), ((4547, 4596), 'torch.load', 'torch.load', (['config.weights'], {'map_location': '"""cuda:5"""'}), "(config.weights, map_location='cuda:5')\n", (4557, 4596), False, 'import torch\n'), ((4712, 4757), 'models.scannet_model.NewRes16UNet34C_d8_7', 'new_model.NewRes16UNet34C_d8_7', (['(3)', '(20)', 'config'], {}), '(3, 20, config)\n', (4742, 4757), True, 'import models.scannet_model as new_model\n'), ((4811, 4860), 'torch.load', 'torch.load', (['config.weights'], {'map_location': '"""cuda:6"""'}), "(config.weights, map_location='cuda:6')\n", (4821, 4860), False, 'import torch\n'), ((4976, 5021), 'models.scannet_model.NewRes16UNet34C_d8_8', 'new_model.NewRes16UNet34C_d8_8', (['(3)', '(20)', 'config'], {}), '(3, 20, config)\n', (5006, 5021), True, 'import models.scannet_model as new_model\n'), ((5075, 5124), 'torch.load', 'torch.load', (['config.weights'], {'map_location': '"""cuda:7"""'}), "(config.weights, map_location='cuda:7')\n", (5085, 5124), False, 'import torch\n'), ((5250, 5295), 'models.scannet_model.NewRes16UNet34C_d4_1', 'new_model.NewRes16UNet34C_d4_1', (['(3)', '(20)', 
'config'], {}), '(3, 20, config)\n', (5280, 5295), True, 'import models.scannet_model as new_model\n'), ((6427, 6454), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:1"""'], {}), "('cuda:1')\n", (6444, 6454), False, 'import torch\n'), ((10749, 10776), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:1"""'], {}), "('cuda:1')\n", (10766, 10776), False, 'import torch\n'), ((10852, 10879), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:2"""'], {}), "('cuda:2')\n", (10869, 10879), False, 'import torch\n'), ((10955, 10982), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:3"""'], {}), "('cuda:3')\n", (10972, 10982), False, 'import torch\n'), ((11058, 11085), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:4"""'], {}), "('cuda:4')\n", (11075, 11085), False, 'import torch\n'), ((11161, 11188), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:5"""'], {}), "('cuda:5')\n", (11178, 11188), False, 'import torch\n'), ((11264, 11291), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:6"""'], {}), "('cuda:6')\n", (11281, 11291), False, 'import torch\n'), ((11367, 11394), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:7"""'], {}), "('cuda:7')\n", (11384, 11394), False, 'import torch\n'), ((12469, 12497), 'torch.LongTensor', 'torch.LongTensor', (['labels_pcl'], {}), '(labels_pcl)\n', (12485, 12497), False, 'import torch\n'), ((12575, 12603), 'torch.LongTensor', 'torch.LongTensor', (['labels_pcl'], {}), '(labels_pcl)\n', (12591, 12603), False, 'import torch\n'), ((5465, 5510), 'models.scannet_model.NewRes16UNet34C_d4_2', 'new_model.NewRes16UNet34C_d4_2', (['(3)', '(20)', 'config'], {}), '(3, 20, config)\n', (5495, 5510), True, 'import models.scannet_model as new_model\n'), ((5564, 5613), 'torch.load', 'torch.load', (['config.weights'], {'map_location': '"""cuda:1"""'}), "(config.weights, map_location='cuda:1')\n", (5574, 5613), False, 'import torch\n'), ((5729, 5774), 'models.scannet_model.NewRes16UNet34C_d4_3', 'new_model.NewRes16UNet34C_d4_3', 
(['(3)', '(20)', 'config'], {}), '(3, 20, config)\n', (5759, 5774), True, 'import models.scannet_model as new_model\n'), ((5828, 5877), 'torch.load', 'torch.load', (['config.weights'], {'map_location': '"""cuda:2"""'}), "(config.weights, map_location='cuda:2')\n", (5838, 5877), False, 'import torch\n'), ((5993, 6038), 'models.scannet_model.NewRes16UNet34C_d4_4', 'new_model.NewRes16UNet34C_d4_4', (['(3)', '(20)', 'config'], {}), '(3, 20, config)\n', (6023, 6038), True, 'import models.scannet_model as new_model\n'), ((6092, 6141), 'torch.load', 'torch.load', (['config.weights'], {'map_location': '"""cuda:3"""'}), "(config.weights, map_location='cuda:3')\n", (6102, 6141), False, 'import torch\n'), ((6267, 6312), 'models.scannet_model.NewRes16UNet34C_d2_1', 'new_model.NewRes16UNet34C_d2_1', (['(3)', '(20)', 'config'], {}), '(3, 20, config)\n', (6297, 6312), True, 'import models.scannet_model as new_model\n'), ((6737, 6777), 'models.scannet_model.NewRes16UNet34C', 'new_model.NewRes16UNet34C', (['(3)', '(20)', 'config'], {}), '(3, 20, config)\n', (6762, 6777), True, 'import models.scannet_model as new_model\n'), ((11579, 11606), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:1"""'], {}), "('cuda:1')\n", (11596, 11606), False, 'import torch\n'), ((11682, 11709), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:2"""'], {}), "('cuda:2')\n", (11699, 11709), False, 'import torch\n'), ((11785, 11812), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:3"""'], {}), "('cuda:3')\n", (11802, 11812), False, 'import torch\n'), ((12119, 12163), 'models.scannet_model', 'new_model', (['sinput', 'idx.shape[1]', 'occupy_conv'], {}), '(sinput, idx.shape[1], occupy_conv)\n', (12128, 12163), True, 'import models.scannet_model as new_model\n'), ((6482, 6527), 'models.scannet_model.NewRes16UNet34C_d2_2', 'new_model.NewRes16UNet34C_d2_2', (['(3)', '(20)', 'config'], {}), '(3, 20, config)\n', (6512, 6527), True, 'import models.scannet_model as new_model\n'), ((6581, 6630), 
'torch.load', 'torch.load', (['config.weights'], {'map_location': '"""cuda:1"""'}), "(config.weights, map_location='cuda:1')\n", (6591, 6630), False, 'import torch\n'), ((11995, 12022), 'torch.cuda.device', 'torch.cuda.device', (['"""cuda:1"""'], {}), "('cuda:1')\n", (12012, 12022), False, 'import torch\n'), ((9027, 9077), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['soutput_orig.F'], {'dim': '(1)'}), '(soutput_orig.F, dim=1)\n', (9054, 9077), False, 'import torch\n'), ((13298, 13324), 'torch.abs', 'torch.abs', (['coords_pcl.grad'], {}), '(coords_pcl.grad)\n', (13307, 13324), False, 'import torch\n')] |
#!/usr/bin/env python3
import numpy, sys
def ror_str(byte, count):
    """Rotate the binary representation of ``byte`` right by ``count`` bits.

    The value is rendered as a base-2 string zero-padded to at least 32
    characters; rotating the string by ``count`` single steps is equivalent
    to one rotation by ``count`` modulo the string length.  Returns the
    rotated value as an int.
    """
    bits = numpy.base_repr(byte, 2).zfill(32)
    if count > 0:
        # One slice-based rotation replaces `count` single-character shifts.
        split = len(bits) - count % len(bits)
        bits = bits[split:] + bits[:split]
    return int(bits, 2)
if __name__ == '__main__':
    # Require exactly one positional argument: the string to hash.
    if len(sys.argv) < 2:
        print("Usage: %s INPUTSTRING" % sys.argv[0])
        sys.exit()
    esi = sys.argv[1]
    # Accumulate a ROR-13 style hash: add each character's ordinal, then
    # rotate right by 0xd after every character except the last.
    edx = 0x00
    for pos, eax in enumerate(esi):
        edx = edx + ord(eax)
        if pos < len(esi) - 1:
            edx = ror_str(edx, 0xd)
print(hex(edx)) | [
"sys.exit",
"numpy.base_repr"
] | [((81, 105), 'numpy.base_repr', 'numpy.base_repr', (['byte', '(2)'], {}), '(byte, 2)\n', (96, 105), False, 'import numpy, sys\n'), ((367, 377), 'sys.exit', 'sys.exit', ([], {}), '()\n', (375, 377), False, 'import numpy, sys\n')] |
"""
A script to trial phase correlation for aligning 2D slices, runs through the first 5 patients and saves images for
each body part on each of them. This is not expected to generate results as impressive as the local 3D phase
correlation method, but demonstrates the method can work in 2D
"""
from pathlib import Path
from ai_ct_scans import data_loading
import matplotlib.pyplot as plt
import numpy as np
from ai_ct_scans import phase_correlation
from ai_ct_scans import phase_correlation_image_processing
# Interactive mode so figures render without blocking between patients.
plt.ion()
# Intensity threshold below which voxels are zeroed before alignment.
thresh = 500
# For each of the first 5 patients and both body parts, save a "no
# correction" overlay and a phase-correlation-corrected overlay of the
# central coronal slices of scan 1 and scan 2.
for patient_num in range(1, 6):
    for body_part in ["abdo", "thorax"]:
        # Output directory: <repo>/extra_data/figures/global_phase_corr_alignment_2d/<patient>/<body_part>
        save_dir = (
            Path(__file__).parents[2]
            / "extra_data"
            / "figures"
            / "global_phase_corr_alignment_2d"
            / f"{patient_num}"
            / f"{body_part}"
        )
        save_dir.mkdir(exist_ok=True, parents=True)
        patient_dir = data_loading.data_root_directory() / f"{patient_num}"
        patient_loader = data_loading.PatientLoader(patient_dir)
        if body_part == "abdo":
            scans = [patient_loader.abdo.scan_1, patient_loader.abdo.scan_2]
        else:
            scans = [patient_loader.thorax.scan_1, patient_loader.thorax.scan_2]
        f, axes = plt.subplots(1, 3, figsize=[16, 8])
        axes = np.ravel(axes)
        scans[0].load_scan()
        scans[1].load_scan()
        # Zero out low-intensity voxels (noise) in place on each full scan.
        for i, _ in enumerate(scans):
            scans[i].full_scan[scans[i].full_scan < thresh] = 0
        # Take the central slice along axis 1 of each scan as the 2D view.
        central_coronal_indices = [int(scan.full_scan.shape[1] / 2) for scan in scans]
        coronal_views = [
            scan.full_scan[:, central_coronal_index, :]
            for scan, central_coronal_index in zip(scans, central_coronal_indices)
        ]
        # Figure 1: both views side by side plus their uncorrected overlay.
        for ax, view in zip(axes, coronal_views):
            ax.imshow(view)
        overlaid = phase_correlation_image_processing.generate_overlay_2d(coronal_views)
        axes[2].imshow(overlaid)
        axes[0].set_title("Scan 1")
        axes[1].set_title("Scan 2")
        axes[2].set_title(
            "Scans with overlay\n Central axial plane assumed, no correction\nMost basic overlay method"
        )
        plt.tight_layout()
        curr_path = save_dir / f"central_plane.png"
        plt.savefig(curr_path)
        # Estimate the translation between the two views by phase correlation
        # and shift the second view into alignment with the first.
        shift = phase_correlation.shift_via_phase_correlation_nd(
            coronal_views, lmr_radius=3, apply_zero_crossings=False
        )
        coronal_views[1] = phase_correlation.shift_nd(
            coronal_views[1], -np.array(shift[1])
        )
        # Figure 2: same layout, now with the corrected overlay.
        f_2, axes_2 = plt.subplots(1, 3, figsize=[16, 8])
        axes_2 = np.ravel(axes_2)
        for ax, scan in zip(axes_2, coronal_views):
            ax.imshow(scan)
        overlaid = phase_correlation_image_processing.generate_overlay_2d(coronal_views)
        axes_2[2].imshow(overlaid)
        axes_2[0].set_title("Scan 1")
        axes_2[1].set_title("Scan 2")
        axes_2[2].set_title(
            "Scans with overlay\nFull 2D view phase correlation alignment correction\nAlignment of major structures dominates,\nlocal differences possible due to body shape changes"
        )
        plt.tight_layout()
        curr_path = save_dir / f"central_plane_phase_corr.png"
        plt.savefig(curr_path)
        # Briefly show the figures, then close both to bound memory use.
        plt.pause(0.1)
        plt.close("all")
| [
"matplotlib.pyplot.savefig",
"ai_ct_scans.data_loading.data_root_directory",
"pathlib.Path",
"matplotlib.pyplot.close",
"numpy.array",
"ai_ct_scans.phase_correlation.shift_via_phase_correlation_nd",
"ai_ct_scans.phase_correlation_image_processing.generate_overlay_2d",
"ai_ct_scans.data_loading.Patient... | [((512, 521), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (519, 521), True, 'import matplotlib.pyplot as plt\n'), ((990, 1029), 'ai_ct_scans.data_loading.PatientLoader', 'data_loading.PatientLoader', (['patient_dir'], {}), '(patient_dir)\n', (1016, 1029), False, 'from ai_ct_scans import data_loading\n'), ((1254, 1289), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '[16, 8]'}), '(1, 3, figsize=[16, 8])\n', (1266, 1289), True, 'import matplotlib.pyplot as plt\n'), ((1305, 1319), 'numpy.ravel', 'np.ravel', (['axes'], {}), '(axes)\n', (1313, 1319), True, 'import numpy as np\n'), ((1870, 1939), 'ai_ct_scans.phase_correlation_image_processing.generate_overlay_2d', 'phase_correlation_image_processing.generate_overlay_2d', (['coronal_views'], {}), '(coronal_views)\n', (1924, 1939), False, 'from ai_ct_scans import phase_correlation_image_processing\n'), ((2195, 2213), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2211, 2213), True, 'import matplotlib.pyplot as plt\n'), ((2274, 2296), 'matplotlib.pyplot.savefig', 'plt.savefig', (['curr_path'], {}), '(curr_path)\n', (2285, 2296), True, 'import matplotlib.pyplot as plt\n'), ((2314, 2424), 'ai_ct_scans.phase_correlation.shift_via_phase_correlation_nd', 'phase_correlation.shift_via_phase_correlation_nd', (['coronal_views'], {'lmr_radius': '(3)', 'apply_zero_crossings': '(False)'}), '(coronal_views, lmr_radius=\n 3, apply_zero_crossings=False)\n', (2362, 2424), False, 'from ai_ct_scans import phase_correlation\n'), ((2581, 2616), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '[16, 8]'}), '(1, 3, figsize=[16, 8])\n', (2593, 2616), True, 'import matplotlib.pyplot as plt\n'), ((2634, 2650), 'numpy.ravel', 'np.ravel', (['axes_2'], {}), '(axes_2)\n', (2642, 2650), True, 'import numpy as np\n'), ((2751, 2820), 'ai_ct_scans.phase_correlation_image_processing.generate_overlay_2d', 
'phase_correlation_image_processing.generate_overlay_2d', (['coronal_views'], {}), '(coronal_views)\n', (2805, 2820), False, 'from ai_ct_scans import phase_correlation_image_processing\n'), ((3161, 3179), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3177, 3179), True, 'import matplotlib.pyplot as plt\n'), ((3251, 3273), 'matplotlib.pyplot.savefig', 'plt.savefig', (['curr_path'], {}), '(curr_path)\n', (3262, 3273), True, 'import matplotlib.pyplot as plt\n'), ((3282, 3296), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (3291, 3296), True, 'import matplotlib.pyplot as plt\n'), ((3305, 3321), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3314, 3321), True, 'import matplotlib.pyplot as plt\n'), ((910, 944), 'ai_ct_scans.data_loading.data_root_directory', 'data_loading.data_root_directory', ([], {}), '()\n', (942, 944), False, 'from ai_ct_scans import data_loading\n'), ((2529, 2547), 'numpy.array', 'np.array', (['shift[1]'], {}), '(shift[1])\n', (2537, 2547), True, 'import numpy as np\n'), ((642, 656), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (646, 656), False, 'from pathlib import Path\n')] |
import cv2
import numpy as np
from typing import List
from statistics import mean
from .DatabaseHandler import DatabaseHandler
from .algorithms.DepthEstimation import DepthEstimation
from .algorithms.ObjectDetection import ObjectDetection
from .algorithms.PositionReconstruction import PositionReconstruction
CONFIDENCE_SCORE_MIN = 0.15
CV2_IMSHOW_RESIZE = (480, 320)
# Uses dependency injection for DepthEstimation and ObjectDetection
class DataProcessingEntity:
    """Per-camera frame-processing pipeline.

    For every frame: estimate a depth map, detect objects, reconstruct the
    absolute (latitude/longitude) positions of the detected bounding boxes and
    persist them through the injected DatabaseHandler. Uses dependency
    injection for DepthEstimation and ObjectDetection.
    """

    def __init__(self, camera_id: int, latitude: float, longitude: float, rotation_x: float, rotation_y: float, depth_estimation: DepthEstimation, object_detection: ObjectDetection, database_handler: DatabaseHandler, position_reconstruction: PositionReconstruction, verbose: bool):
        # NOTE(review): latitude/longitude/rotation_x/rotation_y are accepted
        # but never stored -- presumably already baked into
        # position_reconstruction. Confirm with callers before removing them.
        self.__camera_id = camera_id
        self.__database_handler = database_handler
        self.__depth_estimation = depth_estimation
        self.__object_detection = object_detection
        self.__position_reconstruction = position_reconstruction
        self.__classes = self.__object_detection.get_classes()
        self.__car_classes = self.__object_detection.get_car_classes()
        self.__other_classes = self.__object_detection.get_other_classes()
        self.__verbose = verbose

    def process_frame(self, frame):
        """Run depth estimation + object detection on one frame and store the
        reconstructed car/obstacle positions in the database.

        When verbose, also displays the depth map, the annotated frame and a
        2D map of the reconstructed entities.
        """
        depth_frame = self.__depth_estimation.process_frame(frame)
        detected_objects = self.__object_detection.process_frame(frame)
        if detected_objects is None:
            return

        # Flatten the detector output into per-object dicts, dropping
        # low-confidence detections.
        detected_objects = [
            {
                'box': detected_objects['boxes'][i],
                'is_car': self.__classes[detected_objects['labels'][i]] in self.__car_classes,
                'confidence': detected_objects['scores'][i],
                'label': self.__classes[detected_objects['labels'][i]]
            }
            for i in range(len(detected_objects['boxes'])) if detected_objects['scores'][i] >= CONFIDENCE_SCORE_MIN
        ]

        cars_positions = []
        obstacles_positions = []
        for detected_object in detected_objects:
            vertices = self.__position_reconstruction.get_box_vertices_absolute_positions(
                detected_object['box'], depth_frame
            )
            # Each vertex is (longitude, latitude); flatten the four corners
            # into (lat1, lon1, lat2, lon2, lat3, lon3, lat4, lon4), which is
            # the layout the database handler and the map renderer expect.
            flat_position = (
                vertices[0][1], vertices[0][0],
                vertices[1][1], vertices[1][0],
                vertices[2][1], vertices[2][0],
                vertices[3][1], vertices[3][0],
            )
            target = cars_positions if detected_object['is_car'] else obstacles_positions
            target.append(flat_position)

        self.__database_handler.insert_cars(self.__camera_id, cars_positions)
        self.__database_handler.insert_obstacles(self.__camera_id, obstacles_positions)

        if self.__verbose:
            # Depth values are scaled into [0, 1] for display.
            resized_depth_frame = cv2.resize(depth_frame, CV2_IMSHOW_RESIZE, interpolation=cv2.INTER_AREA) / 255.
            resized_frame = cv2.resize(frame, CV2_IMSHOW_RESIZE, interpolation=cv2.INTER_AREA)
            frame_copy = resized_frame.copy()
            for detected_object in detected_objects:
                box = detected_object['box'].detach().cpu().numpy()
                is_car = detected_object['is_car']
                confidence = detected_object['confidence']
                object_class = detected_object['label']
                (startX, startY, endX, endY) = box.astype('int')
                # Rescale the box from original-frame to display-frame coordinates.
                startX = round(startX * frame_copy.shape[1] / frame.shape[1])
                endX = round(endX * frame_copy.shape[1] / frame.shape[1])
                startY = round(startY * frame_copy.shape[0] / frame.shape[0])
                endY = round(endY * frame_copy.shape[0] / frame.shape[0])
                label = '{}: {:.2f}%'.format(object_class, confidence * 100)
                color = (0, 255, 0) if is_car else (255, 0, 0)  # green=car, blue=obstacle (BGR)
                cv2.rectangle(frame_copy, (startX, startY), (endX, endY), color, 2)
                # Draw the label just above the box, or inside it when too close to the top.
                startY = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(frame_copy, label, (startX, startY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
            entities_map = self.__create_entities_map(cars_positions, obstacles_positions)
            cv2.imshow('Depth', resized_depth_frame)
            cv2.imshow('Objects', frame_copy)
            cv2.imshow('Map', entities_map)
            cv2.waitKey(1)

    def __create_entities_map(self, cars_positions, obstacles_positions):
        """Render a top-down map image of the reconstructed entities.

        Cars are drawn as green dots, obstacles as blue dots (BGR colors).
        Positions are flattened corner tuples
        (lat1, lon1, lat2, lon2, lat3, lon3, lat4, lon4).
        """
        width = CV2_IMSHOW_RESIZE[0]
        # NOTE(review): height intentionally(?) reuses index 0, producing a
        # square map -- confirm this is not a typo for CV2_IMSHOW_RESIZE[1].
        height = CV2_IMSHOW_RESIZE[0]
        entities_map = np.zeros((height, width, 3), np.uint8)
        if not cars_positions and not obstacles_positions:
            return entities_map

        # Centroid of each box: (mean latitude, mean longitude).
        cars_map_coords = [
            (
                mean([p[0], p[2], p[4], p[6]]),
                mean([p[1], p[3], p[5], p[7]])
            ) for p in cars_positions
        ]
        obstacles_map_coords = [
            (
                mean([p[0], p[2], p[4], p[6]]),
                mean([p[1], p[3], p[5], p[7]])
            ) for p in obstacles_positions
        ]

        all_coords = cars_map_coords + obstacles_map_coords
        min_latitude = min(all_coords, key=lambda c: c[0])[0]
        max_latitude = max(all_coords, key=lambda c: c[0])[0]
        min_longitude = min(all_coords, key=lambda c: c[1])[1]
        max_longitude = max(all_coords, key=lambda c: c[1])[1]

        # Pad the bounding box by 10% on each side. The spans are computed
        # before mutating min_*, so the padding is symmetric (the previous
        # version subtracted from min_* first, biasing the max_* expansion).
        margin = 10 / 100
        lat_span = max_latitude - min_latitude
        lon_span = max_longitude - min_longitude
        min_latitude -= margin * lat_span
        max_latitude += margin * lat_span
        min_longitude -= margin * lon_span
        max_longitude += margin * lon_span
        # Guard against a degenerate (zero-area) bounding box.
        if max_longitude == min_longitude:
            max_longitude = min_longitude + 1
        if max_latitude == min_latitude:
            max_latitude = min_latitude + 1

        def to_pixel(coord):
            # coord is (latitude, longitude): longitude maps to x, latitude to
            # y, with y flipped because image row 0 is at the top.
            # BUG FIX: the previous version used coord[0] (latitude) against
            # the longitude range for x, and coord[1] against the latitude
            # range for y, garbling the rendered map.
            x = round(width / (max_longitude - min_longitude) * (coord[1] - min_longitude))
            y = height - round(height / (max_latitude - min_latitude) * (coord[0] - min_latitude))
            return x, y

        for coord in cars_map_coords:
            entities_map = cv2.circle(entities_map, to_pixel(coord), radius=5, color=(0, 255, 0), thickness=-1)
        for coord in obstacles_map_coords:
            entities_map = cv2.circle(entities_map, to_pixel(coord), radius=5, color=(255, 0, 0), thickness=-1)
        return entities_map
| [
"cv2.rectangle",
"statistics.mean",
"cv2.imshow",
"cv2.putText",
"cv2.circle",
"numpy.zeros",
"cv2.resize",
"cv2.waitKey"
] | [((5487, 5525), 'numpy.zeros', 'np.zeros', (['(height, width, 3)', 'np.uint8'], {}), '((height, width, 3), np.uint8)\n', (5495, 5525), True, 'import numpy as np\n'), ((3873, 3939), 'cv2.resize', 'cv2.resize', (['frame', 'CV2_IMSHOW_RESIZE'], {'interpolation': 'cv2.INTER_AREA'}), '(frame, CV2_IMSHOW_RESIZE, interpolation=cv2.INTER_AREA)\n', (3883, 3939), False, 'import cv2\n'), ((5155, 5195), 'cv2.imshow', 'cv2.imshow', (['"""Depth"""', 'resized_depth_frame'], {}), "('Depth', resized_depth_frame)\n", (5165, 5195), False, 'import cv2\n'), ((5208, 5241), 'cv2.imshow', 'cv2.imshow', (['"""Objects"""', 'frame_copy'], {}), "('Objects', frame_copy)\n", (5218, 5241), False, 'import cv2\n'), ((5254, 5285), 'cv2.imshow', 'cv2.imshow', (['"""Map"""', 'entities_map'], {}), "('Map', entities_map)\n", (5264, 5285), False, 'import cv2\n'), ((5298, 5312), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5309, 5312), False, 'import cv2\n'), ((7359, 7434), 'cv2.circle', 'cv2.circle', (['entities_map', '(x, y)'], {'radius': '(5)', 'color': '(0, 255, 0)', 'thickness': '(-1)'}), '(entities_map, (x, y), radius=5, color=(0, 255, 0), thickness=-1)\n', (7369, 7434), False, 'import cv2\n'), ((7736, 7811), 'cv2.circle', 'cv2.circle', (['entities_map', '(x, y)'], {'radius': '(5)', 'color': '(255, 0, 0)', 'thickness': '(-1)'}), '(entities_map, (x, y), radius=5, color=(255, 0, 0), thickness=-1)\n', (7746, 7811), False, 'import cv2\n'), ((3765, 3837), 'cv2.resize', 'cv2.resize', (['depth_frame', 'CV2_IMSHOW_RESIZE'], {'interpolation': 'cv2.INTER_AREA'}), '(depth_frame, CV2_IMSHOW_RESIZE, interpolation=cv2.INTER_AREA)\n', (3775, 3837), False, 'import cv2\n'), ((4801, 4868), 'cv2.rectangle', 'cv2.rectangle', (['frame_copy', '(startX, startY)', '(endX, endY)', 'color', '(2)'], {}), '(frame_copy, (startX, startY), (endX, endY), color, 2)\n', (4814, 4868), False, 'import cv2\n'), ((4960, 5054), 'cv2.putText', 'cv2.putText', (['frame_copy', 'label', '(startX, startY)', 
'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', 'color', '(2)'], {}), '(frame_copy, label, (startX, startY), cv2.FONT_HERSHEY_SIMPLEX, \n 0.5, color, 2)\n', (4971, 5054), False, 'import cv2\n'), ((5689, 5763), 'statistics.mean', 'mean', (['[car_position[0], car_position[2], car_position[4], car_position[6]]'], {}), '([car_position[0], car_position[2], car_position[4], car_position[6]])\n', (5693, 5763), False, 'from statistics import mean\n'), ((5781, 5855), 'statistics.mean', 'mean', (['[car_position[1], car_position[3], car_position[5], car_position[7]]'], {}), '([car_position[1], car_position[3], car_position[5], car_position[7]])\n', (5785, 5855), False, 'from statistics import mean\n'), ((5978, 6076), 'statistics.mean', 'mean', (['[obstacle_position[0], obstacle_position[2], obstacle_position[4],\n obstacle_position[6]]'], {}), '([obstacle_position[0], obstacle_position[2], obstacle_position[4],\n obstacle_position[6]])\n', (5982, 6076), False, 'from statistics import mean\n'), ((6090, 6188), 'statistics.mean', 'mean', (['[obstacle_position[1], obstacle_position[3], obstacle_position[5],\n obstacle_position[7]]'], {}), '([obstacle_position[1], obstacle_position[3], obstacle_position[5],\n obstacle_position[7]])\n', (6094, 6188), False, 'from statistics import mean\n')] |
from abc import ABC, abstractmethod
from collections import defaultdict
from datetime import datetime
from functools import cached_property
from typing import List, Dict, Union, Optional, Iterable
import numpy as np
import pandas as pd
from gym import Space, spaces
from pandas import Interval
from torch.utils.data import Dataset
from yacht import Mode, utils
from yacht.data.markets import Market
from yacht.data.scalers import Scaler
from yacht.data.transforms import Compose
from yacht.logger import Logger
class DatasetPeriod:
    """A dataset's time period, with the start pushed backwards by the
    observation window so that the first unadjusted day already has a full
    window of past data available."""

    def __init__(
            self,
            start: datetime,
            end: datetime,
            window_size: int,
            include_weekends: bool,
            take_action_at: str = 'current',
            frequency: str = 'd'
    ):
        assert frequency in ('d', )

        self.unadjusted_start = start
        self.unadjusted_end = end
        self.window_size = window_size
        self.include_weekends = include_weekends
        self.take_action_at = take_action_at
        self.frequency = frequency
        self.period_adjustment_size = self.compute_period_adjustment_size(
            window_size=window_size,
            take_action_at=take_action_at
        )
        # Move the start back by the adjustment size, so data from the past is
        # included and the effective period still begins at the given start.
        self.start = utils.adjust_period_with_window(
            datetime_point=start,
            window_size=self.period_adjustment_size,  # We also use the initial price within the period.
            action='-',
            include_weekends=include_weekends,
            frequency=frequency
        )
        self.end = end

        assert self.start <= self.unadjusted_start

    @classmethod
    def compute_period_adjustment_size(cls, window_size: int, take_action_at: str) -> int:
        """Extra past days needed so the first step has a complete window."""
        assert take_action_at in ('current', 'next')

        return {'current': window_size - 1, 'next': window_size}[take_action_at]

    def __len__(self) -> int:
        """Length, in days, of the adjusted [start, end] period."""
        return utils.len_period_range(
            start=self.start,
            end=self.end,
            include_weekends=self.include_weekends
        )
class AssetDataset(Dataset, ABC):
    """Abstract base for asset datasets backed by a Market data source.

    Concrete subclasses decide how many assets are wrapped and how items are
    assembled; this base holds the shared configuration and period accessors.
    """

    PRICE_FEATURES = (
        'Close',
        'Open',
        'High',
        'Low'
    )

    def __init__(
            self,
            market: Market,
            storage_dir: str,
            intervals: List[str],
            features: List[str],
            decision_price_feature: str,
            period: DatasetPeriod,
            render_intervals: List[Interval],
            mode: Mode,
            logger: Logger,
            window_size: int = 1,
    ):
        """
        market: data source the bars are queried from.
        storage_dir: directory for persisted data.
        intervals: data bars frequency; '1d' is mandatory at index 0.
        features: observation data features.
        decision_price_feature: the feature used for buying / selling assets or other decision making.
        period: the (window-adjusted) dataset period.
        render_intervals: a list of datetime intervals to know if this environment should be rendered or not.
        mode: run mode.
        logger: project logger.
        window_size: The past information that you want to add to the current item that you query from the dataset.
        """
        assert '1d' == intervals[0], 'One day bar interval is mandatory to exist & index=0 in input.intervals config.'
        assert window_size >= 1

        self.market = market
        self.storage_dir = storage_dir
        self.intervals = intervals
        self.features = features
        self.decision_price_feature = decision_price_feature
        self.render_intervals = render_intervals
        self.period = period
        self.mode = mode
        self.logger = logger
        self.window_size = window_size

    def close(self):
        """Release the underlying market connection."""
        self.market.close()

    @property
    def period_window_size(self) -> int:
        """Observation window length of the period."""
        return self.period.window_size

    @property
    def period_adjustment_size(self) -> int:
        """How many extra past days the period start was pushed back by."""
        return self.period.period_adjustment_size

    @property
    def take_action_at(self) -> str:
        """'current' or 'next' -- when the action is applied."""
        return self.period.take_action_at

    @property
    def first_observation_index(self) -> int:
        """Index of the first complete observation (0-based; window_size >= 1)."""
        return self.period_window_size - 1

    @property
    def last_observation_index(self) -> int:
        """Index of the last observation within the adjusted period."""
        return self.period_adjustment_size + self.num_days - 1

    @property
    def unadjusted_start(self) -> datetime:
        return self.period.unadjusted_start

    @property
    def unadjusted_end(self) -> datetime:
        return self.period.unadjusted_end

    @property
    def start(self) -> datetime:
        return self.period.start

    @property
    def end(self) -> datetime:
        return self.period.end

    @property
    def include_weekends(self) -> bool:
        return self.market.include_weekends

    @cached_property
    def should_render(self) -> bool:
        # Rendering every environment is expensive, so render only the periods
        # that touch one of the configured render intervals.
        return any(
            self.start in render_interval or self.end in render_interval
            for render_interval in self.render_intervals
        )

    @property
    @abstractmethod
    def num_days(self) -> int:
        """Number of days in the unadjusted period."""

    @property
    @abstractmethod
    def num_assets(self) -> int:
        """Number of wrapped assets."""

    @property
    @abstractmethod
    def asset_tickers(self) -> List[str]:
        """Tickers of the wrapped assets."""

    @abstractmethod
    def index_to_datetime(self, integer_index: int) -> datetime:
        """Map a positional index to its datetime."""

    @abstractmethod
    def inverse_scaling(self, observation: dict, **kwargs) -> dict:
        """Undo the scaler transform on an observation."""

    @abstractmethod
    def __len__(self):
        """Length of the adjusted period."""

    @abstractmethod
    def __getitem__(self, current_index: int) -> Dict[str, np.array]:
        """Return the features within [current_index - window_size + 1, current_index]."""

    @abstractmethod
    def __str__(self):
        """Human-readable dataset identifier."""

    @abstractmethod
    def get_prices(self) -> pd.DataFrame:
        """All prices over the adjusted period."""

    @abstractmethod
    def get_decision_prices(self, t_tick: Optional[int] = None, **kwargs) -> pd.Series:
        """Decision prices for one tick, or the whole period when t_tick is None."""

    @abstractmethod
    def compute_mean_price(self, start: datetime, end: datetime) -> Union[pd.DataFrame, pd.Series]:
        """Mean decision price over [start, end]."""

    @abstractmethod
    def get_external_observation_space(self) -> Dict[str, Space]:
        """The gym observation space in the format this dataset emits data."""
class SingleAssetDataset(AssetDataset, ABC):
    """Dataset wrapping the bars of a single ticker."""

    def __init__(
            self,
            ticker: str,
            market: Market,
            storage_dir: str,
            intervals: List[str],
            features: List[str],
            decision_price_feature: str,
            period: DatasetPeriod,
            render_intervals: List[Interval],
            render_tickers: List[str],
            mode: Mode,
            logger: Logger,
            scaler: Scaler,
            window_transforms: Optional[Compose] = None,
            window_size: int = 1,
            data: Dict[str, pd.DataFrame] = None
    ):
        super().__init__(
            market=market,
            storage_dir=storage_dir,
            intervals=intervals,
            features=features,
            decision_price_feature=decision_price_feature,
            period=period,
            render_intervals=render_intervals,
            mode=mode,
            logger=logger,
            window_size=window_size,
        )

        self.ticker = ticker
        self.scaler = scaler
        self.window_transforms = window_transforms
        self.render_tickers = render_tickers
        if data is None:
            # Query every configured bar frequency from the market.
            self.data = {
                interval: self.market.get(
                    ticker=ticker,
                    interval=interval,
                    start=self.start,
                    end=self.end,
                    features=self.features + [self.decision_price_feature],
                    squeeze=False
                )
                for interval in self.intervals
            }
        else:
            self.data = data

        self.prices = self.get_prices()

    def __str__(self) -> str:
        return self.ticker

    def __len__(self) -> int:
        # The whole adjusted interval.
        return len(self.prices)

    @property
    def num_days(self) -> int:
        # The unadjusted interval only.
        return utils.len_period_range(
            start=self.unadjusted_start,
            end=self.unadjusted_end,
            include_weekends=self.include_weekends
        )

    @property
    def num_assets(self) -> int:
        return 1

    @property
    def asset_tickers(self) -> List[str]:
        return [self.ticker]

    @cached_property
    def should_render(self) -> bool:
        # Only tickers explicitly listed for rendering are considered.
        return self.ticker in self.render_tickers and super().should_render

    def index_to_datetime(self, integer_index: Union[int, Iterable]) -> Union[datetime, Iterable[datetime]]:
        return self.data['1d'].index[integer_index].to_pydatetime()

    def get_prices(self) -> pd.DataFrame:
        """Daily prices, including the mandatory download features."""
        wanted_features = list(self.market.DOWNLOAD_MANDATORY_FEATURES) + [self.decision_price_feature]
        return self.market.get(
            ticker=self.ticker,
            interval='1d',
            start=self.start,
            end=self.end,
            features=wanted_features,
            squeeze=False
        )

    def get_decision_prices(self, t_tick: Optional[int] = None, **kwargs) -> pd.Series:
        """Decision prices for one tick, or the whole period when t_tick is None."""
        if t_tick is not None:
            t_datetime = self.index_to_datetime(t_tick)
            price = self.prices.loc[t_datetime, self.decision_price_feature]
            return pd.Series(price, index=[self.ticker], name='decision_price')

        prices = self.prices.loc[slice(None), self.decision_price_feature]
        prices.name = 'decision_price'
        return prices

    def compute_mean_price(self, start: datetime, end: datetime) -> Union[pd.DataFrame, pd.Series]:
        """Mean daily decision price over [start, end]."""
        mean_value = self.data['1d'].loc[start:end, self.decision_price_feature].mean()
        return pd.Series(mean_value, index=[self.ticker], name='mean_price')

    def inverse_scaling(self, observation: dict, asset_idx: int = -1) -> dict:
        """Undo the scaler transform, either on the whole window (asset_idx == -1)
        or in place on a single asset's slice."""
        whole_window = (asset_idx == -1)
        for interval in self.intervals:
            window = observation[interval]
            if whole_window:
                observation[interval] = self.scaler.inverse_transform(window)
            else:
                window[:, :, asset_idx, :] = self.scaler.inverse_transform(
                    window[:, :, asset_idx, :]
                )
        return observation
class MultiAssetDataset(AssetDataset):
    """Aggregates multiple SingleAssetDataset objects over the same period.

    All wrapped datasets must cover exactly the same days; queried items are
    stacked over a new asset axis (axis=2).
    """
    # TODO: Implement the multi-asset dependency within a DataFrame for faster processing.

    def __init__(
            self,
            datasets: List[SingleAssetDataset],
            storage_dir: str,
            market: Market,
            intervals: List[str],
            features: List[str],
            decision_price_feature: str,
            period: DatasetPeriod,
            render_intervals: List[Interval],
            render_tickers: List[str],
            mode: Mode,
            logger: Logger,
            window_size: int = 1,
            attached_datasets: Optional[List[SingleAssetDataset]] = None
    ):
        super().__init__(
            market=market,
            storage_dir=storage_dir,
            intervals=intervals,
            features=features,
            decision_price_feature=decision_price_feature,
            period=period,
            render_intervals=render_intervals,
            mode=mode,
            logger=logger,
            window_size=window_size,
        )

        self.datasets = datasets
        self.render_tickers = render_tickers
        # Extra datasets that contribute observations but are not traded assets.
        self.attached_datasets = attached_datasets if attached_datasets is not None else []

        assert self.datasets[0].num_days * len(self.datasets) == sum([dataset.num_days for dataset in self.datasets]), \
            'All the datasets should have the same length.'

    @property
    def num_days(self) -> int:
        # All the datasets have the same number of days, because they are reflecting the same time (eg. the same month).
        return self.datasets[0].num_days

    @property
    def num_assets(self) -> int:
        return len(self.datasets)

    @property
    def asset_tickers(self) -> List[str]:
        return [dataset.ticker for dataset in self.datasets]

    @cached_property
    def should_render(self) -> bool:
        # Render if any of the wrapped datasets should.
        return any([dataset.should_render for dataset in self.datasets])

    def index_to_datetime(self, integer_index: Union[int, Iterable]) -> Union[datetime, Iterable[datetime]]:
        # All the datasets have the same indices to dates mappings.
        return self.datasets[0].index_to_datetime(integer_index)

    def __len__(self):
        # All the datasets have the same length.
        return len(self.datasets[0])

    def __getitem__(self, current_index: int) -> Dict[str, np.array]:
        """Stack the items of every (wrapped + attached) dataset over axis=2."""
        datasets = self.datasets + self.attached_datasets
        stacked_items: Dict[str, list] = defaultdict(list)
        for dataset in datasets:
            item = dataset[current_index]
            for key, value in item.items():
                stacked_items[key].append(value)

        for key, value in stacked_items.items():
            stacked_items[key] = np.stack(stacked_items[key], axis=2)

        return stacked_items

    def inverse_scaling(self, observation: dict, **kwargs) -> dict:
        """Undo scaling per asset, delegating to each wrapped dataset."""
        for asset_idx in range(self.num_assets):
            dataset = self.datasets[asset_idx]
            observation = dataset.inverse_scaling(observation, asset_idx)

        return observation

    def __str__(self):
        asset_tickers = [ticker.split('-')[0] for ticker in self.asset_tickers]
        return '-'.join(asset_tickers)

    def get_prices(self) -> pd.DataFrame:
        """Concatenate all asset prices, indexed additionally by ticker."""
        prices = []
        for dataset in self.datasets:
            dataset_prices = dataset.get_prices()
            dataset_prices = dataset_prices.assign(ticker=dataset.ticker)
            dataset_prices = dataset_prices.set_index(keys=['ticker'], drop=True, append=True)
            prices.append(dataset_prices)
        prices = pd.concat(prices)

        return prices

    def get_labels(self, t_tick: Optional[int] = None) -> Union[pd.DataFrame, pd.Series]:
        """Per-ticker labels: the full frame when t_tick is None, one row otherwise.

        Returns an empty Series when t_tick is out of range.
        """
        labels = []
        for dataset in self.datasets:
            ticker_labels = getattr(dataset, 'labels', pd.Series())
            ticker_labels.name = dataset.ticker
            labels.append(ticker_labels)
        labels = pd.concat(labels, axis=1)

        if t_tick is not None:
            # BUG FIX: the previous guard evaluated `len(labels) < t_tick`
            # before checking for None (TypeError on the default call) and let
            # t_tick == len(labels) fall through to an IndexError in iloc.
            if t_tick >= len(labels):
                return pd.Series()
            labels = labels.iloc[t_tick]

        return labels

    def get_decision_prices(self, t_tick: Optional[int] = None, ticker: Optional[str] = None) -> pd.Series:
        """Decision prices for one ticker or all tickers, at one tick or all."""
        if ticker is not None:
            datasets = [self._pick_dataset(ticker=ticker)]
        else:
            datasets = self.datasets

        prices = []
        for dataset in datasets:
            decision_prices = dataset.get_decision_prices(t_tick)
            decision_prices.name = dataset.ticker
            prices.append(decision_prices)

        if t_tick is None:
            prices = pd.concat(prices, axis=1)  # We want to keep the dates as index.
        else:
            prices = pd.concat(prices, axis=0)  # We want to make the ticker as index.
            prices.name = 'decision_price'

        return prices

    def _pick_dataset(self, ticker: str) -> SingleAssetDataset:
        """Find the wrapped dataset for a ticker; raise if missing."""
        for dataset in self.datasets:
            if dataset.ticker == ticker:
                return dataset

        raise RuntimeError(f'No dataset with ticker: {ticker}')

    def compute_mean_price(self, start: datetime, end: datetime) -> Union[pd.DataFrame, pd.Series]:
        """Per-ticker mean decision price over [start, end]."""
        mean_data = []
        for dataset in self.datasets:
            mean_data.append(
                dataset.compute_mean_price(start, end).item()
            )

        return pd.Series(data=mean_data, index=[d.ticker for d in self.datasets])

    def get_external_observation_space(self) -> Dict[str, Space]:
        """
        Returns the gym spaces observation space in the format that the dataset gives the data.
        """
        observation_space = dict()
        # All the single asset observation spaces should have the same shapes;
        # widen the asset axis to cover wrapped + attached datasets.
        single_asset_observation_space = self.datasets[0].get_external_observation_space()
        for key, value in single_asset_observation_space.items():
            observation_space[key] = spaces.Box(
                low=-np.inf,
                high=np.inf,
                shape=(*value.shape[:-1], len(self.datasets + self.attached_datasets), value.shape[-1]),
                dtype=value.dtype
            )

        return observation_space
| [
"pandas.Series",
"yacht.utils.len_period_range",
"yacht.utils.adjust_period_with_window",
"numpy.stack",
"collections.defaultdict",
"pandas.concat"
] | [((1193, 1364), 'yacht.utils.adjust_period_with_window', 'utils.adjust_period_with_window', ([], {'datetime_point': 'start', 'window_size': 'self.period_adjustment_size', 'action': '"""-"""', 'include_weekends': 'include_weekends', 'frequency': 'frequency'}), "(datetime_point=start, window_size=self.\n period_adjustment_size, action='-', include_weekends=include_weekends,\n frequency=frequency)\n", (1224, 1364), False, 'from yacht import Mode, utils\n'), ((2076, 2175), 'yacht.utils.len_period_range', 'utils.len_period_range', ([], {'start': 'self.start', 'end': 'self.end', 'include_weekends': 'self.include_weekends'}), '(start=self.start, end=self.end, include_weekends=\n self.include_weekends)\n', (2098, 2175), False, 'from yacht import Mode, utils\n'), ((8846, 8966), 'yacht.utils.len_period_range', 'utils.len_period_range', ([], {'start': 'self.unadjusted_start', 'end': 'self.unadjusted_end', 'include_weekends': 'self.include_weekends'}), '(start=self.unadjusted_start, end=self.unadjusted_end,\n include_weekends=self.include_weekends)\n', (8868, 8966), False, 'from yacht import Mode, utils\n'), ((10619, 10681), 'pandas.Series', 'pd.Series', (['period_mean'], {'index': '[self.ticker]', 'name': '"""mean_price"""'}), "(period_mean, index=[self.ticker], name='mean_price')\n", (10628, 10681), True, 'import pandas as pd\n'), ((13613, 13630), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (13624, 13630), False, 'from collections import defaultdict\n'), ((14742, 14759), 'pandas.concat', 'pd.concat', (['prices'], {}), '(prices)\n', (14751, 14759), True, 'import pandas as pd\n'), ((15106, 15131), 'pandas.concat', 'pd.concat', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (15115, 15131), True, 'import pandas as pd\n'), ((16557, 16623), 'pandas.Series', 'pd.Series', ([], {'data': 'mean_data', 'index': '[d.ticker for d in self.datasets]'}), '(data=mean_data, index=[d.ticker for d in self.datasets])\n', (16566, 16623), True, 'import pandas as 
pd\n'), ((10276, 10346), 'pandas.Series', 'pd.Series', (['decision_prices'], {'index': '[self.ticker]', 'name': '"""decision_price"""'}), "(decision_prices, index=[self.ticker], name='decision_price')\n", (10285, 10346), True, 'import pandas as pd\n'), ((13883, 13919), 'numpy.stack', 'np.stack', (['stacked_items[key]'], {'axis': '(2)'}), '(stacked_items[key], axis=2)\n', (13891, 13919), True, 'import numpy as np\n'), ((15184, 15195), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (15193, 15195), True, 'import pandas as pd\n'), ((15805, 15830), 'pandas.concat', 'pd.concat', (['prices'], {'axis': '(1)'}), '(prices, axis=1)\n', (15814, 15830), True, 'import pandas as pd\n'), ((15905, 15930), 'pandas.concat', 'pd.concat', (['prices'], {'axis': '(0)'}), '(prices, axis=0)\n', (15914, 15930), True, 'import pandas as pd\n'), ((14987, 14998), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (14996, 14998), True, 'import pandas as pd\n')] |
from datetime import timedelta
from operator import methodcaller
import itertools
import math
import pytest
sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('psycopg2')
import os
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from datashape import dshape
from odo import odo, drop, discover
from blaze import (
data,
atan2,
by,
coalesce,
compute,
concat,
cos,
greatest,
join,
least,
radians,
sin,
sqrt,
symbol,
transform,
)
from blaze.interactive import iscorescalar
from blaze.utils import example, normalize
names = ('tbl%d' % i for i in itertools.count())
@pytest.fixture(scope='module')
def pg_ip():
    """IP of the test postgres server (POSTGRES_IP env var, default localhost)."""
    return os.getenv('POSTGRES_IP', 'localhost')
@pytest.fixture
def url(pg_ip):
    """Connection-string template with a '%s' placeholder for the table name."""
    return 'postgresql://postgres@' + pg_ip + '/test::%s'
@pytest.yield_fixture
def sql(url):
    """Two-row postgres table with schema {A: string, B: int64}; dropped on teardown."""
    expected = dshape('var * {A: string, B: int64}')
    try:
        t = data(url % next(names), dshape=expected)
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        assert t.dshape == expected
        t = data(odo([('a', 1), ('b', 2)], t))
        try:
            yield t
        finally:
            drop(t)
@pytest.yield_fixture
def sql_with_null(url):
    """Postgres table containing NULLs in every nullable column; dropped on teardown."""
    ds = dshape(""" var * {name: ?string,
                           sex: ?string,
                           amount: int,
                           id: int,
                           comment: ?string}
            """)
    rows = [('Alice', 'F', 100, 1, 'Alice comment'),
            (None, 'M', 300, 2, None),
            ('Drew', 'F', 100, 4, 'Drew comment'),
            ('Bob', 'M', 100, 5, 'Bob comment 2'),
            ('Drew', 'M', 200, 5, None),
            ('first', None, 300, 4, 'Missing info'),
            (None, None, 300, 6, None)]
    try:
        # Removed a leftover debug print of the connection URI.
        t = data(url % next(names), dshape=ds)
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        assert t.dshape == ds
        t = data(odo(rows, t))
        try:
            yield t
        finally:
            drop(t)
@pytest.yield_fixture(scope='module')
def nyc(pg_ip):
# odoing csv -> pandas -> postgres is more robust, as it doesn't require
# the postgres server to be on the same filesystem as the csv file.
nyc_pd = odo(example('nyc.csv'), pd.DataFrame)
try:
t = odo(nyc_pd,
'postgresql://postgres@{}/test::nyc'.format(pg_ip))
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def big_sql(url):
try:
t = data(url % next(names), dshape='var * {A: string, B: int64}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo(zip(list('a'*100), list(range(100))), t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sqla(url):
try:
t = data(url % next(names), dshape='var * {A: ?string, B: ?int32}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo([('a', 1), (None, 1), ('c', None)], t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sqlb(url):
try:
t = data(url % next(names), dshape='var * {A: string, B: int64}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo([('a', 1), ('b', 2)], t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sql_with_dts(url):
try:
t = data(url % next(names), dshape='var * {A: datetime}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo([(d,) for d in pd.date_range('2014-01-01', '2014-02-01')], t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sql_with_timedeltas(url):
try:
t = data(url % next(names), dshape='var * {N: timedelta}')
except sa.exc.OperationalError as e:
pytest.skip(str(e))
else:
t = odo([(timedelta(seconds=n),) for n in range(10)], t)
try:
yield t
finally:
drop(t)
@pytest.yield_fixture
def sql_two_tables(url):
    """Yield two empty single-column tables sharing the same schema.

    Skips the test when the Postgres server is unreachable; drops both
    tables afterwards.
    """
    # Renamed from `dshape` so the local doesn't shadow the imported
    # `datashape.dshape` function used elsewhere in this module.
    table_dshape = 'var * {a: int32}'
    try:
        t = data(url % next(names), dshape=table_dshape)
        u = data(url % next(names), dshape=table_dshape)
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield u, t
        finally:
            drop(t)
            drop(u)
# `products` table with a declared primary key; target of the orders FK below.
@pytest.yield_fixture
def products(url):
    try:
        products = data(url % 'products',
                        dshape="""var * {
                            product_id: int64,
                            color: ?string,
                            price: float64}""",
                        primary_key=['product_id'])
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield products
        finally:
            drop(products)
# `orders` table whose product_id is a foreign key into products,
# enabling blaze's automatic-join tests.
@pytest.yield_fixture
def orders(url, products):
    try:
        orders = data(url % 'orders',
                      dshape="""var * {
                          order_id: int64,
                          product_id: map[int64, T],
                          quantity: int64}""",
                      foreign_keys=dict(product_id=products.data.c.product_id),
                      primary_key=['order_id'])
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield orders
        finally:
            drop(orders)
# TODO: scope these as module because I think pytest is caching sa.Table, which
# doesn't work if remove it after every run
# Root table of the main <- pkey <- fkey foreign-key chain (13 random rows).
@pytest.yield_fixture
def main(url):
    try:
        main = odo([(i, int(np.random.randint(10))) for i in range(13)],
                   url % 'main',
                   dshape=dshape('var * {id: int64, data: int64}'),
                   primary_key=['id'])
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield main
        finally:
            drop(main)
@pytest.yield_fixture
def pkey(url, pkey_main_fixture=None, main=None):
    """Middle table of the FK chain: 100 random ticker rows whose `main`
    column references main.id.

    Skips when Postgres is unreachable; drops the table afterwards.
    """
    choices = [u'AAPL', u'HPQ', u'ORCL', u'IBM', u'DOW', u'SBUX', u'AMD',
               u'INTC', u'GOOG', u'PRU', u'MSFT', u'AIG', u'TXN', u'DELL',
               u'PEP']
    n = 100
    # Renamed from `data` so the local doesn't shadow the `blaze.data`
    # constructor imported at the top of the module.
    rows = list(zip(range(n),
                    np.random.choice(choices, size=n).tolist(),
                    np.random.uniform(10000, 20000, size=n).tolist(),
                    np.random.randint(main.count().scalar(), size=n).tolist()))
    try:
        pkey = odo(rows, url % 'pkey',
                   dshape=dshape('var * {id: int64, sym: string, price: float64, main: map[int64, T]}'),
                   foreign_keys=dict(main=main.c.id),
                   primary_key=['id'])
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield pkey
        finally:
            drop(pkey)
# Leaf table of the FK chain: sym_id references pkey.id (10 random rows).
@pytest.yield_fixture
def fkey(url, pkey):
    try:
        fkey = odo([(i,
                     int(np.random.randint(pkey.count().scalar())),
                     int(np.random.randint(10000)))
                    for i in range(10)],
                   url % 'fkey',
                   dshape=dshape('var * {id: int64, sym_id: map[int64, T], size: int64}'),
                   foreign_keys=dict(sym_id=pkey.c.id),
                   primary_key=['id'])
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield fkey
        finally:
            drop(fkey)
# Empty single-float-column table, populated per test (isnan / subselect tests).
@pytest.yield_fixture
def sql_with_float(url):
    try:
        t = data(url % next(names), dshape='var * {c: float64}')
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield t
        finally:
            drop(t)
# NYC taxi data loaded straight from CSV (contrast with the `nyc` fixture,
# which round-trips through pandas first).
@pytest.yield_fixture(scope='module')
def nyc_csv(pg_ip):
    try:
        t = odo(
            example('nyc.csv'),
            'postgresql://postgres@{}/test::nyc'.format(pg_ip),
        )
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield t
        finally:
            drop(t)
# Smoke test: the CSV-loaded nyc table has at least one row.
def test_nyc_csv(nyc_csv):
    t = symbol('t', discover(nyc_csv))
    assert compute(t.nrows, nyc_csv, return_type='core') > 0
# Round-trip: the fixture's seed rows come back out of Postgres intact.
def test_postgres_create(sql):
    assert odo(sql, list) == [('a', 1), ('b', 2)]
# isnan over a float column distinguishes NaN from ordinary values.
def test_postgres_isnan(sql_with_float):
    dta = (1.0,), (float('nan'),)
    table = odo(dta, sql_with_float)
    sym = symbol('s', discover(dta))
    assert compute(sym.isnan(), table, return_type=list) == [(False,), (True,)]
def test_insert_from_subselect(sql_with_float):
    """Inserting the result of a sorted sub-select appends rows in order."""
    # Renamed from `data` so the local doesn't shadow the `blaze.data`
    # constructor imported at the top of the module.
    df = pd.DataFrame([{'c': 2.0}, {'c': 1.0}])
    tbl = odo(df, sql_with_float)
    s = symbol('s', discover(df))
    # NOTE: the original line ended with a stray trailing comma, turning the
    # statement into a discarded 1-tuple; removed here.
    odo(compute(s[s.c.isin((1.0, 2.0))].sort(), tbl, return_type='native'), sql_with_float)
    # The two originally-inserted rows are skipped (iloc[2:]); the appended
    # rows arrive sorted ascending by c.
    tm.assert_frame_equal(
        odo(sql_with_float, pd.DataFrame).iloc[2:].reset_index(drop=True),
        pd.DataFrame([{'c': 1.0}, {'c': 2.0}]),
    )
# Vertical concat of two SQL tables matches concatenating the source frames.
def test_concat(sql_two_tables):
    t_table, u_table = sql_two_tables
    t_data = pd.DataFrame(np.arange(5), columns=['a'])
    u_data = pd.DataFrame(np.arange(5, 10), columns=['a'])
    odo(t_data, t_table)
    odo(u_data, u_table)
    t = symbol('t', discover(t_data))
    u = symbol('u', discover(u_data))
    tm.assert_frame_equal(
        compute(concat(t, u).sort('a'), {t: t_table, u: u_table}, return_type=pd.DataFrame),
        pd.DataFrame(np.arange(10), columns=['a']),
    )
# axis=1 concat is unsupported for SQL backends and must raise,
# suggesting `merge` instead.
def test_concat_invalid_axis(sql_two_tables):
    t_table, u_table = sql_two_tables
    t_data = pd.DataFrame(np.arange(5), columns=['a'])
    u_data = pd.DataFrame(np.arange(5, 10), columns=['a'])
    odo(t_data, t_table)
    odo(u_data, u_table)
    # We need to force the shape to not be a record here so we can
    # create the `Concat` node with an axis=1.
    t = symbol('t', '5 * 1 * int32')
    u = symbol('u', '5 * 1 * int32')
    with pytest.raises(ValueError) as e:
        compute(concat(t, u, axis=1), {t: t_table, u: u_table}, return_type='native')
    # Preserve the suggestion to use merge.
    assert "'merge'" in str(e.value)
# timedelta +/- on a datetime column matches the equivalent pandas arithmetic.
def test_timedelta_arith(sql_with_dts):
    delta = timedelta(days=1)
    dates = pd.Series(pd.date_range('2014-01-01', '2014-02-01'))
    sym = symbol('s', discover(dates))
    assert (
        compute(sym + delta, sql_with_dts, return_type=pd.Series) == dates + delta
    ).all()
    assert (
        compute(sym - delta, sql_with_dts, return_type=pd.Series) == dates - delta
    ).all()
    assert (
        compute(sym - (sym - delta), sql_with_dts, return_type=pd.Series) ==
        dates - (dates - delta)
    ).all()
# var/std over a timedelta column matches the pandas value computed on
# the deltas as float seconds (respecting the expression's ddof).
@pytest.mark.parametrize('func', ('var', 'std'))
def test_timedelta_stat_reduction(sql_with_timedeltas, func):
    sym = symbol('s', discover(sql_with_timedeltas))
    expr = getattr(sym.N, func)()
    deltas = pd.Series([timedelta(seconds=n) for n in range(10)])
    expected = timedelta(
        seconds=getattr(deltas.astype('int64') / 1e9, func)(ddof=expr.unbiased)
    )
    assert compute(expr, sql_with_timedeltas, return_type=timedelta) == expected
# Coercing a boolean expression to int32 and summing matches pandas' count.
def test_coerce_bool_and_sum(sql):
    sql = sql.data
    n = sql.name
    t = symbol(n, discover(sql))
    expr = (t.B > 1.0).coerce(to='int32').sum()
    result = compute(expr, sql).scalar()
    expected = compute(t.B, sql, return_type=pd.Series).gt(1).sum()
    assert result == expected
# distinct('A') over a sorted projection emits Postgres DISTINCT ON syntax.
def test_distinct_on(sql):
    sql = sql.data
    t = symbol('t', discover(sql))
    computation = compute(t[['A', 'B']].sort('A').distinct('A'), sql, return_type='native')
    assert normalize(str(computation)) == normalize("""
    SELECT DISTINCT ON (anon_1."A") anon_1."A", anon_1."B"
    FROM (SELECT {tbl}."A" AS "A", {tbl}."B" AS "B"
    FROM {tbl}) AS anon_1 ORDER BY anon_1."A" ASC
    """.format(tbl=sql.name))
    assert odo(computation, tuple) == (('a', 1), ('b', 2))
# relabel on top of a selection renames the output column without
# disturbing the filter.
def test_relabel_columns_over_selection(big_sql):
    t = symbol('t', discover(big_sql))
    result = compute(t[t['B'] == 2].relabel(B=u'b'),
                     big_sql, return_type=pd.DataFrame)
    expected = pd.DataFrame([['a', 2]], columns=[u'A', u'b'])
    tm.assert_frame_equal(result, expected)
# Accessing a field through a foreign key generates the implicit join.
def test_auto_join_field(orders):
    t = symbol('t', discover(orders))
    expr = t.product_id.color
    result = compute(expr, orders, return_type='native')
    expected = """SELECT
        products.color
    FROM products, orders
    WHERE orders.product_id = products.product_id
    """
    assert normalize(str(result)) == normalize(expected)
# Same as above but projecting multiple columns through the foreign key.
def test_auto_join_projection(orders):
    t = symbol('t', discover(orders))
    expr = t.product_id[['color', 'price']]
    result = compute(expr, orders, return_type='native')
    expected = """SELECT
        products.color,
        products.price
    FROM products, orders
    WHERE orders.product_id = products.product_id
    """
    assert normalize(str(result)) == normalize(expected)
# Reductions through a foreign key; xfail — the expected CTE is not generated.
@pytest.mark.xfail
@pytest.mark.parametrize('func', ['max', 'min', 'sum'])
def test_foreign_key_reduction(orders, products, func):
    t = symbol('t', discover(orders))
    expr = methodcaller(func)(t.product_id.price)
    result = compute(expr, orders, return_type='native')
    expected = """WITH alias as (select
        products.price as price
    from
        products, orders
    where orders.product_id = products.product_id)
    select {0}(alias.price) as price_{0} from alias
    """.format(func)
    assert normalize(str(result)) == normalize(expected)
# Two-hop foreign-key traversal (fkey -> pkey -> main) joins all three tables.
def test_foreign_key_chain(fkey):
    t = symbol('t', discover(fkey))
    expr = t.sym_id.main.data
    result = compute(expr, fkey, return_type='native')
    expected = """SELECT
        main.data
    FROM main, fkey, pkey
    WHERE fkey.sym_id = pkey.id and pkey.main = main.id
    """
    assert normalize(str(result)) == normalize(expected)
# Group-by through a foreign key; xfail — CTE generation breaks the SQL shape.
@pytest.mark.xfail(raises=AssertionError,
                   reason='CTE mucks up generation here')
@pytest.mark.parametrize('grouper', ['sym', ['sym']])
def test_foreign_key_group_by(fkey, grouper):
    t = symbol('fkey', discover(fkey))
    expr = by(t.sym_id[grouper], avg_price=t.sym_id.price.mean())
    result = compute(expr, fkey, return_type='native')
    expected = """SELECT
        pkey.sym,
        avg(pkey.price) AS avg_price
    FROM pkey, fkey
    WHERE fkey.sym_id = pkey.id
    GROUP BY pkey.sym
    """
    assert normalize(str(result)) == normalize(expected)
# Grouping directly on the FK column itself needs no join at all.
@pytest.mark.parametrize('grouper', ['sym_id', ['sym_id']])
def test_group_by_map(fkey, grouper):
    t = symbol('fkey', discover(fkey))
    expr = by(t[grouper], id_count=t.size.count())
    result = compute(expr, fkey, return_type='native')
    expected = """SELECT
        fkey.sym_id,
        count(fkey.size) AS id_count
    FROM fkey
    GROUP BY fkey.sym_id
    """
    assert normalize(str(result)) == normalize(expected)
# isin on an FK column compiles to a plain IN clause, no join.
def test_foreign_key_isin(fkey):
    t = symbol('fkey', discover(fkey))
    expr = t.sym_id.isin([1, 2])
    result = compute(expr, fkey, return_type='native')
    expected = """SELECT
        fkey.sym_id IN (%(sym_id_1)s, %(sym_id_2)s) AS anon_1
    FROM fkey
    """
    assert normalize(str(result)) == normalize(expected)
# merge() of two FK-derived columns; xfail — not implemented yet.
@pytest.mark.xfail(raises=AssertionError, reason='Not yet implemented')
def test_foreign_key_merge_expression(fkey):
    from blaze import merge
    t = symbol('fkey', discover(fkey))
    expr = merge(t.sym_id.sym, t.sym_id.main.data)
    expected = """
        select pkey.sym, main.data
        from
            fkey, pkey, main
        where
            fkey.sym_id = pkey.id and pkey.main = main.id
    """
    result = compute(expr, fkey, return_type='native')
    assert normalize(str(result)) == normalize(expected)
# Inner join of ?int32 with int64 promotes types; NULL joins with the 1-key row.
def test_join_type_promotion(sqla, sqlb):
    t, s = symbol(sqla.name, discover(sqla)), symbol(sqlb.name, discover(sqlb))
    expr = join(t, s, 'B', how='inner')
    result = set(map(tuple, compute(expr, {t: sqla, s: sqlb}, return_type='native').execute().fetchall()))
    expected = set([(1, 'a', 'a'), (1, None, 'a')])
    assert result == expected
# shift() on a single column matches pandas Series.shift for +/-/0 offsets.
@pytest.mark.parametrize(['n', 'column'],
                         [(1, 'A'), (-1, 'A'),
                          (1, 'B'), (-1, 'B'),
                          (0, 'A'), (0, 'B')])
def test_shift_on_column(n, column, sql):
    sql = sql.data
    t = symbol('t', discover(sql))
    expr = t[column].shift(n)
    result = compute(expr, sql, return_type=pd.Series)
    expected = odo(sql, pd.DataFrame)[column].shift(n)
    tm.assert_series_equal(result, expected)
# Arithmetic combining a column with its shifted self matches pandas.
@pytest.mark.parametrize('n', [-1, 0, 1])
def test_shift_arithmetic(sql, n):
    t = symbol('t', discover(sql))
    expr = t.B - t.B.shift(n)
    result = compute(expr, sql, return_type=pd.Series)
    df = odo(sql, pd.DataFrame)
    expected = df.B - df.B.shift(n)
    tm.assert_series_equal(result, expected)
# Haversine distance built from blaze math ops over filtered taxi rows;
# checks the scalar max agrees with the max of the full column.
def test_dist(nyc):
    def distance(lat1, lon1, lat2, lon2, R=3959):
        # http://andrew.hedges.name/experiments/haversine/
        dlon = radians(lon2 - lon1)
        dlat = radians(lat2 - lat1)
        a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2
        return R * 2 * atan2(sqrt(a), sqrt(1 - a))
    t = symbol('t', discover(nyc))
    # Restrict to plausible NYC coordinates and passenger counts.
    filtered = t[
        (t.pickup_latitude >= 40.477399) &
        (t.pickup_latitude <= 40.917577) &
        (t.dropoff_latitude >= 40.477399) &
        (t.dropoff_latitude <= 40.917577) &
        (t.pickup_longitude >= -74.259090) &
        (t.pickup_longitude <= -73.700272) &
        (t.dropoff_longitude >= -74.259090) &
        (t.dropoff_longitude <= -73.700272) &
        (t.passenger_count < 6)
    ]
    dist = distance(filtered.pickup_latitude, filtered.pickup_longitude,
                    filtered.dropoff_latitude, filtered.dropoff_longitude)
    transformed = transform(filtered, dist=dist)
    assert (
        compute(transformed.dist.max(), nyc, return_type=float) ==
        compute(transformed.dist, nyc, return_type=pd.Series).max()
    )
# transform() can add several derived columns in one call; result is non-empty.
def test_multiple_columns_in_transform(nyc):
    t = symbol('t', discover(nyc))
    # Restrict to plausible NYC coordinates and passenger counts.
    t = t[
        (t.pickup_latitude >= 40.477399) &
        (t.pickup_latitude <= 40.917577) &
        (t.dropoff_latitude >= 40.477399) &
        (t.dropoff_latitude <= 40.917577) &
        (t.pickup_longitude >= -74.259090) &
        (t.pickup_longitude <= -73.700272) &
        (t.dropoff_longitude >= -74.259090) &
        (t.dropoff_longitude <= -73.700272) &
        (t.passenger_count < 6)
    ]
    hours = t.trip_time_in_secs.coerce('float64') / 3600.0
    avg_speed_in_mph = t.trip_distance / hours
    d = transform(t, avg_speed_in_mph=avg_speed_in_mph, mycol=avg_speed_in_mph + 1)
    df = compute(d[d.avg_speed_in_mph <= 200], nyc, return_type=pd.DataFrame)
    assert not df.empty
# coerce() applied on top of a selection + transform produces the right values.
def test_coerce_on_select(nyc):
    t = symbol('t', discover(nyc))
    t = t[
        (t.pickup_latitude >= 40.477399) &
        (t.pickup_latitude <= 40.917577) &
        (t.dropoff_latitude >= 40.477399) &
        (t.dropoff_latitude <= 40.917577) &
        (t.pickup_longitude >= -74.259090) &
        (t.pickup_longitude <= -73.700272) &
        (t.dropoff_longitude >= -74.259090) &
        (t.dropoff_longitude <= -73.700272) &
        (t.passenger_count < 6)
    ]
    t = transform(t, pass_count=t.passenger_count + 1)
    result = compute(t.pass_count.coerce('float64'), nyc, return_type='native')
    s = odo(result, pd.Series)
    expected = compute(t, nyc, return_type=pd.DataFrame) \
        .passenger_count.astype('float64') + 1.0
    assert list(s) == list(expected)
# len() on an interactive expression equals the SQL count.
def test_interactive_len(sql):
    t = data(sql)
    assert len(t) == int(t.count())
# sample(n=...) returns exactly n rows.
def test_sample_n(nyc):
    t = symbol('t', discover(nyc))
    result = compute(t.sample(n=14), nyc, return_type=pd.DataFrame)
    assert len(result) == 14
# Requesting more rows than exist caps the sample at the table size.
def test_sample_bounded(nyc):
    t = symbol('t', discover(nyc))
    nrows = compute(t.nrows, nyc, return_type=int)
    result = compute(t.sample(n=2*nrows), nyc, return_type=pd.DataFrame)
    assert len(result) == nrows
# sample(frac=...) rounds half-up to the nearest whole row count.
def test_sample_frac(nyc):
    t = symbol('t', discover(nyc))
    result = compute(t.sample(frac=0.5), nyc, return_type=pd.DataFrame)
    num_rows = compute(t.nrows, nyc, return_type=int)
    # *Sigh* have to do proper rounding manually; Python's round() builtin is
    # borked.
    fractional, integral = math.modf(num_rows * 0.5)
    assert int(integral + (0 if fractional < 0.5 else 1)) == len(result)
# n= and frac= forms of sample() agree on the returned row count.
def test_sample(big_sql):
    nn = symbol('nn', discover(big_sql))
    nrows = odo(compute(nn.nrows, big_sql), int)
    result = compute(nn.sample(n=nrows // 2), big_sql, return_type=pd.DataFrame)
    assert len(result) == nrows // 2
    result2 = compute(nn.sample(frac=0.5), big_sql, return_type=pd.DataFrame)
    assert len(result) == len(result2)
# str_cat propagates NULL (SQL semantics): any NULL operand yields NULL.
@pytest.mark.parametrize("sep", [None, " -- "])
def test_str_cat_with_null(sql_with_null, sep):
    t = symbol('t', discover(sql_with_null))
    res = compute(t.name.str_cat(t.sex, sep=sep), sql_with_null,
                  return_type=list)
    res = [r[0] for r in res]
    cols = compute(t[['name', 'sex']], sql_with_null, return_type=list)
    for r, (n, s) in zip(res, cols):
        if n is None or s is None:
            assert r is None
        else:
            assert (r == n + s if sep is None else r == n + sep + s)
# Chained str_cat calls concatenate in order, still NULL-propagating.
def test_chain_str_cat_with_null(sql_with_null):
    t = symbol('t', discover(sql_with_null))
    expr = (t.name
            .str_cat(t.comment, sep=' ++ ')
            .str_cat(t.sex, sep=' -- '))
    res = compute(expr, sql_with_null, return_type=list)
    res = [r[0] for r in res]
    cols = compute(t[['name', 'comment', 'sex']], sql_with_null,
                   return_type=list)
    for r, (n, c, s) in zip(res, cols):
        if n is None or c is None or s is None:
            assert r is None
        else:
            assert (r == n + ' ++ ' + c + ' -- ' + s)
# str_cat against a scalar string literal broadcasts over the column.
def test_str_cat_bcast(sql_with_null):
    t = symbol('t', discover(sql_with_null))
    lit_sym = symbol('s', 'string')
    s = t[t.amount <= 200]
    result = compute(s.comment.str_cat(lit_sym, sep=' '),
                     {t: sql_with_null, lit_sym: '!!'},
                     return_type=pd.Series)
    df = compute(s, sql_with_null,
                 return_type=pd.DataFrame)
    expected = df.comment.str.cat(['!!']*len(df.comment), sep=' ')
    assert all(expected[~expected.isnull()] == result[~result.isnull()])
    assert all(expected[expected.isnull()].index == result[result.isnull()].index)
def test_str_cat_where_clause(sql_with_null):
    """
    Invokes the (Select, Select) path for compute_up
    """
    t = symbol('t', discover(sql_with_null))
    s = t[t.amount <= 200]
    c1 = s.comment.str_cat(s.sex, sep=' -- ')
    bres = compute(c1, sql_with_null, return_type=pd.Series)
    df_s = compute(s, sql_with_null, return_type=pd.DataFrame)
    exp = df_s.comment.str.cat(df_s.sex, ' -- ')
    assert all(exp[~exp.isnull()] == bres[~bres.isnull()])
    assert all(exp[exp.isnull()].index == bres[bres.isnull()].index)
# return_type='core' materializes into pandas / scalar / list types.
def test_core_compute(nyc):
    t = symbol('t', discover(nyc))
    assert isinstance(compute(t, nyc, return_type='core'), pd.DataFrame)
    assert isinstance(compute(t.passenger_count, nyc, return_type='core'), pd.Series)
    assert iscorescalar(compute(t.passenger_count.mean(), nyc, return_type='core'))
    assert isinstance(compute(t, nyc, return_type=list), list)
# Seeds the two empty tables with one row each (1 and 2) for greatest/least.
@pytest.fixture
def gl_data(sql_two_tables):
    u_data, t_data = sql_two_tables
    # populate the tables with some data and return it
    return data(odo([(1,)], u_data)), data(odo([(2,)], t_data))
# greatest() of the two tables' maxima (1 and 2) is 2.
def test_greatest(gl_data):
    u, t = gl_data
    assert odo(greatest(u.a.max(), t.a.max()), int) == 2
# least() of the two tables' maxima (1 and 2) is 1.
def test_least(gl_data):
    u, t = gl_data
    assert odo(least(u.a.max(), t.a.max()), int) == 1
# coalesce replaces NULLs with the given default for both int and string columns.
def test_coalesce(sqla):
    t = symbol('t', discover(sqla))
    assert (
        compute(coalesce(t.B, -1), {t: sqla}, return_type=list) ==
        [(1,), (1,), (-1,)]
    )
    assert (
        compute(coalesce(t.A, 'z'), {t: sqla}, return_type=list) ==
        [('a',), ('z',), ('c',)]
    )
# any() over boolean column expressions (rows are B=1 and B=2).
def test_any(sql):
    s = symbol('s', discover(sql))
    assert compute((s.B == 1).any(), {s: sql}, return_type='core')
    assert compute((s.B == 2).any(), {s: sql}, return_type='core')
    assert compute(~(s.B == 3).any(), {s: sql}, return_type='core')
# all() over boolean column expressions (rows are B=1 and B=2).
def test_all(sql):
    s = symbol('s', discover(sql))
    assert compute(s.B.isin({1, 2}).all(), {s: sql}, return_type='core')
    assert compute(~(s.B == 1).all(), {s: sql}, return_type='core')
    assert compute(~(s.B == 2).all(), {s: sql}, return_type='core')
    assert compute(~(s.B == 3).all(), {s: sql}, return_type='core')
| [
"odo.discover",
"odo.odo",
"blaze.sin",
"blaze.join",
"pytest.fixture",
"datetime.timedelta",
"numpy.arange",
"pandas.util.testing.assert_frame_equal",
"pandas.date_range",
"pytest.mark.xfail",
"blaze.merge",
"datashape.dshape",
"pandas.DataFrame",
"math.modf",
"odo.drop",
"numpy.rando... | [((115, 148), 'pytest.importorskip', 'pytest.importorskip', (['"""sqlalchemy"""'], {}), "('sqlalchemy')\n", (134, 148), False, 'import pytest\n'), ((149, 180), 'pytest.importorskip', 'pytest.importorskip', (['"""psycopg2"""'], {}), "('psycopg2')\n", (168, 180), False, 'import pytest\n'), ((663, 693), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (677, 693), False, 'import pytest\n'), ((2121, 2157), 'pytest.yield_fixture', 'pytest.yield_fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2141, 2157), False, 'import pytest\n'), ((8007, 8043), 'pytest.yield_fixture', 'pytest.yield_fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (8027, 8043), False, 'import pytest\n'), ((10879, 10926), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', "('var', 'std')"], {}), "('func', ('var', 'std'))\n", (10902, 10926), False, 'import pytest\n'), ((13181, 13235), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', "['max', 'min', 'sum']"], {}), "('func', ['max', 'min', 'sum'])\n", (13204, 13235), False, 'import pytest\n'), ((14090, 14169), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'AssertionError', 'reason': '"""CTE mucks up generation here"""'}), "(raises=AssertionError, reason='CTE mucks up generation here')\n", (14107, 14169), False, 'import pytest\n'), ((14190, 14242), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""grouper"""', "['sym', ['sym']]"], {}), "('grouper', ['sym', ['sym']])\n", (14213, 14242), False, 'import pytest\n'), ((14671, 14729), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""grouper"""', "['sym_id', ['sym_id']]"], {}), "('grouper', ['sym_id', ['sym_id']])\n", (14694, 14729), False, 'import pytest\n'), ((15431, 15501), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'AssertionError', 'reason': '"""Not yet implemented"""'}), "(raises=AssertionError, reason='Not yet implemented')\n", 
(15448, 15501), False, 'import pytest\n'), ((16310, 16419), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["['n', 'column']", "[(1, 'A'), (-1, 'A'), (1, 'B'), (-1, 'B'), (0, 'A'), (0, 'B')]"], {}), "(['n', 'column'], [(1, 'A'), (-1, 'A'), (1, 'B'), (-\n 1, 'B'), (0, 'A'), (0, 'B')])\n", (16333, 16419), False, 'import pytest\n'), ((16776, 16816), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n"""', '[-1, 0, 1]'], {}), "('n', [-1, 0, 1])\n", (16799, 16816), False, 'import pytest\n'), ((21029, 21075), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sep"""', "[None, ' -- ']"], {}), "('sep', [None, ' -- '])\n", (21052, 21075), False, 'import pytest\n'), ((718, 760), 'os.environ.get', 'os.environ.get', (['"""POSTGRES_IP"""', '"""localhost"""'], {}), "('POSTGRES_IP', 'localhost')\n", (732, 760), False, 'import os\n'), ((902, 939), 'datashape.dshape', 'dshape', (['"""var * {A: string, B: int64}"""'], {}), "('var * {A: string, B: int64}')\n", (908, 939), False, 'from datashape import dshape\n'), ((1279, 1502), 'datashape.dshape', 'dshape', (['""" var * {name: ?string,\n sex: ?string,\n amount: int,\n id: int,\n comment: ?string}\n """'], {}), '(\n """ var * {name: ?string,\n sex: ?string,\n amount: int,\n id: int,\n comment: ?string}\n """\n )\n', (1285, 1502), False, 'from datashape import dshape\n'), ((8646, 8670), 'odo.odo', 'odo', (['dta', 'sql_with_float'], {}), '(dta, sql_with_float)\n', (8649, 8670), False, 'from odo import odo, drop, discover\n'), ((8849, 8887), 'pandas.DataFrame', 'pd.DataFrame', (["[{'c': 2.0}, {'c': 1.0}]"], {}), "([{'c': 2.0}, {'c': 1.0}])\n", (8861, 8887), True, 'import pandas as pd\n'), ((8898, 8923), 'odo.odo', 'odo', (['data', 'sql_with_float'], {}), '(data, sql_with_float)\n', (8901, 8923), False, 'from odo import odo, drop, discover\n'), ((9400, 9420), 'odo.odo', 'odo', (['t_data', 't_table'], {}), '(t_data, t_table)\n', (9403, 9420), False, 'from odo import odo, drop, discover\n'), ((9425, 9445), 
'odo.odo', 'odo', (['u_data', 'u_table'], {}), '(u_data, u_table)\n', (9428, 9445), False, 'from odo import odo, drop, discover\n'), ((9905, 9925), 'odo.odo', 'odo', (['t_data', 't_table'], {}), '(t_data, t_table)\n', (9908, 9925), False, 'from odo import odo, drop, discover\n'), ((9930, 9950), 'odo.odo', 'odo', (['u_data', 'u_table'], {}), '(u_data, u_table)\n', (9933, 9950), False, 'from odo import odo, drop, discover\n'), ((10074, 10102), 'blaze.symbol', 'symbol', (['"""t"""', '"""5 * 1 * int32"""'], {}), "('t', '5 * 1 * int32')\n", (10080, 10102), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((10111, 10139), 'blaze.symbol', 'symbol', (['"""u"""', '"""5 * 1 * int32"""'], {}), "('u', '5 * 1 * int32')\n", (10117, 10139), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((10404, 10421), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (10413, 10421), False, 'from datetime import timedelta\n'), ((12325, 12371), 'pandas.DataFrame', 'pd.DataFrame', (["[['a', 2]]"], {'columns': "[u'A', u'b']"}), "([['a', 2]], columns=[u'A', u'b'])\n", (12337, 12371), True, 'import pandas as pd\n'), ((12376, 12415), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (12397, 12415), True, 'import pandas.util.testing as tm\n'), ((12533, 12576), 'blaze.compute', 'compute', (['expr', 'orders'], {'return_type': '"""native"""'}), "(expr, orders, return_type='native')\n", (12540, 12576), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((12902, 12945), 'blaze.compute', 'compute', (['expr', 'orders'], {'return_type': '"""native"""'}), "(expr, orders, return_type='native')\n", (12909, 12945), False, 'from blaze import data, 
atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((13393, 13436), 'blaze.compute', 'compute', (['expr', 'orders'], {'return_type': '"""native"""'}), "(expr, orders, return_type='native')\n", (13400, 13436), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((13855, 13896), 'blaze.compute', 'compute', (['expr', 'fkey'], {'return_type': '"""native"""'}), "(expr, fkey, return_type='native')\n", (13862, 13896), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((14407, 14448), 'blaze.compute', 'compute', (['expr', 'fkey'], {'return_type': '"""native"""'}), "(expr, fkey, return_type='native')\n", (14414, 14448), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((14871, 14912), 'blaze.compute', 'compute', (['expr', 'fkey'], {'return_type': '"""native"""'}), "(expr, fkey, return_type='native')\n", (14878, 14912), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((15220, 15261), 'blaze.compute', 'compute', (['expr', 'fkey'], {'return_type': '"""native"""'}), "(expr, fkey, return_type='native')\n", (15227, 15261), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((15626, 15665), 'blaze.merge', 'merge', (['t.sym_id.sym', 't.sym_id.main.data'], {}), '(t.sym_id.sym, t.sym_id.main.data)\n', (15631, 15665), False, 'from blaze import merge\n'), ((15855, 15896), 'blaze.compute', 'compute', (['expr', 'fkey'], {'return_type': '"""native"""'}), "(expr, fkey, return_type='native')\n", (15862, 15896), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, 
greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((16089, 16117), 'blaze.join', 'join', (['t', 's', '"""B"""'], {'how': '"""inner"""'}), "(t, s, 'B', how='inner')\n", (16093, 16117), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((16631, 16672), 'blaze.compute', 'compute', (['expr', 'sql'], {'return_type': 'pd.Series'}), '(expr, sql, return_type=pd.Series)\n', (16638, 16672), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((16732, 16772), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (16754, 16772), True, 'import pandas.util.testing as tm\n'), ((16930, 16971), 'blaze.compute', 'compute', (['expr', 'sql'], {'return_type': 'pd.Series'}), '(expr, sql, return_type=pd.Series)\n', (16937, 16971), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((16981, 17003), 'odo.odo', 'odo', (['sql', 'pd.DataFrame'], {}), '(sql, pd.DataFrame)\n', (16984, 17003), False, 'from odo import odo, drop, discover\n'), ((17044, 17084), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (17066, 17084), True, 'import pandas.util.testing as tm\n'), ((18034, 18064), 'blaze.transform', 'transform', (['filtered'], {'dist': 'dist'}), '(filtered, dist=dist)\n', (18043, 18064), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((18820, 18895), 'blaze.transform', 'transform', (['t'], {'avg_speed_in_mph': 'avg_speed_in_mph', 'mycol': '(avg_speed_in_mph + 1)'}), '(t, avg_speed_in_mph=avg_speed_in_mph, mycol=avg_speed_in_mph + 1)\n', (18829, 18895), False, 'from blaze import 
data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((18905, 18973), 'blaze.compute', 'compute', (['d[d.avg_speed_in_mph <= 200]', 'nyc'], {'return_type': 'pd.DataFrame'}), '(d[d.avg_speed_in_mph <= 200], nyc, return_type=pd.DataFrame)\n', (18912, 18973), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((19480, 19526), 'blaze.transform', 'transform', (['t'], {'pass_count': '(t.passenger_count + 1)'}), '(t, pass_count=t.passenger_count + 1)\n', (19489, 19526), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((19615, 19637), 'odo.odo', 'odo', (['result', 'pd.Series'], {}), '(result, pd.Series)\n', (19618, 19637), False, 'from odo import odo, drop, discover\n'), ((19838, 19847), 'blaze.data', 'data', (['sql'], {}), '(sql)\n', (19842, 19847), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((20121, 20159), 'blaze.compute', 'compute', (['t.nrows', 'nyc'], {'return_type': 'int'}), '(t.nrows, nyc, return_type=int)\n', (20128, 20159), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((20416, 20454), 'blaze.compute', 'compute', (['t.nrows', 'nyc'], {'return_type': 'int'}), '(t.nrows, nyc, return_type=int)\n', (20423, 20454), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((20574, 20599), 'math.modf', 'math.modf', (['(num_rows * 0.5)'], {}), '(num_rows * 0.5)\n', (20583, 20599), False, 'import math\n'), ((21311, 21371), 'blaze.compute', 'compute', (["t[['name', 'sex']]", 'sql_with_null'], {'return_type': 'list'}), "(t[['name', 'sex']], sql_with_null, 
return_type=list)\n", (21318, 21371), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((21767, 21813), 'blaze.compute', 'compute', (['expr', 'sql_with_null'], {'return_type': 'list'}), '(expr, sql_with_null, return_type=list)\n', (21774, 21813), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((21855, 21926), 'blaze.compute', 'compute', (["t[['name', 'comment', 'sex']]", 'sql_with_null'], {'return_type': 'list'}), "(t[['name', 'comment', 'sex']], sql_with_null, return_type=list)\n", (21862, 21926), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((22232, 22253), 'blaze.symbol', 'symbol', (['"""s"""', '"""string"""'], {}), "('s', 'string')\n", (22238, 22253), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((22448, 22499), 'blaze.compute', 'compute', (['s', 'sql_with_null'], {'return_type': 'pd.DataFrame'}), '(s, sql_with_null, return_type=pd.DataFrame)\n', (22455, 22499), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((22993, 23042), 'blaze.compute', 'compute', (['c1', 'sql_with_null'], {'return_type': 'pd.Series'}), '(c1, sql_with_null, return_type=pd.Series)\n', (23000, 23042), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((23054, 23105), 'blaze.compute', 'compute', (['s', 'sql_with_null'], {'return_type': 'pd.DataFrame'}), '(s, sql_with_null, return_type=pd.DataFrame)\n', (23061, 23105), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, 
symbol, transform\n'), ((642, 659), 'itertools.count', 'itertools.count', ([], {}), '()\n', (657, 659), False, 'import itertools\n'), ((1872, 1890), 'blaze.data', 'data', (['x'], {'dshape': 'ds'}), '(x, dshape=ds)\n', (1876, 1890), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((2340, 2358), 'blaze.utils.example', 'example', (['"""nyc.csv"""'], {}), "('nyc.csv')\n", (2347, 2358), False, 'from blaze.utils import example, normalize\n'), ((3170, 3212), 'odo.odo', 'odo', (["[('a', 1), (None, 1), ('c', None)]", 't'], {}), "([('a', 1), (None, 1), ('c', None)], t)\n", (3173, 3212), False, 'from odo import odo, drop, discover\n'), ((3496, 3524), 'odo.odo', 'odo', (["[('a', 1), ('b', 2)]", 't'], {}), "([('a', 1), ('b', 2)], t)\n", (3499, 3524), False, 'from odo import odo, drop, discover\n'), ((4723, 4940), 'blaze.data', 'data', (["(url % 'products')"], {'dshape': '"""var * {\n product_id: int64,\n color: ?string,\n price: float64}"""', 'primary_key': "['product_id']"}), '(url % \'products\', dshape=\n """var * {\n product_id: int64,\n color: ?string,\n price: float64}"""\n , primary_key=[\'product_id\'])\n', (4727, 4940), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((8394, 8411), 'odo.discover', 'discover', (['nyc_csv'], {}), '(nyc_csv)\n', (8402, 8411), False, 'from odo import odo, drop, discover\n'), ((8424, 8469), 'blaze.compute', 'compute', (['t.nrows', 'nyc_csv'], {'return_type': '"""core"""'}), "(t.nrows, nyc_csv, return_type='core')\n", (8431, 8469), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((8518, 8532), 'odo.odo', 'odo', (['sql', 'list'], {}), '(sql, list)\n', (8521, 8532), False, 'from odo import odo, drop, discover\n'), ((8693, 8706), 'odo.discover', 'discover', (['dta'], 
{}), '(dta)\n', (8701, 8706), False, 'from odo import odo, drop, discover\n'), ((8944, 8958), 'odo.discover', 'discover', (['data'], {}), '(data)\n', (8952, 8958), False, 'from odo import odo, drop, discover\n'), ((9163, 9201), 'pandas.DataFrame', 'pd.DataFrame', (["[{'c': 1.0}, {'c': 2.0}]"], {}), "([{'c': 1.0}, {'c': 2.0}])\n", (9175, 9201), True, 'import pandas as pd\n'), ((9308, 9320), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (9317, 9320), True, 'import numpy as np\n'), ((9363, 9379), 'numpy.arange', 'np.arange', (['(5)', '(10)'], {}), '(5, 10)\n', (9372, 9379), True, 'import numpy as np\n'), ((9467, 9483), 'odo.discover', 'discover', (['t_data'], {}), '(t_data)\n', (9475, 9483), False, 'from odo import odo, drop, discover\n'), ((9505, 9521), 'odo.discover', 'discover', (['u_data'], {}), '(u_data)\n', (9513, 9521), False, 'from odo import odo, drop, discover\n'), ((9813, 9825), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (9822, 9825), True, 'import numpy as np\n'), ((9868, 9884), 'numpy.arange', 'np.arange', (['(5)', '(10)'], {}), '(5, 10)\n', (9877, 9884), True, 'import numpy as np\n'), ((10150, 10175), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10163, 10175), False, 'import pytest\n'), ((10444, 10485), 'pandas.date_range', 'pd.date_range', (['"""2014-01-01"""', '"""2014-02-01"""'], {}), "('2014-01-01', '2014-02-01')\n", (10457, 10485), True, 'import pandas as pd\n'), ((10509, 10524), 'odo.discover', 'discover', (['dates'], {}), '(dates)\n', (10517, 10524), False, 'from odo import odo, drop, discover\n'), ((11011, 11040), 'odo.discover', 'discover', (['sql_with_timedeltas'], {}), '(sql_with_timedeltas)\n', (11019, 11040), False, 'from odo import odo, drop, discover\n'), ((11266, 11323), 'blaze.compute', 'compute', (['expr', 'sql_with_timedeltas'], {'return_type': 'timedelta'}), '(expr, sql_with_timedeltas, return_type=timedelta)\n', (11273, 11323), False, 'from blaze import data, atan2, by, coalesce, 
compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((11427, 11440), 'odo.discover', 'discover', (['sql'], {}), '(sql)\n', (11435, 11440), False, 'from odo import odo, drop, discover\n'), ((11697, 11710), 'odo.discover', 'discover', (['sql'], {}), '(sql)\n', (11705, 11710), False, 'from odo import odo, drop, discover\n'), ((12062, 12085), 'odo.odo', 'odo', (['computation', 'tuple'], {}), '(computation, tuple)\n', (12065, 12085), False, 'from odo import odo, drop, discover\n'), ((12182, 12199), 'odo.discover', 'discover', (['big_sql'], {}), '(big_sql)\n', (12190, 12199), False, 'from odo import odo, drop, discover\n'), ((12472, 12488), 'odo.discover', 'discover', (['orders'], {}), '(orders)\n', (12480, 12488), False, 'from odo import odo, drop, discover\n'), ((12746, 12765), 'blaze.utils.normalize', 'normalize', (['expected'], {}), '(expected)\n', (12755, 12765), False, 'from blaze.utils import example, normalize\n'), ((12827, 12843), 'odo.discover', 'discover', (['orders'], {}), '(orders)\n', (12835, 12843), False, 'from odo import odo, drop, discover\n'), ((13139, 13158), 'blaze.utils.normalize', 'normalize', (['expected'], {}), '(expected)\n', (13148, 13158), False, 'from blaze.utils import example, normalize\n'), ((13312, 13328), 'odo.discover', 'discover', (['orders'], {}), '(orders)\n', (13320, 13328), False, 'from odo import odo, drop, discover\n'), ((13341, 13359), 'operator.methodcaller', 'methodcaller', (['func'], {}), '(func)\n', (13353, 13359), False, 'from operator import methodcaller\n'), ((13720, 13739), 'blaze.utils.normalize', 'normalize', (['expected'], {}), '(expected)\n', (13729, 13739), False, 'from blaze.utils import example, normalize\n'), ((13796, 13810), 'odo.discover', 'discover', (['fkey'], {}), '(fkey)\n', (13804, 13810), False, 'from odo import odo, drop, discover\n'), ((14067, 14086), 'blaze.utils.normalize', 'normalize', (['expected'], {}), '(expected)\n', (14076, 14086), False, 'from blaze.utils 
import example, normalize\n'), ((14312, 14326), 'odo.discover', 'discover', (['fkey'], {}), '(fkey)\n', (14320, 14326), False, 'from odo import odo, drop, discover\n'), ((14648, 14667), 'blaze.utils.normalize', 'normalize', (['expected'], {}), '(expected)\n', (14657, 14667), False, 'from blaze.utils import example, normalize\n'), ((14791, 14805), 'odo.discover', 'discover', (['fkey'], {}), '(fkey)\n', (14799, 14805), False, 'from odo import odo, drop, discover\n'), ((15080, 15099), 'blaze.utils.normalize', 'normalize', (['expected'], {}), '(expected)\n', (15089, 15099), False, 'from blaze.utils import example, normalize\n'), ((15158, 15172), 'odo.discover', 'discover', (['fkey'], {}), '(fkey)\n', (15166, 15172), False, 'from odo import odo, drop, discover\n'), ((15408, 15427), 'blaze.utils.normalize', 'normalize', (['expected'], {}), '(expected)\n', (15417, 15427), False, 'from blaze.utils import example, normalize\n'), ((15599, 15613), 'odo.discover', 'discover', (['fkey'], {}), '(fkey)\n', (15607, 15613), False, 'from odo import odo, drop, discover\n'), ((15934, 15953), 'blaze.utils.normalize', 'normalize', (['expected'], {}), '(expected)\n', (15943, 15953), False, 'from blaze.utils import example, normalize\n'), ((16573, 16586), 'odo.discover', 'discover', (['sql'], {}), '(sql)\n', (16581, 16586), False, 'from odo import odo, drop, discover\n'), ((16872, 16885), 'odo.discover', 'discover', (['sql'], {}), '(sql)\n', (16880, 16885), False, 'from odo import odo, drop, discover\n'), ((17231, 17251), 'blaze.radians', 'radians', (['(lon2 - lon1)'], {}), '(lon2 - lon1)\n', (17238, 17251), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((17267, 17287), 'blaze.radians', 'radians', (['(lat2 - lat1)'], {}), '(lat2 - lat1)\n', (17274, 17287), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), 
((17440, 17453), 'odo.discover', 'discover', (['nyc'], {}), '(nyc)\n', (17448, 17453), False, 'from odo import odo, drop, discover\n'), ((18286, 18299), 'odo.discover', 'discover', (['nyc'], {}), '(nyc)\n', (18294, 18299), False, 'from odo import odo, drop, discover\n'), ((19052, 19065), 'odo.discover', 'discover', (['nyc'], {}), '(nyc)\n', (19060, 19065), False, 'from odo import odo, drop, discover\n'), ((19930, 19943), 'odo.discover', 'discover', (['nyc'], {}), '(nyc)\n', (19938, 19943), False, 'from odo import odo, drop, discover\n'), ((20094, 20107), 'odo.discover', 'discover', (['nyc'], {}), '(nyc)\n', (20102, 20107), False, 'from odo import odo, drop, discover\n'), ((20314, 20327), 'odo.discover', 'discover', (['nyc'], {}), '(nyc)\n', (20322, 20327), False, 'from odo import odo, drop, discover\n'), ((20723, 20740), 'odo.discover', 'discover', (['big_sql'], {}), '(big_sql)\n', (20731, 20740), False, 'from odo import odo, drop, discover\n'), ((20758, 20784), 'blaze.compute', 'compute', (['nn.nrows', 'big_sql'], {}), '(nn.nrows, big_sql)\n', (20765, 20784), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((21144, 21167), 'odo.discover', 'discover', (['sql_with_null'], {}), '(sql_with_null)\n', (21152, 21167), False, 'from odo import odo, drop, discover\n'), ((21628, 21651), 'odo.discover', 'discover', (['sql_with_null'], {}), '(sql_with_null)\n', (21636, 21651), False, 'from odo import odo, drop, discover\n'), ((22193, 22216), 'odo.discover', 'discover', (['sql_with_null'], {}), '(sql_with_null)\n', (22201, 22216), False, 'from odo import odo, drop, discover\n'), ((22883, 22906), 'odo.discover', 'discover', (['sql_with_null'], {}), '(sql_with_null)\n', (22891, 22906), False, 'from odo import odo, drop, discover\n'), ((23334, 23347), 'odo.discover', 'discover', (['nyc'], {}), '(nyc)\n', (23342, 23347), False, 'from odo import odo, drop, discover\n'), ((23371, 23406), 
'blaze.compute', 'compute', (['t', 'nyc'], {'return_type': '"""core"""'}), "(t, nyc, return_type='core')\n", (23378, 23406), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((23444, 23495), 'blaze.compute', 'compute', (['t.passenger_count', 'nyc'], {'return_type': '"""core"""'}), "(t.passenger_count, nyc, return_type='core')\n", (23451, 23495), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((23614, 23647), 'blaze.compute', 'compute', (['t', 'nyc'], {'return_type': 'list'}), '(t, nyc, return_type=list)\n', (23621, 23647), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((24110, 24124), 'odo.discover', 'discover', (['sqla'], {}), '(sqla)\n', (24118, 24124), False, 'from odo import odo, drop, discover\n'), ((24401, 24414), 'odo.discover', 'discover', (['sql'], {}), '(sql)\n', (24409, 24414), False, 'from odo import odo, drop, discover\n'), ((24659, 24672), 'odo.discover', 'discover', (['sql'], {}), '(sql)\n', (24667, 24672), False, 'from odo import odo, drop, discover\n'), ((1122, 1150), 'odo.odo', 'odo', (["[('a', 1), ('b', 2)]", 't'], {}), "([('a', 1), ('b', 2)], t)\n", (1125, 1150), False, 'from odo import odo, drop, discover\n'), ((1214, 1221), 'odo.drop', 'drop', (['t'], {}), '(t)\n', (1218, 1221), False, 'from odo import odo, drop, discover\n'), ((2034, 2046), 'odo.odo', 'odo', (['rows', 't'], {}), '(rows, t)\n', (2037, 2046), False, 'from odo import odo, drop, discover\n'), ((2110, 2117), 'odo.drop', 'drop', (['t'], {}), '(t)\n', (2114, 2117), False, 'from odo import odo, drop, discover\n'), ((2616, 2623), 'odo.drop', 'drop', (['t'], {}), '(t)\n', (2620, 2623), False, 'from odo import odo, drop, discover\n'), ((2947, 2954), 'odo.drop', 'drop', (['t'], {}), '(t)\n', (2951, 2954), False, 
'from odo import odo, drop, discover\n'), ((3275, 3282), 'odo.drop', 'drop', (['t'], {}), '(t)\n', (3279, 3282), False, 'from odo import odo, drop, discover\n'), ((3587, 3594), 'odo.drop', 'drop', (['t'], {}), '(t)\n', (3591, 3594), False, 'from odo import odo, drop, discover\n'), ((3936, 3943), 'odo.drop', 'drop', (['t'], {}), '(t)\n', (3940, 3943), False, 'from odo import odo, drop, discover\n'), ((4280, 4287), 'odo.drop', 'drop', (['t'], {}), '(t)\n', (4284, 4287), False, 'from odo import odo, drop, discover\n'), ((4624, 4631), 'odo.drop', 'drop', (['t'], {}), '(t)\n', (4628, 4631), False, 'from odo import odo, drop, discover\n'), ((4644, 4651), 'odo.drop', 'drop', (['u'], {}), '(u)\n', (4648, 4651), False, 'from odo import odo, drop, discover\n'), ((5127, 5141), 'odo.drop', 'drop', (['products'], {}), '(products)\n', (5131, 5141), False, 'from odo import odo, drop, discover\n'), ((5691, 5703), 'odo.drop', 'drop', (['orders'], {}), '(orders)\n', (5695, 5703), False, 'from odo import odo, drop, discover\n'), ((6234, 6244), 'odo.drop', 'drop', (['main'], {}), '(main)\n', (6238, 6244), False, 'from odo import odo, drop, discover\n'), ((7108, 7118), 'odo.drop', 'drop', (['pkey'], {}), '(pkey)\n', (7112, 7118), False, 'from odo import odo, drop, discover\n'), ((7721, 7731), 'odo.drop', 'drop', (['fkey'], {}), '(fkey)\n', (7725, 7731), False, 'from odo import odo, drop, discover\n'), ((7996, 8003), 'odo.drop', 'drop', (['t'], {}), '(t)\n', (8000, 8003), False, 'from odo import odo, drop, discover\n'), ((8102, 8120), 'blaze.utils.example', 'example', (['"""nyc.csv"""'], {}), "('nyc.csv')\n", (8109, 8120), False, 'from blaze.utils import example, normalize\n'), ((8337, 8344), 'odo.drop', 'drop', (['t'], {}), '(t)\n', (8341, 8344), False, 'from odo import odo, drop, discover\n'), ((9664, 9677), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (9673, 9677), True, 'import numpy as np\n'), ((10198, 10218), 'blaze.concat', 'concat', (['t', 'u'], {'axis': '(1)'}), '(t, 
u, axis=1)\n', (10204, 10218), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((11101, 11121), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'n'}), '(seconds=n)\n', (11110, 11121), False, 'from datetime import timedelta\n'), ((11503, 11521), 'blaze.compute', 'compute', (['expr', 'sql'], {}), '(expr, sql)\n', (11510, 11521), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((16027, 16041), 'odo.discover', 'discover', (['sqla'], {}), '(sqla)\n', (16035, 16041), False, 'from odo import odo, drop, discover\n'), ((16062, 16076), 'odo.discover', 'discover', (['sqlb'], {}), '(sqlb)\n', (16070, 16076), False, 'from odo import odo, drop, discover\n'), ((23809, 23828), 'odo.odo', 'odo', (['[(1,)]', 'u_data'], {}), '([(1,)], u_data)\n', (23812, 23828), False, 'from odo import odo, drop, discover\n'), ((23836, 23855), 'odo.odo', 'odo', (['[(2,)]', 't_data'], {}), '([(2,)], t_data)\n', (23839, 23855), False, 'from odo import odo, drop, discover\n'), ((24155, 24172), 'blaze.coalesce', 'coalesce', (['t.B', '(-1)'], {}), '(t.B, -1)\n', (24163, 24172), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((24269, 24287), 'blaze.coalesce', 'coalesce', (['t.A', '"""z"""'], {}), "(t.A, 'z')\n", (24277, 24287), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((6009, 6049), 'datashape.dshape', 'dshape', (['"""var * {id: int64, data: int64}"""'], {}), "('var * {id: int64, data: int64}')\n", (6015, 6049), False, 'from datashape import dshape\n'), ((6792, 6869), 'datashape.dshape', 'dshape', (['"""var * {id: int64, sym: string, price: float64, main: map[int64, T]}"""'], {}), "('var * {id: int64, sym: string, price: float64, 
main: map[int64, T]}')\n", (6798, 6869), False, 'from datashape import dshape\n'), ((7417, 7480), 'datashape.dshape', 'dshape', (['"""var * {id: int64, sym_id: map[int64, T], size: int64}"""'], {}), "('var * {id: int64, sym_id: map[int64, T], size: int64}')\n", (7423, 7480), False, 'from datashape import dshape\n'), ((10547, 10604), 'blaze.compute', 'compute', (['(sym + delta)', 'sql_with_dts'], {'return_type': 'pd.Series'}), '(sym + delta, sql_with_dts, return_type=pd.Series)\n', (10554, 10604), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((10655, 10712), 'blaze.compute', 'compute', (['(sym - delta)', 'sql_with_dts'], {'return_type': 'pd.Series'}), '(sym - delta, sql_with_dts, return_type=pd.Series)\n', (10662, 10712), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((10763, 10828), 'blaze.compute', 'compute', (['(sym - (sym - delta))', 'sql_with_dts'], {'return_type': 'pd.Series'}), '(sym - (sym - delta), sql_with_dts, return_type=pd.Series)\n', (10770, 10828), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((16688, 16710), 'odo.odo', 'odo', (['sql', 'pd.DataFrame'], {}), '(sql, pd.DataFrame)\n', (16691, 16710), False, 'from odo import odo, drop, discover\n'), ((17300, 17315), 'blaze.sin', 'sin', (['(dlat / 2.0)'], {}), '(dlat / 2.0)\n', (17303, 17315), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((17397, 17404), 'blaze.sqrt', 'sqrt', (['a'], {}), '(a)\n', (17401, 17404), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((17406, 17417), 'blaze.sqrt', 'sqrt', (['(1 - a)'], {}), '(1 - a)\n', (17410, 
17417), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((18153, 18206), 'blaze.compute', 'compute', (['transformed.dist', 'nyc'], {'return_type': 'pd.Series'}), '(transformed.dist, nyc, return_type=pd.Series)\n', (18160, 18206), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((3827, 3868), 'pandas.date_range', 'pd.date_range', (['"""2014-01-01"""', '"""2014-02-01"""'], {}), "('2014-01-01', '2014-02-01')\n", (3840, 3868), True, 'import pandas as pd\n'), ((4171, 4191), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'n'}), '(seconds=n)\n', (4180, 4191), False, 'from datetime import timedelta\n'), ((6524, 6557), 'numpy.random.choice', 'np.random.choice', (['choices'], {'size': 'n'}), '(choices, size=n)\n', (6540, 6557), True, 'import numpy as np\n'), ((6588, 6627), 'numpy.random.uniform', 'np.random.uniform', (['(10000)', '(20000)'], {'size': 'n'}), '(10000, 20000, size=n)\n', (6605, 6627), True, 'import numpy as np\n'), ((9566, 9578), 'blaze.concat', 'concat', (['t', 'u'], {}), '(t, u)\n', (9572, 9578), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((11546, 11586), 'blaze.compute', 'compute', (['t.B', 'sql'], {'return_type': 'pd.Series'}), '(t.B, sql, return_type=pd.Series)\n', (11553, 11586), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((17323, 17332), 'blaze.cos', 'cos', (['lat1'], {}), '(lat1)\n', (17326, 17332), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((17335, 17344), 'blaze.cos', 'cos', (['lat2'], {}), '(lat2)\n', (17338, 17344), False, 'from blaze import data, atan2, by, coalesce, 
compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((17347, 17362), 'blaze.sin', 'sin', (['(dlon / 2.0)'], {}), '(dlon / 2.0)\n', (17350, 17362), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((19653, 19694), 'blaze.compute', 'compute', (['t', 'nyc'], {'return_type': 'pd.DataFrame'}), '(t, nyc, return_type=pd.DataFrame)\n', (19660, 19694), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n'), ((5905, 5926), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (5922, 5926), True, 'import numpy as np\n'), ((7290, 7314), 'numpy.random.randint', 'np.random.randint', (['(10000)'], {}), '(10000)\n', (7307, 7314), True, 'import numpy as np\n'), ((9088, 9121), 'odo.odo', 'odo', (['sql_with_float', 'pd.DataFrame'], {}), '(sql_with_float, pd.DataFrame)\n', (9091, 9121), False, 'from odo import odo, drop, discover\n'), ((16146, 16201), 'blaze.compute', 'compute', (['expr', '{t: sqla, s: sqlb}'], {'return_type': '"""native"""'}), "(expr, {t: sqla, s: sqlb}, return_type='native')\n", (16153, 16201), False, 'from blaze import data, atan2, by, coalesce, compute, concat, cos, greatest, join, least, radians, sin, sqrt, symbol, transform\n')] |
#!/usr/bin/env python
import numpy as np
def bezier(t, p0, p1, p2, p3):
    """Evaluate a cubic Bezier curve at parameter ``t``.

    Uses the Bernstein-polynomial form with control points ``p0``..``p3``.
    ``t`` is expected in [0, 1]: t=0 yields ``p0`` and t=1 yields ``p3``.
    """
    u = 1 - t
    return (p0 * u ** 3
            + 3 * p1 * t * u ** 2
            + 3 * p2 * t ** 2 * u
            + p3 * t ** 3)
def angle_deg(a1, a2):
    """Return ``a1 + a2`` wrapped into the interval [-180, 180) degrees."""
    total = a1 + a2
    # The extra full turn (3*180 instead of 180) keeps the argument of
    # np.mod positive for moderately negative sums; mod-360 removes it.
    return np.mod(total + 3 * 180.0, 360) - 180
def angle_rad(a1, a2):
    """Return ``a1 + a2`` wrapped into the interval [-pi, pi) radians."""
    # Shift by 3*pi (one extra full turn beyond the usual +pi) before the
    # modulo, then shift back; equivalent to the classic wrap formula.
    wrapped = np.mod(a1 + a2 + 3 * np.pi, 2 * np.pi)
    return wrapped - np.pi
def homothetie_vec(vec, theta, x, y):
    """Apply a rigid 2D transform to ``vec`` in homogeneous coordinates.

    Rotates by ``theta`` radians and translates by ``(x, y)``. ``vec`` is
    a length-3 vector or a 3 x N stack of homogeneous points.
    """
    c, s = np.cos(theta), np.sin(theta)
    transform = np.array([[c, -s, x],
                          [s, c, y],
                          [0, 0, 1]])
    return np.dot(transform, vec)
def rotation(vec, theta):
    """Rotate a 2D vector (or 2 x N stack) counter-clockwise by ``theta`` radians."""
    c = np.cos(theta)
    s = np.sin(theta)
    rot = np.array([[c, -s],
                    [s, c]])
    return np.dot(rot, vec)
def draw_kayak(theta, x, y):
    """Return the kayak hull outline placed at (x, y) with heading ``theta``.

    The hull is a closed polygon given as a 3 x 8 homogeneous-coordinate
    array in the body frame, mapped to the world frame by a rigid transform.
    """
    xs = [-1.4, 1.0, 1.4, 1.4, 1.0, -1.4, -1.4, -1.4]
    ys = [-0.4, -0.4, -0.2, 0.2, 0.4, 0.4, -0.4, -0.4]
    hull = np.array([xs, ys, [1.0] * len(xs)])
    return homothetie_vec(hull, theta, x, y)
| [
"numpy.array",
"numpy.dot",
"numpy.cos",
"numpy.sin",
"numpy.mod"
] | [((576, 590), 'numpy.dot', 'np.dot', (['R', 'vec'], {}), '(R, vec)\n', (582, 590), True, 'import numpy as np\n'), ((733, 747), 'numpy.dot', 'np.dot', (['R', 'vec'], {}), '(R, vec)\n', (739, 747), True, 'import numpy as np\n'), ((805, 939), 'numpy.array', 'np.array', (['[[-1.4, 1.0, 1.4, 1.4, 1.0, -1.4, -1.4, -1.4], [-0.4, -0.4, -0.2, 0.2, 0.4,\n 0.4, -0.4, -0.4], [1, 1, 1, 1, 1, 1, 1, 1]]'], {}), '([[-1.4, 1.0, 1.4, 1.4, 1.0, -1.4, -1.4, -1.4], [-0.4, -0.4, -0.2, \n 0.2, 0.4, 0.4, -0.4, -0.4], [1, 1, 1, 1, 1, 1, 1, 1]])\n', (813, 939), True, 'import numpy as np\n'), ((249, 281), 'numpy.mod', 'np.mod', (['(a1 + a2 + 3 * 180.0)', '(360)'], {}), '(a1 + a2 + 3 * 180.0, 360)\n', (255, 281), True, 'import numpy as np\n'), ((322, 360), 'numpy.mod', 'np.mod', (['(a1 + a2 + 3 * np.pi)', '(2 * np.pi)'], {}), '(a1 + a2 + 3 * np.pi, 2 * np.pi)\n', (328, 360), True, 'import numpy as np\n'), ((446, 459), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (452, 459), True, 'import numpy as np\n'), ((500, 513), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (506, 513), True, 'import numpy as np\n'), ((515, 528), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (521, 528), True, 'import numpy as np\n'), ((638, 651), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (644, 651), True, 'import numpy as np\n'), ((689, 702), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (695, 702), True, 'import numpy as np\n'), ((704, 717), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (710, 717), True, 'import numpy as np\n'), ((462, 475), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (468, 475), True, 'import numpy as np\n'), ((654, 667), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (660, 667), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
from scipy.fftpack import fft, fftshift
from scipy import signal
# Compare a Blackman window of even length (M=32) against odd length (M=31):
# time-domain shape, zero-phase-windowed magnitude spectrum, and phase.
# Left column of the figure is the even case, right column the odd case.
M = 32
N = 128
hN = N//2
hM = M//2
fftbuffer = np.zeros(N)
w = signal.blackman(M)
plt.figure(1, figsize=(9.5, 6))
# Even-length window in the time domain.
plt.subplot(3,2,1)
plt.plot(np.arange(-hM, hM), w, 'b', lw=1.5)
plt.axis([-hM, hM-1, 0, 1.05])
plt.title('w1, M=32')
# Zero-phase windowing: split the window around the FFT buffer edges so
# the window center sits at sample 0 (avoids a linear phase term).
fftbuffer = np.zeros(N)
fftbuffer[:hM] = w[hM:]
fftbuffer[N-hM:] = w[:hM]
X = fft(fftbuffer)
# Magnitude spectrum in dB, shifted so bin 0 is in the middle.
mX = 20*np.log10(abs(fftshift(X)))
plt.subplot(3,2,3)
# Normalize to 0 dB peak; zoom to the central half of the spectrum.
plt.plot(np.arange(-hN, hN), mX-max(mX), 'r', lw=1.5)
plt.axis([-hN//2,hN//2,-80,1])
plt.title('mW1')
pX = np.angle(fftshift(X))
plt.subplot(3,2,5)
plt.plot(np.arange(-hN, hN), pX, 'c', lw=1.5)
plt.axis([-hN,hN-1,-np.pi,np.pi])
plt.title('pW1')
# Repeat for an odd-length window; note the different half-size (hM) and
# buffer slice bounds needed to center an odd window at sample 0.
M = 31
N = 128
hN = N//2
hM = (M+1)//2
fftbuffer = np.zeros(N)
w = signal.blackman(M)
plt.subplot(3,2,2)
plt.plot(np.arange(-hM, hM-1), w, 'b', lw=1.5)
plt.axis([-hM, hM, 0, 1.05])
plt.title('w2, M=31')
# Zero-phase split for the odd case (middle sample goes to index 0).
fftbuffer = np.zeros(N)
fftbuffer[:hM] = w[hM-1:]
fftbuffer[N-hM+1:] = w[:hM-1]
X = fft(fftbuffer)
mX = 20*np.log10(abs(fftshift(X)))
plt.subplot(3,2,4)
plt.plot(np.arange(-hN, hN), mX-max(mX), 'r', lw=1.5)
plt.axis([-hN/2,hN/2-1,-80,1])
plt.title('mW2')
pX = np.angle(fftshift(X))
plt.subplot(3,2,6)
plt.plot(np.arange(-hN, hN), pX, 'c', lw=1.5)
plt.axis([-hN,hN-1,-np.pi,np.pi])
plt.title('pW2')
plt.tight_layout()
plt.savefig('blackman-even-odd.png')
plt.show()
| [
"scipy.signal.blackman",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"scipy.fftpack.fftshift",
"numpy.zeros",
"matplotlib.pyplot.figure",
"scipy.fftpack.fft",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"numpy.arange",
"matplotlib.pyplot... | [((169, 180), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (177, 180), True, 'import numpy as np\n'), ((185, 203), 'scipy.signal.blackman', 'signal.blackman', (['M'], {}), '(M)\n', (200, 203), False, 'from scipy import signal\n'), ((205, 236), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(9.5, 6)'}), '(1, figsize=(9.5, 6))\n', (215, 236), True, 'import matplotlib.pyplot as plt\n'), ((238, 258), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(1)'], {}), '(3, 2, 1)\n', (249, 258), True, 'import matplotlib.pyplot as plt\n'), ((302, 334), 'matplotlib.pyplot.axis', 'plt.axis', (['[-hM, hM - 1, 0, 1.05]'], {}), '([-hM, hM - 1, 0, 1.05])\n', (310, 334), True, 'import matplotlib.pyplot as plt\n'), ((333, 354), 'matplotlib.pyplot.title', 'plt.title', (['"""w1, M=32"""'], {}), "('w1, M=32')\n", (342, 354), True, 'import matplotlib.pyplot as plt\n'), ((368, 379), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (376, 379), True, 'import numpy as np\n'), ((460, 474), 'scipy.fftpack.fft', 'fft', (['fftbuffer'], {}), '(fftbuffer)\n', (463, 474), False, 'from scipy.fftpack import fft, fftshift\n'), ((514, 534), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(3)'], {}), '(3, 2, 3)\n', (525, 534), True, 'import matplotlib.pyplot as plt\n'), ((587, 624), 'matplotlib.pyplot.axis', 'plt.axis', (['[-hN // 2, hN // 2, -80, 1]'], {}), '([-hN // 2, hN // 2, -80, 1])\n', (595, 624), True, 'import matplotlib.pyplot as plt\n'), ((618, 634), 'matplotlib.pyplot.title', 'plt.title', (['"""mW1"""'], {}), "('mW1')\n", (627, 634), True, 'import matplotlib.pyplot as plt\n'), ((663, 683), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(5)'], {}), '(3, 2, 5)\n', (674, 683), True, 'import matplotlib.pyplot as plt\n'), ((728, 766), 'matplotlib.pyplot.axis', 'plt.axis', (['[-hN, hN - 1, -np.pi, np.pi]'], {}), '([-hN, hN - 1, -np.pi, np.pi])\n', (736, 766), True, 'import matplotlib.pyplot as plt\n'), ((762, 778), 
'matplotlib.pyplot.title', 'plt.title', (['"""pW1"""'], {}), "('pW1')\n", (771, 778), True, 'import matplotlib.pyplot as plt\n'), ((836, 847), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (844, 847), True, 'import numpy as np\n'), ((852, 870), 'scipy.signal.blackman', 'signal.blackman', (['M'], {}), '(M)\n', (867, 870), False, 'from scipy import signal\n'), ((872, 892), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(2)'], {}), '(3, 2, 2)\n', (883, 892), True, 'import matplotlib.pyplot as plt\n'), ((938, 966), 'matplotlib.pyplot.axis', 'plt.axis', (['[-hM, hM, 0, 1.05]'], {}), '([-hM, hM, 0, 1.05])\n', (946, 966), True, 'import matplotlib.pyplot as plt\n'), ((967, 988), 'matplotlib.pyplot.title', 'plt.title', (['"""w2, M=31"""'], {}), "('w2, M=31')\n", (976, 988), True, 'import matplotlib.pyplot as plt\n'), ((1002, 1013), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1010, 1013), True, 'import numpy as np\n'), ((1101, 1115), 'scipy.fftpack.fft', 'fft', (['fftbuffer'], {}), '(fftbuffer)\n', (1104, 1115), False, 'from scipy.fftpack import fft, fftshift\n'), ((1155, 1175), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(4)'], {}), '(3, 2, 4)\n', (1166, 1175), True, 'import matplotlib.pyplot as plt\n'), ((1228, 1267), 'matplotlib.pyplot.axis', 'plt.axis', (['[-hN / 2, hN / 2 - 1, -80, 1]'], {}), '([-hN / 2, hN / 2 - 1, -80, 1])\n', (1236, 1267), True, 'import matplotlib.pyplot as plt\n'), ((1259, 1275), 'matplotlib.pyplot.title', 'plt.title', (['"""mW2"""'], {}), "('mW2')\n", (1268, 1275), True, 'import matplotlib.pyplot as plt\n'), ((1304, 1324), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(6)'], {}), '(3, 2, 6)\n', (1315, 1324), True, 'import matplotlib.pyplot as plt\n'), ((1369, 1407), 'matplotlib.pyplot.axis', 'plt.axis', (['[-hN, hN - 1, -np.pi, np.pi]'], {}), '([-hN, hN - 1, -np.pi, np.pi])\n', (1377, 1407), True, 'import matplotlib.pyplot as plt\n'), ((1403, 1419), 'matplotlib.pyplot.title', 'plt.title', 
(['"""pW2"""'], {}), "('pW2')\n", (1412, 1419), True, 'import matplotlib.pyplot as plt\n'), ((1421, 1439), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1437, 1439), True, 'import matplotlib.pyplot as plt\n'), ((1440, 1476), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""blackman-even-odd.png"""'], {}), "('blackman-even-odd.png')\n", (1451, 1476), True, 'import matplotlib.pyplot as plt\n'), ((1477, 1487), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1485, 1487), True, 'import matplotlib.pyplot as plt\n'), ((266, 284), 'numpy.arange', 'np.arange', (['(-hM)', 'hM'], {}), '(-hM, hM)\n', (275, 284), True, 'import numpy as np\n'), ((542, 560), 'numpy.arange', 'np.arange', (['(-hN)', 'hN'], {}), '(-hN, hN)\n', (551, 560), True, 'import numpy as np\n'), ((650, 661), 'scipy.fftpack.fftshift', 'fftshift', (['X'], {}), '(X)\n', (658, 661), False, 'from scipy.fftpack import fft, fftshift\n'), ((691, 709), 'numpy.arange', 'np.arange', (['(-hN)', 'hN'], {}), '(-hN, hN)\n', (700, 709), True, 'import numpy as np\n'), ((900, 922), 'numpy.arange', 'np.arange', (['(-hM)', '(hM - 1)'], {}), '(-hM, hM - 1)\n', (909, 922), True, 'import numpy as np\n'), ((1183, 1201), 'numpy.arange', 'np.arange', (['(-hN)', 'hN'], {}), '(-hN, hN)\n', (1192, 1201), True, 'import numpy as np\n'), ((1291, 1302), 'scipy.fftpack.fftshift', 'fftshift', (['X'], {}), '(X)\n', (1299, 1302), False, 'from scipy.fftpack import fft, fftshift\n'), ((1332, 1350), 'numpy.arange', 'np.arange', (['(-hN)', 'hN'], {}), '(-hN, hN)\n', (1341, 1350), True, 'import numpy as np\n'), ((496, 507), 'scipy.fftpack.fftshift', 'fftshift', (['X'], {}), '(X)\n', (504, 507), False, 'from scipy.fftpack import fft, fftshift\n'), ((1137, 1148), 'scipy.fftpack.fftshift', 'fftshift', (['X'], {}), '(X)\n', (1145, 1148), False, 'from scipy.fftpack import fft, fftshift\n')] |
import argparse
import random
import time
import numpy as np
from keras.models import load_model
CHUNK_SIZE = 256
def generate(model, start_index, amount):
    """Greedily generate `amount` characters from a seed fragment of the corpus.

    When `start_index` is None a random seed position is drawn from the
    module-level corpus `all_txt`. The current CHUNK_SIZE-character window is
    one-hot encoded, the model's argmax prediction is appended, and the window
    slides forward by one character each step.

    Returns:
        (start_index, text): the seed position used and the seed plus the
        generated continuation as a single string.
    """
    if start_index is None:
        start_index = random.randint(0, len(all_txt) - CHUNK_SIZE - 1)
    window = all_txt[start_index:start_index + CHUNK_SIZE]
    generated = list(window)
    for _ in range(amount):
        # One-hot encode the current window: (batch=1, time, vocab).
        encoded = np.zeros((1, CHUNK_SIZE, num_chars))
        for pos, ch in enumerate(window):
            encoded[0, pos, char_to_idx[ch]] = 1.
        # Take the distribution at the last timestep and pick the argmax (greedy).
        scores = np.asarray(model.predict(encoded, verbose=0)[0, -1, :])
        predicted = chars[np.argmax(scores)]
        generated.append(predicted)
        window = window[1:] + predicted
    return start_index, ''.join(generated)
# Command-line interface for the generation script.
ap = argparse.ArgumentParser()
ap.add_argument('-m', '--model', type=str, default='./models/model.100.h5', help='path to model h5 file')
ap.add_argument('-n', '--num', type=int, default=100, help='num of output length')
ap.add_argument('-s', '--start_index', type=int, default=None, help='start index')
ap.add_argument('-i', '--input', type=str, default='input.txt', help='path to input txt file')
args = vars(ap.parse_args())
print(args)
# Build the character vocabulary from the corpus. These module-level names
# (all_txt, chars, char_to_idx, num_chars) are read by generate() above, so
# they must be defined before generate() is called.
all_txt = open(args['input']).read()
chars = list(sorted(set(all_txt)))
char_to_idx = {ch: idx for idx, ch in enumerate(chars)}
num_chars = len(chars)
print('loading model', args['model'])
model = load_model(args['model'])
print('generating txt')
t = time.time()
start_index, txt = generate(model, args['start_index'], args['num'])
print('done, took {}s'.format(time.time()-t))
# Save the generated text and the matching original passage (same seed span
# plus the generated length) so the two can be compared side by side.
with open('gen.txt', 'wb') as fout:
    fout.write(txt.encode('utf8'))
with open('ori.txt', 'wb') as fout:
    fout.write(all_txt[start_index:start_index+args['num']+CHUNK_SIZE].encode('utf8'))
| [
"keras.models.load_model",
"argparse.ArgumentParser",
"numpy.argmax",
"numpy.zeros",
"time.time"
] | [((775, 800), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (798, 800), False, 'import argparse\n'), ((1409, 1434), 'keras.models.load_model', 'load_model', (["args['model']"], {}), "(args['model'])\n", (1419, 1434), False, 'from keras.models import load_model\n'), ((1463, 1474), 'time.time', 'time.time', ([], {}), '()\n', (1472, 1474), False, 'import time\n'), ((384, 420), 'numpy.zeros', 'np.zeros', (['(1, CHUNK_SIZE, num_chars)'], {}), '((1, CHUNK_SIZE, num_chars))\n', (392, 420), True, 'import numpy as np\n'), ((596, 612), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (605, 612), True, 'import numpy as np\n'), ((1575, 1586), 'time.time', 'time.time', ([], {}), '()\n', (1584, 1586), False, 'import time\n')] |
import itertools
from math import sqrt
from typing import List, Sequence
import torch
import torch.nn.functional as F
# import torch should be first. Unclear issue, mentionned here: https://github.com/pytorch/pytorch/issues/2083
import numpy as np
import os
import csv
import time
import heapq
import fiona # keep this import. it sets GDAL_DATA to right value
import rasterio
from PIL import Image
import torchvision
import ttach as tta
from collections import OrderedDict, defaultdict
import pandas as pd
import geopandas as gpd
from fiona.crs import to_string
from omegaconf.errors import ConfigKeyError
from tqdm import tqdm
from rasterio import features
from shapely.geometry import Polygon
from rasterio.windows import Window
from rasterio.plot import reshape_as_image
from pathlib import Path
from omegaconf.listconfig import ListConfig
from utils.logger import dict_path
from utils.metrics import ComputePixelMetrics
from models.model_choice import net
from utils import augmentation
from utils.geoutils import vector_to_raster, clip_raster_with_gpkg
from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, \
list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file
from utils.verifications import add_background_to_num_class, validate_num_classes, assert_crs_match
try:
import boto3
except ModuleNotFoundError:
pass
# Set the logging file
from utils import utils
logging = utils.get_logger(__name__)
def _pad_diff(arr, w, h, arr_shape):
""" Pads img_arr width or height < samples_size with zeros """
w_diff = arr_shape - w
h_diff = arr_shape - h
if len(arr.shape) > 2:
padded_arr = np.pad(arr, ((0, w_diff), (0, h_diff), (0, 0)), "constant", constant_values=np.nan)
else:
padded_arr = np.pad(arr, ((0, w_diff), (0, h_diff)), "constant", constant_values=np.nan)
return padded_arr
def _pad(arr, chunk_size):
""" Pads img_arr """
aug = int(round(chunk_size * (1 - 1.0 / 2.0)))
if len(arr.shape) > 2:
padded_arr = np.pad(arr, ((aug, aug), (aug, aug), (0, 0)), mode='reflect')
else:
padded_arr = np.pad(arr, ((aug, aug), (aug, aug)), mode='reflect')
return padded_arr
def ras2vec(raster_file, output_path):
    """Vectorize a single-band raster into a multi-layer GeoPackage.

    Band 1 is polygonized (zero pixels masked out); features are then written
    to `output_path`, one layer per distinct pixel value.
    """
    # Create a generic polygon schema for the output vector file
    i = 0  # running count of features written, reported at the end
    feat_schema = {'geometry': 'Polygon',
                   'properties': OrderedDict([('value', 'int')])
                   }
    class_value_domain = set()
    out_features = []
    print("  - Processing raster file: {}".format(raster_file))
    with rasterio.open(raster_file, 'r') as src:
        raster = src.read(1)
        mask = raster != 0
        # Vectorize the polygons
        polygons = features.shapes(raster, mask, transform=src.transform)
    # Create shapely polygon featyres
    # NOTE(review): `polygons` is a generator consumed below, after the
    # dataset is closed — it works here because it operates on the in-memory
    # `raster` array, not on the open file handle.
    for polygon in polygons:
        feature = {'geometry': {
            'type': 'Polygon',
            'coordinates': None},
            'properties': OrderedDict([('value', 0)])}
        feature['geometry']['coordinates'] = polygon[0]['coordinates']
        value = int(polygon[1])  # Pixel value of the class (layer)
        class_value_domain.add(value)
        feature['properties']['value'] = value
        i += 1
        out_features.append(feature)
    print("  - Writing output vector file: {}".format(output_path))
    num_layers = list(class_value_domain)  # Number of unique pixel value
    for num_layer in num_layers:
        polygons = [feature for feature in out_features if feature['properties']['value'] == num_layer]
        layer_name = 'vector_' + str(num_layer).rjust(3, '0')
        print("   - Writing layer: {}".format(layer_name))
        # NOTE(review): `src` is accessed after its `with` block closed;
        # rasterio keeps `.crs` readable on a closed dataset — confirm.
        with fiona.open(output_path, 'w',
                        crs=to_string(src.crs),
                        layer=layer_name,
                        schema=feat_schema,
                        driver='GPKG') as dest:
            for polygon in polygons:
                dest.write(polygon)
    print("")
    print("Number of features written: {}".format(i))
def gen_img_samples(src, chunk_size, step, *band_order):
    """
    Yield fixed-size, reflect-padded tiles covering a whole raster.

    Args:
        src: input image (rasterio object)
        chunk_size: image tile size
        step: stride used during inference (in pixels)
        *band_order: optional band index sequence forwarded to src.read()

    Yields:
        (tile_array, row, column) for every window position, rows first.
    """
    positions = itertools.product(range(0, src.height, step),
                                  range(0, src.width, step))
    for row, column in positions:
        win = Window.from_slices(slice(row, row + chunk_size),
                                 slice(column, column + chunk_size))
        if band_order:
            tile = reshape_as_image(src.read(band_order[0], window=win))
        else:
            tile = reshape_as_image(src.read(window=win))
        # Edge windows come back short; extend them to a full square first.
        if tile.shape[0] < chunk_size or tile.shape[1] < chunk_size:
            tile = _pad_diff(tile, tile.shape[0], tile.shape[1], chunk_size)
        yield _pad(tile, chunk_size), row, column
@torch.no_grad()
def segmentation(param,
                 input_image,
                 label_arr,
                 num_classes: int,
                 gpkg_name,
                 model,
                 chunk_size: int,
                 device,
                 scale: List,
                 BGR_to_RGB: bool,
                 tp_mem,
                 debug=False,
                 ):
    """
    Run sliding-window inference over a raster, with test-time augmentation
    and spline-window blending of overlapping tiles. Optionally benchmarks
    the prediction against a rasterized label.
    Args:
        param: parameter dict
        input_image: opened image (rasterio object)
        label_arr: numpy array of label if available
        num_classes: number of classes
        gpkg_name: geo-package name if available
        model: model weights
        chunk_size: image tile size
        device: cuda/cpu device
        scale: scale range
        BGR_to_RGB: True/False
        tp_mem: memory temp file for saving numpy array to disk
        debug: True/False
    Returns:
        pred_img (np.ndarray): (H, W) uint8 argmax class map
        gdf (GeoDataFrame | None): per-tile metrics when label_arr is given
    """
    xmin, ymin, xmax, ymax = (input_image.bounds.left,
                              input_image.bounds.bottom,
                              input_image.bounds.right,
                              input_image.bounds.top)
    xres, yres = (abs(input_image.transform.a), abs(input_image.transform.e))
    mx = chunk_size * xres
    my = chunk_size * yres
    padded = chunk_size * 2
    h = input_image.height
    w = input_image.width
    h_ = h + padded
    w_ = w + padded
    # Half-chunk margin cropped from each padded tile after prediction.
    dist_samples = int(round(chunk_size * (1 - 1.0 / 2.0)))
    # switch to evaluate mode
    model.eval()
    # initialize test time augmentation
    transforms = tta.Compose([tta.HorizontalFlip(), ])
    # construct window for smoothing
    WINDOW_SPLINE_2D = _window_2D(window_size=padded, power=2.0)
    WINDOW_SPLINE_2D = torch.as_tensor(np.moveaxis(WINDOW_SPLINE_2D, 2, 0), ).type(torch.float)
    WINDOW_SPLINE_2D = WINDOW_SPLINE_2D.to(device)
    # On-disk accumulator for overlapping window predictions (kept out of RAM).
    fp = np.memmap(tp_mem, dtype='float16', mode='w+', shape=(h_, w_, num_classes))
    sample = {'sat_img': None, 'map_img': None, 'metadata': None}
    cnt = 0
    subdiv = 2
    # Windows overlap by half a chunk (subdiv=2).
    step = int(chunk_size / subdiv)
    total_inf_windows = int(np.ceil(input_image.height / step) * np.ceil(input_image.width / step))
    img_gen = gen_img_samples(src=input_image,
                              chunk_size=chunk_size,
                              step=step)
    start_seg = time.time()
    print_log = True
    for img in tqdm(img_gen, position=1, leave=False,
                    desc=f'Inferring on window slices of size {chunk_size}',
                    total=total_inf_windows):
        row = img[1]
        col = img[2]
        sub_image = img[0]
        image_metadata = add_metadata_from_raster_to_sample(sat_img_arr=sub_image,
                                                            raster_handle=input_image,
                                                            raster_info={})
        sample['metadata'] = image_metadata
        totensor_transform = augmentation.compose_transforms(param,
                                                             dataset="tst",
                                                             input_space=BGR_to_RGB,
                                                             scale=scale,
                                                             aug_type='totensor',
                                                             print_log=print_log)
        sample['sat_img'] = sub_image
        sample = totensor_transform(sample)
        inputs = sample['sat_img'].unsqueeze_(0)
        inputs = inputs.to(device)
        if inputs.shape[1] == 4 and any("module.modelNIR" in s for s in model.state_dict().keys()):
            ############################
            # Test Implementation of the NIR
            ############################
            # Init NIR TODO: make a proper way to read the NIR channel
            # and put an option to be able to give the idex of the NIR channel
            # Extract the NIR channel -> [batch size, H, W] since it's only one channel
            inputs_NIR = inputs[:, -1, ...]
            # add a channel to get the good size -> [:, 1, :, :]
            inputs_NIR.unsqueeze_(1)
            # take out the NIR channel and take only the RGB for the inputs
            inputs = inputs[:, :-1, ...]
            # Suggestion of implementation
            # inputs_NIR = data['NIR'].to(device)
            inputs = [inputs, inputs_NIR]
            # outputs = model(inputs, inputs_NIR)
            ############################
            # End of the test implementation module
            ############################
        output_lst = []
        for transformer in transforms:
            # augment inputs
            augmented_input = transformer.augment_image(inputs)
            augmented_output = model(augmented_input)
            if isinstance(augmented_output, OrderedDict) and 'out' in augmented_output.keys():
                augmented_output = augmented_output['out']
            logging.debug(f'Shape of augmented output: {augmented_output.shape}')
            # reverse augmentation for outputs
            deaugmented_output = transformer.deaugment_mask(augmented_output)
            deaugmented_output = F.softmax(deaugmented_output, dim=1).squeeze(dim=0)
            output_lst.append(deaugmented_output)
        outputs = torch.stack(output_lst)
        # Weight each tile by the 2D spline window, then merge TTA variants.
        outputs = torch.mul(outputs, WINDOW_SPLINE_2D)
        outputs, _ = torch.max(outputs, dim=0)
        outputs = outputs.permute(1, 2, 0)
        outputs = outputs.reshape(padded, padded, num_classes).cpu().numpy().astype('float16')
        # Crop the half-chunk margin added by _pad() before accumulating.
        outputs = outputs[dist_samples:-dist_samples, dist_samples:-dist_samples, :]
        fp[row:row + chunk_size, col:col + chunk_size, :] = \
            fp[row:row + chunk_size, col:col + chunk_size, :] + outputs
        cnt += 1
    fp.flush()
    del fp
    fp = np.memmap(tp_mem, dtype='float16', mode='r', shape=(h_, w_, num_classes))
    pred_img = np.zeros((h_, w_), dtype=np.uint8)
    for row, col in tqdm(itertools.product(range(0, input_image.height, step), range(0, input_image.width, step)),
                         leave=False,
                         total=total_inf_windows,
                         desc="Writing to array"):
        # Divide by 2**2 — presumably the number of overlapping windows per
        # pixel (subdiv squared); TODO confirm. Constant scaling does not
        # change the argmax below in any case.
        arr1 = fp[row:row + chunk_size, col:col + chunk_size, :] / (2 ** 2)
        arr1 = arr1.argmax(axis=-1).astype('uint8')
        pred_img[row:row + chunk_size, col:col + chunk_size] = arr1
    pred_img = pred_img[:h, :w]
    end_seg = time.time() - start_seg
    logging.info('Segmentation operation completed in {:.0f}m {:.0f}s'.format(end_seg // 60, end_seg % 60))
    if debug:
        logging.debug(f'Bin count of final output: {np.unique(pred_img, return_counts=True)}')
    gdf = None
    if label_arr is not None:
        # Benchmark the prediction tile-by-tile against the rasterized label.
        start_seg_ = time.time()
        feature = defaultdict(list)
        cnt = 0
        for row in tqdm(range(0, h, chunk_size), position=2, leave=False):
            for col in tqdm(range(0, w, chunk_size), position=3, leave=False):
                label = label_arr[row:row + chunk_size, col:col + chunk_size]
                pred = pred_img[row:row + chunk_size, col:col + chunk_size]
                pixelMetrics = ComputePixelMetrics(label.flatten(), pred.flatten(), num_classes)
                eval = pixelMetrics.update(pixelMetrics.iou)
                feature['id_image'].append(gpkg_name)
                for c_num in range(num_classes):
                    feature['L_count_' + str(c_num)].append(int(np.count_nonzero(label == c_num)))
                    feature['P_count_' + str(c_num)].append(int(np.count_nonzero(pred == c_num)))
                    feature['IoU_' + str(c_num)].append(eval['iou_' + str(c_num)])
                feature['mIoU'].append(eval['macro_avg_iou'])
                # Georeferenced footprint of this tile (corner coordinates).
                x_1, y_1 = (xmin + (col * xres)), (ymax - (row * yres))
                x_2, y_2 = (xmin + ((col * xres) + mx)), y_1
                x_3, y_3 = x_2, (ymax - ((row * yres) + my))
                x_4, y_4 = x_1, y_3
                geom = Polygon([(x_1, y_1), (x_2, y_2), (x_3, y_3), (x_4, y_4)])
                feature['geometry'].append(geom)
                feature['length'].append(geom.length)
                feature['pointx'].append(geom.centroid.x)
                feature['pointy'].append(geom.centroid.y)
                feature['area'].append(geom.area)
                cnt += 1
        gdf = gpd.GeoDataFrame(feature, crs=input_image.crs)
        end_seg_ = time.time() - start_seg_
        logging.info('Benchmark operation completed in {:.0f}m {:.0f}s'.format(end_seg_ // 60, end_seg_ % 60))
    input_image.close()
    return pred_img, gdf
def classifier(params, img_list, model, device, working_folder):
    """
    Classify images by class
    :param params: config dict; reads inference.state_dict_path, global.num_classes, global.bucket_name
    :param img_list: list of dicts with a 'tif' image path per entry
    :param model: classification model (called on a 1x3x299x299 tensor)
    :param device: torch device to run inference on
    :param working_folder: destination folder for classification_results.csv
    :return: None; results are written to CSV (and uploaded to S3 when a bucket is set)
    """
    weights_file_name = params['inference']['state_dict_path']
    num_classes = params['global']['num_classes']
    bucket = params['global']['bucket_name']
    # The class-name CSV lives next to the weights file.
    classes_file = weights_file_name.split('/')[:-1]
    if bucket:
        class_csv = ''
        for folder in classes_file:
            class_csv = os.path.join(class_csv, folder)
        bucket.download_file(os.path.join(class_csv, 'classes.csv'), 'classes.csv')
        with open('classes.csv', 'rt') as file:
            reader = csv.reader(file)
            classes = list(reader)
    else:
        class_csv = ''
        for c in classes_file:
            class_csv = class_csv + c + '/'
        with open(class_csv + 'classes.csv', 'rt') as f:
            reader = csv.reader(f)
            classes = list(reader)
    # One row per image: [path, predicted class, raw score per class].
    classified_results = np.empty((0, 2 + num_classes))
    for image in img_list:
        img_name = os.path.basename(image['tif'])  # TODO: pathlib
        model.eval()
        if bucket:
            img = Image.open(f"Images/{img_name}").resize((299, 299), resample=Image.BILINEAR)
        else:
            img = Image.open(image['tif']).resize((299, 299), resample=Image.BILINEAR)
        to_tensor = torchvision.transforms.ToTensor()
        img = to_tensor(img)
        img = img.unsqueeze(0)
        with torch.no_grad():
            img = img.to(device)
            outputs = model(img)
            _, predicted = torch.max(outputs, 1)
            top5 = heapq.nlargest(5, outputs.cpu().numpy()[0])
            top5_loc = []
            # Map each top-5 score back to its class index.
            for i in top5:
                top5_loc.append(np.where(outputs.cpu().numpy()[0] == i)[0][0])
            logging.info(f"Image {img_name} classified as {classes[0][predicted]}")
            logging.info('Top 5 classes:')
            for i in range(0, 5):
                logging.info(f"\t{classes[0][top5_loc[i]]} : {top5[i]}")
            classified_results = np.append(classified_results, [np.append([image['tif'], classes[0][predicted]],
                                                                      outputs.cpu().numpy()[0])], axis=0)
    csv_results = 'classification_results.csv'
    if bucket:
        np.savetxt(csv_results, classified_results, fmt='%s', delimiter=',')
        bucket.upload_file(csv_results, os.path.join(working_folder, csv_results))  # TODO: pathlib
    else:
        np.savetxt(os.path.join(working_folder, csv_results), classified_results, fmt='%s',  # TODO: pathlib
                   delimiter=',')
def calc_inference_chunk_size(gpu_devices_dict: dict, max_pix_per_mb_gpu: int = 200) -> int:
    """
    Calculate maximum chunk_size that could fit on GPU during inference based on thumb rule with hardcoded
    "pixels per MB of GPU RAM" as threshold. Threshold based on inference with a large model (Deeplabv3_resnet101)
    :param gpu_devices_dict: dictionary containing info on GPU devices as returned by lst_device_ids (utils.py)
    :param max_pix_per_mb_gpu: Maximum number of pixels that can fit on each MB of GPU (better to underestimate)
    :return: chunk size in pixels — a positive multiple of 256
    """
    # get max ram for smallest gpu (only the values are needed, not the ids)
    smallest_gpu_ram = min(gpu_info['max_ram'] for gpu_info in gpu_devices_dict.values())
    # rule of thumb to determine max chunk size based on approximate max pixels a gpu can handle during inference
    max_chunk_size = sqrt(max_pix_per_mb_gpu * smallest_gpu_ram)
    # Round down to a multiple of 256, but never below 256: flooring to 0 (tiny
    # GPU RAM) would make the inference window stride zero and crash downstream.
    max_chunk_size_rd = max(256, int(max_chunk_size - (max_chunk_size % 256)))
    logging.info(f'Images will be split into chunks of {max_chunk_size_rd}')
    return max_chunk_size_rd
def main(params: dict) -> None:
    """
    Function to manage details about the inference on segmentation task.
    1. Read the parameters from the config given.
    2. Read and load the state dict from the previous training or the given one.
    3. Make the inference on the data specifies in the config.
    -------
    :param params: (dict) Parameters found in the yaml config file.
    """
    # since = time.time()
    # PARAMETERS
    mode = get_key_def('mode', params, expected_type=str)
    task = get_key_def('task_name', params['task'], expected_type=str)
    model_name = get_key_def('model_name', params['model'], expected_type=str).lower()
    num_classes = len(get_key_def('classes_dict', params['dataset']).keys())
    modalities = read_modalities(get_key_def('modalities', params['dataset'], expected_type=str))
    BGR_to_RGB = get_key_def('BGR_to_RGB', params['dataset'], expected_type=bool)
    num_bands = len(modalities)
    debug = get_key_def('debug', params, default=False, expected_type=bool)
    # SETTING OUTPUT DIRECTORY
    try:
        state_dict = Path(params['inference']['state_dict_path']).resolve(strict=True)
    except FileNotFoundError:
        logging.info(
            f"\nThe state dict path directory '{params['inference']['state_dict_path']}' don't seem to be find," +
            f"we will try to locate a state dict path in the '{params['general']['save_weights_dir']}' " +
            f"specify during the training phase"
        )
        try:
            state_dict = Path(params['general']['save_weights_dir']).resolve(strict=True)
        except FileNotFoundError:
            # NOTE(review): logger.critical() returns None, so this `raise`
            # produces "TypeError: exceptions must derive from BaseException"
            # rather than the intended error — TODO confirm intent. Same
            # pattern recurs below.
            raise logging.critical(
                f"\nThe state dict path directory '{params['general']['save_weights_dir']}'" +
                f" don't seem to be find either, please specify the path to a state dict"
            )
    # TODO add more detail in the parent folder
    working_folder = state_dict.parent.joinpath(f'inference_{num_bands}bands')
    logging.info("\nThe state dict path directory used '{}'".format(working_folder))
    Path.mkdir(working_folder, parents=True, exist_ok=True)
    # LOGGING PARAMETERS TODO put option not just mlflow
    experiment_name = get_key_def('project_name', params['general'], default='gdl-training')
    try:
        tracker_uri = get_key_def('uri', params['tracker'], default=None, expected_type=str)
        Path(tracker_uri).mkdir(exist_ok=True)
        run_name = get_key_def('run_name', params['tracker'], default='gdl')  # TODO change for something meaningful
        run_name = '{}_{}_{}'.format(run_name, mode, task)
        logging.info(f'\nInference and log files will be saved to: {working_folder}')
        # TODO change to fit whatever inport
        from mlflow import log_params, set_tracking_uri, set_experiment, start_run, log_artifact, log_metrics
        # tracking path + parameters logging
        set_tracking_uri(tracker_uri)
        set_experiment(experiment_name)
        start_run(run_name=run_name)
        log_params(dict_path(params, 'general'))
        log_params(dict_path(params, 'dataset'))
        log_params(dict_path(params, 'data'))
        log_params(dict_path(params, 'model'))
        log_params(dict_path(params, 'inference'))
    # meaning no logging tracker as been assigned or it doesnt exist in config/logging
    except ConfigKeyError:
        logging.info(
            "\nNo logging tracker as been assigned or the yaml config doesnt exist in 'config/tracker'."
            "\nNo tracker file will be save in that case."
        )
    # MANDATORY PARAMETERS
    img_dir_or_csv = get_key_def(
        'img_dir_or_csv_file', params['inference'], default=params['general']['raw_data_csv'], expected_type=str
    )
    if not (Path(img_dir_or_csv).is_dir() or Path(img_dir_or_csv).suffix == '.csv'):
        raise logging.critical(
            FileNotFoundError(
                f'\nCouldn\'t locate .csv file or directory "{img_dir_or_csv}" containing imagery for inference'
            )
        )
    # load the checkpoint
    try:
        # Sort by modification time (mtime) descending
        sorted_by_mtime_descending = sorted(
            [os.path.join(state_dict, x) for x in os.listdir(state_dict)], key=lambda t: -os.stat(t).st_mtime
        )
        last_checkpoint_save = find_first_file('checkpoint.pth.tar', sorted_by_mtime_descending)
        if last_checkpoint_save is None:
            raise FileNotFoundError
        # change the state_dict
        state_dict = last_checkpoint_save
    except FileNotFoundError as e:
        logging.error(f"\nNo file name 'checkpoint.pth.tar' as been found at '{state_dict}'")
        raise e
    task = get_key_def('task_name', params['task'], expected_type=str)
    # TODO change it next version for all task
    if task not in ['classification', 'segmentation']:
        raise logging.critical(
            ValueError(f'\nTask should be either "classification" or "segmentation". Got {task}')
        )
    # OPTIONAL PARAMETERS
    dontcare_val = get_key_def("ignore_index", params["training"], default=-1, expected_type=int)
    num_devices = get_key_def('num_gpus', params['training'], default=0, expected_type=int)
    default_max_used_ram = 25
    max_used_ram = get_key_def('max_used_ram', params['training'], default=default_max_used_ram, expected_type=int)
    max_used_perc = get_key_def('max_used_perc', params['training'], default=25, expected_type=int)
    scale = get_key_def('scale_data', params['augmentation'], default=[0, 1], expected_type=ListConfig)
    raster_to_vec = get_key_def('ras2vec', params['inference'], False)  # FIXME not implemented with hydra
    # benchmark (ie when gkpgs are inputted along with imagery)
    dontcare = get_key_def("ignore_index", params["training"], -1)
    attribute_field = get_key_def('attribute_field', params['dataset'], None, expected_type=str)
    attr_vals = get_key_def('attribute_values', params['dataset'], None, expected_type=Sequence)
    if debug:
        logging.warning(f'\nDebug mode activated. Some debug features may mobilize extra disk space and '
                        f'cause delays in execution.')
    # Assert that all values are integers (ex.: to benchmark single-class model with multi-class labels)
    if attr_vals:
        for item in attr_vals:
            if not isinstance(item, int):
                raise ValueError(f'\nValue "{item}" in attribute_values is {type(item)}, expected int.')
    logging.info(f'\nInferences will be saved to: {working_folder}\n\n')
    if not (0 <= max_used_ram <= 100):
        logging.warning(f'\nMax used ram parameter should be a percentage. Got {max_used_ram}. '
                        f'Will set default value of {default_max_used_ram} %')
        max_used_ram = default_max_used_ram
    # AWS
    # NOTE(review): `bucket` is never reassigned in this function, so every
    # `if bucket:` branch below is currently dead code.
    bucket = None
    bucket_file_cache = []
    bucket_name = get_key_def('bucket_name', params['AWS'])
    # list of GPU devices that are available and unused. If no GPUs, returns empty dict
    gpu_devices_dict = get_device_ids(num_devices,
                                      max_used_ram_perc=max_used_ram,
                                      max_used_perc=max_used_perc)
    if gpu_devices_dict:
        chunk_size = calc_inference_chunk_size(gpu_devices_dict=gpu_devices_dict, max_pix_per_mb_gpu=50)
        logging.info(f"\nNumber of cuda devices requested: {num_devices}. "
                     f"\nCuda devices available: {gpu_devices_dict}. "
                     f"\nUsing {list(gpu_devices_dict.keys())[0]}\n\n")
        device = torch.device(f'cuda:{list(range(len(gpu_devices_dict.keys())))[0]}')
    else:
        chunk_size = get_key_def('chunk_size', params['inference'], default=512, expected_type=int)
        logging.warning(f"\nNo Cuda device available. This process will only run on CPU")
        device = torch.device('cpu')
    # CONFIGURE MODEL
    num_classes_backgr = add_background_to_num_class(task, num_classes)
    model, loaded_checkpoint, model_name = net(model_name=model_name,
                                               num_bands=num_bands,
                                               num_channels=num_classes_backgr,
                                               dontcare_val=dontcare_val,
                                               num_devices=1,
                                               net_params=params,
                                               inference_state_dict=state_dict)
    try:
        model.to(device)
    except RuntimeError:
        logging.info(f"\nUnable to use device. Trying device 0")
        device = torch.device(f'cuda' if gpu_devices_dict else 'cpu')
        model.to(device)
    # CREATE LIST OF INPUT IMAGES FOR INFERENCE
    try:
        # check if the data folder exist
        raw_data_dir = get_key_def('raw_data_dir', params['dataset'])
        my_data_path = Path(raw_data_dir).resolve(strict=True)
        logging.info("\nImage directory used '{}'".format(my_data_path))
        data_path = Path(my_data_path)
    except FileNotFoundError:
        raw_data_dir = get_key_def('raw_data_dir', params['dataset'])
        raise logging.critical(
            "\nImage directory '{}' doesn't exist, please change the path".format(raw_data_dir)
        )
    list_img = list_input_images(
        img_dir_or_csv, bucket_name, glob_patterns=["*.tif", "*.TIF"], in_case_of_path=str(data_path)
    )
    # VALIDATION: anticipate problems with imagery and label (if provided) before entering main for loop
    valid_gpkg_set = set()
    for info in tqdm(list_img, desc='Validating imagery'):
        # validate_raster(info['tif'], num_bands, meta_map)
        if 'gpkg' in info.keys() and info['gpkg'] and info['gpkg'] not in valid_gpkg_set:
            validate_num_classes(vector_file=info['gpkg'],
                                 num_classes=num_classes,
                                 attribute_name=attribute_field,
                                 ignore_index=dontcare,
                                 attribute_values=attr_vals)
            assert_crs_match(info['tif'], info['gpkg'])
            valid_gpkg_set.add(info['gpkg'])
    logging.info('\nSuccessfully validated imagery')
    if valid_gpkg_set:
        logging.info('\nSuccessfully validated label data for benchmarking')
    if task == 'classification':
        classifier(params, list_img, model, device,
                   working_folder)  # FIXME: why don't we load from checkpoint in classification?
    elif task == 'segmentation':
        gdf_ = []
        gpkg_name_ = []
        # TODO: Add verifications?
        if bucket:
            bucket.download_file(loaded_checkpoint, "saved_model.pth.tar")  # TODO: is this still valid?
            model, _ = load_from_checkpoint("saved_model.pth.tar", model)
        else:
            model, _ = load_from_checkpoint(loaded_checkpoint, model)
        # Save tracking TODO put option not just mlflow
        # NOTE(review): `'tracker_uri' in locals()` re-checks whether mlflow
        # setup succeeded earlier; the mlflow names used below only exist in
        # that case.
        if 'tracker_uri' in locals() and 'run_name' in locals():
            mode = get_key_def('mode', params, expected_type=str)
            task = get_key_def('task_name', params['task'], expected_type=str)
            run_name = '{}_{}_{}'.format(run_name, mode, task)
            # tracking path + parameters logging
            set_tracking_uri(tracker_uri)
            set_experiment(experiment_name)
            start_run(run_name=run_name)
            log_params(dict_path(params, 'inference'))
            log_params(dict_path(params, 'dataset'))
            log_params(dict_path(params, 'model'))
        # LOOP THROUGH LIST OF INPUT IMAGES
        for info in tqdm(list_img, desc='Inferring from images', position=0, leave=True):
            img_name = Path(info['tif']).name
            local_gpkg = Path(info['gpkg']) if 'gpkg' in info.keys() and info['gpkg'] else None
            gpkg_name = local_gpkg.stem if local_gpkg else None
            if bucket:
                local_img = f"Images/{img_name}"
                bucket.download_file(info['tif'], local_img)
                inference_image = f"Classified_Images/{img_name.split('.')[0]}_inference.tif"
            else:
                local_img = Path(info['tif'])
                Path.mkdir(working_folder.joinpath(local_img.parent.name), parents=True, exist_ok=True)
                inference_image = working_folder.joinpath(local_img.parent.name,
                                                          f"{img_name.split('.')[0]}_inference.tif")
            temp_file = working_folder.joinpath(local_img.parent.name, f"{img_name.split('.')[0]}.dat")
            raster = rasterio.open(local_img, 'r')
            logging.info(f'\nReading original image: {raster.name}')
            inf_meta = raster.meta
            label = None
            if local_gpkg:
                # Clip imagery to the label extent, then rasterize the label
                # so it can be benchmarked pixel-wise against the prediction.
                logging.info(f'\nBurning label as raster: {local_gpkg}')
                local_img = clip_raster_with_gpkg(raster, local_gpkg)
                raster.close()
                raster = rasterio.open(local_img, 'r')
                logging.info(f'\nReading clipped image: {raster.name}')
                inf_meta = raster.meta
                label = vector_to_raster(vector_file=local_gpkg,
                                         input_image=raster,
                                         out_shape=(inf_meta['height'], inf_meta['width']),
                                         attribute_name=attribute_field,
                                         fill=0,  # background value in rasterized vector.
                                         attribute_values=attr_vals)
                if debug:
                    logging.debug(f'\nUnique values in loaded label as raster: {np.unique(label)}\n'
                                  f'Shape of label as raster: {label.shape}')
            pred, gdf = segmentation(param=params,
                                     input_image=raster,
                                     label_arr=label,
                                     num_classes=num_classes_backgr,
                                     gpkg_name=gpkg_name,
                                     model=model,
                                     chunk_size=chunk_size,
                                     device=device,
                                     scale=scale,
                                     BGR_to_RGB=BGR_to_RGB,
                                     tp_mem=temp_file,
                                     debug=debug)
            if gdf is not None:
                gdf_.append(gdf)
                gpkg_name_.append(gpkg_name)
            if local_gpkg and 'tracker_uri' in locals():
                pixelMetrics = ComputePixelMetrics(label, pred, num_classes_backgr)
                log_metrics(pixelMetrics.update(pixelMetrics.iou))
                log_metrics(pixelMetrics.update(pixelMetrics.dice))
            pred = pred[np.newaxis, :, :].astype(np.uint8)
            inf_meta.update({"driver": "GTiff",
                             "height": pred.shape[1],
                             "width": pred.shape[2],
                             "count": pred.shape[0],
                             "dtype": 'uint8',
                             "compress": 'lzw'})
            logging.info(f'\nSuccessfully inferred on {img_name}\nWriting to file: {inference_image}')
            with rasterio.open(inference_image, 'w+', **inf_meta) as dest:
                dest.write(pred)
            del pred
            try:
                temp_file.unlink()
            except OSError as e:
                logging.warning(f'File Error: {temp_file, e.strerror}')
            if raster_to_vec:
                start_vec = time.time()
                inference_vec = working_folder.joinpath(local_img.parent.name,
                                                        f"{img_name.split('.')[0]}_inference.gpkg")
                ras2vec(inference_image, inference_vec)
                end_vec = time.time() - start_vec
                logging.info('Vectorization completed in {:.0f}m {:.0f}s'.format(end_vec // 60, end_vec % 60))
        if len(gdf_) >= 1:
            if not len(gdf_) == len(gpkg_name_):
                raise logging.critical(ValueError('\nbenchmarking unable to complete'))
            all_gdf = pd.concat(gdf_)  # Concatenate all geo data frame into one geo data frame
            all_gdf.reset_index(drop=True, inplace=True)
            gdf_x = gpd.GeoDataFrame(all_gdf)
            bench_gpkg = working_folder / "benchmark.gpkg"
            gdf_x.to_file(bench_gpkg, driver="GPKG", index=False)
            logging.info(f'\nSuccessfully wrote benchmark geopackage to: {bench_gpkg}')
# log_artifact(working_folder) | [
"torch.mul",
"fiona.crs.to_string",
"utils.utils.get_device_ids",
"utils.verifications.add_background_to_num_class",
"torch.max",
"math.sqrt",
"utils.geoutils.clip_raster_with_gpkg",
"mlflow.set_experiment",
"utils.verifications.assert_crs_match",
"numpy.count_nonzero",
"shapely.geometry.Polygon... | [((1459, 1485), 'utils.utils.get_logger', 'utils.get_logger', (['__name__'], {}), '(__name__)\n', (1475, 1485), False, 'from utils import utils\n'), ((5093, 5108), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5106, 5108), False, 'import torch\n'), ((2740, 2794), 'rasterio.features.shapes', 'features.shapes', (['raster', 'mask'], {'transform': 'src.transform'}), '(raster, mask, transform=src.transform)\n', (2755, 2794), False, 'from rasterio import features\n'), ((6726, 6767), 'utils.utils._window_2D', '_window_2D', ([], {'window_size': 'padded', 'power': '(2.0)'}), '(window_size=padded, power=2.0)\n', (6736, 6767), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((6925, 6999), 'numpy.memmap', 'np.memmap', (['tp_mem'], {'dtype': '"""float16"""', 'mode': '"""w+"""', 'shape': '(h_, w_, num_classes)'}), "(tp_mem, dtype='float16', mode='w+', shape=(h_, w_, num_classes))\n", (6934, 6999), True, 'import numpy as np\n'), ((7386, 7397), 'time.time', 'time.time', ([], {}), '()\n', (7395, 7397), False, 'import time\n'), ((7434, 7564), 'tqdm.tqdm', 'tqdm', (['img_gen'], {'position': '(1)', 'leave': '(False)', 'desc': 'f"""Inferring on window slices of size {chunk_size}"""', 'total': 'total_inf_windows'}), "(img_gen, position=1, leave=False, desc=\n f'Inferring on window slices of size {chunk_size}', total=total_inf_windows\n )\n", (7438, 7564), False, 'from tqdm import tqdm\n'), ((10915, 10988), 'numpy.memmap', 'np.memmap', (['tp_mem'], {'dtype': '"""float16"""', 'mode': '"""r"""', 'shape': '(h_, w_, num_classes)'}), "(tp_mem, dtype='float16', mode='r', shape=(h_, w_, num_classes))\n", (10924, 10988), True, 'import numpy as np\n'), ((11004, 11038), 'numpy.zeros', 'np.zeros', (['(h_, w_)'], {'dtype': 'np.uint8'}), '((h_, w_), dtype=np.uint8)\n', (11012, 11038), True, 'import numpy as np\n'), ((14698, 
14728), 'numpy.empty', 'np.empty', (['(0, 2 + num_classes)'], {}), '((0, 2 + num_classes))\n', (14706, 14728), True, 'import numpy as np\n'), ((17244, 17287), 'math.sqrt', 'sqrt', (['(max_pix_per_mb_gpu * smallest_gpu_ram)'], {}), '(max_pix_per_mb_gpu * smallest_gpu_ram)\n', (17248, 17287), False, 'from math import sqrt\n'), ((17915, 17961), 'utils.utils.get_key_def', 'get_key_def', (['"""mode"""', 'params'], {'expected_type': 'str'}), "('mode', params, expected_type=str)\n", (17926, 17961), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((17973, 18032), 'utils.utils.get_key_def', 'get_key_def', (['"""task_name"""', "params['task']"], {'expected_type': 'str'}), "('task_name', params['task'], expected_type=str)\n", (17984, 18032), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((18312, 18376), 'utils.utils.get_key_def', 'get_key_def', (['"""BGR_to_RGB"""', "params['dataset']"], {'expected_type': 'bool'}), "('BGR_to_RGB', params['dataset'], expected_type=bool)\n", (18323, 18376), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((18421, 18484), 'utils.utils.get_key_def', 'get_key_def', (['"""debug"""', 'params'], {'default': '(False)', 'expected_type': 'bool'}), "('debug', params, default=False, expected_type=bool)\n", (18432, 18484), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((19533, 19588), 'pathlib.Path.mkdir', 'Path.mkdir', (['working_folder'], {'parents': '(True)', 'exist_ok': '(True)'}), '(working_folder, 
parents=True, exist_ok=True)\n', (19543, 19588), False, 'from pathlib import Path\n'), ((19669, 19739), 'utils.utils.get_key_def', 'get_key_def', (['"""project_name"""', "params['general']"], {'default': '"""gdl-training"""'}), "('project_name', params['general'], default='gdl-training')\n", (19680, 19739), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((21067, 21189), 'utils.utils.get_key_def', 'get_key_def', (['"""img_dir_or_csv_file"""', "params['inference']"], {'default': "params['general']['raw_data_csv']", 'expected_type': 'str'}), "('img_dir_or_csv_file', params['inference'], default=params[\n 'general']['raw_data_csv'], expected_type=str)\n", (21078, 21189), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((22144, 22203), 'utils.utils.get_key_def', 'get_key_def', (['"""task_name"""', "params['task']"], {'expected_type': 'str'}), "('task_name', params['task'], expected_type=str)\n", (22155, 22203), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((22492, 22570), 'utils.utils.get_key_def', 'get_key_def', (['"""ignore_index"""', "params['training']"], {'default': '(-1)', 'expected_type': 'int'}), "('ignore_index', params['training'], default=-1, expected_type=int)\n", (22503, 22570), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((22589, 22662), 'utils.utils.get_key_def', 'get_key_def', (['"""num_gpus"""', "params['training']"], {'default': '(0)', 'expected_type': 'int'}), "('num_gpus', params['training'], 
default=0, expected_type=int)\n", (22600, 22662), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((22712, 22813), 'utils.utils.get_key_def', 'get_key_def', (['"""max_used_ram"""', "params['training']"], {'default': 'default_max_used_ram', 'expected_type': 'int'}), "('max_used_ram', params['training'], default=\n default_max_used_ram, expected_type=int)\n", (22723, 22813), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((22829, 22908), 'utils.utils.get_key_def', 'get_key_def', (['"""max_used_perc"""', "params['training']"], {'default': '(25)', 'expected_type': 'int'}), "('max_used_perc', params['training'], default=25, expected_type=int)\n", (22840, 22908), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((22921, 23016), 'utils.utils.get_key_def', 'get_key_def', (['"""scale_data"""', "params['augmentation']"], {'default': '[0, 1]', 'expected_type': 'ListConfig'}), "('scale_data', params['augmentation'], default=[0, 1],\n expected_type=ListConfig)\n", (22932, 23016), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((23033, 23083), 'utils.utils.get_key_def', 'get_key_def', (['"""ras2vec"""', "params['inference']", '(False)'], {}), "('ras2vec', params['inference'], False)\n", (23044, 23083), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((23199, 23250), 
'utils.utils.get_key_def', 'get_key_def', (['"""ignore_index"""', "params['training']", '(-1)'], {}), "('ignore_index', params['training'], -1)\n", (23210, 23250), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((23273, 23347), 'utils.utils.get_key_def', 'get_key_def', (['"""attribute_field"""', "params['dataset']", 'None'], {'expected_type': 'str'}), "('attribute_field', params['dataset'], None, expected_type=str)\n", (23284, 23347), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((23364, 23449), 'utils.utils.get_key_def', 'get_key_def', (['"""attribute_values"""', "params['dataset']", 'None'], {'expected_type': 'Sequence'}), "('attribute_values', params['dataset'], None, expected_type=Sequence\n )\n", (23375, 23449), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((24330, 24371), 'utils.utils.get_key_def', 'get_key_def', (['"""bucket_name"""', "params['AWS']"], {}), "('bucket_name', params['AWS'])\n", (24341, 24371), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((24484, 24577), 'utils.utils.get_device_ids', 'get_device_ids', (['num_devices'], {'max_used_ram_perc': 'max_used_ram', 'max_used_perc': 'max_used_perc'}), '(num_devices, max_used_ram_perc=max_used_ram, max_used_perc=\n max_used_perc)\n', (24498, 24577), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((25369, 
25415), 'utils.verifications.add_background_to_num_class', 'add_background_to_num_class', (['task', 'num_classes'], {}), '(task, num_classes)\n', (25396, 25415), False, 'from utils.verifications import add_background_to_num_class, validate_num_classes, assert_crs_match\n'), ((25459, 25642), 'models.model_choice.net', 'net', ([], {'model_name': 'model_name', 'num_bands': 'num_bands', 'num_channels': 'num_classes_backgr', 'dontcare_val': 'dontcare_val', 'num_devices': '(1)', 'net_params': 'params', 'inference_state_dict': 'state_dict'}), '(model_name=model_name, num_bands=num_bands, num_channels=\n num_classes_backgr, dontcare_val=dontcare_val, num_devices=1,\n net_params=params, inference_state_dict=state_dict)\n', (25462, 25642), False, 'from models.model_choice import net\n'), ((27008, 27049), 'tqdm.tqdm', 'tqdm', (['list_img'], {'desc': '"""Validating imagery"""'}), "(list_img, desc='Validating imagery')\n", (27012, 27049), False, 'from tqdm import tqdm\n'), ((1695, 1783), 'numpy.pad', 'np.pad', (['arr', '((0, w_diff), (0, h_diff), (0, 0))', '"""constant"""'], {'constant_values': 'np.nan'}), "(arr, ((0, w_diff), (0, h_diff), (0, 0)), 'constant', constant_values\n =np.nan)\n", (1701, 1783), True, 'import numpy as np\n'), ((1810, 1885), 'numpy.pad', 'np.pad', (['arr', '((0, w_diff), (0, h_diff))', '"""constant"""'], {'constant_values': 'np.nan'}), "(arr, ((0, w_diff), (0, h_diff)), 'constant', constant_values=np.nan)\n", (1816, 1885), True, 'import numpy as np\n'), ((2062, 2123), 'numpy.pad', 'np.pad', (['arr', '((aug, aug), (aug, aug), (0, 0))'], {'mode': '"""reflect"""'}), "(arr, ((aug, aug), (aug, aug), (0, 0)), mode='reflect')\n", (2068, 2123), True, 'import numpy as np\n'), ((2155, 2208), 'numpy.pad', 'np.pad', (['arr', '((aug, aug), (aug, aug))'], {'mode': '"""reflect"""'}), "(arr, ((aug, aug), (aug, aug)), mode='reflect')\n", (2161, 2208), True, 'import numpy as np\n'), ((2423, 2454), 'collections.OrderedDict', 'OrderedDict', (["[('value', 'int')]"], {}), 
"([('value', 'int')])\n", (2434, 2454), False, 'from collections import OrderedDict, defaultdict\n'), ((2604, 2635), 'rasterio.open', 'rasterio.open', (['raster_file', '"""r"""'], {}), "(raster_file, 'r')\n", (2617, 2635), False, 'import rasterio\n'), ((7690, 7795), 'utils.utils.add_metadata_from_raster_to_sample', 'add_metadata_from_raster_to_sample', ([], {'sat_img_arr': 'sub_image', 'raster_handle': 'input_image', 'raster_info': '{}'}), '(sat_img_arr=sub_image, raster_handle=\n input_image, raster_info={})\n', (7724, 7795), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((7985, 8122), 'utils.augmentation.compose_transforms', 'augmentation.compose_transforms', (['param'], {'dataset': '"""tst"""', 'input_space': 'BGR_to_RGB', 'scale': 'scale', 'aug_type': '"""totensor"""', 'print_log': 'print_log'}), "(param, dataset='tst', input_space=\n BGR_to_RGB, scale=scale, aug_type='totensor', print_log=print_log)\n", (8016, 8122), False, 'from utils import augmentation\n'), ((10379, 10402), 'torch.stack', 'torch.stack', (['output_lst'], {}), '(output_lst)\n', (10390, 10402), False, 'import torch\n'), ((10421, 10457), 'torch.mul', 'torch.mul', (['outputs', 'WINDOW_SPLINE_2D'], {}), '(outputs, WINDOW_SPLINE_2D)\n', (10430, 10457), False, 'import torch\n'), ((10479, 10504), 'torch.max', 'torch.max', (['outputs'], {'dim': '(0)'}), '(outputs, dim=0)\n', (10488, 10504), False, 'import torch\n'), ((11535, 11546), 'time.time', 'time.time', ([], {}), '()\n', (11544, 11546), False, 'import time\n'), ((11843, 11854), 'time.time', 'time.time', ([], {}), '()\n', (11852, 11854), False, 'import time\n'), ((11873, 11890), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (11884, 11890), False, 'from collections import OrderedDict, defaultdict\n'), ((13437, 13483), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['feature'], {'crs': 
'input_image.crs'}), '(feature, crs=input_image.crs)\n', (13453, 13483), True, 'import geopandas as gpd\n'), ((14776, 14806), 'os.path.basename', 'os.path.basename', (["image['tif']"], {}), "(image['tif'])\n", (14792, 14806), False, 'import os\n'), ((15080, 15113), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (15111, 15113), False, 'import torchvision\n'), ((16003, 16071), 'numpy.savetxt', 'np.savetxt', (['csv_results', 'classified_results'], {'fmt': '"""%s"""', 'delimiter': '""","""'}), "(csv_results, classified_results, fmt='%s', delimiter=',')\n", (16013, 16071), True, 'import numpy as np\n'), ((18230, 18293), 'utils.utils.get_key_def', 'get_key_def', (['"""modalities"""', "params['dataset']"], {'expected_type': 'str'}), "('modalities', params['dataset'], expected_type=str)\n", (18241, 18293), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((19771, 19841), 'utils.utils.get_key_def', 'get_key_def', (['"""uri"""', "params['tracker']"], {'default': 'None', 'expected_type': 'str'}), "('uri', params['tracker'], default=None, expected_type=str)\n", (19782, 19841), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((19908, 19965), 'utils.utils.get_key_def', 'get_key_def', (['"""run_name"""', "params['tracker']"], {'default': '"""gdl"""'}), "('run_name', params['tracker'], default='gdl')\n", (19919, 19965), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((20359, 20388), 'mlflow.set_tracking_uri', 'set_tracking_uri', (['tracker_uri'], {}), '(tracker_uri)\n', (20375, 20388), False, 'from mlflow import log_params, 
set_tracking_uri, set_experiment, start_run, log_artifact, log_metrics\n'), ((20397, 20428), 'mlflow.set_experiment', 'set_experiment', (['experiment_name'], {}), '(experiment_name)\n', (20411, 20428), False, 'from mlflow import log_params, set_tracking_uri, set_experiment, start_run, log_artifact, log_metrics\n'), ((20437, 20465), 'mlflow.start_run', 'start_run', ([], {'run_name': 'run_name'}), '(run_name=run_name)\n', (20446, 20465), False, 'from mlflow import log_params, set_tracking_uri, set_experiment, start_run, log_artifact, log_metrics\n'), ((21770, 21835), 'utils.utils.find_first_file', 'find_first_file', (['"""checkpoint.pth.tar"""', 'sorted_by_mtime_descending'], {}), "('checkpoint.pth.tar', sorted_by_mtime_descending)\n", (21785, 21835), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((25115, 25193), 'utils.utils.get_key_def', 'get_key_def', (['"""chunk_size"""', "params['inference']"], {'default': '(512)', 'expected_type': 'int'}), "('chunk_size', params['inference'], default=512, expected_type=int)\n", (25126, 25193), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((25301, 25320), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (25313, 25320), False, 'import torch\n'), ((26257, 26303), 'utils.utils.get_key_def', 'get_key_def', (['"""raw_data_dir"""', "params['dataset']"], {}), "('raw_data_dir', params['dataset'])\n", (26268, 26303), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((26460, 26478), 'pathlib.Path', 'Path', (['my_data_path'], {}), '(my_data_path)\n', (26464, 26478), False, 'from pathlib import Path\n'), 
((2987, 3014), 'collections.OrderedDict', 'OrderedDict', (["[('value', 0)]"], {}), "([('value', 0)])\n", (2998, 3014), False, 'from collections import OrderedDict, defaultdict\n'), ((6641, 6661), 'ttach.HorizontalFlip', 'tta.HorizontalFlip', ([], {}), '()\n', (6659, 6661), True, 'import ttach as tta\n'), ((7157, 7191), 'numpy.ceil', 'np.ceil', (['(input_image.height / step)'], {}), '(input_image.height / step)\n', (7164, 7191), True, 'import numpy as np\n'), ((7194, 7227), 'numpy.ceil', 'np.ceil', (['(input_image.width / step)'], {}), '(input_image.width / step)\n', (7201, 7227), True, 'import numpy as np\n'), ((13503, 13514), 'time.time', 'time.time', ([], {}), '()\n', (13512, 13514), False, 'import time\n'), ((14200, 14231), 'os.path.join', 'os.path.join', (['class_csv', 'folder'], {}), '(class_csv, folder)\n', (14212, 14231), False, 'import os\n'), ((14261, 14299), 'os.path.join', 'os.path.join', (['class_csv', '"""classes.csv"""'], {}), "(class_csv, 'classes.csv')\n", (14273, 14299), False, 'import os\n'), ((14385, 14401), 'csv.reader', 'csv.reader', (['file'], {}), '(file)\n', (14395, 14401), False, 'import csv\n'), ((14623, 14636), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (14633, 14636), False, 'import csv\n'), ((15188, 15203), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15201, 15203), False, 'import torch\n'), ((15298, 15319), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (15307, 15319), False, 'import torch\n'), ((16112, 16153), 'os.path.join', 'os.path.join', (['working_folder', 'csv_results'], {}), '(working_folder, csv_results)\n', (16124, 16153), False, 'import os\n'), ((16201, 16242), 'os.path.join', 'os.path.join', (['working_folder', 'csv_results'], {}), '(working_folder, csv_results)\n', (16213, 16242), False, 'import os\n'), ((18050, 18111), 'utils.utils.get_key_def', 'get_key_def', (['"""model_name"""', "params['model']"], {'expected_type': 'str'}), "('model_name', params['model'], expected_type=str)\n", 
(18061, 18111), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((20485, 20513), 'utils.logger.dict_path', 'dict_path', (['params', '"""general"""'], {}), "(params, 'general')\n", (20494, 20513), False, 'from utils.logger import dict_path\n'), ((20534, 20562), 'utils.logger.dict_path', 'dict_path', (['params', '"""dataset"""'], {}), "(params, 'dataset')\n", (20543, 20562), False, 'from utils.logger import dict_path\n'), ((20583, 20608), 'utils.logger.dict_path', 'dict_path', (['params', '"""data"""'], {}), "(params, 'data')\n", (20592, 20608), False, 'from utils.logger import dict_path\n'), ((20629, 20655), 'utils.logger.dict_path', 'dict_path', (['params', '"""model"""'], {}), "(params, 'model')\n", (20638, 20655), False, 'from utils.logger import dict_path\n'), ((20676, 20706), 'utils.logger.dict_path', 'dict_path', (['params', '"""inference"""'], {}), "(params, 'inference')\n", (20685, 20706), False, 'from utils.logger import dict_path\n'), ((26057, 26109), 'torch.device', 'torch.device', (["(f'cuda' if gpu_devices_dict else 'cpu')"], {}), "(f'cuda' if gpu_devices_dict else 'cpu')\n", (26069, 26109), False, 'import torch\n'), ((26532, 26578), 'utils.utils.get_key_def', 'get_key_def', (['"""raw_data_dir"""', "params['dataset']"], {}), "('raw_data_dir', params['dataset'])\n", (26543, 26578), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((27213, 27376), 'utils.verifications.validate_num_classes', 'validate_num_classes', ([], {'vector_file': "info['gpkg']", 'num_classes': 'num_classes', 'attribute_name': 'attribute_field', 'ignore_index': 'dontcare', 'attribute_values': 'attr_vals'}), "(vector_file=info['gpkg'], num_classes=num_classes,\n attribute_name=attribute_field, 
ignore_index=dontcare, attribute_values\n =attr_vals)\n", (27233, 27376), False, 'from utils.verifications import add_background_to_num_class, validate_num_classes, assert_crs_match\n'), ((27512, 27555), 'utils.verifications.assert_crs_match', 'assert_crs_match', (["info['tif']", "info['gpkg']"], {}), "(info['tif'], info['gpkg'])\n", (27528, 27555), False, 'from utils.verifications import add_background_to_num_class, validate_num_classes, assert_crs_match\n'), ((29063, 29131), 'tqdm.tqdm', 'tqdm', (['list_img'], {'desc': '"""Inferring from images"""', 'position': '(0)', 'leave': '(True)'}), "(list_img, desc='Inferring from images', position=0, leave=True)\n", (29067, 29131), False, 'from tqdm import tqdm\n'), ((6807, 6842), 'numpy.moveaxis', 'np.moveaxis', (['WINDOW_SPLINE_2D', '(2)', '(0)'], {}), '(WINDOW_SPLINE_2D, 2, 0)\n', (6818, 6842), True, 'import numpy as np\n'), ((13071, 13128), 'shapely.geometry.Polygon', 'Polygon', (['[(x_1, y_1), (x_2, y_2), (x_3, y_3), (x_4, y_4)]'], {}), '([(x_1, y_1), (x_2, y_2), (x_3, y_3), (x_4, y_4)])\n', (13078, 13128), False, 'from shapely.geometry import Polygon\n'), ((18142, 18188), 'utils.utils.get_key_def', 'get_key_def', (['"""classes_dict"""', "params['dataset']"], {}), "('classes_dict', params['dataset'])\n", (18153, 18188), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((18546, 18590), 'pathlib.Path', 'Path', (["params['inference']['state_dict_path']"], {}), "(params['inference']['state_dict_path'])\n", (18550, 18590), False, 'from pathlib import Path\n'), ((19850, 19867), 'pathlib.Path', 'Path', (['tracker_uri'], {}), '(tracker_uri)\n', (19854, 19867), False, 'from pathlib import Path\n'), ((21632, 21659), 'os.path.join', 'os.path.join', (['state_dict', 'x'], {}), '(state_dict, x)\n', (21644, 21659), False, 'import os\n'), ((26327, 26345), 'pathlib.Path', 'Path', 
(['raw_data_dir'], {}), '(raw_data_dir)\n', (26331, 26345), False, 'from pathlib import Path\n'), ((28198, 28248), 'utils.utils.load_from_checkpoint', 'load_from_checkpoint', (['"""saved_model.pth.tar"""', 'model'], {}), "('saved_model.pth.tar', model)\n", (28218, 28248), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((28286, 28332), 'utils.utils.load_from_checkpoint', 'load_from_checkpoint', (['loaded_checkpoint', 'model'], {}), '(loaded_checkpoint, model)\n', (28306, 28332), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((28474, 28520), 'utils.utils.get_key_def', 'get_key_def', (['"""mode"""', 'params'], {'expected_type': 'str'}), "('mode', params, expected_type=str)\n", (28485, 28520), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((28540, 28599), 'utils.utils.get_key_def', 'get_key_def', (['"""task_name"""', "params['task']"], {'expected_type': 'str'}), "('task_name', params['task'], expected_type=str)\n", (28551, 28599), False, 'from utils.utils import load_from_checkpoint, get_device_ids, get_key_def, list_input_images, add_metadata_from_raster_to_sample, _window_2D, read_modalities, find_first_file\n'), ((28724, 28753), 'mlflow.set_tracking_uri', 'set_tracking_uri', (['tracker_uri'], {}), '(tracker_uri)\n', (28740, 28753), False, 'from mlflow import log_params, set_tracking_uri, set_experiment, start_run, log_artifact, log_metrics\n'), ((28766, 28797), 'mlflow.set_experiment', 'set_experiment', (['experiment_name'], {}), '(experiment_name)\n', (28780, 28797), False, 'from mlflow import log_params, set_tracking_uri, set_experiment, 
start_run, log_artifact, log_metrics\n'), ((28810, 28838), 'mlflow.start_run', 'start_run', ([], {'run_name': 'run_name'}), '(run_name=run_name)\n', (28819, 28838), False, 'from mlflow import log_params, set_tracking_uri, set_experiment, start_run, log_artifact, log_metrics\n'), ((30041, 30070), 'rasterio.open', 'rasterio.open', (['local_img', '"""r"""'], {}), "(local_img, 'r')\n", (30054, 30070), False, 'import rasterio\n'), ((33680, 33695), 'pandas.concat', 'pd.concat', (['gdf_'], {}), '(gdf_)\n', (33689, 33695), True, 'import pandas as pd\n'), ((33831, 33856), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['all_gdf'], {}), '(all_gdf)\n', (33847, 33856), True, 'import geopandas as gpd\n'), ((3766, 3784), 'fiona.crs.to_string', 'to_string', (['src.crs'], {}), '(src.crs)\n', (3775, 3784), False, 'from fiona.crs import to_string\n'), ((10259, 10295), 'torch.nn.functional.softmax', 'F.softmax', (['deaugmented_output'], {'dim': '(1)'}), '(deaugmented_output, dim=1)\n', (10268, 10295), True, 'import torch.nn.functional as F\n'), ((11734, 11773), 'numpy.unique', 'np.unique', (['pred_img'], {'return_counts': '(True)'}), '(pred_img, return_counts=True)\n', (11743, 11773), True, 'import numpy as np\n'), ((14882, 14914), 'PIL.Image.open', 'Image.open', (['f"""Images/{img_name}"""'], {}), "(f'Images/{img_name}')\n", (14892, 14914), False, 'from PIL import Image\n'), ((14991, 15015), 'PIL.Image.open', 'Image.open', (["image['tif']"], {}), "(image['tif'])\n", (15001, 15015), False, 'from PIL import Image\n'), ((21211, 21231), 'pathlib.Path', 'Path', (['img_dir_or_csv'], {}), '(img_dir_or_csv)\n', (21215, 21231), False, 'from pathlib import Path\n'), ((21244, 21264), 'pathlib.Path', 'Path', (['img_dir_or_csv'], {}), '(img_dir_or_csv)\n', (21248, 21264), False, 'from pathlib import Path\n'), ((21669, 21691), 'os.listdir', 'os.listdir', (['state_dict'], {}), '(state_dict)\n', (21679, 21691), False, 'import os\n'), ((28862, 28892), 'utils.logger.dict_path', 'dict_path', 
(['params', '"""inference"""'], {}), "(params, 'inference')\n", (28871, 28892), False, 'from utils.logger import dict_path\n'), ((28917, 28945), 'utils.logger.dict_path', 'dict_path', (['params', '"""dataset"""'], {}), "(params, 'dataset')\n", (28926, 28945), False, 'from utils.logger import dict_path\n'), ((28970, 28996), 'utils.logger.dict_path', 'dict_path', (['params', '"""model"""'], {}), "(params, 'model')\n", (28979, 28996), False, 'from utils.logger import dict_path\n'), ((29156, 29173), 'pathlib.Path', 'Path', (["info['tif']"], {}), "(info['tif'])\n", (29160, 29173), False, 'from pathlib import Path\n'), ((29204, 29222), 'pathlib.Path', 'Path', (["info['gpkg']"], {}), "(info['gpkg'])\n", (29208, 29222), False, 'from pathlib import Path\n'), ((29612, 29629), 'pathlib.Path', 'Path', (["info['tif']"], {}), "(info['tif'])\n", (29616, 29629), False, 'from pathlib import Path\n'), ((30328, 30369), 'utils.geoutils.clip_raster_with_gpkg', 'clip_raster_with_gpkg', (['raster', 'local_gpkg'], {}), '(raster, local_gpkg)\n', (30349, 30369), False, 'from utils.geoutils import vector_to_raster, clip_raster_with_gpkg\n'), ((30426, 30455), 'rasterio.open', 'rasterio.open', (['local_img', '"""r"""'], {}), "(local_img, 'r')\n", (30439, 30455), False, 'import rasterio\n'), ((30591, 30779), 'utils.geoutils.vector_to_raster', 'vector_to_raster', ([], {'vector_file': 'local_gpkg', 'input_image': 'raster', 'out_shape': "(inf_meta['height'], inf_meta['width'])", 'attribute_name': 'attribute_field', 'fill': '(0)', 'attribute_values': 'attr_vals'}), "(vector_file=local_gpkg, input_image=raster, out_shape=(\n inf_meta['height'], inf_meta['width']), attribute_name=attribute_field,\n fill=0, attribute_values=attr_vals)\n", (30607, 30779), False, 'from utils.geoutils import vector_to_raster, clip_raster_with_gpkg\n'), ((32087, 32139), 'utils.metrics.ComputePixelMetrics', 'ComputePixelMetrics', (['label', 'pred', 'num_classes_backgr'], {}), '(label, pred, num_classes_backgr)\n', (32106, 
32139), False, 'from utils.metrics import ComputePixelMetrics\n'), ((32758, 32806), 'rasterio.open', 'rasterio.open', (['inference_image', '"""w+"""'], {}), "(inference_image, 'w+', **inf_meta)\n", (32771, 32806), False, 'import rasterio\n'), ((33085, 33096), 'time.time', 'time.time', ([], {}), '()\n', (33094, 33096), False, 'import time\n'), ((18983, 19026), 'pathlib.Path', 'Path', (["params['general']['save_weights_dir']"], {}), "(params['general']['save_weights_dir'])\n", (18987, 19026), False, 'from pathlib import Path\n'), ((33358, 33369), 'time.time', 'time.time', ([], {}), '()\n', (33367, 33369), False, 'import time\n'), ((12540, 12572), 'numpy.count_nonzero', 'np.count_nonzero', (['(label == c_num)'], {}), '(label == c_num)\n', (12556, 12572), True, 'import numpy as np\n'), ((12639, 12670), 'numpy.count_nonzero', 'np.count_nonzero', (['(pred == c_num)'], {}), '(pred == c_num)\n', (12655, 12670), True, 'import numpy as np\n'), ((21709, 21719), 'os.stat', 'os.stat', (['t'], {}), '(t)\n', (21716, 21719), False, 'import os\n'), ((31124, 31140), 'numpy.unique', 'np.unique', (['label'], {}), '(label)\n', (31133, 31140), True, 'import numpy as np\n')] |
from config import Config as AnnotatorConfig
import imantics as im
import math
import numpy as np
import torch
import torchvision.transforms as transforms
from agrobot_mrcnn.models import MaskrcnnSweetPepperProtected
import logging
# Module-level logger routed into gunicorn's error stream so detector
# messages show up in the web server's log output.
logger = logging.getLogger('gunicorn.error')

# Index (as a string) of the CUDA device to run inference on; a falsy
# value means "CPU only".  Validated at TorchMaskRCNN construction time.
CUDA_DEVICE_NUM = AnnotatorConfig.CUDA_DEVICE_NUM
MODEL_DIR = "/workspace/models"  # NOTE(review): appears unused in this file — confirm before removing
# Path to the serialized state dict loaded into the model.
MODEL_PATH = AnnotatorConfig.TORCH_MASK_RCNN_FILE
# Class names come in as one comma-separated config string; the resulting
# list is indexed by the integer label ids the model emits.
CLASS_NAMES = AnnotatorConfig.TORCH_MASK_RCNN_CLASSES.split(',')
class TorchMaskRCNN():
    """Mask R-CNN wrapper that turns detections into COCO-format annotations.

    The model weights are loaded from ``MODEL_PATH`` at construction time.
    If either the requested CUDA device or the model itself cannot be
    initialized, the instance degrades gracefully: ``self.model`` is ``None``
    and both detect methods return an empty dict.
    """

    # Mask pixels with probability below this value are zeroed before the
    # masks are converted to COCO polygons.
    MASK_THRESHOLD = 0.9

    def __init__(self):
        self.device = 'cpu'
        # Try finding the specified CUDA device, use cpu otherwise.
        if CUDA_DEVICE_NUM:
            try:
                device_idx = int(CUDA_DEVICE_NUM)
                # Explicit checks instead of `assert`: asserts are stripped
                # under `python -O`, which would silently skip validation.
                if torch.cuda.is_available() and device_idx < torch.cuda.device_count():
                    self.device = torch.device(f'cuda:{device_idx}')
                    logger.info(f"[Torch] Using CUDA device ({CUDA_DEVICE_NUM})")
                else:
                    logger.info(f"[Torch] Unable to find CUDA device ({CUDA_DEVICE_NUM}), using cpu instead")
            except ValueError:
                # CUDA_DEVICE_NUM was not an integer string.
                logger.info(f"[Torch] Unable to find CUDA device ({CUDA_DEVICE_NUM}), using cpu instead")
        try:
            self.model = MaskrcnnSweetPepperProtected()
            # map_location avoids a hard failure when the checkpoint was
            # saved on a GPU but the current host only offers a CPU.
            self.model.load_state_dict(torch.load(MODEL_PATH, map_location=self.device))
            self.model.eval()
            logger.info(f"[Torch] instantiated Torch MaskRCNN model: {MODEL_PATH}")
            self.model.to(self.device)
            logger.debug(f"[Torch] Sent model to device")
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt); log the traceback for debugging.
            logger.exception(f"[Torch] Unable to initialize Torch model")
            self.model = None

    def _predict(self, rgb_image):
        """Run inference on one RGB PIL image.

        Returns ``(masks, class_ids)`` where ``masks`` is a float numpy array
        of shape (N, H, W) with sub-threshold pixels zeroed and ``class_ids``
        is a 1-D tensor of N integer label ids.
        """
        tensor = transforms.ToTensor()(rgb_image).to(self.device)
        # synchronize() requires a working CUDA runtime; skip on CPU-only hosts.
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        logger.info(f"[Torch] Detecting instances")
        outputs = self.model([tensor])
        # Move results to CPU and pull out per-instance masks and labels.
        outputs = [{k: v.to('cpu') for k, v in t.items()} for t in outputs]
        # reshape(-1) instead of squeeze(): squeeze() collapses a single
        # detection's (1,) label tensor to a 0-d scalar, breaking len().
        class_ids = outputs[0]['labels'].reshape(-1)
        # Mask R-CNN masks come out as (N, 1, H, W) — squeeze only the
        # channel dim so a single detection (N == 1) keeps its leading axis.
        masks = torch.squeeze(outputs[0]['masks'], dim=1).numpy()
        # Threshold soft masks into background/foreground.
        masks[masks < self.MASK_THRESHOLD] = 0.
        return masks, class_ids

    @torch.no_grad()
    def detect(self, image):
        """Detect instances in a full PIL image.

        Returns the detections as a COCO-format dict, or ``{}`` when the
        model failed to load.
        """
        if self.model is None:
            return {}
        logger.info(f"[Torch] Image preprocessing")
        masks, class_ids = self._predict(image.convert('RGB'))
        return TorchMaskRCNN.to_coco(masks, class_ids)

    @torch.no_grad()
    def detect_in_box(self, image, cropbox):
        """Detect instances inside ``cropbox`` of a full PIL image.

        ``cropbox`` is a ``(left, top, right, bottom)`` tuple in pixel
        coordinates.  The crop-sized masks are padded back to the full
        image size so the returned COCO annotations are positioned in the
        original image's coordinate frame.  Returns ``{}`` when the model
        failed to load.
        """
        if self.model is None:
            return {}
        logger.info(f"[Torch] Image preprocessing")
        width, height = image.size
        b_left, b_top, b_right, b_bottom = cropbox
        crop_masks, class_ids = self._predict(image.convert('RGB').crop(cropbox))
        # Re-embed each crop-sized mask at its original position in the
        # full image by zero-padding around the crop window.
        masks = np.zeros((crop_masks.shape[0], height, width))
        for i in range(masks.shape[0]):
            masks[i] = np.pad(crop_masks[i],
                              ((b_top, height - b_bottom),   # (top, bottom) padding
                               (b_left, width - b_right)),    # (left, right) padding
                              'constant', constant_values=0.)
        return TorchMaskRCNN.to_coco(masks, class_ids)

    @staticmethod
    def to_coco(masks, class_ids):
        """Convert (N, H, W) masks and N label ids to a COCO-format dict.

        Returns an empty COCO image when the counts disagree or nothing
        was detected.
        """
        if masks.shape[0] != len(class_ids) or \
           masks.shape[0] == 0:
            logger.info(f"[Torch] no instances detected")
            return im.Image(width=0, height=0).coco()
        logger.info(f"[Torch] convert to coco format")
        coco_image = im.Image(width=masks[0].shape[1], height=masks[0].shape[0])
        for i in range(masks.shape[0]):
            category = im.Category(CLASS_NAMES[class_ids[i]])
            coco_image.add(im.Mask(masks[i]), category=category)
        return coco_image.coco()
model = TorchMaskRCNN()
| [
"logging.getLogger",
"imantics.Category",
"torchvision.transforms.ToTensor",
"torch.load",
"torch.cuda.device_count",
"torch.cuda.synchronize",
"torch.no_grad",
"numpy.zeros",
"imantics.Image",
"torch.cuda.is_available",
"torch.squeeze",
"config.Config.TORCH_MASK_RCNN_CLASSES.split",
"numpy.... | [((243, 278), 'logging.getLogger', 'logging.getLogger', (['"""gunicorn.error"""'], {}), "('gunicorn.error')\n", (260, 278), False, 'import logging\n'), ((428, 478), 'config.Config.TORCH_MASK_RCNN_CLASSES.split', 'AnnotatorConfig.TORCH_MASK_RCNN_CLASSES.split', (['""","""'], {}), "(',')\n", (473, 478), True, 'from config import Config as AnnotatorConfig\n'), ((1533, 1548), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1546, 1548), False, 'import torch\n'), ((2347, 2362), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2360, 2362), False, 'import torch\n'), ((1864, 1888), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1886, 1888), False, 'import torch\n'), ((2131, 2166), 'torch.squeeze', 'torch.squeeze', (["outputs[0]['labels']"], {}), "(outputs[0]['labels'])\n", (2144, 2166), False, 'import torch\n'), ((2777, 2801), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (2799, 2801), False, 'import torch\n'), ((3049, 3084), 'torch.squeeze', 'torch.squeeze', (["outputs[0]['labels']"], {}), "(outputs[0]['labels'])\n", (3062, 3084), False, 'import torch\n'), ((3235, 3281), 'numpy.zeros', 'np.zeros', (['(crop_masks.shape[0], height, width)'], {}), '((crop_masks.shape[0], height, width))\n', (3243, 3281), True, 'import numpy as np\n'), ((3985, 4044), 'imantics.Image', 'im.Image', ([], {'width': 'masks[0].shape[1]', 'height': 'masks[0].shape[0]'}), '(width=masks[0].shape[1], height=masks[0].shape[0])\n', (3993, 4044), True, 'import imantics as im\n'), ((1106, 1136), 'agrobot_mrcnn.models.MaskrcnnSweetPepperProtected', 'MaskrcnnSweetPepperProtected', ([], {}), '()\n', (1134, 1136), False, 'from agrobot_mrcnn.models import MaskrcnnSweetPepperProtected\n'), ((3344, 3460), 'numpy.pad', 'np.pad', (['crop_masks[i]', '((b_top, height - b_bottom), (b_left, width - b_right))', '"""constant"""'], {'constant_values': '(0.0)'}), "(crop_masks[i], ((b_top, height - b_bottom), (b_left, width - b_right\n )), 'constant', 
constant_values=0.0)\n", (3350, 3460), True, 'import numpy as np\n'), ((4108, 4146), 'imantics.Category', 'im.Category', (['CLASS_NAMES[class_ids[i]]'], {}), '(CLASS_NAMES[class_ids[i]])\n', (4119, 4146), True, 'import imantics as im\n'), ((694, 719), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (717, 719), False, 'import torch\n'), ((1176, 1198), 'torch.load', 'torch.load', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (1186, 1198), False, 'import torch\n'), ((2183, 2217), 'torch.squeeze', 'torch.squeeze', (["outputs[0]['masks']"], {}), "(outputs[0]['masks'])\n", (2196, 2217), False, 'import torch\n'), ((3106, 3140), 'torch.squeeze', 'torch.squeeze', (["outputs[0]['masks']"], {}), "(outputs[0]['masks'])\n", (3119, 3140), False, 'import torch\n'), ((4174, 4191), 'imantics.Mask', 'im.Mask', (['masks[i]'], {}), '(masks[i])\n', (4181, 4191), True, 'import imantics as im\n'), ((766, 791), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (789, 791), False, 'import torch\n'), ((1811, 1832), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1830, 1832), True, 'import torchvision.transforms as transforms\n'), ((2719, 2740), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2738, 2740), True, 'import torchvision.transforms as transforms\n'), ((3873, 3900), 'imantics.Image', 'im.Image', ([], {'width': '(0)', 'height': '(0)'}), '(width=0, height=0)\n', (3881, 3900), True, 'import imantics as im\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: benchmark-opencv-resize.py
import cv2
import time
import numpy as np
"""
Some prebuilt opencv is much slower than others.
You should check with this script and make sure it prints < 1s.
On E5-2680v3, archlinux, this script prints:
0.61s for system opencv 3.4.0-2.
>5 s for anaconda opencv 3.3.1 py36h6cbbc71_1.
On E5-2650v4, this script prints:
0.6s for opencv built locally with -DWITH_OPENMP=OFF
0.6s for opencv from `pip install opencv-python`.
1.3s for opencv built locally with -DWITH_OPENMP=ON
2s for opencv from `conda install`.
"""
# random 256x256 RGB test image
img = (np.random.rand(256, 256, 3) * 255).astype('uint8')
# time.perf_counter() is the recommended clock for benchmarking:
# it is monotonic with the highest available resolution, whereas
# time.time() can jump when the wall clock is adjusted.
start = time.perf_counter()
for _ in range(1000):  # loop index unused
    out = cv2.resize(img, (384, 384))
print(time.perf_counter() - start)
| [
"cv2.resize",
"time.time",
"numpy.random.rand"
] | [((693, 704), 'time.time', 'time.time', ([], {}), '()\n', (702, 704), False, 'import time\n'), ((737, 764), 'cv2.resize', 'cv2.resize', (['img', '(384, 384)'], {}), '(img, (384, 384))\n', (747, 764), False, 'import cv2\n'), ((771, 782), 'time.time', 'time.time', ([], {}), '()\n', (780, 782), False, 'import time\n'), ((633, 660), 'numpy.random.rand', 'np.random.rand', (['(256)', '(256)', '(3)'], {}), '(256, 256, 3)\n', (647, 660), True, 'import numpy as np\n')] |
import numpy as np
from FCS2Corr import correlations
def twoFocusFCS(tau, rhox, rhoy, rhoz, c, D, w0, w1, z0, z1):
    """
    FCS cross-correlation between two spatially shifted Gaussian PSFs.
    ========== ===============================================================
    Input      Meaning
    ---------- ---------------------------------------------------------------
    tau        Lag time [s]
    rhox       Spatial shift between the two detector elements in the x, y,
    rhoy       and z direction, expressed in sample space [m]
    rhoz       (usually rhoz = 0)
    c          Concentration of fluorophores/particles [/m^3]
    D          Diffusion coefficient of the fluorophores/particles [µm^2/s]
    w0         Lateral 1/e^2 radius of the first PSF
    w1         Lateral 1/e^2 radius of the second PSF
    z0         Axial 1/e^2 value of the first PSF
    z1         Axial 1/e^2 value of the second PSF
    ========== ===============================================================
    ========== ===============================================================
    Output     Meaning
    ---------- ---------------------------------------------------------------
    G          correlations object with the result stored in G.theory
    Garray     (N, 2) array with columns [tau, G(tau)]
    ========== ===============================================================
    """
    # lateral and axial diffusion denominators
    denom_lat = 8 * D * tau + w0**2 + w1**2
    denom_ax = 8 * D * tau + z0**2 + z1**2
    # zero-shift amplitude
    amp = 2 * np.sqrt(2) / np.pi**(3/2) / c / denom_lat / np.sqrt(denom_ax)
    # argument of the exponential decay due to the spatial shift
    rho_sq = rhox**2 + rhoy**2
    arg = 16 * D * tau * rho_sq
    arg = arg + 2 * (z0**2 + z1**2) * rho_sq
    arg = arg + 2 * (w0**2 + w1**2) * rhoz**2
    Gy = amp * np.exp(-1 * arg / denom_lat / denom_ax)
    # pack [tau, G] into a two-column array (scalar tau -> single row)
    if type(Gy) == np.float64:
        Garray = np.zeros((1, 2))
    else:
        Garray = np.zeros((np.size(Gy, 0), 2))
    Garray[:, 0] = tau
    Garray[:, 1] = Gy
    G = correlations()
    setattr(G, 'theory', Garray)
    return G, Garray
def simulateTwoFocusCrossCenter(tau, c, D, w, z, shift):
    """
    Cross-correlate the central element (index 12) of a 5x5 detector array
    with every element i; each result is stored on the returned correlations
    object as attribute 'det12x<i>'.
    """
    G = correlations()
    for idx in range(np.size(w, 0)):
        # (row, col) position of element idx in the 5x5 grid
        row, col = divmod(idx, 5)
        # lateral displacement relative to the array center at (2, 2)
        dx = (col - 2) * shift
        dy = (row - 2) * shift
        _, Gtemp = twoFocusFCS(tau, dx, dy, 0, c, D, w[12], w[idx], z[12], z[idx])
        setattr(G, 'det12x' + str(idx), Gtemp)
    return G
def simulateSpatialCorr(tau, rho, c, D, w0, z0):
    """
    Simulate the 9x9 spatial correlation map of a 5x5 detector array.
    For every (shiftx, shifty) displacement the correlation is averaged over
    all detector-element pairs that overlap under that displacement.
    Results are stored in G.autoSpatial with shape (9, 9, len(tau)).
    """
    w0 = np.resize(w0, (5, 5))
    z0 = np.resize(z0, (5, 5))
    G = correlations()
    # allow a single scalar lag time
    if type(tau) == float:
        tau = [tau]
    n_tau = len(tau)
    Gall = np.zeros((9, 9, n_tau))
    for t_idx in range(n_tau):
        Gsinglet = np.zeros((9, 9))
        for shifty in np.arange(-4, 5):
            print('shifty: ' + str(shifty))
            for shiftx in np.arange(-4, 5):
                print('   shiftx: ' + str(shiftx))
                # average over all overlapping detector elements
                n_overlap = 0
                Gsum = 0
                for detx in np.arange(np.max((0, shiftx)), np.min((5, 5+shiftx))):
                    print('      detx: ' + str(detx))
                    for dety in np.arange(np.max((0, shifty)), np.min((5, 5+shifty))):
                        print('         dety: ' + str(dety))
                        dummy, Gout = twoFocusFCS(tau[t_idx], shiftx*rho, shifty*rho, 0, c, D, w0[dety, detx], w0[dety-shifty, detx-shiftx], z0[dety, detx], z0[dety-shifty, detx-shiftx])
                        Gsum += Gout[0, 1]
                        n_overlap += 1
                Gsum /= n_overlap
                Gsinglet[shifty+4, shiftx+4] = Gsum
        Gall[:, :, t_idx] = Gsinglet
    G.autoSpatial = Gall
    G.dwellTime = tau[0]
    return G
| [
"numpy.sqrt",
"FCS2Corr.correlations",
"numpy.size",
"numpy.floor",
"numpy.max",
"numpy.exp",
"numpy.resize",
"numpy.zeros",
"numpy.min",
"numpy.mod",
"numpy.arange"
] | [((1681, 1696), 'numpy.exp', 'np.exp', (['expTerm'], {}), '(expTerm)\n', (1687, 1696), True, 'import numpy as np\n'), ((1905, 1919), 'FCS2Corr.correlations', 'correlations', ([], {}), '()\n', (1917, 1919), False, 'from FCS2Corr import correlations\n'), ((2042, 2056), 'FCS2Corr.correlations', 'correlations', ([], {}), '()\n', (2054, 2056), False, 'from FCS2Corr import correlations\n'), ((2065, 2078), 'numpy.size', 'np.size', (['w', '(0)'], {}), '(w, 0)\n', (2072, 2078), True, 'import numpy as np\n'), ((2389, 2410), 'numpy.resize', 'np.resize', (['w0', '(5, 5)'], {}), '(w0, (5, 5))\n', (2398, 2410), True, 'import numpy as np\n'), ((2420, 2441), 'numpy.resize', 'np.resize', (['z0', '(5, 5)'], {}), '(z0, (5, 5))\n', (2429, 2441), True, 'import numpy as np\n'), ((2450, 2464), 'FCS2Corr.correlations', 'correlations', ([], {}), '()\n', (2462, 2464), False, 'from FCS2Corr import correlations\n'), ((2541, 2561), 'numpy.zeros', 'np.zeros', (['(9, 9, Nt)'], {}), '((9, 9, Nt))\n', (2549, 2561), True, 'import numpy as np\n'), ((1429, 1445), 'numpy.sqrt', 'np.sqrt', (['factorZ'], {}), '(factorZ)\n', (1436, 1445), True, 'import numpy as np\n'), ((1777, 1793), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {}), '((1, 2))\n', (1785, 1793), True, 'import numpy as np\n'), ((2117, 2132), 'numpy.floor', 'np.floor', (['(i / 5)'], {}), '(i / 5)\n', (2125, 2132), True, 'import numpy as np\n'), ((2148, 2160), 'numpy.mod', 'np.mod', (['i', '(5)'], {}), '(i, 5)\n', (2154, 2160), True, 'import numpy as np\n'), ((2605, 2621), 'numpy.zeros', 'np.zeros', (['(9, 9)'], {}), '((9, 9))\n', (2613, 2621), True, 'import numpy as np\n'), ((2644, 2660), 'numpy.arange', 'np.arange', (['(-4)', '(5)'], {}), '(-4, 5)\n', (2653, 2660), True, 'import numpy as np\n'), ((2732, 2748), 'numpy.arange', 'np.arange', (['(-4)', '(5)'], {}), '(-4, 5)\n', (2741, 2748), True, 'import numpy as np\n'), ((1831, 1845), 'numpy.size', 'np.size', (['Gy', '(0)'], {}), '(Gy, 0)\n', (1838, 1845), True, 'import numpy as np\n'), 
((2979, 2998), 'numpy.max', 'np.max', (['(0, shiftx)'], {}), '((0, shiftx))\n', (2985, 2998), True, 'import numpy as np\n'), ((3000, 3023), 'numpy.min', 'np.min', (['(5, 5 + shiftx)'], {}), '((5, 5 + shiftx))\n', (3006, 3023), True, 'import numpy as np\n'), ((1387, 1397), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1394, 1397), True, 'import numpy as np\n'), ((3116, 3135), 'numpy.max', 'np.max', (['(0, shifty)'], {}), '((0, shifty))\n', (3122, 3135), True, 'import numpy as np\n'), ((3137, 3160), 'numpy.min', 'np.min', (['(5, 5 + shifty)'], {}), '((5, 5 + shifty))\n', (3143, 3160), True, 'import numpy as np\n')] |
#stats.py
#catalog creation for heliocats
#https://github.com/cmoestl/heliocats
import numpy as np
import pandas as pd
import scipy
from sunpy.time import parse_time
import copy
import matplotlib.dates as mdates
import matplotlib
import seaborn as sns
import datetime
import urllib
import json
import os
import pdb
import scipy.io
import pickle
import sys
import astropy
from astropy.constants import au
import importlib
import cdflib
import matplotlib.pyplot as plt
import heliosat
import heliopy.data.spice as spicedata
import heliopy.spice as spice
from astropy.io.votable import parse_single_table
from config import data_path
from heliocats import data as hd
importlib.reload(hd) #reload again while debugging
#define AU in km
AU=au.value/1e3
######################## general position functions
# def get_mars_position_array():
# ############### Mars position
# planet_kernel=spicedata.get_kernel('planet_trajectories')
# starttime = datetime.datetime(2007, 1, 1)
# endtime = datetime.datetime(2020, 12, 31)
# res_in_hours=1
# mars_time = []
# while starttime < endtime:
# mars_time.append(starttime)
# starttime += datetime.timedelta(hours=res_in_hours)
# mars=spice.Trajectory('4')
# frame='HEEQ'
# mars.generate_positions(mars_time,'Sun',frame)
# mars.change_units(astropy.units.AU)
# [mars_r, mars_lat, mars_lon]=hd.cart2sphere(mars.x,mars.y,mars.z)
# print('mars position done')
# mars_time=np.array(mars_time)
# mars_r=np.array(mars_r)
# mars_lat=np.array(mars_lat)
# mars_lon=np.array(mars_lon)
# return [mars_time,mars_r,np.degrees(mars_lat),np.degrees(mars_lon)]
################################ HI arrival catalog ARRCAT operations ##############################
def load_higeocat_vot(file):
    """
    Read the HELCATS HIGeoCAT catalog from a VOTable file.

    Parameters
    ----------
    file : str
        Path to the .vot file, e.g. 'data/HCME_WP3_V06.vot'.

    Returns
    -------
    higeocat : the parsed table array; columns are accessed by name,
        e.g. higeocat['Date'], higeocat['SSE Speed'].

    Fix: the 'file' argument was previously ignored and the hard-coded path
    'data/HCME_WP3_V06.vot' was always read instead.
    """
    #read HIGEOCAT from https://www.helcats-fp7.eu/catalogues/wp3_cat.html
    #https://docs.astropy.org/en/stable/io/votable/
    table = parse_single_table(file)
    higeocat = table.array
    #usage e.g.
    #higeocat['Date']=parse_time(higeocat['Date'][10]).datetime
    #access data
    #a=table.array['HM HEEQ Long'][10]
    return higeocat
def get_insitu_position_time(time1, insitu_location_string, insitu_str, insitu_kernel):
    """
    HEEQ position of an in situ target at a given time.

    Returns [time, r, lat, lon] with r in AU and lat/lon in degrees; all
    values are np.nan when the target has no usable data at time1 (before
    launch, or after the end of decent in situ data).
    Note: insitu_kernel is part of the interface but is not used here
    (kernels are furnished by the caller).
    """
    # data-availability cutoffs per target:
    # True  -> data exists only AFTER the date (launch)
    # False -> data exists only BEFORE the date (end of mission / usable data)
    cutoffs = {
        'PSP':     (datetime.datetime(2018, 8, 13), True),
        'Solo':    (datetime.datetime(2020, 3, 1), True),
        'Bepi':    (datetime.datetime(2018, 10, 24), True),
        'STB':     (datetime.datetime(2014, 9, 27), False),
        'Ulysses': (datetime.datetime(2008, 5, 1), False),
    }
    if insitu_location_string in cutoffs:
        cut_date, after_only = cutoffs[insitu_location_string]
        t_num = parse_time(time1).plot_date
        cut_num = parse_time(cut_date).plot_date
        if (after_only and t_num < cut_num) or ((not after_only) and t_num > cut_num):
            return [np.nan, np.nan, np.nan, np.nan]
    # spice needs an array of times, so duplicate the single time
    t_pair = [parse_time(time1).datetime, parse_time(time1).datetime]
    traj = spice.Trajectory(insitu_str)
    traj.generate_positions(t_pair, 'Sun', 'HEEQ')
    traj.change_units(astropy.units.AU)
    r_arr, lat_arr, lon_arr = hd.cart2sphere(traj.x, traj.y, traj.z)
    # shift Earth ('3') sunward by 1.5e6 km to the L1 point
    if insitu_str == '3':
        r_arr[0] = r_arr[0] - 1.5*1e6/AU
    insitu_time = np.array(t_pair)[0]
    insitu_r = np.array(r_arr)[0]
    insitu_lat = np.array(lat_arr)[0]
    insitu_lon = np.array(lon_arr)[0]
    return [insitu_time, insitu_r, np.degrees(insitu_lat), np.degrees(insitu_lon)]
def calculate_arrival(vsse, delta, lamda, rdist, t0_num):
    """
    Arrival time and speed after Möstl and Davies 2013, using
    ta = t0 + Ri/Visse (equivalent to ta = t0 + Risse/Vsse).

    vsse   SSE speed [km/s]
    delta  separation angle between CME apex and target [deg]
    lamda  SSEF half width [deg]
    rdist  target heliocentric distance [AU]
    t0_num launch time as matplotlib date number
    Returns [arrival datetime, arrival speed Visse in km/s].
    """
    d_rad = np.radians(delta)
    l_rad = np.radians(lamda)
    # geometric projection of the SSE speed onto the target direction
    visse = vsse * (np.cos(d_rad) + np.sqrt(np.sin(l_rad)**2 - np.sin(d_rad)**2)) / (1 + np.sin(l_rad))
    # arrival time: convert AU to km and seconds to days
    ta = t0_num + (rdist * AU / visse) / (3600 * 24)
    return [mdates.num2date(ta), visse]
def make_arrival_catalog_insitu_ssef30(higeocat, arrcat, ac_old, insitu_location_string, column_list):
    """
    Build the SSEF30 arrival catalog (ARRCAT) entries for one in situ target.

    For every HIGeoCAT CME the target position at the backprojected launch
    time t0 is checked against the SSEF30 geometry (half width 30 deg).
    For hits the arrival time and speed are computed and refined by
    re-evaluating the target position at the predicted arrival time
    (up to 4 iterations). Arrival time/speed error bars come from
    propagating the SSE speed error through the same geometry.

    Parameters
    ----------
    higeocat : HIGeoCAT VOTable array (see load_higeocat_vot)
    arrcat   : DataFrame collecting the iterated ARRCAT entries
    ac_old   : DataFrame collecting the non-iterated (old style) entries
    insitu_location_string : target name, e.g. 'Earth', 'PSP', 'STA', ...
    column_list : column names for the output DataFrames

    Returns
    -------
    [arrcat, ac_old] : both DataFrames with this target's events appended.

    Fix: DataFrame.append is deprecated and removed in pandas 2.0; replaced
    by pd.concat with identical semantics (ignore_index=False).
    """
    #get parameters from HIGEOCAT for arrival catalog
    higeocat_time=parse_time(higeocat['Date']).datetime       #first HI observation
    higeocat_t0=parse_time(higeocat['SSE Launch']).datetime   #backprojected launch time
    higeocat_t0_num=parse_time(higeocat_t0).plot_date
    higeocat_vsse=np.array(higeocat['SSE Speed'])
    higeocat_vsse_err=np.array(higeocat['SSE Speed Err'])
    higeocat_sse_lon=np.array(higeocat['SSE HEEQ Long' ])
    higeocat_sse_lat=np.array(higeocat['SSE HEEQ Lat' ])
    higeocat_id=np.array(higeocat['ID'])
    higeocat_sc=np.array(higeocat['SC'])
    higeocat_pan=np.array(higeocat['PA-N'])
    higeocat_pas=np.array(higeocat['PA-S'])
    higeocat_pafit=np.array(higeocat['PA-fit'])
    higeocat_pacenter=abs((higeocat_pan+higeocat_pas)/2)

    #load spice kernel here once for each spacecraft / planet target
    if insitu_location_string=='STB':
        insitu_str='-235'
        insitu_kernel=spicedata.get_kernel('stereo_b')
        target_name='STEREO-B'
    if insitu_location_string=='STA':
        insitu_str='-234'
        insitu_kernel=spicedata.get_kernel('stereo_a_pred')
        insitu_kernel2=spicedata.get_kernel('stereo_a')
        spice.furnish(insitu_kernel2)
        target_name='STEREO-A'
    if insitu_location_string=='Mercury':
        insitu_str='1'
        insitu_kernel=spicedata.get_kernel('planet_trajectories')
        target_name='Mercury'
    if insitu_location_string=='Venus':
        insitu_str='2'
        insitu_kernel=spicedata.get_kernel('planet_trajectories')
        target_name='Venus'
    if insitu_location_string=='Earth':
        insitu_str='3'
        insitu_kernel=spicedata.get_kernel('planet_trajectories')
        target_name='Earth_L1'
    if insitu_location_string=='Mars':
        insitu_str='4'
        insitu_kernel=spicedata.get_kernel('planet_trajectories')
        target_name='Mars'
    if insitu_location_string=='PSP':
        insitu_str='-96'
        insitu_kernel=spicedata.get_kernel('psp_pred')
        target_name='PSP'
    if insitu_location_string=='Solo':
        insitu_str='Solar Orbiter'
        insitu_kernel=spicedata.get_kernel('solo_2020')
        target_name='SolarOrbiter'
    if insitu_location_string=='Bepi':
        insitu_str='BEPICOLOMBO MPO'
        insitu_kernel=spicedata.get_kernel('bepi_pred')
        target_name='BepiColombo'
    if insitu_location_string=='Ulysses':
        insitu_str='ulysses'
        insitu_kernel=spicedata.get_kernel('ulysses')
        target_name='Ulysses'
    spice.furnish(insitu_kernel)

    #half width for SSEF30
    lamda=30.0

    #new version of ARRCAT with iteration
    arrcat_insitu_list = []
    #old version without iteration
    arrcat_insitu_list_old = []

    #go through all HIGEOCAT CME events and check for hit at insitu, with 4 iterations in total
    for i in np.arange(len(higeocat_time)):

        #get insitu position for launch time t0
        [insitu_time,insitu_r,insitu_lat,insitu_lon]=get_insitu_position_time(higeocat_t0[i], insitu_location_string,insitu_str, insitu_kernel)
        delta=abs(higeocat_sse_lon[i]-insitu_lon)

        if delta < 30:
            #calculate arrival time for target position at launch time
            [ta,visse]=calculate_arrival(higeocat_vsse[i],delta, lamda, insitu_r,higeocat_t0_num[i])

            #make old version of ARRCAT without iteration and errors
            list_old=[higeocat_id[i].decode(),higeocat_sc[i].decode(),target_name,\
                      parse_time(higeocat_t0[i]).iso[:-7],parse_time(ta).iso[:-7],0,\
                      np.round(insitu_r,3), np.round(insitu_lon,2), np.round(insitu_lat,2),np.round(insitu_lon-higeocat_sse_lon[i],1),\
                      higeocat_sse_lon[i],higeocat_sse_lat[i],higeocat_vsse[i],\
                      higeocat_vsse_err[i], int(np.rint(visse)),0,higeocat_pafit[i],higeocat_pan[i],higeocat_pas[i],higeocat_pacenter[i]]
            arrcat_insitu_list_old.append(list_old)

            #iteration 2: target position at first arrival time estimate
            [insitu_time2,insitu_r2,insitu_lat2,insitu_lon2]=get_insitu_position_time(ta, insitu_location_string,insitu_str, insitu_kernel)
            delta2=abs(higeocat_sse_lon[i]-insitu_lon2)
            if delta2 <30:
                [ta2,visse2]=calculate_arrival(higeocat_vsse[i],delta2, lamda, insitu_r2,higeocat_t0_num[i])

                #iteration 3
                [insitu_time3,insitu_r3,insitu_lat3,insitu_lon3]=get_insitu_position_time(ta2, insitu_location_string,insitu_str, insitu_kernel)
                delta3=abs(higeocat_sse_lon[i]-insitu_lon3)
                if delta3 <30:
                    [ta3,visse3]=calculate_arrival(higeocat_vsse[i],delta3, lamda, insitu_r3,higeocat_t0_num[i])

                    #iteration 4
                    [insitu_time4,insitu_r4,insitu_lat4,insitu_lon4]=get_insitu_position_time(ta3, insitu_location_string,insitu_str, insitu_kernel)
                    delta4=abs(higeocat_sse_lon[i]-insitu_lon4)
                    if delta4 <30:
                        #calculate finally iterated arrival time
                        [ta4,visse4]=calculate_arrival(higeocat_vsse[i],delta4, lamda, insitu_r4,higeocat_t0_num[i])

                        #estimate error bar on arrival time adding or subtracting the error in the Vsse speed
                        [ta4_low,visse4_low]=calculate_arrival(higeocat_vsse[i]-higeocat_vsse_err[i],delta4, lamda, insitu_r4,higeocat_t0_num[i])
                        [ta4_high,visse4_high]=calculate_arrival(higeocat_vsse[i]+higeocat_vsse_err[i],delta4, lamda, insitu_r4,higeocat_t0_num[i])

                        #difference of high / low to original arrival time, converted to hours
                        ta4_err_low=abs(parse_time(ta4).plot_date-parse_time(ta4_low).plot_date)*24
                        ta4_err_high=abs(parse_time(ta4).plot_date-parse_time(ta4_high).plot_date)*24
                        ta4_err=np.round(np.mean([ta4_err_high,ta4_err_low]),1)

                        #same for arrival speed error
                        visse4_err_low=abs(visse4_low-visse4)
                        visse4_err_high=abs(visse4_high-visse4)
                        visse4_err=int(np.rint(np.mean([visse4_err_high,visse4_err_low])))

                        list1=[higeocat_id[i].decode(),higeocat_sc[i].decode(),target_name,\
                               parse_time(higeocat_t0[i]).iso[:-7],parse_time(ta4).iso[:-7],ta4_err,\
                               np.round(insitu_r4,3), np.round(insitu_lon4,2), np.round(insitu_lat4,2),np.round(insitu_lon4-higeocat_sse_lon[i],1),\
                               higeocat_sse_lon[i],higeocat_sse_lat[i],higeocat_vsse[i],\
                               higeocat_vsse_err[i], int(np.rint(visse4)),visse4_err,higeocat_pafit[i],higeocat_pan[i],higeocat_pas[i],higeocat_pacenter[i]]
                        arrcat_insitu_list.append(list1)

    #make dataframes out of the lists; DataFrame.append was removed in pandas 2.0,
    #pd.concat with default ignore_index=False is the drop-in equivalent
    ac_old1 = pd.DataFrame(arrcat_insitu_list_old, columns = column_list)
    ac_old = pd.concat([ac_old, ac_old1])

    ac1 = pd.DataFrame(arrcat_insitu_list, columns = column_list)
    arrcat = pd.concat([arrcat, ac1])

    print('SSEF30 events: ',len(arrcat_insitu_list) )
    print(insitu_location_string,' SSEF30 arrival catalog finished.')
    print()

    return [arrcat,ac_old]
###################################### SIRCAT operations ################################
def load_helio4cast_sircat_master_from_excel(file):
    '''
    Convert the SIRCAT excel master file to a pandas dataframe and convert
    the event boundary time columns to datetime objects.

    Which columns hold valid times depends on the source catalog:
    - STEREO-A, tag 'J' (Jian) and STEREO-B, MAVEN: sir_start_time,
      hss_start_time, sir_end_time
    - STEREO-A, tag 'A' (Allen) and Wind, PSP: hss_start_time, hss_end_time

    Improvement: the repeated strip/parse/assign pattern (12 near-identical
    lines) is factored into a helper and a per-row column list; behavior is
    unchanged.
    '''
    print('load HELCATS SIRCAT from file:', file)
    sc=pd.read_excel(file)
    sc=sc.drop(columns='Unnamed: 0')

    def _to_datetime(value):
        #remove leading and ending blank spaces if any, parse to datetime
        return parse_time(str(value).strip()).datetime

    #get beginning of tags for STA to identify allen and jian events
    tag_list=[]
    for i in np.arange(0,len(sc)):
        tag_list.append(sc.sircat_id[i][13])  #'J' or 'A'

    #convert the relevant time columns of each row to datetime objects
    for i in np.arange(0,sc.shape[0]):
        insitu = sc.sc_insitu[i]
        if insitu == 'STEREO-A':
            if tag_list[i] == 'J':    #Jian events: SIR boundaries given
                cols = ['sir_start_time', 'hss_start_time', 'sir_end_time']
            elif tag_list[i] == 'A':  #Allen events: HSS boundaries given
                cols = ['hss_start_time', 'hss_end_time']
            else:
                cols = []
        elif insitu in ('Wind', 'PSP'):
            #check PSP wind different sources if needed (Allen and Grandin)
            cols = ['hss_start_time', 'hss_end_time']
        elif insitu in ('STEREO-B', 'MAVEN'):
            cols = ['sir_start_time', 'hss_start_time', 'sir_end_time']
        else:
            cols = []
        for col in cols:
            sc.at[i, col] = _to_datetime(sc[col][i])

    return sc
def get_sircat_parameters(sc, sci, scat, name):
    '''
    Compute in situ parameters for all SIR/HSS events of one spacecraft and
    write them into the SIRCAT dataframe.

    sc   - spacecraft data recarray (uses sc.time, sc.vt, sc.bt, sc.bz,
           sc.r, sc.lon, sc.lat)
    sci  - indices for this spacecraft in sircat
    scat - sircat pandas dataframe; modified in place via .at and returned
    name - spacecraft name: 'Wind', 'PSP', 'STEREO-A', 'STEREO-B' or 'MAVEN'

    The event boundary indices into sc are computed once and cached as a
    pickle file under sircat/indices_sircat/.
    '''
    fileind='sircat/indices_sircat/SIRCAT_indices_'+name+'.p'

    ################ extract indices of ICMEs in the respective data (time consuming, so do it once and save)

    if os.path.isfile(fileind) == False:

        print('extract indices of SIRs in '+ name+ ' data')
        #### get all ICMECAT times for this spacecraft as datenum
        sc_sir_start=scat.sir_start_time[sci]
        sc_hss_start=scat.hss_start_time[sci]
        sc_sir_end=scat.sir_end_time[sci]
        sc_hss_end=scat.hss_end_time[sci]

        ### arrays containing the indices of where the SIRs are in the data
        sir_start_ind=np.zeros(len(sci),dtype=int)
        hss_start_ind=np.zeros(len(sci),dtype=int)
        sir_end_ind=np.zeros(len(sci),dtype=int)
        hss_end_ind=np.zeros(len(sci),dtype=int)

        #check where vt is < or > 450 km/s
        vt_lt_450=np.where(sc.vt < 450)[0]
        vt_gt_450=np.where(sc.vt > 450)[0]
        #check where vt is < or > 350 km/s
        vt_lt_350=np.where(sc.vt < 350)[0]
        vt_gt_350=np.where(sc.vt > 350)[0]

        #this takes some time, get indices in data for each SIRCAT time
        #each index points to the last data sample at or before the catalog time
        for i in np.arange(sci[0],sci[-1]+1):

            print(i-sci[0])

            if (name== 'STEREO-A'):
                tag=scat.sircat_id[i][13]
                if tag=='J': #Jian events
                    print('J', sc_sir_start[i] )
                    sir_start_ind[i-sci[0]]=np.where(sc.time > sc_sir_start[i])[0][0]-1
                    hss_start_ind[i-sci[0]]=np.where(sc.time > sc_hss_start[i])[0][0]-1
                    sir_end_ind[i-sci[0]]=np.where(sc.time > sc_sir_end[i])[0][0]-1

                if tag=='A': #Allen events
                    print('A', sc_sir_start[i])
                    hss_start_ind[i-sci[0]]=np.where(sc.time > sc_hss_start[i])[0][0]-1
                    hss_end_ind[i-sci[0]]=np.where(sc.time > sc_hss_end[i])[0][0]-1

            if (name== 'STEREO-B'):
                sir_start_ind[i-sci[0]]=np.where(sc.time > sc_sir_start[i])[0][0]-1
                hss_start_ind[i-sci[0]]=np.where(sc.time > sc_hss_start[i])[0][0]-1
                sir_end_ind[i-sci[0]]=np.where(sc.time > sc_sir_end[i])[0][0]-1

                #here the hss_end_time needs to be extracted - criteria similar to Grandin et al. 2018
                #where stream goes back to (< 450 km/s) after hss start time
                #check the indices in the 450 array that are greater than the hss_start index +0.5 days
                #24*60 data points
                #and take the first one

                #take next data point > 450 km/s after hss_start + 6 hours (for getting rid of rapid variations)
                #next450=np.where(vt_gt_450 > hss_start_ind[i-sci[0]])[0][0]+6*60
                #print(hss_start_ind[i-sci[0]],vt_gt_450[next450])

                #then take next data point below 450 after this
                #hss_end_ind[i-sci[0]]=vt_lt_450[ np.where(vt_lt_450 > vt_gt_450[next450])[0][0] ]
                #print('hss duration in hours ',(hss_end_ind[i-sci[0]]-hss_start_ind[i-sci[0]])/60)
                #print(hss_start_ind[i-sci[0]],hss_end_ind[i-sci[0]])

            if name== 'MAVEN':
                sir_start_ind[i-sci[0]]=np.where(sc.time > sc_sir_start[i])[0][0]-1
                hss_start_ind[i-sci[0]]=np.where(sc.time > sc_hss_start[i])[0][0]-1
                sir_end_ind[i-sci[0]]=np.where(sc.time > sc_sir_end[i])[0][0]-1
                #hss_end_ind[i-sci[0]]=vt_lt_450[np.where(vt_lt_450 > sir_end_ind[i-sci[0]])[0][0] ]

                #take next data point > 450 km/s after hss_start + 2 orbits (for getting rid of rapid variations)
                #next350=np.where(vt_gt_350 > hss_start_ind[i-sci[0]])[0][0]+2
                #print(hss_start_ind[i-sci[0]],vt_gt_450[next450])

                #then take next data point below 450 after this
                #hss_end_ind[i-sci[0]]=vt_lt_350[ np.where(vt_lt_350 > vt_gt_350[next350])[0][0] ]
                #print('hss duration in hours ',(hss_end_ind[i-sci[0]]-hss_start_ind[i-sci[0]])*4.5)
                #print(hss_start_ind[i-sci[0]],hss_end_ind[i-sci[0]])

            if name=='Wind':
                #here only hss start and hss end exist
                hss_start_ind[i-sci[0]]=np.where(sc.time > sc_hss_start[i])[0][0]-1
                hss_end_ind[i-sci[0]]=np.where(sc.time > sc_hss_end[i])[0][0]-1
                #future update: set hss_start as sir_start, and add time for hss_start by pt max after sir_start

            if name=='PSP':
                #here only hss start and hss end exist
                hss_start_ind[i-sci[0]]=np.where(sc.time > sc_hss_start[i])[0][0]-1
                hss_end_ind[i-sci[0]]=np.where(sc.time > sc_hss_end[i])[0][0]-1
                #future update: set hss_start as sir_start, and add time for hss_start by pt max after sir_start

        #cache the index arrays so they are only computed once per spacecraft
        pickle.dump([sir_start_ind,hss_start_ind,sir_end_ind,hss_end_ind], open(fileind, 'wb'))

    ############################################

    #load the cached index arrays
    [sir_start_ind, hss_start_ind,sir_end_ind,hss_end_ind]=pickle.load(open(fileind, 'rb'))

    #first make hss end time for STEREO-A/B from hss_end_ind index
    #if (name== 'STEREO-A') or (name== 'STEREO-B') or (name== 'MAVEN'):
    #    for i in np.arange(len(sci))-1:
    #        scat.at[sci[i],'hss_end_time']=sc.time[hss_end_ind[i]]

    print('Get parameters for ',name)

    ####### position

    print('position')

    #SIR heliodistance
    #NOTE(review): np.arange(len(sci))-1 iterates -1..len(sci)-2, so the last
    #event is skipped and i=-1 wraps to the last element of hss_start_ind —
    #possibly intended to be np.arange(len(sci)); confirm before changing.
    for i in np.arange(len(sci))-1:
        scat.at[sci[i],'sc_heliodistance']=np.round(sc.r[hss_start_ind[i]],4)
        #SIR longitude
        scat.at[sci[i],'sc_long_heeq']=np.round(sc.lon[hss_start_ind[i]],2)
        ##SIR latitude
        scat.at[sci[i],'sc_lat_heeq']=np.round(sc.lat[hss_start_ind[i]],2)

    print('hss')

    if (name=='PSP'):

        #HSS duration in hours from the catalog boundary times
        sci_istart=mdates.date2num(scat.hss_start_time[sci])
        sci_hss_iend=mdates.date2num(scat.hss_end_time[sci])
        scat.at[sci,'hss_duration']=np.round((sci_hss_iend-sci_istart)*24,2)

        for i in np.arange(0,len(sci)):

            #print(i)
            #print('hss duration in hours ',(hss_end_ind[i]-hss_start_ind[i])/60)

            #v_max; try/except because PSP data gaps can make the slice all-NaN or empty
            scat.at[sci[i],'hss_vtmax']=np.nan

            try:
                vmax=np.round(np.nanmax(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)
                #if vmax ok:
                if np.isnan(vmax)==False:
                    scat.at[sci[i],'hss_vtmax']=vmax
                    #vtmaxtime - search for index in sliced array and at beginning of array to see the index in the whole dataset
                    scat.at[sci[i],'hss_vtmax_time']=sc.time[np.nanargmax(sc.vt[hss_start_ind[i]:hss_end_ind[i]])+hss_start_ind[i]]
            except:
                print('vmax nan')

            # v_mean
            try:
                scat.at[sci[i],'hss_vtmean']=np.round(np.nanmean(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)
            except:
                print()

            #v_bstd
            try:
                scat.at[sci[i],'hss_vtstd']=np.round(np.nanstd(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)
            except:
                print()

            try:
                #B_max
                scat.at[sci[i],'hss_btmax']=np.round(np.nanmax(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)

                # B_mean
                scat.at[sci[i],'hss_btmean']=np.round(np.nanmean(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)

                #bstd
                scat.at[sci[i],'hss_btstd']=np.round(np.nanstd(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)

                #bz
                scat.at[sci[i],'hss_bzmin']=np.round(np.nanmin(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)

                scat.at[sci[i],'hss_bzmean']=np.round(np.nanmean(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)

                scat.at[sci[i],'hss_bzstd']=np.round(np.nanstd(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)
            except:
                print()

    if (name== 'Wind'):

        ############ HSS duration
        sci_istart=mdates.date2num(scat.hss_start_time[sci])
        sci_hss_iend=mdates.date2num(scat.hss_end_time[sci])
        scat.at[sci,'hss_duration']=np.round((sci_hss_iend-sci_istart)*24,2)

        for i in np.arange(0,len(sci)):

            #print(i)
            #print('hss duration in hours ',(hss_end_ind[i]-hss_start_ind[i])/60)

            tag=scat.sircat_id[i][13]

            #v_max
            scat.at[sci[i],'hss_vtmax']=np.round(np.nanmax(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)

            #vtmaxtime - search for index in sliced array and at beginning of array to see the index in the whole dataset
            scat.at[sci[i],'hss_vtmax_time']=sc.time[np.nanargmax(sc.vt[hss_start_ind[i]:hss_end_ind[i]])+hss_start_ind[i]]

            # v_mean
            scat.at[sci[i],'hss_vtmean']=np.round(np.nanmean(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)

            #v_bstd
            scat.at[sci[i],'hss_vtstd']=np.round(np.nanstd(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)

            #B_max
            scat.at[sci[i],'hss_btmax']=np.round(np.nanmax(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)

            # B_mean
            scat.at[sci[i],'hss_btmean']=np.round(np.nanmean(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)

            #bstd
            scat.at[sci[i],'hss_btstd']=np.round(np.nanstd(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)

            #bz
            scat.at[sci[i],'hss_bzmin']=np.round(np.nanmin(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)

            scat.at[sci[i],'hss_bzmean']=np.round(np.nanmean(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)

            scat.at[sci[i],'hss_bzstd']=np.round(np.nanstd(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)

    print('sir')

    ###SIR parameters only for STEREO and MAVEN

    ############ SIR duration
    if (name== 'STEREO-B') or (name== 'MAVEN'):

        sci_istart=mdates.date2num(scat.hss_start_time[sci]) ##***Error? should this be sir_start?
        sci_iend=mdates.date2num(scat.sir_end_time[sci])
        scat.at[sci,'sir_duration']=np.round((sci_iend-sci_istart)*24,2)

        ########## SIR general parameters

        for i in np.arange(0,len(sci)):

            #v_max
            scat.at[sci[i],'sir_vtmax']=np.round(np.nanmax(sc.vt[sir_start_ind[i]:sir_end_ind[i]]),1)

            # v_mean
            scat.at[sci[i],'sir_vtmean']=np.round(np.nanmean(sc.vt[sir_start_ind[i]:sir_end_ind[i]]),1)

            #v_bstd
            scat.at[sci[i],'sir_vtstd']=np.round(np.nanstd(sc.vt[sir_start_ind[i]:sir_end_ind[i]]),1)

            #B_max
            scat.at[sci[i],'sir_btmax']=np.round(np.nanmax(sc.bt[sir_start_ind[i]:sir_end_ind[i]]),1)

            # B_mean
            scat.at[sci[i],'sir_btmean']=np.round(np.nanmean(sc.bt[sir_start_ind[i]:sir_end_ind[i]]),1)

            #bstd
            scat.at[sci[i],'sir_btstd']=np.round(np.nanstd(sc.bt[sir_start_ind[i]:sir_end_ind[i]]),1)

            #bz
            scat.at[sci[i],'sir_bzmin']=np.round(np.nanmin(sc.bz[sir_start_ind[i]:sir_end_ind[i]]),1)

            scat.at[sci[i],'sir_bzmean']=np.round(np.nanmean(sc.bz[sir_start_ind[i]:sir_end_ind[i]]),1)

            scat.at[sci[i],'sir_bzstd']=np.round(np.nanstd(sc.bz[sir_start_ind[i]:sir_end_ind[i]]),1)

    if (name== 'STEREO-A'):

        for i in np.arange(0,len(sci)):

            #check which catalog the event comes from: 'J' (Jian, SIR
            #boundaries) or 'A' (Allen, HSS boundaries)
            tag=scat.sircat_id[sci[i]][13]

            if tag=='J': #Jian events

                sci_istart=mdates.date2num(scat.sir_start_time[sci[i]])
                sci_iend=mdates.date2num(scat.sir_end_time[sci[i]])
                scat.at[sci[i],'sir_duration']=np.round((sci_iend-sci_istart)*24,2)

                #v_max
                scat.at[sci[i],'sir_vtmax']=np.round(np.nanmax(sc.vt[sir_start_ind[i]:sir_end_ind[i]]),1)

                # v_mean
                scat.at[sci[i],'sir_vtmean']=np.round(np.nanmean(sc.vt[sir_start_ind[i]:sir_end_ind[i]]),1)

                #v_bstd
                scat.at[sci[i],'sir_vtstd']=np.round(np.nanstd(sc.vt[sir_start_ind[i]:sir_end_ind[i]]),1)

                #B_max
                scat.at[sci[i],'sir_btmax']=np.round(np.nanmax(sc.bt[sir_start_ind[i]:sir_end_ind[i]]),1)

                # B_mean
                scat.at[sci[i],'sir_btmean']=np.round(np.nanmean(sc.bt[sir_start_ind[i]:sir_end_ind[i]]),1)

                #bstd
                scat.at[sci[i],'sir_btstd']=np.round(np.nanstd(sc.bt[sir_start_ind[i]:sir_end_ind[i]]),1)

                #bz
                scat.at[sci[i],'sir_bzmin']=np.round(np.nanmin(sc.bz[sir_start_ind[i]:sir_end_ind[i]]),1)

                scat.at[sci[i],'sir_bzmean']=np.round(np.nanmean(sc.bz[sir_start_ind[i]:sir_end_ind[i]]),1)

                scat.at[sci[i],'sir_bzstd']=np.round(np.nanstd(sc.bz[sir_start_ind[i]:sir_end_ind[i]]),1)

            if tag=='A': #Allen events

                ############ HSS duration
                sci_istart=mdates.date2num(scat.hss_start_time[sci[i]])
                sci_hss_iend=mdates.date2num(scat.hss_end_time[sci[i]])
                scat.at[sci[i],'hss_duration']=np.round((sci_hss_iend-sci_istart)*24,2)

                #v_max
                scat.at[sci[i],'hss_vtmax']=np.round(np.nanmax(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)

                #vtmaxtime - search for index in sliced array and at beginning of array to see the index in the whole dataset
                scat.at[sci[i],'hss_vtmax_time']=sc.time[np.nanargmax(sc.vt[hss_start_ind[i]:hss_end_ind[i]])+hss_start_ind[i]]

                # v_mean
                scat.at[sci[i],'hss_vtmean']=np.round(np.nanmean(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)

                #v_bstd
                scat.at[sci[i],'hss_vtstd']=np.round(np.nanstd(sc.vt[hss_start_ind[i]:hss_end_ind[i]]),1)

                #B_max
                scat.at[sci[i],'hss_btmax']=np.round(np.nanmax(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)

                # B_mean
                scat.at[sci[i],'hss_btmean']=np.round(np.nanmean(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)

                #bstd
                scat.at[sci[i],'hss_btstd']=np.round(np.nanstd(sc.bt[hss_start_ind[i]:hss_end_ind[i]]),1)

                #bz
                scat.at[sci[i],'hss_bzmin']=np.round(np.nanmin(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)

                scat.at[sci[i],'hss_bzmean']=np.round(np.nanmean(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)

                scat.at[sci[i],'hss_bzstd']=np.round(np.nanstd(sc.bz[hss_start_ind[i]:hss_end_ind[i]]),1)

    return scat
###################################### ICMECAT operations ################################
def load_helcats_icmecat_master_from_excel(file):
    ''' Read the HELCATS ICMECAT master Excel file into a pandas dataframe
    and convert the three event-time columns to datetime objects.
    '''
    print('load HELCATS ICMECAT from file:', file)
    ic = pd.read_excel(file)
    # parse each row's event times; strip stray leading/trailing blanks first
    time_columns = ('icme_start_time', 'mo_start_time', 'mo_end_time')
    for row in np.arange(0, ic.shape[0]):
        for col in time_columns:
            ic.at[row, col] = parse_time(str(ic[col][row]).strip()).datetime
    return ic
def pdyn(density, speed):
    '''
    Compute the solar wind dynamic pressure in nanoPascal from the proton
    number density [cm^-3] and bulk speed [km/s].

    Assumes the dynamic pressure is carried by protons only:
    pdyn = n * m_p * v^2. Works on scalars or numpy arrays (elementwise).

    Parameters
    ----------
    density : float or array  - proton number density in cm^-3
    speed   : float or array  - bulk speed in km/s

    Returns
    -------
    float or array - dynamic pressure in nPa
    '''
    proton_mass=1.6726219*1e-27 #kg
    # unit factors: km/s -> m/s (1e3), cm^-3 -> m^-3 (1e6), Pa -> nPa (1e9)
    pdyn=np.multiply(np.square(speed*1e3),density)*1e6*proton_mass*1e9 #in nanoPascal
    return pdyn
def load_pickle(file):
    '''Load and return the object pickled in *file* (path string).

    Uses a context manager so the file handle is closed even on error
    (the previous version leaked the handle returned by open()).
    '''
    with open(file, 'rb') as f:
        ic = pickle.load(f)
    return ic
def get_cat_parameters(sc, sci, ic, name):
    '''
    Fill statistical ICME parameters into the ICMECAT dataframe.

    Computes, for each event of one spacecraft: ICME/MO durations, magnetic
    field statistics (bt, by, bz), and -- where plasma data exist -- speed,
    density, temperature and dynamic pressure statistics for the MO and
    sheath intervals. Writes the results into *ic* in place via ic.at.

    sc   - spacecraft in situ data recarray; fields used here: time, bt,
           by, bz, vt, np, tp, r, lon, lat
    sci  - indices for this spacecraft in icmecat
    ic   - icmecat pandas dataframe (modified in place and returned)
    name - spacecraft name string; selects the cached index file and
           whether plasma parameters are available
    '''
    # per-spacecraft cache of the event boundary indices in the data arrays
    fileind='icmecat/indices_icmecat/ICMECAT_indices_'+name+'.p'
    #### extract indices of ICMEs in the respective data (time consuming, so do it once)
    if os.path.isfile(fileind) == False:
        print('extract indices of ICMEs in '+ name+ ' data')
        #### get all ICMECAT times for this spacecraft as datenum
        sc_icme_start=ic.icme_start_time[sci]
        sc_mo_start=ic.mo_start_time[sci]
        sc_mo_end=ic.mo_end_time[sci]
        ### arrays containing the indices of where the ICMEs are in the data
        icme_start_ind=np.zeros(len(sci),dtype=int)
        mo_start_ind=np.zeros(len(sci),dtype=int)
        mo_end_ind=np.zeros(len(sci),dtype=int)
        #this takes some time, get indices in data for each ICMECAT
        for i in np.arange(sci[0],sci[-1]+1):
            print(i-sci[0])
            # index of the last data sample at or before each event boundary
            icme_start_ind[i-sci[0]]=np.where(sc.time > sc_icme_start[i])[0][0]-1
            #print(icme_start_ind[i])
            mo_start_ind[i-sci[0]]=np.where(sc.time > sc_mo_start[i])[0][0]-1
            mo_end_ind[i-sci[0]]=np.where(sc.time > sc_mo_end[i])[0][0]-1
        pickle.dump([icme_start_ind, mo_start_ind,mo_end_ind], open(fileind, 'wb'))
    ############################################
    # load cached boundary indices (written above on the first call)
    [icme_start_ind, mo_start_ind,mo_end_ind]=pickle.load(open(fileind, 'rb'))
    #plasma available?
    if name=='Wind': plasma=True
    if name=='STEREO-A': plasma=True
    if name=='STEREO-B': plasma=True
    if name=='ULYSSES': plasma=True
    if name=='MAVEN': plasma=True
    if name=='PSP': plasma=True
    if name=='VEX': plasma=False
    if name=='MESSENGER': plasma=False
    if name=='SolarOrbiter': plasma=False
    if name=='BepiColombo': plasma=False
    print('Get parameters for ',name)
    ####### position
    #MO heliodistance
    # NOTE(review): np.arange(len(sci))-1 iterates [-1, 0, ..., len(sci)-2],
    # so the first pass uses index -1 (the last event) and the local index
    # len(sci)-1 is never visited directly -- confirm this is intended.
    for i in np.arange(len(sci))-1:
        ic.at[sci[i],'mo_sc_heliodistance']=np.round(sc.r[mo_start_ind[i]],4)
        #MO longitude
        ic.at[sci[i],'mo_sc_long_heeq']=np.round(sc.lon[mo_start_ind[i]],2)
        #MO latitude
        ic.at[sci[i],'mo_sc_lat_heeq']=np.round(sc.lat[mo_start_ind[i]],2)
    ############ ICME
    # ICME duration in hours (date2num is in days, hence *24)
    sci_istart=mdates.date2num(ic.icme_start_time[sci])
    sci_iend=mdates.date2num(ic.mo_end_time[sci])
    ic.at[sci,'icme_duration']=np.round((sci_iend-sci_istart)*24,2)
    for i in np.arange(0,len(sci)):
        #ICME B_max
        ic.at[sci[i],'icme_bmax']=np.round(np.nanmax(sc.bt[icme_start_ind[i]:mo_end_ind[i]]),1)
        #ICME B_mean
        ic.at[sci[i],'icme_bmean']=np.round(np.nanmean(sc.bt[icme_start_ind[i]:mo_end_ind[i]]),1)
        #icme_bstd
        ic.at[sci[i],'icme_bstd']=np.round(np.nanstd(sc.bt[icme_start_ind[i]:mo_end_ind[i]]),1)
    if plasma==True:
        #ICME speed_mean and std
        for i in np.arange(len(sci))-1:
            ic.at[sci[i],'icme_speed_mean']=np.round(np.nanmean(sc.vt[icme_start_ind[i]:mo_end_ind[i]]),1)
            ic.at[sci[i],'icme_speed_std']=np.round(np.nanstd(sc.vt[icme_start_ind[i]:mo_end_ind[i]]),1)
    else: #set nan
        for i in np.arange(len(sci))-1:
            ic.at[sci[i],'icme_speed_mean']=np.nan
            ic.at[sci[i],'icme_speed_std']=np.nan
    ########### MO
    # MO duration in hours
    sci_istart=mdates.date2num(ic.mo_start_time[sci])
    sci_iend=mdates.date2num(ic.mo_end_time[sci])
    ic.at[sci,'mo_duration']=np.round((sci_iend-sci_istart)*24,2)
    #print(sci_istart)
    #print(sci_iend)
    #print(mo_start_ind[i])
    #print(mo_end_ind[i])
    for i in np.arange(len(sci))-1:
        #MO B_max
        ic.at[sci[i],'mo_bmax']=np.round(np.nanmax(sc.bt[mo_start_ind[i]:mo_end_ind[i]]),1)
        #MO B_mean
        ic.at[sci[i],'mo_bmean']=np.round(np.nanmean(sc.bt[mo_start_ind[i]:mo_end_ind[i]]),1)
        #MO B_std
        ic.at[sci[i],'mo_bstd']=np.round(np.nanstd(sc.bt[mo_start_ind[i]:mo_end_ind[i]]),1)
        #MO Bz_mean
        ic.at[sci[i],'mo_bzmean']=np.round(np.nanmean(sc.bz[mo_start_ind[i]:mo_end_ind[i]]),1)
        #MO Bz_min
        ic.at[sci[i],'mo_bzmin']=np.round(np.nanmin(sc.bz[mo_start_ind[i]:mo_end_ind[i]]),1)
        #MO Bz_std
        ic.at[sci[i],'mo_bzstd']=np.round(np.nanstd(sc.bz[mo_start_ind[i]:mo_end_ind[i]]),1)
        #MO By_mean
        ic.at[sci[i],'mo_bymean']=np.round(np.nanmean(sc.by[mo_start_ind[i]:mo_end_ind[i]]),1)
        #MO By_std
        ic.at[sci[i],'mo_bystd']=np.round(np.nanstd(sc.by[mo_start_ind[i]:mo_end_ind[i]]),1)
    if plasma==True:
        for i in np.arange(len(sci))-1:
            #mo speed_mean and std
            ic.at[sci[i],'mo_speed_mean']=np.round(np.nanmean(sc.vt[mo_start_ind[i]:mo_end_ind[i]]),1)
            ic.at[sci[i],'mo_speed_std']=np.round(np.nanstd(sc.vt[mo_start_ind[i]:mo_end_ind[i]]),1)
            # expansion speed: half the leading-edge minus trailing-edge speed
            ic.at[sci[i],'mo_expansion_speed']=np.round( (sc.vt[mo_start_ind[i]]-sc.vt[mo_end_ind[i]])/2 ,1 )
            ic.at[sci[i],'mo_density_mean']=np.round(np.nanmean(sc.np[mo_start_ind[i]:mo_end_ind[i]]),1)
            ic.at[sci[i],'mo_density_std']=np.round(np.nanstd(sc.np[mo_start_ind[i]:mo_end_ind[i]]),1)
            ic.at[sci[i],'mo_temperature_mean']=np.round(np.nanmean(sc.tp[mo_start_ind[i]:mo_end_ind[i]]),1)
            ic.at[sci[i],'mo_temperature_std']=np.round(np.nanstd(sc.tp[mo_start_ind[i]:mo_end_ind[i]]),1)
            # dynamic pressure over the MO interval (pdyn helper, nPa)
            pdyn_i=pdyn(sc.np[mo_start_ind[i]:mo_end_ind[i]],sc.vt[mo_start_ind[i]:mo_end_ind[i]])
            ic.at[sci[i],'mo_pdyn_mean']=np.round(np.nanmean(pdyn_i),1)
            ic.at[sci[i],'mo_pdyn_std']=np.round(np.nanstd(pdyn_i),1)
            #icme speed_mean and std
            # sheath = interval from ICME start to MO start
            ic.at[sci[i],'sheath_speed_mean']=np.round(np.nanmean(sc.vt[icme_start_ind[i]:mo_start_ind[i]]),1)
            ic.at[sci[i],'sheath_speed_std']=np.round(np.nanstd(sc.vt[icme_start_ind[i]:mo_start_ind[i]]),1)
            ic.at[sci[i],'sheath_density_mean']=np.round(np.nanmean(sc.np[icme_start_ind[i]:mo_start_ind[i]]),1)
            ic.at[sci[i],'sheath_density_std']=np.round(np.nanstd(sc.np[icme_start_ind[i]:mo_start_ind[i]]),1)
            pdyn_i=pdyn(sc.np[icme_start_ind[i]:mo_start_ind[i]],sc.vt[icme_start_ind[i]:mo_start_ind[i]])
            ic.at[sci[i],'sheath_pdyn_mean']=np.round(np.nanmean(pdyn_i),1)
            ic.at[sci[i],'sheath_pdyn_std']=np.round(np.nanstd(pdyn_i),1)
    else: #set nan
        for i in np.arange(len(sci))-1:
            ic.at[sci[i],'mo_speed_mean']=np.nan
            ic.at[sci[i],'mo_speed_std']=np.nan
            ic.at[sci[i],'mo_expansion_speed']=np.nan
            ic.at[sci[i],'mo_density_mean']=np.nan
            ic.at[sci[i],'mo_density_std']=np.nan
            ic.at[sci[i],'mo_temperature_mean']=np.nan
            ic.at[sci[i],'mo_temperature_std']=np.nan
            ic.at[sci[i],'mo_pdyn_mean']=np.nan
            ic.at[sci[i],'mo_pdyn_std']=np.nan
            ic.at[sci[i],'sheath_pdyn_mean']=np.nan
            ic.at[sci[i],'sheath_pdyn_std']=np.nan
    return ic
| [
"numpy.radians",
"numpy.nanargmax",
"numpy.array",
"numpy.nanmean",
"sunpy.time.parse_time",
"pandas.read_excel",
"numpy.nanmin",
"numpy.arange",
"datetime.datetime",
"numpy.mean",
"numpy.where",
"heliocats.data.cart2sphere",
"numpy.rint",
"numpy.nanmax",
"pandas.DataFrame",
"numpy.deg... | [((670, 690), 'importlib.reload', 'importlib.reload', (['hd'], {}), '(hd)\n', (686, 690), False, 'import importlib\n'), ((1979, 2022), 'astropy.io.votable.parse_single_table', 'parse_single_table', (['"""data/HCME_WP3_V06.vot"""'], {}), "('data/HCME_WP3_V06.vot')\n", (1997, 2022), False, 'from astropy.io.votable import parse_single_table\n'), ((5434, 5465), 'numpy.array', 'np.array', (["higeocat['SSE Speed']"], {}), "(higeocat['SSE Speed'])\n", (5442, 5465), True, 'import numpy as np\n'), ((5488, 5523), 'numpy.array', 'np.array', (["higeocat['SSE Speed Err']"], {}), "(higeocat['SSE Speed Err'])\n", (5496, 5523), True, 'import numpy as np\n'), ((5545, 5580), 'numpy.array', 'np.array', (["higeocat['SSE HEEQ Long']"], {}), "(higeocat['SSE HEEQ Long'])\n", (5553, 5580), True, 'import numpy as np\n'), ((5603, 5637), 'numpy.array', 'np.array', (["higeocat['SSE HEEQ Lat']"], {}), "(higeocat['SSE HEEQ Lat'])\n", (5611, 5637), True, 'import numpy as np\n'), ((5655, 5679), 'numpy.array', 'np.array', (["higeocat['ID']"], {}), "(higeocat['ID'])\n", (5663, 5679), True, 'import numpy as np\n'), ((5696, 5720), 'numpy.array', 'np.array', (["higeocat['SC']"], {}), "(higeocat['SC'])\n", (5704, 5720), True, 'import numpy as np\n'), ((5738, 5764), 'numpy.array', 'np.array', (["higeocat['PA-N']"], {}), "(higeocat['PA-N'])\n", (5746, 5764), True, 'import numpy as np\n'), ((5782, 5808), 'numpy.array', 'np.array', (["higeocat['PA-S']"], {}), "(higeocat['PA-S'])\n", (5790, 5808), True, 'import numpy as np\n'), ((5828, 5856), 'numpy.array', 'np.array', (["higeocat['PA-fit']"], {}), "(higeocat['PA-fit'])\n", (5836, 5856), True, 'import numpy as np\n'), ((7751, 7779), 'heliopy.spice.furnish', 'spice.furnish', (['insitu_kernel'], {}), '(insitu_kernel)\n', (7764, 7779), True, 'import heliopy.spice as spice\n'), ((13292, 13349), 'pandas.DataFrame', 'pd.DataFrame', (['arrcat_insitu_list_old'], {'columns': 'column_list'}), '(arrcat_insitu_list_old, columns=column_list)\n', (13304, 
13349), True, 'import pandas as pd\n'), ((13441, 13494), 'pandas.DataFrame', 'pd.DataFrame', (['arrcat_insitu_list'], {'columns': 'column_list'}), '(arrcat_insitu_list, columns=column_list)\n', (13453, 13494), True, 'import pandas as pd\n'), ((14040, 14059), 'pandas.read_excel', 'pd.read_excel', (['file'], {}), '(file)\n', (14053, 14059), True, 'import pandas as pd\n'), ((14332, 14357), 'numpy.arange', 'np.arange', (['(0)', 'sc.shape[0]'], {}), '(0, sc.shape[0])\n', (14341, 14357), True, 'import numpy as np\n'), ((32929, 32948), 'pandas.read_excel', 'pd.read_excel', (['file'], {}), '(file)\n', (32942, 32948), True, 'import pandas as pd\n'), ((33006, 33031), 'numpy.arange', 'np.arange', (['(0)', 'ic.shape[0]'], {}), '(0, ic.shape[0])\n', (33015, 33031), True, 'import numpy as np\n'), ((36315, 36355), 'matplotlib.dates.date2num', 'mdates.date2num', (['ic.icme_start_time[sci]'], {}), '(ic.icme_start_time[sci])\n', (36330, 36355), True, 'import matplotlib.dates as mdates\n'), ((36372, 36408), 'matplotlib.dates.date2num', 'mdates.date2num', (['ic.mo_end_time[sci]'], {}), '(ic.mo_end_time[sci])\n', (36387, 36408), True, 'import matplotlib.dates as mdates\n'), ((36443, 36484), 'numpy.round', 'np.round', (['((sci_iend - sci_istart) * 24)', '(2)'], {}), '((sci_iend - sci_istart) * 24, 2)\n', (36451, 36484), True, 'import numpy as np\n'), ((37433, 37471), 'matplotlib.dates.date2num', 'mdates.date2num', (['ic.mo_start_time[sci]'], {}), '(ic.mo_start_time[sci])\n', (37448, 37471), True, 'import matplotlib.dates as mdates\n'), ((37488, 37524), 'matplotlib.dates.date2num', 'mdates.date2num', (['ic.mo_end_time[sci]'], {}), '(ic.mo_end_time[sci])\n', (37503, 37524), True, 'import matplotlib.dates as mdates\n'), ((37557, 37598), 'numpy.round', 'np.round', (['((sci_iend - sci_istart) * 24)', '(2)'], {}), '((sci_iend - sci_istart) * 24, 2)\n', (37565, 37598), True, 'import numpy as np\n'), ((3673, 3701), 'heliopy.spice.Trajectory', 'spice.Trajectory', (['insitu_str'], {}), 
'(insitu_str)\n', (3689, 3701), True, 'import heliopy.spice as spice\n'), ((3877, 3921), 'heliocats.data.cart2sphere', 'hd.cart2sphere', (['insitu.x', 'insitu.y', 'insitu.z'], {}), '(insitu.x, insitu.y, insitu.z)\n', (3891, 3921), True, 'from heliocats import data as hd\n'), ((4367, 4389), 'numpy.degrees', 'np.degrees', (['insitu_lat'], {}), '(insitu_lat)\n', (4377, 4389), True, 'import numpy as np\n'), ((4390, 4412), 'numpy.degrees', 'np.degrees', (['insitu_lon'], {}), '(insitu_lon)\n', (4400, 4412), True, 'import numpy as np\n'), ((4999, 5018), 'matplotlib.dates.num2date', 'mdates.num2date', (['ta'], {}), '(ta)\n', (5014, 5018), True, 'import matplotlib.dates as mdates\n'), ((5210, 5238), 'sunpy.time.parse_time', 'parse_time', (["higeocat['Date']"], {}), "(higeocat['Date'])\n", (5220, 5238), False, 'from sunpy.time import parse_time\n'), ((5289, 5323), 'sunpy.time.parse_time', 'parse_time', (["higeocat['SSE Launch']"], {}), "(higeocat['SSE Launch'])\n", (5299, 5323), False, 'from sunpy.time import parse_time\n'), ((5382, 5405), 'sunpy.time.parse_time', 'parse_time', (['higeocat_t0'], {}), '(higeocat_t0)\n', (5392, 5405), False, 'from sunpy.time import parse_time\n'), ((6069, 6101), 'heliopy.data.spice.get_kernel', 'spicedata.get_kernel', (['"""stereo_b"""'], {}), "('stereo_b')\n", (6089, 6101), True, 'import heliopy.data.spice as spicedata\n'), ((6229, 6266), 'heliopy.data.spice.get_kernel', 'spicedata.get_kernel', (['"""stereo_a_pred"""'], {}), "('stereo_a_pred')\n", (6249, 6266), True, 'import heliopy.data.spice as spicedata\n'), ((6290, 6322), 'heliopy.data.spice.get_kernel', 'spicedata.get_kernel', (['"""stereo_a"""'], {}), "('stereo_a')\n", (6310, 6322), True, 'import heliopy.data.spice as spicedata\n'), ((6331, 6360), 'heliopy.spice.furnish', 'spice.furnish', (['insitu_kernel2'], {}), '(insitu_kernel2)\n', (6344, 6360), True, 'import heliopy.spice as spice\n'), ((6488, 6531), 'heliopy.data.spice.get_kernel', 'spicedata.get_kernel', 
(['"""planet_trajectories"""'], {}), "('planet_trajectories')\n", (6508, 6531), True, 'import heliopy.data.spice as spicedata\n'), ((6657, 6700), 'heliopy.data.spice.get_kernel', 'spicedata.get_kernel', (['"""planet_trajectories"""'], {}), "('planet_trajectories')\n", (6677, 6700), True, 'import heliopy.data.spice as spicedata\n'), ((6823, 6866), 'heliopy.data.spice.get_kernel', 'spicedata.get_kernel', (['"""planet_trajectories"""'], {}), "('planet_trajectories')\n", (6843, 6866), True, 'import heliopy.data.spice as spicedata\n'), ((6992, 7035), 'heliopy.data.spice.get_kernel', 'spicedata.get_kernel', (['"""planet_trajectories"""'], {}), "('planet_trajectories')\n", (7012, 7035), True, 'import heliopy.data.spice as spicedata\n'), ((7166, 7198), 'heliopy.data.spice.get_kernel', 'spicedata.get_kernel', (['"""psp_pred"""'], {}), "('psp_pred')\n", (7186, 7198), True, 'import heliopy.data.spice as spicedata\n'), ((7323, 7356), 'heliopy.data.spice.get_kernel', 'spicedata.get_kernel', (['"""solo_2020"""'], {}), "('solo_2020')\n", (7343, 7356), True, 'import heliopy.data.spice as spicedata\n'), ((7511, 7544), 'heliopy.data.spice.get_kernel', 'spicedata.get_kernel', (['"""bepi_pred"""'], {}), "('bepi_pred')\n", (7531, 7544), True, 'import heliopy.data.spice as spicedata\n'), ((7674, 7705), 'heliopy.data.spice.get_kernel', 'spicedata.get_kernel', (['"""ulysses"""'], {}), "('ulysses')\n", (7694, 7705), True, 'import heliopy.data.spice as spicedata\n'), ((17282, 17305), 'os.path.isfile', 'os.path.isfile', (['fileind'], {}), '(fileind)\n', (17296, 17305), False, 'import os\n'), ((18271, 18301), 'numpy.arange', 'np.arange', (['sci[0]', '(sci[-1] + 1)'], {}), '(sci[0], sci[-1] + 1)\n', (18280, 18301), True, 'import numpy as np\n'), ((23378, 23413), 'numpy.round', 'np.round', (['sc.r[hss_start_ind[i]]', '(4)'], {}), '(sc.r[hss_start_ind[i]], 4)\n', (23386, 23413), True, 'import numpy as np\n'), ((23475, 23512), 'numpy.round', 'np.round', (['sc.lon[hss_start_ind[i]]', '(2)'], {}), 
'(sc.lon[hss_start_ind[i]], 2)\n', (23483, 23512), True, 'import numpy as np\n'), ((23573, 23610), 'numpy.round', 'np.round', (['sc.lat[hss_start_ind[i]]', '(2)'], {}), '(sc.lat[hss_start_ind[i]], 2)\n', (23581, 23610), True, 'import numpy as np\n'), ((23688, 23729), 'matplotlib.dates.date2num', 'mdates.date2num', (['scat.hss_start_time[sci]'], {}), '(scat.hss_start_time[sci])\n', (23703, 23729), True, 'import matplotlib.dates as mdates\n'), ((23758, 23797), 'matplotlib.dates.date2num', 'mdates.date2num', (['scat.hss_end_time[sci]'], {}), '(scat.hss_end_time[sci])\n', (23773, 23797), True, 'import matplotlib.dates as mdates\n'), ((23837, 23882), 'numpy.round', 'np.round', (['((sci_hss_iend - sci_istart) * 24)', '(2)'], {}), '((sci_hss_iend - sci_istart) * 24, 2)\n', (23845, 23882), True, 'import numpy as np\n'), ((25972, 26013), 'matplotlib.dates.date2num', 'mdates.date2num', (['scat.hss_start_time[sci]'], {}), '(scat.hss_start_time[sci])\n', (25987, 26013), True, 'import matplotlib.dates as mdates\n'), ((26042, 26081), 'matplotlib.dates.date2num', 'mdates.date2num', (['scat.hss_end_time[sci]'], {}), '(scat.hss_end_time[sci])\n', (26057, 26081), True, 'import matplotlib.dates as mdates\n'), ((26121, 26166), 'numpy.round', 'np.round', (['((sci_hss_iend - sci_istart) * 24)', '(2)'], {}), '((sci_hss_iend - sci_istart) * 24, 2)\n', (26129, 26166), True, 'import numpy as np\n'), ((27863, 27904), 'matplotlib.dates.date2num', 'mdates.date2num', (['scat.hss_start_time[sci]'], {}), '(scat.hss_start_time[sci])\n', (27878, 27904), True, 'import matplotlib.dates as mdates\n'), ((27948, 27987), 'matplotlib.dates.date2num', 'mdates.date2num', (['scat.sir_end_time[sci]'], {}), '(scat.sir_end_time[sci])\n', (27963, 27987), True, 'import matplotlib.dates as mdates\n'), ((28027, 28068), 'numpy.round', 'np.round', (['((sci_iend - sci_istart) * 24)', '(2)'], {}), '((sci_iend - sci_istart) * 24, 2)\n', (28035, 28068), True, 'import numpy as np\n'), ((34233, 34256), 'os.path.isfile', 
'os.path.isfile', (['fileind'], {}), '(fileind)\n', (34247, 34256), False, 'import os\n'), ((34848, 34878), 'numpy.arange', 'np.arange', (['sci[0]', '(sci[-1] + 1)'], {}), '(sci[0], sci[-1] + 1)\n', (34857, 34878), True, 'import numpy as np\n'), ((36022, 36056), 'numpy.round', 'np.round', (['sc.r[mo_start_ind[i]]', '(4)'], {}), '(sc.r[mo_start_ind[i]], 4)\n', (36030, 36056), True, 'import numpy as np\n'), ((36119, 36155), 'numpy.round', 'np.round', (['sc.lon[mo_start_ind[i]]', '(2)'], {}), '(sc.lon[mo_start_ind[i]], 2)\n', (36127, 36155), True, 'import numpy as np\n'), ((36216, 36252), 'numpy.round', 'np.round', (['sc.lat[mo_start_ind[i]]', '(2)'], {}), '(sc.lat[mo_start_ind[i]], 2)\n', (36224, 36252), True, 'import numpy as np\n'), ((4057, 4078), 'numpy.array', 'np.array', (['insitu_time'], {}), '(insitu_time)\n', (4065, 4078), True, 'import numpy as np\n'), ((4099, 4117), 'numpy.array', 'np.array', (['insitu_r'], {}), '(insitu_r)\n', (4107, 4117), True, 'import numpy as np\n'), ((4140, 4160), 'numpy.array', 'np.array', (['insitu_lat'], {}), '(insitu_lat)\n', (4148, 4160), True, 'import numpy as np\n'), ((4183, 4203), 'numpy.array', 'np.array', (['insitu_lon'], {}), '(insitu_lon)\n', (4191, 4203), True, 'import numpy as np\n'), ((17963, 17984), 'numpy.where', 'np.where', (['(sc.vt < 450)'], {}), '(sc.vt < 450)\n', (17971, 17984), True, 'import numpy as np\n'), ((18006, 18027), 'numpy.where', 'np.where', (['(sc.vt > 450)'], {}), '(sc.vt > 450)\n', (18014, 18027), True, 'import numpy as np\n'), ((18102, 18123), 'numpy.where', 'np.where', (['(sc.vt < 350)'], {}), '(sc.vt < 350)\n', (18110, 18123), True, 'import numpy as np\n'), ((18145, 18166), 'numpy.where', 'np.where', (['(sc.vt > 350)'], {}), '(sc.vt > 350)\n', (18153, 18166), True, 'import numpy as np\n'), ((36595, 36644), 'numpy.nanmax', 'np.nanmax', (['sc.bt[icme_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.bt[icme_start_ind[i]:mo_end_ind[i]])\n', (36604, 36644), True, 'import numpy as np\n'), ((36714, 36764), 
'numpy.nanmean', 'np.nanmean', (['sc.bt[icme_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.bt[icme_start_ind[i]:mo_end_ind[i]])\n', (36724, 36764), True, 'import numpy as np\n'), ((36831, 36880), 'numpy.nanstd', 'np.nanstd', (['sc.bt[icme_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.bt[icme_start_ind[i]:mo_end_ind[i]])\n', (36840, 36880), True, 'import numpy as np\n'), ((37820, 37867), 'numpy.nanmax', 'np.nanmax', (['sc.bt[mo_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.bt[mo_start_ind[i]:mo_end_ind[i]])\n', (37829, 37867), True, 'import numpy as np\n'), ((37937, 37985), 'numpy.nanmean', 'np.nanmean', (['sc.bt[mo_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.bt[mo_start_ind[i]:mo_end_ind[i]])\n', (37947, 37985), True, 'import numpy as np\n'), ((38053, 38100), 'numpy.nanstd', 'np.nanstd', (['sc.bt[mo_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.bt[mo_start_ind[i]:mo_end_ind[i]])\n', (38062, 38100), True, 'import numpy as np\n'), ((38168, 38216), 'numpy.nanmean', 'np.nanmean', (['sc.bz[mo_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.bz[mo_start_ind[i]:mo_end_ind[i]])\n', (38178, 38216), True, 'import numpy as np\n'), ((38282, 38329), 'numpy.nanmin', 'np.nanmin', (['sc.bz[mo_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.bz[mo_start_ind[i]:mo_end_ind[i]])\n', (38291, 38329), True, 'import numpy as np\n'), ((38396, 38443), 'numpy.nanstd', 'np.nanstd', (['sc.bz[mo_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.bz[mo_start_ind[i]:mo_end_ind[i]])\n', (38405, 38443), True, 'import numpy as np\n'), ((38511, 38559), 'numpy.nanmean', 'np.nanmean', (['sc.by[mo_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.by[mo_start_ind[i]:mo_end_ind[i]])\n', (38521, 38559), True, 'import numpy as np\n'), ((38625, 38672), 'numpy.nanstd', 'np.nanstd', (['sc.by[mo_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.by[mo_start_ind[i]:mo_end_ind[i]])\n', (38634, 38672), True, 'import numpy as np\n'), ((39064, 39128), 'numpy.round', 'np.round', (['((sc.vt[mo_start_ind[i]] - sc.vt[mo_end_ind[i]]) / 2)', '(1)'], {}), '((sc.vt[mo_start_ind[i]] - 
sc.vt[mo_end_ind[i]]) / 2, 1)\n', (39072, 39128), True, 'import numpy as np\n'), ((2430, 2447), 'sunpy.time.parse_time', 'parse_time', (['time1'], {}), '(time1)\n', (2440, 2447), False, 'from sunpy.time import parse_time\n'), ((2598, 2615), 'sunpy.time.parse_time', 'parse_time', (['time1'], {}), '(time1)\n', (2608, 2615), False, 'from sunpy.time import parse_time\n'), ((2773, 2790), 'sunpy.time.parse_time', 'parse_time', (['time1'], {}), '(time1)\n', (2783, 2790), False, 'from sunpy.time import parse_time\n'), ((2960, 2977), 'sunpy.time.parse_time', 'parse_time', (['time1'], {}), '(time1)\n', (2970, 2977), False, 'from sunpy.time import parse_time\n'), ((3250, 3267), 'sunpy.time.parse_time', 'parse_time', (['time1'], {}), '(time1)\n', (3260, 3267), False, 'from sunpy.time import parse_time\n'), ((3603, 3620), 'sunpy.time.parse_time', 'parse_time', (['time1'], {}), '(time1)\n', (3613, 3620), False, 'from sunpy.time import parse_time\n'), ((3630, 3647), 'sunpy.time.parse_time', 'parse_time', (['time1'], {}), '(time1)\n', (3640, 3647), False, 'from sunpy.time import parse_time\n'), ((4771, 4788), 'numpy.radians', 'np.radians', (['lamda'], {}), '(lamda)\n', (4781, 4788), True, 'import numpy as np\n'), ((8941, 8962), 'numpy.round', 'np.round', (['insitu_r', '(3)'], {}), '(insitu_r, 3)\n', (8949, 8962), True, 'import numpy as np\n'), ((8963, 8986), 'numpy.round', 'np.round', (['insitu_lon', '(2)'], {}), '(insitu_lon, 2)\n', (8971, 8986), True, 'import numpy as np\n'), ((8987, 9010), 'numpy.round', 'np.round', (['insitu_lat', '(2)'], {}), '(insitu_lat, 2)\n', (8995, 9010), True, 'import numpy as np\n'), ((9010, 9055), 'numpy.round', 'np.round', (['(insitu_lon - higeocat_sse_lon[i])', '(1)'], {}), '(insitu_lon - higeocat_sse_lon[i], 1)\n', (9018, 9055), True, 'import numpy as np\n'), ((26433, 26482), 'numpy.nanmax', 'np.nanmax', (['sc.vt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.vt[hss_start_ind[i]:hss_end_ind[i]])\n', (26442, 26482), True, 'import numpy as np\n'), 
((26812, 26862), 'numpy.nanmean', 'np.nanmean', (['sc.vt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.vt[hss_start_ind[i]:hss_end_ind[i]])\n', (26822, 26862), True, 'import numpy as np\n'), ((26935, 26984), 'numpy.nanstd', 'np.nanstd', (['sc.vt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.vt[hss_start_ind[i]:hss_end_ind[i]])\n', (26944, 26984), True, 'import numpy as np\n'), ((27057, 27106), 'numpy.nanmax', 'np.nanmax', (['sc.bt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bt[hss_start_ind[i]:hss_end_ind[i]])\n', (27066, 27106), True, 'import numpy as np\n'), ((27181, 27231), 'numpy.nanmean', 'np.nanmean', (['sc.bt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bt[hss_start_ind[i]:hss_end_ind[i]])\n', (27191, 27231), True, 'import numpy as np\n'), ((27302, 27351), 'numpy.nanstd', 'np.nanstd', (['sc.bt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bt[hss_start_ind[i]:hss_end_ind[i]])\n', (27311, 27351), True, 'import numpy as np\n'), ((27420, 27469), 'numpy.nanmin', 'np.nanmin', (['sc.bz[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bz[hss_start_ind[i]:hss_end_ind[i]])\n', (27429, 27469), True, 'import numpy as np\n'), ((27523, 27573), 'numpy.nanmean', 'np.nanmean', (['sc.bz[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bz[hss_start_ind[i]:hss_end_ind[i]])\n', (27533, 27573), True, 'import numpy as np\n'), ((27626, 27675), 'numpy.nanstd', 'np.nanstd', (['sc.bz[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bz[hss_start_ind[i]:hss_end_ind[i]])\n', (27635, 27675), True, 'import numpy as np\n'), ((28218, 28267), 'numpy.nanmax', 'np.nanmax', (['sc.vt[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.vt[sir_start_ind[i]:sir_end_ind[i]])\n', (28227, 28267), True, 'import numpy as np\n'), ((28342, 28392), 'numpy.nanmean', 'np.nanmean', (['sc.vt[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.vt[sir_start_ind[i]:sir_end_ind[i]])\n', (28352, 28392), True, 'import numpy as np\n'), ((28465, 28514), 'numpy.nanstd', 'np.nanstd', (['sc.vt[sir_start_ind[i]:sir_end_ind[i]]'], {}), 
'(sc.vt[sir_start_ind[i]:sir_end_ind[i]])\n', (28474, 28514), True, 'import numpy as np\n'), ((28587, 28636), 'numpy.nanmax', 'np.nanmax', (['sc.bt[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.bt[sir_start_ind[i]:sir_end_ind[i]])\n', (28596, 28636), True, 'import numpy as np\n'), ((28711, 28761), 'numpy.nanmean', 'np.nanmean', (['sc.bt[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.bt[sir_start_ind[i]:sir_end_ind[i]])\n', (28721, 28761), True, 'import numpy as np\n'), ((28832, 28881), 'numpy.nanstd', 'np.nanstd', (['sc.bt[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.bt[sir_start_ind[i]:sir_end_ind[i]])\n', (28841, 28881), True, 'import numpy as np\n'), ((28950, 28999), 'numpy.nanmin', 'np.nanmin', (['sc.bz[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.bz[sir_start_ind[i]:sir_end_ind[i]])\n', (28959, 28999), True, 'import numpy as np\n'), ((29053, 29103), 'numpy.nanmean', 'np.nanmean', (['sc.bz[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.bz[sir_start_ind[i]:sir_end_ind[i]])\n', (29063, 29103), True, 'import numpy as np\n'), ((29156, 29205), 'numpy.nanstd', 'np.nanstd', (['sc.bz[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.bz[sir_start_ind[i]:sir_end_ind[i]])\n', (29165, 29205), True, 'import numpy as np\n'), ((29503, 29547), 'matplotlib.dates.date2num', 'mdates.date2num', (['scat.sir_start_time[sci[i]]'], {}), '(scat.sir_start_time[sci[i]])\n', (29518, 29547), True, 'import matplotlib.dates as mdates\n'), ((29576, 29618), 'matplotlib.dates.date2num', 'mdates.date2num', (['scat.sir_end_time[sci[i]]'], {}), '(scat.sir_end_time[sci[i]])\n', (29591, 29618), True, 'import matplotlib.dates as mdates\n'), ((29669, 29710), 'numpy.round', 'np.round', (['((sci_iend - sci_istart) * 24)', '(2)'], {}), '((sci_iend - sci_istart) * 24, 2)\n', (29677, 29710), True, 'import numpy as np\n'), ((30960, 31004), 'matplotlib.dates.date2num', 'mdates.date2num', (['scat.hss_start_time[sci[i]]'], {}), '(scat.hss_start_time[sci[i]])\n', (30975, 31004), True, 'import matplotlib.dates as 
mdates\n'), ((31041, 31083), 'matplotlib.dates.date2num', 'mdates.date2num', (['scat.hss_end_time[sci[i]]'], {}), '(scat.hss_end_time[sci[i]])\n', (31056, 31083), True, 'import matplotlib.dates as mdates\n'), ((31134, 31179), 'numpy.round', 'np.round', (['((sci_hss_iend - sci_istart) * 24)', '(2)'], {}), '((sci_hss_iend - sci_istart) * 24, 2)\n', (31142, 31179), True, 'import numpy as np\n'), ((37048, 37098), 'numpy.nanmean', 'np.nanmean', (['sc.vt[icme_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.vt[icme_start_ind[i]:mo_end_ind[i]])\n', (37058, 37098), True, 'import numpy as np\n'), ((37154, 37203), 'numpy.nanstd', 'np.nanstd', (['sc.vt[icme_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.vt[icme_start_ind[i]:mo_end_ind[i]])\n', (37163, 37203), True, 'import numpy as np\n'), ((38851, 38899), 'numpy.nanmean', 'np.nanmean', (['sc.vt[mo_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.vt[mo_start_ind[i]:mo_end_ind[i]])\n', (38861, 38899), True, 'import numpy as np\n'), ((38953, 39000), 'numpy.nanstd', 'np.nanstd', (['sc.vt[mo_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.vt[mo_start_ind[i]:mo_end_ind[i]])\n', (38962, 39000), True, 'import numpy as np\n'), ((39181, 39229), 'numpy.nanmean', 'np.nanmean', (['sc.np[mo_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.np[mo_start_ind[i]:mo_end_ind[i]])\n', (39191, 39229), True, 'import numpy as np\n'), ((39285, 39332), 'numpy.nanstd', 'np.nanstd', (['sc.np[mo_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.np[mo_start_ind[i]:mo_end_ind[i]])\n', (39294, 39332), True, 'import numpy as np\n'), ((39394, 39442), 'numpy.nanmean', 'np.nanmean', (['sc.tp[mo_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.tp[mo_start_ind[i]:mo_end_ind[i]])\n', (39404, 39442), True, 'import numpy as np\n'), ((39502, 39549), 'numpy.nanstd', 'np.nanstd', (['sc.tp[mo_start_ind[i]:mo_end_ind[i]]'], {}), '(sc.tp[mo_start_ind[i]:mo_end_ind[i]])\n', (39511, 39549), True, 'import numpy as np\n'), ((39716, 39734), 'numpy.nanmean', 'np.nanmean', (['pdyn_i'], {}), '(pdyn_i)\n', (39726, 39734), True, 
'import numpy as np\n'), ((39787, 39804), 'numpy.nanstd', 'np.nanstd', (['pdyn_i'], {}), '(pdyn_i)\n', (39796, 39804), True, 'import numpy as np\n'), ((39926, 39978), 'numpy.nanmean', 'np.nanmean', (['sc.vt[icme_start_ind[i]:mo_start_ind[i]]'], {}), '(sc.vt[icme_start_ind[i]:mo_start_ind[i]])\n', (39936, 39978), True, 'import numpy as np\n'), ((40036, 40087), 'numpy.nanstd', 'np.nanstd', (['sc.vt[icme_start_ind[i]:mo_start_ind[i]]'], {}), '(sc.vt[icme_start_ind[i]:mo_start_ind[i]])\n', (40045, 40087), True, 'import numpy as np\n'), ((40158, 40210), 'numpy.nanmean', 'np.nanmean', (['sc.np[icme_start_ind[i]:mo_start_ind[i]]'], {}), '(sc.np[icme_start_ind[i]:mo_start_ind[i]])\n', (40168, 40210), True, 'import numpy as np\n'), ((40270, 40321), 'numpy.nanstd', 'np.nanstd', (['sc.np[icme_start_ind[i]:mo_start_ind[i]]'], {}), '(sc.np[icme_start_ind[i]:mo_start_ind[i]])\n', (40279, 40321), True, 'import numpy as np\n'), ((40496, 40514), 'numpy.nanmean', 'np.nanmean', (['pdyn_i'], {}), '(pdyn_i)\n', (40506, 40514), True, 'import numpy as np\n'), ((40571, 40588), 'numpy.nanstd', 'np.nanstd', (['pdyn_i'], {}), '(pdyn_i)\n', (40580, 40588), True, 'import numpy as np\n'), ((2471, 2501), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(8)', '(13)'], {}), '(2018, 8, 13)\n', (2488, 2501), False, 'import datetime\n'), ((2639, 2668), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(3)', '(1)'], {}), '(2020, 3, 1)\n', (2656, 2668), False, 'import datetime\n'), ((2814, 2845), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(10)', '(24)'], {}), '(2018, 10, 24)\n', (2831, 2845), False, 'import datetime\n'), ((3001, 3031), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(9)', '(27)'], {}), '(2014, 9, 27)\n', (3018, 3031), False, 'import datetime\n'), ((3291, 3320), 'datetime.datetime', 'datetime.datetime', (['(2008)', '(5)', '(1)'], {}), '(2008, 5, 1)\n', (3308, 3320), False, 'import datetime\n'), ((4623, 4640), 'numpy.radians', 'np.radians', (['delta'], {}), 
'(delta)\n', (4633, 4640), True, 'import numpy as np\n'), ((9178, 9192), 'numpy.rint', 'np.rint', (['visse'], {}), '(visse)\n', (9185, 9192), True, 'import numpy as np\n'), ((24157, 24206), 'numpy.nanmax', 'np.nanmax', (['sc.vt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.vt[hss_start_ind[i]:hss_end_ind[i]])\n', (24166, 24206), True, 'import numpy as np\n'), ((24264, 24278), 'numpy.isnan', 'np.isnan', (['vmax'], {}), '(vmax)\n', (24272, 24278), True, 'import numpy as np\n'), ((24793, 24843), 'numpy.nanmean', 'np.nanmean', (['sc.vt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.vt[hss_start_ind[i]:hss_end_ind[i]])\n', (24803, 24843), True, 'import numpy as np\n'), ((24981, 25030), 'numpy.nanstd', 'np.nanstd', (['sc.vt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.vt[hss_start_ind[i]:hss_end_ind[i]])\n', (24990, 25030), True, 'import numpy as np\n'), ((25173, 25222), 'numpy.nanmax', 'np.nanmax', (['sc.bt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bt[hss_start_ind[i]:hss_end_ind[i]])\n', (25182, 25222), True, 'import numpy as np\n'), ((25305, 25355), 'numpy.nanmean', 'np.nanmean', (['sc.bt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bt[hss_start_ind[i]:hss_end_ind[i]])\n', (25315, 25355), True, 'import numpy as np\n'), ((25435, 25484), 'numpy.nanstd', 'np.nanstd', (['sc.bt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bt[hss_start_ind[i]:hss_end_ind[i]])\n', (25444, 25484), True, 'import numpy as np\n'), ((25561, 25610), 'numpy.nanmin', 'np.nanmin', (['sc.bz[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bz[hss_start_ind[i]:hss_end_ind[i]])\n', (25570, 25610), True, 'import numpy as np\n'), ((25668, 25718), 'numpy.nanmean', 'np.nanmean', (['sc.bz[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bz[hss_start_ind[i]:hss_end_ind[i]])\n', (25678, 25718), True, 'import numpy as np\n'), ((25775, 25824), 'numpy.nanstd', 'np.nanstd', (['sc.bz[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bz[hss_start_ind[i]:hss_end_ind[i]])\n', (25784, 25824), True, 'import numpy as np\n'), 
((26662, 26714), 'numpy.nanargmax', 'np.nanargmax', (['sc.vt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.vt[hss_start_ind[i]:hss_end_ind[i]])\n', (26674, 26714), True, 'import numpy as np\n'), ((29784, 29833), 'numpy.nanmax', 'np.nanmax', (['sc.vt[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.vt[sir_start_ind[i]:sir_end_ind[i]])\n', (29793, 29833), True, 'import numpy as np\n'), ((29916, 29966), 'numpy.nanmean', 'np.nanmean', (['sc.vt[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.vt[sir_start_ind[i]:sir_end_ind[i]])\n', (29926, 29966), True, 'import numpy as np\n'), ((30047, 30096), 'numpy.nanstd', 'np.nanstd', (['sc.vt[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.vt[sir_start_ind[i]:sir_end_ind[i]])\n', (30056, 30096), True, 'import numpy as np\n'), ((30177, 30226), 'numpy.nanmax', 'np.nanmax', (['sc.bt[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.bt[sir_start_ind[i]:sir_end_ind[i]])\n', (30186, 30226), True, 'import numpy as np\n'), ((30309, 30359), 'numpy.nanmean', 'np.nanmean', (['sc.bt[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.bt[sir_start_ind[i]:sir_end_ind[i]])\n', (30319, 30359), True, 'import numpy as np\n'), ((30438, 30487), 'numpy.nanstd', 'np.nanstd', (['sc.bt[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.bt[sir_start_ind[i]:sir_end_ind[i]])\n', (30447, 30487), True, 'import numpy as np\n'), ((30564, 30613), 'numpy.nanmin', 'np.nanmin', (['sc.bz[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.bz[sir_start_ind[i]:sir_end_ind[i]])\n', (30573, 30613), True, 'import numpy as np\n'), ((30671, 30721), 'numpy.nanmean', 'np.nanmean', (['sc.bz[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.bz[sir_start_ind[i]:sir_end_ind[i]])\n', (30681, 30721), True, 'import numpy as np\n'), ((30778, 30827), 'numpy.nanstd', 'np.nanstd', (['sc.bz[sir_start_ind[i]:sir_end_ind[i]]'], {}), '(sc.bz[sir_start_ind[i]:sir_end_ind[i]])\n', (30787, 30827), True, 'import numpy as np\n'), ((31253, 31302), 'numpy.nanmax', 'np.nanmax', (['sc.vt[hss_start_ind[i]:hss_end_ind[i]]'], {}), 
'(sc.vt[hss_start_ind[i]:hss_end_ind[i]])\n', (31262, 31302), True, 'import numpy as np\n'), ((31648, 31698), 'numpy.nanmean', 'np.nanmean', (['sc.vt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.vt[hss_start_ind[i]:hss_end_ind[i]])\n', (31658, 31698), True, 'import numpy as np\n'), ((31779, 31828), 'numpy.nanstd', 'np.nanstd', (['sc.vt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.vt[hss_start_ind[i]:hss_end_ind[i]])\n', (31788, 31828), True, 'import numpy as np\n'), ((31909, 31958), 'numpy.nanmax', 'np.nanmax', (['sc.bt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bt[hss_start_ind[i]:hss_end_ind[i]])\n', (31918, 31958), True, 'import numpy as np\n'), ((32041, 32091), 'numpy.nanmean', 'np.nanmean', (['sc.bt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bt[hss_start_ind[i]:hss_end_ind[i]])\n', (32051, 32091), True, 'import numpy as np\n'), ((32170, 32219), 'numpy.nanstd', 'np.nanstd', (['sc.bt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bt[hss_start_ind[i]:hss_end_ind[i]])\n', (32179, 32219), True, 'import numpy as np\n'), ((32296, 32345), 'numpy.nanmin', 'np.nanmin', (['sc.bz[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bz[hss_start_ind[i]:hss_end_ind[i]])\n', (32305, 32345), True, 'import numpy as np\n'), ((32403, 32453), 'numpy.nanmean', 'np.nanmean', (['sc.bz[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bz[hss_start_ind[i]:hss_end_ind[i]])\n', (32413, 32453), True, 'import numpy as np\n'), ((32510, 32559), 'numpy.nanstd', 'np.nanstd', (['sc.bz[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.bz[hss_start_ind[i]:hss_end_ind[i]])\n', (32519, 32559), True, 'import numpy as np\n'), ((33687, 33712), 'numpy.square', 'np.square', (['(speed * 1000.0)'], {}), '(speed * 1000.0)\n', (33696, 33712), True, 'import numpy as np\n'), ((8858, 8884), 'sunpy.time.parse_time', 'parse_time', (['higeocat_t0[i]'], {}), '(higeocat_t0[i])\n', (8868, 8884), False, 'from sunpy.time import parse_time\n'), ((8894, 8908), 'sunpy.time.parse_time', 'parse_time', (['ta'], {}), '(ta)\n', 
(8904, 8908), False, 'from sunpy.time import parse_time\n'), ((31490, 31542), 'numpy.nanargmax', 'np.nanargmax', (['sc.vt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.vt[hss_start_ind[i]:hss_end_ind[i]])\n', (31502, 31542), True, 'import numpy as np\n'), ((34952, 34988), 'numpy.where', 'np.where', (['(sc.time > sc_icme_start[i])'], {}), '(sc.time > sc_icme_start[i])\n', (34960, 34988), True, 'import numpy as np\n'), ((35080, 35114), 'numpy.where', 'np.where', (['(sc.time > sc_mo_start[i])'], {}), '(sc.time > sc_mo_start[i])\n', (35088, 35114), True, 'import numpy as np\n'), ((35159, 35191), 'numpy.where', 'np.where', (['(sc.time > sc_mo_end[i])'], {}), '(sc.time > sc_mo_end[i])\n', (35167, 35191), True, 'import numpy as np\n'), ((11876, 11912), 'numpy.mean', 'np.mean', (['[ta4_err_high, ta4_err_low]'], {}), '([ta4_err_high, ta4_err_low])\n', (11883, 11912), True, 'import numpy as np\n'), ((12670, 12692), 'numpy.round', 'np.round', (['insitu_r4', '(3)'], {}), '(insitu_r4, 3)\n', (12678, 12692), True, 'import numpy as np\n'), ((12693, 12717), 'numpy.round', 'np.round', (['insitu_lon4', '(2)'], {}), '(insitu_lon4, 2)\n', (12701, 12717), True, 'import numpy as np\n'), ((12718, 12742), 'numpy.round', 'np.round', (['insitu_lat4', '(2)'], {}), '(insitu_lat4, 2)\n', (12726, 12742), True, 'import numpy as np\n'), ((12742, 12788), 'numpy.round', 'np.round', (['(insitu_lon4 - higeocat_sse_lon[i])', '(1)'], {}), '(insitu_lon4 - higeocat_sse_lon[i], 1)\n', (12750, 12788), True, 'import numpy as np\n'), ((19191, 19226), 'numpy.where', 'np.where', (['(sc.time > sc_sir_start[i])'], {}), '(sc.time > sc_sir_start[i])\n', (19199, 19226), True, 'import numpy as np\n'), ((19278, 19313), 'numpy.where', 'np.where', (['(sc.time > sc_hss_start[i])'], {}), '(sc.time > sc_hss_start[i])\n', (19286, 19313), True, 'import numpy as np\n'), ((19363, 19396), 'numpy.where', 'np.where', (['(sc.time > sc_sir_end[i])'], {}), '(sc.time > sc_sir_end[i])\n', (19371, 19396), True, 'import numpy as 
np\n'), ((20630, 20665), 'numpy.where', 'np.where', (['(sc.time > sc_sir_start[i])'], {}), '(sc.time > sc_sir_start[i])\n', (20638, 20665), True, 'import numpy as np\n'), ((20717, 20752), 'numpy.where', 'np.where', (['(sc.time > sc_hss_start[i])'], {}), '(sc.time > sc_hss_start[i])\n', (20725, 20752), True, 'import numpy as np\n'), ((20802, 20835), 'numpy.where', 'np.where', (['(sc.time > sc_sir_end[i])'], {}), '(sc.time > sc_sir_end[i])\n', (20810, 20835), True, 'import numpy as np\n'), ((21892, 21927), 'numpy.where', 'np.where', (['(sc.time > sc_hss_start[i])'], {}), '(sc.time > sc_hss_start[i])\n', (21900, 21927), True, 'import numpy as np\n'), ((21977, 22010), 'numpy.where', 'np.where', (['(sc.time > sc_hss_end[i])'], {}), '(sc.time > sc_hss_end[i])\n', (21985, 22010), True, 'import numpy as np\n'), ((22319, 22354), 'numpy.where', 'np.where', (['(sc.time > sc_hss_start[i])'], {}), '(sc.time > sc_hss_start[i])\n', (22327, 22354), True, 'import numpy as np\n'), ((22404, 22437), 'numpy.where', 'np.where', (['(sc.time > sc_hss_end[i])'], {}), '(sc.time > sc_hss_end[i])\n', (22412, 22437), True, 'import numpy as np\n'), ((24543, 24595), 'numpy.nanargmax', 'np.nanargmax', (['sc.vt[hss_start_ind[i]:hss_end_ind[i]]'], {}), '(sc.vt[hss_start_ind[i]:hss_end_ind[i]])\n', (24555, 24595), True, 'import numpy as np\n'), ((4683, 4700), 'numpy.radians', 'np.radians', (['lamda'], {}), '(lamda)\n', (4693, 4700), True, 'import numpy as np\n'), ((4712, 4729), 'numpy.radians', 'np.radians', (['delta'], {}), '(delta)\n', (4722, 4729), True, 'import numpy as np\n'), ((12244, 12286), 'numpy.mean', 'np.mean', (['[visse4_err_high, visse4_err_low]'], {}), '([visse4_err_high, visse4_err_low])\n', (12251, 12286), True, 'import numpy as np\n'), ((12937, 12952), 'numpy.rint', 'np.rint', (['visse4'], {}), '(visse4)\n', (12944, 12952), True, 'import numpy as np\n'), ((18574, 18609), 'numpy.where', 'np.where', (['(sc.time > sc_sir_start[i])'], {}), '(sc.time > sc_sir_start[i])\n', (18582, 
18609), True, 'import numpy as np\n'), ((18665, 18700), 'numpy.where', 'np.where', (['(sc.time > sc_hss_start[i])'], {}), '(sc.time > sc_hss_start[i])\n', (18673, 18700), True, 'import numpy as np\n'), ((18754, 18787), 'numpy.where', 'np.where', (['(sc.time > sc_sir_end[i])'], {}), '(sc.time > sc_sir_end[i])\n', (18762, 18787), True, 'import numpy as np\n'), ((18956, 18991), 'numpy.where', 'np.where', (['(sc.time > sc_hss_start[i])'], {}), '(sc.time > sc_hss_start[i])\n', (18964, 18991), True, 'import numpy as np\n'), ((19045, 19078), 'numpy.where', 'np.where', (['(sc.time > sc_hss_end[i])'], {}), '(sc.time > sc_hss_end[i])\n', (19053, 19078), True, 'import numpy as np\n'), ((12567, 12593), 'sunpy.time.parse_time', 'parse_time', (['higeocat_t0[i]'], {}), '(higeocat_t0[i])\n', (12577, 12593), False, 'from sunpy.time import parse_time\n'), ((12603, 12618), 'sunpy.time.parse_time', 'parse_time', (['ta4'], {}), '(ta4)\n', (12613, 12618), False, 'from sunpy.time import parse_time\n'), ((11673, 11688), 'sunpy.time.parse_time', 'parse_time', (['ta4'], {}), '(ta4)\n', (11683, 11688), False, 'from sunpy.time import parse_time\n'), ((11699, 11718), 'sunpy.time.parse_time', 'parse_time', (['ta4_low'], {}), '(ta4_low)\n', (11709, 11718), False, 'from sunpy.time import parse_time\n'), ((11774, 11789), 'sunpy.time.parse_time', 'parse_time', (['ta4'], {}), '(ta4)\n', (11784, 11789), False, 'from sunpy.time import parse_time\n'), ((11800, 11820), 'sunpy.time.parse_time', 'parse_time', (['ta4_high'], {}), '(ta4_high)\n', (11810, 11820), False, 'from sunpy.time import parse_time\n')] |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT Style dataset."""
import os
import time
import numpy as np
import torch
from torch.utils.data import Dataset
from megatron import get_tokenizer
from megatron import mpu
from megatron.data.dataset_utils import build_training_sample
from megatron.data.indexed_dataset import make_dataset as make_indexed_dataset
from megatron import print_rank_0
def build_train_valid_test_datasets(data_prefix, data_impl, splits_string,
                                    train_valid_test_num_samples,
                                    max_seq_length, masked_lm_prob,
                                    short_seq_prob, seed, skip_warmup):
    """Create the (train, valid, test) BertDataset triple.

    All three splits share one memory-mapped indexed dataset; its doc-idx
    array is temporarily re-sliced per split and restored afterwards.
    """
    # Shared memory-mapped corpus.
    indexed_dataset = get_indexed_dataset_(data_prefix,
                                           data_impl,
                                           skip_warmup)

    # doc-idx is designed to hold num-docs + 1 entries, so consecutive split
    # boundaries can be read directly as half-open document ranges.
    total_num_of_documents = indexed_dataset.doc_idx.shape[0] - 1
    splits = get_train_valid_test_split_(splits_string, total_num_of_documents)

    # Report the document / sentence ranges covered by each split.
    print_rank_0(' > dataset split:')

    def _log_split(name, index):
        # Purpose: print document and sentence index ranges for one split.
        print_rank_0('    {}:'.format(name))
        print_rank_0('     document indices in [{}, {}) total of {} '
                     'documents'.format(splits[index], splits[index + 1],
                                        splits[index + 1] - splits[index]))
        start_index = indexed_dataset.doc_idx[splits[index]]
        end_index = indexed_dataset.doc_idx[splits[index + 1]]
        print_rank_0('     sentence indices in [{}, {}) total of {} '
                     'sentences'.format(start_index, end_index,
                                        end_index - start_index))

    _log_split('train', 0)
    _log_split('validation', 1)
    _log_split('test', 2)

    def _make_split(index, name):
        # Purpose: build one BertDataset over the split's document range,
        # or return None for an empty split.
        if splits[index + 1] <= splits[index]:
            return None
        # Remember the full doc-idx so it can be restored after slicing.
        full_doc_idx = indexed_dataset.get_doc_idx()
        # The extra +1 keeps the upper bound of the last document reachable.
        indexed_dataset.set_doc_idx(
            full_doc_idx[splits[index]:splits[index + 1] + 1])
        dataset = BertDataset(
            name=name,
            indexed_dataset=indexed_dataset,
            data_prefix=data_prefix,
            num_epochs=None,
            max_num_samples=train_valid_test_num_samples[index],
            masked_lm_prob=masked_lm_prob,
            max_seq_length=max_seq_length,
            short_seq_prob=short_seq_prob,
            seed=seed)
        # Restore the original pointer so the shared dataset stays intact.
        indexed_dataset.set_doc_idx(full_doc_idx)
        assert indexed_dataset.doc_idx[0] == 0
        assert indexed_dataset.doc_idx.shape[0] == \
            (total_num_of_documents + 1)
        return dataset

    return (_make_split(0, 'train'),
            _make_split(1, 'valid'),
            _make_split(2, 'test'))
class BertDataset(Dataset):
    """Masked-LM training dataset built on top of an indexed sentence corpus.

    Each item is produced on the fly by `build_training_sample` from a
    contiguous run of sentences described by the precomputed samples mapping.
    """

    def __init__(self, name, indexed_dataset, data_prefix,
                 num_epochs, max_num_samples, masked_lm_prob,
                 max_seq_length, short_seq_prob, seed):
        # Bookkeeping needed when samples are constructed in __getitem__.
        self.name = name
        self.seed = seed
        self.masked_lm_prob = masked_lm_prob
        self.max_seq_length = max_seq_length
        self.indexed_dataset = indexed_dataset

        # Maps sample index -> (start sentence, end sentence, target length).
        self.samples_mapping = get_samples_mapping_(self.indexed_dataset,
                                                    data_prefix,
                                                    num_epochs,
                                                    max_num_samples,
                                                    self.max_seq_length,
                                                    short_seq_prob,
                                                    self.seed,
                                                    self.name)

        # Special-token ids used when masking / padding samples.
        tokenizer = get_tokenizer()
        self.vocab_id_list = list(tokenizer.inv_vocab.keys())
        self.vocab_id_to_token_dict = tokenizer.inv_vocab
        self.cls_id = tokenizer.cls
        self.sep_id = tokenizer.sep
        self.mask_id = tokenizer.mask
        self.pad_id = tokenizer.pad

    def __len__(self):
        return self.samples_mapping.shape[0]

    def __getitem__(self, idx):
        start_idx, end_idx, seq_length = self.samples_mapping[idx]
        sentences = [self.indexed_dataset[i]
                     for i in range(start_idx, end_idx)]
        # Use a numpy RandomState (not python's random): numpy's randint has
        # an exclusive upper bound, which build_training_sample relies on.
        rng = np.random.RandomState(seed=(self.seed + idx))
        return build_training_sample(sentences, seq_length,
                                     self.max_seq_length,  # needed for padding
                                     self.vocab_id_list,
                                     self.vocab_id_to_token_dict,
                                     self.cls_id, self.sep_id,
                                     self.mask_id, self.pad_id,
                                     self.masked_lm_prob, rng)
def get_indexed_dataset_(data_prefix, data_impl, skip_warmup):
    """Load the memory-mapped indexed dataset and log basic statistics."""
    print_rank_0(' > building dataset index ...')
    t0 = time.time()
    dataset = make_indexed_dataset(data_prefix,
                                   data_impl,
                                   skip_warmup)
    # The final document boundary must equal the total sentence count.
    assert dataset.sizes.shape[0] == dataset.doc_idx[-1]
    print_rank_0(' > finished creating indexed dataset in {:4f} '
                 'seconds'.format(time.time() - t0))

    print_rank_0(' > indexed dataset stats:')
    print_rank_0('    number of documents: {}'.format(
        dataset.doc_idx.shape[0] - 1))
    print_rank_0('    number of sentences: {}'.format(
        dataset.sizes.shape[0]))

    return dataset
def get_train_valid_test_split_(splits_string, size):
    """Turn a comma- or '/'-separated weight string into split boundaries.

    Returns a list of four cumulative document indices
    [0, train_end, valid_end, size] covering exactly `size` documents.
    """
    # Parse the weight list; a bare number means "everything in one split".
    if ',' in splits_string:
        weights = [float(tok) for tok in splits_string.split(',')]
    elif '/' in splits_string:
        weights = [float(tok) for tok in splits_string.split('/')]
    else:
        weights = [float(splits_string)]
    # Pad with zeros / truncate to exactly three weights, then normalize.
    weights = (weights + [0., 0., 0.])[:3]
    total = sum(weights)
    assert total > 0.0
    weights = [w / total for w in weights]
    # Cumulative boundaries, rounding each split size independently.
    bounds = [0]
    for w in weights:
        bounds.append(bounds[-1] + int(round(w * float(size))))
    # Per-split rounding can leave the last boundary off `size`; shift every
    # boundary after the first by the surplus so the total is exact.
    surplus = bounds[-1] - size
    for i in range(1, len(bounds)):
        bounds[i] -= surplus
    assert len(bounds) == 4
    assert bounds[-1] == size
    return bounds
def get_samples_mapping_(indexed_dataset,
                         data_prefix,
                         num_epochs,
                         max_num_samples,
                         short_seq_prob,
                         seed,
                         name):
    """Build (or load from cache) the sample index mapping for `name`.

    The mapping is an array of (start sentence, end sentence, length)
    triples built by the C++ helper; rank 0 builds and saves it as a .npy
    file whose name encodes all relevant parameters, then every rank loads
    that file from disk.
    """
    # Either num_epochs or max_num_samples bounds the mapping size; the
    # unspecified one is set to a near-max sentinel so it never triggers.
    if not num_epochs:
        if not max_num_samples:
            raise ValueError("Need to specify either max_num_samples "
                             "or num_epochs")
        num_epochs = np.iinfo(np.int32).max - 1
    if not max_num_samples:
        max_num_samples = np.iinfo(np.int64).max - 1

    # Filename of the index mapping: encode every parameter that changes the
    # result so stale caches are never reused.
    indexmap_filename = data_prefix
    indexmap_filename += '_{}_indexmap'.format(name)
    if num_epochs != (np.iinfo(np.int32).max - 1):
        indexmap_filename += '_{}ep'.format(num_epochs)
    if max_num_samples != (np.iinfo(np.int64).max - 1):
        indexmap_filename += '_{}mns'.format(max_num_samples)
    indexmap_filename += '_{}msl'.format(max_seq_length)
    indexmap_filename += '_{:0.2f}ssp'.format(short_seq_prob)
    indexmap_filename += '_{}s'.format(seed)
    indexmap_filename += '.npy'

    # Build the indexed mapping if not exist.  Only global rank 0 builds;
    # other ranks fall through to the all_reduce below and then load.
    if torch.distributed.get_rank() == 0 and \
       not os.path.isfile(indexmap_filename):
        print(' > WARNING: could not find index map file {}, building '
              'the indices on rank 0 ...'.format(indexmap_filename))

        # Make sure the types match the helpers input types.
        assert indexed_dataset.doc_idx.dtype == np.int64
        assert indexed_dataset.sizes.dtype == np.int32

        # Build samples mapping
        verbose = torch.distributed.get_rank() == 0
        start_time = time.time()
        print_rank_0(' > building sapmles index mapping for {} ...'.format(
            name))
        # First compile and then import.
        from megatron.data.dataset_utils import compile_helper
        compile_helper()
        from megatron.data import helpers
        samples_mapping = helpers.build_mapping(
            indexed_dataset.doc_idx,
            indexed_dataset.sizes,
            num_epochs,
            max_num_samples,
            max_seq_length - 3,  # account for added tokens
            short_seq_prob,
            seed,
            verbose)
        print_rank_0(' > done building sapmles index maping')
        np.save(indexmap_filename, samples_mapping, allow_pickle=True)
        print_rank_0(' > saved the index mapping in {}'.format(
            indexmap_filename))
        # Make sure all the ranks have built the mapping
        print_rank_0(' > elasped time to build and save samples mapping '
                     '(seconds): {:4f}'.format(
                         time.time() - start_time))
    # This should be a barrier but nccl barrier assumes
    # device_index=rank which is not the case for model
    # parallel case
    counts = torch.cuda.LongTensor([1])
    torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
    assert counts[0].item() == torch.distributed.get_world_size(
        group=mpu.get_data_parallel_group())

    # Load indexed dataset.  Every rank (including rank 0) reads the cached
    # file so they all share an identical mapping.
    print_rank_0(' > loading indexed mapping from {}'.format(
        indexmap_filename))
    start_time = time.time()
    samples_mapping = np.load(indexmap_filename, allow_pickle=True)
    print_rank_0('    loaded indexed file in {:3.3f} seconds'.format(
        time.time() - start_time))
    print_rank_0('    total number of samples: {}'.format(
        samples_mapping.shape[0]))

    return samples_mapping
| [
"torch.cuda.LongTensor",
"megatron.data.indexed_dataset.make_dataset",
"megatron.data.dataset_utils.compile_helper",
"megatron.mpu.get_data_parallel_group",
"megatron.data.dataset_utils.build_training_sample",
"numpy.iinfo",
"megatron.print_rank_0",
"os.path.isfile",
"numpy.save",
"torch.distribut... | [((1803, 1836), 'megatron.print_rank_0', 'print_rank_0', (['""" > dataset split:"""'], {}), "(' > dataset split:')\n", (1815, 1836), False, 'from megatron import print_rank_0\n'), ((6484, 6529), 'megatron.print_rank_0', 'print_rank_0', (['""" > building dataset index ..."""'], {}), "(' > building dataset index ...')\n", (6496, 6529), False, 'from megatron import print_rank_0\n'), ((6548, 6559), 'time.time', 'time.time', ([], {}), '()\n', (6557, 6559), False, 'import time\n'), ((6582, 6639), 'megatron.data.indexed_dataset.make_dataset', 'make_indexed_dataset', (['data_prefix', 'data_impl', 'skip_warmup'], {}), '(data_prefix, data_impl, skip_warmup)\n', (6602, 6639), True, 'from megatron.data.indexed_dataset import make_dataset as make_indexed_dataset\n'), ((6931, 6972), 'megatron.print_rank_0', 'print_rank_0', (['""" > indexed dataset stats:"""'], {}), "(' > indexed dataset stats:')\n", (6943, 6972), False, 'from megatron import print_rank_0\n'), ((11070, 11096), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['[1]'], {}), '([1])\n', (11091, 11096), False, 'import torch\n'), ((11421, 11432), 'time.time', 'time.time', ([], {}), '()\n', (11430, 11432), False, 'import time\n'), ((11455, 11500), 'numpy.load', 'np.load', (['indexmap_filename'], {'allow_pickle': '(True)'}), '(indexmap_filename, allow_pickle=True)\n', (11462, 11500), True, 'import numpy as np\n'), ((5169, 5184), 'megatron.get_tokenizer', 'get_tokenizer', ([], {}), '()\n', (5182, 5184), False, 'from megatron import get_tokenizer\n'), ((5915, 5958), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(self.seed + idx)'}), '(seed=self.seed + idx)\n', (5936, 5958), True, 'import numpy as np\n'), ((5976, 6178), 'megatron.data.dataset_utils.build_training_sample', 'build_training_sample', (['sample', 'seq_length', 'self.max_seq_length', 'self.vocab_id_list', 'self.vocab_id_to_token_dict', 'self.cls_id', 'self.sep_id', 'self.mask_id', 'self.pad_id', 'self.masked_lm_prob', 
'np_rng'], {}), '(sample, seq_length, self.max_seq_length, self.\n vocab_id_list, self.vocab_id_to_token_dict, self.cls_id, self.sep_id,\n self.mask_id, self.pad_id, self.masked_lm_prob, np_rng)\n', (5997, 6178), False, 'from megatron.data.dataset_utils import build_training_sample\n'), ((9886, 9897), 'time.time', 'time.time', ([], {}), '()\n', (9895, 9897), False, 'import time\n'), ((10105, 10121), 'megatron.data.dataset_utils.compile_helper', 'compile_helper', ([], {}), '()\n', (10119, 10121), False, 'from megatron.data.dataset_utils import compile_helper\n'), ((10190, 10347), 'megatron.data.helpers.build_mapping', 'helpers.build_mapping', (['indexed_dataset.doc_idx', 'indexed_dataset.sizes', 'num_epochs', 'max_num_samples', '(max_seq_length - 3)', 'short_seq_prob', 'seed', 'verbose'], {}), '(indexed_dataset.doc_idx, indexed_dataset.sizes,\n num_epochs, max_num_samples, max_seq_length - 3, short_seq_prob, seed,\n verbose)\n', (10211, 10347), False, 'from megatron.data import helpers\n'), ((10473, 10526), 'megatron.print_rank_0', 'print_rank_0', (['""" > done building sapmles index maping"""'], {}), "(' > done building sapmles index maping')\n", (10485, 10526), False, 'from megatron import print_rank_0\n'), ((10535, 10597), 'numpy.save', 'np.save', (['indexmap_filename', 'samples_mapping'], {'allow_pickle': '(True)'}), '(indexmap_filename, samples_mapping, allow_pickle=True)\n', (10542, 10597), True, 'import numpy as np\n'), ((9379, 9407), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (9405, 9407), False, 'import torch\n'), ((9430, 9463), 'os.path.isfile', 'os.path.isfile', (['indexmap_filename'], {}), '(indexmap_filename)\n', (9444, 9463), False, 'import os\n'), ((9831, 9859), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (9857, 9859), False, 'import torch\n'), ((11144, 11173), 'megatron.mpu.get_data_parallel_group', 'mpu.get_data_parallel_group', ([], {}), '()\n', (11171, 11173), False, 'from 
megatron import mpu\n'), ((6899, 6910), 'time.time', 'time.time', ([], {}), '()\n', (6908, 6910), False, 'import time\n'), ((8670, 8688), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (8678, 8688), True, 'import numpy as np\n'), ((8751, 8769), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (8759, 8769), True, 'import numpy as np\n'), ((8926, 8944), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (8934, 8944), True, 'import numpy as np\n'), ((9038, 9056), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (9046, 9056), True, 'import numpy as np\n'), ((11254, 11283), 'megatron.mpu.get_data_parallel_group', 'mpu.get_data_parallel_group', ([], {}), '()\n', (11281, 11283), False, 'from megatron import mpu\n'), ((11579, 11590), 'time.time', 'time.time', ([], {}), '()\n', (11588, 11590), False, 'import time\n'), ((10898, 10909), 'time.time', 'time.time', ([], {}), '()\n', (10907, 10909), False, 'import time\n')] |
# IMPORTING MODULES
#----------------------------------------------------------------------------
# coding: utf-8
from __future__ import print_function, division
import warnings
warnings.filterwarnings("ignore")
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.autograd import Variable
from PIL import Image
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import time
import os
import copy
import pickle
#---------------------------------------------------------------------------
# IMPORTANT PARAMETERS
#---------------------------------------------------------------------------
# Run configuration.
device = "cuda" if torch.cuda.is_available() else 'cpu'  # prefer GPU when present
root_dir = "../Dataset/"   # root of the EmotiW dataset tree
epochs = 1                 # number of training epochs
batch_size = 8             # samples per mini-batch
maxFaces = 15              # max faces kept per group image (rest are dropped)
random_seed = 8            # seed for reproducibility
#---------------------------------------------------------------------------
# DATASET AND LOADERS
#---------------------------------------------------------------------------
# Training file names come straight from the class sub-directories;
# validation/test file lists were pickled ahead of time.
neg_train = sorted(os.listdir('../Dataset/emotiw/train/'+'Negative/'))
neu_train = sorted(os.listdir('../Dataset/emotiw/train/'+'Neutral/'))
pos_train = sorted(os.listdir('../Dataset/emotiw/train/'+'Positive/'))
train_filelist = neg_train + neu_train + pos_train
val_filelist = []
test_filelist = []
# NOTE: pickle.load on trusted local files only — do not point these paths
# at untrusted data.
with open('../Dataset/val_list', 'rb') as fp:
    val_filelist = pickle.load(fp)
with open('../Dataset/test_list', 'rb') as fp:
    test_filelist = pickle.load(fp)
# Keep only entries whose names start with a known label prefix
# ('neg...', 'neu...', 'pos...').  The original code removed items from a
# list while iterating over it, which silently skips the element that
# follows each removal; rebuilding the lists filters every entry reliably.
train_filelist = [i for i in train_filelist if i[0] == 'p' or i[0] == 'n']
val_filelist = [i for i in val_filelist if i[0] == 'p' or i[0] == 'n']
# [train, val, test] sizes, for a quick sanity check.
dataset_sizes = [len(train_filelist), len(val_filelist), len(test_filelist)]
print(dataset_sizes)

# Global-image pipelines: resize to 224x224 and normalize with the standard
# ImageNet mean/std (matches torchvision pretrained backbones).  The three
# pipelines are currently identical; kept separate so per-split augmentation
# can be added later.
train_global_data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

val_global_data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

test_global_data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

# Face-crop pipelines: 96x112 is the input size expected by the sphereface
# network defined below; no normalization is applied here.
train_faces_data_transform = transforms.Compose([
        transforms.Resize((96,112)),
        transforms.ToTensor()
    ])

val_faces_data_transform = transforms.Compose([
        transforms.Resize((96,112)),
        transforms.ToTensor()
    ])

test_faces_data_transform = transforms.Compose([
        transforms.Resize((96,112)),
        transforms.ToTensor()
    ])
class EmotiWDataset(Dataset):
    """EmotiW group-emotion dataset.

    Each item bundles the full group image, precomputed MTCNN face features,
    and aligned face crops, together with the 3-class emotion label.
    """

    # NOTE(review): the transform defaults are evaluated once at definition
    # time and shared by all instances; harmless here because ToTensor() is
    # stateless, but worth confirming if stateful transforms are ever used.
    def __init__(self, filelist, root_dir, loadTrain=True, transformGlobal=transforms.ToTensor(), transformFaces = transforms.ToTensor()):
        """
        Args:
            filelist: List of names of image/feature files.
            root_dir: Dataset directory.
            loadTrain: If True read from the 'train' split, else from 'val'.
            transformGlobal (callable, optional): transform applied to the
                full group image.
            transformFaces (callable, optional): transform applied to each
                aligned face crop.
        """
        self.filelist = filelist
        self.root_dir = root_dir
        self.transformGlobal = transformGlobal
        self.transformFaces = transformFaces
        self.loadTrain = loadTrain

    def __len__(self):
        return (len(self.filelist))

    def __getitem__(self, idx):
        # Split directory on disk; test files also live under 'val'.
        train = ''
        if self.loadTrain:
            train = 'train'
        else:
            train = 'val'
        filename = self.filelist[idx].split('.')[0]

        # Maps the filename prefix to a class directory name, and the class
        # directory name to an integer label.
        labeldict = {'neg':'Negative',
                    'neu':'Neutral',
                    'pos':'Positive',
                    'Negative': 0,
                    'Neutral': 1,
                    'Positive':2}

        labelname = labeldict[filename.split('_')[0]]

        #IMAGE
        image = Image.open(self.root_dir+'emotiw/'+train+'/'+labelname+'/'+filename+'.jpg')
        if self.transformGlobal:
            image = self.transformGlobal(image)
        # Grayscale images come out as a single channel; replicate it to
        # three channels so the model always sees a 3x224x224 tensor.
        if image.shape[0] == 1:
            image_1 = np.zeros((3, 224, 224), dtype = float)
            image_1[0] = image
            image_1[1] = image
            image_1[2] = image
            image = image_1
            image = torch.FloatTensor(image.tolist())

        #FEATURES FROM MTCNN
        # 256-d per-face features, stored under key 'a' of the .npz file.
        features = np.load(self.root_dir+'FaceFeatures/'+train+'/'+labelname+'/'+filename+'.npz')['a']
        numberFaces = features.shape[0]
        # Zero-pad (or truncate) to the global maxFaces limit.
        maxNumber = min(numberFaces, maxFaces)
        features1 = np.zeros((maxFaces, 256), dtype = 'float32')
        for i in range(maxNumber):
            features1[i] = features[i]
        features1 = torch.from_numpy(features1)

        #ALIGNED CROPPED FACE IMAGES
        # One 3x96x112 crop per detected face, zero-padded to maxFaces.
        features2 = np.zeros((maxFaces, 3, 96, 112), dtype = 'float32')
        # print(maxNumber)
        for i in range(maxNumber):
            face = Image.open(self.root_dir + 'AlignedCroppedImages/'+train+'/'+ labelname + '/' + filename+ '_' + str(i) + '.jpg')
            if self.transformFaces:
                face = self.transformFaces(face)
            features2[i] = face.numpy()
        features2 = torch.from_numpy(features2)

        #SAMPLE
        sample = {'image': image, 'features_mtcnn': features1, 'features_aligned':features2, 'label':labeldict[labelname], 'numberFaces': numberFaces}
        return sample
# Instantiate the three dataset splits and wrap them in DataLoaders.
# shuffle=False everywhere: ordering is deterministic (feature extraction /
# evaluation usage); num_workers=0 keeps loading in the main process.
train_dataset = EmotiWDataset(train_filelist, root_dir, loadTrain = True, transformGlobal=train_global_data_transform, transformFaces=train_faces_data_transform)

train_dataloader = DataLoader(train_dataset, shuffle=False, batch_size=batch_size, num_workers=0)

val_dataset = EmotiWDataset(val_filelist, root_dir, loadTrain=False, transformGlobal = val_global_data_transform, transformFaces=val_faces_data_transform)

val_dataloader = DataLoader(val_dataset, shuffle =False, batch_size = batch_size, num_workers = 0)

test_dataset = EmotiWDataset(test_filelist, root_dir, loadTrain=False, transformGlobal = test_global_data_transform, transformFaces = test_faces_data_transform)

test_dataloader = DataLoader(test_dataset, shuffle = False, batch_size = batch_size, num_workers = 0)

print('Dataset Loaded')
#---------------------------------------------------------------------------
# SPHEREFACE MODEL FOR ALIGNED MODELS
#---------------------------------------------------------------------------
class LSoftmaxLinear(nn.Module):
    """Large-margin softmax (L-Softmax) linear layer.

    In training mode the target-class logit is replaced by the
    margin-adjusted value ||W_t|| * ||x|| * psi(theta), following
    "Large-Margin Softmax Loss for Convolutional Neural Networks"
    (Liu et al., 2016); in eval mode this is a plain linear projection.

    Fixes vs. the original:
      * `math` and `binom` were used without any import (NameError on
        construction) — imported locally here.
      * the training branch referenced an undefined global `numClasses`;
        replaced with `self.output_dim`.
      * `reset_parameters()` was defined but never called, leaving `weight`
        as uninitialized memory — now called from __init__.
    """

    def __init__(self, input_dim, output_dim, margin):
        super().__init__()
        # Local imports: neither `math` nor scipy's `binom` is imported at
        # module level in this file.
        import math
        from scipy.special import binom

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.margin = margin

        self.weight = nn.Parameter(torch.FloatTensor(input_dim, output_dim))
        # Initialize the weight (the original never invoked this).
        self.reset_parameters()

        # Constants for the multiple-angle expansion
        # cos(m*theta) = sum_k signs[k] * C(m, 2k) * cos^{m-2k} * sin^{2k}.
        self.divisor = math.pi / self.margin
        self.coeffs = binom(margin, range(0, margin + 1, 2))
        self.cos_exps = range(self.margin, -1, -2)
        self.sin_sq_exps = range(len(self.cos_exps))
        self.signs = [1]
        for i in range(1, len(self.sin_sq_exps)):
            self.signs.append(self.signs[-1] * -1)

    def reset_parameters(self):
        # Kaiming-normal init on the transposed view (output x input fan).
        nn.init.kaiming_normal(self.weight.data.t())

    def find_k(self, cos):
        """Return k indexing the monotonic piece of cos(m*theta) containing theta."""
        acos = cos.acos()
        k = (acos / self.divisor).floor().detach()
        return k

    def forward(self, input, target=None):
        """Compute logits; `target` is required in training mode only."""
        if self.training:
            assert target is not None
            logit = input.matmul(self.weight)
            batch_size = logit.size(0)
            logit_target = logit[range(batch_size), target]
            weight_target_norm = self.weight[:, target].norm(p=2, dim=0)
            input_norm = input.norm(p=2, dim=1)

            norm_target_prod = weight_target_norm * input_norm
            # cos(theta) between each input and its target-class weight;
            # the epsilon guards against zero norms.
            cos_target = logit_target / (norm_target_prod + 1e-10)
            sin_sq_target = 1 - cos_target**2
            weight_nontarget_norm = self.weight.norm(p=2, dim=0)
            norm_nontarget_prod = torch.zeros((batch_size, self.output_dim), dtype = torch.float)
            logit2 = torch.zeros((batch_size, self.output_dim), dtype = torch.float)
            logit3 = torch.zeros((batch_size, self.output_dim), dtype = torch.float)
            norm_nontarget_prod = norm_nontarget_prod.to(device)
            logit2 = logit2.to(device)
            logit3 = logit3.to(device)
            for i in range(self.output_dim):
                norm_nontarget_prod[:, i] = weight_nontarget_norm[i] * input_norm
                logit2[:, i] = norm_target_prod / (norm_nontarget_prod[:, i] + 1e-10)
            for i in range(batch_size):
                for j in range(self.output_dim):
                    logit3[i][j] = logit2[i][j] * logit[i][j]

            num_ns = self.margin//2 + 1
            coeffs = Variable(input.data.new(self.coeffs))
            cos_exps = Variable(input.data.new(self.cos_exps))
            sin_sq_exps = Variable(input.data.new(self.sin_sq_exps))
            signs = Variable(input.data.new(self.signs))

            cos_terms = cos_target.unsqueeze(1) ** cos_exps.unsqueeze(0)
            sin_sq_terms = (sin_sq_target.unsqueeze(1)
                            ** sin_sq_exps.unsqueeze(0))

            cosm_terms = (signs.unsqueeze(0) * coeffs.unsqueeze(0)
                          * cos_terms * sin_sq_terms)
            cosm = cosm_terms.sum(1)
            k = self.find_k(cos_target)

            ls_target = norm_target_prod * (((-1)**k * cosm) - 2*k)
            logit3[range(batch_size), target] = ls_target
            # NOTE(review): `logit3` holds the margin-adjusted logits but the
            # plain `logit` is returned, so the margin computation above is
            # currently discarded — confirm whether `logit3` should be
            # returned here before changing training behavior.
            return logit
        else:
            assert target is None
            return input.matmul(self.weight)
class sphere20a(nn.Module):
    """SphereFace-style 20-layer CNN for 112x96 inputs.

    Produces a 512-d embedding (``feature=True``) or class logits through
    an L-Softmax head (``feature=False``). Attribute names match the
    original layer-by-layer definition so saved state_dicts still load.
    """
    def __init__(self,classnum=3,feature=False):
        super(sphere20a, self).__init__()
        self.classnum = classnum
        self.feature = feature
        # (suffix, in_ch, out_ch, stride); stride-2 convs start each stage:
        # B*3*112*96 -> 64*56*48 -> 128*28*24 -> 256*14*12 -> 512*7*6.
        specs = [
            ('1_1', 3, 64, 2),
            ('1_2', 64, 64, 1), ('1_3', 64, 64, 1),
            ('2_1', 64, 128, 2),
            ('2_2', 128, 128, 1), ('2_3', 128, 128, 1),
            ('2_4', 128, 128, 1), ('2_5', 128, 128, 1),
            ('3_1', 128, 256, 2),
            ('3_2', 256, 256, 1), ('3_3', 256, 256, 1),
            ('3_4', 256, 256, 1), ('3_5', 256, 256, 1),
            ('3_6', 256, 256, 1), ('3_7', 256, 256, 1),
            ('3_8', 256, 256, 1), ('3_9', 256, 256, 1),
            ('4_1', 256, 512, 2),
            ('4_2', 512, 512, 1), ('4_3', 512, 512, 1),
        ]
        # Registration order (conv then its PReLU, in spec order) matches
        # the original explicit attribute-by-attribute definition.
        for tag, cin, cout, stride in specs:
            setattr(self, 'conv' + tag, nn.Conv2d(cin, cout, 3, stride, 1))
            setattr(self, 'relu' + tag, nn.PReLU(cout))
        self.fc5 = nn.Linear(512*7*6,512)
        self.fc6 = LSoftmaxLinear(512,self.classnum, 4)
    def forward(self, x, y):
        # Residual unit: t + relu_b(conv_b(relu_a(conv_a(t)))).
        def skip(t, conv_a, relu_a, conv_b, relu_b):
            return t + relu_b(conv_b(relu_a(conv_a(t))))
        x = self.relu1_1(self.conv1_1(x))
        x = skip(x, self.conv1_2, self.relu1_2, self.conv1_3, self.relu1_3)
        x = self.relu2_1(self.conv2_1(x))
        x = skip(x, self.conv2_2, self.relu2_2, self.conv2_3, self.relu2_3)
        x = skip(x, self.conv2_4, self.relu2_4, self.conv2_5, self.relu2_5)
        x = self.relu3_1(self.conv3_1(x))
        x = skip(x, self.conv3_2, self.relu3_2, self.conv3_3, self.relu3_3)
        x = skip(x, self.conv3_4, self.relu3_4, self.conv3_5, self.relu3_5)
        x = skip(x, self.conv3_6, self.relu3_6, self.conv3_7, self.relu3_7)
        x = skip(x, self.conv3_8, self.relu3_8, self.conv3_9, self.relu3_9)
        x = self.relu4_1(self.conv4_1(x))
        x = skip(x, self.conv4_2, self.relu4_2, self.conv4_3, self.relu4_3)
        x = x.view(x.size(0),-1)
        x = self.fc5(x)
        if self.feature:
            return x
        return self.fc6(x)
#---------------------------------------------------------------------------
# MODEL 1
# Pretrained EmotiW DenseNet (DenseNet161_EmotiW)
#---------------------------------------------------------------------------
# Loads a fully pickled model object; torch.load then requires the defining
# class to be importable here. map_location deserializes onto CPU regardless
# of the device the checkpoint was saved from.
global_model = torch.load('../TrainedModels/TrainDataset/DenseNet161_EmotiW', map_location=lambda storage, loc: storage)
model1 = global_model
print('Pretrained EmotiW DenseNet Loaded! (Model 1)')
#---------------------------------------------------------------------------
# MODEL 2
# Pretrained EmotiC DenseNet (densenet_emotiw_pretrainemotic_lr001)
#---------------------------------------------------------------------------
# Rebuild the architecture, swap in a 3-class head, and restore weights
# from a state_dict checkpoint (unlike Model 1, which unpickles the whole
# model object).
model2 = models.densenet161(pretrained=False)
num_ftrs = model2.classifier.in_features
model2.classifier = nn.Linear(num_ftrs, 3)
model2 = model2.to(device)
# Wrapped in DataParallel so the checkpoint's "module."-prefixed keys match;
# unwrapped again via .module right after loading.
model2 = nn.DataParallel(model2)
model2.load_state_dict(torch.load('../TrainedModels/TrainDataset/densenet_emotiw_pretrainemotic_lr001.pt', map_location=lambda storage, loc: storage))
model2 = model2.module
print('Pretrained EmotiC DenseNet Loaded! (Model 2)')
#---------------------------------------------------------------------------
# MODEL 3
# Aligned Model Global Level (AlignedModelTrainerSoftmax_AlignedModel_EmotiW_lr01_Softmax)
#---------------------------------------------------------------------------
class FaceAttention(nn.Module):
    """Runs a per-face model on every face crop of each image and returns
    the average of its 3-d outputs over the valid faces.

    Images with zero faces yield a zero vector. Relies on module-level
    globals ``maxFaces`` and ``device``.
    """
    def __init__(self, non_align_model):
        super(FaceAttention, self).__init__()
        self.non_align_model = non_align_model
    def forward(self, face_features_initial, numberFaces, labels):
        # Valid face count per image, capped at maxFaces.
        capped = np.minimum(numberFaces, maxFaces).float()
        capped = capped.to(device)
        n_imgs = face_features_initial.shape[0]
        per_face = torch.zeros((n_imgs, maxFaces, 3), dtype=torch.float)
        for idx in range(n_imgs):
            crops = face_features_initial[idx]
            # Broadcast this image's label to one entry per face slot.
            proto = torch.zeros((2,), dtype=torch.long)
            lab_vec = proto.new_full((maxFaces,), labels[idx], dtype=torch.long)
            lab_vec = lab_vec.to(device)
            per_face[idx, :, :] = self.non_align_model.forward(crops, lab_vec)
        per_face = per_face.to(device)
        totals = torch.zeros((n_imgs, 3), dtype=torch.float)
        totals = totals.to(device)
        averages = torch.zeros((n_imgs, 3), dtype=torch.float)
        averages = averages.to(device)
        # Sum over only the valid face slots, then divide by their count.
        for idx in range(n_imgs):
            for f in range(int(capped[idx])):
                totals[idx] = totals[idx] + per_face[idx][f]
            if int(capped[idx]) != 0:
                denom = float(capped[idx])
                averages[idx] = totals[idx] / denom
        return averages
aligned_model_global_level_path = "../TrainedModels/TrainDataset/AlignedModelTrainerSoftmax_AlignedModel_EmotiW_lr01_Softmax"
# Whole pickled DataParallel model; .module unwraps it for single-device use.
align_model = torch.load(aligned_model_global_level_path, map_location=lambda storage, loc: storage).module
model3 = align_model
print('Aligned Model Global Level Loaded! (Model 3)')
#---------------------------------------------------------------------------
# MODEL 4
# Aligned Model Image Level Trained (AlignedModel_EmotiW_lr01_Softmax)
#---------------------------------------------------------------------------
aligned_model_image_level_path = '../TrainedModels/TrainDataset/AlignedModel_EmotiW_lr01_Softmax'
# Per-face backbone for Model 4; wrapped by the FaceAttention averager below.
align_model = torch.load(aligned_model_image_level_path, map_location=lambda storage, loc: storage).module
class FaceAttention(nn.Module):
    """Average the wrapped per-face model's 3-d outputs over each image's
    valid faces (identical in behavior to the Model-3 wrapper above).

    Zero-face images return a zero vector. Uses module globals
    ``maxFaces`` and ``device``.
    """
    def __init__(self, non_align_model):
        super(FaceAttention, self).__init__()
        self.non_align_model = non_align_model
    def forward(self, face_features_initial, numberFaces, labels):
        # Per-image valid face count, clipped to the maxFaces slots.
        n_valid = np.minimum(numberFaces, maxFaces).float()
        n_valid = n_valid.to(device)
        batch = face_features_initial.shape[0]
        outputs = torch.zeros((batch, maxFaces, 3), dtype=torch.float)
        for b in range(batch):
            crops = face_features_initial[b]
            # Replicate the image label across all face slots.
            seed = torch.zeros((2,), dtype=torch.long)
            face_labels = seed.new_full((maxFaces,), labels[b], dtype=torch.long)
            face_labels = face_labels.to(device)
            outputs[b, :, :] = self.non_align_model.forward(crops, face_labels)
        outputs = outputs.to(device)
        running = torch.zeros((batch, 3), dtype=torch.float)
        running = running.to(device)
        mean_out = torch.zeros((batch, 3), dtype=torch.float)
        mean_out = mean_out.to(device)
        for b in range(batch):
            for f in range(int(n_valid[b])):
                running[b] = running[b] + outputs[b][f]
            if int(n_valid[b]) != 0:
                count = float(n_valid[b])
                mean_out[b] = running[b] / count
        return mean_out
# Model 4 = averaging wrapper around the image-level aligned face model.
model4 = FaceAttention(align_model)
print('Aligned Model Image Level Loaded! (Model 4)')
#---------------------------------------------------------------------------
# MODEL 5
# Avg. Face Features Concat Model (PretrainedDenseNetAvgFaceFeatures-FineTune-2208-3-NoSoftmax-Reg-lr001)
#---------------------------------------------------------------------------
class FaceAttention(nn.Module):
    """Concatenate averaged 256-d face features with 2208-d DenseNet global
    features and classify into 3 classes.

    Uses module globals ``F``, ``Variable``, ``maxFaces``, ``torch``.
    """
    def __init__(self, global_model):
        super(FaceAttention, self).__init__()
        self.global_model = global_model
        # 2464 = 2208 (global) + 256 (face) concatenated features.
        self.global_fc3_debug = nn.Linear(2464, 3)
        nn.init.kaiming_normal_(self.global_fc3_debug.weight)
        self.global_fc3_debug.bias.data.fill_(0.01)
        # affine=False: pure normalization, no learned scale/shift.
        self.bn_global = nn.BatchNorm1d(2208, affine=False)
        self.bn_face_features = nn.BatchNorm1d(256, affine=False)
        self.dropout_classifier = nn.Dropout(0.5)
    def forward(self, image, face_features, numberFaces):
        """image: input batch; face_features: precomputed per-face features
        (batch, maxFaces, 256); numberFaces: faces per image."""
        features = self.global_model.forward(image)
        out = F.relu(features, inplace=True)
        # Global-average-pool the DenseNet feature map to one 2208-d vector.
        global_features_initial = F.avg_pool2d(out, kernel_size=7, stride=1).view(features.size(0), -1)
        global_features_initial = Variable(global_features_initial)
        batch_size = face_features.shape[0]
        global_features_initial = global_features_initial.view(-1,2208)
        face_features_sum = torch.sum(face_features, dim=1)
        face_features_sum = face_features_sum.view(-1, 256)
        # Divide each summed face vector by its (capped) face count;
        # zero-face images are left as the zero sum.
        for i in range(batch_size):
            faces_num_div = float(min(numberFaces[i], maxFaces))
            if faces_num_div != 0:
                face_features_sum[i] = face_features_sum[i] / faces_num_div
        #THE face_features_sum TENSOR NOW CONTAINS AVERAGE OF THE FACE FEATURES
        face_features_sum = self.bn_face_features(face_features_sum)
        global_features_initial = self.bn_global(global_features_initial)
        final_features = torch.cat((face_features_sum, global_features_initial), dim=1)
        final_features = self.dropout_classifier(final_features)
        x = (self.global_fc3_debug(final_features))
        return x
# Whole pickled DataParallel model; .module unwraps the inner network.
model5 = torch.load('../TrainedModels/TrainDataset/PretrainedDenseNet-FineTune-2208-3-lr001-Regularized-Corrected', map_location=lambda storage, loc: storage).module
print('Avg. Face Features Concat Model Loaded! (Model 5)')
#---------------------------------------------------------------------------
# MODEL 6
# Face Attention Model (EmotiC) using 3rd Para Attention
# (FaceAttention_AlignedModel_FullTrain_3para_lr001_dropout_BN_SoftmaxLr01_EmotiC)
#---------------------------------------------------------------------------
class FaceAttention(nn.Module):
    """Dot-product ("3rd parameterization") attention over per-face
    features, concatenated with the global features for 3-way
    classification. The global feature, projected to 64-d, is the query;
    each face's 64-d feature is a key/value.

    Uses module globals ``F``, ``Variable``, ``np``, ``maxFaces``,
    ``device``, ``torch``.
    """
    def __init__(self, global_model, non_align_model):
        super(FaceAttention, self).__init__()
        self.global_model = global_model
        self.non_align_model = non_align_model
        # 320 = 256 (global) + 64 (attended face) concatenated.
        self.global_fc3_debug = nn.Linear(320, 3)
        nn.init.kaiming_normal_(self.global_fc3_debug.weight)
        self.global_fc3_debug.bias.data.fill_(0.01)
        # Projects the 256-d global feature to the 64-d query space.
        self.global_fc = nn.Linear(256, 64)
        nn.init.kaiming_normal_(self.global_fc.weight)
        self.global_fc.bias.data.fill_(0.01)
        self.global_fc_dropout = nn.Dropout(p = 0.5)
        self.global_fc_main_dropout = nn.Dropout(p = 0.5)
        self.non_align_model_dropout = nn.Dropout(p = 0.5)
        self.bn_debug_face = nn.BatchNorm1d(64, affine=False)
        self.bn_debug_global = nn.BatchNorm1d(256, affine=False)
    def forward(self, image, face_features_initial, numberFaces, labels):
        features = self.global_model.forward(image)
        global_features_main = self.global_fc_main_dropout(features)
        global_features = self.global_fc_dropout(self.global_fc(global_features_main))
        global_features = global_features.view(-1,1,64)
        batch_size = global_features.shape[0]
        maxNumber = np.minimum(numberFaces, maxFaces)
        face_features = torch.zeros((batch_size,maxFaces,64), dtype = torch.float)
        face_features = face_features.to(device)
        for j in range(batch_size):
            face = face_features_initial[j]
            face_features[j, :, :] = self.non_align_model.forward(face, labels)
        face_features = self.non_align_model_dropout(face_features)
        face_features = face_features.view(batch_size, 64, -1)
        # -inf mask over padding face slots so softmax gives them weight 0.
        mask = np.zeros((batch_size,1,maxFaces), dtype = 'float32')
        for j in range(batch_size):
            for i in range(maxFaces - (int(maxNumber[j]))):
                mask[j][0][int(numberFaces[j]) + i] = float('-inf')
        mask = torch.from_numpy(mask)
        mask = mask.to(device)
        attention_scores = torch.bmm(global_features, face_features) #(batch_size, 1, 64) x (batch_size, 64, nFaces) = (batch_size, 1, nFaces)
        attention_scores = attention_scores+mask
        #Convert Scores to Weight
        attention_scores = F.softmax(attention_scores, dim = -1)
        attention_weights = attention_scores
        attention_weights = Variable(attention_scores)
        # Zero-face images: all-(-inf) rows softmax to NaN, so overwrite
        # their weights with zeros explicitly.
        for i in range(len(maxNumber)):
            if maxNumber[i] == 0:
                for j in range(maxFaces):
                    attention_weights[i][0][j] = 0
        #Taking Weighted Average of Face Featrues
        face_features = face_features.view(batch_size, -1, 64) #(batch_size, nFaces, 64)
        attention_scores = attention_weights.view(batch_size, 1, -1) #(batch_size, 1, nFaces)
        attended_face_features = torch.bmm(attention_scores, face_features)
        #Concatenating Global and Attended Face Features
        attended_face_features = attended_face_features.view(batch_size, -1)
        attended_face_features = self.bn_debug_face(attended_face_features)
        global_features_main = self.bn_debug_global(global_features_main)
        final_features = torch.cat((attended_face_features, global_features_main), dim=1)
        x = (self.global_fc3_debug(final_features))
        return x
# Whole pickled DataParallel model; .module unwraps the inner network.
model6 = torch.load('../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_3para_lr001_dropout_BN_SoftmaxLr01_EmotiC', map_location=lambda storage, loc: storage).module
print('Face Attention Model (EmotiC) using 3rd Para Attention Loaded! (Model 6)')
#---------------------------------------------------------------------------
# MODEL 7
# Face Attention Model (EmotiC) using 4th Para Attention
# (FaceAttention_AlignedModel_FullTrain_4para_lr001_dropout_BN_SoftmaxLr01_EmotiC)
#---------------------------------------------------------------------------
class FaceAttention(nn.Module):
    """Learned-MLP ("4th parameterization") attention: each 256-d face
    feature is scored by a small fc1(256->64) -> fc2(64->1) network; the
    softmaxed scores weight the faces, and the attended face feature is
    concatenated with the 256-d global feature for 3-way classification.

    Uses module globals ``np``, ``F``, ``Variable``, ``maxFaces``,
    ``device``, ``torch``.
    """
    def __init__(self, global_model, non_align_model):
        super(FaceAttention, self).__init__()
        self.global_model = global_model
        self.non_align_model = non_align_model
        # 512 = 256 (global) + 256 (attended face) concatenated.
        self.global_fc3_debug = nn.Linear(512, 3)
        nn.init.kaiming_normal_(self.global_fc3_debug.weight)
        self.global_fc3_debug.bias.data.fill_(0.01)
        # Two-layer scoring MLP: per-face feature -> scalar attention score.
        self.attentionfc1 = nn.Linear(256, 64)
        nn.init.kaiming_normal_(self.attentionfc1.weight)
        self.attentionfc1.bias.data.fill_(0.01)
        self.attentionfc2 = nn.Linear(64, 1)
        nn.init.kaiming_normal_(self.attentionfc2.weight)
        self.attentionfc2.bias.data.fill_(0.01)
        self.attentionfc1_dropout = nn.Dropout(p = 0.5)
        self.global_fc_main_dropout = nn.Dropout(p = 0.5)
        self.non_align_model_dropout = nn.Dropout(p = 0.5)
        self.bn_debug_face = nn.BatchNorm1d(256, affine=False)
        self.bn_debug_global = nn.BatchNorm1d(256, affine=False)
    def forward(self, image, face_features_initial, numberFaces, labels):
        features = self.global_model.forward(image)
        global_features = self.global_fc_main_dropout(features)
        batch_size = global_features.shape[0]
        global_features = global_features.view(-1,1,256)
        maxNumber = np.minimum(numberFaces, maxFaces)
        face_features = torch.zeros((batch_size,maxFaces,256), dtype = torch.float)
        face_features = face_features.to(device)
        mid_face_features = torch.zeros((batch_size, maxFaces, 1), dtype = torch.float)
        face_features_inter = torch.zeros((batch_size, maxFaces, 64), dtype = torch.float)
        face_features_inter = face_features_inter.to(device)
        mid_face_features = mid_face_features.to(device)
        # Per image: run the face model, then score each face slot.
        for j in range(batch_size):
            face = face_features_initial[j]
            face_features[j, :, :] = self.non_align_model_dropout(self.non_align_model.forward(face, labels))
            face_features_inter[j] = self.attentionfc1_dropout(self.attentionfc1(face_features[j]))
            mid_face_features[j] = self.attentionfc2(face_features_inter[j])
        mid_face_features = mid_face_features.view(batch_size, 1, maxFaces)
        # -inf mask over padding face slots so softmax gives them weight 0.
        mask = np.zeros((batch_size,1,maxFaces), dtype = 'float32')
        for j in range(batch_size):
            for i in range(maxFaces - (int(maxNumber[j]))):
                mask[j][0][int(numberFaces[j]) + i] = float('-inf')
        mask = torch.from_numpy(mask)
        mask = mask.to(device)
        attention_scores = mid_face_features + mask
        #Convert Scores to Weight
        attention_scores = F.softmax(attention_scores, dim = -1)
        attention_weights = Variable(attention_scores)
        # Zero-face images: overwrite the NaN softmax output with zeros.
        for i in range(len(maxNumber)):
            if maxNumber[i] == 0:
                for j in range(maxFaces):
                    attention_weights[i][0][j] = 0
        #Taking Weighted Average of Face Featrues
        face_features = face_features.view(batch_size, -1, 256) #(batch_size, nFaces, 256)
        attention_scores = attention_weights.view(batch_size, 1, -1) #(batch_size, 1, nFaces)
        attended_face_features = torch.bmm(attention_scores, face_features)
        #Concatenating Global and Attended Face Features
        attended_face_features = attended_face_features.view(batch_size, -1)
        global_features = global_features.view(batch_size, -1)
        attended_face_features = self.bn_debug_face(attended_face_features)
        global_features = self.bn_debug_global(global_features)
        final_features = torch.cat((attended_face_features, global_features), dim=1)
        x = (self.global_fc3_debug(final_features))
        return x
# Whole pickled DataParallel model; .module unwraps the inner network.
model7 = torch.load('../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_4para_lr001_dropout_BN_SoftmaxLr01_EmotiC', map_location=lambda storage, loc: storage).module
print('Face Attention Model (EmotiC) using 4rd Para Attention Loaded! (Model 7)')
#---------------------------------------------------------------------------
# MODEL 8
# Face Attention Model using 4th Para Attention (FaceAttention_AlignedModel_FullTrain_4para_lr01_dropout_BN_SoftmaxLr01)
#---------------------------------------------------------------------------
class FaceAttention(nn.Module):
    """4th-parameterization attention variant that takes a raw DenseNet
    feature map: it ReLU+avg-pools to 2208-d, projects to 256-d via
    ``global_fc_main``, scores faces with an fc1->fc2 MLP, and classifies
    the concatenated (attended-face, global) 512-d vector into 3 classes.

    Uses module globals ``np``, ``F``, ``Variable``, ``maxFaces``,
    ``device``, ``torch``.
    """
    def __init__(self, global_model, non_align_model):
        super(FaceAttention, self).__init__()
        self.global_model = global_model
        self.non_align_model = non_align_model
        # 512 = 256 (global) + 256 (attended face) concatenated.
        self.global_fc3_debug = nn.Linear(512, 3)
        nn.init.kaiming_normal_(self.global_fc3_debug.weight)
        self.global_fc3_debug.bias.data.fill_(0.01)
        # Two-layer scoring MLP: per-face feature -> scalar attention score.
        self.attentionfc1 = nn.Linear(256, 64)
        nn.init.kaiming_normal_(self.attentionfc1.weight)
        self.attentionfc1.bias.data.fill_(0.01)
        self.attentionfc2 = nn.Linear(64, 1)
        nn.init.kaiming_normal_(self.attentionfc2.weight)
        self.attentionfc2.bias.data.fill_(0.01)
        # Projects pooled 2208-d DenseNet features down to 256-d.
        self.global_fc_main = nn.Linear(2208, 256)
        nn.init.kaiming_normal_(self.global_fc_main.weight)
        self.global_fc_main.bias.data.fill_(0.01)
        self.attentionfc1_dropout = nn.Dropout(p = 0.5)
        self.global_fc_main_dropout = nn.Dropout(p = 0.5)
        self.non_align_model_dropout = nn.Dropout(p = 0.5)
        self.bn_debug_face = nn.BatchNorm1d(256, affine=False)
        self.bn_debug_global = nn.BatchNorm1d(256, affine=False)
    def forward(self, image, face_features_initial, numberFaces, labels):
        features = self.global_model.forward(image)
        out = F.relu(features, inplace = False)
        # Global-average-pool the feature map to one 2208-d vector.
        global_features_initial = F.avg_pool2d(out, kernel_size=7, stride=1).view(features.size(0), -1)
        global_features_initial = Variable(global_features_initial)
        global_features_initial = global_features_initial.view(-1,2208)
        global_features = self.global_fc_main_dropout(self.global_fc_main(global_features_initial))
        batch_size = global_features.shape[0]
        global_features = global_features.view(-1,1,256)
        maxNumber = np.minimum(numberFaces, maxFaces)
        face_features = torch.zeros((batch_size,maxFaces,256), dtype = torch.float)
        face_features = face_features.to(device)
        mid_face_features = torch.zeros((batch_size, maxFaces, 1), dtype = torch.float)
        face_features_inter = torch.zeros((batch_size, maxFaces, 64), dtype = torch.float)
        face_features_inter = face_features_inter.to(device)
        mid_face_features = mid_face_features.to(device)
        # Per image: run the face model, then score each face slot.
        for j in range(batch_size):
            face = face_features_initial[j]
            face_features[j, :, :] = self.non_align_model_dropout(self.non_align_model.forward(face, labels))
            face_features_inter[j] = self.attentionfc1_dropout(self.attentionfc1(face_features[j]))
            mid_face_features[j] = self.attentionfc2(face_features_inter[j])
        mid_face_features = mid_face_features.view(batch_size, 1, maxFaces)
        # -inf mask over padding face slots so softmax gives them weight 0.
        mask = np.zeros((batch_size,1,maxFaces), dtype = 'float32')
        for j in range(batch_size):
            for i in range(maxFaces - (int(maxNumber[j]))):
                mask[j][0][int(numberFaces[j]) + i] = float('-inf')
        mask = torch.from_numpy(mask)
        mask = mask.to(device)
        attention_scores = mid_face_features + mask
        #Convert Scores to Weight
        attention_scores = F.softmax(attention_scores, dim = -1)
        attention_weights = Variable(attention_scores)
        # Zero-face images: overwrite the NaN softmax output with zeros.
        for i in range(len(maxNumber)):
            if maxNumber[i] == 0:
                for j in range(maxFaces):
                    attention_weights[i][0][j] = 0
        #Taking Weighted Average of Face Featrues
        face_features = face_features.view(batch_size, -1, 256) #(batch_size, nFaces, 256)
        attention_scores = attention_weights.view(batch_size, 1, -1) #(batch_size, 1, nFaces)
        attended_face_features = torch.bmm(attention_scores, face_features)
        #Concatenating Global and Attended Face Features
        attended_face_features = attended_face_features.view(batch_size, -1)
        global_features = global_features.view(batch_size, -1)
        attended_face_features = self.bn_debug_face(attended_face_features)
        global_features = self.bn_debug_global(global_features)
        final_features = torch.cat((attended_face_features, global_features), dim=1)
        x = (self.global_fc3_debug(final_features))
        return x
# Whole pickled DataParallel model; .module unwraps the inner network.
model8 = torch.load('../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_4para_lr01_dropout_BN_SoftmaxLr01', map_location=lambda storage, loc: storage).module
print('Face Attention Model using 4rd Para Attention Loaded! (Model 8)')
#---------------------------------------------------------------------------
# MODEL 9
# Aligned Model Global Level (AlignedModelTrainerSoftmax_AlignedModel_EmotiW_lr01_Softmax_br128)
#---------------------------------------------------------------------------
class FaceAttention(nn.Module):
    """Average the wrapped per-face model's 3-d outputs over the valid
    faces of each image (same wrapper as Models 3/4, redefined for the
    br128 checkpoint).

    Zero-face images return a zero vector. Uses module globals
    ``maxFaces`` and ``device``.
    """
    def __init__(self, non_align_model):
        super(FaceAttention, self).__init__()
        self.non_align_model = non_align_model
    def forward(self, face_features_initial, numberFaces, labels):
        # Face counts clipped to the available face slots.
        valid = np.minimum(numberFaces, maxFaces).float()
        valid = valid.to(device)
        count = face_features_initial.shape[0]
        scores = torch.zeros((count, maxFaces, 3), dtype=torch.float)
        for b in range(count):
            crop_batch = face_features_initial[b]
            # One label entry per face slot, copied from the image label.
            base = torch.zeros((2,), dtype=torch.long)
            slot_labels = base.new_full((maxFaces,), labels[b], dtype=torch.long)
            slot_labels = slot_labels.to(device)
            scores[b, :, :] = self.non_align_model.forward(crop_batch, slot_labels)
        scores = scores.to(device)
        acc = torch.zeros((count, 3), dtype=torch.float)
        acc = acc.to(device)
        avg = torch.zeros((count, 3), dtype=torch.float)
        avg = avg.to(device)
        for b in range(count):
            for f in range(int(valid[b])):
                acc[b] = acc[b] + scores[b][f]
            if int(valid[b]) != 0:
                divisor = float(valid[b])
                avg[b] = acc[b] / divisor
        return avg
aligned_model_global_level_path = "../TrainedModels/TrainDataset/AlignedModelTrainerSoftmax_AlignedModel_EmotiW_lr01_Softmax_br128"
# Whole pickled DataParallel model; .module unwraps it for direct use.
align_model = torch.load(aligned_model_global_level_path, map_location=lambda storage, loc: storage).module
model9 = align_model
print('Aligned Model Global Level Loaded! (Model 9)')
#---------------------------------------------------------------------------
# MODEL 10
# Aligned Model Global Level Trained (AlignedModelTrainerLSoftmax_AlignedModel_EmotiW_lr001)
#---------------------------------------------------------------------------
class FaceAttention(nn.Module):
    """Face-averaging wrapper for the L-Softmax per-face model.

    ``phase`` selects the inner model's mode: 0 passes per-face labels
    (training-style margin path), any other value passes None
    (inference path). Zero-face images return a zero vector. Uses module
    globals ``maxFaces`` and ``device``.
    """
    def __init__(self, non_align_model):
        super(FaceAttention, self).__init__()
        self.non_align_model = non_align_model
    def forward(self, face_features_initial, numberFaces, labels, phase):
        # Valid faces per image, capped at the slot count.
        valid = np.minimum(numberFaces, maxFaces).float()
        valid = valid.to(device)
        count = face_features_initial.shape[0]
        outputs = torch.zeros((count, maxFaces, 3), dtype=torch.float)
        for b in range(count):
            crops = face_features_initial[b]
            base = torch.zeros((2,), dtype=torch.long)
            slot_labels = base.new_full((maxFaces,), labels[b], dtype=torch.long)
            slot_labels = slot_labels.to(device)
            # L-Softmax models need targets during training (phase 0) and
            # None during evaluation.
            if phase == 0:
                outputs[b, :, :] = self.non_align_model.forward(crops, slot_labels)
            else:
                outputs[b, :, :] = self.non_align_model.forward(crops, None)
        outputs = outputs.to(device)
        acc = torch.zeros((count, 3), dtype=torch.float)
        acc = acc.to(device)
        avg = torch.zeros((count, 3), dtype=torch.float)
        avg = avg.to(device)
        for b in range(count):
            for f in range(int(valid[b])):
                acc[b] = acc[b] + outputs[b][f]
            if int(valid[b]) != 0:
                divisor = float(valid[b])
                avg[b] = acc[b] / divisor
        return avg
aligned_model_global_level_path = "../TrainedModels/TrainDataset/AlignedModelTrainerLSoftmax_AlignedModel_EmotiW_lr001"
# Whole pickled DataParallel model; .module unwraps it for direct use.
align_model = torch.load(aligned_model_global_level_path, map_location=lambda storage, loc: storage).module
model10 = align_model
print('Aligned L-softmax Model Global Level Loaded! (Model 10)')
#---------------------------------------------------------------------------
# MODEL 11
# FaceAttention Similarity Attention Mechanism (FaceAttention_AlignedModel_FullTrain_lr001_dropout_BN_SoftmaxLr01)
#---------------------------------------------------------------------------
class FaceAttention(nn.Module):
    """Similarity (dot-product) attention in 256-d: the pooled-and-projected
    global feature is the query against each 256-d face feature; the
    attended face feature is concatenated with the global feature and
    classified into 3 classes.

    Uses module globals ``np``, ``F``, ``Variable``, ``maxFaces``,
    ``device``, ``torch``.
    """
    def __init__(self, global_model, non_align_model):
        super(FaceAttention, self).__init__()
        self.global_model = global_model
        self.non_align_model = non_align_model
        # Projects pooled 2208-d DenseNet features down to 256-d.
        self.global_fc_main = nn.Linear(2208, 256)
        nn.init.kaiming_normal_(self.global_fc_main.weight)
        self.global_fc_main.bias.data.fill_(0.01)
        # 512 = 256 (global) + 256 (attended face) concatenated.
        self.global_fc3_debug = nn.Linear(512, 3)
        nn.init.kaiming_normal_(self.global_fc3_debug.weight)
        self.global_fc3_debug.bias.data.fill_(0.01)
        self.global_fc_main_dropout = nn.Dropout(p = 0.5)
        self.non_align_model_dropout = nn.Dropout(p = 0.5)
        self.bn_debug_face = nn.BatchNorm1d(256, affine=False)
        self.bn_debug_global = nn.BatchNorm1d(256, affine=False)
    def forward(self, image, face_features_initial, numberFaces, labels):
        features = self.global_model.forward(image)
        out = F.relu(features, inplace = False)
        # Global-average-pool the feature map to one 2208-d vector.
        global_features_initial = F.avg_pool2d(out, kernel_size=7, stride=1).view(features.size(0), -1)
        global_features_initial = Variable(global_features_initial)
        global_features_initial = global_features_initial.view(-1,2208)
        global_features = self.global_fc_main_dropout(self.global_fc_main(global_features_initial))
        global_features = global_features.view(-1,1,256)
        batch_size = global_features.shape[0]
        maxNumber = np.minimum(numberFaces, maxFaces)
        face_features = torch.zeros((batch_size,maxFaces,256), dtype = torch.float)
        face_features = face_features.to(device)
        for j in range(batch_size):
            face = face_features_initial[j]
            face_features[j, :, :] = self.non_align_model.forward(face, labels)
        face_features = self.non_align_model_dropout(face_features)
        face_features = face_features.view(batch_size, 256, -1)
        # -inf mask over padding face slots so softmax gives them weight 0.
        mask = np.zeros((batch_size,1,maxFaces), dtype = 'float32')
        for j in range(batch_size):
            for i in range(maxFaces - (int(maxNumber[j]))):
                mask[j][0][int(numberFaces[j]) + i] = float('-inf')
        mask = torch.from_numpy(mask)
        mask = mask.to(device)
        attention_scores = torch.bmm(global_features, face_features) #(batch_size, 1, 256) x (batch_size, 256, nFaces) = (batch_size, 1, nFaces)
        attention_scores = attention_scores+mask
        #Convert Scores to Weight
        attention_scores = F.softmax(attention_scores, dim = -1)
        attention_weights = attention_scores
        attention_weights = Variable(attention_scores)
        # Zero-face images: overwrite the NaN softmax output with zeros.
        for i in range(len(maxNumber)):
            if maxNumber[i] == 0:
                for j in range(maxFaces):
                    attention_weights[i][0][j] = 0
        #Taking Weighted Average of Face Featrues
        face_features = face_features.view(batch_size, -1, 256) #(batch_size, nFaces, 256)
        attention_scores = attention_weights.view(batch_size, 1, -1) #(batch_size, 1, nFaces)
        attended_face_features = torch.bmm(attention_scores, face_features)
        #Concatenating Global and Attended Face Features
        attended_face_features = attended_face_features.view(batch_size, -1)
        global_features = global_features.view(batch_size, -1)
        attended_face_features = self.bn_debug_face(attended_face_features)
        global_features = self.bn_debug_global(global_features)
        final_features = torch.cat((attended_face_features, global_features), dim=1)
        x = (self.global_fc3_debug(final_features))
        return x
# Whole pickled DataParallel model; .module unwraps the inner network.
model11 = torch.load('../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_lr001_dropout_BN_SoftmaxLr01', map_location=lambda storage, loc: storage).module
print('FaceAttention Similarity Attention Mechanism Model Loaded! (Model 11)')
#---------------------------------------------------------------------------
# MODEL 12
# Face Attention Model using 3rd Para Attention
# (FaceAttention_AlignedModel_FullTrain_3para_lr001_dropout_BN_SoftmaxLr01)
#---------------------------------------------------------------------------
class FaceAttention(nn.Module):
    """3rd-parameterization attention operating on a raw DenseNet feature
    map: ReLU+avg-pool to 2208-d, project to 256-d, then to a 64-d query;
    64-d face features are attended by dot-product, and the concatenated
    (attended-face 64-d, global 256-d) vector is classified into 3 classes.

    Uses module globals ``np``, ``F``, ``Variable``, ``maxFaces``,
    ``device``, ``torch``.
    """
    def __init__(self, global_model, non_align_model):
        super(FaceAttention, self).__init__()
        self.global_model = global_model
        self.non_align_model = non_align_model
        # 320 = 256 (global) + 64 (attended face) concatenated.
        self.global_fc3_debug = nn.Linear(320, 3)
        nn.init.kaiming_normal_(self.global_fc3_debug.weight)
        self.global_fc3_debug.bias.data.fill_(0.01)
        # Projects the 256-d global feature to the 64-d query space.
        self.global_fc = nn.Linear(256, 64)
        nn.init.kaiming_normal_(self.global_fc.weight)
        self.global_fc.bias.data.fill_(0.01)
        self.global_fc_dropout = nn.Dropout(p = 0.5)
        self.global_fc_main_dropout = nn.Dropout(p = 0.5)
        self.non_align_model_dropout = nn.Dropout(p = 0.5)
        self.bn_debug_face = nn.BatchNorm1d(64, affine=False)
        self.bn_debug_global = nn.BatchNorm1d(256, affine=False)
        # Projects pooled 2208-d DenseNet features down to 256-d.
        self.global_fc_main = nn.Linear(2208, 256)
        nn.init.kaiming_normal_(self.global_fc_main.weight)
        self.global_fc_main.bias.data.fill_(0.01)
    def forward(self, image, face_features_initial, numberFaces, labels):
        features = self.global_model.forward(image)
        out = F.relu(features, inplace = False)
        # Global-average-pool the feature map to one 2208-d vector.
        global_features_initial = F.avg_pool2d(out, kernel_size=7, stride=1).view(features.size(0), -1)
        global_features_initial = Variable(global_features_initial)
        global_features_initial = global_features_initial.view(-1,2208)
        global_features_main = self.global_fc_main_dropout(self.global_fc_main(global_features_initial))
        global_features = self.global_fc_dropout(self.global_fc(global_features_main))
        global_features = global_features.view(-1,1,64)
        batch_size = global_features.shape[0]
        maxNumber = np.minimum(numberFaces, maxFaces)
        face_features = torch.zeros((batch_size,maxFaces,64), dtype = torch.float)
        face_features = face_features.to(device)
        for j in range(batch_size):
            face = face_features_initial[j]
            face_features[j, :, :] = self.non_align_model.forward(face, labels)
        face_features = self.non_align_model_dropout(face_features)
        face_features = face_features.view(batch_size, 64, -1)
        # -inf mask over padding face slots so softmax gives them weight 0.
        mask = np.zeros((batch_size,1,maxFaces), dtype = 'float32')
        for j in range(batch_size):
            for i in range(maxFaces - (int(maxNumber[j]))):
                mask[j][0][int(numberFaces[j]) + i] = float('-inf')
        mask = torch.from_numpy(mask)
        mask = mask.to(device)
        attention_scores = torch.bmm(global_features, face_features) #(batch_size, 1, 64) x (batch_size, 64, nFaces) = (batch_size, 1, nFaces)
        attention_scores = attention_scores+mask
        #Convert Scores to Weight
        attention_scores = F.softmax(attention_scores, dim = -1)
        attention_weights = attention_scores
        attention_weights = Variable(attention_scores)
        # Zero-face images: overwrite the NaN softmax output with zeros.
        for i in range(len(maxNumber)):
            if maxNumber[i] == 0:
                for j in range(maxFaces):
                    attention_weights[i][0][j] = 0
        #Taking Weighted Average of Face Featrues
        face_features = face_features.view(batch_size, -1, 64) #(batch_size, nFaces, 64)
        attention_scores = attention_weights.view(batch_size, 1, -1) #(batch_size, 1, nFaces)
        attended_face_features = torch.bmm(attention_scores, face_features)
        #Concatenating Global and Attended Face Features
        attended_face_features = attended_face_features.view(batch_size, -1)
        attended_face_features = self.bn_debug_face(attended_face_features)
        global_features_main = self.bn_debug_global(global_features_main)
        final_features = torch.cat((attended_face_features, global_features_main), dim=1)
        x = (self.global_fc3_debug(final_features))
        return x
# Whole pickled DataParallel model; .module unwraps the inner network.
model12 = torch.load('../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_3para_lr001_dropout_BN_SoftmaxLr01', map_location=lambda storage, loc: storage).module
print('Face Attention Model using 3rd Para Attention Loaded! (Model 12)')
#---------------------------------------------------------------------------
# MODEL 13
# Face Attention Model (EmotiC) using 4th Para Attention
# (FaceAttention_AlignedModel_FullTrain_4para_lr01_dropout_BN_SoftmaxLr01_EmotiC)
#---------------------------------------------------------------------------
class FaceAttention(nn.Module):
    # Model 13 (EmotiC-pretrained), "4th para" attention variant: a two-layer
    # MLP (256 -> 64 -> 1) scores each face crop, softmaxed scores weight the
    # per-face features, and the attended face feature is concatenated with the
    # global image feature for 3-way classification.
    # NOTE(review): this rebinds the name FaceAttention used by earlier model
    # definitions in this file; the classes are only needed transiently so that
    # torch.load can unpickle each checkpoint.
    def __init__(self, global_model, non_align_model):
        """Wrap a global image backbone and a per-face feature extractor.

        Args:
            global_model: backbone; forward(image) must yield a feature
                reshapeable to (-1, 1, 256) — see the view() in forward.
            non_align_model: face-level model; forward(face, labels) is
                expected to yield one 256-d feature per face crop.
        """
        super(FaceAttention, self).__init__()
        self.global_model = global_model
        self.non_align_model = non_align_model
        # Classifier over the 512-d concat of [attended-face | global] features.
        self.global_fc3_debug = nn.Linear(512, 3)
        nn.init.kaiming_normal_(self.global_fc3_debug.weight)
        self.global_fc3_debug.bias.data.fill_(0.01)
        # Attention MLP layer 1: 256-d face feature -> 64-d.
        self.attentionfc1 = nn.Linear(256, 64)
        nn.init.kaiming_normal_(self.attentionfc1.weight)
        self.attentionfc1.bias.data.fill_(0.01)
        # Attention MLP layer 2: 64-d -> one scalar score per face slot.
        self.attentionfc2 = nn.Linear(64, 1)
        nn.init.kaiming_normal_(self.attentionfc2.weight)
        self.attentionfc2.bias.data.fill_(0.01)
        self.attentionfc1_dropout = nn.Dropout(p = 0.5)
        self.global_fc_main_dropout = nn.Dropout(p = 0.5)
        self.non_align_model_dropout = nn.Dropout(p = 0.5)
        # Affine-free batch norms applied to both branches just before the
        # concat so they are on a comparable scale.
        self.bn_debug_face = nn.BatchNorm1d(256, affine=False)
        self.bn_debug_global = nn.BatchNorm1d(256, affine=False)
    def forward(self, image, face_features_initial, numberFaces, labels):
        """Fuse the global image feature with attention-weighted face features.

        Args:
            image: batch of input images for the global backbone.
            face_features_initial: per-sample face crops;
                face_features_initial[j] holds the faces of sample j.
            numberFaces: per-sample count of detected faces (may exceed the
                module-level maxFaces; only the first maxFaces slots are used).
            labels: forwarded unchanged to the face model.

        Returns:
            (batch_size, 3) class scores.
        """
        features = self.global_model.forward(image)
        global_features = self.global_fc_main_dropout(features)
        batch_size = global_features.shape[0]
        global_features = global_features.view(-1,1,256)
        # Number of valid face slots per sample, capped at maxFaces.
        maxNumber = np.minimum(numberFaces, maxFaces)
        face_features = torch.zeros((batch_size,maxFaces,256), dtype = torch.float)
        face_features = face_features.to(device)
        mid_face_features = torch.zeros((batch_size, maxFaces, 1), dtype = torch.float)
        face_features_inter = torch.zeros((batch_size, maxFaces, 64), dtype = torch.float)
        face_features_inter = face_features_inter.to(device)
        mid_face_features = mid_face_features.to(device)
        for j in range(batch_size):
            face = face_features_initial[j]
            # 256-d feature per face (with dropout), then the two-layer
            # attention MLP producing one scalar score per face slot.
            face_features[j, :, :] = self.non_align_model_dropout(self.non_align_model.forward(face, labels))
            face_features_inter[j] = self.attentionfc1_dropout(self.attentionfc1(face_features[j]))
            mid_face_features[j] = self.attentionfc2(face_features_inter[j])
        mid_face_features = mid_face_features.view(batch_size, 1, maxFaces)
        # Additive mask: -inf on padded (faceless) slots so softmax ignores
        # them.  When numberFaces[j] >= maxFaces the inner range is empty and
        # no slot is masked.
        mask = np.zeros((batch_size,1,maxFaces), dtype = 'float32')
        for j in range(batch_size):
            for i in range(maxFaces - (int(maxNumber[j]))):
                mask[j][0][int(numberFaces[j]) + i] = float('-inf')
        mask = torch.from_numpy(mask)
        mask = mask.to(device)
        attention_scores = mid_face_features + mask
        # Convert scores to weights.
        attention_scores = F.softmax(attention_scores, dim = -1)
        attention_weights = Variable(attention_scores)
        # Samples with zero faces: the softmax above ran over an all--inf row
        # (presumably yielding NaNs), so their weights are zeroed explicitly,
        # making the attended face feature a zero vector.
        for i in range(len(maxNumber)):
            if maxNumber[i] == 0:
                for j in range(maxFaces):
                    attention_weights[i][0][j] = 0
        # Weighted average of face features.
        face_features = face_features.view(batch_size, -1, 256) #(batch_size, nFaces, 256)
        attention_scores = attention_weights.view(batch_size, 1, -1) #(batch_size, 1, nFaces)
        attended_face_features = torch.bmm(attention_scores, face_features)
        # Concatenate global and attended face features.
        attended_face_features = attended_face_features.view(batch_size, -1)
        global_features = global_features.view(batch_size, -1)
        attended_face_features = self.bn_debug_face(attended_face_features)
        global_features = self.bn_debug_global(global_features)
        final_features = torch.cat((attended_face_features, global_features), dim=1)
        x = (self.global_fc3_debug(final_features))
        return x
# Model 13: EmotiC-pretrained face-attention member ("4th para" variant).
# map_location keeps storages where deserialized (effectively CPU), and
# .module unwraps the DataParallel wrapper the checkpoint was saved with.
_MODEL13_PATH = ('../TrainedModels/TrainDataset/'
                 'FaceAttention_AlignedModel_FullTrain_4para_lr01_dropout_BN_SoftmaxLr01_EmotiC')
model13 = torch.load(_MODEL13_PATH, map_location=lambda storage, loc: storage).module
print('Face Attention Model (EmotiC) using 4rd Para Attention Loaded! (Model 13)')
#---------------------------------------------------------------------------
# MODEL 14
# Face Attention Model using 4th Para Attention (FaceAttention_AlignedModel_FullTrain_4para_adam_dropout_BN_SoftmaxLr01)
#---------------------------------------------------------------------------
class FaceAttention(nn.Module):
    # Model 14, "4th para" attention variant trained with Adam.  Unlike the
    # Model 13 definition above, this one pools the raw backbone feature map
    # itself (relu + 7x7 avg-pool -> 2208-d, which matches DenseNet-161's
    # final feature width — presumably that backbone) and projects it to
    # 256-d via global_fc_main before fusing with the attended face feature.
    # NOTE(review): rebinds the FaceAttention name again; the class is only
    # needed so torch.load can unpickle the checkpoint below.
    def __init__(self, global_model, non_align_model):
        """Wrap a global backbone (feature-map output) and a face extractor.

        Args:
            global_model: backbone; forward(image) must yield a conv feature
                map suitable for F.avg_pool2d(kernel_size=7) -> 2208-d vector.
            non_align_model: face-level model; forward(face, labels) is
                expected to yield one 256-d feature per face crop.
        """
        super(FaceAttention, self).__init__()
        self.global_model = global_model
        self.non_align_model = non_align_model
        # Classifier over the 512-d concat of [attended-face | global] features.
        self.global_fc3_debug = nn.Linear(512, 3)
        nn.init.kaiming_normal_(self.global_fc3_debug.weight)
        self.global_fc3_debug.bias.data.fill_(0.01)
        # Attention MLP: 256 -> 64 -> 1 scalar score per face slot.
        self.attentionfc1 = nn.Linear(256, 64)
        nn.init.kaiming_normal_(self.attentionfc1.weight)
        self.attentionfc1.bias.data.fill_(0.01)
        self.attentionfc2 = nn.Linear(64, 1)
        nn.init.kaiming_normal_(self.attentionfc2.weight)
        self.attentionfc2.bias.data.fill_(0.01)
        # Projects the pooled 2208-d backbone feature down to 256-d.
        self.global_fc_main = nn.Linear(2208, 256)
        nn.init.kaiming_normal_(self.global_fc_main.weight)
        self.global_fc_main.bias.data.fill_(0.01)
        self.attentionfc1_dropout = nn.Dropout(p = 0.5)
        self.global_fc_main_dropout = nn.Dropout(p = 0.5)
        self.non_align_model_dropout = nn.Dropout(p = 0.5)
        # Affine-free batch norms applied to both branches before the concat.
        self.bn_debug_face = nn.BatchNorm1d(256, affine=False)
        self.bn_debug_global = nn.BatchNorm1d(256, affine=False)
    def forward(self, image, face_features_initial, numberFaces, labels):
        """Fuse a pooled global feature with attention-weighted face features.

        Args:
            image: batch of input images for the global backbone.
            face_features_initial: per-sample face crops;
                face_features_initial[j] holds the faces of sample j.
            numberFaces: per-sample count of detected faces (may exceed
                maxFaces; only the first maxFaces slots are used).
            labels: forwarded unchanged to the face model.

        Returns:
            (batch_size, 3) class scores.
        """
        features = self.global_model.forward(image)
        # Pool the backbone feature map into a 2208-d vector, then project
        # to 256-d with dropout.
        out = F.relu(features, inplace = False)
        global_features_initial = F.avg_pool2d(out, kernel_size=7, stride=1).view(features.size(0), -1)
        global_features_initial = Variable(global_features_initial)
        global_features_initial = global_features_initial.view(-1,2208)
        global_features = self.global_fc_main_dropout(self.global_fc_main(global_features_initial))
        batch_size = global_features.shape[0]
        global_features = global_features.view(-1,1,256)
        # Number of valid face slots per sample, capped at maxFaces.
        maxNumber = np.minimum(numberFaces, maxFaces)
        face_features = torch.zeros((batch_size,maxFaces,256), dtype = torch.float)
        face_features = face_features.to(device)
        mid_face_features = torch.zeros((batch_size, maxFaces, 1), dtype = torch.float)
        face_features_inter = torch.zeros((batch_size, maxFaces, 64), dtype = torch.float)
        face_features_inter = face_features_inter.to(device)
        mid_face_features = mid_face_features.to(device)
        for j in range(batch_size):
            face = face_features_initial[j]
            # 256-d feature per face (with dropout), then the two-layer
            # attention MLP producing one scalar score per face slot.
            face_features[j, :, :] = self.non_align_model_dropout(self.non_align_model.forward(face, labels))
            face_features_inter[j] = self.attentionfc1_dropout(self.attentionfc1(face_features[j]))
            mid_face_features[j] = self.attentionfc2(face_features_inter[j])
        mid_face_features = mid_face_features.view(batch_size, 1, maxFaces)
        # Additive mask: -inf on padded (faceless) slots so softmax ignores
        # them; empty inner range (no masking) when numberFaces >= maxFaces.
        mask = np.zeros((batch_size,1,maxFaces), dtype = 'float32')
        for j in range(batch_size):
            for i in range(maxFaces - (int(maxNumber[j]))):
                mask[j][0][int(numberFaces[j]) + i] = float('-inf')
        mask = torch.from_numpy(mask)
        mask = mask.to(device)
        attention_scores = mid_face_features + mask
        # Convert scores to weights.
        attention_scores = F.softmax(attention_scores, dim = -1)
        attention_weights = Variable(attention_scores)
        # Samples with zero faces: softmax over an all--inf row (presumably
        # NaN), so zero their weights so the attended feature is all zeros.
        for i in range(len(maxNumber)):
            if maxNumber[i] == 0:
                for j in range(maxFaces):
                    attention_weights[i][0][j] = 0
        # Weighted average of face features.
        face_features = face_features.view(batch_size, -1, 256) #(batch_size, nFaces, 256)
        attention_scores = attention_weights.view(batch_size, 1, -1) #(batch_size, 1, nFaces)
        attended_face_features = torch.bmm(attention_scores, face_features)
        # Concatenate global and attended face features.
        attended_face_features = attended_face_features.view(batch_size, -1)
        global_features = global_features.view(batch_size, -1)
        attended_face_features = self.bn_debug_face(attended_face_features)
        global_features = self.bn_debug_global(global_features)
        final_features = torch.cat((attended_face_features, global_features), dim=1)
        x = (self.global_fc3_debug(final_features))
        return x
# Model 14: face-attention member ("4th para" variant, Adam-trained).
# map_location keeps storages where deserialized (effectively CPU), and
# .module unwraps the DataParallel wrapper the checkpoint was saved with.
_MODEL14_PATH = ('../TrainedModels/TrainDataset/'
                 'FaceAttention_AlignedModel_FullTrain_4para_adam_dropout_BN_SoftmaxLr01')
model14 = torch.load(_MODEL14_PATH, map_location=lambda storage, loc: storage).module
print('Face Attention Model using 4rd Para Attention Loaded! (Model 14)')
#---------------------------------------------------------------------------
# ENSEMBLE
#---------------------------------------------------------------------------
class Ensemble(nn.Module):
    """Fixed-weight ensemble over the fourteen pre-trained emotion models.

    Every member is run on each batch.  The fused prediction is a hand-tuned
    linear blend of members 1-8 only (member 1 carries weight 0); all fourteen
    raw outputs are also returned so callers can log/analyze them.
    """

    def __init__(self, model_1, model_2, model_3, model_4, model_5, model_6,
                 model_7, model_8, model_9, model_10, model_11, model_12,
                 model_13, model_14):
        super(Ensemble, self).__init__()
        # Register members under their original attribute names
        # (model_1 .. model_14) so state_dict keys stay stable.
        members = (model_1, model_2, model_3, model_4, model_5, model_6,
                   model_7, model_8, model_9, model_10, model_11, model_12,
                   model_13, model_14)
        for index, member in enumerate(members, start=1):
            setattr(self, 'model_%d' % index, member)

    def forward(self, image, labels, face_features_mtcnn,
                face_features_aligned, numberFaces, phase):
        """Run every member and return (fused, out1, ..., out14)."""
        aligned_args = (face_features_aligned, numberFaces, labels)
        full_args = (image,) + aligned_args
        out1 = self.model_1(image)
        out2 = self.model_2(image)
        out3 = self.model_3(*aligned_args)
        out4 = self.model_4(*aligned_args)
        out5 = self.model_5(image, face_features_mtcnn, numberFaces)
        out6 = self.model_6(*full_args)
        out7 = self.model_7(*full_args)
        out8 = self.model_8(*full_args)
        out9 = self.model_9(*aligned_args)
        out10 = self.model_10(face_features_aligned, numberFaces, labels, phase)
        out11 = self.model_11(*full_args)
        out12 = self.model_12(*full_args)
        out13 = self.model_13(*full_args)
        out14 = self.model_14(*full_args)
        # Hand-tuned blend weights for members 1-8; members 9-14 contribute
        # only to the returned diagnostics, not to the fused score.
        fused = (0 * out1 + 5 * out2 + 10 * out3 + 10 * out4 +
                 1 * out5 + 5 * out6 + 2 * out7 + 5 * out8)
        return (fused, out1, out2, out3, out4, out5, out6, out7, out8,
                out9, out10, out11, out12, out13, out14)
# Assemble the fourteen-member ensemble, move it to the target device, and
# wrap it for multi-GPU execution.
_ensemble = Ensemble(model1, model2, model3, model4, model5, model6, model7,
                     model8, model9, model10, model11, model12, model13,
                     model14)
model_ft = nn.DataParallel(_ensemble.to(device))
print("Ensemble Loaded.")
#---------------------------------------------------------------------------
# TRAINING
#---------------------------------------------------------------------------
# Accumulators for every prediction made while the ensemble is evaluated.
# For each split (train / valid / test) we keep the fused ensemble outputs,
# the raw outputs of each of the fourteen members, and the ground-truth
# labels; train_model() extends these and snapshots them to an .npz archive.
output_train = []
output_valid = []
output_test = []
(output_train_model1, output_train_model2, output_train_model3,
 output_train_model4, output_train_model5, output_train_model6,
 output_train_model7, output_train_model8, output_train_model9,
 output_train_model10, output_train_model11, output_train_model12,
 output_train_model13, output_train_model14) = ([] for _ in range(14))
(output_valid_model1, output_valid_model2, output_valid_model3,
 output_valid_model4, output_valid_model5, output_valid_model6,
 output_valid_model7, output_valid_model8, output_valid_model9,
 output_valid_model10, output_valid_model11, output_valid_model12,
 output_valid_model13, output_valid_model14) = ([] for _ in range(14))
(output_test_model1, output_test_model2, output_test_model3,
 output_test_model4, output_test_model5, output_test_model6,
 output_test_model7, output_test_model8, output_test_model9,
 output_test_model10, output_test_model11, output_test_model12,
 output_test_model13, output_test_model14) = ([] for _ in range(14))
label_train = []
label_valid = []
label_test = []
def train_model(model, criterion, optimizer=None, scheduler=None, num_epochs = 1):
    """Evaluate the (frozen) ensemble over the train/val/test splits.

    Despite the name, no weights are ever updated: every phase runs the model
    in eval() mode and no optimizer step is taken.  The function exists to
    record the fused and per-member outputs into the module-level buffers and
    checkpoint them to 'fourteen_models_output_data.npz' after every batch.

    Args:
        model: the (DataParallel-wrapped) Ensemble.
        criterion: classification loss, used for reporting only.
        optimizer: unused; kept for signature compatibility.
        scheduler: unused; kept for signature compatibility.
        num_epochs: number of passes over the three splits.

    Returns:
        The model with the best-validation-accuracy weights restored.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    # Phase index -> (dataloader, fused-output buffer, per-member buffers,
    # label buffer).  Phases: 0 = train, 1 = valid, 2 = test.
    phase_setup = {
        0: (train_dataloader, output_train,
            [output_train_model1, output_train_model2, output_train_model3,
             output_train_model4, output_train_model5, output_train_model6,
             output_train_model7, output_train_model8, output_train_model9,
             output_train_model10, output_train_model11, output_train_model12,
             output_train_model13, output_train_model14], label_train),
        1: (val_dataloader, output_valid,
            [output_valid_model1, output_valid_model2, output_valid_model3,
             output_valid_model4, output_valid_model5, output_valid_model6,
             output_valid_model7, output_valid_model8, output_valid_model9,
             output_valid_model10, output_valid_model11, output_valid_model12,
             output_valid_model13, output_valid_model14], label_valid),
        2: (test_dataloader, output_test,
            [output_test_model1, output_test_model2, output_test_model3,
             output_test_model4, output_test_model5, output_test_model6,
             output_test_model7, output_test_model8, output_test_model9,
             output_test_model10, output_test_model11, output_test_model12,
             output_test_model13, output_test_model14], label_test),
    }

    def _snapshot():
        # Persist everything recorded so far into one .npz archive; kwarg
        # names mirror the buffer variable names.
        arrays = {'output_train': output_train, 'output_valid': output_valid,
                  'output_test': output_test, 'label_train': label_train,
                  'label_valid': label_valid, 'label_test': label_test}
        for split, phase_idx in (('train', 0), ('valid', 1), ('test', 2)):
            for idx, buf in enumerate(phase_setup[phase_idx][2], start=1):
                arrays['output_%s_model%d' % (split, idx)] = buf
        np.savez('fourteen_models_output_data', **arrays)

    for epoch in range(num_epochs):
        print("Epoch {}/{}".format(epoch, num_epochs - 1))
        print('-' * 10)
        for phase in range(0, 3):
            dataloaders, fused_buf, model_bufs, label_buf = phase_setup[phase]
            model.eval()  # every phase is inference-only
            running_loss = 0.0
            running_corrects = 0
            for i_batch, sample_batched in enumerate(dataloaders):
                inputs = sample_batched['image'].to(device)
                labels = sample_batched['label'].to(device)
                face_features_mtcnn = sample_batched['features_mtcnn'].to(device)
                face_features_aligned = sample_batched['features_aligned'].to(device)
                numberFaces = sample_batched['numberFaces'].to(device)
                # Gradients enabled only for phase 0 (matches the original
                # behaviour; they are never used for an update).
                with torch.set_grad_enabled(phase == 0):
                    result = model(inputs, labels, face_features_mtcnn,
                                   face_features_aligned, numberFaces, phase)
                    outputs, per_member = result[0], result[1:]
                    fused_buf.extend(outputs.data.cpu().numpy())
                    for buf, member_out in zip(model_bufs, per_member):
                        buf.extend(member_out.data.cpu().numpy())
                    label_buf.extend(labels.cpu().numpy())
                _, preds = torch.max(outputs, 1)
                loss = criterion(outputs, labels)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
                # Running (partial-epoch) metrics, reported per batch as in
                # the original implementation.
                epoch_loss = running_loss / dataset_sizes[phase]
                epoch_acc = running_corrects.double() / dataset_sizes[phase]
                _snapshot()
                print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                    phase, epoch_loss, epoch_acc))
                if phase == 1 and epoch_acc > best_acc:
                    best_acc = epoch_acc
                    best_model_wts = copy.deepcopy(model.state_dict())
            print()
    time_elapsed = time.time() - since
    # Fixed format specs: the original '{: .0f}m {:0f}s' printed a stray
    # space before the minutes and full float precision for the seconds.
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))
    # Bug fix: best-validation weights were tracked but never restored.
    model.load_state_dict(best_model_wts)
    return model
# Loss is used for reporting only -- the ensemble weights are frozen, so no
# optimizer or scheduler is supplied.
criterion = nn.CrossEntropyLoss()
model = train_model(model_ft, criterion, optimizer=None, scheduler=None,
                    num_epochs=epochs)
# Final dump of every recorded prediction/label into one .npz archive (the
# same file the training loop checkpoints to after each batch).  The keyword
# names are exactly the module-level buffer names, built programmatically.
_archive_names = (['output_train', 'output_valid', 'output_test',
                   'label_train', 'label_valid', 'label_test'] +
                  ['output_%s_model%d' % (split, idx)
                   for split in ('train', 'valid', 'test')
                   for idx in range(1, 15)])
np.savez('fourteen_models_output_data',
         **{name: globals()[name] for name in _archive_names})
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torchvision.models.densenet161",
"torch.max",
"torch.from_numpy",
"torch.nn.BatchNorm1d",
"torch.cuda.is_available",
"torch.sum",
"torch.bmm",
"torch.nn.functional.softmax",
"numpy.savez",
"os.listdir",
"torch.nn.init.kaiming_normal_",
"tor... | [((178, 211), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (201, 211), False, 'import warnings\n'), ((5868, 5946), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'shuffle': '(False)', 'batch_size': 'batch_size', 'num_workers': '(0)'}), '(train_dataset, shuffle=False, batch_size=batch_size, num_workers=0)\n', (5878, 5946), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((6121, 6197), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'shuffle': '(False)', 'batch_size': 'batch_size', 'num_workers': '(0)'}), '(val_dataset, shuffle=False, batch_size=batch_size, num_workers=0)\n', (6131, 6197), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((6384, 6461), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'shuffle': '(False)', 'batch_size': 'batch_size', 'num_workers': '(0)'}), '(test_dataset, shuffle=False, batch_size=batch_size, num_workers=0)\n', (6394, 6461), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((13211, 13321), 'torch.load', 'torch.load', (['"""../TrainedModels/TrainDataset/DenseNet161_EmotiW"""'], {'map_location': '(lambda storage, loc: storage)'}), "('../TrainedModels/TrainDataset/DenseNet161_EmotiW', map_location\n =lambda storage, loc: storage)\n", (13221, 13321), False, 'import torch\n'), ((13636, 13672), 'torchvision.models.densenet161', 'models.densenet161', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (13654, 13672), False, 'from torchvision import datasets, models, transforms\n'), ((13734, 13756), 'torch.nn.Linear', 'nn.Linear', (['num_ftrs', '(3)'], {}), '(num_ftrs, 3)\n', (13743, 13756), True, 'import torch.nn as nn\n'), ((13794, 13817), 'torch.nn.DataParallel', 'nn.DataParallel', (['model2'], {}), '(model2)\n', (13809, 13817), True, 'import torch.nn as nn\n'), ((58951, 58976), 'torch.nn.DataParallel', 'nn.DataParallel', (['model_ft'], {}), '(model_ft)\n', (58966, 58976), True, 'import 
torch.nn as nn\n'), ((69532, 69553), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (69551, 69553), True, 'import torch.nn as nn\n'), ((69628, 71677), 'numpy.savez', 'np.savez', (['"""fourteen_models_output_data"""'], {'output_train': 'output_train', 'output_train_model1': 'output_train_model1', 'output_train_model2': 'output_train_model2', 'output_train_model3': 'output_train_model3', 'output_train_model4': 'output_train_model4', 'output_train_model5': 'output_train_model5', 'output_train_model6': 'output_train_model6', 'output_train_model7': 'output_train_model7', 'output_train_model8': 'output_train_model8', 'output_train_model9': 'output_train_model9', 'output_train_model10': 'output_train_model10', 'output_train_model11': 'output_train_model11', 'output_train_model12': 'output_train_model12', 'output_train_model13': 'output_train_model13', 'output_train_model14': 'output_train_model14', 'output_valid': 'output_valid', 'output_valid_model1': 'output_valid_model1', 'output_valid_model2': 'output_valid_model2', 'output_valid_model3': 'output_valid_model3', 'output_valid_model4': 'output_valid_model4', 'output_valid_model5': 'output_valid_model5', 'output_valid_model6': 'output_valid_model6', 'output_valid_model7': 'output_valid_model7', 'output_valid_model8': 'output_valid_model8', 'output_valid_model9': 'output_valid_model9', 'output_valid_model10': 'output_valid_model10', 'output_valid_model11': 'output_valid_model11', 'output_valid_model12': 'output_valid_model12', 'output_valid_model13': 'output_valid_model13', 'output_valid_model14': 'output_valid_model14', 'output_test': 'output_test', 'output_test_model1': 'output_test_model1', 'output_test_model2': 'output_test_model2', 'output_test_model3': 'output_test_model3', 'output_test_model4': 'output_test_model4', 'output_test_model5': 'output_test_model5', 'output_test_model6': 'output_test_model6', 'output_test_model7': 'output_test_model7', 'output_test_model8': 'output_test_model8', 
'output_test_model9': 'output_test_model9', 'output_test_model10': 'output_test_model10', 'output_test_model11': 'output_test_model11', 'output_test_model12': 'output_test_model12', 'output_test_model13': 'output_test_model13', 'output_test_model14': 'output_test_model14', 'label_train': 'label_train', 'label_valid': 'label_valid', 'label_test': 'label_test'}), "('fourteen_models_output_data', output_train=output_train,\n output_train_model1=output_train_model1, output_train_model2=\n output_train_model2, output_train_model3=output_train_model3,\n output_train_model4=output_train_model4, output_train_model5=\n output_train_model5, output_train_model6=output_train_model6,\n output_train_model7=output_train_model7, output_train_model8=\n output_train_model8, output_train_model9=output_train_model9,\n output_train_model10=output_train_model10, output_train_model11=\n output_train_model11, output_train_model12=output_train_model12,\n output_train_model13=output_train_model13, output_train_model14=\n output_train_model14, output_valid=output_valid, output_valid_model1=\n output_valid_model1, output_valid_model2=output_valid_model2,\n output_valid_model3=output_valid_model3, output_valid_model4=\n output_valid_model4, output_valid_model5=output_valid_model5,\n output_valid_model6=output_valid_model6, output_valid_model7=\n output_valid_model7, output_valid_model8=output_valid_model8,\n output_valid_model9=output_valid_model9, output_valid_model10=\n output_valid_model10, output_valid_model11=output_valid_model11,\n output_valid_model12=output_valid_model12, output_valid_model13=\n output_valid_model13, output_valid_model14=output_valid_model14,\n output_test=output_test, output_test_model1=output_test_model1,\n output_test_model2=output_test_model2, output_test_model3=\n output_test_model3, output_test_model4=output_test_model4,\n output_test_model5=output_test_model5, output_test_model6=\n output_test_model6, output_test_model7=output_test_model7,\n 
output_test_model8=output_test_model8, output_test_model9=\n output_test_model9, output_test_model10=output_test_model10,\n output_test_model11=output_test_model11, output_test_model12=\n output_test_model12, output_test_model13=output_test_model13,\n output_test_model14=output_test_model14, label_train=label_train,\n label_valid=label_valid, label_test=label_test)\n", (69636, 71677), True, 'import numpy as np\n'), ((832, 857), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (855, 857), False, 'import torch\n'), ((1147, 1199), 'os.listdir', 'os.listdir', (["('../Dataset/emotiw/train/' + 'Negative/')"], {}), "('../Dataset/emotiw/train/' + 'Negative/')\n", (1157, 1199), False, 'import os\n'), ((1218, 1269), 'os.listdir', 'os.listdir', (["('../Dataset/emotiw/train/' + 'Neutral/')"], {}), "('../Dataset/emotiw/train/' + 'Neutral/')\n", (1228, 1269), False, 'import os\n'), ((1288, 1340), 'os.listdir', 'os.listdir', (["('../Dataset/emotiw/train/' + 'Positive/')"], {}), "('../Dataset/emotiw/train/' + 'Positive/')\n", (1298, 1340), False, 'import os\n'), ((1496, 1511), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1507, 1511), False, 'import pickle\n'), ((1580, 1595), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1591, 1595), False, 'import pickle\n'), ((13841, 13976), 'torch.load', 'torch.load', (['"""../TrainedModels/TrainDataset/densenet_emotiw_pretrainemotic_lr001.pt"""'], {'map_location': '(lambda storage, loc: storage)'}), "(\n '../TrainedModels/TrainDataset/densenet_emotiw_pretrainemotic_lr001.pt',\n map_location=lambda storage, loc: storage)\n", (13851, 13976), False, 'import torch\n'), ((16059, 16149), 'torch.load', 'torch.load', (['aligned_model_global_level_path'], {'map_location': '(lambda storage, loc: storage)'}), '(aligned_model_global_level_path, map_location=lambda storage,\n loc: storage)\n', (16069, 16149), False, 'import torch\n'), ((16577, 16666), 'torch.load', 'torch.load', (['aligned_model_image_level_path'], 
{'map_location': '(lambda storage, loc: storage)'}), '(aligned_model_image_level_path, map_location=lambda storage, loc:\n storage)\n', (16587, 16666), False, 'import torch\n'), ((20399, 20558), 'torch.load', 'torch.load', (['"""../TrainedModels/TrainDataset/PretrainedDenseNet-FineTune-2208-3-lr001-Regularized-Corrected"""'], {'map_location': '(lambda storage, loc: storage)'}), "(\n '../TrainedModels/TrainDataset/PretrainedDenseNet-FineTune-2208-3-lr001-Regularized-Corrected'\n , map_location=lambda storage, loc: storage)\n", (20409, 20558), False, 'import torch\n'), ((24388, 24563), 'torch.load', 'torch.load', (['"""../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_3para_lr001_dropout_BN_SoftmaxLr01_EmotiC"""'], {'map_location': '(lambda storage, loc: storage)'}), "(\n '../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_3para_lr001_dropout_BN_SoftmaxLr01_EmotiC'\n , map_location=lambda storage, loc: storage)\n", (24398, 24563), False, 'import torch\n'), ((28804, 28979), 'torch.load', 'torch.load', (['"""../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_4para_lr001_dropout_BN_SoftmaxLr01_EmotiC"""'], {'map_location': '(lambda storage, loc: storage)'}), "(\n '../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_4para_lr001_dropout_BN_SoftmaxLr01_EmotiC'\n , map_location=lambda storage, loc: storage)\n", (28814, 28979), False, 'import torch\n'), ((33692, 33859), 'torch.load', 'torch.load', (['"""../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_4para_lr01_dropout_BN_SoftmaxLr01"""'], {'map_location': '(lambda storage, loc: storage)'}), "(\n '../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_4para_lr01_dropout_BN_SoftmaxLr01'\n , map_location=lambda storage, loc: storage)\n", (33702, 33859), False, 'import torch\n'), ((35954, 36044), 'torch.load', 'torch.load', (['aligned_model_global_level_path'], {'map_location': '(lambda storage, loc: storage)'}), 
'(aligned_model_global_level_path, map_location=lambda storage,\n loc: storage)\n', (35964, 36044), False, 'import torch\n'), ((38264, 38354), 'torch.load', 'torch.load', (['aligned_model_global_level_path'], {'map_location': '(lambda storage, loc: storage)'}), '(aligned_model_global_level_path, map_location=lambda storage,\n loc: storage)\n', (38274, 38354), False, 'import torch\n'), ((42453, 42615), 'torch.load', 'torch.load', (['"""../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_lr001_dropout_BN_SoftmaxLr01"""'], {'map_location': '(lambda storage, loc: storage)'}), "(\n '../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_lr001_dropout_BN_SoftmaxLr01'\n , map_location=lambda storage, loc: storage)\n", (42463, 42615), False, 'import torch\n'), ((46952, 47120), 'torch.load', 'torch.load', (['"""../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_3para_lr001_dropout_BN_SoftmaxLr01"""'], {'map_location': '(lambda storage, loc: storage)'}), "(\n '../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_3para_lr001_dropout_BN_SoftmaxLr01'\n , map_location=lambda storage, loc: storage)\n", (46962, 47120), False, 'import torch\n'), ((51354, 51528), 'torch.load', 'torch.load', (['"""../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_4para_lr01_dropout_BN_SoftmaxLr01_EmotiC"""'], {'map_location': '(lambda storage, loc: storage)'}), "(\n '../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_4para_lr01_dropout_BN_SoftmaxLr01_EmotiC'\n , map_location=lambda storage, loc: storage)\n", (51364, 51528), False, 'import torch\n'), ((56245, 56412), 'torch.load', 'torch.load', (['"""../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_4para_adam_dropout_BN_SoftmaxLr01"""'], {'map_location': '(lambda storage, loc: storage)'}), "(\n '../TrainedModels/TrainDataset/FaceAttention_AlignedModel_FullTrain_4para_adam_dropout_BN_SoftmaxLr01'\n , map_location=lambda storage, loc: storage)\n", 
(56255, 56412), False, 'import torch\n'), ((60425, 60436), 'time.time', 'time.time', ([], {}), '()\n', (60434, 60436), False, 'import time\n'), ((1949, 1978), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (1966, 1978), False, 'from torchvision import datasets, models, transforms\n'), ((1988, 2009), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2007, 2009), False, 'from torchvision import datasets, models, transforms\n'), ((2019, 2085), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2039, 2085), False, 'from torchvision import datasets, models, transforms\n'), ((2147, 2176), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (2164, 2176), False, 'from torchvision import datasets, models, transforms\n'), ((2186, 2207), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2205, 2207), False, 'from torchvision import datasets, models, transforms\n'), ((2217, 2283), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2237, 2283), False, 'from torchvision import datasets, models, transforms\n'), ((2350, 2379), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (2367, 2379), False, 'from torchvision import datasets, models, transforms\n'), ((2389, 2410), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2408, 2410), False, 'from torchvision import datasets, models, transforms\n'), ((2420, 2486), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2440, 2486), False, 'from torchvision import datasets, 
models, transforms\n'), ((2553, 2581), 'torchvision.transforms.Resize', 'transforms.Resize', (['(96, 112)'], {}), '((96, 112))\n', (2570, 2581), False, 'from torchvision import datasets, models, transforms\n'), ((2590, 2611), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2609, 2611), False, 'from torchvision import datasets, models, transforms\n'), ((2676, 2704), 'torchvision.transforms.Resize', 'transforms.Resize', (['(96, 112)'], {}), '((96, 112))\n', (2693, 2704), False, 'from torchvision import datasets, models, transforms\n'), ((2713, 2734), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2732, 2734), False, 'from torchvision import datasets, models, transforms\n'), ((2800, 2828), 'torchvision.transforms.Resize', 'transforms.Resize', (['(96, 112)'], {}), '((96, 112))\n', (2817, 2828), False, 'from torchvision import datasets, models, transforms\n'), ((2837, 2858), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2856, 2858), False, 'from torchvision import datasets, models, transforms\n'), ((2977, 2998), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2996, 2998), False, 'from torchvision import datasets, models, transforms\n'), ((3017, 3038), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3036, 3038), False, 'from torchvision import datasets, models, transforms\n'), ((4108, 4201), 'PIL.Image.open', 'Image.open', (["(self.root_dir + 'emotiw/' + train + '/' + labelname + '/' + filename + '.jpg')"], {}), "(self.root_dir + 'emotiw/' + train + '/' + labelname + '/' +\n filename + '.jpg')\n", (4118, 4201), False, 'from PIL import Image\n'), ((4793, 4835), 'numpy.zeros', 'np.zeros', (['(maxFaces, 256)'], {'dtype': '"""float32"""'}), "((maxFaces, 256), dtype='float32')\n", (4801, 4835), True, 'import numpy as np\n'), ((4932, 4959), 'torch.from_numpy', 'torch.from_numpy', (['features1'], {}), '(features1)\n', (4948, 4959), 
False, 'import torch\n'), ((5019, 5068), 'numpy.zeros', 'np.zeros', (['(maxFaces, 3, 96, 112)'], {'dtype': '"""float32"""'}), "((maxFaces, 3, 96, 112), dtype='float32')\n", (5027, 5068), True, 'import numpy as np\n'), ((5466, 5493), 'torch.from_numpy', 'torch.from_numpy', (['features2'], {}), '(features2)\n', (5482, 5493), False, 'import torch\n'), ((10109, 10134), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(3)', '(2)', '(1)'], {}), '(3, 64, 3, 2, 1)\n', (10118, 10134), True, 'import torch.nn as nn\n'), ((10168, 10180), 'torch.nn.PReLU', 'nn.PReLU', (['(64)'], {}), '(64)\n', (10176, 10180), True, 'import torch.nn as nn\n'), ((10204, 10230), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3)', '(1)', '(1)'], {}), '(64, 64, 3, 1, 1)\n', (10213, 10230), True, 'import torch.nn as nn\n'), ((10250, 10262), 'torch.nn.PReLU', 'nn.PReLU', (['(64)'], {}), '(64)\n', (10258, 10262), True, 'import torch.nn as nn\n'), ((10286, 10312), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3)', '(1)', '(1)'], {}), '(64, 64, 3, 1, 1)\n', (10295, 10312), True, 'import torch.nn as nn\n'), ((10332, 10344), 'torch.nn.PReLU', 'nn.PReLU', (['(64)'], {}), '(64)\n', (10340, 10344), True, 'import torch.nn as nn\n'), ((10369, 10396), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(3)', '(2)', '(1)'], {}), '(64, 128, 3, 2, 1)\n', (10378, 10396), True, 'import torch.nn as nn\n'), ((10431, 10444), 'torch.nn.PReLU', 'nn.PReLU', (['(128)'], {}), '(128)\n', (10439, 10444), True, 'import torch.nn as nn\n'), ((10468, 10496), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)', '(1)', '(1)'], {}), '(128, 128, 3, 1, 1)\n', (10477, 10496), True, 'import torch.nn as nn\n'), ((10516, 10529), 'torch.nn.PReLU', 'nn.PReLU', (['(128)'], {}), '(128)\n', (10524, 10529), True, 'import torch.nn as nn\n'), ((10553, 10581), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)', '(1)', '(1)'], {}), '(128, 128, 3, 1, 1)\n', (10562, 10581), True, 'import torch.nn as nn\n'), ((10601, 10614), 
'torch.nn.PReLU', 'nn.PReLU', (['(128)'], {}), '(128)\n', (10609, 10614), True, 'import torch.nn as nn\n'), ((10639, 10667), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)', '(1)', '(1)'], {}), '(128, 128, 3, 1, 1)\n', (10648, 10667), True, 'import torch.nn as nn\n'), ((10702, 10715), 'torch.nn.PReLU', 'nn.PReLU', (['(128)'], {}), '(128)\n', (10710, 10715), True, 'import torch.nn as nn\n'), ((10739, 10767), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)', '(1)', '(1)'], {}), '(128, 128, 3, 1, 1)\n', (10748, 10767), True, 'import torch.nn as nn\n'), ((10787, 10800), 'torch.nn.PReLU', 'nn.PReLU', (['(128)'], {}), '(128)\n', (10795, 10800), True, 'import torch.nn as nn\n'), ((10826, 10854), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(3)', '(2)', '(1)'], {}), '(128, 256, 3, 2, 1)\n', (10835, 10854), True, 'import torch.nn as nn\n'), ((10889, 10902), 'torch.nn.PReLU', 'nn.PReLU', (['(256)'], {}), '(256)\n', (10897, 10902), True, 'import torch.nn as nn\n'), ((10926, 10954), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(1)'], {}), '(256, 256, 3, 1, 1)\n', (10935, 10954), True, 'import torch.nn as nn\n'), ((10974, 10987), 'torch.nn.PReLU', 'nn.PReLU', (['(256)'], {}), '(256)\n', (10982, 10987), True, 'import torch.nn as nn\n'), ((11011, 11039), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(1)'], {}), '(256, 256, 3, 1, 1)\n', (11020, 11039), True, 'import torch.nn as nn\n'), ((11059, 11072), 'torch.nn.PReLU', 'nn.PReLU', (['(256)'], {}), '(256)\n', (11067, 11072), True, 'import torch.nn as nn\n'), ((11097, 11125), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(1)'], {}), '(256, 256, 3, 1, 1)\n', (11106, 11125), True, 'import torch.nn as nn\n'), ((11160, 11173), 'torch.nn.PReLU', 'nn.PReLU', (['(256)'], {}), '(256)\n', (11168, 11173), True, 'import torch.nn as nn\n'), ((11197, 11225), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(1)'], {}), '(256, 256, 3, 1, 
1)\n', (11206, 11225), True, 'import torch.nn as nn\n'), ((11245, 11258), 'torch.nn.PReLU', 'nn.PReLU', (['(256)'], {}), '(256)\n', (11253, 11258), True, 'import torch.nn as nn\n'), ((11283, 11311), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(1)'], {}), '(256, 256, 3, 1, 1)\n', (11292, 11311), True, 'import torch.nn as nn\n'), ((11346, 11359), 'torch.nn.PReLU', 'nn.PReLU', (['(256)'], {}), '(256)\n', (11354, 11359), True, 'import torch.nn as nn\n'), ((11383, 11411), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(1)'], {}), '(256, 256, 3, 1, 1)\n', (11392, 11411), True, 'import torch.nn as nn\n'), ((11431, 11444), 'torch.nn.PReLU', 'nn.PReLU', (['(256)'], {}), '(256)\n', (11439, 11444), True, 'import torch.nn as nn\n'), ((11469, 11497), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(1)'], {}), '(256, 256, 3, 1, 1)\n', (11478, 11497), True, 'import torch.nn as nn\n'), ((11532, 11545), 'torch.nn.PReLU', 'nn.PReLU', (['(256)'], {}), '(256)\n', (11540, 11545), True, 'import torch.nn as nn\n'), ((11569, 11597), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)', '(1)', '(1)'], {}), '(256, 256, 3, 1, 1)\n', (11578, 11597), True, 'import torch.nn as nn\n'), ((11617, 11630), 'torch.nn.PReLU', 'nn.PReLU', (['(256)'], {}), '(256)\n', (11625, 11630), True, 'import torch.nn as nn\n'), ((11655, 11683), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)', '(3)', '(2)', '(1)'], {}), '(256, 512, 3, 2, 1)\n', (11664, 11683), True, 'import torch.nn as nn\n'), ((11716, 11729), 'torch.nn.PReLU', 'nn.PReLU', (['(512)'], {}), '(512)\n', (11724, 11729), True, 'import torch.nn as nn\n'), ((11753, 11781), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3)', '(1)', '(1)'], {}), '(512, 512, 3, 1, 1)\n', (11762, 11781), True, 'import torch.nn as nn\n'), ((11801, 11814), 'torch.nn.PReLU', 'nn.PReLU', (['(512)'], {}), '(512)\n', (11809, 11814), True, 'import torch.nn as nn\n'), ((11838, 11866), 'torch.nn.Conv2d', 
'nn.Conv2d', (['(512)', '(512)', '(3)', '(1)', '(1)'], {}), '(512, 512, 3, 1, 1)\n', (11847, 11866), True, 'import torch.nn as nn\n'), ((11886, 11899), 'torch.nn.PReLU', 'nn.PReLU', (['(512)'], {}), '(512)\n', (11894, 11899), True, 'import torch.nn as nn\n'), ((11920, 11947), 'torch.nn.Linear', 'nn.Linear', (['(512 * 7 * 6)', '(512)'], {}), '(512 * 7 * 6, 512)\n', (11929, 11947), True, 'import torch.nn as nn\n'), ((14688, 14765), 'torch.zeros', 'torch.zeros', (['(face_features_initial.shape[0], maxFaces, 3)'], {'dtype': 'torch.float'}), '((face_features_initial.shape[0], maxFaces, 3), dtype=torch.float)\n', (14699, 14765), False, 'import torch\n'), ((15247, 15314), 'torch.zeros', 'torch.zeros', (['(face_features_initial.shape[0], 3)'], {'dtype': 'torch.float'}), '((face_features_initial.shape[0], 3), dtype=torch.float)\n', (15258, 15314), False, 'import torch\n'), ((15411, 15478), 'torch.zeros', 'torch.zeros', (['(face_features_initial.shape[0], 3)'], {'dtype': 'torch.float'}), '((face_features_initial.shape[0], 3), dtype=torch.float)\n', (15422, 15478), False, 'import torch\n'), ((17055, 17132), 'torch.zeros', 'torch.zeros', (['(face_features_initial.shape[0], maxFaces, 3)'], {'dtype': 'torch.float'}), '((face_features_initial.shape[0], maxFaces, 3), dtype=torch.float)\n', (17066, 17132), False, 'import torch\n'), ((17614, 17681), 'torch.zeros', 'torch.zeros', (['(face_features_initial.shape[0], 3)'], {'dtype': 'torch.float'}), '((face_features_initial.shape[0], 3), dtype=torch.float)\n', (17625, 17681), False, 'import torch\n'), ((17778, 17845), 'torch.zeros', 'torch.zeros', (['(face_features_initial.shape[0], 3)'], {'dtype': 'torch.float'}), '((face_features_initial.shape[0], 3), dtype=torch.float)\n', (17789, 17845), False, 'import torch\n'), ((18846, 18864), 'torch.nn.Linear', 'nn.Linear', (['(2464)', '(3)'], {}), '(2464, 3)\n', (18855, 18864), True, 'import torch.nn as nn\n'), ((18873, 18926), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', 
(['self.global_fc3_debug.weight'], {}), '(self.global_fc3_debug.weight)\n', (18896, 18926), True, 'import torch.nn as nn\n'), ((19004, 19038), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(2208)'], {'affine': '(False)'}), '(2208, affine=False)\n', (19018, 19038), True, 'import torch.nn as nn\n'), ((19071, 19104), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {'affine': '(False)'}), '(256, affine=False)\n', (19085, 19104), True, 'import torch.nn as nn\n'), ((19139, 19154), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (19149, 19154), True, 'import torch.nn as nn\n'), ((19285, 19315), 'torch.nn.functional.relu', 'F.relu', (['features'], {'inplace': '(True)'}), '(features, inplace=True)\n', (19291, 19315), True, 'import torch.nn.functional as F\n'), ((19455, 19488), 'torch.autograd.Variable', 'Variable', (['global_features_initial'], {}), '(global_features_initial)\n', (19463, 19488), False, 'from torch.autograd import Variable\n'), ((19633, 19664), 'torch.sum', 'torch.sum', (['face_features'], {'dim': '(1)'}), '(face_features, dim=1)\n', (19642, 19664), False, 'import torch\n'), ((20187, 20249), 'torch.cat', 'torch.cat', (['(face_features_sum, global_features_initial)'], {'dim': '(1)'}), '((face_features_sum, global_features_initial), dim=1)\n', (20196, 20249), False, 'import torch\n'), ((21193, 21210), 'torch.nn.Linear', 'nn.Linear', (['(320)', '(3)'], {}), '(320, 3)\n', (21202, 21210), True, 'import torch.nn as nn\n'), ((21219, 21272), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.global_fc3_debug.weight'], {}), '(self.global_fc3_debug.weight)\n', (21242, 21272), True, 'import torch.nn as nn\n'), ((21351, 21369), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(64)'], {}), '(256, 64)\n', (21360, 21369), True, 'import torch.nn as nn\n'), ((21378, 21424), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.global_fc.weight'], {}), '(self.global_fc.weight)\n', (21401, 21424), True, 'import torch.nn as nn\n'), 
((21507, 21524), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (21517, 21524), True, 'import torch.nn as nn\n'), ((21565, 21582), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (21575, 21582), True, 'import torch.nn as nn\n'), ((21624, 21641), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (21634, 21641), True, 'import torch.nn as nn\n'), ((21674, 21706), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {'affine': '(False)'}), '(64, affine=False)\n', (21688, 21706), True, 'import torch.nn as nn\n'), ((21738, 21771), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {'affine': '(False)'}), '(256, affine=False)\n', (21752, 21771), True, 'import torch.nn as nn\n'), ((22203, 22236), 'numpy.minimum', 'np.minimum', (['numberFaces', 'maxFaces'], {}), '(numberFaces, maxFaces)\n', (22213, 22236), True, 'import numpy as np\n'), ((22262, 22320), 'torch.zeros', 'torch.zeros', (['(batch_size, maxFaces, 64)'], {'dtype': 'torch.float'}), '((batch_size, maxFaces, 64), dtype=torch.float)\n', (22273, 22320), False, 'import torch\n'), ((22697, 22749), 'numpy.zeros', 'np.zeros', (['(batch_size, 1, maxFaces)'], {'dtype': '"""float32"""'}), "((batch_size, 1, maxFaces), dtype='float32')\n", (22705, 22749), True, 'import numpy as np\n'), ((22929, 22951), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (22945, 22951), False, 'import torch\n'), ((23010, 23051), 'torch.bmm', 'torch.bmm', (['global_features', 'face_features'], {}), '(global_features, face_features)\n', (23019, 23051), False, 'import torch\n'), ((23255, 23290), 'torch.nn.functional.softmax', 'F.softmax', (['attention_scores'], {'dim': '(-1)'}), '(attention_scores, dim=-1)\n', (23264, 23290), True, 'import torch.nn.functional as F\n'), ((23384, 23410), 'torch.autograd.Variable', 'Variable', (['attention_scores'], {}), '(attention_scores)\n', (23392, 23410), False, 'from torch.autograd import Variable\n'), ((23865, 23907), 'torch.bmm', 
'torch.bmm', (['attention_scores', 'face_features'], {}), '(attention_scores, face_features)\n', (23874, 23907), False, 'import torch\n'), ((24227, 24291), 'torch.cat', 'torch.cat', (['(attended_face_features, global_features_main)'], {'dim': '(1)'}), '((attended_face_features, global_features_main), dim=1)\n', (24236, 24291), False, 'import torch\n'), ((25221, 25238), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(3)'], {}), '(512, 3)\n', (25230, 25238), True, 'import torch.nn as nn\n'), ((25247, 25300), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.global_fc3_debug.weight'], {}), '(self.global_fc3_debug.weight)\n', (25270, 25300), True, 'import torch.nn as nn\n'), ((25382, 25400), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(64)'], {}), '(256, 64)\n', (25391, 25400), True, 'import torch.nn as nn\n'), ((25409, 25458), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.attentionfc1.weight'], {}), '(self.attentionfc1.weight)\n', (25432, 25458), True, 'import torch.nn as nn\n'), ((25539, 25555), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(1)'], {}), '(64, 1)\n', (25548, 25555), True, 'import torch.nn as nn\n'), ((25564, 25613), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.attentionfc2.weight'], {}), '(self.attentionfc2.weight)\n', (25587, 25613), True, 'import torch.nn as nn\n'), ((25699, 25716), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (25709, 25716), True, 'import torch.nn as nn\n'), ((25757, 25774), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (25767, 25774), True, 'import torch.nn as nn\n'), ((25816, 25833), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (25826, 25833), True, 'import torch.nn as nn\n'), ((25866, 25899), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {'affine': '(False)'}), '(256, affine=False)\n', (25880, 25899), True, 'import torch.nn as nn\n'), ((25931, 25964), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', 
(['(256)'], {'affine': '(False)'}), '(256, affine=False)\n', (25945, 25964), True, 'import torch.nn as nn\n'), ((26305, 26338), 'numpy.minimum', 'np.minimum', (['numberFaces', 'maxFaces'], {}), '(numberFaces, maxFaces)\n', (26315, 26338), True, 'import numpy as np\n'), ((26364, 26423), 'torch.zeros', 'torch.zeros', (['(batch_size, maxFaces, 256)'], {'dtype': 'torch.float'}), '((batch_size, maxFaces, 256), dtype=torch.float)\n', (26375, 26423), False, 'import torch\n'), ((26511, 26568), 'torch.zeros', 'torch.zeros', (['(batch_size, maxFaces, 1)'], {'dtype': 'torch.float'}), '((batch_size, maxFaces, 1), dtype=torch.float)\n', (26522, 26568), False, 'import torch\n'), ((26601, 26659), 'torch.zeros', 'torch.zeros', (['(batch_size, maxFaces, 64)'], {'dtype': 'torch.float'}), '((batch_size, maxFaces, 64), dtype=torch.float)\n', (26612, 26659), False, 'import torch\n'), ((27254, 27306), 'numpy.zeros', 'np.zeros', (['(batch_size, 1, maxFaces)'], {'dtype': '"""float32"""'}), "((batch_size, 1, maxFaces), dtype='float32')\n", (27262, 27306), True, 'import numpy as np\n'), ((27486, 27508), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (27502, 27508), False, 'import torch\n'), ((27662, 27697), 'torch.nn.functional.softmax', 'F.softmax', (['attention_scores'], {'dim': '(-1)'}), '(attention_scores, dim=-1)\n', (27671, 27697), True, 'import torch.nn.functional as F\n'), ((27737, 27763), 'torch.autograd.Variable', 'Variable', (['attention_scores'], {}), '(attention_scores)\n', (27745, 27763), False, 'from torch.autograd import Variable\n'), ((28219, 28261), 'torch.bmm', 'torch.bmm', (['attention_scores', 'face_features'], {}), '(attention_scores, face_features)\n', (28228, 28261), False, 'import torch\n'), ((28644, 28703), 'torch.cat', 'torch.cat', (['(attended_face_features, global_features)'], {'dim': '(1)'}), '((attended_face_features, global_features), dim=1)\n', (28653, 28703), False, 'import torch\n'), ((29617, 29634), 'torch.nn.Linear', 'nn.Linear', 
(['(512)', '(3)'], {}), '(512, 3)\n', (29626, 29634), True, 'import torch.nn as nn\n'), ((29643, 29696), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.global_fc3_debug.weight'], {}), '(self.global_fc3_debug.weight)\n', (29666, 29696), True, 'import torch.nn as nn\n'), ((29778, 29796), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(64)'], {}), '(256, 64)\n', (29787, 29796), True, 'import torch.nn as nn\n'), ((29805, 29854), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.attentionfc1.weight'], {}), '(self.attentionfc1.weight)\n', (29828, 29854), True, 'import torch.nn as nn\n'), ((29935, 29951), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(1)'], {}), '(64, 1)\n', (29944, 29951), True, 'import torch.nn as nn\n'), ((29960, 30009), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.attentionfc2.weight'], {}), '(self.attentionfc2.weight)\n', (29983, 30009), True, 'import torch.nn as nn\n'), ((30089, 30109), 'torch.nn.Linear', 'nn.Linear', (['(2208)', '(256)'], {}), '(2208, 256)\n', (30098, 30109), True, 'import torch.nn as nn\n'), ((30118, 30169), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.global_fc_main.weight'], {}), '(self.global_fc_main.weight)\n', (30141, 30169), True, 'import torch.nn as nn\n'), ((30257, 30274), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (30267, 30274), True, 'import torch.nn as nn\n'), ((30315, 30332), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (30325, 30332), True, 'import torch.nn as nn\n'), ((30374, 30391), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (30384, 30391), True, 'import torch.nn as nn\n'), ((30424, 30457), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {'affine': '(False)'}), '(256, affine=False)\n', (30438, 30457), True, 'import torch.nn as nn\n'), ((30489, 30522), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {'affine': '(False)'}), '(256, affine=False)\n', 
(30503, 30522), True, 'import torch.nn as nn\n'), ((30670, 30701), 'torch.nn.functional.relu', 'F.relu', (['features'], {'inplace': '(False)'}), '(features, inplace=False)\n', (30676, 30701), True, 'import torch.nn.functional as F\n'), ((30843, 30876), 'torch.autograd.Variable', 'Variable', (['global_features_initial'], {}), '(global_features_initial)\n', (30851, 30876), False, 'from torch.autograd import Variable\n'), ((31194, 31227), 'numpy.minimum', 'np.minimum', (['numberFaces', 'maxFaces'], {}), '(numberFaces, maxFaces)\n', (31204, 31227), True, 'import numpy as np\n'), ((31253, 31312), 'torch.zeros', 'torch.zeros', (['(batch_size, maxFaces, 256)'], {'dtype': 'torch.float'}), '((batch_size, maxFaces, 256), dtype=torch.float)\n', (31264, 31312), False, 'import torch\n'), ((31400, 31457), 'torch.zeros', 'torch.zeros', (['(batch_size, maxFaces, 1)'], {'dtype': 'torch.float'}), '((batch_size, maxFaces, 1), dtype=torch.float)\n', (31411, 31457), False, 'import torch\n'), ((31490, 31548), 'torch.zeros', 'torch.zeros', (['(batch_size, maxFaces, 64)'], {'dtype': 'torch.float'}), '((batch_size, maxFaces, 64), dtype=torch.float)\n', (31501, 31548), False, 'import torch\n'), ((32142, 32194), 'numpy.zeros', 'np.zeros', (['(batch_size, 1, maxFaces)'], {'dtype': '"""float32"""'}), "((batch_size, 1, maxFaces), dtype='float32')\n", (32150, 32194), True, 'import numpy as np\n'), ((32374, 32396), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (32390, 32396), False, 'import torch\n'), ((32550, 32585), 'torch.nn.functional.softmax', 'F.softmax', (['attention_scores'], {'dim': '(-1)'}), '(attention_scores, dim=-1)\n', (32559, 32585), True, 'import torch.nn.functional as F\n'), ((32625, 32651), 'torch.autograd.Variable', 'Variable', (['attention_scores'], {}), '(attention_scores)\n', (32633, 32651), False, 'from torch.autograd import Variable\n'), ((33107, 33149), 'torch.bmm', 'torch.bmm', (['attention_scores', 'face_features'], {}), '(attention_scores, 
face_features)\n', (33116, 33149), False, 'import torch\n'), ((33532, 33591), 'torch.cat', 'torch.cat', (['(attended_face_features, global_features)'], {'dim': '(1)'}), '((attended_face_features, global_features), dim=1)\n', (33541, 33591), False, 'import torch\n'), ((34577, 34654), 'torch.zeros', 'torch.zeros', (['(face_features_initial.shape[0], maxFaces, 3)'], {'dtype': 'torch.float'}), '((face_features_initial.shape[0], maxFaces, 3), dtype=torch.float)\n', (34588, 34654), False, 'import torch\n'), ((35136, 35203), 'torch.zeros', 'torch.zeros', (['(face_features_initial.shape[0], 3)'], {'dtype': 'torch.float'}), '((face_features_initial.shape[0], 3), dtype=torch.float)\n', (35147, 35203), False, 'import torch\n'), ((35300, 35367), 'torch.zeros', 'torch.zeros', (['(face_features_initial.shape[0], 3)'], {'dtype': 'torch.float'}), '((face_features_initial.shape[0], 3), dtype=torch.float)\n', (35311, 35367), False, 'import torch\n'), ((36774, 36851), 'torch.zeros', 'torch.zeros', (['(face_features_initial.shape[0], maxFaces, 3)'], {'dtype': 'torch.float'}), '((face_features_initial.shape[0], maxFaces, 3), dtype=torch.float)\n', (36785, 36851), False, 'import torch\n'), ((37456, 37523), 'torch.zeros', 'torch.zeros', (['(face_features_initial.shape[0], 3)'], {'dtype': 'torch.float'}), '((face_features_initial.shape[0], 3), dtype=torch.float)\n', (37467, 37523), False, 'import torch\n'), ((37621, 37688), 'torch.zeros', 'torch.zeros', (['(face_features_initial.shape[0], 3)'], {'dtype': 'torch.float'}), '((face_features_initial.shape[0], 3), dtype=torch.float)\n', (37632, 37688), False, 'import torch\n'), ((38996, 39016), 'torch.nn.Linear', 'nn.Linear', (['(2208)', '(256)'], {}), '(2208, 256)\n', (39005, 39016), True, 'import torch.nn as nn\n'), ((39025, 39076), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.global_fc_main.weight'], {}), '(self.global_fc_main.weight)\n', (39048, 39076), True, 'import torch.nn as nn\n'), ((39160, 39177), 
'torch.nn.Linear', 'nn.Linear', (['(512)', '(3)'], {}), '(512, 3)\n', (39169, 39177), True, 'import torch.nn as nn\n'), ((39186, 39239), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.global_fc3_debug.weight'], {}), '(self.global_fc3_debug.weight)\n', (39209, 39239), True, 'import torch.nn as nn\n'), ((39334, 39351), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (39344, 39351), True, 'import torch.nn as nn\n'), ((39393, 39410), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (39403, 39410), True, 'import torch.nn as nn\n'), ((39443, 39476), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {'affine': '(False)'}), '(256, affine=False)\n', (39457, 39476), True, 'import torch.nn as nn\n'), ((39508, 39541), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {'affine': '(False)'}), '(256, affine=False)\n', (39522, 39541), True, 'import torch.nn as nn\n'), ((39689, 39720), 'torch.nn.functional.relu', 'F.relu', (['features'], {'inplace': '(False)'}), '(features, inplace=False)\n', (39695, 39720), True, 'import torch.nn.functional as F\n'), ((39862, 39895), 'torch.autograd.Variable', 'Variable', (['global_features_initial'], {}), '(global_features_initial)\n', (39870, 39895), False, 'from torch.autograd import Variable\n'), ((40212, 40245), 'numpy.minimum', 'np.minimum', (['numberFaces', 'maxFaces'], {}), '(numberFaces, maxFaces)\n', (40222, 40245), True, 'import numpy as np\n'), ((40271, 40330), 'torch.zeros', 'torch.zeros', (['(batch_size, maxFaces, 256)'], {'dtype': 'torch.float'}), '((batch_size, maxFaces, 256), dtype=torch.float)\n', (40282, 40330), False, 'import torch\n'), ((40708, 40760), 'numpy.zeros', 'np.zeros', (['(batch_size, 1, maxFaces)'], {'dtype': '"""float32"""'}), "((batch_size, 1, maxFaces), dtype='float32')\n", (40716, 40760), True, 'import numpy as np\n'), ((40940, 40962), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (40956, 40962), False, 'import torch\n'), 
((41021, 41062), 'torch.bmm', 'torch.bmm', (['global_features', 'face_features'], {}), '(global_features, face_features)\n', (41030, 41062), False, 'import torch\n'), ((41266, 41301), 'torch.nn.functional.softmax', 'F.softmax', (['attention_scores'], {'dim': '(-1)'}), '(attention_scores, dim=-1)\n', (41275, 41301), True, 'import torch.nn.functional as F\n'), ((41395, 41421), 'torch.autograd.Variable', 'Variable', (['attention_scores'], {}), '(attention_scores)\n', (41403, 41421), False, 'from torch.autograd import Variable\n'), ((41877, 41919), 'torch.bmm', 'torch.bmm', (['attention_scores', 'face_features'], {}), '(attention_scores, face_features)\n', (41886, 41919), False, 'import torch\n'), ((42292, 42351), 'torch.cat', 'torch.cat', (['(attended_face_features, global_features)'], {'dim': '(1)'}), '((attended_face_features, global_features), dim=1)\n', (42301, 42351), False, 'import torch\n'), ((43255, 43272), 'torch.nn.Linear', 'nn.Linear', (['(320)', '(3)'], {}), '(320, 3)\n', (43264, 43272), True, 'import torch.nn as nn\n'), ((43281, 43334), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.global_fc3_debug.weight'], {}), '(self.global_fc3_debug.weight)\n', (43304, 43334), True, 'import torch.nn as nn\n'), ((43413, 43431), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(64)'], {}), '(256, 64)\n', (43422, 43431), True, 'import torch.nn as nn\n'), ((43440, 43486), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.global_fc.weight'], {}), '(self.global_fc.weight)\n', (43463, 43486), True, 'import torch.nn as nn\n'), ((43569, 43586), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (43579, 43586), True, 'import torch.nn as nn\n'), ((43627, 43644), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (43637, 43644), True, 'import torch.nn as nn\n'), ((43686, 43703), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (43696, 43703), True, 'import torch.nn as nn\n'), ((43736, 
43768), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {'affine': '(False)'}), '(64, affine=False)\n', (43750, 43768), True, 'import torch.nn as nn\n'), ((43800, 43833), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {'affine': '(False)'}), '(256, affine=False)\n', (43814, 43833), True, 'import torch.nn as nn\n'), ((43873, 43893), 'torch.nn.Linear', 'nn.Linear', (['(2208)', '(256)'], {}), '(2208, 256)\n', (43882, 43893), True, 'import torch.nn as nn\n'), ((43902, 43953), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.global_fc_main.weight'], {}), '(self.global_fc_main.weight)\n', (43925, 43953), True, 'import torch.nn as nn\n'), ((44151, 44182), 'torch.nn.functional.relu', 'F.relu', (['features'], {'inplace': '(False)'}), '(features, inplace=False)\n', (44157, 44182), True, 'import torch.nn.functional as F\n'), ((44324, 44357), 'torch.autograd.Variable', 'Variable', (['global_features_initial'], {}), '(global_features_initial)\n', (44332, 44357), False, 'from torch.autograd import Variable\n'), ((44766, 44799), 'numpy.minimum', 'np.minimum', (['numberFaces', 'maxFaces'], {}), '(numberFaces, maxFaces)\n', (44776, 44799), True, 'import numpy as np\n'), ((44825, 44883), 'torch.zeros', 'torch.zeros', (['(batch_size, maxFaces, 64)'], {'dtype': 'torch.float'}), '((batch_size, maxFaces, 64), dtype=torch.float)\n', (44836, 44883), False, 'import torch\n'), ((45260, 45312), 'numpy.zeros', 'np.zeros', (['(batch_size, 1, maxFaces)'], {'dtype': '"""float32"""'}), "((batch_size, 1, maxFaces), dtype='float32')\n", (45268, 45312), True, 'import numpy as np\n'), ((45492, 45514), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (45508, 45514), False, 'import torch\n'), ((45573, 45614), 'torch.bmm', 'torch.bmm', (['global_features', 'face_features'], {}), '(global_features, face_features)\n', (45582, 45614), False, 'import torch\n'), ((45818, 45853), 'torch.nn.functional.softmax', 'F.softmax', (['attention_scores'], {'dim': 
'(-1)'}), '(attention_scores, dim=-1)\n', (45827, 45853), True, 'import torch.nn.functional as F\n'), ((45947, 45973), 'torch.autograd.Variable', 'Variable', (['attention_scores'], {}), '(attention_scores)\n', (45955, 45973), False, 'from torch.autograd import Variable\n'), ((46428, 46470), 'torch.bmm', 'torch.bmm', (['attention_scores', 'face_features'], {}), '(attention_scores, face_features)\n', (46437, 46470), False, 'import torch\n'), ((46790, 46854), 'torch.cat', 'torch.cat', (['(attended_face_features, global_features_main)'], {'dim': '(1)'}), '((attended_face_features, global_features_main), dim=1)\n', (46799, 46854), False, 'import torch\n'), ((47770, 47787), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(3)'], {}), '(512, 3)\n', (47779, 47787), True, 'import torch.nn as nn\n'), ((47796, 47849), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.global_fc3_debug.weight'], {}), '(self.global_fc3_debug.weight)\n', (47819, 47849), True, 'import torch.nn as nn\n'), ((47931, 47949), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(64)'], {}), '(256, 64)\n', (47940, 47949), True, 'import torch.nn as nn\n'), ((47958, 48007), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.attentionfc1.weight'], {}), '(self.attentionfc1.weight)\n', (47981, 48007), True, 'import torch.nn as nn\n'), ((48088, 48104), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(1)'], {}), '(64, 1)\n', (48097, 48104), True, 'import torch.nn as nn\n'), ((48113, 48162), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.attentionfc2.weight'], {}), '(self.attentionfc2.weight)\n', (48136, 48162), True, 'import torch.nn as nn\n'), ((48248, 48265), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (48258, 48265), True, 'import torch.nn as nn\n'), ((48306, 48323), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (48316, 48323), True, 'import torch.nn as nn\n'), ((48365, 48382), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 
'(0.5)'}), '(p=0.5)\n', (48375, 48382), True, 'import torch.nn as nn\n'), ((48415, 48448), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {'affine': '(False)'}), '(256, affine=False)\n', (48429, 48448), True, 'import torch.nn as nn\n'), ((48480, 48513), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {'affine': '(False)'}), '(256, affine=False)\n', (48494, 48513), True, 'import torch.nn as nn\n'), ((48854, 48887), 'numpy.minimum', 'np.minimum', (['numberFaces', 'maxFaces'], {}), '(numberFaces, maxFaces)\n', (48864, 48887), True, 'import numpy as np\n'), ((48913, 48972), 'torch.zeros', 'torch.zeros', (['(batch_size, maxFaces, 256)'], {'dtype': 'torch.float'}), '((batch_size, maxFaces, 256), dtype=torch.float)\n', (48924, 48972), False, 'import torch\n'), ((49060, 49117), 'torch.zeros', 'torch.zeros', (['(batch_size, maxFaces, 1)'], {'dtype': 'torch.float'}), '((batch_size, maxFaces, 1), dtype=torch.float)\n', (49071, 49117), False, 'import torch\n'), ((49150, 49208), 'torch.zeros', 'torch.zeros', (['(batch_size, maxFaces, 64)'], {'dtype': 'torch.float'}), '((batch_size, maxFaces, 64), dtype=torch.float)\n', (49161, 49208), False, 'import torch\n'), ((49803, 49855), 'numpy.zeros', 'np.zeros', (['(batch_size, 1, maxFaces)'], {'dtype': '"""float32"""'}), "((batch_size, 1, maxFaces), dtype='float32')\n", (49811, 49855), True, 'import numpy as np\n'), ((50035, 50057), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (50051, 50057), False, 'import torch\n'), ((50211, 50246), 'torch.nn.functional.softmax', 'F.softmax', (['attention_scores'], {'dim': '(-1)'}), '(attention_scores, dim=-1)\n', (50220, 50246), True, 'import torch.nn.functional as F\n'), ((50286, 50312), 'torch.autograd.Variable', 'Variable', (['attention_scores'], {}), '(attention_scores)\n', (50294, 50312), False, 'from torch.autograd import Variable\n'), ((50768, 50810), 'torch.bmm', 'torch.bmm', (['attention_scores', 'face_features'], {}), '(attention_scores, 
face_features)\n', (50777, 50810), False, 'import torch\n'), ((51193, 51252), 'torch.cat', 'torch.cat', (['(attended_face_features, global_features)'], {'dim': '(1)'}), '((attended_face_features, global_features), dim=1)\n', (51202, 51252), False, 'import torch\n'), ((52168, 52185), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(3)'], {}), '(512, 3)\n', (52177, 52185), True, 'import torch.nn as nn\n'), ((52194, 52247), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.global_fc3_debug.weight'], {}), '(self.global_fc3_debug.weight)\n', (52217, 52247), True, 'import torch.nn as nn\n'), ((52329, 52347), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(64)'], {}), '(256, 64)\n', (52338, 52347), True, 'import torch.nn as nn\n'), ((52356, 52405), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.attentionfc1.weight'], {}), '(self.attentionfc1.weight)\n', (52379, 52405), True, 'import torch.nn as nn\n'), ((52486, 52502), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(1)'], {}), '(64, 1)\n', (52495, 52502), True, 'import torch.nn as nn\n'), ((52511, 52560), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.attentionfc2.weight'], {}), '(self.attentionfc2.weight)\n', (52534, 52560), True, 'import torch.nn as nn\n'), ((52640, 52660), 'torch.nn.Linear', 'nn.Linear', (['(2208)', '(256)'], {}), '(2208, 256)\n', (52649, 52660), True, 'import torch.nn as nn\n'), ((52669, 52720), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.global_fc_main.weight'], {}), '(self.global_fc_main.weight)\n', (52692, 52720), True, 'import torch.nn as nn\n'), ((52808, 52825), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (52818, 52825), True, 'import torch.nn as nn\n'), ((52866, 52883), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (52876, 52883), True, 'import torch.nn as nn\n'), ((52925, 52942), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (52935, 52942), True, 
'import torch.nn as nn\n'), ((52975, 53008), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {'affine': '(False)'}), '(256, affine=False)\n', (52989, 53008), True, 'import torch.nn as nn\n'), ((53040, 53073), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {'affine': '(False)'}), '(256, affine=False)\n', (53054, 53073), True, 'import torch.nn as nn\n'), ((53221, 53252), 'torch.nn.functional.relu', 'F.relu', (['features'], {'inplace': '(False)'}), '(features, inplace=False)\n', (53227, 53252), True, 'import torch.nn.functional as F\n'), ((53394, 53427), 'torch.autograd.Variable', 'Variable', (['global_features_initial'], {}), '(global_features_initial)\n', (53402, 53427), False, 'from torch.autograd import Variable\n'), ((53745, 53778), 'numpy.minimum', 'np.minimum', (['numberFaces', 'maxFaces'], {}), '(numberFaces, maxFaces)\n', (53755, 53778), True, 'import numpy as np\n'), ((53804, 53863), 'torch.zeros', 'torch.zeros', (['(batch_size, maxFaces, 256)'], {'dtype': 'torch.float'}), '((batch_size, maxFaces, 256), dtype=torch.float)\n', (53815, 53863), False, 'import torch\n'), ((53951, 54008), 'torch.zeros', 'torch.zeros', (['(batch_size, maxFaces, 1)'], {'dtype': 'torch.float'}), '((batch_size, maxFaces, 1), dtype=torch.float)\n', (53962, 54008), False, 'import torch\n'), ((54041, 54099), 'torch.zeros', 'torch.zeros', (['(batch_size, maxFaces, 64)'], {'dtype': 'torch.float'}), '((batch_size, maxFaces, 64), dtype=torch.float)\n', (54052, 54099), False, 'import torch\n'), ((54694, 54746), 'numpy.zeros', 'np.zeros', (['(batch_size, 1, maxFaces)'], {'dtype': '"""float32"""'}), "((batch_size, 1, maxFaces), dtype='float32')\n", (54702, 54746), True, 'import numpy as np\n'), ((54926, 54948), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (54942, 54948), False, 'import torch\n'), ((55102, 55137), 'torch.nn.functional.softmax', 'F.softmax', (['attention_scores'], {'dim': '(-1)'}), '(attention_scores, dim=-1)\n', (55111, 55137), True, 'import 
torch.nn.functional as F\n'), ((55177, 55203), 'torch.autograd.Variable', 'Variable', (['attention_scores'], {}), '(attention_scores)\n', (55185, 55203), False, 'from torch.autograd import Variable\n'), ((55659, 55701), 'torch.bmm', 'torch.bmm', (['attention_scores', 'face_features'], {}), '(attention_scores, face_features)\n', (55668, 55701), False, 'import torch\n'), ((56084, 56143), 'torch.cat', 'torch.cat', (['(attended_face_features, global_features)'], {'dim': '(1)'}), '((attended_face_features, global_features), dim=1)\n', (56093, 56143), False, 'import torch\n'), ((69326, 69337), 'time.time', 'time.time', ([], {}), '()\n', (69335, 69337), False, 'import time\n'), ((4319, 4355), 'numpy.zeros', 'np.zeros', (['(3, 224, 224)'], {'dtype': 'float'}), '((3, 224, 224), dtype=float)\n', (4327, 4355), True, 'import numpy as np\n'), ((4592, 4688), 'numpy.load', 'np.load', (["(self.root_dir + 'FaceFeatures/' + train + '/' + labelname + '/' + filename +\n '.npz')"], {}), "(self.root_dir + 'FaceFeatures/' + train + '/' + labelname + '/' +\n filename + '.npz')\n", (4599, 4688), True, 'import numpy as np\n'), ((6940, 6980), 'torch.FloatTensor', 'torch.FloatTensor', (['input_dim', 'output_dim'], {}), '(input_dim, output_dim)\n', (6957, 6980), False, 'import torch\n'), ((8202, 8258), 'torch.zeros', 'torch.zeros', (['(batch_size, numClasses)'], {'dtype': 'torch.float'}), '((batch_size, numClasses), dtype=torch.float)\n', (8213, 8258), False, 'import torch\n'), ((8281, 8337), 'torch.zeros', 'torch.zeros', (['(batch_size, numClasses)'], {'dtype': 'torch.float'}), '((batch_size, numClasses), dtype=torch.float)\n', (8292, 8337), False, 'import torch\n'), ((8360, 8416), 'torch.zeros', 'torch.zeros', (['(batch_size, numClasses)'], {'dtype': 'torch.float'}), '((batch_size, numClasses), dtype=torch.float)\n', (8371, 8416), False, 'import torch\n'), ((14896, 14931), 'torch.zeros', 'torch.zeros', (['(2,)'], {'dtype': 'torch.long'}), '((2,), dtype=torch.long)\n', (14907, 14931), False, 
'import torch\n'), ((17263, 17298), 'torch.zeros', 'torch.zeros', (['(2,)'], {'dtype': 'torch.long'}), '((2,), dtype=torch.long)\n', (17274, 17298), False, 'import torch\n'), ((34785, 34820), 'torch.zeros', 'torch.zeros', (['(2,)'], {'dtype': 'torch.long'}), '((2,), dtype=torch.long)\n', (34796, 34820), False, 'import torch\n'), ((36982, 37017), 'torch.zeros', 'torch.zeros', (['(2,)'], {'dtype': 'torch.long'}), '((2,), dtype=torch.long)\n', (36993, 37017), False, 'import torch\n'), ((66411, 68460), 'numpy.savez', 'np.savez', (['"""fourteen_models_output_data"""'], {'output_train': 'output_train', 'output_train_model1': 'output_train_model1', 'output_train_model2': 'output_train_model2', 'output_train_model3': 'output_train_model3', 'output_train_model4': 'output_train_model4', 'output_train_model5': 'output_train_model5', 'output_train_model6': 'output_train_model6', 'output_train_model7': 'output_train_model7', 'output_train_model8': 'output_train_model8', 'output_train_model9': 'output_train_model9', 'output_train_model10': 'output_train_model10', 'output_train_model11': 'output_train_model11', 'output_train_model12': 'output_train_model12', 'output_train_model13': 'output_train_model13', 'output_train_model14': 'output_train_model14', 'output_valid': 'output_valid', 'output_valid_model1': 'output_valid_model1', 'output_valid_model2': 'output_valid_model2', 'output_valid_model3': 'output_valid_model3', 'output_valid_model4': 'output_valid_model4', 'output_valid_model5': 'output_valid_model5', 'output_valid_model6': 'output_valid_model6', 'output_valid_model7': 'output_valid_model7', 'output_valid_model8': 'output_valid_model8', 'output_valid_model9': 'output_valid_model9', 'output_valid_model10': 'output_valid_model10', 'output_valid_model11': 'output_valid_model11', 'output_valid_model12': 'output_valid_model12', 'output_valid_model13': 'output_valid_model13', 'output_valid_model14': 'output_valid_model14', 'output_test': 'output_test', 'output_test_model1': 
'output_test_model1', 'output_test_model2': 'output_test_model2', 'output_test_model3': 'output_test_model3', 'output_test_model4': 'output_test_model4', 'output_test_model5': 'output_test_model5', 'output_test_model6': 'output_test_model6', 'output_test_model7': 'output_test_model7', 'output_test_model8': 'output_test_model8', 'output_test_model9': 'output_test_model9', 'output_test_model10': 'output_test_model10', 'output_test_model11': 'output_test_model11', 'output_test_model12': 'output_test_model12', 'output_test_model13': 'output_test_model13', 'output_test_model14': 'output_test_model14', 'label_train': 'label_train', 'label_valid': 'label_valid', 'label_test': 'label_test'}), "('fourteen_models_output_data', output_train=output_train,\n output_train_model1=output_train_model1, output_train_model2=\n output_train_model2, output_train_model3=output_train_model3,\n output_train_model4=output_train_model4, output_train_model5=\n output_train_model5, output_train_model6=output_train_model6,\n output_train_model7=output_train_model7, output_train_model8=\n output_train_model8, output_train_model9=output_train_model9,\n output_train_model10=output_train_model10, output_train_model11=\n output_train_model11, output_train_model12=output_train_model12,\n output_train_model13=output_train_model13, output_train_model14=\n output_train_model14, output_valid=output_valid, output_valid_model1=\n output_valid_model1, output_valid_model2=output_valid_model2,\n output_valid_model3=output_valid_model3, output_valid_model4=\n output_valid_model4, output_valid_model5=output_valid_model5,\n output_valid_model6=output_valid_model6, output_valid_model7=\n output_valid_model7, output_valid_model8=output_valid_model8,\n output_valid_model9=output_valid_model9, output_valid_model10=\n output_valid_model10, output_valid_model11=output_valid_model11,\n output_valid_model12=output_valid_model12, output_valid_model13=\n output_valid_model13, output_valid_model14=output_valid_model14,\n 
output_test=output_test, output_test_model1=output_test_model1,\n output_test_model2=output_test_model2, output_test_model3=\n output_test_model3, output_test_model4=output_test_model4,\n output_test_model5=output_test_model5, output_test_model6=\n output_test_model6, output_test_model7=output_test_model7,\n output_test_model8=output_test_model8, output_test_model9=\n output_test_model9, output_test_model10=output_test_model10,\n output_test_model11=output_test_model11, output_test_model12=\n output_test_model12, output_test_model13=output_test_model13,\n output_test_model14=output_test_model14, label_train=label_train,\n label_valid=label_valid, label_test=label_test)\n", (66419, 68460), True, 'import numpy as np\n'), ((14580, 14613), 'numpy.minimum', 'np.minimum', (['numberFaces', 'maxFaces'], {}), '(numberFaces, maxFaces)\n', (14590, 14613), True, 'import numpy as np\n'), ((16947, 16980), 'numpy.minimum', 'np.minimum', (['numberFaces', 'maxFaces'], {}), '(numberFaces, maxFaces)\n', (16957, 16980), True, 'import numpy as np\n'), ((19350, 19392), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['out'], {'kernel_size': '(7)', 'stride': '(1)'}), '(out, kernel_size=7, stride=1)\n', (19362, 19392), True, 'import torch.nn.functional as F\n'), ((30738, 30780), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['out'], {'kernel_size': '(7)', 'stride': '(1)'}), '(out, kernel_size=7, stride=1)\n', (30750, 30780), True, 'import torch.nn.functional as F\n'), ((34469, 34502), 'numpy.minimum', 'np.minimum', (['numberFaces', 'maxFaces'], {}), '(numberFaces, maxFaces)\n', (34479, 34502), True, 'import numpy as np\n'), ((36666, 36699), 'numpy.minimum', 'np.minimum', (['numberFaces', 'maxFaces'], {}), '(numberFaces, maxFaces)\n', (36676, 36699), True, 'import numpy as np\n'), ((39757, 39799), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['out'], {'kernel_size': '(7)', 'stride': '(1)'}), '(out, kernel_size=7, stride=1)\n', (39769, 39799), True, 'import torch.nn.functional 
as F\n'), ((44219, 44261), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['out'], {'kernel_size': '(7)', 'stride': '(1)'}), '(out, kernel_size=7, stride=1)\n', (44231, 44261), True, 'import torch.nn.functional as F\n'), ((53289, 53331), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['out'], {'kernel_size': '(7)', 'stride': '(1)'}), '(out, kernel_size=7, stride=1)\n', (53301, 53331), True, 'import torch.nn.functional as F\n'), ((66203, 66234), 'torch.sum', 'torch.sum', (['(preds == labels.data)'], {}), '(preds == labels.data)\n', (66212, 66234), False, 'import torch\n'), ((61835, 61869), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(phase == 0)'], {}), '(phase == 0)\n', (61857, 61869), False, 'import torch\n'), ((66013, 66034), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (66022, 66034), False, 'import torch\n')] |
"""Compiled Theano functions, as well as NumPy equivalents of other symbolic functions."""
import sys
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
import theano
import theano.tensor as T
from ucs.constants import EPS, hues, M_SRGB_to_XYZ
from ucs import Conditions, symbolic
_srgb_to_ucs = None
_ucs_to_srgb_helper = None
def _h_to_H(h):
"""Converts CIECAM02/CAM02-UCS raw hue angle (h) to hue composition (H)."""
h = h % 360
if h < hues[1].h:
h += 360
i = 1
for i in range(1, 5):
if h < hues[i+1].h:
break
H_i_l = (h - hues[i].h) / hues[i].e
H_i_r = (hues[i+1].h - h) / hues[i+1].e
return hues[i].H + 100 * H_i_l / (H_i_l + H_i_r)
h_to_H = np.vectorize(_h_to_H)
def _H_to_h(H):
"""Converts CIECAM02/CAM02-UCS hue composition (H) to raw hue angle (h)."""
x0 = H % 400 * 360 / 400
h, _, _ = fmin_l_bfgs_b(lambda x: abs(h_to_H(x) - H), x0, approx_grad=True)
return h % 360
H_to_h = np.vectorize(_H_to_h)
def srgb_to_xyz(RGB):
"""Converts sRGB (gamma=2.2) colors to XYZ."""
RGB_linear = np.maximum(EPS, RGB)**2.2
return np.dot(RGB_linear, M_SRGB_to_XYZ)
def srgb_to_ucs(RGB, conds=None):
"""Converts sRGB (gamma=2.2) colors to CAM02-UCS (Luo et al. (2006)) Jab."""
global _srgb_to_ucs
if _srgb_to_ucs is None:
print('Building srgb_to_ucs()...', file=sys.stderr)
rgb = T.matrix('rgb')
conditions = T.scalars('Y_w', 'L_A', 'Y_b', 'F', 'c', 'N_c')
ucs = symbolic.srgb_to_ucs(rgb, *conditions)
_srgb_to_ucs = theano.function([rgb] + conditions, ucs,
allow_input_downcast=True, on_unused_input='ignore')
conds = conds or Conditions()
return _srgb_to_ucs(np.atleast_2d(RGB), *list(conds))
def ucs_to_srgb_helper(X, Jab, Y_w, L_A, Y_b, F, c, N_c):
"""Loss and gradient at point X (sRGB space) of the distance between the corresponding
Jab color and a target Jab color. Descending this gradient will approximately invert
srgb_to_ucs()."""
global _ucs_to_srgb_helper
if _ucs_to_srgb_helper is None:
print('Building ucs_to_srgb_helper()...', file=sys.stderr)
conditions = T.scalars('Y_w', 'L_A', 'Y_b', 'F', 'c', 'N_c')
x, jab = T.vectors('x', 'jab')
jab_x = symbolic.srgb_to_ucs(x, *conditions)
loss = symbolic.delta_e(jab_x, jab)**2
grad = T.grad(loss, x)
_ucs_to_srgb_helper = theano.function([x, jab] + conditions, [loss, grad],
allow_input_downcast=True, on_unused_input='ignore')
return _ucs_to_srgb_helper(np.squeeze(X), np.squeeze(Jab), Y_w, L_A, Y_b, F, c, N_c)
def ucs_to_srgb(Jab, conds=None):
"""Approximately inverts srgb_to_ucs() for a single color."""
conds = conds or Conditions()
x, _, _ = fmin_l_bfgs_b(ucs_to_srgb_helper, np.float64([0.5, 0.5, 0.5]),
args=[np.squeeze(Jab)] + list(conds))
return x
def ucs_to_srgb_b(Jab, conds=None):
"""Approximately inverts srgb_to_ucs() for a single color subject to sRGB gamut limits."""
conds = conds or Conditions()
x, _, _ = fmin_l_bfgs_b(ucs_to_srgb_helper, np.float64([0.5, 0.5, 0.5]),
args=[np.squeeze(Jab)] + list(conds), bounds=[(0, 1)]*3)
return x
def delta_e(Jab1, Jab2):
"""Returns the Euclidean distance between two CAM02-UCS Jab colors."""
return np.sqrt(np.sum(np.square(Jab1 - Jab2)))
def jab_to_jmh(Jab):
"""Converts rectangular (Jab) CAM02-UCS colors to cylindrical (JMh) format."""
Jab = np.atleast_1d(Jab)
J, a, b = Jab[..., 0], Jab[..., 1], Jab[..., 2]
M = np.sqrt(a**2 + b**2)
h = np.rad2deg(np.arctan2(b, a))
return np.stack([J, M, h], axis=-1)
def jmh_to_jab(JMh):
"""Converts cylindrical (JMh) CAM02-UCS colors to rectangular (Jab) format."""
JMh = np.atleast_1d(JMh)
J, M, h = JMh[..., 0], JMh[..., 1], JMh[..., 2]
a = M * np.cos(np.deg2rad(h))
b = M * np.sin(np.deg2rad(h))
return np.stack([J, a, b], axis=-1)
| [
"numpy.sqrt",
"ucs.Conditions",
"numpy.arctan2",
"theano.tensor.scalars",
"ucs.symbolic.delta_e",
"numpy.atleast_2d",
"theano.function",
"numpy.float64",
"numpy.stack",
"numpy.dot",
"numpy.maximum",
"numpy.squeeze",
"ucs.symbolic.srgb_to_ucs",
"numpy.square",
"numpy.deg2rad",
"numpy.ve... | [((725, 746), 'numpy.vectorize', 'np.vectorize', (['_h_to_H'], {}), '(_h_to_H)\n', (737, 746), True, 'import numpy as np\n'), ((983, 1004), 'numpy.vectorize', 'np.vectorize', (['_H_to_h'], {}), '(_H_to_h)\n', (995, 1004), True, 'import numpy as np\n'), ((1134, 1167), 'numpy.dot', 'np.dot', (['RGB_linear', 'M_SRGB_to_XYZ'], {}), '(RGB_linear, M_SRGB_to_XYZ)\n', (1140, 1167), True, 'import numpy as np\n'), ((3609, 3627), 'numpy.atleast_1d', 'np.atleast_1d', (['Jab'], {}), '(Jab)\n', (3622, 3627), True, 'import numpy as np\n'), ((3688, 3712), 'numpy.sqrt', 'np.sqrt', (['(a ** 2 + b ** 2)'], {}), '(a ** 2 + b ** 2)\n', (3695, 3712), True, 'import numpy as np\n'), ((3757, 3785), 'numpy.stack', 'np.stack', (['[J, M, h]'], {'axis': '(-1)'}), '([J, M, h], axis=-1)\n', (3765, 3785), True, 'import numpy as np\n'), ((3902, 3920), 'numpy.atleast_1d', 'np.atleast_1d', (['JMh'], {}), '(JMh)\n', (3915, 3920), True, 'import numpy as np\n'), ((4052, 4080), 'numpy.stack', 'np.stack', (['[J, a, b]'], {'axis': '(-1)'}), '([J, a, b], axis=-1)\n', (4060, 4080), True, 'import numpy as np\n'), ((1097, 1117), 'numpy.maximum', 'np.maximum', (['EPS', 'RGB'], {}), '(EPS, RGB)\n', (1107, 1117), True, 'import numpy as np\n'), ((1413, 1428), 'theano.tensor.matrix', 'T.matrix', (['"""rgb"""'], {}), "('rgb')\n", (1421, 1428), True, 'import theano.tensor as T\n'), ((1450, 1497), 'theano.tensor.scalars', 'T.scalars', (['"""Y_w"""', '"""L_A"""', '"""Y_b"""', '"""F"""', '"""c"""', '"""N_c"""'], {}), "('Y_w', 'L_A', 'Y_b', 'F', 'c', 'N_c')\n", (1459, 1497), True, 'import theano.tensor as T\n'), ((1512, 1550), 'ucs.symbolic.srgb_to_ucs', 'symbolic.srgb_to_ucs', (['rgb', '*conditions'], {}), '(rgb, *conditions)\n', (1532, 1550), False, 'from ucs import Conditions, symbolic\n'), ((1574, 1671), 'theano.function', 'theano.function', (['([rgb] + conditions)', 'ucs'], {'allow_input_downcast': '(True)', 'on_unused_input': '"""ignore"""'}), "([rgb] + conditions, ucs, allow_input_downcast=True,\n 
on_unused_input='ignore')\n", (1589, 1671), False, 'import theano\n'), ((1728, 1740), 'ucs.Conditions', 'Conditions', ([], {}), '()\n', (1738, 1740), False, 'from ucs import Conditions, symbolic\n'), ((1765, 1783), 'numpy.atleast_2d', 'np.atleast_2d', (['RGB'], {}), '(RGB)\n', (1778, 1783), True, 'import numpy as np\n'), ((2217, 2264), 'theano.tensor.scalars', 'T.scalars', (['"""Y_w"""', '"""L_A"""', '"""Y_b"""', '"""F"""', '"""c"""', '"""N_c"""'], {}), "('Y_w', 'L_A', 'Y_b', 'F', 'c', 'N_c')\n", (2226, 2264), True, 'import theano.tensor as T\n'), ((2282, 2303), 'theano.tensor.vectors', 'T.vectors', (['"""x"""', '"""jab"""'], {}), "('x', 'jab')\n", (2291, 2303), True, 'import theano.tensor as T\n'), ((2320, 2356), 'ucs.symbolic.srgb_to_ucs', 'symbolic.srgb_to_ucs', (['x', '*conditions'], {}), '(x, *conditions)\n', (2340, 2356), False, 'from ucs import Conditions, symbolic\n'), ((2419, 2434), 'theano.tensor.grad', 'T.grad', (['loss', 'x'], {}), '(loss, x)\n', (2425, 2434), True, 'import theano.tensor as T\n'), ((2465, 2575), 'theano.function', 'theano.function', (['([x, jab] + conditions)', '[loss, grad]'], {'allow_input_downcast': '(True)', 'on_unused_input': '"""ignore"""'}), "([x, jab] + conditions, [loss, grad], allow_input_downcast=\n True, on_unused_input='ignore')\n", (2480, 2575), False, 'import theano\n'), ((2648, 2661), 'numpy.squeeze', 'np.squeeze', (['X'], {}), '(X)\n', (2658, 2661), True, 'import numpy as np\n'), ((2663, 2678), 'numpy.squeeze', 'np.squeeze', (['Jab'], {}), '(Jab)\n', (2673, 2678), True, 'import numpy as np\n'), ((2829, 2841), 'ucs.Conditions', 'Conditions', ([], {}), '()\n', (2839, 2841), False, 'from ucs import Conditions, symbolic\n'), ((2890, 2917), 'numpy.float64', 'np.float64', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (2900, 2917), True, 'import numpy as np\n'), ((3152, 3164), 'ucs.Conditions', 'Conditions', ([], {}), '()\n', (3162, 3164), False, 'from ucs import Conditions, symbolic\n'), ((3213, 3240), 'numpy.float64', 
'np.float64', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (3223, 3240), True, 'import numpy as np\n'), ((3728, 3744), 'numpy.arctan2', 'np.arctan2', (['b', 'a'], {}), '(b, a)\n', (3738, 3744), True, 'import numpy as np\n'), ((2372, 2400), 'ucs.symbolic.delta_e', 'symbolic.delta_e', (['jab_x', 'jab'], {}), '(jab_x, jab)\n', (2388, 2400), False, 'from ucs import Conditions, symbolic\n'), ((3468, 3490), 'numpy.square', 'np.square', (['(Jab1 - Jab2)'], {}), '(Jab1 - Jab2)\n', (3477, 3490), True, 'import numpy as np\n'), ((3992, 4005), 'numpy.deg2rad', 'np.deg2rad', (['h'], {}), '(h)\n', (4002, 4005), True, 'import numpy as np\n'), ((4026, 4039), 'numpy.deg2rad', 'np.deg2rad', (['h'], {}), '(h)\n', (4036, 4039), True, 'import numpy as np\n'), ((2953, 2968), 'numpy.squeeze', 'np.squeeze', (['Jab'], {}), '(Jab)\n', (2963, 2968), True, 'import numpy as np\n'), ((3276, 3291), 'numpy.squeeze', 'np.squeeze', (['Jab'], {}), '(Jab)\n', (3286, 3291), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestFcOperator(hu.HypothesisTestCase):
@given(n=st.integers(1, 10), k=st.integers(1, 5),
use_length=st.booleans(), **hu.gcs_cpu_only)
def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):
lengths = np.random.randint(k, size=n).astype(np.int32) + 1
N = sum(lengths)
indices = np.random.randint(5, size=N)
values = np.random.rand(N, 2).astype(np.float32)
default = np.random.rand(2).astype(np.float32)
mask = np.arange(3)
np.random.shuffle(mask)
input_str = ['indices', 'values', 'default']
input_data = [indices, values, default]
if use_length and n > 1:
input_str.append('lengths')
input_data.append(lengths)
output_str = ['output']
op = core.CreateOperator(
'SparseToDenseMask',
input_str,
output_str,
mask=mask,
)
# Check over multiple devices
self.assertDeviceChecks(
dc, op, input_data, [0])
# Gradient check for values
self.assertGradientChecks(
gc, op, input_data, 1, [0])
@given(n=st.integers(1, 10), k=st.integers(1, 5),
use_length=st.booleans(), **hu.gcs_cpu_only)
def test_sparse_to_dense_mask_with_int64(self, n, k, use_length, gc, dc):
lengths = np.random.randint(k, size=n).astype(np.int32) + 1
N = sum(lengths)
int64_mask = 10000000000
indices = np.random.randint(5, size=N) + int64_mask
values = np.random.rand(N, 2).astype(np.float32)
default = np.random.rand(2).astype(np.float32)
mask = np.arange(3) + int64_mask
np.random.shuffle(mask)
input_str = ['indices', 'values', 'default']
input_data = [indices, values, default]
if use_length and n > 1:
input_str.append('lengths')
input_data.append(lengths)
output_str = ['output']
op = core.CreateOperator(
'SparseToDenseMask',
input_str,
output_str,
mask=mask,
)
# Check over multiple devices
self.assertDeviceChecks(
dc, op, input_data, [0])
# Gradient check for values
self.assertGradientChecks(
gc, op, input_data, 1, [0])
@given(n=st.integers(1, 10), k=st.integers(1, 5),
dim=st.integers(1, 3), **hu.gcs_cpu_only)
def test_sparse_to_dense_mask_high_dim(self, n, k, dim, gc, dc):
lengths = np.random.randint(k, size=n).astype(np.int32) + 1
N = sum(lengths)
indices = np.random.randint(5, size=N)
shape = np.random.randint(5, size=dim).astype(np.int32) + 1
values = np.random.rand(*((N,) + tuple(shape))).astype(np.float32)
default = np.random.rand(*shape).astype(np.float32)
mask = np.arange(3)
np.random.shuffle(mask)
op = core.CreateOperator(
'SparseToDenseMask',
['indices', 'values', 'default', 'lengths'],
['output'],
mask=mask,
)
# Check over multiple devices
self.assertDeviceChecks(
dc, op, [indices, values, default, lengths], [0])
# Gradient check for values
self.assertGradientChecks(
gc, op, [indices, values, default, lengths], 1, [0])
if __name__ == "__main__":
import unittest
unittest.main()
| [
"numpy.random.rand",
"hypothesis.strategies.integers",
"numpy.random.randint",
"hypothesis.strategies.booleans",
"caffe2.python.core.CreateOperator",
"unittest.main",
"numpy.arange",
"numpy.random.shuffle"
] | [((3722, 3737), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3735, 3737), False, 'import unittest\n'), ((648, 676), 'numpy.random.randint', 'np.random.randint', (['(5)'], {'size': 'N'}), '(5, size=N)\n', (665, 676), True, 'import numpy as np\n'), ((804, 816), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (813, 816), True, 'import numpy as np\n'), ((825, 848), 'numpy.random.shuffle', 'np.random.shuffle', (['mask'], {}), '(mask)\n', (842, 848), True, 'import numpy as np\n'), ((1109, 1183), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""SparseToDenseMask"""', 'input_str', 'output_str'], {'mask': 'mask'}), "('SparseToDenseMask', input_str, output_str, mask=mask)\n", (1128, 1183), False, 'from caffe2.python import core\n'), ((1999, 2022), 'numpy.random.shuffle', 'np.random.shuffle', (['mask'], {}), '(mask)\n', (2016, 2022), True, 'import numpy as np\n'), ((2283, 2357), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""SparseToDenseMask"""', 'input_str', 'output_str'], {'mask': 'mask'}), "('SparseToDenseMask', input_str, output_str, mask=mask)\n", (2302, 2357), False, 'from caffe2.python import core\n'), ((2925, 2953), 'numpy.random.randint', 'np.random.randint', (['(5)'], {'size': 'N'}), '(5, size=N)\n', (2942, 2953), True, 'import numpy as np\n'), ((3172, 3184), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (3181, 3184), True, 'import numpy as np\n'), ((3193, 3216), 'numpy.random.shuffle', 'np.random.shuffle', (['mask'], {}), '(mask)\n', (3210, 3216), True, 'import numpy as np\n'), ((3231, 3343), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""SparseToDenseMask"""', "['indices', 'values', 'default', 'lengths']", "['output']"], {'mask': 'mask'}), "('SparseToDenseMask', ['indices', 'values', 'default',\n 'lengths'], ['output'], mask=mask)\n", (3250, 3343), False, 'from caffe2.python import core\n'), ((373, 391), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(10)'], {}), '(1, 
10)\n', (384, 391), True, 'import hypothesis.strategies as st\n'), ((395, 412), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(5)'], {}), '(1, 5)\n', (406, 412), True, 'import hypothesis.strategies as st\n'), ((436, 449), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (447, 449), True, 'import hypothesis.strategies as st\n'), ((1796, 1824), 'numpy.random.randint', 'np.random.randint', (['(5)'], {'size': 'N'}), '(5, size=N)\n', (1813, 1824), True, 'import numpy as np\n'), ((1965, 1977), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (1974, 1977), True, 'import numpy as np\n'), ((1477, 1495), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(10)'], {}), '(1, 10)\n', (1488, 1495), True, 'import hypothesis.strategies as st\n'), ((1499, 1516), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(5)'], {}), '(1, 5)\n', (1510, 1516), True, 'import hypothesis.strategies as st\n'), ((1540, 1553), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1551, 1553), True, 'import hypothesis.strategies as st\n'), ((2651, 2669), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(10)'], {}), '(1, 10)\n', (2662, 2669), True, 'import hypothesis.strategies as st\n'), ((2673, 2690), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(5)'], {}), '(1, 5)\n', (2684, 2690), True, 'import hypothesis.strategies as st\n'), ((2707, 2724), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(3)'], {}), '(1, 3)\n', (2718, 2724), True, 'import hypothesis.strategies as st\n'), ((694, 714), 'numpy.random.rand', 'np.random.rand', (['N', '(2)'], {}), '(N, 2)\n', (708, 714), True, 'import numpy as np\n'), ((752, 769), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (766, 769), True, 'import numpy as np\n'), ((1855, 1875), 'numpy.random.rand', 'np.random.rand', (['N', '(2)'], {}), '(N, 2)\n', (1869, 1875), True, 'import numpy as np\n'), ((1913, 1930), 'numpy.random.rand', 'np.random.rand', 
(['(2)'], {}), '(2)\n', (1927, 1930), True, 'import numpy as np\n'), ((3115, 3137), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (3129, 3137), True, 'import numpy as np\n'), ((555, 583), 'numpy.random.randint', 'np.random.randint', (['k'], {'size': 'n'}), '(k, size=n)\n', (572, 583), True, 'import numpy as np\n'), ((1670, 1698), 'numpy.random.randint', 'np.random.randint', (['k'], {'size': 'n'}), '(k, size=n)\n', (1687, 1698), True, 'import numpy as np\n'), ((2832, 2860), 'numpy.random.randint', 'np.random.randint', (['k'], {'size': 'n'}), '(k, size=n)\n', (2849, 2860), True, 'import numpy as np\n'), ((2970, 3000), 'numpy.random.randint', 'np.random.randint', (['(5)'], {'size': 'dim'}), '(5, size=dim)\n', (2987, 3000), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
mean = [69, 0]
cov = [[15, 8], [8, 15]]
np.random.seed(5)
x, y = np.random.multivariate_normal(mean, cov, 2000).T
y += 180
plt.scatter(x, y, color='m')
plt.title("Men's Height vs Weight")
plt.xlabel("Height (in)")
plt.ylabel("Weight (lbs)")
plt.show()
| [
"matplotlib.pyplot.ylabel",
"numpy.random.multivariate_normal",
"matplotlib.pyplot.xlabel",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((115, 132), 'numpy.random.seed', 'np.random.seed', (['(5)'], {}), '(5)\n', (129, 132), True, 'import numpy as np\n'), ((199, 227), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'color': '"""m"""'}), "(x, y, color='m')\n", (210, 227), True, 'import matplotlib.pyplot as plt\n'), ((228, 263), 'matplotlib.pyplot.title', 'plt.title', (['"""Men\'s Height vs Weight"""'], {}), '("Men\'s Height vs Weight")\n', (237, 263), True, 'import matplotlib.pyplot as plt\n'), ((264, 289), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Height (in)"""'], {}), "('Height (in)')\n", (274, 289), True, 'import matplotlib.pyplot as plt\n'), ((290, 316), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Weight (lbs)"""'], {}), "('Weight (lbs)')\n", (300, 316), True, 'import matplotlib.pyplot as plt\n'), ((317, 327), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (325, 327), True, 'import matplotlib.pyplot as plt\n'), ((140, 186), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', '(2000)'], {}), '(mean, cov, 2000)\n', (169, 186), True, 'import numpy as np\n')] |
import os
import cv2
from tqdm import tqdm
import numpy as np
from semantic_segmentation.data_structure.folder import Folder
from semantic_segmentation.options.config import load_config
from semantic_segmentation.convolutional_neural_network.semantic_segmentation_model import SemanticSegmentationModel
class MaskHandler:
    """Run a semantic-segmentation model and turn its color-coded output
    into binary masks for a set of target classes."""

    def __init__(self, model_folder, target_classes):
        """Load the model config from model_folder and build the model.

        target_classes: iterable of class names (keys of the model's
        color_coding dict) that should be marked 1 in the mask.
        """
        cfg = load_config(model_folder)
        self.model_h = SemanticSegmentationModel(model_folder, cfg)
        self.model_h.batch_size = 1
        self.model_h.build()
        self.target_classes = target_classes

    def mask_tag(self, tag):
        """Predict a mask for a single tag and attach it to the tag."""
        mask = self.mask_data(tag.load_data())
        tag.set_mask(mask)

    def mask_tag_set(self, tag_set):
        """Mask every tag in a tag set (mapping tag-id -> tag) in place."""
        print("Masking tags in set...")
        for tid in tqdm(tag_set):
            self.mask_tag(tag_set[tid])
        return tag_set

    def mask_data(self, data):
        """Return an (h, w) float mask: 1 where the predicted color matches
        any target class color, 0 elsewhere.

        Vectorized: the previous implementation compared every pixel in a
        triple Python loop (O(h*w*classes)); whole-array comparison does
        the same work at numpy speed.
        """
        color_coding = self.model_h.predict(data)
        h, w = color_coding.shape[:2]
        mask = np.zeros((h, w))
        for tc in self.target_classes:
            code = np.asarray(self.model_h.color_coding[tc][1])
            # Compare per pixel over the trailing (channel) axis; the reshape
            # keeps this correct for both (h, w) and (h, w, c) predictions,
            # matching the old per-pixel `a.all()` check.
            hit = (color_coding == code).reshape(h, w, -1).all(axis=-1)
            mask[hit] = 1
        return mask

    def mask_data_set(self, path_to_data_set):
        """Predict masks for every image in <path_to_data_set>/images and
        write them as PNGs into <path_to_data_set>/masks."""
        ifol = os.path.join(path_to_data_set, "images")
        mfol = Folder(os.path.join(path_to_data_set, "masks"))
        mfol.check_n_make_dir()
        for img_f in tqdm(os.listdir(ifol)):
            img_data = cv2.imread(os.path.join(ifol, img_f))
            mask = self.mask_data(img_data)
            cv2.imwrite(os.path.join(mfol.path(), img_f[:-4] + ".png"), mask)
| [
"os.listdir",
"tqdm.tqdm",
"os.path.join",
"semantic_segmentation.convolutional_neural_network.semantic_segmentation_model.SemanticSegmentationModel",
"numpy.zeros",
"semantic_segmentation.options.config.load_config"
] | [((394, 419), 'semantic_segmentation.options.config.load_config', 'load_config', (['model_folder'], {}), '(model_folder)\n', (405, 419), False, 'from semantic_segmentation.options.config import load_config\n'), ((443, 487), 'semantic_segmentation.convolutional_neural_network.semantic_segmentation_model.SemanticSegmentationModel', 'SemanticSegmentationModel', (['model_folder', 'cfg'], {}), '(model_folder, cfg)\n', (468, 487), False, 'from semantic_segmentation.convolutional_neural_network.semantic_segmentation_model import SemanticSegmentationModel\n'), ((800, 813), 'tqdm.tqdm', 'tqdm', (['tag_set'], {}), '(tag_set)\n', (804, 813), False, 'from tqdm import tqdm\n'), ((1013, 1029), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (1021, 1029), True, 'import numpy as np\n'), ((1368, 1408), 'os.path.join', 'os.path.join', (['path_to_data_set', '"""images"""'], {}), "(path_to_data_set, 'images')\n", (1380, 1408), False, 'import os\n'), ((1431, 1470), 'os.path.join', 'os.path.join', (['path_to_data_set', '"""masks"""'], {}), "(path_to_data_set, 'masks')\n", (1443, 1470), False, 'import os\n'), ((1530, 1546), 'os.listdir', 'os.listdir', (['ifol'], {}), '(ifol)\n', (1540, 1546), False, 'import os\n'), ((1583, 1608), 'os.path.join', 'os.path.join', (['ifol', 'img_f'], {}), '(ifol, img_f)\n', (1595, 1608), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pandas as pd
from scipy.integrate import odeint,quad
from scipy.stats import kde,beta
import seaborn as sns
from importlib import reload
pi=np.pi
import ucovid
####### Computation of the P indicator of Heesterbeek and Roberts
####### 28 September 2020
# A 1-periodic matrix is supplied via the function gena
def pindic(gena,T,nbpts=100,voir=False):
    """Compute the P indicator for the T-periodic matrix family gena.

    gena(s) must return the system matrix A at (normalized) time s, with
    gena 1-periodic; T is the period, nbpts the number of integration
    points per period.  Returns P, or the pair (spectral radius, P) when
    voir is True (debug mode, which also plots intermediate quantities).
    """
    def a(t):
        # Periodic matrix rescaled to period T.
        return(gena(t/T))
    def msisi(x,t):
        # Right-hand side of x' = A(t) x.
        return(np.dot(a(t),x))
    # Determine the fundamental solution.
    timeint=np.arange(0,T+1/nbpts,T/nbpts)
    dim=(a(0).shape)[0]
    z=np.zeros(shape=(nbpts+1,dim,dim))
    for i in range(dim):
        y=np.zeros(shape=dim)
        y[i]=1.0
        z[:,:,i]=np.array(odeint(msisi,y,timeint))
    # The monodromy matrix is obtained by taking as columns the values
    # of the solutions at time T.
    E=np.array([z[-1,:,i] for i in range(dim)])
    E=E.transpose()
    l,v=ucovid.vecetspectralrad(E)
    # Then consider the solution x spawned by the eigenvector of the
    # monodromy matrix.
    x=v[0]*z[:,:,0]
    for i in np.arange(1,dim):
        x=x+v[i]*z[:,:,i]
    # Assume the diagonal entries of A are constant and negative,
    # which simplifies the computations below.
    P=1.0
    azero=a(0)
    if voir:
        print("azero=",azero)
    # Correction factor to obtain a periodic solution.
    corr=np.exp(-timeint*np.log(l))
    for i in range(dim):
        xi=x[:,i]
        ip1=(i+1)% dim
        xm=np.array([(a(timeint[j])[ip1,i])*xi[j] for j in range(len(xi))])
        if voir:
            # NOTE(review): labels look truncated ("x[" without closing
            # bracket) — cosmetic only, affects the debug plot legend.
            plt.plot(xi*corr,label="x["+str(i))
            plt.plot(xm,label="xm["+str(i))
            print("P=",P,"sommes : xi=",np.sum(xi),"xmi=",np.sum(xm),-azero[ip1,ip1])
        P=P*(np.sum(xm))
        P=P/(np.sum(xi))
        P=P/(-azero[ip1,ip1])
    if voir:
        plt.plot([a(timeint[j])[1,0] for j in range(len(xi))],label="a(t)")
        plt.legend()
        return(l,P)
    return(P)
def genAH(t):
    """Periodic generator of the African horse sickness model with epsilon fixed at 0.5."""
    return ucovid.genafricanhorseper(epsilon=0.5, t=t)
# Module-level sanity value: the African-horse generator evaluated at t=0.
azero=genAH(0)
#pindic(genAH,T=1)
def genazero(t):
    """Constant (time-independent) 2x2 generator used as a baseline.

    The argument t is ignored; the same matrix is returned at every time.
    """
    constant_matrix = np.array([[-2, 2],
                                [1, -1]])
    return constant_matrix
# Now examine how P depends on epsilon
def pepsindic(epsmax, genepsa, T, nbeps=50, voir=False):
    """Compute the P indicator over nbeps epsilon values in [0, epsmax].

    genepsa(epsilon, t) must return the periodic matrix for a given
    epsilon; T is the period.  Optionally plots log(P) against epsilon.
    Returns the array of P values.
    """
    eps_grid = np.linspace(0.0, epsmax, nbeps)
    p_values = np.zeros(shape=nbeps)
    for idx, eps in enumerate(eps_grid):
        def frozen_gen(t, _e=eps):
            # Freeze the current epsilon into a one-argument generator.
            return genepsa(_e, t)
        p_values[idx] = pindic(frozen_gen, T=T)
    if voir:
        plt.plot(eps_grid, np.log(p_values))
    return p_values
#for the African horse sickness model
#pepsindic(0.5,ucovid.genafricanhorseper,T=1,voir=True)
#for the Bacaer model
def genbaca(e, t):
    """Bacaer example generator at coupling epsilon=e and time t (b12=0, b21=1)."""
    return ucovid.genex2per(epsilon=e, t=t, b12=0, b21=1)
#pepsindic(0.5,genbaca,T=1,voir=True)
def genbaca1(t):
    """Bacaer generator with epsilon fixed at 1."""
    return genbaca(1, t)
def genbaca0(t):
    """Bacaer generator with epsilon fixed at 0."""
    return genbaca(0, t)
#now plot the three curves
#(1/T) ln(lambda_d), MSA and ln(P) as functions of epsilon
def plamsa(gena, epsilonmax=0.5, T=1, recalage=True):
    """Plot (1/T) ln(lambda_d), the MSA integral and ln(P/P[0]) against epsilon.

    gena(epsilon, t) must return the periodic matrix family; when
    recalage is True each curve is shifted so that it starts at 0.
    """
    n_points = 50
    eps_grid = np.linspace(0.0, epsilonmax, n_points)
    stats_per_eps = np.array([ucovid.lamsaetapp(gena, epsilon=e, T=T) for e in eps_grid])
    floquet = stats_per_eps[:, 0]
    growth_rate = np.log(floquet) / T
    if recalage:
        growth_rate = growth_rate - growth_rate[0]
    plt.plot(eps_grid, growth_rate, label=r"$\frac{1}{T} \ln(\lambda_d)$")  # clearly quadratic in epsilon
    msa = stats_per_eps[:, 1]
    if recalage:
        msa = msa - msa[0]
    plt.plot(eps_grid, msa, label=r"$MSA=\int s(A(u))\, du$")
    p_values = np.zeros(shape=n_points)
    for idx, eps in enumerate(eps_grid):
        def frozen_gen(t, _e=eps):
            # Freeze the current epsilon into a one-argument generator.
            return gena(_e, t)
        p_values[idx] = pindic(frozen_gen, T=T)
    print("P[0]=", p_values[0])
    p_values = p_values / p_values[0]
    plt.plot(eps_grid, np.log(p_values), label="ln(P/P[0])")
    plt.xlabel(r"$\epsilon$")
    plt.legend()
#plt.savefig("ex2msaperiodic.pdf",bbox_inches='tight' )
#now check whether the P indicator survives a shift of the spectral radius,
#i.e. whether it varies in the right direction
def genbacadecal(e, t, dec=-0.5):
    """Bacaer generator shifted by dec along the diagonal (spectral shift)."""
    diagonal_shift = dec * np.identity(2)
    return genbaca(e, t) + diagonal_shift
| [
"numpy.identity",
"ucovid.genex2per",
"scipy.integrate.odeint",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.log",
"ucovid.lamsaetapp",
"ucovid.genafricanhorseper",
"ucovid.vecetspectralrad",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy... | [((613, 651), 'numpy.arange', 'np.arange', (['(0)', '(T + 1 / nbpts)', '(T / nbpts)'], {}), '(0, T + 1 / nbpts, T / nbpts)\n', (622, 651), True, 'import numpy as np\n'), ((674, 711), 'numpy.zeros', 'np.zeros', ([], {'shape': '(nbpts + 1, dim, dim)'}), '(shape=(nbpts + 1, dim, dim))\n', (682, 711), True, 'import numpy as np\n'), ((1016, 1042), 'ucovid.vecetspectralrad', 'ucovid.vecetspectralrad', (['E'], {}), '(E)\n', (1039, 1042), False, 'import ucovid\n'), ((1171, 1188), 'numpy.arange', 'np.arange', (['(1)', 'dim'], {}), '(1, dim)\n', (1180, 1188), True, 'import numpy as np\n'), ((2091, 2134), 'ucovid.genafricanhorseper', 'ucovid.genafricanhorseper', ([], {'epsilon': '(0.5)', 't': 't'}), '(epsilon=0.5, t=t)\n', (2116, 2134), False, 'import ucovid\n'), ((2200, 2228), 'numpy.array', 'np.array', (['[[-2, 2], [1, -1]]'], {}), '([[-2, 2], [1, -1]])\n', (2208, 2228), True, 'import numpy as np\n'), ((2348, 2379), 'numpy.linspace', 'np.linspace', (['(0.0)', 'epsmax', 'nbeps'], {}), '(0.0, epsmax, nbeps)\n', (2359, 2379), True, 'import numpy as np\n'), ((2385, 2406), 'numpy.zeros', 'np.zeros', ([], {'shape': 'nbeps'}), '(shape=nbeps)\n', (2393, 2406), True, 'import numpy as np\n'), ((2723, 2769), 'ucovid.genex2per', 'ucovid.genex2per', ([], {'epsilon': 'e', 't': 't', 'b12': '(0)', 'b21': '(1)'}), '(epsilon=e, t=t, b12=0, b21=1)\n', (2739, 2769), False, 'import ucovid\n'), ((3062, 3097), 'numpy.linspace', 'np.linspace', (['(0.0)', 'epsilonmax', 'nbeps'], {}), '(0.0, epsilonmax, nbeps)\n', (3073, 3097), True, 'import numpy as np\n'), ((3268, 3332), 'matplotlib.pyplot.plot', 'plt.plot', (['ept', 'ustlnlam'], {'label': '"""$\\\\frac{1}{T} \\\\ln(\\\\lambda_d)$"""'}), "(ept, ustlnlam, label='$\\\\frac{1}{T} \\\\ln(\\\\lambda_d)$')\n", (3276, 3332), True, 'import matplotlib.pyplot as plt\n'), ((3426, 3479), 'matplotlib.pyplot.plot', 'plt.plot', (['ept', 'msa'], {'label': '"""$MSA=\\\\int s(A(u))\\\\, du$"""'}), "(ept, msa, label='$MSA=\\\\int s(A(u))\\\\, du$')\n", 
(3434, 3479), True, 'import matplotlib.pyplot as plt\n'), ((3484, 3505), 'numpy.zeros', 'np.zeros', ([], {'shape': 'nbeps'}), '(shape=nbeps)\n', (3492, 3505), True, 'import numpy as np\n'), ((3719, 3744), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\epsilon$"""'], {}), "('$\\\\epsilon$')\n", (3729, 3744), True, 'import matplotlib.pyplot as plt\n'), ((3749, 3761), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3759, 3761), True, 'import matplotlib.pyplot as plt\n'), ((743, 762), 'numpy.zeros', 'np.zeros', ([], {'shape': 'dim'}), '(shape=dim)\n', (751, 762), True, 'import numpy as np\n'), ((2010, 2022), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2020, 2022), True, 'import matplotlib.pyplot as plt\n'), ((3194, 3206), 'numpy.log', 'np.log', (['lamd'], {}), '(lamd)\n', (3200, 3206), True, 'import numpy as np\n'), ((3684, 3694), 'numpy.log', 'np.log', (['pe'], {}), '(pe)\n', (3690, 3694), True, 'import numpy as np\n'), ((806, 831), 'scipy.integrate.odeint', 'odeint', (['msisi', 'y', 'timeint'], {}), '(msisi, y, timeint)\n', (812, 831), False, 'from scipy.integrate import odeint, quad\n'), ((1846, 1856), 'numpy.sum', 'np.sum', (['xm'], {}), '(xm)\n', (1852, 1856), True, 'import numpy as np\n'), ((1871, 1881), 'numpy.sum', 'np.sum', (['xi'], {}), '(xi)\n', (1877, 1881), True, 'import numpy as np\n'), ((2557, 2567), 'numpy.log', 'np.log', (['pe'], {}), '(pe)\n', (2563, 2567), True, 'import numpy as np\n'), ((3112, 3151), 'ucovid.lamsaetapp', 'ucovid.lamsaetapp', (['gena'], {'epsilon': 'e', 'T': 'T'}), '(gena, epsilon=e, T=T)\n', (3129, 3151), False, 'import ucovid\n'), ((3994, 4008), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (4005, 4008), True, 'import numpy as np\n'), ((1485, 1494), 'numpy.log', 'np.log', (['l'], {}), '(l)\n', (1491, 1494), True, 'import numpy as np\n'), ((1787, 1797), 'numpy.sum', 'np.sum', (['xi'], {}), '(xi)\n', (1793, 1797), True, 'import numpy as np\n'), ((1805, 1815), 'numpy.sum', 'np.sum', 
(['xm'], {}), '(xm)\n', (1811, 1815), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import gym
import numpy as np
from gym import error, spaces, utils
from gym.utils import seeding
from numpy import array, linspace, deg2rad, zeros
from sympy import symbols
from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point, inertia, RigidBody, KanesMethod
from scipy.integrate import odeint
from pydy.codegen.ode_function_generators import generate_ode_function
import matplotlib.pyplot as plt
class MultipendulumEnv(gym.Env):
    """Gym environment for an n-link pendulum actuated by joint torques.

    The equations of motion are derived symbolically with Kane's method
    (sympy.physics.mechanics) and compiled to a numeric right-hand side
    with pydy's generate_ode_function.  The state vector is
    [theta_1..theta_n, omega_1..omega_n]; an action is the vector of n
    joint torques.

    Fix in this revision: reset() and sample_action() previously drew from
    the global numpy RNG, so seed() had no effect; they now use the seeded
    self.np_random.
    """
    metadata = {'render.modes': ['human']}

    def __init__(self):
        self.num_links = 5  # Number of links
        total_link_length = 1.
        total_link_mass = 1.
        self.ind_link_length = total_link_length / self.num_links
        ind_link_com_length = self.ind_link_length / 2.
        ind_link_mass = total_link_mass / self.num_links
        ind_link_inertia = ind_link_mass * (ind_link_com_length ** 2)
        # =======================#
        # Parameters for step() #
        # =======================#
        # Maximum number of steps before episode termination
        self.max_steps = 200
        # For ODE integration
        self.dt = .0001  # Simulation time step (value is 1e-4; original comment said 1 ms — TODO confirm units)
        self.sim_steps = 51  # Number of simulation steps in 1 learning step
        self.dt_step = np.linspace(0., self.dt * self.sim_steps, num=self.sim_steps)  # Learning time step
        # Termination conditions for simulation
        self.num_steps = 0  # Step counter
        self.done = False
        # For visualisation
        self.viewer = None
        self.ax = False
        # Constraints for observation
        min_angle = -np.pi  # Angle
        max_angle = np.pi
        min_omega = -10.  # Angular velocity
        max_omega = 10.
        min_torque = -10.  # Torque
        max_torque = 10.
        low_state_angle = np.full(self.num_links, min_angle)  # Min angle
        low_state_omega = np.full(self.num_links, min_omega)  # Min angular velocity
        low_state = np.append(low_state_angle, low_state_omega)
        high_state_angle = np.full(self.num_links, max_angle)  # Max angle
        high_state_omega = np.full(self.num_links, max_omega)  # Max angular velocity
        high_state = np.append(high_state_angle, high_state_omega)
        low_action = np.full(self.num_links, min_torque)  # Min torque
        high_action = np.full(self.num_links, max_torque)  # Max torque
        self.action_space = spaces.Box(low=low_action, high=high_action)
        self.observation_space = spaces.Box(low=low_state, high=high_state)
        # Minimum reward (most negative achievable, used to normalise rewards)
        self.min_reward = -(max_angle ** 2 + .1 * max_omega ** 2 + .001 * max_torque ** 2) * self.num_links
        # Seeding
        self.seed()
        # ==============#
        # Orientations #
        # ==============#
        self.inertial_frame = ReferenceFrame('I')
        self.link_frame = []
        self.theta = []
        for i in range(self.num_links):
            temp_angle_name = "theta{}".format(i + 1)
            temp_link_name = "L{}".format(i + 1)
            self.theta.append(dynamicsymbols(temp_angle_name))
            self.link_frame.append(ReferenceFrame(temp_link_name))
            if i == 0:  # First link
                self.link_frame[i].orient(self.inertial_frame, 'Axis', (self.theta[i], self.inertial_frame.z))
            else:  # Second link, third link...
                self.link_frame[i].orient(self.link_frame[i - 1], 'Axis', (self.theta[i], self.link_frame[i - 1].z))
        # =================#
        # Point Locations #
        # =================#
        # --------#
        # Joints #
        # --------#
        self.link_length = []
        self.link_joint = []
        for i in range(self.num_links):
            temp_link_length_name = "l_L{}".format(i + 1)
            temp_link_joint_name = "A{}".format(i)
            self.link_length.append(symbols(temp_link_length_name))
            self.link_joint.append(Point(temp_link_joint_name))
            if i > 0:  # Set position started from link2, then link3, link4...
                self.link_joint[i].set_pos(self.link_joint[i - 1], self.link_length[i - 1] * self.link_frame[i - 1].y)
        # --------------------------#
        # Centre of mass locations #
        # --------------------------#
        self.link_com_length = []
        self.link_mass_centre = []
        for i in range(self.num_links):
            temp_link_com_length_name = "d_L{}".format(i + 1)
            temp_link_mass_centre_name = "L{}_o".format(i + 1)
            self.link_com_length.append(symbols(temp_link_com_length_name))
            self.link_mass_centre.append(Point(temp_link_mass_centre_name))
            self.link_mass_centre[i].set_pos(self.link_joint[i], self.link_com_length[i] * self.link_frame[i].y)
        # ===========================================#
        # Define kinematical differential equations #
        # ===========================================#
        self.omega = []
        self.kinematical_differential_equations = []
        self.time = symbols('t')
        for i in range(self.num_links):
            temp_omega_name = "omega{}".format(i + 1)
            self.omega.append(dynamicsymbols(temp_omega_name))
            self.kinematical_differential_equations.append(self.omega[i] - self.theta[i].diff(self.time))
        # ====================#
        # Angular Velocities #
        # ====================#
        for i in range(self.num_links):
            if i == 0:  # First link
                self.link_frame[i].set_ang_vel(self.inertial_frame, self.omega[i] * self.inertial_frame.z)
            else:  # Second link, third link...
                self.link_frame[i].set_ang_vel(self.link_frame[i - 1], self.omega[i] * self.link_frame[i - 1].z)
        # ===================#
        # Linear Velocities #
        # ===================#
        for i in range(self.num_links):
            if i == 0:  # First link
                self.link_joint[i].set_vel(self.inertial_frame, 0)
            else:  # Second link, third link...
                self.link_joint[i].v2pt_theory(self.link_joint[i - 1], self.inertial_frame, self.link_frame[i - 1])
            self.link_mass_centre[i].v2pt_theory(self.link_joint[i], self.inertial_frame, self.link_frame[i])
        # ======#
        # Mass #
        # ======#
        self.link_mass = []
        for i in range(self.num_links):
            temp_link_mass_name = "m_L{}".format(i + 1)
            self.link_mass.append(symbols(temp_link_mass_name))
        # =========#
        # Inertia #
        # =========#
        self.link_inertia = []
        self.link_inertia_dyadic = []
        self.link_central_inertia = []
        for i in range(self.num_links):
            temp_link_inertia_name = "I_L{}z".format(i + 1)
            self.link_inertia.append(symbols(temp_link_inertia_name))
            self.link_inertia_dyadic.append(inertia(self.link_frame[i], 0, 0, self.link_inertia[i]))
            self.link_central_inertia.append((self.link_inertia_dyadic[i], self.link_mass_centre[i]))
        # ==============#
        # Rigid Bodies #
        # ==============#
        self.link = []
        for i in range(self.num_links):
            temp_link_name = "link{}".format(i + 1)
            self.link.append(RigidBody(temp_link_name, self.link_mass_centre[i], self.link_frame[i],
                                       self.link_mass[i], self.link_central_inertia[i]))
        # =========#
        # Gravity #
        # =========#
        self.g = symbols('g')
        self.link_grav_force = []
        for i in range(self.num_links):
            self.link_grav_force.append((self.link_mass_centre[i],
                                         -self.link_mass[i] * self.g * self.inertial_frame.y))
        # ===============#
        # Joint Torques #
        # ===============#
        self.link_joint_torque = []
        self.link_torque = []
        for i in range(self.num_links):
            temp_link_joint_torque_name = "T_a{}".format(i + 1)
            self.link_joint_torque.append(dynamicsymbols(temp_link_joint_torque_name))
        for i in range(self.num_links):
            if (i + 1) == self.num_links:  # Last link
                self.link_torque.append((self.link_frame[i],
                                         self.link_joint_torque[i] * self.inertial_frame.z))
            else:  # Other links
                self.link_torque.append((self.link_frame[i],
                                         self.link_joint_torque[i] * self.inertial_frame.z
                                         - self.link_joint_torque[i + 1] * self.inertial_frame.z))
        # =====================#
        # Equations of Motion #
        # =====================#
        self.coordinates = []
        self.speeds = []
        self.loads = []
        self.bodies = []
        for i in range(self.num_links):
            self.coordinates.append(self.theta[i])
            self.speeds.append(self.omega[i])
            self.loads.append(self.link_grav_force[i])
            self.loads.append(self.link_torque[i])
            self.bodies.append(self.link[i])
        self.kane = KanesMethod(self.inertial_frame,
                                self.coordinates,
                                self.speeds,
                                self.kinematical_differential_equations)
        self.fr, self.frstar = self.kane.kanes_equations(self.bodies, self.loads)
        self.mass_matrix = self.kane.mass_matrix_full
        self.forcing_vector = self.kane.forcing_full
        # =============================#
        # List the symbolic arguments #
        # =============================#
        # -----------#
        # Constants #
        # -----------#
        self.constants = []
        for i in range(self.num_links):
            if (i + 1) != self.num_links:
                self.constants.append(self.link_length[i])
            self.constants.append(self.link_com_length[i])
            self.constants.append(self.link_mass[i])
            self.constants.append(self.link_inertia[i])
        self.constants.append(self.g)
        # --------------#
        # Time Varying #
        # --------------#
        self.coordinates = []
        self.speeds = []
        self.specified = []
        for i in range(self.num_links):
            self.coordinates.append(self.theta[i])
            self.speeds.append(self.omega[i])
            self.specified.append(self.link_joint_torque[i])
        # =======================#
        # Generate RHS Function #
        # =======================#
        self.right_hand_side = generate_ode_function(self.forcing_vector, self.coordinates, self.speeds,
                                                    self.constants, mass_matrix=self.mass_matrix,
                                                    specifieds=self.specified)
        # ==============================#
        # Specify Numerical Quantities #
        # ==============================#
        self.x = np.zeros(self.num_links * 2)
        self.x[:self.num_links] = deg2rad(2.0)
        self.numerical_constants = []
        for i in range(self.num_links):
            if (i + 1) != self.num_links:
                self.numerical_constants.append(self.ind_link_length)
            self.numerical_constants.append(ind_link_com_length)
            self.numerical_constants.append(ind_link_mass)
            self.numerical_constants.append(ind_link_inertia)
        self.numerical_constants.append(9.81)

    def seed(self, seed=None):
        """Seed the environment's private RNG used by reset()/sample_action()."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def reset(self):
        """Reset the step counter and draw a random initial state."""
        self.num_steps = 0  # Step counter
        self.done = False  # Done flag
        # Draw from the seeded RNG so seed() makes episodes reproducible.
        # standard_normal() exists on both RandomState and Generator.
        self.x = self.np_random.standard_normal(self.num_links * 2)  # State
        return self._get_obs()

    def _get_obs(self):
        """Return the current observation (the raw state vector)."""
        return self.x

    def sample_action(self):
        """Sample a random torque vector from the seeded RNG."""
        return self.np_random.standard_normal(self.num_links)

    def step(self, action):
        """Advance the simulation by one learning step.

        action: vector of self.num_links joint torques.
        Returns (observation, normalised reward, done flag, info dict).
        """
        if self.done or self.num_steps > self.max_steps:
            self.done = True
            # Normalised reward
            reward = 0.
            return self.x, reward, self.done, {}
        else:
            # Increment the step counter
            self.num_steps += 1
            # Simulation: integrate the ODE over one learning step
            self.x = odeint(self.right_hand_side, self.x, self.dt_step, args=(action, self.numerical_constants))[-1]
            # Normalise joint angles to -pi ~ pi
            self.x[:self.num_links] = self.angle_normalise(self.x[:self.num_links])
            # n-link case
            reward = 0.
            # Cost due to angle and torque
            for i in range(self.num_links):
                reward -= (self.x[i] ** 2 + .001 * action[i] ** 2)
            # Cost due to angular velocity
            for i in range(self.num_links, self.num_links * 2):
                reward -= (.1 * self.x[i] ** 2)
            # Normalised reward (0 = worst possible, 1 = zero cost)
            reward = (reward - self.min_reward) / (-self.min_reward)
            return self.x, reward, self.done, {}

    def angle_normalise(self, angle_input):
        """Wrap angles into the interval [-pi, pi)."""
        return (((angle_input + np.pi) % (2 * np.pi)) - np.pi)

    def render(self, mode='human'):
        """Draw the pendulum chain as connected line segments with matplotlib."""
        if not self.ax:
            fig, ax = plt.subplots()
            ax.set_xlim([-1.5, 1.5])
            ax.set_ylim([-1.5, 1.5])
            ax.set_aspect('equal')
            self.ax = ax
        else:
            self.ax.clear()
            self.ax.set_xlim([-1.5, 1.5])
            self.ax.set_ylim([-1.5, 1.5])
            self.ax.set_aspect('equal')
        x = [0.]
        y = [0.]
        # Accumulate joint positions link by link (angles measured from vertical).
        for i in range(self.num_links):
            x.append(x[i] + self.ind_link_length*np.cos(self.x[i] + np.pi / 2.))
            y.append(y[i] + self.ind_link_length*np.sin(self.x[i] + np.pi / 2.))
        plt.plot(x, y)
        plt.pause(0.01)
| [
"sympy.physics.mechanics.Point",
"sympy.physics.mechanics.RigidBody",
"numpy.sin",
"gym.utils.seeding.np_random",
"matplotlib.pyplot.plot",
"sympy.physics.mechanics.KanesMethod",
"numpy.linspace",
"sympy.physics.mechanics.inertia",
"pydy.codegen.ode_function_generators.generate_ode_function",
"sci... | [((1309, 1371), 'numpy.linspace', 'np.linspace', (['(0.0)', '(self.dt * self.sim_steps)'], {'num': 'self.sim_steps'}), '(0.0, self.dt * self.sim_steps, num=self.sim_steps)\n', (1320, 1371), True, 'import numpy as np\n'), ((1856, 1890), 'numpy.full', 'np.full', (['self.num_links', 'min_angle'], {}), '(self.num_links, min_angle)\n', (1863, 1890), True, 'import numpy as np\n'), ((1930, 1964), 'numpy.full', 'np.full', (['self.num_links', 'min_omega'], {}), '(self.num_links, min_omega)\n', (1937, 1964), True, 'import numpy as np\n'), ((2009, 2052), 'numpy.append', 'np.append', (['low_state_angle', 'low_state_omega'], {}), '(low_state_angle, low_state_omega)\n', (2018, 2052), True, 'import numpy as np\n'), ((2080, 2114), 'numpy.full', 'np.full', (['self.num_links', 'max_angle'], {}), '(self.num_links, max_angle)\n', (2087, 2114), True, 'import numpy as np\n'), ((2155, 2189), 'numpy.full', 'np.full', (['self.num_links', 'max_omega'], {}), '(self.num_links, max_omega)\n', (2162, 2189), True, 'import numpy as np\n'), ((2235, 2280), 'numpy.append', 'np.append', (['high_state_angle', 'high_state_omega'], {}), '(high_state_angle, high_state_omega)\n', (2244, 2280), True, 'import numpy as np\n'), ((2302, 2337), 'numpy.full', 'np.full', (['self.num_links', 'min_torque'], {}), '(self.num_links, min_torque)\n', (2309, 2337), True, 'import numpy as np\n'), ((2374, 2409), 'numpy.full', 'np.full', (['self.num_links', 'max_torque'], {}), '(self.num_links, max_torque)\n', (2381, 2409), True, 'import numpy as np\n'), ((2452, 2496), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 'low_action', 'high': 'high_action'}), '(low=low_action, high=high_action)\n', (2462, 2496), False, 'from gym import error, spaces, utils\n'), ((2530, 2572), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 'low_state', 'high': 'high_state'}), '(low=low_state, high=high_state)\n', (2540, 2572), False, 'from gym import error, spaces, utils\n'), ((2854, 2873), 'sympy.physics.mechanics.ReferenceFrame', 
'ReferenceFrame', (['"""I"""'], {}), "('I')\n", (2868, 2873), False, 'from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point, inertia, RigidBody, KanesMethod\n'), ((5073, 5085), 'sympy.symbols', 'symbols', (['"""t"""'], {}), "('t')\n", (5080, 5085), False, 'from sympy import symbols\n'), ((7550, 7562), 'sympy.symbols', 'symbols', (['"""g"""'], {}), "('g')\n", (7557, 7562), False, 'from sympy import symbols\n'), ((9181, 9290), 'sympy.physics.mechanics.KanesMethod', 'KanesMethod', (['self.inertial_frame', 'self.coordinates', 'self.speeds', 'self.kinematical_differential_equations'], {}), '(self.inertial_frame, self.coordinates, self.speeds, self.\n kinematical_differential_equations)\n', (9192, 9290), False, 'from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point, inertia, RigidBody, KanesMethod\n'), ((10632, 10782), 'pydy.codegen.ode_function_generators.generate_ode_function', 'generate_ode_function', (['self.forcing_vector', 'self.coordinates', 'self.speeds', 'self.constants'], {'mass_matrix': 'self.mass_matrix', 'specifieds': 'self.specified'}), '(self.forcing_vector, self.coordinates, self.speeds,\n self.constants, mass_matrix=self.mass_matrix, specifieds=self.specified)\n', (10653, 10782), False, 'from pydy.codegen.ode_function_generators import generate_ode_function\n'), ((11028, 11056), 'numpy.zeros', 'np.zeros', (['(self.num_links * 2)'], {}), '(self.num_links * 2)\n', (11036, 11056), True, 'import numpy as np\n'), ((11091, 11103), 'numpy.deg2rad', 'deg2rad', (['(2.0)'], {}), '(2.0)\n', (11098, 11103), False, 'from numpy import array, linspace, deg2rad, zeros\n'), ((11590, 11613), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (11607, 11613), False, 'from gym.utils import seeding\n'), ((11757, 11792), 'numpy.random.randn', 'np.random.randn', (['(self.num_links * 2)'], {}), '(self.num_links * 2)\n', (11772, 11792), True, 'import numpy as np\n'), ((11925, 11956), 'numpy.random.randn', 
'np.random.randn', (['self.num_links'], {}), '(self.num_links)\n', (11940, 11956), True, 'import numpy as np\n'), ((13809, 13823), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (13817, 13823), True, 'import matplotlib.pyplot as plt\n'), ((13833, 13848), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (13842, 13848), True, 'import matplotlib.pyplot as plt\n'), ((13249, 13263), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (13261, 13263), True, 'import matplotlib.pyplot as plt\n'), ((3100, 3131), 'sympy.physics.mechanics.dynamicsymbols', 'dynamicsymbols', (['temp_angle_name'], {}), '(temp_angle_name)\n', (3114, 3131), False, 'from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point, inertia, RigidBody, KanesMethod\n'), ((3168, 3198), 'sympy.physics.mechanics.ReferenceFrame', 'ReferenceFrame', (['temp_link_name'], {}), '(temp_link_name)\n', (3182, 3198), False, 'from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point, inertia, RigidBody, KanesMethod\n'), ((3904, 3934), 'sympy.symbols', 'symbols', (['temp_link_length_name'], {}), '(temp_link_length_name)\n', (3911, 3934), False, 'from sympy import symbols\n'), ((3971, 3998), 'sympy.physics.mechanics.Point', 'Point', (['temp_link_joint_name'], {}), '(temp_link_joint_name)\n', (3976, 3998), False, 'from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point, inertia, RigidBody, KanesMethod\n'), ((4586, 4620), 'sympy.symbols', 'symbols', (['temp_link_com_length_name'], {}), '(temp_link_com_length_name)\n', (4593, 4620), False, 'from sympy import symbols\n'), ((4663, 4696), 'sympy.physics.mechanics.Point', 'Point', (['temp_link_mass_centre_name'], {}), '(temp_link_mass_centre_name)\n', (4668, 4696), False, 'from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point, inertia, RigidBody, KanesMethod\n'), ((5210, 5241), 'sympy.physics.mechanics.dynamicsymbols', 'dynamicsymbols', (['temp_omega_name'], 
{}), '(temp_omega_name)\n', (5224, 5241), False, 'from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point, inertia, RigidBody, KanesMethod\n'), ((6513, 6541), 'sympy.symbols', 'symbols', (['temp_link_mass_name'], {}), '(temp_link_mass_name)\n', (6520, 6541), False, 'from sympy import symbols\n'), ((6851, 6882), 'sympy.symbols', 'symbols', (['temp_link_inertia_name'], {}), '(temp_link_inertia_name)\n', (6858, 6882), False, 'from sympy import symbols\n'), ((6928, 6983), 'sympy.physics.mechanics.inertia', 'inertia', (['self.link_frame[i]', '(0)', '(0)', 'self.link_inertia[i]'], {}), '(self.link_frame[i], 0, 0, self.link_inertia[i])\n', (6935, 6983), False, 'from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point, inertia, RigidBody, KanesMethod\n'), ((7309, 7433), 'sympy.physics.mechanics.RigidBody', 'RigidBody', (['temp_link_name', 'self.link_mass_centre[i]', 'self.link_frame[i]', 'self.link_mass[i]', 'self.link_central_inertia[i]'], {}), '(temp_link_name, self.link_mass_centre[i], self.link_frame[i],\n self.link_mass[i], self.link_central_inertia[i])\n', (7318, 7433), False, 'from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point, inertia, RigidBody, KanesMethod\n'), ((8092, 8135), 'sympy.physics.mechanics.dynamicsymbols', 'dynamicsymbols', (['temp_link_joint_torque_name'], {}), '(temp_link_joint_torque_name)\n', (8106, 8135), False, 'from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point, inertia, RigidBody, KanesMethod\n'), ((12318, 12414), 'scipy.integrate.odeint', 'odeint', (['self.right_hand_side', 'self.x', 'self.dt_step'], {'args': '(action, self.numerical_constants)'}), '(self.right_hand_side, self.x, self.dt_step, args=(action, self.\n numerical_constants))\n', (12324, 12414), False, 'from scipy.integrate import odeint\n'), ((13688, 13719), 'numpy.cos', 'np.cos', (['(self.x[i] + np.pi / 2.0)'], {}), '(self.x[i] + np.pi / 2.0)\n', (13694, 13719), True, 'import numpy as np\n'), ((13769, 
13800), 'numpy.sin', 'np.sin', (['(self.x[i] + np.pi / 2.0)'], {}), '(self.x[i] + np.pi / 2.0)\n', (13775, 13800), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Identify change points throughout the entire TU of each individual sample.
The premise of this approach is to identify significant change points in the
cumulative read sum (CRS) distribution as a function of position. It identifies
the following change point types: DistalTSS, TandemTSS, DistalPolyA, TandemAPA,
Junction, Exon, and Intron.
"""
import os
import sys
import argparse
import math
from collections import defaultdict, OrderedDict, deque
from datetime import datetime
import numpy as np # v1.10.4
import peakutils # v1.0.3
import pysam # v0.9.0
import pybedtools as pb
from scipy import stats # v0.15.1
from loguru import logger
try:
from functions import sort_bedfile, run_command
except ImportError:
from src.functions import sort_bedfile, run_command
# median filter
from bisect import bisect_left, insort
from itertools import islice
# plotting
import matplotlib
matplotlib.use('pdf') # force matplotlib to not use any Xwindows backend for plotting
import matplotlib.pyplot as pyplot
from matplotlib.backends.backend_pdf import PdfPages
def bedgraph_per_gene_ss(genes, bg_plus, bg_minus, bgfile):
    """
    bedtools intersect genes with each of bg_plus and bg_minus.
    Run separately so that gene coverage is consecutive by strand.

    Fix: all files are now opened via context managers; the previous
    version leaked the two handles from open(...).read() when
    concatenating results.
    """
    # === split annotation by strand ===
    plus_bed = bgfile + '.genes.plus'
    minus_bed = bgfile + '.genes.minus'
    with open(plus_bed, 'w') as plus_out, open(minus_bed, 'w') as minus_out:
        with open(genes, 'r') as annot:
            for line in annot:
                if line.startswith('track'):
                    continue
                strand = line.rstrip().split('\t')[5]
                if strand == '+':
                    plus_out.write(line)
                elif strand == '-':
                    minus_out.write(line)
                else:
                    logger.error('do not recognize strand: ' + strand)
                    logger.error(line)
                    sys.exit(1)
    # === bedtools intersect: concatenate + & - strands ===
    sort_bedfile(bg_plus, bg_plus)
    pb.BedTool(plus_bed).intersect(bg_plus, wo=True, sorted=True).saveas(bgfile + '.plus')
    sort_bedfile(bg_minus, bg_minus)
    pb.BedTool(minus_bed).intersect(bg_minus, wo=True, sorted=True).saveas(bgfile + '.minus')
    with open(bgfile, 'w') as combined:
        for part in (bgfile + '.plus', bgfile + '.minus'):
            with open(part) as chunk:
                combined.write(chunk.read())
    # Remove intermediate files.
    for tmp in (bgfile + '.plus', bgfile + '.minus', plus_bed, minus_bed):
        os.remove(tmp)
def bedgraph_per_gene_nss(genes, bg, bgfile):
    """Bedtools intersect, non-strand-specific"""
    sort_bedfile(bg, bg)
    gene_tool = pb.BedTool(genes)
    gene_tool.intersect(bg, wo=True, sorted=True).saveas(bgfile)
def get_exon_cov(exon_list, cov_list):
    """Calculate average reads/bp in each exon and report the maximum exon coverage.

    exon_list -- list of 'start:end' strings (0-based, end-exclusive) indexing
                 into cov_list; each exon is assumed non-empty (end > start)
    cov_list  -- per-base read coverage for the gene

    Returns (average coverage over all exonic bases, highest single-exon average).
    """
    total_sum = 0
    total_len = 0
    max_cov = 0
    for exon in exon_list:
        this_start, this_end = map(int, exon.split(':'))
        exon_len = this_end - this_start
        # sum once per exon (original summed the same slice twice)
        exon_sum = sum(cov_list[this_start:this_end])
        total_sum += exon_sum
        total_len += exon_len
        max_cov = max(max_cov, float(exon_sum) / float(exon_len))
    cov_avg_all_exons = float(total_sum) / float(total_len)
    return cov_avg_all_exons, max_cov
def crs(cov_array):
    """Cumulative read sum.

    Returns (normalized cumulative absolute-step curve, per-base step array).
    When the coverage is perfectly flat the curve cannot be normalized and
    ['NA'] is returned in its place.
    """
    steps = np.ediff1d(cov_array)
    vert_array = np.insert(steps, [0], 0)  # prepend 0 so lengths match input
    vert_sum_array = np.cumsum(np.absolute(vert_array))
    peak = max(vert_sum_array)
    if peak == 0:
        return ['NA'], vert_array
    return vert_sum_array / peak, vert_array
def ks_test(vert_sum_array, make_plots, out_prefix):
"""KS test: cumulative distance vs. line y=ax"""
line_array = np.arange(0, max(vert_sum_array), max(vert_sum_array) / vert_sum_array.size)
ks_stat, ksp = stats.ks_2samp(vert_sum_array, line_array)
y0 = vert_sum_array[0]
xmax = vert_sum_array.size - 1
ymax = max(vert_sum_array)
slope = (ymax - y0) / xmax
if slope == 0:
ksp = 1
if make_plots:
x1 = np.linspace(0, xmax, vert_sum_array.size) / 1000
y1 = vert_sum_array.tolist()
x2 = [0, float(xmax) / float(1000)]
y2 = [y0, ymax]
out_plot = out_prefix.replace(':', '_') + '_crs.pdf'
pdf = PdfPages(out_plot)
fig = pyplot.figure(figsize=(3, 3))
pyplot.plot(x1, y1, color='k')
pyplot.plot(x2, y2, color='0.75')
pyplot.title('KS test p = ' + str(round(ksp, 3)))
pyplot.xlabel('Position (kb)')
pyplot.ylabel('Cumulative Vertical Distance')
pyplot.gcf().subplots_adjust(bottom=0.3, left=0.3)
pdf.savefig()
pdf.close()
pyplot.close(fig)
return ksp
def distance(x1, y1, x2, y2, x0, y0):
    """Signed distance from point (x0, y0) to the line through (x1, y1), (x2, y2)."""
    dy = y2 - y1
    dx = x2 - x1
    numerator = dy * x0 - dx * y0 + x2 * y1 - y2 * x1
    denominator = math.sqrt(dy ** 2 + dx ** 2)
    return float(numerator) / float(denominator)
def get_slope(x1, y1, x2, y2):
    """Get slope between 2 points"""
    rise = y2 - y1
    run = x2 - x1
    return float(rise) / float(run)
def median_filter(seq, window_size):
    """
    Running median over zero-padded input.

    The sequence is padded with window_size // 2 zeros on each side and one
    median is emitted per padded element: the first window_size outputs come
    from growing partial windows (sizes 1..window_size), the rest from full
    sliding windows, so the result is longer than the input.
    Reference: https://groups.google.com/forum/#!topic/comp.lang.python/0OARyHF0wtA
    """
    half = window_size // 2
    padded = [0] * half + list(seq) + [0] * half
    window = deque()   # current window, in arrival order
    ordered = []       # same values kept sorted for O(log n) median lookup
    medians = []
    for value in padded:
        if len(window) == window_size:
            # full window: evict the oldest value before admitting the new one
            departed = window.popleft()
            del ordered[bisect_left(ordered, departed)]
        window.append(value)
        insort(ordered, value)
        k = len(ordered)
        if k % 2:
            medians.append(float(ordered[k // 2]))
        else:
            # even count: average the two middle values
            medians.append(float(ordered[k // 2 - 1] + ordered[k // 2]) / float(2))
    return medians
def get_linedist(vert_sum_array, denoise_winsize):
    """De-noise with median filter & get distance of all points to the line y=ax.

    The reference line runs from (0, 0) to (size - 1, 1); with those endpoints
    the generic point-to-line formula reduces to
    (x - (size - 1) * y) / sqrt(1 + (size - 1)^2).
    Returns (raw distance array, median-filtered distance array).
    """
    npts = vert_sum_array.size
    x_end = npts - 1
    xs = np.arange(npts)
    ys = vert_sum_array
    line_dist_array = (xs - x_end * ys) / math.sqrt(1 + x_end ** 2)
    # median filter requires an odd window size
    win = denoise_winsize if denoise_winsize % 2 else denoise_winsize + 1
    denoised = np.asarray(median_filter(line_dist_array.tolist(), win))
    return line_dist_array, denoised
def get_introns_exons(intron2jxnCount, gene_len, chrom, start, end, strand):
    """get introns (0-based): junction read-spanning regions with at least 2 reads

    intron2jxnCount -- dict keyed 'chrom:start:end[:strand]' -> junction read
                       count (minimum-count filtering already done by caller)
    gene_len -- length of the trimmed gene coverage array
    chrom, start, end -- gene coordinates (start/end may be str or int)
    strand -- gene strand ('+'/'-'), or 0 for non-strand-specific data

    Returns:
        jxn_list    -- sorted unique 0-based junction positions relative to start
        exon_list   -- 'start:end' strings (0-based, end-exclusive) into coverage
        intron_list -- sorted (istart, iend) intron tuples in genomic coordinates
    """
    jxn_list = []
    start2end = {}  # intron start -> end of the shortest intron at that start
    end2start = {}  # intron end -> start of the shortest intron at that end
    (start, end) = map(int, [start, end])
    for intron in intron2jxnCount:
        x = intron.split(':')
        if len(x) == 4:
            [ichrom, istart, iend, istrand] = x
        else:
            [ichrom, istart, iend] = x
            istrand = 0  # junction file without a strand column
        if ichrom == chrom and istrand == strand:
            # start is 0-based, last pos on exon; end is 1-based, first pos on following exon
            [istart, iend] = map(int, [istart, iend])
            if start <= istart < end and start < iend <= end:  # intron in this gene
                if istart - start != 0:
                    jxn_list.append(istart - start)
                if iend - 1 - start != gene_len:
                    jxn_list.append(iend - 1 - start)  # 1-based -> 0-based
                # keep the shortest intron sharing each boundary
                if istart not in start2end or iend < start2end[istart]:
                    start2end[istart] = iend
                if iend not in end2start or istart > end2start[iend]:
                    end2start[iend] = istart
    # merge both boundary maps into one unique, sorted intron list
    intron_list = []
    for istart in start2end:
        intron_list.append((istart, start2end[istart]))
    for iend in end2start:
        if (end2start[iend], iend) not in intron_list:
            intron_list.append((end2start[iend], iend))
    intron_list = sorted(intron_list)
    # exons are the gaps between consecutive introns (plus the gene ends)
    exon_list = []
    for i, intron in enumerate(intron_list):
        estart = intron_list[i - 1][1] - 1 - start if i > 0 else 0
        eend = intron[0] - start
        if eend > estart:
            exon_list.append(':'.join(map(str, [estart, eend])))
        if i == len(intron_list) - 1:  # last exon: after the final intron
            estart = intron[1] - 1 - start
            eend = gene_len
            exon_list.append(':'.join(map(str, [estart, eend])))
    if len(exon_list) == 0:  # intron-less gene: a single exon spans everything
        exon_list.append(':'.join(map(str, [0, end - 1 - start])))
    return sorted(list(set(jxn_list))), exon_list, intron_list
def increase_precision(peak_inds, denoise_winsize, line_dist_array, max_or_min, peak_thresh):
    """call peak in pre-de-noised data

    For each peak called on the de-noised curve, re-detect it on the raw
    (pre-de-noised) curve within +/- denoise_winsize, undoing the positional
    smearing introduced by the median filter.

    peak_inds -- indices of peaks on the de-noised array (updated in place)
    max_or_min -- 'max' to refine maxima, 'min' to refine minima
    Returns the refined indices, sorted.
    """
    for i, ind in enumerate(peak_inds):
        start = max(ind - denoise_winsize, 0)
        end = min(ind + denoise_winsize, len(line_dist_array))
        window = line_dist_array[start:end]
        if max_or_min == 'max':
            peak_inds_new = peakutils.peak.indexes(window, thres=peak_thresh,
                                                   min_dist=denoise_winsize * 2)
        elif max_or_min == 'min':
            peak_inds_new = peakutils.peak.indexes(-1 * window, thres=peak_thresh,
                                                   min_dist=denoise_winsize * 2)
        else:
            # was a silent exit; log why we are aborting
            logger.error('increase_precision: max_or_min must be "max" or "min", got ' + str(max_or_min))
            sys.exit(1)
        if len(peak_inds_new) == 1:
            peak_ind_new = peak_inds_new[0] + start
            peak_inds[i] = peak_ind_new
        elif len(peak_inds_new) != 0:
            logger.error('too many new peak indexes detected')
            # bug fix: the original joined raw ints (TypeError) and referenced
            # an undefined name num_mins (NameError), crashing before logging
            logger.error(' '.join(map(str, [start, end, denoise_winsize, len(peak_inds_new)])) + '\n')
            sys.exit(1)
    return np.sort(peak_inds)
def get_ttest(peak_inds, cov_array, denoise_winsize, test_thresh):
    """Welch's t-test of per-nucleotide coverage before vs. after each change point.

    Keeps only change points whose flanking windows differ significantly
    (p < test_thresh). Returns (sorted kept indices as a list,
    dict mapping kept index -> p-value).
    """
    kept = []
    ind2tp = {}
    for ind in peak_inds:
        upper = min(cov_array.size - 1, ind + denoise_winsize)
        lower = max(0, ind - denoise_winsize)
        if ind - lower <= 1 or upper - ind <= 1:
            continue  # not enough data on one side to test
        before = cov_array[lower:ind]
        after = cov_array[ind:upper]
        if np.std(before) == 0.0 and np.std(after) == 0.0:
            tp = 1  # both segments flat: the test statistic is undefined
        else:
            (t_stat, tp) = stats.ttest_ind(before, after, equal_var=False)
        if tp < test_thresh:
            kept.append(ind)
            ind2tp[ind] = tp
    return np.sort(kept).tolist(), ind2tp
def get_end_cov(utr5, utr3, cov_list, peak_inds_ttest):
    """Require increasing & decreasing coverage at the left & right ends respectively.

    Walks candidate change points near the 5' end (utr5) and 3' end (utr3) and
    schedules for deletion any change point whose downstream segment mean
    violates the expected profile: coverage should rise across 5'-end change
    points and fall across 3'-end change points. When a violation is found the
    walk backtracks through earlier change points, extending the deletion.

    utr5, utr3 -- change-point positions near the gene's left/right end
                  (assumed to be the head/tail slices of peak_inds_ttest)
    cov_list   -- per-base coverage array for the gene
    peak_inds_ttest -- all change points surviving the t-test filter

    Returns peak_inds_ttest with the scheduled indices removed, sorted.
    """
    to_delete = []  # indices into peak_inds_ttest to drop via np.delete below
    if len(utr5) > 0:
        u2prev_mean = {}  # utr5 position index -> mean of the segment before it
        for u, u5cp in enumerate(utr5):
            if u < len(utr5) - 1:
                # prev_cp persists across iterations; only reset on the first one
                prev_cp = 0 if u == 0 else prev_cp
                next_cp = utr5[u + 1]
                this_mean = np.mean(cov_list[u5cp:next_cp])
                prev_mean = np.mean(cov_list[prev_cp:u5cp])
                u2prev_mean[u] = prev_mean
                del_ind = u
                prev_u = u - 1
                flag = 0
                # coverage drops across a 5' change point -> delete and backtrack
                while this_mean < prev_mean:
                    flag = 1
                    to_delete.append(del_ind)
                    if prev_u > 0:
                        # NOTE(review): to_delete holds list indices, but
                        # utr5[prev_u] is a change-point position — this
                        # membership test mixes the two; confirm intended.
                        if utr5[prev_u] not in to_delete:
                            this_mean = np.mean(cov_list[utr5[prev_u]:next_cp])
                            prev_mean = u2prev_mean[prev_u]
                            del_ind = prev_u
                            prev_cp = utr5[prev_u]
                            prev_u -= 1
                        else:
                            prev_u -= 1
                    else:
                        break
                # only advance prev_cp when nothing was deleted this round
                if flag != 1:
                    prev_cp = u5cp
            else:
                prev_cp = u5cp
    if len(utr3) > 0:
        u2prev_mean = {}  # utr3 position index -> mean of the segment before it
        for u, u3cp in enumerate(utr3):
            if u > 0:
                next_cp = len(cov_list) if u == len(utr3) - 1 else utr3[u + 1]
                this_mean = np.mean(cov_list[u3cp:next_cp])
                prev_mean = np.mean(cov_list[prev_cp:u3cp])
                u2prev_mean[u] = prev_mean
                # map the utr3 slot back to its index in peak_inds_ttest,
                # assuming utr3 is the tail of peak_inds_ttest
                del_ind = len(peak_inds_ttest) - len(utr3) + u
                prev_u = u - 1
                flag = 0
                # coverage rises across a 3' change point -> delete and backtrack
                while this_mean > prev_mean:
                    flag = 1
                    to_delete.append(del_ind)
                    if prev_u > 0:
                        # NOTE(review): same index-vs-position mixing as above
                        if utr3[prev_u] not in to_delete:
                            this_mean = np.mean(cov_list[utr3[prev_u]:next_cp])
                            prev_mean = u2prev_mean[prev_u]
                            del_ind = len(peak_inds_ttest) - len(utr3) + prev_u
                            prev_cp = utr3[prev_u]
                            prev_u -= 1
                        else:
                            prev_u -= 1
                    else:
                        break
                if flag != 1:
                    prev_cp = u3cp
            else:
                # first utr3 point: prev_cp may carry over from the utr5 loop
                prev_cp = u3cp
    peak_inds_ttest = sorted(np.delete(peak_inds_ttest, to_delete))
    return peak_inds_ttest
def infer_strand(intron_list, chrom, genome, verbose):
    """Infer strand of TU from non-strand-specific RNA-Seq by checking the GT-AG splice site signal"""
    count_minus = 0
    count_plus = 0
    count_unknown = 0
    intron_label_list = []
    for (istart, iend) in intron_list:
        # beginning of intron (junction is 0-based & 1st position on intron)
        donor_region = chrom + ':' + str(istart + 1) + '-' + str(istart + 2)
        donor = pysam.faidx(genome, donor_region).split('\n')[1].upper()
        # end of intron (junction is 1-based & last position on intron)
        acceptor_region = chrom + ':' + str(iend - 1) + '-' + str(iend)
        acceptor = pysam.faidx(genome, acceptor_region).split('\n')[1].upper()
        # donor: CT = minus-strand acceptor AG, GT = plus-strand donor;
        # acceptor: AC = minus-strand donor GT, AG = plus-strand acceptor
        for observed, minus_signal, plus_signal in ((donor, 'CT', 'GT'),
                                                    (acceptor, 'AC', 'AG')):
            if observed == minus_signal:
                count_minus += 1
                intron_label_list.append('-')
            elif observed == plus_signal:
                count_plus += 1
                intron_label_list.append('+')
            else:
                count_unknown += 1
                intron_label_list.append('?')
    top = max(count_minus, count_plus, count_unknown)
    if top == count_unknown:
        strand_inferred = 'NA'  # mostly unrecognized dinucleotides
    elif count_minus == count_plus:
        strand_inferred = 'NA'  # tie: ambiguous
    elif top == count_minus:
        strand_inferred = '-'
    elif top == count_plus:
        strand_inferred = '+'
    if verbose:
        logger.info('inferred strand: {}', strand_inferred)
        logger.info('counts: minus {} {} {} {} {}', count_minus, 'plus', count_plus, 'unknown', count_unknown)
    return strand_inferred
def run(
input_bg, input_bg_minus, input_regions, output,
genome, plot, junc, minjxncount, juncdist,
min_length, test_thresh,
min_expn, min_expn_distal,
winsize, peak_thresh, peak_min_dist,
fcthresh, max_end_ru, verbose):
# --------------------------------------------------
# main routine
# --------------------------------------------------
logger.info('job starting: {}', str(datetime.now().time()))
if not input_bg:
logger.error('EXIT: Please provide --input_bg')
sys.exit(1)
if not input_regions:
logger.error('EXIT: Please provide --input_regions')
sys.exit(1)
if not output:
logger.error('EXIT: Please provide --output')
sys.exit(1)
if not input_bg_minus and not genome:
logger.error('EXIT: for non-strand-specific RNA-Seq, please provide a --genome file')
sys.exit(1)
plot_dir = os.path.splitext(output)[0] + '_plots'
if plot:
if os.path.exists(plot_dir):
for filename in os.listdir(plot_dir):
os.remove(os.path.join(plot_dir, filename))
else:
os.mkdir(plot_dir)
# === get bedgraph per gene ===
logger.info('bedtools intersect {}', str(datetime.now().time()))
bgfile = os.path.splitext(output)[0] + '_gene_bedgraph_intersect.txt'
if input_bg_minus: # strand-specific
bedgraph_per_gene_ss(input_regions, input_bg, input_bg_minus, bgfile)
else: # non-strand-specific
bedgraph_per_gene_nss(input_regions, input_bg, bgfile)
# === get introns from junction counts ===
logger.info('getting junction reads {}', str(datetime.now().time()))
intron2jxnCount = OrderedDict()
f = open(junc, 'r')
for line in f:
if not line.startswith('track'):
x = line.rstrip().split('\t')
if len(x) == 6:
(chrom, start, end, name, count, strand) = x
intron = ':'.join([chrom, start, end, strand])
elif len(x) == 5:
(chrom, start, end, name, count) = x
intron = ':'.join([chrom, start, end])
else:
logger.error('EXIT: did not recognize junction file format\n')
sys.exit(1)
if int(count) >= minjxncount:
if intron not in intron2jxnCount:
intron2jxnCount[intron] = count
else:
logger.error('seen intron')
sys.exit(1)
f.close()
# === find change points in each gene ===
count_genes_with_reads = 0
count_min_length_filter = 0
count_min_length_keep = 0
count_min_expn_filter = 0
count_min_expn_keep = 0
count_high_ksp = 0
count_vert_sum_max0 = 0
count_vert2_sum_max0 = 0
count_no_cps_afterfiltering = 0
count_no_cps_ttest0 = 0
count_cps_called = 0
count_filter123 = 0
count_genes_with_reads_annotated = 0
count_min_length_filter_annotated = 0
count_min_expn_filter_annotated = 0
logger.info('finding change points in each gene {}', str(datetime.now().time()))
maxl = 0
with open(bgfile, 'r') as f:
for l, line in enumerate(f):
maxl = l
o = open(output, 'w')
with open(bgfile, 'r') as f:
for l, line in enumerate(f):
# not EOF -> read the line
if line != '':
x = line.rstrip().split('\t')
if len(x) == 11:
(achrom, astart, aend, ageneid, ascore, astrand, bchrom, bstart, bend, bcov, overlap_len) = x
elif len(x) == 9:
(achrom, astart, aend, ageneid, bchrom, bstart, bend, bcov, overlap_len) = x
elif len(x) == 10:
(achrom, astart, aend, ageneid, ascore, bchrom, bstart, bend, bcov, overlap_len) = x
else:
logger.error('EXIT: do not recognize bedgraph intersect format\n')
logger.error(line)
sys.exit(1)
if not input_bg_minus:
astrand = 0
astart = int(astart)
aend = int(aend)
bstart = int(bstart)
bend = int(bend)
bcov = float(bcov)
else:
x = ''
if l == 0: # first line
prev_gene = ':'.join(x[:5]) if astrand == 0 else ':'.join(x[:6])
this_start = max(astart, bstart)
this_end = min(aend, bend)
prev_cov_array = np.zeros(aend - astart)
prev_cov_array[(this_start - astart):(this_end - astart)] += bcov
# === next round ===
prev_astart = astart
prev_aend = aend
prev_geneid = ':'.join(map(str, [ageneid, astart, aend, achrom, astrand]))
prev_chrom = achrom
prev_strand = astrand
if l == maxl:
cov_sum = sum(prev_cov_array)
new_start = np.nonzero(prev_cov_array)[0][0]
new_end = np.nonzero(prev_cov_array)[0][-1]
prev_cov_array = prev_cov_array[new_start:(new_end + 1)]
geneid = prev_geneid
start = prev_astart
end = prev_aend
chrom = prev_chrom
strand = prev_strand
else:
this_gene = ':'.join(x[:5]) if astrand == 0 else ':'.join(x[:6])
if line == '' and this_gene == prev_gene and this_gene == '': # EOF
break
elif this_gene == prev_gene and l != maxl: # get coverage
this_start = max(astart, bstart)
this_end = min(aend, bend)
prev_cov_array[(this_start - astart):(this_end - astart)] += bcov
# === next round ===
prev_astart = astart
prev_aend = aend
prev_geneid = ':'.join(map(str, [ageneid, astart, aend, achrom, astrand]))
prev_chrom = achrom
prev_strand = astrand
else: # finished reading all info for one gene -> call change points
# === get per-base coverage ===
if l == maxl:
# === last line of this gene & last line of the file ===
this_start = max(astart, bstart)
this_end = min(aend, bend)
prev_cov_array[(this_start - astart):(this_end - astart)] += bcov
cov_sum = sum(prev_cov_array)
new_start = np.nonzero(prev_cov_array)[0][0]
new_end = np.nonzero(prev_cov_array)[0][-1]
prev_cov_array = prev_cov_array[new_start:(new_end + 1)]
# === call change points in this gene ===
geneid = prev_geneid
start = prev_astart
end = prev_aend
chrom = prev_chrom
strand = prev_strand
cov_array = prev_cov_array
new_start = new_start + start
new_end = new_end + start + 1
else:
cov_sum = sum(prev_cov_array)
new_start = np.nonzero(prev_cov_array)[0][0]
new_end = np.nonzero(prev_cov_array)[0][-1]
prev_cov_array = prev_cov_array[new_start:(new_end + 1)]
# === call change points in the previous gene ===
geneid = prev_geneid
start = prev_astart
end = prev_aend
chrom = prev_chrom
strand = prev_strand
cov_array = prev_cov_array
new_start = new_start + start
new_end = new_end + start + 1
# === next round: first line of the next gene ===
this_start = max(astart, bstart)
this_end = min(aend, bend)
prev_cov_array = np.zeros(aend - astart)
prev_cov_array[(this_start - astart):(this_end - astart)] += bcov
prev_astart = astart
prev_aend = aend
prev_geneid = ':'.join(map(str, [ageneid, astart, aend, achrom, astrand]))
prev_chrom = achrom
prev_strand = astrand
prev_gene = this_gene
count_genes_with_reads += 1
if 'novel' not in geneid:
count_genes_with_reads_annotated += 1
if verbose:
logger.info('gene: {} {}:{}-{}:{} {}-{} {} {}', geneid, chrom, start, end, strand, new_start, new_end, \
cov_array.size, str(datetime.now().time()))
if cov_array.size >= min_length:
count_min_length_keep += 1
# === get per-base coverage ===
cov_avg = float(cov_sum) / float(cov_array.size)
# === get introns ===
intron_list = []
exon_list = []
jxn_list = []
jxn_list, exon_list, intron_list = get_introns_exons(intron2jxnCount, cov_array.size, chrom, new_start, new_end, strand)
if len(jxn_list) > 0:
cov_avg_exon_with_utr, max_exon_cov = get_exon_cov(exon_list, cov_array)
if strand != 0:
strand_inferred = strand
elif strand == 0:
# === infer strand ===
strand_inferred = infer_strand(intron_list, chrom, genome, verbose)
else:
cov_avg_exon_with_utr = cov_avg
max_exon_cov = cov_avg
strand_inferred = strand if strand != 0 else 'NA'
if verbose:
logger.debug('junctions: {}', jxn_list)
logger.debug('exons: {}', exon_list)
logger.debug('strand: {}', strand_inferred)
logger.debug('{} {}', cov_avg_exon_with_utr, max_exon_cov)
if min_expn != -1:
if cov_avg_exon_with_utr < min_expn:
count_min_expn_filter += 1
if 'novel' not in geneid:
count_min_expn_filter_annotated += 1
if verbose:
logger.debug('did not meet min expression level: {} {}', min_expn, cov_avg_exon_with_utr)
continue
else:
count_min_expn_keep += 1
# === calculate cumulative vertical distance per gene ===
vert_sum_array, vert_array = crs(cov_array)
if vert_sum_array[0] == 'NA':
count_vert_sum_max0 += 1
continue
# === KS test ===
ksp = ks_test(vert_sum_array, plot, os.path.join(plot_dir, geneid))
peak_inds_ttest_opt = []
if ksp < test_thresh:
vert2_array = vert_array * vert_array / max(vert_array)
vert2_vert_sum_array, vert2_vert_array = crs(vert2_array)
if vert2_vert_sum_array[0] == 'NA':
count_vert2_sum_max0 += 1
continue
# === get parameters ===
if winsize != -1:
denoise_winsize_list = [winsize]
else:
winsize_max = max(100, int(round(vert_sum_array.size / 100, -2)))
denoise_winsize_list = []
winsize_min = 100
while winsize_min <= min(winsize_max, 500):
denoise_winsize_list.append(winsize_min)
winsize_min = winsize_min + 100
denoise_winsize_list = sorted(denoise_winsize_list)
if verbose:
logger.debug('de-noising window sizes: {}', denoise_winsize_list)
amp_thresh_list = [peak_thresh] if peak_thresh != -1.0 else [0.05, 0.1, 0.15]
if verbose:
logger.debug('amplitude thresholds: {}', amp_thresh_list)
peak_min_dist_list = [peak_min_dist] if peak_min_dist != -1 else [10, 50]
if verbose:
logger.debug('min distance between peaks: {}', peak_min_dist_list)
h = 0
njxns_detected = -100
other_detected = 100
param2cp = {}
param2cpopt = {}
param2fcopt = {}
denoise_winsize_opt = 0
amp_thresh_opt = 0
peak_min_dist_opt = 0
peak_inds_ttest = []
peak_inds_ttest_with_ends = []
for denoise_winsize in denoise_winsize_list:
# get distance to line & denoise
if verbose:
logger.debug ('de-noising {} {}', denoise_winsize, str(datetime.now().time()))
line_dist_array2, line_dist_array2_denoise = get_linedist(vert2_vert_sum_array, denoise_winsize)
line_dist_array, line_dist_array_denoise = get_linedist(vert_sum_array, denoise_winsize)
a2totcp = {}
for a, amp_thresh in enumerate(amp_thresh_list):
if a > 0 and a2totcp[a - 1] == 0:
if verbose:
logger.debug('skipping higher amplitude thresholds because previous gave 0 change points')
a2totcp[a] = 0
else:
for peak_min_dist in peak_min_dist_list:
# --------------------------------------------------
# peak calling
# --------------------------------------------------
if verbose:
logger.debug('parameters: window size {} {} {} {} {} {}', denoise_winsize, 'amplitude', amp_thresh, 'min distance', peak_min_dist, str(datetime.now().time()))
peak_inds_ip_combined_filtered = []
peak_inds_ip_combined = []
peak_inds2_ip_combined = []
if np.unique(line_dist_array2_denoise).size != 1:
# === crs^2 ===
# call peaks
peak_inds2 = peakutils.peak.indexes(line_dist_array2_denoise, thres=amp_thresh, min_dist=peak_min_dist)
peak_inds_min2 = peakutils.peak.indexes(-1 * line_dist_array2_denoise, thres=amp_thresh, min_dist=peak_min_dist)
# increase precision in case de-noising smoothed too much: call peaks on the real data just in the window around the peaks called on smooth data
peak_inds2_ip = increase_precision(peak_inds2, denoise_winsize, line_dist_array2, 'max', amp_thresh)
peak_inds2_min_ip = increase_precision(peak_inds_min2, denoise_winsize, line_dist_array2, 'min', amp_thresh)
# combine
peak_inds2_ip_combined = np.sort(np.append(peak_inds2_ip, peak_inds2_min_ip))
if verbose:
logger.debug ('peak inds max: {} {}', len(peak_inds2), str(datetime.now().time()))
logger.debug ('peak inds min: {} {}', len(peak_inds_min2), str(datetime.now().time()))
logger.debug ('increased precision max: {} {}', len(peak_inds2_ip), str(datetime.now().time()))
logger.debug ('increased precision min: {} {}', len(peak_inds2_min_ip), str(datetime.now().time()))
logger.debug ('peak inds crs^2: {} {}', len(peak_inds2_ip_combined), str(datetime.now().time()))
if np.unique(line_dist_array_denoise).size != 1:
# === original crs ===
# call peaks
peak_inds = peakutils.peak.indexes(line_dist_array_denoise, thres=amp_thresh, min_dist=peak_min_dist)
peak_inds_min = peakutils.peak.indexes(-1 * line_dist_array_denoise, thres=amp_thresh, min_dist=peak_min_dist)
# increase precision in case de-noising smoothed too much: call peaks on the real data just in the window around the peaks called on smooth data
peak_inds_ip = increase_precision(peak_inds, denoise_winsize, line_dist_array, 'max', amp_thresh)
peak_inds_min_ip = increase_precision(peak_inds_min, denoise_winsize, line_dist_array, 'min', amp_thresh)
# combine
peak_inds_ip_combined = np.sort(np.append(peak_inds_ip, peak_inds_min_ip))
if verbose:
logger.debug ('peak inds max: {} {}', len(peak_inds), str(datetime.now().time()))
logger.debug ('peak inds min: {} {}', len(peak_inds_min), str(datetime.now().time()))
logger.debug ('increased precision max: {} {}', len(peak_inds_ip), str(datetime.now().time()))
logger.debug ('increased precision min: {} {}', len(peak_inds_min_ip), str(datetime.now().time()))
logger.debug ('peak inds crs: {} {}', len(peak_inds_ip_combined), str(datetime.now().time()))
if np.unique(line_dist_array2_denoise).size != 1:
if peak_inds_ip_combined.size != 0 and peak_inds2_ip_combined.size != 0:
peak_inds_ip_combined_filtered = np.append(peak_inds2_ip_combined, peak_inds_ip_combined[peak_inds_ip_combined < min(peak_inds2_ip_combined)])
peak_inds_ip_combined_filtered = np.unique(np.append(peak_inds_ip_combined_filtered, peak_inds_ip_combined[peak_inds_ip_combined > max(peak_inds2_ip_combined)]))
else:
peak_inds_ip_combined_filtered = np.unique(np.append(peak_inds2_ip_combined, peak_inds_ip_combined))
else:
peak_inds_ip_combined_filtered = np.copy(peak_inds_ip_combined)
# === UTRs: local crs ===
if len(jxn_list) > 0:
temp_cov_array = cov_array[:jxn_list[0]]
if len(temp_cov_array) > 0:
temp_vert_sum_array, temp_vert_array = crs(temp_cov_array)
if temp_vert_sum_array[0] != 'NA' and len(temp_vert_sum_array) > 0 and max(temp_vert_sum_array) > 0:
temp_ksp = ks_test(temp_vert_sum_array, 0, os.path.join(plot_dir, geneid) + '_temp1')
if temp_ksp < test_thresh:
temp_line_dist_array, temp_line_dist_array_denoise = get_linedist(temp_vert_sum_array, denoise_winsize)
if np.unique(temp_line_dist_array_denoise).size != 1:
temp_peak_inds = peakutils.peak.indexes(temp_line_dist_array_denoise, thres=amp_thresh, min_dist=peak_min_dist)
temp_peak_inds_min = peakutils.peak.indexes(-1 * temp_line_dist_array_denoise, thres=amp_thresh, min_dist=peak_min_dist)
temp_peak_inds_ip = increase_precision(temp_peak_inds, denoise_winsize, temp_line_dist_array, 'max', amp_thresh)
temp_peak_inds_min_ip = increase_precision(temp_peak_inds_min, denoise_winsize, temp_line_dist_array, 'min', amp_thresh)
temp_peak_inds_ip_combined = np.append(temp_peak_inds_ip, temp_peak_inds_min_ip)
peak_inds_ip_combined_filtered = np.append(peak_inds_ip_combined_filtered, temp_peak_inds_ip_combined)
temp_cov_array = cov_array[jxn_list[-1] - 1:]
if len(temp_cov_array) > 0:
temp_vert_sum_array, temp_vert_array = crs(temp_cov_array)
if temp_vert_sum_array[0] != 'NA' and len(temp_vert_sum_array) > 0 and max(temp_vert_sum_array) > 0:
temp_ksp = ks_test(temp_vert_sum_array, 0, os.path.join(plot_dir, geneid) + '_temp2')
if temp_ksp < test_thresh:
temp_line_dist_array, temp_line_dist_array_denoise = get_linedist(temp_vert_sum_array, denoise_winsize)
if np.unique(temp_line_dist_array_denoise).size != 1:
temp_peak_inds = peakutils.peak.indexes(temp_line_dist_array_denoise, thres=amp_thresh, min_dist=peak_min_dist)
temp_peak_inds_min = peakutils.peak.indexes(-1 * temp_line_dist_array_denoise, thres=amp_thresh, min_dist=peak_min_dist)
temp_peak_inds_ip = increase_precision(temp_peak_inds, denoise_winsize, temp_line_dist_array, 'max', amp_thresh)
temp_peak_inds_min_ip = increase_precision(temp_peak_inds_min, denoise_winsize, temp_line_dist_array, 'min', amp_thresh)
temp_peak_inds_ip_combined = np.append(temp_peak_inds_ip, temp_peak_inds_min_ip)
peak_inds_ip_combined_filtered = np.append(peak_inds_ip_combined_filtered, temp_peak_inds_ip_combined)
else:
peak_inds_ip_combined_filtered = np.copy(peak_inds2_ip_combined)
peak_inds_ip_combined_filtered = np.sort(np.unique(peak_inds_ip_combined_filtered))
if verbose:
logger.debug('peak inds combined: {} {}', len(peak_inds_ip_combined_filtered), str(datetime.now().time()))
logger.debug(peak_inds_ip_combined_filtered)
# --------------------------------------------------
# peak filtering
# --------------------------------------------------
if len(peak_inds_ip_combined_filtered) != 0:
# === t-test: +/- window ===
peak_inds_ttest, ind2tp = get_ttest(peak_inds_ip_combined_filtered, cov_array, denoise_winsize * 2, test_thresh)
param2cp[(denoise_winsize, amp_thresh, peak_min_dist)] = peak_inds_ttest
if verbose:
logger.debug('t-test: {} {} {}', len(peak_inds_ttest), str(datetime.now().time()), peak_inds_ttest)
# === filter #1: enforce peak_min_dist ===
to_delete = []
peak_inds_ttest_with_ends = [0] + peak_inds_ttest + [cov_array.size - 1] # include ends
for i, pos in enumerate(peak_inds_ttest_with_ends):
if i > 0 and pos - peak_inds_ttest_with_ends[i - 1] <= peak_min_dist:
ind = i - 1 # because we added the ends
if i == 1: # keep the end & delete the change point
to_delete.append(ind)
elif i == len(peak_inds_ttest_with_ends) - 1: # keep the end & delete the change point
to_delete.append(ind)
else: # keep the change point with lower t-test p-value
to_delete.append(ind) if ind2tp[peak_inds_ttest[ind]] > ind2tp[peak_inds_ttest[ind - 1]] else to_delete.append(ind - 1)
to_delete = [x for x in to_delete if x < len(peak_inds_ttest)]
peak_inds_ttest = np.delete(peak_inds_ttest, to_delete)
if verbose:
logger.debug('filtered by -d: {} {} {}', len(peak_inds_ttest), str(datetime.now().time()), peak_inds_ttest)
# === filter #2: fold change of first bps ===
to_delete = []
for i, peak in enumerate(peak_inds_ttest):
if i > 0:
prev_range_end = min(prev_peak + denoise_winsize, peak)
this_range_end = min(peak + denoise_winsize, peak_inds_ttest[i + 1]) if i != len(peak_inds_ttest) - 1 else peak + denoise_winsize
this_mean = np.mean(cov_array[peak:this_range_end])
prev_mean = np.mean(cov_array[prev_peak:prev_range_end])
log2fc = math.log((this_mean + 1) / (prev_mean + 1), 2)
if abs(log2fc) < math.log(fcthresh, 2):
if ind2tp[peak] > ind2tp[prev_peak]:
to_delete.append(peak)
else:
to_delete.append(prev_peak)
prev_peak = peak
else:
prev_peak = peak
else:
prev_peak = peak
peak_inds_ttest = np.asarray([x for x in peak_inds_ttest if x not in to_delete])
if verbose:
logger.debug('filtered by fold change of first bps: {} {} {}', len(peak_inds_ttest), str(datetime.now().time()), peak_inds_ttest)
# === filter #3: require increasing at 5' end & decreasing at 3' end ===
if len(jxn_list) > 0:
utr5 = peak_inds_ttest[peak_inds_ttest <= jxn_list[0] + juncdist]
utr3 = peak_inds_ttest[peak_inds_ttest >= jxn_list[-1] - juncdist]
peak_inds_ttest = get_end_cov(utr5, utr3, cov_array, peak_inds_ttest)
if verbose:
logger.debug('filtered by 5\'/3\' ends: {} {} {}', len(peak_inds_ttest), str(datetime.now().time()), peak_inds_ttest)
# === filter #4: filter distal ends with low coverage ===
to_delete = []
peak_inds_ttest_with_ends = [0] + np.asarray(peak_inds_ttest).tolist() + [cov_array.size - 1] # include ends
if min_expn_distal != 0:
if len(jxn_list) > 0:
peak_inds_ttest_tandem_left = [x for x in peak_inds_ttest_with_ends if x <= int(jxn_list[0]) - juncdist]
peak_inds_ttest_tandem_right = [x for x in peak_inds_ttest_with_ends if x >= int(jxn_list[-1]) + juncdist]
# consider change points before the first junction
if len(peak_inds_ttest_tandem_left) > 1:
for i, ind in enumerate(peak_inds_ttest_tandem_left):
next_cut = peak_inds_ttest_with_ends[i + 1]
this_mean = round(np.mean(cov_array[ind:next_cut]), 2)
if this_mean <= min_expn_distal:
to_delete.append(ind)
# consider change points after the last junction
if len(peak_inds_ttest_tandem_right) > 1:
for i, ind in enumerate(peak_inds_ttest_tandem_right):
prev_cut = peak_inds_ttest_with_ends[peak_inds_ttest_with_ends.index(ind) - 1]
prev_mean = round(np.mean(cov_array[prev_cut:ind]), 2)
if prev_mean <= min_expn_distal:
to_delete.append(ind)
for ind in to_delete:
peak_inds_ttest_with_ends.remove(ind)
if verbose:
logger.debug('filter distal ends with low coverage: {} {}', len(peak_inds_ttest_with_ends), peak_inds_ttest_with_ends)
# === filter #5: fold change of whole segments ===
to_delete = []
fc_list = []
if len(peak_inds_ttest_with_ends) > 0:
for i, ind in enumerate(peak_inds_ttest_with_ends):
next_cut = peak_inds_ttest_with_ends[i + 1] if i != len(peak_inds_ttest_with_ends) - 1 else cov_array.size
if i == 0:
this_mean = np.mean(cov_array[ind:next_cut])
prev_mean = 0
this_sd = np.std(cov_array[ind:next_cut])
prev_sd = 0
elif i == len(peak_inds_ttest_with_ends) - 1:
this_mean = 0
prev_mean = np.mean(cov_array[prev_cut:ind])
this_sd = 0
prev_sd = np.std(cov_array[prev_cut:ind])
else:
this_mean = np.mean(cov_array[ind:next_cut])
prev_mean = np.mean(cov_array[prev_cut:ind])
this_sd = np.std(cov_array[ind:next_cut])
prev_sd = np.std(cov_array[prev_cut:ind])
log2fc = math.log((this_mean + 1) / (prev_mean + 1), 2)
if abs(log2fc) < math.log(fcthresh, 2) and i != 0 and i != len(peak_inds_ttest_with_ends) - 1:
to_delete.append(i)
else:
prev_cut = ind
peak_inds_ttest_with_ends = np.delete(peak_inds_ttest_with_ends, to_delete)
# calculate fold change of whole segments
for i, ind in enumerate(peak_inds_ttest_with_ends):
next_cut = peak_inds_ttest_with_ends[i + 1] if i != len(peak_inds_ttest_with_ends) - 1 else cov_array.size
if i == 0:
this_mean = np.mean(cov_array[ind:next_cut])
prev_mean = 0
this_sd = np.std(cov_array[ind:next_cut])
prev_sd = 0
elif i == len(peak_inds_ttest_with_ends) - 1:
this_mean = 0
prev_mean = np.mean(cov_array[prev_cut:ind])
this_sd = 0
prev_sd = np.std(cov_array[prev_cut:ind])
else:
this_mean = np.mean(cov_array[ind:next_cut])
prev_mean = np.mean(cov_array[prev_cut:ind])
this_sd = np.std(cov_array[ind:next_cut])
prev_sd = np.std(cov_array[prev_cut:ind])
log2fc = math.log((this_mean + 1) / (prev_mean + 1), 2)
prev_cut = ind
fc_list.append(':'.join(map(str, [this_mean, log2fc])))
else:
count_filter123 += 1
if verbose:
logger.debug('fold change whole segment: {} {}', len(peak_inds_ttest_with_ends), peak_inds_ttest_with_ends)
param2cpopt[(denoise_winsize, amp_thresh, peak_min_dist)] = peak_inds_ttest_with_ends
param2fcopt[(denoise_winsize, amp_thresh, peak_min_dist)] = fc_list
elif verbose:
count_no_cps += 1
logger.debug('no change points called')
# get total change points for this amp threshold
if a not in a2totcp:
a2totcp[a] = len(peak_inds_ttest_with_ends)
elif len(peak_inds_ttest_with_ends) > a2totcp[a]:
a2totcp[a] = len(peak_inds_ttest_with_ends)
# optimization criteria
if len(peak_inds_ttest_with_ends) > 0:
if len(jxn_list) > 0:
# optimal parameters: detect junctions within 50bp
jxn_closest = [min(peak_inds_ttest_with_ends, key=lambda x:abs(x - j)) for j in jxn_list]
jxn_peak_dif = [abs(j - c) for j, c in zip(jxn_list, jxn_closest)]
tot_near_jxns = len([x for x in jxn_peak_dif if x <= juncdist])
tot_other = len(peak_inds_ttest_with_ends) - tot_near_jxns
if tot_near_jxns - tot_other > njxns_detected:
njxns_detected = tot_near_jxns - tot_other
denoise_winsize_opt = denoise_winsize
amp_thresh_opt = amp_thresh
peak_min_dist_opt = peak_min_dist
elif winsize != -1:
njxns_detected = tot_near_jxns - tot_other
denoise_winsize_opt = denoise_winsize
amp_thresh_opt = amp_thresh
peak_min_dist_opt = peak_min_dist
else:
# optimal parameters: fewest total change points
if len(peak_inds_ttest_with_ends) < other_detected:
other_detected = len(peak_inds_ttest_with_ends)
denoise_winsize_opt = denoise_winsize
amp_thresh_opt = amp_thresh
peak_min_dist_opt = peak_min_dist
else:
count_no_cps_ttest0 += 1
if verbose:
logger.debug('no change points found: total passing t-test = 0')
# --------------------------------------------------
# optimal change points
# --------------------------------------------------
if denoise_winsize_opt == 0 or amp_thresh_opt == 0 or peak_min_dist_opt == 0:
count_no_cps_afterfiltering += 1
if verbose:
logger.debug('no change points called {}', str(datetime.now().time()))
else:
count_cps_called += 1
if verbose:
logger.debug('optimal parameters: jxns detected {} {} {} {} {} {} {} {} {} {}', njxns_detected, '/', len(jxn_list), ': window size', denoise_winsize_opt, 'amplitude', amp_thresh_opt, 'min distance', peak_min_dist_opt, str(datetime.now().time()))
peak_inds_ttest_opt = param2cpopt[(denoise_winsize_opt, amp_thresh_opt, peak_min_dist_opt)].tolist()
peak_inds_ttest_preopt = param2cp[(denoise_winsize_opt, amp_thresh_opt, peak_min_dist_opt)]
fc_list_opt = param2fcopt[(denoise_winsize_opt, amp_thresh_opt, peak_min_dist_opt)]
# === filter low coverage ends ===
if max_end_ru != 0:
cov_mean_list = [float(x.split(':')[0]) for x in fc_list_opt]
ru_all = [x / max(cov_mean_list) for x in cov_mean_list]
while ru_all[0] < max_end_ru:
if verbose:
logger.debug('removing left end: {} {}', ru_all[0], peak_inds_ttest_opt[0] + new_start)
del ru_all[0]
del peak_inds_ttest_opt[0]
del fc_list_opt[0]
if len(ru_all) > 1:
while ru_all[-2] < max_end_ru:
if verbose:
logger.debug('removing right end: {} {}', ru_all[-1], peak_inds_ttest_opt[-1] + new_start)
del ru_all[-1]
del peak_inds_ttest_opt[-1]
del fc_list_opt[-1]
# label change points
label_list = []
for i, peak_ind in enumerate(peak_inds_ttest_opt):
if i == 0:
if strand_inferred == '+':
label = 'DistalTSS'
elif strand_inferred == '-':
label = 'DistalPolyA'
else:
label = 'DistalLeft'
elif i == len(peak_inds_ttest_opt) - 1:
if strand_inferred == '+':
label = 'DistalPolyA'
elif strand_inferred == '-':
label = 'DistalTSS'
else:
label = 'DistalRight'
elif len(jxn_list) > 0:
jxn_distances = [abs(int(peak_ind) - int(x)) for x in jxn_list]
exon_start_end_list = [map(int, exon.split(':')) for exon in exon_list]
exon_flag_list = [1 if estart <= int(peak_ind) < eend else 0 for estart, eend in exon_start_end_list]
if any(x <= juncdist for x in jxn_distances):
label = 'Junction'
elif int(peak_ind) <= int(jxn_list[0]) - juncdist:
if strand_inferred == '-':
label = 'TandemAPA'
elif strand_inferred == '+':
label = 'TandemATSS'
else:
label = 'TandemLeft'
elif int(peak_ind) >= int(jxn_list[-1]) + juncdist:
if strand_inferred == '+':
label = 'TandemAPA'
elif strand_inferred == '-':
label = 'TandemATSS'
else:
label = 'TandemRight'
else:
if sum(exon_flag_list) == 0:
label = 'Intron'
else:
label = 'Exon'
else:
label = 'Exon'
label_list.append(label)
if int(peak_ind) > cov_array.size - 1:
logger.debug('gene: {} {}:{}-{}:{} {}-{} {} {}', geneid, start, end, chrom, strand, new_start, new_end, cov_array.size, str(datetime.now().time()))
logger.debug('{} {}', cov_array.size, peak_ind)
sys.exit(1)
this_gs = geneid.split(':')[0]
this_pos = str(int(peak_ind) + new_start)
this_fc = fc_list_opt[i]
cov_exons, fc = this_fc.split(':')
# === write output ===
if strand == '1':
strand = '+'
if strand == '-1':
strand = '-'
strand_inferred = strand if strand == '+' or strand == '-' else strand_inferred
name = ':'.join(map(str, [label, this_gs, start, end, strand_inferred,
denoise_winsize_opt,
cov_exons,
cov_avg_exon_with_utr]))
# the change point is 1-based before the change -> adjust by 1bp if change point goes from high to low so that reported change points are outside of the higher-coverage region
if float(fc) < 0:
this_pos = int(this_pos) - 1
if strand != '+' and strand != '-':
if strand_inferred == '+' or strand_inferred == '-':
o.write('\t'.join([chrom, str(int(this_pos)), str(int(this_pos) + 1), name, fc, strand_inferred]) + '\n')
else:
o.write('\t'.join([chrom, str(int(this_pos)), str(int(this_pos) + 1), name, fc]) + '\n')
else:
o.write('\t'.join([chrom, str(int(this_pos)), str(int(this_pos) + 1), name, fc, strand]) + '\n')
else:
count_high_ksp += 1
if verbose:
logger.debug('no change points called: KS p = {}', ksp)
# --------------------------------------------------
# plots
# --------------------------------------------------
if plot:
if verbose:
logger.debug('plotting')
# plot coverage without change points
x = np.linspace(0, len(cov_array) - 1, len(cov_array)) / 1000
y = cov_array
out_plot = os.path.join(plot_dir, geneid.replace(':', '_') + '_cov.pdf')
pdf = PdfPages(out_plot)
fig = pyplot.figure(figsize=(3, 3))
pyplot.plot(x, y, color='k')
pyplot.xlabel('Position (kb)')
pyplot.ylabel('Coverage')
pyplot.gcf().subplots_adjust(bottom=0.3, left=0.3)
pdf.savefig()
pdf.close()
pyplot.close(fig)
# if len(jxn_list) > 0:
# # plot coverage without change points, excluding introns
# x = np.linspace(0, len(temp_cov_list) - 1, len(temp_cov_list)) / 1000
# y = np.asarray(temp_cov_list)
# out_plot = os.path.join(plot_dir, geneid + '_cov_noIntrons.pdf')
# pdf = PdfPages(out_plot)
# fig = pyplot.figure(figsize=(3, 3))
# pyplot.plot(x, y, color='k')
# pyplot.xlabel('Position (kb)')
# pyplot.ylabel('Coverage')
# pyplot.gcf().subplots_adjust(bottom=0.3, left=0.3)
# pdf.savefig()
# pdf.close()
# pyplot.close(fig)
# plot coverage (black) + + change points (red)
x = np.linspace(0, len(cov_array) - 1, len(cov_array)) / 1000
y = np.asarray(cov_array)
out_plot = os.path.join(plot_dir, geneid.replace(':', '_') + '_cov_segs.pdf')
pdf = PdfPages(out_plot)
fig = pyplot.figure(figsize=(3, 3))
pyplot.plot(x, y, color='k')
if len(peak_inds_ttest_opt) > 0:
for seg in peak_inds_ttest_opt:
seg = float(seg) / float(1000)
pyplot.axvline(x=seg, color='r')
pyplot.title('Segmentation: n = ' + str(len(peak_inds_ttest_opt)))
pyplot.xlabel('Position (kb)')
pyplot.ylabel('Coverage')
pyplot.gcf().subplots_adjust(bottom=0.3, left=0.3)
pdf.savefig()
pdf.close()
pyplot.close(fig)
# plot coverage (black) + + change points (red): left
junction_indexes = [i for i, x in enumerate(label_list) if x == 'Junction']
if len(junction_indexes) != 0 and junction_indexes[0] != 0:
x = np.linspace(0, len(cov_array) - 1, len(cov_array)) / 1000
y = np.asarray(cov_array)
out_plot = os.path.join(plot_dir, geneid.replace(':', '_') + '_cov_segs_left.pdf')
pdf = PdfPages(out_plot)
fig = pyplot.figure(figsize=(3, 3))
pyplot.plot(x, y, color='k')
if len(peak_inds_ttest_opt) > 0:
for seg in peak_inds_ttest_opt:
seg = float(seg) / float(1000)
pyplot.axvline(x=seg, color='r')
pyplot.xlim(0, float(peak_inds_ttest_opt[junction_indexes[0]]) / float(1000))
pyplot.title('Segmentation: n = ' + str(len(peak_inds_ttest_opt)))
pyplot.xlabel('Position (kb)')
pyplot.ylabel('Coverage')
pyplot.gcf().subplots_adjust(bottom=0.3, left=0.3)
pdf.savefig()
pdf.close()
pyplot.close(fig)
# plot coverage (black) + + change points (red): right
if len(junction_indexes) != 0 and junction_indexes[-1] != len(label_list) - 1:
x = np.linspace(0, len(cov_array) - 1, len(cov_array)) / 1000
y = np.asarray(cov_array)
out_plot = os.path.join(plot_dir, geneid.replace(':', '_') + '_cov_segs_right.pdf')
pdf = PdfPages(out_plot)
fig = pyplot.figure(figsize=(3, 3))
pyplot.plot(x, y, color='k')
if len(peak_inds_ttest_opt) > 0:
for seg in peak_inds_ttest_opt:
seg = float(seg) / float(1000)
pyplot.axvline(x=seg, color='r')
pyplot.xlim(float(peak_inds_ttest_opt[junction_indexes[-1]]) / float(1000), float(len(cov_array)) / float(1000))
pyplot.title('Segmentation: n = ' + str(len(peak_inds_ttest_opt)))
pyplot.xlabel('Position (kb)')
pyplot.ylabel('Coverage')
pyplot.gcf().subplots_adjust(bottom=0.3, left=0.3)
pdf.savefig()
pdf.close()
pyplot.close(fig)
if ksp < test_thresh:
x = np.linspace(0, len(line_dist_array_denoise) - 1, len(line_dist_array_denoise)) / 1000
y1 = line_dist_array2_denoise
y2 = line_dist_array2
y3 = line_dist_array_denoise
y4 = line_dist_array
out_plot = os.path.join(plot_dir, geneid.replace(':', '_') + '_crsToLine.pdf')
pdf = PdfPages(out_plot)
fig = pyplot.figure(figsize=(3, 3))
pyplot.plot(x, y1, color='b')
pyplot.plot(x, y2, color='g')
pyplot.plot(x, y3, color='c')
pyplot.plot(x, y4, color='k')
pyplot.title('crs to line: n = ' + str(len(peak_inds_ttest_preopt)))
pyplot.xlabel('Position (kb)')
pyplot.ylabel('Distance to line y=ax')
pyplot.gcf().subplots_adjust(bottom=0.3, left=0.3)
pdf.savefig()
pdf.close()
pyplot.close(fig)
x = np.linspace(0, len(line_dist_array_denoise) - 1, len(line_dist_array_denoise)) / 1000
y1 = line_dist_array2_denoise
y2 = line_dist_array2
y3 = line_dist_array_denoise
y4 = line_dist_array
out_plot = os.path.join(plot_dir, geneid.replace(':', '_') + '_crsToLine_segs.pdf')
pdf = PdfPages(out_plot)
fig = pyplot.figure(figsize=(3, 3))
pyplot.plot(x, y1, color='b')
pyplot.plot(x, y2, color='g')
pyplot.plot(x, y3, color='c')
pyplot.plot(x, y4, color='k')
if len(peak_inds_ttest_preopt) > 0:
for seg in peak_inds_ttest_preopt:
seg = float(seg) / float(1000)
pyplot.axvline(x=seg, color='r')
pyplot.title('crs to line: n = ' + str(len(peak_inds_ttest_preopt)))
pyplot.xlabel('Position (kb)')
pyplot.ylabel('Distance to line y=ax')
pyplot.gcf().subplots_adjust(bottom=0.3, left=0.3)
pdf.savefig()
pdf.close()
pyplot.close(fig)
else:
count_min_length_filter += 1
if 'novel' not in geneid:
count_min_length_filter_annotated += 1
if verbose:
logger.debug ('gene length <', min_length, '-> skipping')
o.close()
sort_bedfile(output, output, sort_by_bedtools = True)
if verbose:
logger.debug('{} total TUs with reads, {} annotated', count_genes_with_reads, count_genes_with_reads_annotated)
logger.debug('{} total TUs filtered by min TU length >= {} ({} annotated), {} kept', count_min_length_filter, min_length, count_min_length_filter_annotated, count_min_length_keep)
logger.debug('{} total TUs filtered by min expression >= {}, ({} annotated), {} kept', count_min_expn_filter, min_expn, count_min_expn_filter_annotated, count_min_expn_keep)
logger.debug('{} total TUs with ksp >= {}', count_high_ksp, test_thresh)
logger.debug('{} total TUs filtered by having max CVS = 0', count_vert_sum_max0)
logger.debug('{} total TUs filtered by having max CVS2 = 0', count_vert2_sum_max0)
logger.debug('{} total TUs filtered by having no cps passing ttest', count_no_cps_ttest0)
logger.debug('{} total filtered by filters 1-3', count_filter123)
logger.debug('{} total TUs with no cps after all filters', count_no_cps_afterfiltering, )
logger.debug('{} total TUs with change points', count_cps_called)
# === delete temporary files ===
os.remove(bgfile)
logger.debug('finished: {}', str(datetime.now().time()))
def main(argv):
    """Command-line entry point: parse arguments and run change-point detection.

    Parameters
    ----------
    argv : list of str
        Command-line arguments excluding the program name, e.g. ``sys.argv[1:]``.
        Forwarded to :meth:`argparse.ArgumentParser.parse_args`.
    """
    # --------------------------------------------------
    # get args
    # --------------------------------------------------
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                     description='Identify change points throughout each TU using the cumulative read sum (CRS).')
    group = parser.add_argument_group('Input')
    group.add_argument('-i', '--input_bg', dest='input_bg', type=str, metavar='',
                       help='Bedgraph: non-strand-specific or plus strand.')
    group.add_argument('-m', '--input_bg_minus', dest='input_bg_minus', type=str, metavar='',
                       help='Bedgraph, minus strand (strand-specific only).')
    group.add_argument('-g', '--input_regions', dest='input_regions', type=str, metavar='',
                       help='Bed file of transcription units.')
    group.add_argument('-j', '--junc', dest='junc', type=str, metavar='',
                       help='Bed file of junction read counts.')
    group.add_argument('-x', '--genome', dest='genome', type=str, metavar='',
                       help='Genome fasta file.')
    group = parser.add_argument_group('Parameters')
    group.add_argument('-a', '--peak_thresh', dest='peak_thresh', type=float, default=-1.0, metavar='',
                       help='Normalized threshold (float between [0., 1.]). Only the peaks with amplitude higher than the threshold will be detected. -1.0 indicates optimizing between [0.01, 0.05, 0.1] for each TU.')
    group.add_argument('-d', '--peak_min_dist', dest='peak_min_dist', type=int, default=-1, metavar='',
                       help='Minimum distance between each detected peak. The peak with the highest amplitude is preferred to satisfy this constraint. -1 indicates optimizing between [10, 50] for each TU.')
    group.add_argument('-w', '--winsize', dest='winsize', type=int, default=-1, metavar='',
                       help='Window size for de-noising and increasing precision. -1 indicates optimizing between [50, max(100, gene_length / 100) * 2].')
    group.add_argument('-t', '--test_thresh', dest='test_thresh', type=float, default=0.001, metavar='',
                       help='Maximum p-value threshold for KS test and t-test.')
    group.add_argument('-l', '--min_length', dest='min_length', type=int, default=1000, metavar='',
                       help='Minimum gene length for running mountain climber.')
    group.add_argument('-e', '--min_expn', dest='min_expn', type=int, default=10, metavar='',
                       help='Minimum expression level (average # reads per bp)')
    group.add_argument('-s', '--min_expn_distal', dest='min_expn_distal', type=int, default=1, metavar='',
                       help='Minimum distal expression level (average # reads per bp).')
    group.add_argument('-f', '--fcthresh', dest='fcthresh', type=float, default=1.5, metavar='',
                       help='Minimum fold change.')
    group.add_argument('-u', '--juncdist', dest='juncdist', type=int, default=10, metavar='',
                       help='Minimum distance to exon-intron junction.')
    group.add_argument('-n', '--minjxncount', dest='minjxncount', type=int, default=2, metavar='',
                       help='Minimum junction read count.')
    group.add_argument('-z', '--max_end_ru', dest='max_end_ru', type=float, default=0.01, metavar='',
                       help='Maximum end relative usage = coverage of end / max segment coverage.')
    group = parser.add_argument_group('Output')
    group.add_argument('-o', '--output', dest='output', type=str, metavar='',
                       help='Output prefix. Bed file of change points has name field = CPlabel:gene:TUstart:TUend:inferred_strand:winsize:segment_coverage:average_exon_coverage. Score = log2(fold change).')
    group.add_argument('-p', '--plot', dest='plot', action='store_true',
                       help='Plot the cumulative read sum (CRS), the distance from CRS to line y=ax, and the coverage with predicted change points.')
    group.add_argument('-v', '--verbose', dest='verbose', action='store_true',
                       help='Print progress.')
    # BUG FIX: previously called parser.parse_args() with no arguments, which
    # silently ignored the `argv` parameter and always parsed sys.argv[1:].
    # Passing argv through keeps CLI behavior identical (the __main__ guard
    # passes sys.argv[1:]) while making programmatic calls of main() work.
    args = parser.parse_args(argv)
    run(
        input_bg=args.input_bg, input_bg_minus=args.input_bg_minus,
        input_regions=args.input_regions, output=args.output,
        genome=args.genome, plot=args.plot,
        junc=args.junc, minjxncount=args.minjxncount, juncdist=args.juncdist,
        min_length=args.min_length, min_expn=args.min_expn, min_expn_distal=args.min_expn_distal,
        test_thresh=args.test_thresh, winsize=args.winsize, peak_min_dist=args.peak_min_dist,
        peak_thresh=args.peak_thresh, fcthresh=args.fcthresh, max_end_ru=args.max_end_ru,
        verbose=args.verbose
    )
# Script entry point: forward the command-line arguments (minus the program
# name) to main() so the module stays importable without side effects.
if __name__ == '__main__':
    main(sys.argv[1:])
| [
"matplotlib.pyplot.ylabel",
"pysam.faidx",
"math.sqrt",
"math.log",
"pybedtools.BedTool",
"scipy.stats.ttest_ind",
"sys.exit",
"numpy.arange",
"os.remove",
"os.path.exists",
"numpy.mean",
"os.listdir",
"collections.deque",
"argparse.ArgumentParser",
"numpy.delete",
"numpy.sort",
"mat... | [((933, 954), 'matplotlib.use', 'matplotlib.use', (['"""pdf"""'], {}), "('pdf')\n", (947, 954), False, 'import matplotlib\n'), ((1852, 1882), 'src.functions.sort_bedfile', 'sort_bedfile', (['bg_plus', 'bg_plus'], {}), '(bg_plus, bg_plus)\n', (1864, 1882), False, 'from src.functions import sort_bedfile, run_command\n'), ((1995, 2027), 'src.functions.sort_bedfile', 'sort_bedfile', (['bg_minus', 'bg_minus'], {}), '(bg_minus, bg_minus)\n', (2007, 2027), False, 'from src.functions import sort_bedfile, run_command\n'), ((2459, 2479), 'src.functions.sort_bedfile', 'sort_bedfile', (['bg', 'bg'], {}), '(bg, bg)\n', (2471, 2479), False, 'from src.functions import sort_bedfile, run_command\n'), ((3715, 3757), 'scipy.stats.ks_2samp', 'stats.ks_2samp', (['vert_sum_array', 'line_array'], {}), '(vert_sum_array, line_array)\n', (3729, 3757), False, 'from scipy import stats\n'), ((4688, 4730), 'math.sqrt', 'math.sqrt', (['((y2 - y1) ** 2 + (x2 - x1) ** 2)'], {}), '((y2 - y1) ** 2 + (x2 - x1) ** 2)\n', (4697, 4730), False, 'import math\n'), ((5136, 5143), 'collections.deque', 'deque', ([], {}), '()\n', (5141, 5143), False, 'from collections import defaultdict, OrderedDict, deque\n'), ((5195, 5219), 'itertools.islice', 'islice', (['seq', 'window_size'], {}), '(seq, window_size)\n', (5201, 5219), False, 'from itertools import islice\n'), ((5958, 5988), 'numpy.arange', 'np.arange', (['vert_sum_array.size'], {}), '(vert_sum_array.size)\n', (5967, 5988), True, 'import numpy as np\n'), ((6477, 6511), 'numpy.asarray', 'np.asarray', (['line_dist_list_denoise'], {}), '(line_dist_list_denoise)\n', (6487, 6511), True, 'import numpy as np\n'), ((9428, 9446), 'numpy.sort', 'np.sort', (['peak_inds'], {}), '(peak_inds)\n', (9435, 9446), True, 'import numpy as np\n'), ((15477, 15490), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (15488, 15490), False, 'from collections import defaultdict, OrderedDict, deque\n'), ((53370, 53421), 'src.functions.sort_bedfile', 'sort_bedfile', 
(['output', 'output'], {'sort_by_bedtools': '(True)'}), '(output, output, sort_by_bedtools=True)\n', (53382, 53421), False, 'from src.functions import sort_bedfile, run_command\n'), ((54509, 54526), 'os.remove', 'os.remove', (['bgfile'], {}), '(bgfile)\n', (54518, 54526), False, 'import os\n'), ((54733, 54921), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'description': '"""Identify change points throughout each TU using the cumulative read sum (CRS)."""'}), "(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description=\n 'Identify change points throughout each TU using the cumulative read sum (CRS).'\n )\n", (54756, 54921), False, 'import argparse\n'), ((2346, 2361), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (2355, 2361), False, 'import os\n'), ((3250, 3271), 'numpy.ediff1d', 'np.ediff1d', (['cov_array'], {}), '(cov_array)\n', (3260, 3271), True, 'import numpy as np\n'), ((3309, 3332), 'numpy.absolute', 'np.absolute', (['vert_array'], {}), '(vert_array)\n', (3320, 3332), True, 'import numpy as np\n'), ((4122, 4140), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['out_plot'], {}), '(out_plot)\n', (4130, 4140), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((4149, 4178), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (4162, 4178), True, 'import matplotlib.pyplot as pyplot\n'), ((4181, 4211), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x1', 'y1'], {'color': '"""k"""'}), "(x1, y1, color='k')\n", (4192, 4211), True, 'import matplotlib.pyplot as pyplot\n'), ((4214, 4247), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x2', 'y2'], {'color': '"""0.75"""'}), "(x2, y2, color='0.75')\n", (4225, 4247), True, 'import matplotlib.pyplot as pyplot\n'), ((4302, 4332), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Position (kb)"""'], {}), "('Position (kb)')\n", (4315, 4332), True, 'import 
matplotlib.pyplot as pyplot\n'), ((4335, 4380), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Cumulative Vertical Distance"""'], {}), "('Cumulative Vertical Distance')\n", (4348, 4380), True, 'import matplotlib.pyplot as pyplot\n'), ((4466, 4483), 'matplotlib.pyplot.close', 'pyplot.close', (['fig'], {}), '(fig)\n', (4478, 4483), True, 'import matplotlib.pyplot as pyplot\n'), ((5240, 5255), 'bisect.insort', 'insort', (['s', 'item'], {}), '(s, item)\n', (5246, 5255), False, 'from bisect import bisect_left, insort\n'), ((5572, 5587), 'bisect.insort', 'insort', (['s', 'item'], {}), '(s, item)\n', (5578, 5587), False, 'from bisect import bisect_left, insort\n'), ((6156, 6198), 'math.sqrt', 'math.sqrt', (['((y2 - y1) ** 2 + (x2 - x1) ** 2)'], {}), '((y2 - y1) ** 2 + (x2 - x1) ** 2)\n', (6165, 6198), False, 'import math\n'), ((12056, 12093), 'numpy.delete', 'np.delete', (['peak_inds_ttest', 'to_delete'], {}), '(peak_inds_ttest, to_delete)\n', (12065, 12093), True, 'import numpy as np\n'), ((12544, 12576), 'pysam.faidx', 'pysam.faidx', (['genome', 'start_coord'], {}), '(genome, start_coord)\n', (12555, 12576), False, 'import pysam\n'), ((12716, 12746), 'pysam.faidx', 'pysam.faidx', (['genome', 'end_coord'], {}), '(genome, end_coord)\n', (12727, 12746), False, 'import pysam\n'), ((13763, 13814), 'loguru.logger.info', 'logger.info', (['"""inferred strand: {}"""', 'strand_inferred'], {}), "('inferred strand: {}', strand_inferred)\n", (13774, 13814), False, 'from loguru import logger\n'), ((13817, 13923), 'loguru.logger.info', 'logger.info', (['"""counts: minus {} {} {} {} {}"""', 'count_minus', '"""plus"""', 'count_plus', '"""unknown"""', 'count_unknown'], {}), "('counts: minus {} {} {} {} {}', count_minus, 'plus', count_plus,\n 'unknown', count_unknown)\n", (13828, 13923), False, 'from loguru import logger\n'), ((14381, 14428), 'loguru.logger.error', 'logger.error', (['"""EXIT: Please provide --input_bg"""'], {}), "('EXIT: Please provide --input_bg')\n", (14393, 14428), 
False, 'from loguru import logger\n'), ((14431, 14442), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (14439, 14442), False, 'import sys\n'), ((14469, 14521), 'loguru.logger.error', 'logger.error', (['"""EXIT: Please provide --input_regions"""'], {}), "('EXIT: Please provide --input_regions')\n", (14481, 14521), False, 'from loguru import logger\n'), ((14524, 14535), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (14532, 14535), False, 'import sys\n'), ((14555, 14600), 'loguru.logger.error', 'logger.error', (['"""EXIT: Please provide --output"""'], {}), "('EXIT: Please provide --output')\n", (14567, 14600), False, 'from loguru import logger\n'), ((14603, 14614), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (14611, 14614), False, 'import sys\n'), ((14657, 14747), 'loguru.logger.error', 'logger.error', (['"""EXIT: for non-strand-specific RNA-Seq, please provide a --genome file"""'], {}), "(\n 'EXIT: for non-strand-specific RNA-Seq, please provide a --genome file')\n", (14669, 14747), False, 'from loguru import logger\n'), ((14745, 14756), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (14753, 14756), False, 'import sys\n'), ((14824, 14848), 'os.path.exists', 'os.path.exists', (['plot_dir'], {}), '(plot_dir)\n', (14838, 14848), False, 'import os\n'), ((53440, 53555), 'loguru.logger.debug', 'logger.debug', (['"""{} total TUs with reads, {} annotated"""', 'count_genes_with_reads', 'count_genes_with_reads_annotated'], {}), "('{} total TUs with reads, {} annotated',\n count_genes_with_reads, count_genes_with_reads_annotated)\n", (53452, 53555), False, 'from loguru import logger\n'), ((53554, 53746), 'loguru.logger.debug', 'logger.debug', (['"""{} total TUs filtered by min TU length >= {} ({} annotated), {} kept"""', 'count_min_length_filter', 'min_length', 'count_min_length_filter_annotated', 'count_min_length_keep'], {}), "(\n '{} total TUs filtered by min TU length >= {} ({} annotated), {} kept',\n count_min_length_filter, min_length, 
count_min_length_filter_annotated,\n count_min_length_keep)\n", (53566, 53746), False, 'from loguru import logger\n'), ((53736, 53922), 'loguru.logger.debug', 'logger.debug', (['"""{} total TUs filtered by min expression >= {}, ({} annotated), {} kept"""', 'count_min_expn_filter', 'min_expn', 'count_min_expn_filter_annotated', 'count_min_expn_keep'], {}), "(\n '{} total TUs filtered by min expression >= {}, ({} annotated), {} kept',\n count_min_expn_filter, min_expn, count_min_expn_filter_annotated,\n count_min_expn_keep)\n", (53748, 53922), False, 'from loguru import logger\n'), ((53912, 53984), 'loguru.logger.debug', 'logger.debug', (['"""{} total TUs with ksp >= {}"""', 'count_high_ksp', 'test_thresh'], {}), "('{} total TUs with ksp >= {}', count_high_ksp, test_thresh)\n", (53924, 53984), False, 'from loguru import logger\n'), ((53987, 54072), 'loguru.logger.debug', 'logger.debug', (['"""{} total TUs filtered by having max CVS = 0"""', 'count_vert_sum_max0'], {}), "('{} total TUs filtered by having max CVS = 0', count_vert_sum_max0\n )\n", (53999, 54072), False, 'from loguru import logger\n'), ((54070, 54156), 'loguru.logger.debug', 'logger.debug', (['"""{} total TUs filtered by having max CVS2 = 0"""', 'count_vert2_sum_max0'], {}), "('{} total TUs filtered by having max CVS2 = 0',\n count_vert2_sum_max0)\n", (54082, 54156), False, 'from loguru import logger\n'), ((54155, 54248), 'loguru.logger.debug', 'logger.debug', (['"""{} total TUs filtered by having no cps passing ttest"""', 'count_no_cps_ttest0'], {}), "('{} total TUs filtered by having no cps passing ttest',\n count_no_cps_ttest0)\n", (54167, 54248), False, 'from loguru import logger\n'), ((54247, 54312), 'loguru.logger.debug', 'logger.debug', (['"""{} total filtered by filters 1-3"""', 'count_filter123'], {}), "('{} total filtered by filters 1-3', count_filter123)\n", (54259, 54312), False, 'from loguru import logger\n'), ((54315, 54406), 'loguru.logger.debug', 'logger.debug', (['"""{} total TUs with no 
cps after all filters"""', 'count_no_cps_afterfiltering'], {}), "('{} total TUs with no cps after all filters',\n count_no_cps_afterfiltering)\n", (54327, 54406), False, 'from loguru import logger\n'), ((54407, 54472), 'loguru.logger.debug', 'logger.debug', (['"""{} total TUs with change points"""', 'count_cps_called'], {}), "('{} total TUs with change points', count_cps_called)\n", (54419, 54472), False, 'from loguru import logger\n'), ((3922, 3963), 'numpy.linspace', 'np.linspace', (['(0)', 'xmax', 'vert_sum_array.size'], {}), '(0, xmax, vert_sum_array.size)\n', (3933, 3963), True, 'import numpy as np\n'), ((5549, 5568), 'bisect.bisect_left', 'bisect_left', (['s', 'old'], {}), '(s, old)\n', (5560, 5568), False, 'from bisect import bisect_left, insort\n'), ((8874, 8953), 'peakutils.peak.indexes', 'peakutils.peak.indexes', (['window'], {'thres': 'peak_thresh', 'min_dist': '(denoise_winsize * 2)'}), '(window, thres=peak_thresh, min_dist=denoise_winsize * 2)\n', (8896, 8953), False, 'import peakutils\n'), ((10215, 10239), 'numpy.sort', 'np.sort', (['peak_inds_ttest'], {}), '(peak_inds_ttest)\n', (10222, 10239), True, 'import numpy as np\n'), ((14770, 14794), 'os.path.splitext', 'os.path.splitext', (['output'], {}), '(output)\n', (14786, 14794), False, 'import os\n'), ((14869, 14889), 'os.listdir', 'os.listdir', (['plot_dir'], {}), '(plot_dir)\n', (14879, 14889), False, 'import os\n'), ((14950, 14968), 'os.mkdir', 'os.mkdir', (['plot_dir'], {}), '(plot_dir)\n', (14958, 14968), False, 'import os\n'), ((15079, 15103), 'os.path.splitext', 'os.path.splitext', (['output'], {}), '(output)\n', (15095, 15103), False, 'import os\n'), ((4383, 4395), 'matplotlib.pyplot.gcf', 'pyplot.gcf', ([], {}), '()\n', (4393, 4395), True, 'import matplotlib.pyplot as pyplot\n'), ((9016, 9105), 'peakutils.peak.indexes', 'peakutils.peak.indexes', (['(-1 * window)'], {'thres': 'peak_thresh', 'min_dist': '(denoise_winsize * 2)'}), '(-1 * window, thres=peak_thresh, min_dist=\n denoise_winsize * 
2)\n', (9038, 9105), False, 'import peakutils\n'), ((9127, 9138), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9135, 9138), False, 'import sys\n'), ((9279, 9329), 'loguru.logger.error', 'logger.error', (['"""too many new peak indexes detected"""'], {}), "('too many new peak indexes detected')\n", (9291, 9329), False, 'from loguru import logger\n'), ((9407, 9418), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9415, 9418), False, 'import sys\n'), ((10615, 10646), 'numpy.mean', 'np.mean', (['cov_list[u5cp:next_cp]'], {}), '(cov_list[u5cp:next_cp])\n', (10622, 10646), True, 'import numpy as np\n'), ((10663, 10694), 'numpy.mean', 'np.mean', (['cov_list[prev_cp:u5cp]'], {}), '(cov_list[prev_cp:u5cp])\n', (10670, 10694), True, 'import numpy as np\n'), ((11372, 11403), 'numpy.mean', 'np.mean', (['cov_list[u3cp:next_cp]'], {}), '(cov_list[u3cp:next_cp])\n', (11379, 11403), True, 'import numpy as np\n'), ((11420, 11451), 'numpy.mean', 'np.mean', (['cov_list[prev_cp:u3cp]'], {}), '(cov_list[prev_cp:u3cp])\n', (11427, 11451), True, 'import numpy as np\n'), ((17721, 17744), 'numpy.zeros', 'np.zeros', (['(aend - astart)'], {}), '(aend - astart)\n', (17729, 17744), True, 'import numpy as np\n'), ((1884, 1904), 'pybedtools.BedTool', 'pb.BedTool', (['plus_bed'], {}), '(plus_bed)\n', (1894, 1904), True, 'import pybedtools as pb\n'), ((2029, 2050), 'pybedtools.BedTool', 'pb.BedTool', (['minus_bed'], {}), '(minus_bed)\n', (2039, 2050), True, 'import pybedtools as pb\n'), ((2481, 2498), 'pybedtools.BedTool', 'pb.BedTool', (['genes'], {}), '(genes)\n', (2491, 2498), True, 'import pybedtools as pb\n'), ((10036, 10091), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['cov_before', 'cov_after'], {'equal_var': '(False)'}), '(cov_before, cov_after, equal_var=False)\n', (10051, 10091), False, 'from scipy import stats\n'), ((14336, 14350), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14348, 14350), False, 'from datetime import datetime\n'), ((14905, 14937), 'os.path.join', 
'os.path.join', (['plot_dir', 'filename'], {}), '(plot_dir, filename)\n', (14917, 14937), False, 'import os\n'), ((15045, 15059), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15057, 15059), False, 'from datetime import datetime\n'), ((15434, 15448), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15446, 15448), False, 'from datetime import datetime\n'), ((15833, 15895), 'loguru.logger.error', 'logger.error', (['"""EXIT: did not recognize junction file format\n"""'], {}), "('EXIT: did not recognize junction file format\\n')\n", (15845, 15895), False, 'from loguru import logger\n'), ((15900, 15911), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (15908, 15911), False, 'import sys\n'), ((16036, 16063), 'loguru.logger.error', 'logger.error', (['"""seen intron"""'], {}), "('seen intron')\n", (16048, 16063), False, 'from loguru import logger\n'), ((16069, 16080), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (16077, 16080), False, 'import sys\n'), ((16618, 16632), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16630, 16632), False, 'from datetime import datetime\n'), ((54561, 54575), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (54573, 54575), False, 'from datetime import datetime\n'), ((1679, 1729), 'loguru.logger.error', 'logger.error', (["('do not recognize strand: ' + strand)"], {}), "('do not recognize strand: ' + strand)\n", (1691, 1729), False, 'from loguru import logger\n'), ((1735, 1753), 'loguru.logger.error', 'logger.error', (['line'], {}), '(line)\n', (1747, 1753), False, 'from loguru import logger\n'), ((1759, 1770), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1767, 1770), False, 'import sys\n'), ((9931, 9949), 'numpy.std', 'np.std', (['cov_before'], {}), '(cov_before)\n', (9937, 9949), True, 'import numpy as np\n'), ((9960, 9977), 'numpy.std', 'np.std', (['cov_after'], {}), '(cov_after)\n', (9966, 9977), True, 'import numpy as np\n'), ((10931, 10970), 'numpy.mean', 'np.mean', 
(['cov_list[utr5[prev_u]:next_cp]'], {}), '(cov_list[utr5[prev_u]:next_cp])\n', (10938, 10970), True, 'import numpy as np\n'), ((11723, 11762), 'numpy.mean', 'np.mean', (['cov_list[utr3[prev_u]:next_cp]'], {}), '(cov_list[utr3[prev_u]:next_cp])\n', (11730, 11762), True, 'import numpy as np\n'), ((17245, 17311), 'loguru.logger.error', 'logger.error', (['"""EXIT: do not recognize bedgraph intersect format\n"""'], {}), "('EXIT: do not recognize bedgraph intersect format\\n')\n", (17257, 17311), False, 'from loguru import logger\n'), ((17317, 17335), 'loguru.logger.error', 'logger.error', (['line'], {}), '(line)\n', (17329, 17335), False, 'from loguru import logger\n'), ((17341, 17352), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (17349, 17352), False, 'import sys\n'), ((18087, 18113), 'numpy.nonzero', 'np.nonzero', (['prev_cov_array'], {}), '(prev_cov_array)\n', (18097, 18113), True, 'import numpy as np\n'), ((18135, 18161), 'numpy.nonzero', 'np.nonzero', (['prev_cov_array'], {}), '(prev_cov_array)\n', (18145, 18161), True, 'import numpy as np\n'), ((20397, 20420), 'numpy.zeros', 'np.zeros', (['(aend - astart)'], {}), '(aend - astart)\n', (20405, 20420), True, 'import numpy as np\n'), ((21845, 21884), 'loguru.logger.debug', 'logger.debug', (['"""junctions: {}"""', 'jxn_list'], {}), "('junctions: {}', jxn_list)\n", (21857, 21884), False, 'from loguru import logger\n'), ((21892, 21928), 'loguru.logger.debug', 'logger.debug', (['"""exons: {}"""', 'exon_list'], {}), "('exons: {}', exon_list)\n", (21904, 21928), False, 'from loguru import logger\n'), ((21936, 21979), 'loguru.logger.debug', 'logger.debug', (['"""strand: {}"""', 'strand_inferred'], {}), "('strand: {}', strand_inferred)\n", (21948, 21979), False, 'from loguru import logger\n'), ((21987, 22045), 'loguru.logger.debug', 'logger.debug', (['"""{} {}"""', 'cov_avg_exon_with_utr', 'max_exon_cov'], {}), "('{} {}', cov_avg_exon_with_utr, max_exon_cov)\n", (21999, 22045), False, 'from loguru import logger\n'), 
((22679, 22709), 'os.path.join', 'os.path.join', (['plot_dir', 'geneid'], {}), '(plot_dir, geneid)\n', (22691, 22709), False, 'import os\n'), ((47493, 47511), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['out_plot'], {}), '(out_plot)\n', (47501, 47511), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((47525, 47554), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (47538, 47554), True, 'import matplotlib.pyplot as pyplot\n'), ((47562, 47590), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y'], {'color': '"""k"""'}), "(x, y, color='k')\n", (47573, 47590), True, 'import matplotlib.pyplot as pyplot\n'), ((47598, 47628), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Position (kb)"""'], {}), "('Position (kb)')\n", (47611, 47628), True, 'import matplotlib.pyplot as pyplot\n'), ((47636, 47661), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Coverage"""'], {}), "('Coverage')\n", (47649, 47661), True, 'import matplotlib.pyplot as pyplot\n'), ((47767, 47784), 'matplotlib.pyplot.close', 'pyplot.close', (['fig'], {}), '(fig)\n', (47779, 47784), True, 'import matplotlib.pyplot as pyplot\n'), ((48547, 48568), 'numpy.asarray', 'np.asarray', (['cov_array'], {}), '(cov_array)\n', (48557, 48568), True, 'import numpy as np\n'), ((48667, 48685), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['out_plot'], {}), '(out_plot)\n', (48675, 48685), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((48699, 48728), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (48712, 48728), True, 'import matplotlib.pyplot as pyplot\n'), ((48736, 48764), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y'], {'color': '"""k"""'}), "(x, y, color='k')\n", (48747, 48764), True, 'import matplotlib.pyplot as pyplot\n'), ((49008, 49038), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Position (kb)"""'], {}), "('Position (kb)')\n", (49021, 49038), 
True, 'import matplotlib.pyplot as pyplot\n'), ((49046, 49071), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Coverage"""'], {}), "('Coverage')\n", (49059, 49071), True, 'import matplotlib.pyplot as pyplot\n'), ((49177, 49194), 'matplotlib.pyplot.close', 'pyplot.close', (['fig'], {}), '(fig)\n', (49189, 49194), True, 'import matplotlib.pyplot as pyplot\n'), ((53298, 53354), 'loguru.logger.debug', 'logger.debug', (['"""gene length <"""', 'min_length', '"""-> skipping"""'], {}), "('gene length <', min_length, '-> skipping')\n", (53310, 53354), False, 'from loguru import logger\n'), ((19318, 19344), 'numpy.nonzero', 'np.nonzero', (['prev_cov_array'], {}), '(prev_cov_array)\n', (19328, 19344), True, 'import numpy as np\n'), ((19367, 19393), 'numpy.nonzero', 'np.nonzero', (['prev_cov_array'], {}), '(prev_cov_array)\n', (19377, 19393), True, 'import numpy as np\n'), ((19810, 19836), 'numpy.nonzero', 'np.nonzero', (['prev_cov_array'], {}), '(prev_cov_array)\n', (19820, 19836), True, 'import numpy as np\n'), ((19859, 19885), 'numpy.nonzero', 'np.nonzero', (['prev_cov_array'], {}), '(prev_cov_array)\n', (19869, 19885), True, 'import numpy as np\n'), ((23470, 23535), 'loguru.logger.debug', 'logger.debug', (['"""de-noising window sizes: {}"""', 'denoise_winsize_list'], {}), "('de-noising window sizes: {}', denoise_winsize_list)\n", (23482, 23535), False, 'from loguru import logger\n'), ((23649, 23706), 'loguru.logger.debug', 'logger.debug', (['"""amplitude thresholds: {}"""', 'amp_thresh_list'], {}), "('amplitude thresholds: {}', amp_thresh_list)\n", (23661, 23706), False, 'from loguru import logger\n'), ((23816, 23882), 'loguru.logger.debug', 'logger.debug', (['"""min distance between peaks: {}"""', 'peak_min_dist_list'], {}), "('min distance between peaks: {}', peak_min_dist_list)\n", (23828, 23882), False, 'from loguru import logger\n'), ((47009, 47064), 'loguru.logger.debug', 'logger.debug', (['"""no change points called: KS p = {}"""', 'ksp'], {}), "('no change 
points called: KS p = {}', ksp)\n", (47021, 47064), False, 'from loguru import logger\n'), ((47240, 47264), 'loguru.logger.debug', 'logger.debug', (['"""plotting"""'], {}), "('plotting')\n", (47252, 47264), False, 'from loguru import logger\n'), ((49489, 49510), 'numpy.asarray', 'np.asarray', (['cov_array'], {}), '(cov_array)\n', (49499, 49510), True, 'import numpy as np\n'), ((49616, 49634), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['out_plot'], {}), '(out_plot)\n', (49624, 49634), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((49649, 49678), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (49662, 49678), True, 'import matplotlib.pyplot as pyplot\n'), ((49687, 49715), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y'], {'color': '"""k"""'}), "(x, y, color='k')\n", (49698, 49715), True, 'import matplotlib.pyplot as pyplot\n'), ((50051, 50081), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Position (kb)"""'], {}), "('Position (kb)')\n", (50064, 50081), True, 'import matplotlib.pyplot as pyplot\n'), ((50090, 50115), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Coverage"""'], {}), "('Coverage')\n", (50103, 50115), True, 'import matplotlib.pyplot as pyplot\n'), ((50225, 50242), 'matplotlib.pyplot.close', 'pyplot.close', (['fig'], {}), '(fig)\n', (50237, 50242), True, 'import matplotlib.pyplot as pyplot\n'), ((50474, 50495), 'numpy.asarray', 'np.asarray', (['cov_array'], {}), '(cov_array)\n', (50484, 50495), True, 'import numpy as np\n'), ((50602, 50620), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['out_plot'], {}), '(out_plot)\n', (50610, 50620), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((50635, 50664), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (50648, 50664), True, 'import matplotlib.pyplot as pyplot\n'), ((50673, 50701), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y'], 
{'color': '"""k"""'}), "(x, y, color='k')\n", (50684, 50701), True, 'import matplotlib.pyplot as pyplot\n'), ((51072, 51102), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Position (kb)"""'], {}), "('Position (kb)')\n", (51085, 51102), True, 'import matplotlib.pyplot as pyplot\n'), ((51111, 51136), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Coverage"""'], {}), "('Coverage')\n", (51124, 51136), True, 'import matplotlib.pyplot as pyplot\n'), ((51246, 51263), 'matplotlib.pyplot.close', 'pyplot.close', (['fig'], {}), '(fig)\n', (51258, 51263), True, 'import matplotlib.pyplot as pyplot\n'), ((51627, 51645), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['out_plot'], {}), '(out_plot)\n', (51635, 51645), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((51660, 51689), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (51673, 51689), True, 'import matplotlib.pyplot as pyplot\n'), ((51698, 51727), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y1'], {'color': '"""b"""'}), "(x, y1, color='b')\n", (51709, 51727), True, 'import matplotlib.pyplot as pyplot\n'), ((51736, 51765), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y2'], {'color': '"""g"""'}), "(x, y2, color='g')\n", (51747, 51765), True, 'import matplotlib.pyplot as pyplot\n'), ((51774, 51803), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y3'], {'color': '"""c"""'}), "(x, y3, color='c')\n", (51785, 51803), True, 'import matplotlib.pyplot as pyplot\n'), ((51812, 51841), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y4'], {'color': '"""k"""'}), "(x, y4, color='k')\n", (51823, 51841), True, 'import matplotlib.pyplot as pyplot\n'), ((51927, 51957), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Position (kb)"""'], {}), "('Position (kb)')\n", (51940, 51957), True, 'import matplotlib.pyplot as pyplot\n'), ((51966, 52004), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Distance to line y=ax"""'], {}), "('Distance to 
line y=ax')\n", (51979, 52004), True, 'import matplotlib.pyplot as pyplot\n'), ((52114, 52131), 'matplotlib.pyplot.close', 'pyplot.close', (['fig'], {}), '(fig)\n', (52126, 52131), True, 'import matplotlib.pyplot as pyplot\n'), ((52471, 52489), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['out_plot'], {}), '(out_plot)\n', (52479, 52489), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((52504, 52533), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (52517, 52533), True, 'import matplotlib.pyplot as pyplot\n'), ((52542, 52571), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y1'], {'color': '"""b"""'}), "(x, y1, color='b')\n", (52553, 52571), True, 'import matplotlib.pyplot as pyplot\n'), ((52580, 52609), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y2'], {'color': '"""g"""'}), "(x, y2, color='g')\n", (52591, 52609), True, 'import matplotlib.pyplot as pyplot\n'), ((52618, 52647), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y3'], {'color': '"""c"""'}), "(x, y3, color='c')\n", (52629, 52647), True, 'import matplotlib.pyplot as pyplot\n'), ((52656, 52685), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y4'], {'color': '"""k"""'}), "(x, y4, color='k')\n", (52667, 52685), True, 'import matplotlib.pyplot as pyplot\n'), ((52943, 52973), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Position (kb)"""'], {}), "('Position (kb)')\n", (52956, 52973), True, 'import matplotlib.pyplot as pyplot\n'), ((52982, 53020), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Distance to line y=ax"""'], {}), "('Distance to line y=ax')\n", (52995, 53020), True, 'import matplotlib.pyplot as pyplot\n'), ((53130, 53147), 'matplotlib.pyplot.close', 'pyplot.close', (['fig'], {}), '(fig)\n', (53142, 53147), True, 'import matplotlib.pyplot as pyplot\n'), ((22260, 22353), 'loguru.logger.debug', 'logger.debug', (['"""did not meet min expression level: {} {}"""', 'min_expn', 'cov_avg_exon_with_utr'], {}), 
"('did not meet min expression level: {} {}', min_expn,\n cov_avg_exon_with_utr)\n", (22272, 22353), False, 'from loguru import logger\n'), ((47669, 47681), 'matplotlib.pyplot.gcf', 'pyplot.gcf', ([], {}), '()\n', (47679, 47681), True, 'import matplotlib.pyplot as pyplot\n'), ((48894, 48926), 'matplotlib.pyplot.axvline', 'pyplot.axvline', ([], {'x': 'seg', 'color': '"""r"""'}), "(x=seg, color='r')\n", (48908, 48926), True, 'import matplotlib.pyplot as pyplot\n'), ((49079, 49091), 'matplotlib.pyplot.gcf', 'pyplot.gcf', ([], {}), '()\n', (49089, 49091), True, 'import matplotlib.pyplot as pyplot\n'), ((20972, 20986), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (20984, 20986), False, 'from datetime import datetime\n'), ((45575, 45622), 'loguru.logger.debug', 'logger.debug', (['"""{} {}"""', 'cov_array.size', 'peak_ind'], {}), "('{} {}', cov_array.size, peak_ind)\n", (45587, 45622), False, 'from loguru import logger\n'), ((45633, 45644), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (45641, 45644), False, 'import sys\n'), ((49849, 49881), 'matplotlib.pyplot.axvline', 'pyplot.axvline', ([], {'x': 'seg', 'color': '"""r"""'}), "(x=seg, color='r')\n", (49863, 49881), True, 'import matplotlib.pyplot as pyplot\n'), ((50124, 50136), 'matplotlib.pyplot.gcf', 'pyplot.gcf', ([], {}), '()\n', (50134, 50136), True, 'import matplotlib.pyplot as pyplot\n'), ((50835, 50867), 'matplotlib.pyplot.axvline', 'pyplot.axvline', ([], {'x': 'seg', 'color': '"""r"""'}), "(x=seg, color='r')\n", (50849, 50867), True, 'import matplotlib.pyplot as pyplot\n'), ((51145, 51157), 'matplotlib.pyplot.gcf', 'pyplot.gcf', ([], {}), '()\n', (51155, 51157), True, 'import matplotlib.pyplot as pyplot\n'), ((52013, 52025), 'matplotlib.pyplot.gcf', 'pyplot.gcf', ([], {}), '()\n', (52023, 52025), True, 'import matplotlib.pyplot as pyplot\n'), ((52825, 52857), 'matplotlib.pyplot.axvline', 'pyplot.axvline', ([], {'x': 'seg', 'color': '"""r"""'}), "(x=seg, color='r')\n", (52839, 52857), True, 
'import matplotlib.pyplot as pyplot\n'), ((53029, 53041), 'matplotlib.pyplot.gcf', 'pyplot.gcf', ([], {}), '()\n', (53039, 53041), True, 'import matplotlib.pyplot as pyplot\n'), ((24733, 24833), 'loguru.logger.debug', 'logger.debug', (['"""skipping higher amplitude thresholds because previous gave 0 change points"""'], {}), "(\n 'skipping higher amplitude thresholds because previous gave 0 change points'\n )\n", (24745, 24833), False, 'from loguru import logger\n'), ((43225, 43316), 'loguru.logger.debug', 'logger.debug', (['"""removing left end: {} {}"""', 'ru_all[0]', '(peak_inds_ttest_opt[0] + new_start)'], {}), "('removing left end: {} {}', ru_all[0], peak_inds_ttest_opt[0] +\n new_start)\n", (43237, 43316), False, 'from loguru import logger\n'), ((25526, 25621), 'peakutils.peak.indexes', 'peakutils.peak.indexes', (['line_dist_array2_denoise'], {'thres': 'amp_thresh', 'min_dist': 'peak_min_dist'}), '(line_dist_array2_denoise, thres=amp_thresh, min_dist\n =peak_min_dist)\n', (25548, 25621), False, 'import peakutils\n'), ((25646, 25745), 'peakutils.peak.indexes', 'peakutils.peak.indexes', (['(-1 * line_dist_array2_denoise)'], {'thres': 'amp_thresh', 'min_dist': 'peak_min_dist'}), '(-1 * line_dist_array2_denoise, thres=amp_thresh,\n min_dist=peak_min_dist)\n', (25668, 25745), False, 'import peakutils\n'), ((26944, 27038), 'peakutils.peak.indexes', 'peakutils.peak.indexes', (['line_dist_array_denoise'], {'thres': 'amp_thresh', 'min_dist': 'peak_min_dist'}), '(line_dist_array_denoise, thres=amp_thresh, min_dist=\n peak_min_dist)\n', (26966, 27038), False, 'import peakutils\n'), ((27062, 27160), 'peakutils.peak.indexes', 'peakutils.peak.indexes', (['(-1 * line_dist_array_denoise)'], {'thres': 'amp_thresh', 'min_dist': 'peak_min_dist'}), '(-1 * line_dist_array_denoise, thres=amp_thresh,\n min_dist=peak_min_dist)\n', (27084, 27160), False, 'import peakutils\n'), ((31793, 31824), 'numpy.copy', 'np.copy', (['peak_inds2_ip_combined'], {}), '(peak_inds2_ip_combined)\n', 
(31800, 31824), True, 'import numpy as np\n'), ((31878, 31919), 'numpy.unique', 'np.unique', (['peak_inds_ip_combined_filtered'], {}), '(peak_inds_ip_combined_filtered)\n', (31887, 31919), True, 'import numpy as np\n'), ((32077, 32121), 'loguru.logger.debug', 'logger.debug', (['peak_inds_ip_combined_filtered'], {}), '(peak_inds_ip_combined_filtered)\n', (32089, 32121), False, 'from loguru import logger\n'), ((33664, 33701), 'numpy.delete', 'np.delete', (['peak_inds_ttest', 'to_delete'], {}), '(peak_inds_ttest, to_delete)\n', (33673, 33701), True, 'import numpy as np\n'), ((34808, 34870), 'numpy.asarray', 'np.asarray', (['[x for x in peak_inds_ttest if x not in to_delete]'], {}), '([x for x in peak_inds_ttest if x not in to_delete])\n', (34818, 34870), True, 'import numpy as np\n'), ((43509, 43604), 'loguru.logger.debug', 'logger.debug', (['"""removing right end: {} {}"""', 'ru_all[-1]', '(peak_inds_ttest_opt[-1] + new_start)'], {}), "('removing right end: {} {}', ru_all[-1], peak_inds_ttest_opt[-\n 1] + new_start)\n", (43521, 43604), False, 'from loguru import logger\n'), ((24352, 24366), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (24364, 24366), False, 'from datetime import datetime\n'), ((25401, 25436), 'numpy.unique', 'np.unique', (['line_dist_array2_denoise'], {}), '(line_dist_array2_denoise)\n', (25410, 25436), True, 'import numpy as np\n'), ((26202, 26245), 'numpy.append', 'np.append', (['peak_inds2_ip', 'peak_inds2_min_ip'], {}), '(peak_inds2_ip, peak_inds2_min_ip)\n', (26211, 26245), True, 'import numpy as np\n'), ((26814, 26848), 'numpy.unique', 'np.unique', (['line_dist_array_denoise'], {}), '(line_dist_array_denoise)\n', (26823, 26848), True, 'import numpy as np\n'), ((27610, 27651), 'numpy.append', 'np.append', (['peak_inds_ip', 'peak_inds_min_ip'], {}), '(peak_inds_ip, peak_inds_min_ip)\n', (27619, 27651), True, 'import numpy as np\n'), ((28883, 28913), 'numpy.copy', 'np.copy', (['peak_inds_ip_combined'], {}), 
'(peak_inds_ip_combined)\n', (28890, 28913), True, 'import numpy as np\n'), ((38537, 38584), 'numpy.delete', 'np.delete', (['peak_inds_ttest_with_ends', 'to_delete'], {}), '(peak_inds_ttest_with_ends, to_delete)\n', (38546, 38584), True, 'import numpy as np\n'), ((40126, 40165), 'loguru.logger.debug', 'logger.debug', (['"""no change points called"""'], {}), "('no change points called')\n", (40138, 40165), False, 'from loguru import logger\n'), ((41880, 41944), 'loguru.logger.debug', 'logger.debug', (['"""no change points found: total passing t-test = 0"""'], {}), "('no change points found: total passing t-test = 0')\n", (41892, 41944), False, 'from loguru import logger\n'), ((42299, 42313), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (42311, 42313), False, 'from datetime import datetime\n'), ((42617, 42631), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (42629, 42631), False, 'from datetime import datetime\n'), ((28219, 28254), 'numpy.unique', 'np.unique', (['line_dist_array2_denoise'], {}), '(line_dist_array2_denoise)\n', (28228, 28254), True, 'import numpy as np\n'), ((34251, 34290), 'numpy.mean', 'np.mean', (['cov_array[peak:this_range_end]'], {}), '(cov_array[peak:this_range_end])\n', (34258, 34290), True, 'import numpy as np\n'), ((34317, 34361), 'numpy.mean', 'np.mean', (['cov_array[prev_peak:prev_range_end]'], {}), '(cov_array[prev_peak:prev_range_end])\n', (34324, 34361), True, 'import numpy as np\n'), ((34385, 34431), 'math.log', 'math.log', (['((this_mean + 1) / (prev_mean + 1))', '(2)'], {}), '((this_mean + 1) / (prev_mean + 1), 2)\n', (34393, 34431), False, 'import math\n'), ((38253, 38299), 'math.log', 'math.log', (['((this_mean + 1) / (prev_mean + 1))', '(2)'], {}), '((this_mean + 1) / (prev_mean + 1), 2)\n', (38261, 38299), False, 'import math\n'), ((39536, 39582), 'math.log', 'math.log', (['((this_mean + 1) / (prev_mean + 1))', '(2)'], {}), '((this_mean + 1) / (prev_mean + 1), 2)\n', (39544, 39582), False, 'import 
math\n'), ((45541, 45555), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (45553, 45555), False, 'from datetime import datetime\n'), ((28761, 28817), 'numpy.append', 'np.append', (['peak_inds2_ip_combined', 'peak_inds_ip_combined'], {}), '(peak_inds2_ip_combined, peak_inds_ip_combined)\n', (28770, 28817), True, 'import numpy as np\n'), ((34464, 34485), 'math.log', 'math.log', (['fcthresh', '(2)'], {}), '(fcthresh, 2)\n', (34472, 34485), False, 'import math\n'), ((37596, 37628), 'numpy.mean', 'np.mean', (['cov_array[ind:next_cut]'], {}), '(cov_array[ind:next_cut])\n', (37603, 37628), True, 'import numpy as np\n'), ((37683, 37714), 'numpy.std', 'np.std', (['cov_array[ind:next_cut]'], {}), '(cov_array[ind:next_cut])\n', (37689, 37714), True, 'import numpy as np\n'), ((38879, 38911), 'numpy.mean', 'np.mean', (['cov_array[ind:next_cut]'], {}), '(cov_array[ind:next_cut])\n', (38886, 38911), True, 'import numpy as np\n'), ((38966, 38997), 'numpy.std', 'np.std', (['cov_array[ind:next_cut]'], {}), '(cov_array[ind:next_cut])\n', (38972, 38997), True, 'import numpy as np\n'), ((25239, 25253), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (25251, 25253), False, 'from datetime import datetime\n'), ((32041, 32055), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (32053, 32055), False, 'from datetime import datetime\n'), ((35701, 35728), 'numpy.asarray', 'np.asarray', (['peak_inds_ttest'], {}), '(peak_inds_ttest)\n', (35711, 35728), True, 'import numpy as np\n'), ((37858, 37890), 'numpy.mean', 'np.mean', (['cov_array[prev_cut:ind]'], {}), '(cov_array[prev_cut:ind])\n', (37865, 37890), True, 'import numpy as np\n'), ((37943, 37974), 'numpy.std', 'np.std', (['cov_array[prev_cut:ind]'], {}), '(cov_array[prev_cut:ind])\n', (37949, 37974), True, 'import numpy as np\n'), ((38022, 38054), 'numpy.mean', 'np.mean', (['cov_array[ind:next_cut]'], {}), '(cov_array[ind:next_cut])\n', (38029, 38054), True, 'import numpy as np\n'), ((38082, 38114), 
'numpy.mean', 'np.mean', (['cov_array[prev_cut:ind]'], {}), '(cov_array[prev_cut:ind])\n', (38089, 38114), True, 'import numpy as np\n'), ((38140, 38171), 'numpy.std', 'np.std', (['cov_array[ind:next_cut]'], {}), '(cov_array[ind:next_cut])\n', (38146, 38171), True, 'import numpy as np\n'), ((38197, 38228), 'numpy.std', 'np.std', (['cov_array[prev_cut:ind]'], {}), '(cov_array[prev_cut:ind])\n', (38203, 38228), True, 'import numpy as np\n'), ((38332, 38353), 'math.log', 'math.log', (['fcthresh', '(2)'], {}), '(fcthresh, 2)\n', (38340, 38353), False, 'import math\n'), ((39141, 39173), 'numpy.mean', 'np.mean', (['cov_array[prev_cut:ind]'], {}), '(cov_array[prev_cut:ind])\n', (39148, 39173), True, 'import numpy as np\n'), ((39226, 39257), 'numpy.std', 'np.std', (['cov_array[prev_cut:ind]'], {}), '(cov_array[prev_cut:ind])\n', (39232, 39257), True, 'import numpy as np\n'), ((39305, 39337), 'numpy.mean', 'np.mean', (['cov_array[ind:next_cut]'], {}), '(cov_array[ind:next_cut])\n', (39312, 39337), True, 'import numpy as np\n'), ((39365, 39397), 'numpy.mean', 'np.mean', (['cov_array[prev_cut:ind]'], {}), '(cov_array[prev_cut:ind])\n', (39372, 39397), True, 'import numpy as np\n'), ((39423, 39454), 'numpy.std', 'np.std', (['cov_array[ind:next_cut]'], {}), '(cov_array[ind:next_cut])\n', (39429, 39454), True, 'import numpy as np\n'), ((39480, 39511), 'numpy.std', 'np.std', (['cov_array[prev_cut:ind]'], {}), '(cov_array[prev_cut:ind])\n', (39486, 39511), True, 'import numpy as np\n'), ((26343, 26357), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (26355, 26357), False, 'from datetime import datetime\n'), ((26443, 26457), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (26455, 26457), False, 'from datetime import datetime\n'), ((26552, 26566), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (26564, 26566), False, 'from datetime import datetime\n'), ((26665, 26679), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (26677, 26679), False, 
'from datetime import datetime\n'), ((26775, 26789), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (26787, 26789), False, 'from datetime import datetime\n'), ((27749, 27763), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (27761, 27763), False, 'from datetime import datetime\n'), ((27849, 27863), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (27861, 27863), False, 'from datetime import datetime\n'), ((27958, 27972), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (27970, 27972), False, 'from datetime import datetime\n'), ((28071, 28085), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (28083, 28085), False, 'from datetime import datetime\n'), ((28179, 28193), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (28191, 28193), False, 'from datetime import datetime\n'), ((29328, 29358), 'os.path.join', 'os.path.join', (['plot_dir', 'geneid'], {}), '(plot_dir, geneid)\n', (29340, 29358), False, 'import os\n'), ((29637, 29735), 'peakutils.peak.indexes', 'peakutils.peak.indexes', (['temp_line_dist_array_denoise'], {'thres': 'amp_thresh', 'min_dist': 'peak_min_dist'}), '(temp_line_dist_array_denoise, thres=amp_thresh,\n min_dist=peak_min_dist)\n', (29659, 29735), False, 'import peakutils\n'), ((29770, 29873), 'peakutils.peak.indexes', 'peakutils.peak.indexes', (['(-1 * temp_line_dist_array_denoise)'], {'thres': 'amp_thresh', 'min_dist': 'peak_min_dist'}), '(-1 * temp_line_dist_array_denoise, thres=amp_thresh,\n min_dist=peak_min_dist)\n', (29792, 29873), False, 'import peakutils\n'), ((30184, 30235), 'numpy.append', 'np.append', (['temp_peak_inds_ip', 'temp_peak_inds_min_ip'], {}), '(temp_peak_inds_ip, temp_peak_inds_min_ip)\n', (30193, 30235), True, 'import numpy as np\n'), ((30286, 30355), 'numpy.append', 'np.append', (['peak_inds_ip_combined_filtered', 'temp_peak_inds_ip_combined'], {}), '(peak_inds_ip_combined_filtered, temp_peak_inds_ip_combined)\n', (30295, 30355), True, 'import numpy as np\n'), 
((30703, 30733), 'os.path.join', 'os.path.join', (['plot_dir', 'geneid'], {}), '(plot_dir, geneid)\n', (30715, 30733), False, 'import os\n'), ((31012, 31110), 'peakutils.peak.indexes', 'peakutils.peak.indexes', (['temp_line_dist_array_denoise'], {'thres': 'amp_thresh', 'min_dist': 'peak_min_dist'}), '(temp_line_dist_array_denoise, thres=amp_thresh,\n min_dist=peak_min_dist)\n', (31034, 31110), False, 'import peakutils\n'), ((31145, 31248), 'peakutils.peak.indexes', 'peakutils.peak.indexes', (['(-1 * temp_line_dist_array_denoise)'], {'thres': 'amp_thresh', 'min_dist': 'peak_min_dist'}), '(-1 * temp_line_dist_array_denoise, thres=amp_thresh,\n min_dist=peak_min_dist)\n', (31167, 31248), False, 'import peakutils\n'), ((31559, 31610), 'numpy.append', 'np.append', (['temp_peak_inds_ip', 'temp_peak_inds_min_ip'], {}), '(temp_peak_inds_ip, temp_peak_inds_min_ip)\n', (31568, 31610), True, 'import numpy as np\n'), ((31661, 31730), 'numpy.append', 'np.append', (['peak_inds_ip_combined_filtered', 'temp_peak_inds_ip_combined'], {}), '(peak_inds_ip_combined_filtered, temp_peak_inds_ip_combined)\n', (31670, 31730), True, 'import numpy as np\n'), ((32682, 32696), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (32694, 32696), False, 'from datetime import datetime\n'), ((33806, 33820), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (33818, 33820), False, 'from datetime import datetime\n'), ((34997, 35011), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (35009, 35011), False, 'from datetime import datetime\n'), ((36373, 36405), 'numpy.mean', 'np.mean', (['cov_array[ind:next_cut]'], {}), '(cov_array[ind:next_cut])\n', (36380, 36405), True, 'import numpy as np\n'), ((36817, 36849), 'numpy.mean', 'np.mean', (['cov_array[prev_cut:ind]'], {}), '(cov_array[prev_cut:ind])\n', (36824, 36849), True, 'import numpy as np\n'), ((29552, 29591), 'numpy.unique', 'np.unique', (['temp_line_dist_array_denoise'], {}), '(temp_line_dist_array_denoise)\n', (29561, 
29591), True, 'import numpy as np\n'), ((30927, 30966), 'numpy.unique', 'np.unique', (['temp_line_dist_array_denoise'], {}), '(temp_line_dist_array_denoise)\n', (30936, 30966), True, 'import numpy as np\n'), ((35516, 35530), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (35528, 35530), False, 'from datetime import datetime\n')] |
#!/bin/python3
import numpy as np
import psycopg2
from logger import *
def get_vectors(filename, logger, max_count=10**9, normalization=True):
    """Read word embeddings from a text file in word2vec format.

    The file starts with a header line "<size> <dim>", followed by one
    line per word: the word token and then <dim> float components.

    Args:
        filename: path of the embedding file.
        logger: Logger instance used for progress/diagnostic output.
        max_count: stop after reading this many vectors.
        normalization: if True, scale each vector to unit L2 norm.

    Returns:
        (words, vectors, count): words is the list of decoded tokens,
        vectors is a float32 array of shape (size, d) whose first
        `count` rows are filled, and count is the number of rows read.
    """
    # "with" guarantees the file handle is closed on every exit path
    # (the original implementation never closed it).
    with open(filename) as f:
        line_splits = f.readline().split()
        logger.log(Logger.INFO, str(line_splits))
        size = int(line_splits[0])
        d = int(line_splits[1])
        words, vectors, count = [], np.zeros((size, d), dtype='float32'), 0
        logger.log(Logger.INFO, str(count))
        logger.log(Logger.INFO, str(line_splits))
        # Cap at `size` as well: the preallocated array cannot hold more
        # rows than the header announced, even if the file has extra lines.
        while (line_splits) and (count < max_count) and (count < size):
            line = f.readline()
            line_splits = line.split()
            if not line_splits:
                break
            word = line_splits[0]
            vector = []
            for elem in line_splits[1:]:
                try:
                    vector.append(float(elem))
                except ValueError:
                    # Non-numeric token: stop parsing this line. (The
                    # original bare `except:` also hid unrelated errors.)
                    break
            if normalization:
                v_len = np.linalg.norm(vector)
                # Guard against division by zero for all-zero vectors.
                if v_len > 0:
                    vector = [x / v_len for x in vector]
            if (len(vector) == d) and (len(word) < 100):
                vectors[count] = vector
                words.append(word)
                count += 1
            else:
                logger.log(Logger.INFO, 'Can not decode the following line: ' + str(line));
            if count % 10000 == 0:
                logger.log(Logger.INFO, 'Read ' + str(count) + ' vectors')
    return words, vectors, count
def init_tables(con, cur, table_information, logger):
    """Drop (with CASCADE) and re-create every table in table_information.

    table_information is an iterable of (name, schema) pairs, where
    schema is the parenthesized column-definition string.
    """
    # Build one DROP statement covering all listed tables.
    drop_stmt = "DROP TABLE IF EXISTS "
    drop_stmt += "".join(" " + name + "," for name, _ in table_information)
    drop_stmt = drop_stmt[:-1] + " CASCADE;"
    cur.execute(drop_stmt)
    # Commit the drop before re-creating anything.
    con.commit()
    # Re-create each table from its schema, committing one at a time.
    for name, schema in table_information:
        cur.execute("CREATE TABLE " + name + " " + schema + ";")
        con.commit()
        logger.log(Logger.INFO, 'Created new table ' + str(name))
    return
def serialize_vector(vec):
    """Serialize a numeric vector into a Postgres array literal.

    Example: [1, 2.5] -> '{1,2.5}'.

    Fixes the original behavior for an empty vector, which returned
    '}' (the opening brace was stripped by the trailing-comma slice);
    now an empty vector correctly yields '{}'.
    """
    return '{' + ','.join(str(elem) for elem in vec) + '}'
def disable_triggers(table_name, con, cur):
    """Disable all triggers on *table_name* and commit the change."""
    statement = "ALTER TABLE {} DISABLE trigger ALL;".format(table_name)
    cur.execute(statement)
    con.commit()
def enable_triggers(table_name, con, cur):
    """Re-enable all triggers on *table_name* and commit the change."""
    statement = "ALTER TABLE {} ENABLE trigger ALL;".format(table_name)
    cur.execute(statement)
    con.commit()
def create_index(table_name, index_name, column_name, con, cur, logger):
    """(Re)create an index on one column of a table.

    Any existing index with the same name is dropped first.
    """
    cur.execute(f"DROP INDEX IF EXISTS {index_name};")
    con.commit()
    cur.execute(f"CREATE INDEX {index_name} ON {table_name} ({column_name});")
    con.commit()
    logger.log(Logger.INFO, 'Created index ' + str(index_name) + ' on table ' + str(table_name) + ' for column ' + str(column_name))
def create_statistics_table(table_name, column_name, coarse_table_name, con, cur, logger):
    """Call the SQL function create_statistics(...) and commit.

    Uses a parameterized query so the three names are passed as properly
    escaped string literals; the original interpolated them directly into
    the SQL text, which breaks on embedded quotes and is injection-prone.
    """
    cur.execute("SELECT create_statistics(%s, %s, %s)",
                (table_name, column_name, coarse_table_name))
    con.commit()
    logger.log(Logger.INFO, 'Created statistics table')
| [
"numpy.zeros",
"numpy.linalg.norm"
] | [((344, 380), 'numpy.zeros', 'np.zeros', (['(size, d)'], {'dtype': '"""float32"""'}), "((size, d), dtype='float32')\n", (352, 380), True, 'import numpy as np\n'), ((863, 885), 'numpy.linalg.norm', 'np.linalg.norm', (['vector'], {}), '(vector)\n', (877, 885), True, 'import numpy as np\n')] |
# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/WenmuZhou/DBNet.pytorch/blob/master/models/losses/basic_loss.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle
from paddle import nn
import paddle.nn.functional as F
class BalanceLoss(nn.Layer):
    """Balanced loss for Differentiable Binarization text detection.

    Positives are weighted against the hardest negatives (online hard
    example mining on the per-pixel loss map).

    Args:
        balance_loss (bool): whether to balance the loss, default True.
        main_loss_type (str): one of ['CrossEntropy', 'DiceLoss',
            'Euclidean', 'BCELoss', 'MaskL1Loss'], default 'DiceLoss'.
        negative_ratio (int|float): max negatives kept per positive, default 3.
        return_origin (bool): also return the unbalanced loss, default False.
        eps (float): numerical stabilizer, default 1e-6.
    """

    def __init__(self,
                 balance_loss=True,
                 main_loss_type='DiceLoss',
                 negative_ratio=3,
                 return_origin=False,
                 eps=1e-6,
                 **kwargs):
        super(BalanceLoss, self).__init__()
        self.balance_loss = balance_loss
        self.main_loss_type = main_loss_type
        self.negative_ratio = negative_ratio
        self.return_origin = return_origin
        self.eps = eps

        # Dispatch table instead of an if/elif chain; lambdas defer
        # construction until the type has been validated.
        builders = {
            "CrossEntropy": nn.CrossEntropyLoss,
            "Euclidean": nn.MSELoss,
            "DiceLoss": lambda: DiceLoss(self.eps),
            "BCELoss": lambda: BCELoss(reduction='none'),
            "MaskL1Loss": lambda: MaskL1Loss(self.eps),
        }
        if self.main_loss_type not in builders:
            loss_type = [
                'CrossEntropy', 'DiceLoss', 'Euclidean', 'BCELoss', 'MaskL1Loss'
            ]
            raise Exception(
                "main_loss_type in BalanceLoss() can only be one of {}".format(
                    loss_type))
        self.loss = builders[self.main_loss_type]()

    def forward(self, pred, gt, mask=None):
        """Compute the balanced loss.

        Args:
            pred: predicted feature maps.
            gt: ground-truth feature maps.
            mask: masked maps.

        Returns:
            The balanced loss (and the raw loss too when return_origin=True).
        """
        positive_mask = gt * mask
        negative_mask = (1 - gt) * mask

        n_pos = int(positive_mask.sum())
        n_neg = int(
            min(negative_mask.sum(), n_pos * self.negative_ratio))
        raw_loss = self.loss(pred, gt, mask=mask)

        if not self.balance_loss:
            return raw_loss

        positive_loss = positive_mask * raw_loss
        negative_loss = paddle.reshape(negative_mask * raw_loss, shape=[-1])
        if n_neg > 0:
            # Keep only the n_neg largest (hardest) negative losses.
            negative_loss = negative_loss.sort(descending=True)[:n_neg]
            balance_loss = (positive_loss.sum() + negative_loss.sum()) / (
                n_pos + n_neg + self.eps)
        else:
            balance_loss = positive_loss.sum() / (n_pos + self.eps)
        if self.return_origin:
            return balance_loss, raw_loss
        return balance_loss
class DiceLoss(nn.Layer):
    """Dice loss: 1 - 2*|pred∩gt| / (|pred| + |gt|), computed under a mask."""

    def __init__(self, eps=1e-6):
        super(DiceLoss, self).__init__()
        self.eps = eps

    def forward(self, pred, gt, mask, weights=None):
        """Compute the (optionally weighted) masked Dice loss."""
        assert pred.shape == gt.shape
        assert pred.shape == mask.shape
        if weights is not None:
            assert weights.shape == mask.shape
            mask = weights * mask

        overlap = paddle.sum(pred * gt * mask)
        denominator = paddle.sum(pred * mask) + paddle.sum(gt * mask) + self.eps
        loss = 1 - 2.0 * overlap / denominator
        assert loss <= 1
        return loss
class MaskL1Loss(nn.Layer):
    """Mean absolute error restricted to the masked region."""

    def __init__(self, eps=1e-6):
        super(MaskL1Loss, self).__init__()
        self.eps = eps

    def forward(self, pred, gt, mask):
        """Compute the masked L1 loss."""
        masked_abs_diff = (paddle.abs(pred - gt) * mask).sum()
        loss = masked_abs_diff / (mask.sum() + self.eps)
        return paddle.mean(loss)
class BCELoss(nn.Layer):
    """Thin wrapper around binary cross-entropy with a configurable reduction."""

    def __init__(self, reduction='mean'):
        super(BCELoss, self).__init__()
        self.reduction = reduction

    def forward(self, input, label, mask=None, weight=None, name=None):
        # mask/weight/name are accepted for interface compatibility but unused.
        return F.binary_cross_entropy(input, label, reduction=self.reduction)
def ohem_single(score, gt_text, training_mask, ohem_ratio):
    """Online hard example mining for a single sample.

    Keeps all positives (gt_text > 0.5 within the training mask) plus at most
    ``pos_num * ohem_ratio`` negatives, chosen as the highest-scoring
    (hardest) negative pixels.

    Parameters
    ----------
    score : np.ndarray (H, W)
        Predicted text score map.
    gt_text : np.ndarray (H, W)
        Ground-truth text map (positive where > 0.5).
    training_mask : np.ndarray (H, W)
        Valid-pixel mask (valid where > 0.5).
    ohem_ratio : int | float
        Maximum number of negatives per positive.

    Returns
    -------
    np.ndarray of shape (1, H, W), dtype float32
        Selected-pixel mask.
    """

    def _as_mask(mask):
        # Normalize any (H, W) array to the (1, H, W) float32 output layout.
        return mask.reshape(1, mask.shape[0], mask.shape[1]).astype('float32')

    # Positives: gt-positive pixels that are not masked out.
    pos_num = int(np.sum(gt_text > 0.5)) - int(
        np.sum((gt_text > 0.5) & (training_mask <= 0.5)))

    if pos_num == 0:
        # No positives: fall back to the full training mask.
        return _as_mask(training_mask)

    neg_num = int(np.sum(gt_text <= 0.5))
    neg_num = int(min(pos_num * ohem_ratio, neg_num))

    if neg_num == 0:
        return _as_mask(training_mask)

    neg_score = score[gt_text <= 0.5]
    # Sort the negative scores from high to low.
    neg_score_sorted = np.sort(-neg_score)
    threshold = -neg_score_sorted[neg_num - 1]
    # Select the highest-scoring negatives together with all positives.
    selected_mask = ((score >= threshold) |
                     (gt_text > 0.5)) & (training_mask > 0.5)
    return _as_mask(selected_mask)
def ohem_batch(scores, gt_texts, training_masks, ohem_ratio):
    """Apply ohem_single to every sample in the batch.

    Inputs are paddle tensors of shape (N, H, W); the result is a paddle
    tensor of the concatenated per-sample masks, shape (N, H, W).
    """
    scores_np = scores.numpy()
    gt_np = gt_texts.numpy()
    masks_np = training_masks.numpy()

    per_sample = [
        ohem_single(scores_np[i, :, :], gt_np[i, :, :], masks_np[i, :, :],
                    ohem_ratio)
        for i in range(scores_np.shape[0])
    ]
    return paddle.to_tensor(np.concatenate(per_sample, 0))
| [
"numpy.sort",
"paddle.nn.CrossEntropyLoss",
"paddle.mean",
"numpy.sum",
"paddle.to_tensor",
"paddle.nn.functional.binary_cross_entropy",
"paddle.reshape",
"numpy.concatenate",
"paddle.abs",
"paddle.nn.MSELoss",
"paddle.sum"
] | [((6561, 6580), 'numpy.sort', 'np.sort', (['(-neg_score)'], {}), '(-neg_score)\n', (6568, 6580), True, 'import numpy as np\n'), ((7307, 7340), 'numpy.concatenate', 'np.concatenate', (['selected_masks', '(0)'], {}), '(selected_masks, 0)\n', (7321, 7340), True, 'import numpy as np\n'), ((7362, 7394), 'paddle.to_tensor', 'paddle.to_tensor', (['selected_masks'], {}), '(selected_masks)\n', (7378, 7394), False, 'import paddle\n'), ((3813, 3854), 'paddle.reshape', 'paddle.reshape', (['negative_loss'], {'shape': '[-1]'}), '(negative_loss, shape=[-1])\n', (3827, 3854), False, 'import paddle\n'), ((4853, 4881), 'paddle.sum', 'paddle.sum', (['(pred * gt * mask)'], {}), '(pred * gt * mask)\n', (4863, 4881), False, 'import paddle\n'), ((5357, 5374), 'paddle.mean', 'paddle.mean', (['loss'], {}), '(loss)\n', (5368, 5374), False, 'import paddle\n'), ((5627, 5689), 'paddle.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['input', 'label'], {'reduction': 'self.reduction'}), '(input, label, reduction=self.reduction)\n', (5649, 5689), True, 'import paddle.nn.functional as F\n'), ((6183, 6205), 'numpy.sum', 'np.sum', (['(gt_text <= 0.5)'], {}), '(gt_text <= 0.5)\n', (6189, 6205), True, 'import numpy as np\n'), ((2104, 2125), 'paddle.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2123, 2125), False, 'from paddle import nn\n'), ((5792, 5813), 'numpy.sum', 'np.sum', (['(gt_text > 0.5)'], {}), '(gt_text > 0.5)\n', (5798, 5813), True, 'import numpy as np\n'), ((5832, 5880), 'numpy.sum', 'np.sum', (['((gt_text > 0.5) & (training_mask <= 0.5))'], {}), '((gt_text > 0.5) & (training_mask <= 0.5))\n', (5838, 5880), True, 'import numpy as np\n'), ((2199, 2211), 'paddle.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2209, 2211), False, 'from paddle import nn\n'), ((4899, 4922), 'paddle.sum', 'paddle.sum', (['(pred * mask)'], {}), '(pred * mask)\n', (4909, 4922), False, 'import paddle\n'), ((4925, 4946), 'paddle.sum', 'paddle.sum', (['(gt * mask)'], {}), '(gt * 
mask)\n', (4935, 4946), False, 'import paddle\n'), ((5280, 5301), 'paddle.abs', 'paddle.abs', (['(pred - gt)'], {}), '(pred - gt)\n', (5290, 5301), False, 'import paddle\n')] |
from typing import Callable, Union, Any
from enum import Enum
import numpy as np
from casadi import sum1, if_else, vertcat, lt, SX, MX
from .path_conditions import Bounds
from .penalty import PenaltyFunctionAbstract, PenaltyOption, PenaltyNodeList
from ..interfaces.biorbd_interface import BiorbdInterface
from ..misc.enums import Node, InterpolationType, ConstraintType
from ..misc.options import OptionList
class Constraint(PenaltyOption):
    """
    A placeholder for a constraint

    Attributes
    ----------
    min_bound: np.ndarray
        The vector of minimum bound of the constraint. Default is 0
    max_bound: np.ndarray
        The vector of maximal bound of the constraint. Default is 0
    """

    def __init__(
        self,
        constraint: Any,
        min_bound: Union[np.ndarray, float] = None,
        max_bound: Union[np.ndarray, float] = None,
        quadratic: bool = False,
        phase: int = -1,
        **params: Any,
    ):
        """
        Parameters
        ----------
        constraint: ConstraintFcn
            The chosen constraint
        min_bound: np.ndarray
            The vector of minimum bound of the constraint. Default is 0
        max_bound: np.ndarray
            The vector of maximal bound of the constraint. Default is 0
        phase: int
            The index of the phase to apply the constraint
        quadratic: bool
            If the penalty is quadratic
        params:
            Generic parameters for options
        """
        # Anything that is not a known constraint enum member is treated as a
        # user-provided custom function, dispatched through ConstraintFcn.CUSTOM.
        custom_function = None
        if not isinstance(constraint, (ConstraintFcn, ImplicitConstraintFcn)):
            custom_function = constraint
            constraint = ConstraintFcn.CUSTOM
        super(Constraint, self).__init__(
            penalty=constraint, phase=phase, quadratic=quadratic, custom_function=custom_function, **params
        )
        self.min_bound = min_bound
        self.max_bound = max_bound
        self.bounds = Bounds(interpolation=InterpolationType.CONSTANT)

    def set_penalty(self, penalty: Union[MX, SX], all_pn: PenaltyNodeList):
        """Set the penalty expression; unspecified bounds default to 0 (equality)."""
        super(Constraint, self).set_penalty(penalty, all_pn)
        self.min_bound = 0 if self.min_bound is None else self.min_bound
        self.max_bound = 0 if self.max_bound is None else self.max_bound

    def add_or_replace_to_penalty_pool(self, ocp, nlp):
        """Declare the constraint in the ocp/nlp pools and expand its bounds per row."""
        if self.type == ConstraintFcn.TIME_CONSTRAINT:
            self.node = Node.END

        super(Constraint, self).add_or_replace_to_penalty_pool(ocp, nlp)

        # Normalize list/tuple bounds to numpy arrays so they can be indexed below
        self.min_bound = np.array(self.min_bound) if isinstance(self.min_bound, (list, tuple)) else self.min_bound
        self.max_bound = np.array(self.max_bound) if isinstance(self.max_bound, (list, tuple)) else self.max_bound

        if self.bounds.shape[0] == 0:
            for i in self.rows:
                min_bound = (
                    self.min_bound[i]
                    if hasattr(self.min_bound, "__getitem__") and self.min_bound.shape[0] > 1
                    else self.min_bound
                )
                max_bound = (
                    self.max_bound[i]
                    if hasattr(self.max_bound, "__getitem__") and self.max_bound.shape[0] > 1
                    else self.max_bound
                )
                self.bounds.concatenate(Bounds(min_bound, max_bound, interpolation=InterpolationType.CONSTANT))
        elif self.bounds.shape[0] != len(self.rows):
            raise RuntimeError(f"bounds rows is {self.bounds.shape[0]} but should be {self.rows} or empty")

    def _add_penalty_to_pool(self, all_pn: PenaltyNodeList):
        """Store this constraint in the pool matching its ConstraintType."""
        if self.constraint_type == ConstraintType.INTERNAL:
            pool = all_pn.nlp.g_internal if all_pn is not None and all_pn.nlp else all_pn.ocp.g_internal
        elif self.constraint_type == ConstraintType.IMPLICIT:
            pool = all_pn.nlp.g_implicit if all_pn is not None and all_pn.nlp else all_pn.ocp.g_implicit
        elif self.constraint_type == ConstraintType.USER:
            pool = all_pn.nlp.g if all_pn is not None and all_pn.nlp else all_pn.ocp.g
        else:
            # BUGFIX: was `self.contraint_type` (typo), which raised
            # AttributeError here instead of the intended ValueError.
            raise ValueError(f"Invalid constraint type {self.constraint_type}.")
        pool[self.list_index] = self

    def clear_penalty(self, ocp, nlp):
        """Reset the penalty's slot; a negative list_index claims the first free (or a new) slot."""
        if self.constraint_type == ConstraintType.INTERNAL:
            g_to_add_to = nlp.g_internal if nlp else ocp.g_internal
        elif self.constraint_type == ConstraintType.IMPLICIT:
            g_to_add_to = nlp.g_implicit if nlp else ocp.g_implicit
        elif self.constraint_type == ConstraintType.USER:
            g_to_add_to = nlp.g if nlp else ocp.g
        else:
            raise ValueError(f"Invalid Type of Constraint {self.constraint_type}")

        if self.list_index < 0:
            # Reuse the first empty slot, otherwise append a fresh one
            for i, j in enumerate(g_to_add_to):
                if not j:
                    self.list_index = i
                    return
            else:
                g_to_add_to.append([])
                self.list_index = len(g_to_add_to) - 1
        else:
            while self.list_index >= len(g_to_add_to):
                g_to_add_to.append([])
            g_to_add_to[self.list_index] = []
class ConstraintList(OptionList):
    """
    A list of Constraint if more than one is required

    Methods
    -------
    add(self, constraint: Union[Callable, "ConstraintFcn"], **extra_arguments)
        Add a new Constraint to the list
    print(self)
        Print the ConstraintList to the console
    """

    def add(self, constraint: Union[Callable, Constraint, Any], **extra_arguments: Any):
        """
        Add a new constraint to the list

        Parameters
        ----------
        constraint: Union[Callable, Constraint, ConstraintFcn]
            The chosen constraint
        extra_arguments: dict
            Any parameters to pass to Constraint
        """
        # An already-built Constraint is copied verbatim; anything else is
        # wrapped into a new Constraint by the parent list.
        if isinstance(constraint, Constraint):
            self.copy(constraint)
            return
        super(ConstraintList, self)._add(option_type=Constraint, constraint=constraint, **extra_arguments)

    def print(self):
        """
        Print the ConstraintList to the console
        """
        # TODO: Print all elements in the console
        raise NotImplementedError("Printing of ConstraintList is not ready yet")
class ConstraintFunction(PenaltyFunctionAbstract):
    """
    Internal (re)implementation of the penalty functions

    Methods
    -------
    inner_phase_continuity(ocp)
        Add continuity constraints between each nodes of a phase.
    inter_phase_continuity(ocp)
        Add phase transition constraints between two phases.
    clear_penalty(ocp: OptimalControlProgram, nlp: NonLinearProgram, penalty: Constraint)
        Resets a penalty. A negative penalty index creates a new empty penalty.
    penalty_nature() -> str
        Get the nature of the penalty
    """

    class Functions:
        """
        Implementation of all the constraint functions
        """

        @staticmethod
        def non_slipping(
            constraint: Constraint,
            all_pn: PenaltyNodeList,
            tangential_component_idx: int,
            normal_component_idx: int,
            static_friction_coefficient: float,
        ):
            """
            Add a constraint of static friction at contact points constraining for small tangential forces.
            This function make the assumption that normal_force is always positive
            That is mu*normal_force = tangential_force. To prevent from using a square root, the previous
            equation is squared

            Parameters
            ----------
            constraint: Constraint
                The actual constraint to declare
            all_pn: PenaltyNodeList
                The penalty node elements
            tangential_component_idx: int
                Index of the tangential component of the contact force.
                [0] = x_indices, [1] = y_indices / or [0] = component
            normal_component_idx: int
                Index of the normal component of the contact force
            static_friction_coefficient: float
                Static friction coefficient
            """
            nlp = all_pn.nlp
            # Normalize both index arguments to lists
            if isinstance(tangential_component_idx, int):
                tangential_component_idx = [tangential_component_idx]
            elif not isinstance(tangential_component_idx, (tuple, list)):
                raise RuntimeError("tangential_component_idx must be a unique integer or a list of integer")

            if isinstance(normal_component_idx, int):
                normal_component_idx = [normal_component_idx]
            elif not isinstance(normal_component_idx, (tuple, list)):
                raise RuntimeError("normal_component_idx must be a unique integer or a list of integer")

            mu_squared = static_friction_coefficient ** 2
            # Both rows of the returned vector must stay non-negative
            constraint.min_bound = np.array([0, 0])
            constraint.max_bound = np.array([np.inf, np.inf])
            contact = all_pn.nlp.contact_forces_func(nlp.states.cx, nlp.controls.cx, nlp.parameters.cx)
            normal_contact_force_squared = sum1(contact[normal_component_idx, 0]) ** 2
            if len(tangential_component_idx) == 1:
                tangential_contact_force_squared = sum1(contact[tangential_component_idx[0], 0]) ** 2
            elif len(tangential_component_idx) == 2:
                tangential_contact_force_squared = (
                    sum1(contact[tangential_component_idx[0], 0]) ** 2
                    + sum1(contact[tangential_component_idx[1], 0]) ** 2
                )
            else:
                raise (ValueError("tangential_component_idx should either be x and y or only one component"))

            # Squared form of |tangential| <= mu * normal (valid when the
            # normal force is non-negative, as assumed above)
            slipping = vertcat(
                mu_squared * normal_contact_force_squared - tangential_contact_force_squared,
                mu_squared * normal_contact_force_squared + tangential_contact_force_squared,
            )
            return slipping

        @staticmethod
        def torque_max_from_q_and_qdot(constraint: Constraint, all_pn: PenaltyNodeList, min_torque=None):
            """
            Non linear maximal values of joint torques computed from the torque-position-velocity relationship

            Parameters
            ----------
            constraint: Constraint
                The actual constraint to declare
            all_pn: PenaltyNodeList
                The penalty node elements
            min_torque: float
                Minimum joint torques. This prevent from having too small torques, but introduces an if statement
            """
            nlp = all_pn.nlp
            if min_torque and min_torque < 0:
                raise ValueError("min_torque cannot be negative in tau_max_from_actuators")

            bound = nlp.model.torqueMax(nlp.states["q"].mx, nlp.states["qdot"].mx)
            min_bound = BiorbdInterface.mx_to_cx(
                "min_bound",
                nlp.controls["tau"].mapping.to_first.map(bound[1].to_mx()),
                nlp.states["q"],
                nlp.states["qdot"],
            )
            max_bound = BiorbdInterface.mx_to_cx(
                "max_bound",
                nlp.controls["tau"].mapping.to_first.map(bound[0].to_mx()),
                nlp.states["q"],
                nlp.states["qdot"],
            )
            if min_torque:
                # Clamp both limits away from zero so the feasible torque
                # interval never collapses
                min_bound = if_else(lt(min_bound, min_torque), min_torque, min_bound)
                max_bound = if_else(lt(max_bound, min_torque), min_torque, max_bound)

            value = vertcat(nlp.controls["tau"].cx + min_bound, nlp.controls["tau"].cx - max_bound)

            # First half of `value` is constrained to [0, inf), second half to (-inf, 0]
            n_rows = constraint.rows if constraint.rows else int(value.shape[0] / 2)
            constraint.min_bound = [0] * n_rows + [-np.inf] * n_rows
            constraint.max_bound = [np.inf] * n_rows + [0] * n_rows
            return value

        @staticmethod
        def time_constraint(_: Constraint, all_pn: PenaltyNodeList, **unused_param):
            """
            The time constraint is taken care elsewhere, but must be declared here. This function therefore does nothing

            Parameters
            ----------
            _: Constraint
                The actual constraint to declare
            all_pn: PenaltyNodeList
                The penalty node elements
            **unused_param: dict
                Since the function does nothing, we can safely ignore any argument
            """
            # Simply exposes the phase duration variable
            return all_pn.nlp.tf

        @staticmethod
        def qddot_equals_forward_dynamics(_: Constraint, all_pn: PenaltyNodeList, **unused_param):
            """
            Compute the difference between symbolic joint accelerations and forward dynamic results
            It includes the inversion of mass matrix

            Parameters
            ----------
            _: Constraint
                The actual constraint to declare
            all_pn: PenaltyNodeList
                The penalty node elements
            **unused_param: dict
                Unused here; accepted so all constraint functions share the same signature
            """
            nlp = all_pn.nlp
            q = nlp.states["q"].mx
            qdot = nlp.states["qdot"].mx
            # tau may live either in the states or in the controls
            tau = nlp.states["tau"].mx if "tau" in nlp.states.keys() else nlp.controls["tau"].mx

            qddot = nlp.controls["qddot"].mx
            qddot_fd = nlp.model.ForwardDynamics(q, qdot, tau).to_mx()

            var = []
            var.extend([nlp.states[key] for key in nlp.states])
            var.extend([nlp.controls[key] for key in nlp.controls])
            var.extend([nlp.parameters[key] for key in nlp.parameters])

            return BiorbdInterface.mx_to_cx("ForwardDynamics", qddot - qddot_fd, *var)

        @staticmethod
        def tau_equals_inverse_dynamics(_: Constraint, all_pn: PenaltyNodeList, **unused_param):
            """
            Compute the difference between symbolic joint torques and inverse dynamic results
            It does not include any inversion of mass matrix

            Parameters
            ----------
            _: Constraint
                The actual constraint to declare
            all_pn: PenaltyNodeList
                The penalty node elements
            **unused_param: dict
                Unused here; accepted so all constraint functions share the same signature
            """
            nlp = all_pn.nlp
            q = nlp.states["q"].mx
            qdot = nlp.states["qdot"].mx
            # tau and qddot may each live either in the states or in the controls
            tau = nlp.states["tau"].mx if "tau" in nlp.states.keys() else nlp.controls["tau"].mx
            qddot = nlp.states["qddot"].mx if "qddot" in nlp.states.keys() else nlp.controls["qddot"].mx

            if nlp.external_forces:
                raise NotImplementedError(
                    "This implicit constraint tau_equals_inverse_dynamics is not implemented yet with external forces"
                )
                # Todo: add fext tau_id = nlp.model.InverseDynamics(q, qdot, qddot, fext).to_mx()
                # fext need to be a mx

            tau_id = nlp.model.InverseDynamics(q, qdot, qddot).to_mx()

            var = []
            var.extend([nlp.states[key] for key in nlp.states])
            var.extend([nlp.controls[key] for key in nlp.controls])
            var.extend([nlp.parameters[key] for key in nlp.parameters])

            return BiorbdInterface.mx_to_cx("InverseDynamics", tau_id - tau, *var)

        @staticmethod
        def implicit_soft_contact_forces(_: Constraint, all_pn: PenaltyNodeList, **unused_param):
            """
            Compute the difference between the declared external forces control ("fext")
            and the soft contact forces evaluated from the model

            Parameters
            ----------
            _: Constraint
                The actual constraint to declare
            all_pn: PenaltyNodeList
                The penalty node elements
            **unused_param: dict
                Unused here; accepted so all constraint functions share the same signature
            """
            nlp = all_pn.nlp

            force_idx = []
            for i_sc in range(nlp.model.nbSoftContacts()):
                # Indices 3..5 of each 6-component soft-contact wrench
                force_idx.append(3 + (6 * i_sc))
                force_idx.append(4 + (6 * i_sc))
                force_idx.append(5 + (6 * i_sc))

            soft_contact_all = nlp.soft_contact_forces_func(nlp.states.mx, nlp.controls.mx, nlp.parameters.mx)
            soft_contact_force = soft_contact_all[force_idx]

            var = []
            var.extend([nlp.states[key] for key in nlp.states])
            var.extend([nlp.controls[key] for key in nlp.controls])
            var.extend([nlp.parameters[key] for key in nlp.parameters])

            return BiorbdInterface.mx_to_cx("ForwardDynamics", nlp.controls["fext"].mx - soft_contact_force, *var)

    @staticmethod
    def inner_phase_continuity(ocp):
        """
        Add continuity constraints between each nodes of a phase.

        Parameters
        ----------
        ocp: OptimalControlProgram
            A reference to the ocp
        """
        # Dynamics must be sound within phases
        for i, nlp in enumerate(ocp.nlp):
            penalty = Constraint(
                ConstraintFcn.CONTINUITY, node=Node.ALL_SHOOTING, constraint_type=ConstraintType.INTERNAL
            )
            penalty.add_or_replace_to_penalty_pool(ocp, nlp)

    @staticmethod
    def inter_phase_continuity(ocp):
        """
        Add phase transition constraints between two phases.

        Parameters
        ----------
        ocp: OptimalControlProgram
            A reference to the ocp
        """
        for i, pt in enumerate(ocp.phase_transitions):
            # Dynamics must be respected between phases
            pt.name = f"PHASE_TRANSITION {pt.phase_pre_idx}->{pt.phase_post_idx}"
            pt.list_index = -1
            pt.add_or_replace_to_penalty_pool(ocp, ocp.nlp[pt.phase_pre_idx])

    @staticmethod
    def get_dt(_):
        # Constraints are not scaled by the time step
        return 1

    @staticmethod
    def penalty_nature() -> str:
        return "constraints"
class ConstraintFcn(Enum):
    """
    Selection of valid constraint functions

    Each member's value is a 1-tuple wrapping the implementing function, so
    that the Enum machinery does not turn the bare function into a method.

    Methods
    -------
    def get_type() -> Callable
        Returns the type of the penalty
    """

    # Penalties shared with the objective functions, reused as constraints
    CONTINUITY = (PenaltyFunctionAbstract.Functions.continuity,)
    TRACK_CONTROL = (PenaltyFunctionAbstract.Functions.minimize_controls,)
    TRACK_STATE = (PenaltyFunctionAbstract.Functions.minimize_states,)
    TRACK_MARKERS = (PenaltyFunctionAbstract.Functions.minimize_markers,)
    TRACK_MARKERS_VELOCITY = (PenaltyFunctionAbstract.Functions.minimize_markers_velocity,)
    SUPERIMPOSE_MARKERS = (PenaltyFunctionAbstract.Functions.superimpose_markers,)
    PROPORTIONAL_STATE = (PenaltyFunctionAbstract.Functions.proportional_states,)
    PROPORTIONAL_CONTROL = (PenaltyFunctionAbstract.Functions.proportional_controls,)
    TRACK_CONTACT_FORCES = (PenaltyFunctionAbstract.Functions.minimize_contact_forces,)
    TRACK_SEGMENT_WITH_CUSTOM_RT = (PenaltyFunctionAbstract.Functions.track_segment_with_custom_rt,)
    TRACK_MARKER_WITH_SEGMENT_AXIS = (PenaltyFunctionAbstract.Functions.track_marker_with_segment_axis,)
    TRACK_COM_POSITION = (PenaltyFunctionAbstract.Functions.minimize_com_position,)
    TRACK_COM_VELOCITY = (PenaltyFunctionAbstract.Functions.minimize_com_velocity,)
    CUSTOM = (PenaltyFunctionAbstract.Functions.custom,)
    # Constraint-only functions
    NON_SLIPPING = (ConstraintFunction.Functions.non_slipping,)
    TORQUE_MAX_FROM_Q_AND_QDOT = (ConstraintFunction.Functions.torque_max_from_q_and_qdot,)
    TIME_CONSTRAINT = (ConstraintFunction.Functions.time_constraint,)

    @staticmethod
    def get_type():
        """
        Returns the type of the penalty
        """
        return ConstraintFunction
class ImplicitConstraintFcn(Enum):
    """
    Selection of valid constraint functions

    Each member's value is a 1-tuple wrapping the implementing function, so
    that the Enum machinery does not turn the bare function into a method.

    Methods
    -------
    def get_type() -> Callable
        Returns the type of the penalty
    """

    QDDOT_EQUALS_FORWARD_DYNAMICS = (ConstraintFunction.Functions.qddot_equals_forward_dynamics,)
    TAU_EQUALS_INVERSE_DYNAMICS = (ConstraintFunction.Functions.tau_equals_inverse_dynamics,)
    SOFT_CONTACTS_EQUALS_SOFT_CONTACTS_DYNAMICS = (ConstraintFunction.Functions.implicit_soft_contact_forces,)

    @staticmethod
    def get_type():
        """
        Returns the type of the penalty
        """
        return ConstraintFunction
class ContinuityFunctions:
    """
    Interface between continuity and constraint
    """

    @staticmethod
    def continuity(ocp):
        """
        The declaration of inner- and inter-phase continuity constraints

        Parameters
        ----------
        ocp: OptimalControlProgram
            A reference to the ocp
        """
        # Dynamics must be sound within each phase
        ConstraintFunction.inner_phase_continuity(ocp)

        # Dynamics must be respected between phases
        ConstraintFunction.inter_phase_continuity(ocp)
| [
"casadi.lt",
"numpy.array",
"casadi.sum1",
"casadi.vertcat"
] | [((2529, 2553), 'numpy.array', 'np.array', (['self.min_bound'], {}), '(self.min_bound)\n', (2537, 2553), True, 'import numpy as np\n'), ((2644, 2668), 'numpy.array', 'np.array', (['self.max_bound'], {}), '(self.max_bound)\n', (2652, 2668), True, 'import numpy as np\n'), ((8869, 8885), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (8877, 8885), True, 'import numpy as np\n'), ((8921, 8947), 'numpy.array', 'np.array', (['[np.inf, np.inf]'], {}), '([np.inf, np.inf])\n', (8929, 8947), True, 'import numpy as np\n'), ((9713, 9884), 'casadi.vertcat', 'vertcat', (['(mu_squared * normal_contact_force_squared - tangential_contact_force_squared)', '(mu_squared * normal_contact_force_squared + tangential_contact_force_squared)'], {}), '(mu_squared * normal_contact_force_squared -\n tangential_contact_force_squared, mu_squared *\n normal_contact_force_squared + tangential_contact_force_squared)\n', (9720, 9884), False, 'from casadi import sum1, if_else, vertcat, lt, SX, MX\n'), ((11525, 11604), 'casadi.vertcat', 'vertcat', (["(nlp.controls['tau'].cx + min_bound)", "(nlp.controls['tau'].cx - max_bound)"], {}), "(nlp.controls['tau'].cx + min_bound, nlp.controls['tau'].cx - max_bound)\n", (11532, 11604), False, 'from casadi import sum1, if_else, vertcat, lt, SX, MX\n'), ((9096, 9134), 'casadi.sum1', 'sum1', (['contact[normal_component_idx, 0]'], {}), '(contact[normal_component_idx, 0])\n', (9100, 9134), False, 'from casadi import sum1, if_else, vertcat, lt, SX, MX\n'), ((9242, 9287), 'casadi.sum1', 'sum1', (['contact[tangential_component_idx[0], 0]'], {}), '(contact[tangential_component_idx[0], 0])\n', (9246, 9287), False, 'from casadi import sum1, if_else, vertcat, lt, SX, MX\n'), ((11368, 11393), 'casadi.lt', 'lt', (['min_bound', 'min_torque'], {}), '(min_bound, min_torque)\n', (11370, 11393), False, 'from casadi import sum1, if_else, vertcat, lt, SX, MX\n'), ((11454, 11479), 'casadi.lt', 'lt', (['max_bound', 'min_torque'], {}), '(max_bound, min_torque)\n', 
(11456, 11479), False, 'from casadi import sum1, if_else, vertcat, lt, SX, MX\n'), ((9419, 9464), 'casadi.sum1', 'sum1', (['contact[tangential_component_idx[0], 0]'], {}), '(contact[tangential_component_idx[0], 0])\n', (9423, 9464), False, 'from casadi import sum1, if_else, vertcat, lt, SX, MX\n'), ((9492, 9537), 'casadi.sum1', 'sum1', (['contact[tangential_component_idx[1], 0]'], {}), '(contact[tangential_component_idx[1], 0])\n', (9496, 9537), False, 'from casadi import sum1, if_else, vertcat, lt, SX, MX\n')] |
import skimage.draw as skd
import skimage.io as skio
import numpy as np
import h5py
import itertools
import random
from typing import List
from dataclasses import dataclass, field
def default_float(n=1, low=0.0, high=1.0):
    """Dataclass field whose default is a fresh uniform sample in [low, high).

    n == 1 yields a scalar default; n > 1 yields an array of n samples.
    A factory is used so every instance draws its own random value.
    """
    if n == 1:
        factory = lambda: np.random.uniform(low, high)
    else:
        factory = lambda: np.random.uniform(low, high, n)
    return field(default_factory=factory)
@dataclass
class Shape:
    """A randomly-parameterized 2D shape.

    Fields default to fresh uniform samples per instance:
    color -- RGB triple in [0, 1)
    size  -- fraction of the smaller image dimension, in [0.1, 0.9)
    x, y  -- normalized center position in [0, 1)
    """
    color: List[float] = default_float(3)
    size: float = default_float(low=.1, high=.9)
    x: float = default_float()
    y: float = default_float()

    def gen(self, img_size):
        # Kept for backward compatibility with the original abstract hook name.
        raise NotImplementedError()

    def render(self, img_size):
        """Return (rows, cols) pixel index arrays covering the shape.

        BUGFIX: the abstract hook was originally named ``gen`` while every
        subclass implements ``render``; calling ``render`` on the base class
        now raises NotImplementedError instead of AttributeError.
        """
        raise NotImplementedError()
@dataclass
class RotatableShape(Shape):
    # rotation is a fraction in [0, 1); subclasses scale it to radians
    # (Square uses rotation*pi; Ellipse and Triangle use rotation*2*pi).
    rotation: float = default_float()
class Circle(Shape):
    def render(self, img_size):
        """Rasterize as a filled disk; returns (rows, cols) index arrays."""
        radius = int(self.size * 0.5 * min(img_size[0], img_size[1]))
        center_r = int(self.y * img_size[0])
        center_c = int(self.x * img_size[1])
        return skd.circle(center_r, center_c, radius, shape=img_size[:2])
class CircleOutline(Shape):
    def render(self, img_size):
        """Rasterize as a one-pixel circle outline; returns (rows, cols) arrays."""
        radius = int(self.size * 0.5 * min(img_size[0], img_size[1]))
        center_r = int(self.y * img_size[0])
        center_c = int(self.x * img_size[1])
        return skd.circle_perimeter(center_r, center_c, radius, shape=img_size[:2])
class Square(RotatableShape):
    def render(self, img_size):
        """Rasterize as a filled, rotated square; returns (rows, cols) arrays."""
        center_r = int(self.y * img_size[0])
        center_c = int(self.x * img_size[1])
        angle = self.rotation * np.pi
        half = int(self.size * 0.5 * min(img_size[0], img_size[1]))
        corner_r = [center_r - half, center_r - half, center_r + half, center_r + half]
        corner_c = [center_c - half, center_c + half, center_c + half, center_c - half]
        corner_r, corner_c = rotate_rc(corner_r, corner_c, angle, center_r, center_c)
        rr, cc = skd.polygon(corner_r, corner_c)
        return rr.flatten(), cc.flatten()
@dataclass
class Rectangle(RotatableShape):
    # aspect scales the column half-extent relative to the row half-extent
    aspect: float = default_float(low=0.0, high=1.0)

    def render(self, img_size):
        """Rasterize as a filled, rotated rectangle; returns (rows, cols) arrays."""
        center_r = int(self.y * img_size[0])
        center_c = int(self.x * img_size[1])
        angle = self.rotation * np.pi
        half = int(self.size * 0.5 * min(img_size[0], img_size[1]))
        corner_r = [center_r - half, center_r - half, center_r + half, center_r + half]
        corner_c = [
            center_c - half * self.aspect,
            center_c + half * self.aspect,
            center_c + half * self.aspect,
            center_c - half * self.aspect,
        ]
        corner_r, corner_c = rotate_rc(corner_r, corner_c, angle, center_r, center_c)
        rr, cc = skd.polygon(corner_r, corner_c)
        return rr.flatten(), cc.flatten()
@dataclass
class Ellipse(RotatableShape):
    # aspect scales the minor radius relative to the major radius
    aspect: float = default_float(low=0.0, high=1.0)

    def render(self, img_size):
        """Rasterize as a filled, rotated ellipse; returns (rows, cols) arrays."""
        center_r = int(self.y * img_size[0])
        center_c = int(self.x * img_size[1])
        angle = self.rotation * 2 * np.pi - np.pi
        radius = int(self.size * 0.5 * min(img_size[0], img_size[1]))
        rr, cc = skd.ellipse(center_r, center_c, radius, radius * self.aspect, rotation=angle)
        return rr.flatten(), cc.flatten()
class Triangle(RotatableShape):
    def render(self, img_size):
        """Rasterize as a filled, rotated triangle; returns (rows, cols) arrays."""
        center_r = int(self.y * img_size[0])
        center_c = int(self.x * img_size[1])
        angle = self.rotation * 2 * np.pi
        half_w = int(self.size * 0.5 * min(img_size[0], img_size[1]))
        half_h = half_w * np.sqrt(3) * 0.5
        corner_r = [center_r + half_w, center_r, center_r - half_w]
        corner_c = [center_c - half_h, center_c + half_h, center_c - half_h]
        corner_r, corner_c = rotate_rc(corner_r, corner_c, angle, center_r, center_c)
        rr, cc = skd.polygon(corner_r, corner_c)
        return rr.flatten(), cc.flatten()
# Concrete shape classes eligible for random sampling in random_shape().
SHAPE_CHOICES = [ Circle, Triangle, Rectangle, Ellipse, Square ]
def render_shapes(shapes, img_size, bg=None):
    """Render shapes, in order, onto a solid background.

    bg is an RGB triple; when None a random background color is drawn.
    Returns a float32 image of shape img_size.
    """
    canvas = np.zeros(img_size, dtype=np.float32)
    if bg is None:
        bg = np.random.random(3)
    for channel in range(3):
        canvas[:, :, channel] = bg[channel]
    for current in shapes:
        rr, cc = current.render(img_size)
        rr, cc = crop_rc(rr, cc, img_size)
        canvas[rr, cc, :] = current.color
    return canvas
def random_shapes(shape, n_min, n_max):
    """Create between n_min and n_max random shapes, ordered largest first.

    Largest-first ordering means big shapes are drawn first and small ones
    end up on top when rendered.
    """
    count = np.random.randint(n_min, n_max + 1)
    generated = [random_shape(shape) for _ in range(count)]
    return sorted(generated, key=lambda s: s.size, reverse=True)
def random_shape(shape=None):
    """Instantiate the given shape class, or a randomly chosen one when None."""
    shape_cls = random.choice(SHAPE_CHOICES) if shape is None else shape
    return shape_cls()
def rotate_rc(rr, cc, th, r, c):
    """Rotate pixel coordinates (rr, cc) by th radians about the point (r, c).

    Uses homogeneous coordinates: translate to the origin, rotate, translate
    back. Results are rounded and returned as two int arrays.
    """
    points = np.array([rr,
                       cc,
                       np.ones(len(rr))])
    to_origin = np.array([[1, 0, -r],
                          [0, 1, -c],
                          [0, 0, 1]])
    rotation = np.array([[np.cos(th), -np.sin(th), 0],
                         [np.sin(th), np.cos(th), 0],
                         [0, 0, 1]])
    back = np.array([[1, 0, r],
                     [0, 1, c],
                     [0, 0, 1]])
    transform = np.dot(back, np.dot(rotation, to_origin))
    rotated = np.round(np.dot(transform, points))
    return rotated[0].astype(int), rotated[1].astype(int)
def crop_rc(rr, cc, img_size):
    """Keep only the coordinate pairs that fall inside the image bounds."""
    inside = (0 <= rr) & (rr < img_size[0]) & (0 <= cc) & (cc < img_size[1])
    return rr[inside], cc[inside]
def render_shape_sets(n, shape, img_sizes, n_min, n_max, dtype=np.float32):
    """Render n random scenes, each rasterized at every size in img_sizes.

    Returns one array of shape [n, *img_size] per requested size; the i-th
    slice of every array depicts the same scene.
    """
    batches = [np.zeros([n] + list(size), dtype=dtype) for size in img_sizes]
    for scene_idx in range(n):
        scene = random_shapes(shape, n_min, n_max)
        for batch, size in zip(batches, img_sizes):
            batch[scene_idx, :] = render_shapes(scene, size)
    return batches
if __name__ == "__main__":
    # Smoke test: render one scene at two resolutions and display/save it.
    shp = (512, 512, 3)
    im = render_shape_sets(1, None, [shp, shp], 10, 10)
    import matplotlib.pyplot as plt
    plt.imshow(im[1][0])
    plt.axis('off')
    # BUGFIX: savefig must run before show(); show() blocks and, with
    # non-interactive backends, closing the window tears down the figure,
    # so saving afterwards produced an empty file.
    plt.savefig('test.png')
    plt.show()
| [
"numpy.sqrt",
"numpy.array",
"numpy.sin",
"matplotlib.pyplot.imshow",
"numpy.random.random",
"skimage.draw.ellipse",
"numpy.dot",
"matplotlib.pyplot.axis",
"numpy.round",
"random.choice",
"matplotlib.pyplot.savefig",
"skimage.draw.circle_perimeter",
"numpy.cos",
"skimage.draw.polygon",
"... | [((3454, 3490), 'numpy.zeros', 'np.zeros', (['img_size'], {'dtype': 'np.float32'}), '(img_size, dtype=np.float32)\n', (3462, 3490), True, 'import numpy as np\n'), ((3874, 3909), 'numpy.random.randint', 'np.random.randint', (['n_min', '(n_max + 1)'], {}), '(n_min, n_max + 1)\n', (3891, 3909), True, 'import numpy as np\n'), ((4300, 4345), 'numpy.array', 'np.array', (['[[1, 0, -r], [0, 1, -c], [0, 0, 1]]'], {}), '([[1, 0, -r], [0, 1, -c], [0, 0, 1]])\n', (4308, 4345), True, 'import numpy as np\n'), ((4554, 4597), 'numpy.array', 'np.array', (['[[1, 0, r], [0, 1, c], [0, 0, 1]]'], {}), '([[1, 0, r], [0, 1, c], [0, 0, 1]])\n', (4562, 4597), True, 'import numpy as np\n'), ((4687, 4699), 'numpy.dot', 'np.dot', (['T', 'p'], {}), '(T, p)\n', (4693, 4699), True, 'import numpy as np\n'), ((4709, 4721), 'numpy.round', 'np.round', (['pt'], {}), '(pt)\n', (4717, 4721), True, 'import numpy as np\n'), ((5458, 5478), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im[1][0]'], {}), '(im[1][0])\n', (5468, 5478), True, 'import matplotlib.pyplot as plt\n'), ((5483, 5498), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5491, 5498), True, 'import matplotlib.pyplot as plt\n'), ((5503, 5513), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5511, 5513), True, 'import matplotlib.pyplot as plt\n'), ((5522, 5545), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test.png"""'], {}), "('test.png')\n", (5533, 5545), True, 'import matplotlib.pyplot as plt\n'), ((948, 992), 'skimage.draw.circle', 'skd.circle', (['r', 'c', 'radius'], {'shape': 'img_size[:2]'}), '(r, c, radius, shape=img_size[:2])\n', (958, 992), True, 'import skimage.draw as skd\n'), ((1220, 1274), 'skimage.draw.circle_perimeter', 'skd.circle_perimeter', (['r', 'c', 'radius'], {'shape': 'img_size[:2]'}), '(r, c, radius, shape=img_size[:2])\n', (1240, 1274), True, 'import skimage.draw as skd\n'), ((1676, 1695), 'skimage.draw.polygon', 'skd.polygon', (['rr', 'cc'], {}), '(rr, cc)\n', (1687, 
1695), True, 'import skimage.draw as skd\n'), ((2266, 2285), 'skimage.draw.polygon', 'skd.polygon', (['rr', 'cc'], {}), '(rr, cc)\n', (2277, 2285), True, 'import skimage.draw as skd\n'), ((2706, 2766), 'skimage.draw.ellipse', 'skd.ellipse', (['r', 'c', 'radius', '(radius * self.aspect)'], {'rotation': 'th'}), '(r, c, radius, radius * self.aspect, rotation=th)\n', (2717, 2766), True, 'import skimage.draw as skd\n'), ((3245, 3264), 'skimage.draw.polygon', 'skd.polygon', (['rr', 'cc'], {}), '(rr, cc)\n', (3256, 3264), True, 'import skimage.draw as skd\n'), ((3528, 3547), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (3544, 3547), True, 'import numpy as np\n'), ((4116, 4144), 'random.choice', 'random.choice', (['SHAPE_CHOICES'], {}), '(SHAPE_CHOICES)\n', (4129, 4144), False, 'import random\n'), ((4662, 4675), 'numpy.dot', 'np.dot', (['R', 'T1'], {}), '(R, T1)\n', (4668, 4675), True, 'import numpy as np\n'), ((3099, 3109), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (3106, 3109), True, 'import numpy as np\n'), ((4410, 4420), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (4416, 4420), True, 'import numpy as np\n'), ((4461, 4471), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (4467, 4471), True, 'import numpy as np\n'), ((4473, 4483), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (4479, 4483), True, 'import numpy as np\n'), ((284, 312), 'numpy.random.uniform', 'np.random.uniform', (['low', 'high'], {}), '(low, high)\n', (301, 312), True, 'import numpy as np\n'), ((372, 403), 'numpy.random.uniform', 'np.random.uniform', (['low', 'high', 'n'], {}), '(low, high, n)\n', (389, 403), True, 'import numpy as np\n'), ((4423, 4433), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (4429, 4433), True, 'import numpy as np\n')] |
import os
import sys
import subprocess
import numpy as np
from tqdm import tqdm
import cv2
import torch
import orjson
from torch.utils.data import Dataset
from vaetc.utils.debug import debug_print
from .utils import IMAGE_HEIGHT, IMAGE_WIDTH, ImageDataset, cache_path
class FFHQThumbnails(Dataset):
    """FFHQ-Dataset (https://github.com/NVlabs/ffhq-dataset).

    Because images are resized small downstream, the 128x128 thumbnails
    are used instead of the full-resolution images. Items are
    (image tensor, flattened face-landmark tensor) pairs.
    """
    def __init__(self, root_path: str, download=False, train=True) -> None:
        super().__init__()
        self.root_path = root_path
        # the metadata "category" field distinguishes the official splits
        self.category = "training" if train else "validation"
        if download and self._download_required():
            self._download()
        with open(os.path.join(self.root_path, "ffhq-dataset-v2.json"), "r", encoding="utf8") as fp:
            debug_print("Loading metadata...")
            # read the large JSON file in 4 KiB chunks so tqdm can show progress
            json_str = ""
            with tqdm() as pbar:
                while True:
                    chunk = fp.read(4096)
                    if len(chunk) == 0:
                        break
                    json_str += chunk
                    pbar.update(1)
            debug_print("Parsing metadata...")
            self.metadata = orjson.loads(json_str)
        debug_print("Splitting ...")
        # keep only indices whose split matches and whose thumbnail file exists
        self.keys = []
        for index in tqdm(range(70000)):
            metadata_instance = self.metadata[str(index)]
            if metadata_instance["category"] == self.category:
                if os.path.isfile(os.path.join(self.root_path, metadata_instance["thumbnail"]["file_path"])):
                    self.keys.append(index)
    def _download_required(self) -> bool:
        # download is required when either the metadata file or the
        # thumbnail directory is missing
        if not os.path.isfile(os.path.join(self.root_path, "ffhq-dataset-v2.json")):
            return True
        if not os.path.isdir(os.path.join(self.root_path, "thumbnails128x128")):
            return True
        return False
    def _download(self):
        # the dataset cannot be fetched automatically; print instructions instead
        print("This dataset (FFHQ Thumbnail) must be downloaded and placed manually as:")
        print("1. Download ffhq-dataset-v2.json from https://github.com/NVlabs/ffhq-dataset#overview")
        print("2. Download thumbnails128x128 from https://github.com/NVlabs/ffhq-dataset#overview")
        print(f"3. Put them in {self.root_path}/")
        raise NotImplementedError("This dataset is currently available by manual download")
    def __len__(self) -> int:
        return len(self.keys)
    def _load_image(self, image_path: str) -> np.ndarray:
        # load as BGR uint8, resize, then convert to RGB CHW float32 in [0, 1]
        img = cv2.imread(image_path)
        img = cv2.resize(img, [IMAGE_WIDTH, IMAGE_HEIGHT], interpolation=cv2.INTER_LANCZOS4)
        img = img[...,::-1] # BGR -> RGB
        img = img.transpose(2, 0, 1)
        img = img.astype(np.float32) / 255
        return img
    def __getitem__(self, index):
        # map the dataset-relative index back to the global FFHQ index
        index_entire = self.keys[index]
        metadata_instance = self.metadata[str(index_entire)]
        image_path = os.path.join(
            self.root_path,
            metadata_instance["thumbnail"]["file_path"]
        )
        img = self._load_image(image_path)
        # landmarks normalized by the stored pixel size into [0, 1] coordinates
        t = np.array(metadata_instance["image"]["face_landmarks"], dtype=np.float32) # (M, 2)
        t[:,0] /= metadata_instance["image"]["pixel_size"][0]
        t[:,1] /= metadata_instance["image"]["pixel_size"][1]
        return torch.tensor(img), torch.tensor(t).view(-1)
def ffhq(download=True):
    """Build the FFHQ thumbnail training and validation splits.

    :param download: forwarded to FFHQThumbnails (dataset requires manual download)
    """
    root_path = cache_path("ffhq")
    train_set = FFHQThumbnails(root_path=root_path, download=download, train=True)
    test_set = FFHQThumbnails(root_path=root_path, download=download, train=False)
return ImageDataset(train_set, test_set) | [
"orjson.loads",
"os.path.join",
"tqdm.tqdm",
"numpy.array",
"torch.tensor",
"cv2.resize",
"cv2.imread",
"vaetc.utils.debug.debug_print"
] | [((1276, 1304), 'vaetc.utils.debug.debug_print', 'debug_print', (['"""Splitting ..."""'], {}), "('Splitting ...')\n", (1287, 1304), False, 'from vaetc.utils.debug import debug_print\n'), ((2543, 2565), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (2553, 2565), False, 'import cv2\n'), ((2580, 2658), 'cv2.resize', 'cv2.resize', (['img', '[IMAGE_WIDTH, IMAGE_HEIGHT]'], {'interpolation': 'cv2.INTER_LANCZOS4'}), '(img, [IMAGE_WIDTH, IMAGE_HEIGHT], interpolation=cv2.INTER_LANCZOS4)\n', (2590, 2658), False, 'import cv2\n'), ((2967, 3040), 'os.path.join', 'os.path.join', (['self.root_path', "metadata_instance['thumbnail']['file_path']"], {}), "(self.root_path, metadata_instance['thumbnail']['file_path'])\n", (2979, 3040), False, 'import os\n'), ((3132, 3204), 'numpy.array', 'np.array', (["metadata_instance['image']['face_landmarks']"], {'dtype': 'np.float32'}), "(metadata_instance['image']['face_landmarks'], dtype=np.float32)\n", (3140, 3204), True, 'import numpy as np\n'), ((861, 895), 'vaetc.utils.debug.debug_print', 'debug_print', (['"""Loading metadata..."""'], {}), "('Loading metadata...')\n", (872, 895), False, 'from vaetc.utils.debug import debug_print\n'), ((1181, 1215), 'vaetc.utils.debug.debug_print', 'debug_print', (['"""Parsing metadata..."""'], {}), "('Parsing metadata...')\n", (1192, 1215), False, 'from vaetc.utils.debug import debug_print\n'), ((1244, 1266), 'orjson.loads', 'orjson.loads', (['json_str'], {}), '(json_str)\n', (1256, 1266), False, 'import orjson\n'), ((3354, 3371), 'torch.tensor', 'torch.tensor', (['img'], {}), '(img)\n', (3366, 3371), False, 'import torch\n'), ((753, 805), 'os.path.join', 'os.path.join', (['self.root_path', '"""ffhq-dataset-v2.json"""'], {}), "(self.root_path, 'ffhq-dataset-v2.json')\n", (765, 805), False, 'import os\n'), ((939, 945), 'tqdm.tqdm', 'tqdm', ([], {}), '()\n', (943, 945), False, 'from tqdm import tqdm\n'), ((1720, 1772), 'os.path.join', 'os.path.join', (['self.root_path', 
'"""ffhq-dataset-v2.json"""'], {}), "(self.root_path, 'ffhq-dataset-v2.json')\n", (1732, 1772), False, 'import os\n'), ((1829, 1878), 'os.path.join', 'os.path.join', (['self.root_path', '"""thumbnails128x128"""'], {}), "(self.root_path, 'thumbnails128x128')\n", (1841, 1878), False, 'import os\n'), ((1526, 1599), 'os.path.join', 'os.path.join', (['self.root_path', "metadata_instance['thumbnail']['file_path']"], {}), "(self.root_path, metadata_instance['thumbnail']['file_path'])\n", (1538, 1599), False, 'import os\n'), ((3373, 3388), 'torch.tensor', 'torch.tensor', (['t'], {}), '(t)\n', (3385, 3388), False, 'import torch\n')] |
#!/usr/bin/env python
#
#
__author__ = '<EMAIL>'
import numpy as np
from scipy.stats import norm
from keras.models import load_model
import tensorflow as tf
from flask import Flask
from flask import jsonify
from flask import request
from flask_cors import CORS
# Load the four VAE decoders and capture the TF graph each was built in.
mnist_z2=load_model('mnist-va-decoder-zdim-2.h5')
mnist_z2._make_predict_function()
graph_mnist_z2 = tf.get_default_graph()
mnist_z4=load_model('mnist-va-decoder-zdim-4.h5')
# fix: this call was missing only for mnist_z4. Keras builds the predict
# function lazily and not thread-safely, so without it the first predict()
# issued from a Flask worker thread can fail.
mnist_z4._make_predict_function()
graph_mnist_z4 = tf.get_default_graph()
fashion_z2=load_model('fashion-va-decoder-zdim-2.h5')
fashion_z2._make_predict_function()
graph_fashion_z2 = tf.get_default_graph()
fashion_z4=load_model('fashion-va-decoder-zdim-4.h5')
fashion_z4._make_predict_function()
graph_fashion_z4 = tf.get_default_graph()
img_size = 28
# model name -> (decoder, graph) so the request handler can dispatch by name
decoders = dict(mnist_z2=(mnist_z2,graph_mnist_z2), mnist_z4=(mnist_z4,graph_mnist_z4), fashion_z2=(fashion_z2,graph_fashion_z2), fashion_z4=(fashion_z4,graph_fashion_z4))
app = Flask(__name__)
cors = CORS(app)
def single_sample(decoder, graph, z):
    """Decode one latent vector into an img_size x img_size image.

    z holds uniform values in (0, 1); they are mapped through the Gaussian
    quantile function before decoding. The result is rescaled to 0..255 and
    returned as nested Python lists.
    """
    latent = norm.ppf(np.array([z]))
    with graph.as_default():
        decoded = decoder.predict(latent)
    image = decoded[0].reshape(img_size, img_size)
    image = image - np.min(image)
    image = np.round(image * (255.0 / np.max(image)))
    return image.tolist()
def grid_sample(decoder, graph, z, grid):
    """Decode a num x num grid of images over the first two latent dims.

    grid is (start, end, num); the remaining latent coordinates are taken
    from z[0] and z[1]. Returns a nested list indexed [row][col].
    """
    start, end, num = grid
    xs = np.linspace(start, end, num)
    ys = xs
    return [[single_sample(decoder, graph, [xi, yi, z[0], z[1]]) for xi in xs]
            for yi in ys]
@app.route('/api/sample/', methods=['POST'])
def sample():
    """POST {name, z, format[, grid]} -> {"sample": ...} decoded from latent z.

    format == 'single' decodes one image; any other value decodes a grid of
    images over the first two latent dimensions (see grid_sample).
    """
    if not request.json or 'name' not in request.json or 'z' not in request.json:
        return None
    args = request.get_json()
    z = args['z']
    name = args['name']
    decoder, graph = decoders[name]
    # fix: removed an unused norm.ppf(np.array([z])) computation here — the
    # sampling helpers apply the Gaussian quantile transform themselves.
    sample_fmt = args['format']
    if sample_fmt == 'single':
        sampled = single_sample(decoder, graph, z)
    else:
        sampled = grid_sample(decoder, graph, z, args['grid'])
    return jsonify({ "sample":sampled}), 201
if __name__ == '__main__':
    # serve the API; threaded=True lets Flask handle requests concurrently
    app.run(debug=False, threaded=True)
| [
"keras.models.load_model",
"flask_cors.CORS",
"flask.Flask",
"numpy.max",
"numpy.array",
"numpy.linspace",
"flask.request.get_json",
"numpy.min",
"tensorflow.get_default_graph",
"flask.jsonify"
] | [((273, 313), 'keras.models.load_model', 'load_model', (['"""mnist-va-decoder-zdim-2.h5"""'], {}), "('mnist-va-decoder-zdim-2.h5')\n", (283, 313), False, 'from keras.models import load_model\n'), ((365, 387), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (385, 387), True, 'import tensorflow as tf\n'), ((397, 437), 'keras.models.load_model', 'load_model', (['"""mnist-va-decoder-zdim-4.h5"""'], {}), "('mnist-va-decoder-zdim-4.h5')\n", (407, 437), False, 'from keras.models import load_model\n'), ((455, 477), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (475, 477), True, 'import tensorflow as tf\n'), ((489, 531), 'keras.models.load_model', 'load_model', (['"""fashion-va-decoder-zdim-2.h5"""'], {}), "('fashion-va-decoder-zdim-2.h5')\n", (499, 531), False, 'from keras.models import load_model\n'), ((587, 609), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (607, 609), True, 'import tensorflow as tf\n'), ((621, 663), 'keras.models.load_model', 'load_model', (['"""fashion-va-decoder-zdim-4.h5"""'], {}), "('fashion-va-decoder-zdim-4.h5')\n", (631, 663), False, 'from keras.models import load_model\n'), ((719, 741), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (739, 741), True, 'import tensorflow as tf\n'), ((937, 952), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (942, 952), False, 'from flask import Flask\n'), ((960, 969), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (964, 969), False, 'from flask_cors import CORS\n'), ((1378, 1406), 'numpy.linspace', 'np.linspace', (['start', 'end', 'num'], {}), '(start, end, num)\n', (1389, 1406), True, 'import numpy as np\n'), ((1869, 1887), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1885, 1887), False, 'from flask import request\n'), ((1030, 1043), 'numpy.array', 'np.array', (['[z]'], {}), '([z])\n', (1038, 1043), True, 'import numpy as np\n'), ((1194, 1209), 'numpy.min', 
'np.min', (['sampled'], {}), '(sampled)\n', (1200, 1209), True, 'import numpy as np\n'), ((1984, 1997), 'numpy.array', 'np.array', (['[z]'], {}), '([z])\n', (1992, 1997), True, 'import numpy as np\n'), ((2189, 2217), 'flask.jsonify', 'jsonify', (["{'sample': sampled}"], {}), "({'sample': sampled})\n", (2196, 2217), False, 'from flask import jsonify\n'), ((1256, 1271), 'numpy.max', 'np.max', (['sampled'], {}), '(sampled)\n', (1262, 1271), True, 'import numpy as np\n')] |
import numpy as np
from scipy.optimize import minimize
# height distribution
def vf(z, k, v0):
    """Sedimentation-equilibrium particle count at height z.

    n(z) = v0 * exp(-(4/3) pi r^3 drho g z / (k T)) with fixed particle and
    medium constants; v0 is the count at z = 0 and k the (scaled) Boltzmann
    constant being fitted.
    """
    drho = 1063. - 997.  # density contrast (kg / m^3)
    g = 9.81             # gravity (m / s^2)
    r3 = 0.140608        # particle radius cubed (e-18 m^3)
    T = 293.             # temperature (K)
    exponent = -4. * np.pi * r3 * drho * g * z / (3. * k * T)
    return v0 * np.exp(exponent)
# log-likelihood function
def lnl(x, args):
    """Negative log-likelihood (Poisson, up to constants) of counts n at heights z.

    :param x: parameter vector (k, v0)
    :param args: pair (z, n) of bin heights and observed counts
    """
    k, v0 = x[0], x[1]
    z, n = args
    expected = vf(z, k, v0)
    return -np.sum(n * np.log(expected) - expected)
# binned maximum likelihood
def binml(z, n):
    """Binned maximum-likelihood fit of (k, v0) to observed counts.

    :param z: array of bin heights (m)
    :param n: array of observed counts per bin
    :return: (best-fit parameters [k, v0], dense inverse Hessian as a
        covariance estimate from the L-BFGS-B approximation)
    """
    x0 = np.array([1.380649e1, 1880.]) # starting guesses
    mtol = 1e-15
    # minimize -ln L
    res = minimize(lnl, x0, args=np.array([z, n]), method="L-BFGS-B", tol=mtol)
    hinv = res.hess_inv.todense() # inverse of Hessian for covariances
    return res.x, hinv
| [
"numpy.exp",
"numpy.array"
] | [((623, 651), 'numpy.array', 'np.array', (['[13.80649, 1880.0]'], {}), '([13.80649, 1880.0])\n', (631, 651), True, 'import numpy as np\n'), ((365, 421), 'numpy.exp', 'np.exp', (['(-4.0 * np.pi * r3 * drho * g * z / (3.0 * k * T))'], {}), '(-4.0 * np.pi * r3 * drho * g * z / (3.0 * k * T))\n', (371, 421), True, 'import numpy as np\n'), ((743, 759), 'numpy.array', 'np.array', (['[z, n]'], {}), '([z, n])\n', (751, 759), True, 'import numpy as np\n')] |
from sequence.utils.quantum_state import QuantumState
from sequence.utils.encoding import polarization
from math import sqrt
from numpy.random import default_rng
rng = default_rng()
def test_measure():
    # Measure single-qubit polarization states in both bases.
    qs = QuantumState()
    # |0>, |1>, |+>, |-> in the computational basis
    states = [(complex(1), complex(0)),
              (complex(0), complex(1)),
              (complex(sqrt(1 / 2)), complex(sqrt(1 / 2))),
              (complex(-sqrt(1 / 2)), complex(sqrt(1 / 2)))]
    basis1, basis2 = polarization['bases'][0], polarization['bases'][1]
    # measuring each state in its own eigenbasis is deterministic
    basis = [basis1,
             basis1,
             basis2,
             basis2]
    expect = [0, 100, 0, 100]
    for s, b, e in zip(states, basis, expect):
        counter = 0
        for _ in range(100):
            qs.set_state_single(s)
            res = qs.measure(b, rng)
            if res:
                counter += 1
        assert counter == e
    # measuring in the conjugate basis gives ~50/50 outcomes
    basis = [basis2,
             basis2,
             basis1,
             basis1]
    expect = [500, 500, 500, 500]
    for s, b, e in zip(states, basis, expect):
        counter = 0
        for _ in range(1000):
            qs.set_state_single(s)
            res = qs.measure(b, rng)
            if res:
                counter += 1
        assert abs(0.5 - counter / 1000) < 0.1
def test_measure_entangled():
    # Same checks as test_measure, but the measured qubit is first entangled
    # with a fresh companion qubit on every trial.
    qs1 = QuantumState()
    # |0>, |1>, |+>, |-> in the computational basis
    states = [(complex(1), complex(0)),
              (complex(0), complex(1)),
              (complex(sqrt(1 / 2)), complex(sqrt(1 / 2))),
              (complex(-sqrt(1 / 2)), complex(sqrt(1 / 2)))]
    basis1, basis2 = polarization['bases'][0], polarization['bases'][1]
    # eigenbasis measurements stay deterministic after entangling
    basis = [basis1,
             basis1,
             basis2,
             basis2]
    expect = [0, 100, 0, 100]
    for s, b, e in zip(states, basis, expect):
        counter = 0
        for _ in range(100):
            qs1.set_state_single(s)
            qs2 = QuantumState()
            qs1.entangle(qs2)
            res = qs1.measure(b, rng)
            if res:
                counter += 1
        assert counter == e
    # conjugate-basis measurements remain ~50/50
    basis = [basis2,
             basis2,
             basis1,
             basis1]
    expect = [500, 500, 500, 500]
    for s, b, e in zip(states, basis, expect):
        counter = 0
        for _ in range(1000):
            qs1.set_state_single(s)
            qs2 = QuantumState()
            qs1.entangle(qs2)
            res = qs1.measure(b, rng)
            if res:
                counter += 1
        assert abs(0.5 - counter / 1000) < 0.1
| [
"sequence.utils.quantum_state.QuantumState",
"math.sqrt",
"numpy.random.default_rng"
] | [((169, 182), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (180, 182), False, 'from numpy.random import default_rng\n'), ((214, 228), 'sequence.utils.quantum_state.QuantumState', 'QuantumState', ([], {}), '()\n', (226, 228), False, 'from sequence.utils.quantum_state import QuantumState\n'), ((1289, 1303), 'sequence.utils.quantum_state.QuantumState', 'QuantumState', ([], {}), '()\n', (1301, 1303), False, 'from sequence.utils.quantum_state import QuantumState\n'), ((1842, 1856), 'sequence.utils.quantum_state.QuantumState', 'QuantumState', ([], {}), '()\n', (1854, 1856), False, 'from sequence.utils.quantum_state import QuantumState\n'), ((2273, 2287), 'sequence.utils.quantum_state.QuantumState', 'QuantumState', ([], {}), '()\n', (2285, 2287), False, 'from sequence.utils.quantum_state import QuantumState\n'), ((332, 343), 'math.sqrt', 'sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (336, 343), False, 'from math import sqrt\n'), ((354, 365), 'math.sqrt', 'sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (358, 365), False, 'from math import sqrt\n'), ((415, 426), 'math.sqrt', 'sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (419, 426), False, 'from math import sqrt\n'), ((1407, 1418), 'math.sqrt', 'sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (1411, 1418), False, 'from math import sqrt\n'), ((1429, 1440), 'math.sqrt', 'sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (1433, 1440), False, 'from math import sqrt\n'), ((1490, 1501), 'math.sqrt', 'sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (1494, 1501), False, 'from math import sqrt\n'), ((393, 404), 'math.sqrt', 'sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (397, 404), False, 'from math import sqrt\n'), ((1468, 1479), 'math.sqrt', 'sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (1472, 1479), False, 'from math import sqrt\n')] |
import numpy as np
from costs import sigmoid
def mse_gradient(y, tx, w):
    """Gradient of the MSE loss at w, together with the residual vector.

    :param y: target vector
    :param tx: design matrix
    :param w: weight vector
    :return: (gradient, residuals) where residuals = y - tx @ w
    """
    residual = y - tx @ w
    gradient = -(tx.T @ residual) / len(residual)
    return gradient, residual
def hessian(w, tx):
    """Hessian of the logistic-regression loss: tx.T @ diag(s(1-s)) @ tx."""
    s = sigmoid(tx.dot(w))
    weights = s * (1 - s)
    # row-wise scaling of tx.T is equivalent to multiplying by diag(weights)
    return (tx.T * weights) @ tx
def log_likelihood_gradient(y, tx, w):
    """Gradient of the logistic-regression negative log-likelihood."""
    predictions = sigmoid(tx.dot(w))
    return tx.T @ (predictions - y)
| [
"numpy.multiply"
] | [((245, 265), 'numpy.multiply', 'np.multiply', (['tx.T', 'S'], {}), '(tx.T, S)\n', (256, 265), True, 'import numpy as np\n')] |
import json
import numpy as np
import os
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from tqdm import trange
import random
import numpy as np
from torch.utils.data import DataLoader
# METRICS = ['glob_acc', 'per_acc', 'glob_loss', 'per_loss', 'user_train_time', 'server_agg_time']
METRICS = ['glob_acc', 'per_acc', 'glob_loss', 'per_loss']
def get_data_dir(dataset):
    """Map a dataset identifier string to its train/test/proxy directories.

    The identifier encodes the dataset family (EMnist / Mnist / celeb) plus
    split parameters ('alpha'/'ratio' for Dirichlet splits, 'class' for
    per-class splits, 'mtl' for the multi-task variants, 'user'/'agg' for
    CelebA). Raises ValueError for unrecognized names.
    """
    if 'EMnist' in dataset:
        # #EMnist-alpha0.1-ratio0.1-0-letters
        # dataset_=dataset.replace('alpha', '').replace('ratio', '').split('-')
        # alpha, ratio =dataset_[1], dataset_[2]
        # types = 'letters'
        # path_prefix = os.path.join('data', 'EMnist', f'u20-{types}-alpha{alpha}-ratio{ratio}')
        # train_data_dir=os.path.join(path_prefix, 'train')
        # test_data_dir=os.path.join(path_prefix, 'test')
        # proxy_data_dir = 'data/proxy_data/emnist-n10/'
        if 'alpha' in dataset:
            # Dirichlet split: parse alpha and ratio out of the name
            dataset_=dataset.replace('alpha', '').replace('ratio', '').split('-')
            if 'mtl' in dataset:
                print("EMnist-alpha-mtl")
                alpha, ratio=dataset_[2], dataset_[3]
                #path_prefix=os.path.join('data', 'Mnist', 'u20alpha{}min10ratio{}'.format(alpha, ratio))
                path_prefix=os.path.join('data', 'EMnist', 'u20c10-mtl-alpha{}-ratio{}'.format(alpha, ratio))
            else:
                alpha, ratio=dataset_[1], dataset_[2]
                #path_prefix=os.path.join('data', 'Mnist', 'u20alpha{}min10ratio{}'.format(alpha, ratio))
                path_prefix=os.path.join('data', 'EMnist', 'ou20c10-alpha{}-ratio{}'.format(alpha, ratio))
            train_data_dir=os.path.join(path_prefix, 'train')
            test_data_dir=os.path.join(path_prefix, 'test')
            proxy_data_dir = 'data/proxy_data/mnist-n10/'
        else:
            # per-class split: parse the class count out of the name
            dataset_ = dataset.replace('class', '').split('-')
            # path_prefix=os.path.join('data', 'Mnist', 'u20alpha{}min10ratio{}'.format(alpha, ratio))
            if 'mtl' in dataset:
                classes = dataset_[2]
                path_prefix = os.path.join('data', 'EMnist', 'u20c10-mtl-class{}'.format(classes))
            else:
                classes = dataset_[1]
                path_prefix = os.path.join('data', 'EMnist', 'u20c10-class{}'.format(classes))
            train_data_dir = os.path.join(path_prefix, 'train')
            test_data_dir = os.path.join(path_prefix, 'test')
            proxy_data_dir = 'data/proxy_data/mnist-n10/'
    elif 'Mnist' in dataset:
        if 'alpha' in dataset:
            dataset_=dataset.replace('alpha', '').replace('ratio', '').split('-')
            if 'mtl' in dataset:
                alpha, ratio=dataset_[2], dataset_[3]
                #path_prefix=os.path.join('data', 'Mnist', 'u20alpha{}min10ratio{}'.format(alpha, ratio))
                path_prefix=os.path.join('data', 'Mnist', 'u20c10-mtl-alpha{}-ratio{}'.format(alpha, ratio))
            else:
                alpha, ratio=dataset_[1], dataset_[2]
                #path_prefix=os.path.join('data', 'Mnist', 'u20alpha{}min10ratio{}'.format(alpha, ratio))
                path_prefix=os.path.join('data', 'Mnist', 'ou20c10-alpha{}-ratio{}'.format(alpha, ratio))
            train_data_dir=os.path.join(path_prefix, 'train')
            test_data_dir=os.path.join(path_prefix, 'test')
            proxy_data_dir = 'data/proxy_data/mnist-n10/'
        else:
            dataset_ = dataset.replace('class', '').split('-')
            # path_prefix=os.path.join('data', 'Mnist', 'u20alpha{}min10ratio{}'.format(alpha, ratio))
            if 'mtl' in dataset:
                classes = dataset_[2]
                path_prefix = os.path.join('data', 'Mnist', 'u20c10-mtl-class{}'.format(classes))
            else:
                classes = dataset_[1]
                path_prefix = os.path.join('data', 'Mnist', 'u20c10-class{}'.format(classes))
            train_data_dir = os.path.join(path_prefix, 'train')
            test_data_dir = os.path.join(path_prefix, 'test')
            proxy_data_dir = 'data/proxy_data/mnist-n10/'
    elif 'celeb' in dataset.lower():
        # CelebA split encodes user and aggregated-user counts
        dataset_ = dataset.lower().replace('user', '').replace('agg','').split('-')
        user, agg_user = dataset_[1], dataset_[2]
        path_prefix = os.path.join('data', 'CelebA', 'user{}-agg{}'.format(user,agg_user))
        train_data_dir=os.path.join(path_prefix, 'train')
        test_data_dir=os.path.join(path_prefix, 'test')
        proxy_data_dir=os.path.join('/user500/', 'proxy')
    else:
        raise ValueError("Dataset not recognized.")
    return train_data_dir, test_data_dir, proxy_data_dir
def read_data(dataset):
    '''Parse the train, test and (optional) proxy data directories.

    Assumes:
    - the files in each directory are .json or .pt files with keys
      'users' and 'user_data' (and optionally 'hierarchies')
    - the set of train users equals the set of test users

    Return:
        clients: sorted list of client ids (taken from the train data)
        groups: list of group ids; empty list if none found
        train_data: dict mapping user id -> train data
        test_data: dict mapping user id -> test data
        proxy_data: dict of proxy data; empty if no proxy directory exists
    '''
    train_data_dir, test_data_dir, proxy_data_dir = get_data_dir(dataset)
    clients = []
    groups = []
    train_data = {}
    test_data = {}
    proxy_data = {}
    train_files = os.listdir(train_data_dir)
    train_files = [f for f in train_files if f.endswith('.json') or f.endswith(".pt")]
    for f in train_files:
        file_path = os.path.join(train_data_dir, f)
        # each shard may be JSON or a pickled torch file
        if file_path.endswith("json"):
            with open(file_path, 'r') as inf:
                cdata = json.load(inf)
        elif file_path.endswith(".pt"):
            with open(file_path, 'rb') as inf:
                cdata = torch.load(inf)
        else:
            raise TypeError("Data format not recognized: {}".format(file_path))
        clients.extend(cdata['users'])
        if 'hierarchies' in cdata:
            groups.extend(cdata['hierarchies'])
        train_data.update(cdata['user_data'])
    # deduplicate and order the client ids via the merged train data
    clients = list(sorted(train_data.keys()))
    test_files = os.listdir(test_data_dir)
    test_files = [f for f in test_files if f.endswith('.json') or f.endswith(".pt")]
    for f in test_files:
        file_path = os.path.join(test_data_dir, f)
        if file_path.endswith(".pt"):
            with open(file_path, 'rb') as inf:
                cdata = torch.load(inf)
        elif file_path.endswith(".json"):
            with open(file_path, 'r') as inf:
                cdata = json.load(inf)
        else:
            raise TypeError("Data format not recognized: {}".format(file_path))
        test_data.update(cdata['user_data'])
    # proxy data is optional; skip silently when the directory is absent
    if proxy_data_dir and os.path.exists(proxy_data_dir):
        proxy_files=os.listdir(proxy_data_dir)
        proxy_files=[f for f in proxy_files if f.endswith('.json') or f.endswith(".pt")]
        for f in proxy_files:
            file_path=os.path.join(proxy_data_dir, f)
            if file_path.endswith(".pt"):
                with open(file_path, 'rb') as inf:
                    cdata=torch.load(inf)
            elif file_path.endswith(".json"):
                with open(file_path, 'r') as inf:
                    cdata=json.load(inf)
            else:
                raise TypeError("Data format not recognized: {}".format(file_path))
            proxy_data.update(cdata['user_data'])
    return clients, groups, train_data, test_data, proxy_data
def read_proxy_data(proxy_data, dataset, batch_size):
    """Build a shuffled DataLoader (and its iterator) from proxy data."""
    features, labels = proxy_data['x'], proxy_data['y']
    features, labels = convert_data(features, labels, dataset=dataset)
    samples = list(zip(features, labels))
    loader = DataLoader(samples, batch_size, shuffle=True)
    return loader, iter(loader)
def aggregate_data_(clients, dataset, dataset_name, batch_size):
    """Pool the listed clients' samples into one shuffled DataLoader.

    Returns (loader, iterator over the loader, labels observed per client,
    with repeats across clients).
    """
    combined = []
    seen_labels = []
    for i in range(len(dataset)):
        client_id = clients[i]
        user_data = dataset[client_id]
        X, y = convert_data(user_data['x'], user_data['y'], dataset=dataset_name)
        combined.extend(zip(X, y))
        uniq = torch.unique(y).detach().numpy()
        seen_labels += list(uniq)
    loader = DataLoader(combined, batch_size, shuffle=True)
    return loader, iter(loader), seen_labels
def aggregate_user_test_data(data, dataset_name, batch_size):
    """Merge every client's test split into one loader plus its unique labels."""
    clients, test_split = data[0], data[3]
    loader, _, labels = aggregate_data_(clients, test_split, dataset_name, batch_size)
    return loader, np.unique(labels)
def aggregate_user_data(data, dataset_name, batch_size):
    """Merge every client's train split into one loader, iterator and label set."""
    # data contains: clients, groups, train_data, test_data, proxy_data
    clients, train_split = data[0], data[2]
    loader, loader_iter, labels = aggregate_data_(clients, train_split, dataset_name, batch_size)
    return loader, loader_iter, np.unique(labels)
def convert_data(X, y, dataset=''):
    """Coerce raw nested lists into float32 features and int64 labels.

    CelebA images arrive as NHWC and are permuted to NCHW. If X is already
    a tensor, both inputs are returned untouched.
    """
    if isinstance(X, torch.Tensor):
        return X, y
    if 'celeb' in dataset.lower():
        features = torch.Tensor(X).type(torch.float32).permute(0, 3, 1, 2)
    else:
        features = torch.Tensor(X).type(torch.float32)
    labels = torch.Tensor(y).type(torch.int64)
    return features, labels
# for multi-task data
# def convert_multidata(X, y, dataset=''):
# if not isinstance(X, torch.Tensor):
# if 'celeb' in dataset.lower():
# X=torch.Tensor(X).type(torch.float32).permute(0, 3, 1, 2)
# y=torch.Tensor(y).type(torch.int64)
#
# else:
# X=torch.Tensor(X).type(torch.float32)
# y=torch.Tensor(y).type(torch.int64)
# return X, y
def read_user_multidata(index, data, dataset='', count_labels=False):
    """Load one user's multi-task train/test data.

    Each sample is paired with its full column of task labels, reshaped to
    (1, num_tasks). With count_labels=True also returns a dict with the
    unique train labels and their counts.

    Fixes: removed a dead `multi_y = torch.tensor([])` allocated (and never
    used) on every loop iteration, and reshaped test labels by len(y_test)
    instead of len(y_train) — the original only worked when the two label
    matrices happened to have the same number of rows.
    """
    # data contains: clients, groups(optional), train_data, test_data, proxy_data(optional)
    id = data[0][index]
    train_data = data[2][id]
    test_data = data[3][id]
    X_train, y_train = convert_data(train_data['x'], train_data['y'], dataset=dataset)
    train_data = []
    for i in range(len(X_train)):
        train_data.append((X_train[i], y_train[:, i].view(1, len(y_train))))
    X_test, y_test = convert_data(test_data['x'], test_data['y'], dataset=dataset)
    test_data = []
    for i in range(len(X_test)):
        test_data.append((X_test[i], y_test[:, i].view(1, len(y_test))))
    if count_labels:
        label_info = {}
        unique_y, counts = torch.unique(y_train, return_counts=True)
        label_info['labels'] = unique_y.detach().numpy()
        label_info['counts'] = counts.detach().numpy()
        return id, train_data, test_data, label_info
    return id, train_data, test_data
def read_user_data(index, data, dataset='', count_labels=False):
    """Load one client's train/test samples as (x, y) pairs.

    With count_labels=True also returns {'labels': ..., 'counts': ...}
    describing the unique training labels.
    """
    # data contains: clients, groups, train_data, test_data, proxy_data(optional)
    uid = data[0][index]
    user_train = data[2][uid]
    user_test = data[3][uid]
    X_train, y_train = convert_data(user_train['x'], user_train['y'], dataset=dataset)
    train_pairs = list(zip(X_train, y_train))
    X_test, y_test = convert_data(user_test['x'], user_test['y'], dataset=dataset)
    test_pairs = list(zip(X_test, y_test))
    if not count_labels:
        return uid, train_pairs, test_pairs
    unique_y, counts = torch.unique(y_train, return_counts=True)
    label_info = {
        'labels': unique_y.detach().numpy(),
        'counts': counts.detach().numpy(),
    }
    return uid, train_pairs, test_pairs, label_info
# def get_parameters(model):
# model_params = []
# for m in model:
# model_params += model[m].parameters()
# return model_params
def get_parameters(model):
    """Collect each sub-module's parameters into per-module lists.

    `model` is a mapping of sub-modules: the "rep" module's parameters go
    into slot 0 and module m into slot m+1 — this assumes the non-"rep"
    keys are exactly the integers 0..len(model)-2; TODO confirm against
    callers.
    """
    model_params = [[] for _ in range(len(model))]
    for m in model:
        if m == "rep":
            model_params[0] += model[m].parameters()
        else:
            model_params[m+1] += model[m].parameters()
    # model_params[m] = []
    # for param in model[m].parameters():
    #     model_params[m] += model[m].parameters() # .append() is wrong
    # model_params += model[m].parameters()
    return model_params
def get_dataset_name(dataset):
    """Map a dataset identifier to its canonical family name.

    :raises ValueError: when the identifier matches no known family
    """
    lowered = dataset.lower()
    # 'emnist' must be checked before 'mnist' since it contains it
    if 'celeb' in lowered:
        return 'celeb'
    if 'emnist' in lowered:
        return 'emnist'
    if 'mnist' in lowered:
        return 'mnist'
    raise ValueError('Unsupported dataset {}'.format(dataset))
def polyak_move(params, target_params, ratio=0.1):
    """Move each parameter a fraction `ratio` toward its target, in place."""
    for param, target in zip(params, target_params):
        gap = param.clone().detach().data - target.clone().detach().data
        param.data = param.data - ratio * gap
def meta_move(params, target_params, ratio):
    """Interpolate targets in place: target <- param + ratio * (target - param)."""
    for param, target in zip(params, target_params):
        direction = target.clone().data - param.clone().data
        target.data = param.clone().data + ratio * direction
def moreau_loss(params, reg_params):
    """Mean over layers of the mean squared distance to (detached) reg_params.

    Computes 1/T * sum_i mean(|param_i - reg_param_i|^2); gradients flow
    only through `params`.
    """
    per_layer = [
        torch.mean(torch.square(param - reg.clone().detach()))
        for param, reg in zip(params, reg_params)
    ]
    return torch.mean(torch.stack(per_layer))
def l2_loss(params):
    """Mean over layers of the mean squared parameter magnitude."""
    per_layer = [torch.mean(torch.square(param)) for param in params]
    return torch.mean(torch.stack(per_layer))
def update_fast_params(fast_weights, grads, lr, allow_unused=False):
    """
    Apply one clipped SGD step to fast_weights in place.

    :param fast_weights: list of parameters.
    :param grads: list of gradients (entries may be None when allow_unused)
    :param lr: step size
    :param allow_unused: skip parameters whose gradient is None
    :return: the updated fast_weights list.
    """
    for grad, weight in zip(grads, fast_weights):
        if allow_unused and grad is None:
            continue
        # clip each gradient element to [-10, 10] before stepping
        clipped = torch.clamp(grad, -10, 10)
        weight.data = weight.data.clone() - lr * clipped
    return fast_weights
def init_named_params(model, keywords=('encode',)):
    """Clone trainable copies of the parameters of selected layers.

    Fix: the default for `keywords` was a mutable list; a tuple default is
    safe and backward compatible (the argument is only iterated).

    :param model: object exposing `named_layers` as {name: iterable of params}
    :param keywords: substrings selecting which layer names to copy
    :return: {layer_name: [detached clones with requires_grad=True]}
    """
    named_params = {}
    #named_params_list = []
    for name, params in model.named_layers.items():
        if any(key in name for key in keywords):
            named_params[name] = [param.clone().detach().requires_grad_(True)
                                  for param in params]
            #named_params_list += named_params[name]
    return named_params#, named_params_list
| [
"os.path.exists",
"torch.unique",
"os.listdir",
"numpy.unique",
"torch.load",
"torch.stack",
"os.path.join",
"torch.square",
"torch.Tensor",
"torch.tensor",
"torch.utils.data.DataLoader",
"json.load",
"torch.clamp"
] | [((5378, 5404), 'os.listdir', 'os.listdir', (['train_data_dir'], {}), '(train_data_dir)\n', (5388, 5404), False, 'import os\n'), ((6149, 6174), 'os.listdir', 'os.listdir', (['test_data_dir'], {}), '(test_data_dir)\n', (6159, 6174), False, 'import os\n'), ((7702, 7747), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset', 'batch_size'], {'shuffle': '(True)'}), '(dataset, batch_size, shuffle=True)\n', (7712, 7747), False, 'from torch.utils.data import DataLoader\n'), ((8296, 8342), 'torch.utils.data.DataLoader', 'DataLoader', (['combined', 'batch_size'], {'shuffle': '(True)'}), '(combined, batch_size, shuffle=True)\n', (8306, 8342), False, 'from torch.utils.data import DataLoader\n'), ((5538, 5569), 'os.path.join', 'os.path.join', (['train_data_dir', 'f'], {}), '(train_data_dir, f)\n', (5550, 5569), False, 'import os\n'), ((6305, 6335), 'os.path.join', 'os.path.join', (['test_data_dir', 'f'], {}), '(test_data_dir, f)\n', (6317, 6335), False, 'import os\n'), ((6755, 6785), 'os.path.exists', 'os.path.exists', (['proxy_data_dir'], {}), '(proxy_data_dir)\n', (6769, 6785), False, 'import os\n'), ((6807, 6833), 'os.listdir', 'os.listdir', (['proxy_data_dir'], {}), '(proxy_data_dir)\n', (6817, 6833), False, 'import os\n'), ((8178, 8193), 'torch.unique', 'torch.unique', (['y'], {}), '(y)\n', (8190, 8193), False, 'import torch\n'), ((8656, 8680), 'numpy.unique', 'np.unique', (['unique_labels'], {}), '(unique_labels)\n', (8665, 8680), True, 'import numpy as np\n'), ((8999, 9023), 'numpy.unique', 'np.unique', (['unique_labels'], {}), '(unique_labels)\n', (9008, 9023), True, 'import numpy as np\n'), ((10228, 10244), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (10240, 10244), False, 'import torch\n'), ((10720, 10761), 'torch.unique', 'torch.unique', (['y_train'], {'return_counts': '(True)'}), '(y_train, return_counts=True)\n', (10732, 10761), False, 'import torch\n'), ((11593, 11634), 'torch.unique', 'torch.unique', (['y_train'], {'return_counts': '(True)'}), 
'(y_train, return_counts=True)\n', (11605, 11634), False, 'import torch\n'), ((13573, 13592), 'torch.stack', 'torch.stack', (['losses'], {}), '(losses)\n', (13584, 13592), False, 'import torch\n'), ((13751, 13770), 'torch.stack', 'torch.stack', (['losses'], {}), '(losses)\n', (13762, 13770), False, 'import torch\n'), ((14168, 14194), 'torch.clamp', 'torch.clamp', (['grad', '(-10)', '(10)'], {}), '(grad, -10, 10)\n', (14179, 14194), False, 'import torch\n'), ((1714, 1748), 'os.path.join', 'os.path.join', (['path_prefix', '"""train"""'], {}), "(path_prefix, 'train')\n", (1726, 1748), False, 'import os\n'), ((1775, 1808), 'os.path.join', 'os.path.join', (['path_prefix', '"""test"""'], {}), "(path_prefix, 'test')\n", (1787, 1808), False, 'import os\n'), ((2397, 2431), 'os.path.join', 'os.path.join', (['path_prefix', '"""train"""'], {}), "(path_prefix, 'train')\n", (2409, 2431), False, 'import os\n'), ((2460, 2493), 'os.path.join', 'os.path.join', (['path_prefix', '"""test"""'], {}), "(path_prefix, 'test')\n", (2472, 2493), False, 'import os\n'), ((6975, 7006), 'os.path.join', 'os.path.join', (['proxy_data_dir', 'f'], {}), '(proxy_data_dir, f)\n', (6987, 7006), False, 'import os\n'), ((3325, 3359), 'os.path.join', 'os.path.join', (['path_prefix', '"""train"""'], {}), "(path_prefix, 'train')\n", (3337, 3359), False, 'import os\n'), ((3386, 3419), 'os.path.join', 'os.path.join', (['path_prefix', '"""test"""'], {}), "(path_prefix, 'test')\n", (3398, 3419), False, 'import os\n'), ((4006, 4040), 'os.path.join', 'os.path.join', (['path_prefix', '"""train"""'], {}), "(path_prefix, 'train')\n", (4018, 4040), False, 'import os\n'), ((4069, 4102), 'os.path.join', 'os.path.join', (['path_prefix', '"""test"""'], {}), "(path_prefix, 'test')\n", (4081, 4102), False, 'import os\n'), ((4447, 4481), 'os.path.join', 'os.path.join', (['path_prefix', '"""train"""'], {}), "(path_prefix, 'train')\n", (4459, 4481), False, 'import os\n'), ((4504, 4537), 'os.path.join', 'os.path.join', 
(['path_prefix', '"""test"""'], {}), "(path_prefix, 'test')\n", (4516, 4537), False, 'import os\n'), ((4561, 4595), 'os.path.join', 'os.path.join', (['"""/user500/"""', '"""proxy"""'], {}), "('/user500/', 'proxy')\n", (4573, 4595), False, 'import os\n'), ((5679, 5693), 'json.load', 'json.load', (['inf'], {}), '(inf)\n', (5688, 5693), False, 'import json\n'), ((6445, 6460), 'torch.load', 'torch.load', (['inf'], {}), '(inf)\n', (6455, 6460), False, 'import torch\n'), ((13707, 13726), 'torch.square', 'torch.square', (['param'], {}), '(param)\n', (13719, 13726), False, 'import torch\n'), ((5805, 5820), 'torch.load', 'torch.load', (['inf'], {}), '(inf)\n', (5815, 5820), False, 'import torch\n'), ((6573, 6587), 'json.load', 'json.load', (['inf'], {}), '(inf)\n', (6582, 6587), False, 'import json\n'), ((7126, 7141), 'torch.load', 'torch.load', (['inf'], {}), '(inf)\n', (7136, 7141), False, 'import torch\n'), ((9225, 9240), 'torch.Tensor', 'torch.Tensor', (['y'], {}), '(y)\n', (9237, 9240), False, 'import torch\n'), ((9288, 9303), 'torch.Tensor', 'torch.Tensor', (['X'], {}), '(X)\n', (9300, 9303), False, 'import torch\n'), ((9338, 9353), 'torch.Tensor', 'torch.Tensor', (['y'], {}), '(y)\n', (9350, 9353), False, 'import torch\n'), ((7264, 7278), 'json.load', 'json.load', (['inf'], {}), '(inf)\n', (7273, 7278), False, 'import json\n'), ((9155, 9170), 'torch.Tensor', 'torch.Tensor', (['X'], {}), '(X)\n', (9167, 9170), False, 'import torch\n')] |
from functools import total_ordering
from operator import is_
import numpy as np
from typing import Callable, Iterable, List, Optional, Sequence, Tuple, Union
from numpy import random
from scipy.ndimage import rotate, map_coordinates, gaussian_filter
import h5py
from itertools import chain
from batchgenerators.augmentations.utils import resize_segmentation
import matplotlib.pyplot as plt
import torch
from .utils import generate_pos_neg_label_crop_centers, \
create_zero_centered_coordinate_mesh, \
elastic_deform_coordinates, \
interpolate_img, scale_coords,\
augment_gamma, augment_mirroring, is_positive, generate_spatial_bounding_box,\
Pad
from medical_seg.utils import resample_image_array_size
from .utils import resample_data_or_seg
RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD = 3
def get_do_separate_z(spacing, anisotropy_threshold=RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD):
    """Return True when the voxel spacing is anisotropic enough that the
    low-resolution axis should be resampled separately.

    Anisotropy is measured as max(spacing) / min(spacing) and compared
    against ``anisotropy_threshold``.
    """
    spacing_ratio = np.max(spacing) / np.min(spacing)
    return spacing_ratio > anisotropy_threshold
def get_lowres_axis(new_spacing):
    """Return the indices of the anisotropic (largest-spacing) axes.

    An axis is "low resolution" when max(spacing) divided by its spacing
    is exactly 1, i.e. its spacing equals the maximum.
    """
    spacing_ratio = max(new_spacing) / np.array(new_spacing)
    axis = np.where(spacing_ratio == 1)[0]
    return axis
def resample_patient(data, seg, original_spacing, target_spacing, order_data=3, order_seg=0, force_separate_z=False,
                     order_z_data=0, order_z_seg=0,
                     separate_z_anisotropy_threshold=RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD):
    """
    Resample an image/segmentation pair from ``original_spacing`` to ``target_spacing``.

    :param data: image, channel-first (c, x, y, z) or None
    :param seg: segmentation, (c, x, y, z) or (x, y, z) or None
    :param original_spacing: voxel spacing of the input
    :param target_spacing: desired voxel spacing of the output
    :param order_data: spline interpolation order for the image
    :param order_seg: spline interpolation order for the segmentation
    :param force_separate_z: if None then we dynamically decide how to resample along z, if True/False then always
    /never resample along z separately
    :param order_z_data: only applies if do_separate_z is True
    :param order_z_seg: only applies if do_separate_z is True
    :param separate_z_anisotropy_threshold: if max_spacing > separate_z_anisotropy_threshold * min_spacing (per axis)
    then resample along lowres axis with order_z_data/order_z_seg instead of order_data/order_seg
    :return: (data_reshaped, seg_reshaped); either may be None if the corresponding input was None.
        If seg was provided with a single channel, the channel axis is squeezed off the result.
    """
    assert not ((data is None) and (seg is None))
    if data is not None:
        assert len(data.shape) == 4, "data must be c x y z"
    if seg is not None:
        if len(seg.shape) == 3:
            # add a channel axis so seg is handled uniformly as c x y z
            seg = np.expand_dims(seg, axis=0)
        assert len(seg.shape) == 4, "seg must be c x y z"

    if data is not None:
        shape = np.array(data[0].shape)
    else:
        shape = np.array(seg[0].shape)
    # new shape = old shape scaled by the spacing ratio, rounded to voxels
    new_shape = np.round(((np.array(original_spacing) / np.array(target_spacing)).astype(float) * shape)).astype(int)

    if force_separate_z is not None:
        do_separate_z = force_separate_z
        axis = get_lowres_axis(original_spacing) if force_separate_z else None
    else:
        # decide dynamically: prefer the original spacing's anisotropy, then the target's
        if get_do_separate_z(original_spacing, separate_z_anisotropy_threshold):
            do_separate_z = True
            axis = get_lowres_axis(original_spacing)
        elif get_do_separate_z(target_spacing, separate_z_anisotropy_threshold):
            do_separate_z = True
            axis = get_lowres_axis(target_spacing)
        else:
            do_separate_z = False
            axis = None

    if axis is not None:
        if len(axis) == 3:
            # every axis has the same spacing -> nothing is truly "low resolution"
            do_separate_z = False
        elif len(axis) == 2:
            # happens for spacings like (0.24, 1.25, 1.25): do not resample
            # separately in the out-of-plane axis
            do_separate_z = False

    if data is not None:
        data_reshaped = resample_data_or_seg(data, new_shape, False, axis, order_data, do_separate_z,
                                             order_z=order_z_data)
    else:
        data_reshaped = None
    if seg is not None:
        seg_reshaped = resample_data_or_seg(seg, new_shape, True, axis, order_seg, do_separate_z, order_z=order_z_seg)
    else:
        seg_reshaped = None

    # BUGFIX: the original dereferenced seg_reshaped.shape unconditionally,
    # raising AttributeError whenever seg was None.
    if seg_reshaped is not None and len(seg_reshaped.shape) == 4:
        seg_reshaped = np.squeeze(seg_reshaped, axis=0)
    return data_reshaped, seg_reshaped
class ResampleImage:
    """Resample an image (and optionally its label) to a fixed spatial size.

    The image is interpolated with ``order[0]`` and the label, if given,
    with ``order[1]`` (defaults: cubic for the image, nearest for the label).
    """

    def __init__(self, resample_size, order=[3, 0]) -> None:
        self.rsize = resample_size
        self.order = order

    def __call__(self, image, label=None):
        # guarantee a channel axis on the image before resampling
        if len(image.shape) == 3:
            image = np.expand_dims(image, axis=0)
        num_channels = image.shape[0]
        image = resample_image_array_size(image, out_size=(num_channels,) + self.rsize,
                                          order=self.order[0])
        if label is not None:
            label = resample_image_array_size(label, out_size=self.rsize, order=self.order[1])
        return image, label
class CropForegroundImageLabel:
    """Crop an image (and optionally its label) to the bounding box of the
    foreground selected by ``select_fn``.

    Args:
        select_fn: predicate marking foreground voxels (default: values > 0).
        channel_indices: restrict foreground detection to these channels
            (None = use all channels).
        margin: extra voxels added around the bounding box on every axis.
        mode: padding mode(s): ``mode[0]`` is used for the image, ``mode[1]``
            for the label (falling back to ``mode[0]`` when only one entry
            is given).
    """

    def __init__(self,
                 select_fn: Callable = is_positive,
                 channel_indices=None,
                 margin=0,
                 mode=["constant"]
                 ):
        # removed a stray `pass` statement that preceded these assignments
        self.cropper = CropForeground(
            select_fn=select_fn, channel_indices=channel_indices, margin=margin
        )
        self.mode = mode

    def __call__(self, image, label=None):
        if len(image.shape) == 3:
            image = np.expand_dims(image, axis=0)
        box_start, box_end = self.cropper.compute_bounding_box(image)
        # removed leftover debug print of the bounding box
        image = self.cropper.crop_pad(img=image, box_start=box_start, box_end=box_end, mode=self.mode[0])
        if label is not None:
            # BUGFIX: with the default single-entry mode list, self.mode[1]
            # raised IndexError; fall back to the image mode instead.
            label_mode = self.mode[1] if len(self.mode) > 1 else self.mode[0]
            if len(label.shape) == 3:
                label = np.expand_dims(label, axis=0)
            label = self.cropper.crop_pad(img=label, box_start=box_start, box_end=box_end, mode=label_mode)
            if len(label.shape) == 4:
                label = np.squeeze(label, axis=0)
        return image, label
class CropForeground():
    """
    Crop an image using a bounding box. The bounding box is generated by selecting foreground using select_fn
    at channels channel_indices. margin is added in each spatial dimension of the bounding box.
    The typical usage is to help training and evaluation if the valid part is small in the whole medical image.
    Users can define arbitrary function to select expected foreground from the whole image or specified channels.
    And it can also add margin to every dim of the bounding box of foreground object.
    For example:
    .. code-block:: python
        image = np.array(
            [[[0, 0, 0, 0, 0],
              [0, 1, 2, 1, 0],
              [0, 1, 3, 2, 0],
              [0, 1, 2, 1, 0],
              [0, 0, 0, 0, 0]]])  # 1x5x5, single channel 5x5 image
        def threshold_at_one(x):
            # threshold at 1
            return x > 1
        cropper = CropForeground(select_fn=threshold_at_one, margin=0)
        print(cropper(image))
        [[[2, 1],
          [3, 2],
          [2, 1]]]
    """
    def __init__(
        self,
        select_fn: Callable = is_positive,
        channel_indices = None,
        margin: Union[Sequence[int], int] = 0,
        return_coords: bool = False,
        mode: str = "constant",
        **np_kwargs,
    ) -> None:
        """
        Args:
            select_fn: function to select expected foreground, default is to select values > 0.
            channel_indices: if defined, select foreground only on the specified channels
                of image. if None, select foreground on the whole image.
            margin: add margin value to spatial dims of the bounding box, if only 1 value provided, use it for all dims.
            return_coords: whether return the coordinates of spatial bounding box for foreground.
            mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
                ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
                available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
                One of the listed string values or a user supplied function. Defaults to ``"constant"``.
                See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
                https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
            np_kwargs: other args for `np.pad` API, note that `np.pad` treats channel dimension as the first dimension.
                more details: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
        """
        self.select_fn = select_fn
        self.channel_indices = channel_indices
        self.margin = margin
        self.return_coords = return_coords
        self.mode = mode
        self.np_kwargs = np_kwargs
    def compute_bounding_box(self, img):
        """
        Compute the start and end voxel coordinates of the foreground bounding
        box of channel-first `img`, as a pair of np.ndarrays.
        """
        box_start, box_end = generate_spatial_bounding_box(img, self.select_fn, self.channel_indices, self.margin)
        box_start = np.array(box_start)
        box_end = np.array(box_end)
        orig_spatial_size = box_end - box_start
        # spatial_size equals orig_spatial_size here, so the floor_divide
        # adjustment below is a no-op; it is kept from the original
        # `k_divisible` rounding logic (see the commented MONAI code it was
        # derived from).
        spatial_size = np.array(orig_spatial_size)
        box_start_ = box_start - np.floor_divide(np.asarray(spatial_size) - orig_spatial_size, 2)
        box_end_ = box_start + spatial_size
        return box_start_, box_end_
    def crop_pad(
        self,
        img,
        box_start: np.ndarray,
        box_end: np.ndarray,
        mode = None,
    ):
        """
        Crop `img` to the bounding box, then pad any part of the box that
        fell outside the image: a negative box_start pads before, a box_end
        beyond the spatial shape pads after.
        """
        cropped = SpatialCrop(roi_start=box_start, roi_end=box_end)(img)
        pad_to_start = np.maximum(-box_start, 0)
        pad_to_end = np.maximum(box_end - np.asarray(img.shape[1:]), 0)
        # interleave (before, after) pairs per axis for BorderPad
        pad = list(chain(*zip(pad_to_start.tolist(), pad_to_end.tolist())))
        return BorderPad(spatial_border=pad, mode=mode or self.mode, **self.np_kwargs)(cropped)
    def __call__(self, img, mode = None):
        """
        Apply the transform to `img`, assuming `img` is channel-first and
        slicing doesn't change the channel dim. Returns the cropped image,
        plus the box coordinates when `return_coords` is True.
        """
        box_start, box_end = self.compute_bounding_box(img)
        cropped = self.crop_pad(img, box_start, box_end, mode)
        if self.return_coords:
            return cropped, box_start, box_end
        return cropped
class Random:
    """Thin wrapper around a seeded ``np.random.RandomState``.

    ``do_transform(prob)`` draws one uniform sample and reports whether a
    transform that fires with probability ``prob`` should be applied.
    """

    def __init__(self, seed) -> None:
        self.seed = seed
        self.R = np.random.RandomState(seed)

    def do_transform(self, prob):
        # clamp the requested probability into [0, 1] before comparing
        clamped = min(max(prob, 0.0), 1.0)
        return self.R.rand() < clamped
class BorderPad:
    """
    Pad the input data by adding specified borders to every dimension.
    Args:
        spatial_border: specified size for every spatial border. Any -ve values will be set to 0. It can be 3 shapes:
            - single int number, pad all the borders with the same size.
            - length equals the length of image shape, pad every spatial dimension separately.
              for example, image shape(CHW) is [1, 4, 4], spatial_border is [2, 1],
              pad every border of H dim with 2, pad every border of W dim with 1, result shape is [1, 8, 6].
            - length equals 2 x (length of image shape), pad every border of every dimension separately.
              for example, image shape(CHW) is [1, 4, 4], spatial_border is [1, 2, 3, 4], pad top of H dim with 1,
              pad bottom of H dim with 2, pad left of W dim with 3, pad right of W dim with 4.
              the result shape is [1, 7, 11].
        mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
            ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
            available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
            One of the listed string values or a user supplied function. Defaults to ``"constant"``.
            See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
            https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        kwargs: other arguments for the `np.pad` or `torch.pad` function.
            note that `np.pad` treats channel dimension as the first dimension.
    """
    def __init__(
        self,
        spatial_border: Union[Sequence[int], int],
        mode = "constant",
        **kwargs,
    ) -> None:
        self.spatial_border = spatial_border
        self.mode = mode
        self.kwargs = kwargs

    def __call__(
        self, img, mode = None
    ):
        """
        Args:
            img: data to be transformed, assuming `img` is channel-first and
                padding doesn't apply to the channel dim.
            mode: padding mode override for this call; defaults to `self.mode`.
        Raises:
            ValueError: When ``self.spatial_border`` does not contain ints.
            ValueError: When ``self.spatial_border`` length is not one of
                [1, len(spatial_shape), 2*len(spatial_shape)].
        """
        spatial_shape = img.shape[1:]
        spatial_border = self.spatial_border
        # BUGFIX: a plain int is documented as supported but previously raised
        # TypeError when iterated; normalize it to a 1-tuple first.
        if isinstance(spatial_border, int):
            spatial_border = (spatial_border,)
        if not all(isinstance(b, int) for b in spatial_border):
            raise ValueError(f"self.spatial_border must contain only ints, got {spatial_border}.")
        # negative border sizes are clamped to zero
        spatial_border = tuple(max(0, b) for b in spatial_border)
        if len(spatial_border) == 1:
            # same (before, after) pad on every spatial axis
            data_pad_width = [(spatial_border[0], spatial_border[0]) for _ in spatial_shape]
        elif len(spatial_border) == len(spatial_shape):
            # symmetric pad, one value per spatial axis
            data_pad_width = [(sp, sp) for sp in spatial_border[: len(spatial_shape)]]
        elif len(spatial_border) == len(spatial_shape) * 2:
            # explicit (before, after) pairs per spatial axis
            data_pad_width = [(spatial_border[2 * i], spatial_border[2 * i + 1]) for i in range(len(spatial_shape))]
        else:
            raise ValueError(
                f"Unsupported spatial_border length: {len(spatial_border)}, available options are "
                f"[1, len(spatial_shape)={len(spatial_shape)}, 2*len(spatial_shape)={2*len(spatial_shape)}]."
            )
        # never pad the channel dimension
        all_pad_width = [(0, 0)] + data_pad_width
        padder = Pad(all_pad_width, mode or self.mode, **self.kwargs)
        return padder(img)
def map_spatial_axes(
    img_ndim: int,
    spatial_axes=None,
    channel_first=True,
) -> List[int]:
    """Map spatial-axis indices to real array-axis indices.

    With ``channel_first=True`` a non-negative spatial axis is shifted by +1
    (past the channel dim) and negative axes are kept as-is; with
    ``channel_first=False`` non-negative axes are kept and negative axes are
    shifted by -1 (before the trailing channel dim). ``spatial_axes=None``
    expands to all spatial axes of an ``img_ndim``-dimensional image.

    Args:
        img_ndim: dimension number of the target image.
        spatial_axes: spatial axes to convert, or None for all of them.
        channel_first: whether the image layout is channel-first.
    """
    if spatial_axes is None:
        return list(range(1, img_ndim)) if channel_first else list(range(img_ndim - 1))
    mapped: List[int] = []
    for axis in spatial_axes:
        if channel_first:
            mapped.append(axis + 1 if axis >= 0 else axis)
        else:
            mapped.append(axis if axis >= 0 else axis - 1)
    return mapped
class RandomFlip():
    """
    Reverses the order of elements along the given spatial axis (shape is
    preserved), with probability ``execution_probability``. Uses ``np.flip``;
    see https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html.

    Args:
        random_state: np.random.RandomState used for the probability draw.
        spatial_axis: spatial axes along which to flip over. Default is None
            (flip over all spatial axes).
        execution_probability: chance that the flip is applied.
    """

    def __init__(self, random_state, spatial_axis = None, execution_probability=0.2):
        self.spatial_axis = spatial_axis
        self.random_state = random_state
        self.execution_probability = execution_probability

    def __call__(self, img: np.ndarray, label: np.ndarray = None):
        """
        Args:
            img: channel first array, must have shape: (num_channels, H[, W, ..., ]).
            label: optional label volume, 3D (no channel dim) or 4D channel-first.
        Returns:
            (img, label) — flipped when the transform fires, unchanged
            otherwise; label is None when none was given.
        """
        if self.random_state.uniform() > self.execution_probability:
            # skip the transform
            return img, label
        result: np.ndarray = np.flip(img, map_spatial_axes(img.ndim, self.spatial_axis))
        if label is not None:
            if len(label.shape) == 3:
                # temporarily add a channel axis so axis mapping matches img
                label = np.expand_dims(label, axis=0)
                label = np.flip(label, map_spatial_axes(label.ndim, self.spatial_axis))
                label = np.squeeze(label, axis=0)
            elif len(label.shape) == 4:
                label = np.flip(label, map_spatial_axes(label.ndim, self.spatial_axis))
            else:
                # BUGFIX: `raise "label shape err"` raised a string, which is
                # itself a TypeError; raise a proper exception instead.
                raise ValueError("label shape err")
            return result.astype(img.dtype), label.astype(label.dtype)
        # BUGFIX: the original dereferenced label.astype on this path even
        # though label was None; also return a tuple for a consistent
        # interface with the early-return path above.
        return result.astype(img.dtype), None
class RandomRotate90:
    """Rotate a channel-first 4D volume by a random multiple of 90 degrees
    within the (H, W) plane, with probability ``execution_probability``.
    The label, if given, receives the same rotation.
    """

    def __init__(self, random_state, execution_probability=0.2):
        self.random_state = random_state
        self.axis = (1, 2)
        self.execution_probability = execution_probability

    def __call__(self, m, label=None):
        assert m.ndim in [3, 4], 'Supports only 3D (DxHxW) or 4D (CxDxHxW) images'
        # number of quarter-turns; drawn before the probability check so the
        # random sequence is consumed identically whether or not we rotate
        k = self.random_state.randint(0, 4)
        assert m.ndim == 4, "输入必须为3d图像,第一个维度为channel"
        if self.random_state.uniform() < self.execution_probability:
            rotated = [np.rot90(m[c], k, self.axis) for c in range(m.shape[0])]
            m = np.stack(rotated, axis=0)
            if label is not None:
                assert label.ndim == 3, "label shape 必须为三维"
                label = np.rot90(label, k, self.axis)
        return m, label
class RandomRotate:
    """
    Rotate an array by a random angle drawn from (-angle_spectrum, angle_spectrum).
    The rotation plane is picked at random from the provided ``axes`` list.
    """

    def __init__(self, random_state, angle_spectrum=30, axes=None, mode='reflect', order=0, execution_probability=0.2):
        if axes is None:
            # default: rotate within the plane of the last two axes
            # (the first spatial axis is depth)
            axes = [[2, 1]]
        self.random_state = random_state
        self.angle_spectrum = angle_spectrum
        self.axes = axes
        self.execution_probability = execution_probability
        self.mode = mode
        self.order = order

    def __call__(self, m, label=None):
        if self.random_state.uniform() < self.execution_probability:
            axis = self.axes[self.random_state.randint(len(self.axes))]
            angle = self.random_state.randint(-self.angle_spectrum, self.angle_spectrum)
            assert m.ndim == 4, "输入必须为3d图像,第一个维度为channel"
            rotated = [
                rotate(m[c], angle, axes=axis, reshape=False, order=self.order, mode=self.mode, cval=-1)
                for c in range(m.shape[0])
            ]
            m = np.stack(rotated, axis=0)
            if label is not None:
                assert label.ndim == 3, "label shape 必须为三维"
                # labels always use nearest-neighbour border handling
                label = rotate(label, angle, axes=axis, reshape=False, order=self.order, mode="nearest", cval=-1)
        return m, label
class Elatic:
    """Random elastic deformation plus random scaling of a channel-first 4D
    image and its optional 3D segmentation.

    With probability ``execution_probability`` a deformed coordinate mesh is
    built from ``alpha`` (deformation magnitude range) and ``sigma``
    (smoothing range), a scale factor from ``scale`` is applied, and the
    inputs are resampled on the warped mesh.

    NOTE(review): the class name looks like a typo of "Elastic" but is kept
    as-is because callers import it under this name.
    """
    def __init__(self, random_state, alpha=(0., 900.), sigma=(9., 13.), scale=(0.85, 1.25),
                 order_seg=1, order_data=3, border_mode_seg="constant",
                 border_cval_seg=0, execution_probability=0.2) -> None:
        self.random_state = random_state
        self.alpha = alpha  # range for the elastic deformation magnitude
        self.sigma = sigma  # range for the deformation smoothing parameter
        self.scale = scale  # range for the random zoom factor
        self.order_seg = order_seg  # interpolation order for the segmentation
        self.order_data = order_data  # interpolation order for the image
        self.border_mode_seg = border_mode_seg  # border mode used when resampling seg
        self.border_cval_seg = border_cval_seg  # fill value outside seg borders
        self.execution_probability = execution_probability
    def _do_elastic(self, m, seg=None):
        """Apply one elastic deformation + scaling to image ``m`` (C, D, H, W)
        and optional segmentation ``seg`` (D, H, W); returns float32 results."""
        a = self.random_state.uniform(self.alpha[0], self.alpha[1])
        s = self.random_state.uniform(self.sigma[0], self.sigma[1])
        patch_size = m.shape[1:]
        coords = create_zero_centered_coordinate_mesh(patch_size)
        coords = elastic_deform_coordinates(coords, a, s, self.random_state)
        dim = 3
        seg_result = None
        if seg is not None:
            seg_result = np.zeros((patch_size[0], patch_size[1], patch_size[2]),
                                    dtype=np.float32)
        data_result = np.zeros((m.shape[0], patch_size[0], patch_size[1], patch_size[2]),
                                dtype=np.float32)
        for d in range(dim):
            # shift the zero-centered mesh back into image coordinates
            ctr = m.shape[d + 1] / 2. - 0.5
            coords[d] += ctr
        # NOTE(review): when scale[0] < 1 (the default), only a factor in
        # [scale[0], 1) is ever drawn and scale[1] is never used — confirm
        # this is intended rather than a coin-flip between down/up-scaling.
        if self.scale[0] < 1:
            sc = self.random_state.uniform(self.scale[0], 1)
        else :
            sc = self.random_state.uniform(max(self.scale[0], 1), self.scale[1])
        coords = scale_coords(coords, sc)
        for channel_id in range(m.shape[0]):
            data_result[channel_id] = interpolate_img(m[channel_id], coords, self.order_data,
                                                        cval=0.0, is_seg=False)
        if seg is not None:
            seg_result = interpolate_img(seg, coords, self.order_seg,
                                            self.border_mode_seg,
                                            cval=self.border_cval_seg,
                                            is_seg=True)
        return data_result, seg_result
    def __call__(self, m, seg=None):
        # Returns (m, seg) when a segmentation was given, otherwise just m.
        assert len(m.shape) == 4, "image dim 必须为4"
        if self.random_state.uniform() < self.execution_probability:
            m, seg = self._do_elastic(m, seg=seg)
        if seg is not None :
            return m, seg
        else :
            return m
class Standardize:
    """
    Windowed min-max intensity rescaling: values are mapped linearly so that
    [a_min, a_max] becomes [0, 1] and, when ``clip`` is True, the result is
    clipped into [b_min, b_max].

    NOTE: despite the class name, this is NOT Z-score normalization — the
    previous docstring was wrong. The name is kept for existing callers.

    Args:
        a_min: lower bound of the input intensity window.
        a_max: upper bound of the input intensity window.
        b_min: lower clip bound of the output (only used when ``clip``).
        b_max: upper clip bound of the output (only used when ``clip``).
        eps: unused; kept for backward compatibility with existing callers.
        clip: whether to clip the output into [b_min, b_max].
    """

    def __init__(self, a_min, a_max, b_min=0, b_max=1, eps=1e-6, clip=True):
        self.a_min = a_min
        self.a_max = a_max
        self.b_min = b_min
        self.b_max = b_max
        self.eps = eps
        self.clip = clip

    def __call__(self, m):
        img = (m - self.a_min) / (self.a_max - self.a_min)
        if self.clip:
            img = np.clip(img, self.b_min, self.b_max)
        return img
class Normalization():
    """
    Z-score normalize a channel-first 4D volume.

    With ``channel_wise=False`` the whole array is normalized by its global
    mean/std and a new array is returned. With ``channel_wise=True`` each
    channel is normalized over its non-zero voxels only, in place.
    """

    def __init__(self, channel_wise=False):
        # removed a stray `pass` statement that preceded this assignment
        self.channel_wise = channel_wise

    def __call__(self, m):
        assert len(m.shape) == 4, "image shape err"
        if not self.channel_wise:
            m = (m - m.mean()) / m.std()
        else :
            for i, d in enumerate(m):
                # normalize each channel over its non-zero (foreground) voxels;
                # the small epsilon guards against a zero std
                slices = d != 0
                _sub = d[slices].mean()
                _div = d[slices].std()
                m[i][slices] = (m[i][slices] - _sub) / (_div+1e-8)
        return m
class AdditiveGaussianNoise:
    """Add zero-mean Gaussian noise whose std is drawn uniformly from
    ``scale``, applied with probability ``execution_probability``."""

    def __init__(self, random_state, scale=(0.0, 0.2), execution_probability=0.2):
        self.execution_probability = execution_probability
        self.random_state = random_state
        self.scale = scale

    def __call__(self, m):
        # guard clause: skip the transform most of the time
        if self.random_state.uniform() >= self.execution_probability:
            return m
        noise_std = self.random_state.uniform(self.scale[0], self.scale[1])
        noise = self.random_state.normal(0, noise_std, size=m.shape)
        return m + noise
class AdditivePoissonNoise:
    """Add Poisson-distributed noise whose rate is drawn uniformly from
    ``lam``, applied with probability ``execution_probability``."""

    def __init__(self, random_state, lam=(0.0, 0.2), execution_probability=0.2):
        self.execution_probability = execution_probability
        self.random_state = random_state
        self.lam = lam

    def __call__(self, m):
        # guard clause: skip the transform most of the time
        if self.random_state.rand() >= self.execution_probability:
            return m
        rate = self.random_state.uniform(self.lam[0], self.lam[1])
        noise = self.random_state.poisson(rate, size=m.shape)
        return m + noise
class SpatialCrop:
    """
    General purpose cropper to produce sub-volume region of interest (ROI).
    If a dimension of the expected ROI size is bigger than the input image size, will not crop that dimension.
    So the cropped result may be smaller than the expected ROI, and the cropped results of several images may
    not have exactly the same shape.
    It can support to crop ND spatial (channel-first) data.
    The cropped region can be parameterised either as a spatial center and
    size, or as the start and end coordinates of the ROI.
    """

    def __init__(
        self,
        roi_center: Union[Sequence[int], np.ndarray, None] = None,
        roi_size: Union[Sequence[int], np.ndarray, None] = None,
        roi_start: Union[Sequence[int], np.ndarray, None] = None,
        roi_end: Union[Sequence[int], np.ndarray, None] = None,
    ) -> None:
        """
        Args:
            roi_center: voxel coordinates for center of the crop ROI.
            roi_size: size of the crop ROI, if a dimension of ROI size is bigger than image size,
                will not crop that dimension of the image.
            roi_start: voxel coordinates for start of the crop ROI.
            roi_end: voxel coordinates for end of the crop ROI, if a coordinate is out of image,
                use the end coordinate of image.
        """
        # BUGFIX: coordinates were cast to np.int16, which overflows (or, on
        # NumPy >= 2, raises OverflowError) for any coordinate above 32767;
        # use int64 instead.
        if roi_center is not None and roi_size is not None:
            roi_center = np.asarray(roi_center, dtype=np.int64)
            roi_size = np.asarray(roi_size, dtype=np.int64)
            # start = center - size//2, clamped at 0; end never precedes start
            roi_start_np = np.maximum(roi_center - np.floor_divide(roi_size, 2), 0)
            roi_end_np = np.maximum(roi_start_np + roi_size, roi_start_np)
        else:
            if roi_start is None or roi_end is None:
                raise ValueError("Please specify either roi_center, roi_size or roi_start, roi_end.")
            roi_start_np = np.maximum(np.asarray(roi_start, dtype=np.int64), 0)
            roi_end_np = np.maximum(np.asarray(roi_end, dtype=np.int64), roi_start_np)
        # Allow for 1D by converting back to np.array (since np.maximum will convert to int)
        roi_start_np = roi_start_np if isinstance(roi_start_np, np.ndarray) else np.array([roi_start_np])
        roi_end_np = roi_end_np if isinstance(roi_end_np, np.ndarray) else np.array([roi_end_np])
        # convert to one slice per spatial dimension
        self.slices = [slice(s, e) for s, e in zip(roi_start_np, roi_end_np)]

    def __call__(self, img: Union[np.ndarray, torch.Tensor]):
        """
        Apply the transform to `img`, assuming `img` is channel-first and
        slicing doesn't apply to the channel dim.
        """
        sd = min(len(self.slices), len(img.shape[1:]))  # spatial dims
        slices = [slice(None)] + self.slices[:sd]
        return img[tuple(slices)]
class CenterSpatialCrop:
    """
    Crop at the center of image with specified ROI size.
    If a dimension of the expected ROI size is bigger than the input image size, will not crop that dimension.
    So the cropped result may be smaller than the expected ROI, and the cropped results of several images may
    not have exactly the same shape.
    Args:
        roi_size: the spatial size of the crop region e.g. [224,224,128];
            dimensions larger than the image are left uncropped, and
            non-positive components fall back to the input image size.
    """

    def __init__(self, roi_size: Union[Sequence[int], int]) -> None:
        self.roi_size = roi_size

    def __call__(self, img: np.ndarray):
        """Crop channel-first 4D `img` around its spatial center; the channel
        dim is untouched."""
        assert img.ndim == 4, "img ndim 必须为4, (channel, W, H, D)"
        spatial_center = [dim // 2 for dim in img.shape[1:]]
        return SpatialCrop(roi_center=spatial_center, roi_size=self.roi_size)(img)
def map_binary_to_indices(
    label: np.ndarray,
    image: Optional[np.ndarray] = None,
    image_threshold: float = 0.0,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Compute the flattened foreground and background indices of `label`.
    For example:
    ``label = np.array([[[0, 1, 1], [1, 0, 1], [1, 1, 0]]])``
    ``foreground indices = np.array([1, 2, 3, 5, 6, 7])`` and ``background indices = np.array([0, 4, 8])``
    Args:
        label: use the label data to get the foreground/background information.
        image: if image is not None, use ``label = 0 & image > image_threshold``
            to define background, so background indices only cover voxels with
            meaningful image content.
        image_threshold: if enabled `image`, use ``image > image_threshold`` to
            determine the valid image content area for background selection.
    """
    if label.shape[0] > 1:
        # One-Hot data: drop the background channel before collapsing
        label = label[1:]
    label_flat = np.any(label, axis=0).ravel()  # collapse channels, then flatten
    fg_indices = np.nonzero(label_flat)[0]
    if image is None:
        bg_indices = np.nonzero(~label_flat)[0]
    else:
        # restrict background to voxels whose image intensity exceeds the threshold
        valid_flat = np.any(image > image_threshold, axis=0).ravel()
        bg_indices = np.nonzero(np.logical_and(valid_flat, ~label_flat))[0]
    return fg_indices, bg_indices
class RandCropByPosNegLabel:
    """
    Crop random fixed sized regions with the center being a foreground or background voxel
    based on the Pos Neg Ratio, returning a list of arrays (one per sample).
    For example, crop two (3 x 3) arrays from (5 x 5) array with pos/neg=1::
        [[[0, 0, 0, 0, 0],
          [0, 1, 2, 1, 0],            [[0, 1, 2],     [[2, 1, 0],
          [0, 1, 3, 0, 0],     -->     [0, 1, 3],      [3, 0, 0],
          [0, 0, 0, 0, 0],             [0, 0, 0]]      [0, 0, 0]]
          [0, 0, 0, 0, 0]]]
    If a dimension of the expected spatial size is bigger than the input image size,
    will not crop that dimension, so the cropped results may be smaller than the
    expected size and may not all have exactly the same shape.
    Args:
        spatial_size: the spatial size of the crop region e.g. [224, 224, 128];
            over-large or non-positive components fall back to the label's size.
        label: the label image that is used for finding foreground/background; if None,
            must be provided at `self.__call__`. Non-zero indicates foreground.
        pos / neg: together define the ratio ``pos / (pos + neg)`` — the probability
            of picking a foreground voxel (rather than a background voxel) as a crop center.
        num_samples: number of samples (crop regions) to take.
        image: optional image used to restrict background centers to voxels where
            ``image > image_threshold``.
        image_threshold: threshold defining valid image content for background selection.
        random_state: np.random.RandomState forwarded to the center sampler.
    Raises:
        ValueError: When ``pos`` or ``neg`` are negative, or both are zero.
    """
    def __init__(
        self,
        spatial_size: Union[Sequence[int], int],
        label: Optional[np.ndarray] = None,
        pos: float = 1.0,
        neg: float = 1.0,
        num_samples: int = 1,
        image: Optional[np.ndarray] = None,
        image_threshold: float = 0.0,
        random_state: np.random.RandomState = None,
    ) -> None:
        self.spatial_size = spatial_size
        self.label = label
        if pos < 0 or neg < 0:
            raise ValueError(f"pos and neg must be nonnegative, got pos={pos} neg={neg}.")
        if pos + neg == 0:
            raise ValueError("Incompatible values: pos=0 and neg=0.")
        self.pos_ratio = pos / (pos + neg)
        self.num_samples = num_samples
        self.image = image
        self.image_threshold = image_threshold
        # crop centers drawn by randomize(); reused across calls until redrawn
        self.centers: Optional[List[List[np.ndarray]]] = None
        self.random_state = random_state
    def randomize(
        self,
        label: np.ndarray,
        image: Optional[np.ndarray] = None,
    ) -> None:
        # Draw num_samples crop centers from the fg/bg indices of `label`.
        # The self-assignment below is a no-op kept as-is (presumably a
        # placeholder where spatial_size used to be re-derived).
        self.spatial_size = self.spatial_size
        fg_indices_, bg_indices_ = map_binary_to_indices(label, image, self.image_threshold)
        self.centers = generate_pos_neg_label_crop_centers(
            self.spatial_size, self.num_samples, self.pos_ratio, label.shape[1:], fg_indices_, bg_indices_, rand_state=self.random_state
        )
    def __call__(
        self,
        img: np.ndarray,
        label: Optional[np.ndarray] = None,
        image: Optional[np.ndarray] = None,
        is_label = False,
    ) -> List[np.ndarray]:
        """
        Args:
            img: input data to crop samples from based on the pos/neg ratio of `label` and `image`.
                Assumes `img` is a channel-first array.
            label: the label image that is used for finding foreground/background, if None, use `self.label`.
            image: optional image data to help select valid area; if None, use `self.image`.
            is_label: when False, fresh crop centers are drawn; when True, the
                centers from the previous (image) pass are reused so that the
                label crops align with the image crops, and the channel axis is
                squeezed off each cropped label.
        """
        if label is None:
            label = self.label
        if label is None:
            raise ValueError("label should be provided.")
        if len(label.shape) == 3:
            label = np.expand_dims(label, axis=0)
        if image is None:
            image = self.image
        if not is_label:
            # image pass: draw new crop centers from the label's fg/bg voxels
            self.randomize(label, image)
        else :
            if len(img.shape) == 3:
                img = np.expand_dims(img, axis=0)
        results: List[np.ndarray] = []
        if self.centers is not None:
            for center in self.centers:
                cropper = SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size)  # type: ignore
                r = cropper(img)
                if is_label:
                    if len(r.shape) == 4:
                        r = np.squeeze(r, axis=0)
                results.append(r)
        return results
class Normalize:
    """
    Min-max scale the input into the fixed range [-1, 1] using the provided
    bounds; values outside [min_value, max_value] are clipped.
    """

    def __init__(self, min_value, max_value):
        assert max_value > min_value
        self.min_value = min_value
        self.value_range = max_value - min_value

    def __call__(self, m):
        # first scale to [0, 1], then stretch to [-1, 1] and clip
        scaled_01 = (m - self.min_value) / self.value_range
        return np.clip(2 * scaled_01 - 1, -1, 1)
class GammaTransformer:
    """Randomly apply a gamma (power-law) intensity augmentation.

    With probability ``execution_probability`` the input is passed through
    ``augment_gamma``; otherwise it is returned unchanged.
    """

    def __init__(self, random_state, gamma_range=(0.5, 2), epsilon=1e-7, per_channel=False,
                 retain_stats: Union[bool, Callable[[], bool]] = False, execution_probability=0.2) -> None:
        self.random_state = random_state
        self.gamma_range = gamma_range
        self.epsilon = epsilon
        self.per_channel = per_channel
        self.retain_stats = retain_stats
        self.execution_probability = execution_probability

    def __call__(self, m):
        # Single uniform draw decides whether the augmentation runs at all.
        if self.random_state.uniform() >= self.execution_probability:
            return m
        return augment_gamma(m, gamma_range=self.gamma_range, epsilon=self.epsilon,
                             per_channel=self.per_channel, retain_stats=self.retain_stats)
class MirrorTransform:
    """ Randomly mirrors data along specified axes. Mirroring is evenly distributed. Probability of mirroring along
    each axis is 0.5

    Args:
        axes (tuple of int): axes along which to mirror
    """

    def __init__(self, random_state, axes=(0, 1, 2), execution_probability=0.2):
        # Guard against the legacy convention that included batch/channel axes.
        if max(axes) > 2:
            raise ValueError("MirrorTransform now takes the axes as the spatial dimensions. What previously was "
                             "axes=(2, 3, 4) to mirror along all spatial dimensions of a 5d tensor (b, c, x, y, z) "
                             "is now axes=(0, 1, 2). Please adapt your scripts accordingly.")
        self.random_state = random_state
        self.axes = axes
        self.execution_probability = execution_probability

    def __call__(self, data, seg=None):
        # Single uniform draw gates the whole augmentation.
        if self.random_state.uniform() >= self.execution_probability:
            return data, seg
        mirrored = augment_mirroring(data, self.random_state, sample_seg=seg, axes=self.axes)
        data = mirrored[0]
        if seg is not None:
            seg = mirrored[1]
        return data, seg
# if __name__ == "__main__":
# print("Data augmentation function test")
# r = Random(seed=8)
# print(r.do_transform(0.5))
# print(r.do_transform(0.5))
# print(r.do_transform(0.5))
# print(r.do_transform(0.5))
# f = RandomFlip(r.R)
# image = h5py.File("./BAI_YUE_BIN_data.h5", "r")
# single_model_image = image["image"][:1]
# label = image["label"][0]
# print(f"label shape is {label.shape}")
# print(single_model_image.shape)
# sd = Standardize(a_min=single_model_image.min(), a_max=single_model_image.max())
# single_model_image = sd(single_model_image)
# print("Normalization transform")
# plot_3d(single_model_image)
# plot_3d_label(label)
# # print("Random flip transform")
# # single_model_image, label = f(single_model_image, label)
# # plot_3d(single_model_image)
# # plot_3d_label(label)
# # print("Random rotation transform")
# # ro = RandomRotate(random_state=r.R)
# # single_model_image, label = ro(single_model_image, label)
# # print(single_model_image.shape)
# # plot_3d(single_model_image)
# # plot_3d_label(label)
# # print("Add Gaussian noise")
# # gn = AdditiveGaussianNoise(r.R)
# # single_model_image = gn(single_model_image)
# # plot_3d(single_model_image)
# print("Add Poisson noise")
# pn = AdditivePoissonNoise(r.R)
# single_model_image = pn(single_model_image)
# plot_3d(single_model_image)
| [
"numpy.clip",
"numpy.logical_and",
"numpy.floor_divide",
"numpy.asarray",
"numpy.any",
"numpy.squeeze",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.stack",
"scipy.ndimage.rotate",
"numpy.nonzero",
"numpy.min",
"numpy.expand_dims",
"numpy.rot90",
"numpy.maximum",
"medical_seg.ut... | [((2509, 2532), 'numpy.array', 'np.array', (['data[0].shape'], {}), '(data[0].shape)\n', (2517, 2532), True, 'import numpy as np\n'), ((2559, 2581), 'numpy.array', 'np.array', (['seg[0].shape'], {}), '(seg[0].shape)\n', (2567, 2581), True, 'import numpy as np\n'), ((4237, 4269), 'numpy.squeeze', 'np.squeeze', (['seg_reshaped'], {'axis': '(0)'}), '(seg_reshaped, axis=0)\n', (4247, 4269), True, 'import numpy as np\n'), ((4631, 4717), 'medical_seg.utils.resample_image_array_size', 'resample_image_array_size', (['image'], {'out_size': '((c,) + self.rsize)', 'order': 'self.order[0]'}), '(image, out_size=(c,) + self.rsize, order=self.\n order[0])\n', (4656, 4717), False, 'from medical_seg.utils import resample_image_array_size\n'), ((9873, 9892), 'numpy.array', 'np.array', (['box_start'], {}), '(box_start)\n', (9881, 9892), True, 'import numpy as np\n'), ((9911, 9928), 'numpy.array', 'np.array', (['box_end'], {}), '(box_end)\n', (9919, 9928), True, 'import numpy as np\n'), ((10049, 10076), 'numpy.array', 'np.array', (['orig_spatial_size'], {}), '(orig_spatial_size)\n', (10057, 10076), True, 'import numpy as np\n'), ((10713, 10738), 'numpy.maximum', 'np.maximum', (['(-box_start)', '(0)'], {}), '(-box_start, 0)\n', (10723, 10738), True, 'import numpy as np\n'), ((11492, 11519), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (11513, 11519), True, 'import numpy as np\n'), ((22620, 22710), 'numpy.zeros', 'np.zeros', (['(m.shape[0], patch_size[0], patch_size[1], patch_size[2])'], {'dtype': 'np.float32'}), '((m.shape[0], patch_size[0], patch_size[1], patch_size[2]), dtype=\n np.float32)\n', (22628, 22710), True, 'import numpy as np\n'), ((31770, 31792), 'numpy.nonzero', 'np.nonzero', (['label_flat'], {}), '(label_flat)\n', (31780, 31792), True, 'import numpy as np\n'), ((39239, 39271), 'numpy.clip', 'np.clip', (['(2 * norm_0_1 - 1)', '(-1)', '(1)'], {}), '(2 * norm_0_1 - 1, -1, 1)\n', (39246, 39271), True, 'import numpy as 
np\n'), ((1005, 1020), 'numpy.max', 'np.max', (['spacing'], {}), '(spacing)\n', (1011, 1020), True, 'import numpy as np\n'), ((1023, 1038), 'numpy.min', 'np.min', (['spacing'], {}), '(spacing)\n', (1029, 1038), True, 'import numpy as np\n'), ((2380, 2407), 'numpy.expand_dims', 'np.expand_dims', (['seg'], {'axis': '(0)'}), '(seg, axis=0)\n', (2394, 2407), True, 'import numpy as np\n'), ((4557, 4586), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (4571, 4586), True, 'import numpy as np\n'), ((4763, 4837), 'medical_seg.utils.resample_image_array_size', 'resample_image_array_size', (['label'], {'out_size': 'self.rsize', 'order': 'self.order[1]'}), '(label, out_size=self.rsize, order=self.order[1])\n', (4788, 4837), False, 'from medical_seg.utils import resample_image_array_size\n'), ((5319, 5348), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (5333, 5348), True, 'import numpy as np\n'), ((19772, 19798), 'numpy.stack', 'np.stack', (['channels'], {'axis': '(0)'}), '(channels, axis=0)\n', (19780, 19798), True, 'import numpy as np\n'), ((21121, 21147), 'numpy.stack', 'np.stack', (['channels'], {'axis': '(0)'}), '(channels, axis=0)\n', (21129, 21147), True, 'import numpy as np\n'), ((22455, 22528), 'numpy.zeros', 'np.zeros', (['(patch_size[0], patch_size[1], patch_size[2])'], {'dtype': 'np.float32'}), '((patch_size[0], patch_size[1], patch_size[2]), dtype=np.float32)\n', (22463, 22528), True, 'import numpy as np\n'), ((24606, 24642), 'numpy.clip', 'np.clip', (['img', 'self.b_min', 'self.b_max'], {}), '(img, self.b_min, self.b_max)\n', (24613, 24642), True, 'import numpy as np\n'), ((27923, 27961), 'numpy.asarray', 'np.asarray', (['roi_center'], {'dtype': 'np.int16'}), '(roi_center, dtype=np.int16)\n', (27933, 27961), True, 'import numpy as np\n'), ((27985, 28021), 'numpy.asarray', 'np.asarray', (['roi_size'], {'dtype': 'np.int16'}), '(roi_size, dtype=np.int16)\n', (27995, 28021), True, 
'import numpy as np\n'), ((28131, 28180), 'numpy.maximum', 'np.maximum', (['(roi_start_np + roi_size)', 'roi_start_np'], {}), '(roi_start_np + roi_size, roi_start_np)\n', (28141, 28180), True, 'import numpy as np\n'), ((28691, 28715), 'numpy.array', 'np.array', (['[roi_start_np]'], {}), '([roi_start_np])\n', (28699, 28715), True, 'import numpy as np\n'), ((28791, 28813), 'numpy.array', 'np.array', (['[roi_end_np]'], {}), '([roi_end_np])\n', (28799, 28813), True, 'import numpy as np\n'), ((31682, 31703), 'numpy.any', 'np.any', (['label'], {'axis': '(0)'}), '(label, axis=0)\n', (31688, 31703), True, 'import numpy as np\n'), ((31994, 32017), 'numpy.nonzero', 'np.nonzero', (['(~label_flat)'], {}), '(~label_flat)\n', (32004, 32017), True, 'import numpy as np\n'), ((38106, 38135), 'numpy.expand_dims', 'np.expand_dims', (['label'], {'axis': '(0)'}), '(label, axis=0)\n', (38120, 38135), True, 'import numpy as np\n'), ((5896, 5925), 'numpy.expand_dims', 'np.expand_dims', (['label'], {'axis': '(0)'}), '(label, axis=0)\n', (5910, 5925), True, 'import numpy as np\n'), ((6098, 6123), 'numpy.squeeze', 'np.squeeze', (['label'], {'axis': '(0)'}), '(label, axis=0)\n', (6108, 6123), True, 'import numpy as np\n'), ((10781, 10806), 'numpy.asarray', 'np.asarray', (['img.shape[1:]'], {}), '(img.shape[1:])\n', (10791, 10806), True, 'import numpy as np\n'), ((18656, 18685), 'numpy.expand_dims', 'np.expand_dims', (['label'], {'axis': '(0)'}), '(label, axis=0)\n', (18670, 18685), True, 'import numpy as np\n'), ((18798, 18823), 'numpy.squeeze', 'np.squeeze', (['label'], {'axis': '(0)'}), '(label, axis=0)\n', (18808, 18823), True, 'import numpy as np\n'), ((19699, 19727), 'numpy.rot90', 'np.rot90', (['m[c]', 'k', 'self.axis'], {}), '(m[c], k, self.axis)\n', (19707, 19727), True, 'import numpy as np\n'), ((19918, 19947), 'numpy.rot90', 'np.rot90', (['label', 'k', 'self.axis'], {}), '(label, k, self.axis)\n', (19926, 19947), True, 'import numpy as np\n'), ((20964, 21057), 
'scipy.ndimage.rotate', 'rotate', (['m[c]', 'angle'], {'axes': 'axis', 'reshape': '(False)', 'order': 'self.order', 'mode': 'self.mode', 'cval': '(-1)'}), '(m[c], angle, axes=axis, reshape=False, order=self.order, mode=self.\n mode, cval=-1)\n', (20970, 21057), False, 'from scipy.ndimage import rotate, map_coordinates, gaussian_filter\n'), ((21268, 21362), 'scipy.ndimage.rotate', 'rotate', (['label', 'angle'], {'axes': 'axis', 'reshape': '(False)', 'order': 'self.order', 'mode': '"""nearest"""', 'cval': '(-1)'}), "(label, angle, axes=axis, reshape=False, order=self.order, mode=\n 'nearest', cval=-1)\n", (21274, 21362), False, 'from scipy.ndimage import rotate, map_coordinates, gaussian_filter\n'), ((28388, 28425), 'numpy.asarray', 'np.asarray', (['roi_start'], {'dtype': 'np.int16'}), '(roi_start, dtype=np.int16)\n', (28398, 28425), True, 'import numpy as np\n'), ((28466, 28501), 'numpy.asarray', 'np.asarray', (['roi_end'], {'dtype': 'np.int16'}), '(roi_end, dtype=np.int16)\n', (28476, 28501), True, 'import numpy as np\n'), ((31841, 31880), 'numpy.any', 'np.any', (['(image > image_threshold)'], {'axis': '(0)'}), '(image > image_threshold, axis=0)\n', (31847, 31880), True, 'import numpy as np\n'), ((31921, 31958), 'numpy.logical_and', 'np.logical_and', (['img_flat', '(~label_flat)'], {}), '(img_flat, ~label_flat)\n', (31935, 31958), True, 'import numpy as np\n'), ((38333, 38360), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (38347, 38360), True, 'import numpy as np\n'), ((1163, 1184), 'numpy.array', 'np.array', (['new_spacing'], {}), '(new_spacing)\n', (1171, 1184), True, 'import numpy as np\n'), ((10281, 10305), 'numpy.asarray', 'np.asarray', (['spatial_size'], {}), '(spatial_size)\n', (10291, 10305), True, 'import numpy as np\n'), ((28073, 28101), 'numpy.floor_divide', 'np.floor_divide', (['roi_size', '(2)'], {}), '(roi_size, 2)\n', (28088, 28101), True, 'import numpy as np\n'), ((38717, 38738), 'numpy.squeeze', 
'np.squeeze', (['r'], {'axis': '(0)'}), '(r, axis=0)\n', (38727, 38738), True, 'import numpy as np\n'), ((2614, 2640), 'numpy.array', 'np.array', (['original_spacing'], {}), '(original_spacing)\n', (2622, 2640), True, 'import numpy as np\n'), ((2643, 2667), 'numpy.array', 'np.array', (['target_spacing'], {}), '(target_spacing)\n', (2651, 2667), True, 'import numpy as np\n')] |
from .Variables import EMBEDDINGS_PATH, SVM_MODEL_PATH,SVM_CONFIDENCE, model
from .Get_Embeddings import get_embedding
import pickle
import cv2
import numpy as np
from numpy import asarray
from numpy import savez_compressed
from numpy import load
from numpy import expand_dims
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from PIL import Image
import mtcnn
from mtcnn.mtcnn import MTCNN
detector = MTCNN() # Creating instance from the class MTCNN
def multiple_faces(filename, required_size=(160, 160)):
    """Detect every face in the image at `filename`, classify each face with the
    trained SVM, annotate the image in place with the predicted names, and
    overwrite the original file with the annotated result.

    Args:
        filename: path to the input image; the annotated output is written
            back to this same path.
        required_size: (width, height) each face crop is resized to before
            embedding — presumably the embedding model's expected input size
            (TODO confirm against `get_embedding`/`model`).

    Returns:
        `filename` on success, or False when no face is detected.
    """
    # Re-fit the label encoder on the stored training labels so SVM class
    # indices can be mapped back to person names.
    data = load( EMBEDDINGS_PATH + '/Embeddings-dataset.npz')
    trainy = data['arr_1']
    out_encoder = LabelEncoder()
    out_encoder.fit(trainy)
    trainy = out_encoder.transform(trainy)
    # LOAD THE MODEL
    print("TESTING ON AN IMAGE")
    print("LOADING THE MODEL...")
    svm_model = pickle.load(open(SVM_MODEL_PATH + '/svm_model.sav', 'rb'))
    print("DONE LOADING THE MODEL!")
    print("LOADING THE IMAGE...")
    image = Image.open(filename) # load image from file
    print("DONE LOADING THE IMAGE!")
    image = image.convert('RGB') # convert to RGB, if needed
    pixels = asarray(image) # convert to array
    results = detector.detect_faces(pixels) # detect faces in the image
    if(len(results) == 0):
        return False
    # LOOP OVER ALL FOUND FACE AND ANNOTATE THEM
    for i in range(len(results)):
        x1, y1, width, height = results[i]['box'] # extract the bounding box from the i-th first face
        # MTCNN can return slightly negative coordinates; clamp via abs().
        x1, y1 = abs(x1), abs(y1)
        x2, y2 = x1 + width, y1 + height
        face = pixels[y1:y2, x1:x2] # extract the face
        image = Image.fromarray(face)
        image = image.resize(required_size) # resize pixels to the model size
        face_array = asarray(image)
        face_emb = get_embedding(model, face_array) # get the embeddings
        samples = expand_dims(face_emb, axis=0)
        prediction = svm_model.predict(samples)
        predict_name = out_encoder.inverse_transform(prediction)
        pred_proba = svm_model.predict_proba(samples)
        # predict_name = out_encoder.inverse_transform(prediction)
        # print("Prediction is",predict_name ,prediction,pred_proba )
        print(pred_proba[0][prediction[0]])
        if(pred_proba[0][prediction[0]] >= SVM_CONFIDENCE): # SVM Thresholding to Get Known vs UnKnown People was 0.999
            predict_name = out_encoder.inverse_transform(prediction)
            print("Prediction is",predict_name)
        else:
            # Below the confidence threshold: treat the face as an unknown person.
            predict_name = ['UNKNOWN']
            print("Prediction is",predict_name)
        font = cv2.FONT_HERSHEY_TRIPLEX
        cv2.putText(pixels, predict_name[0], (x1, y1), font, 0.7, (255, 255, 0), 2)
    print("SAVING OUTPUT IMAGE...")
    # NOTE(review): `pixels` is RGB (from PIL) but cv2.imwrite expects BGR, so
    # the saved file will have red/blue channels swapped — confirm whether a
    # cv2.cvtColor(pixels, cv2.COLOR_RGB2BGR) conversion is needed here.
    cv2.imwrite( filename , pixels)
    # NOTE(review): this log message hard-codes a path unrelated to `filename`.
    print("IMAGE SAVED TO:" , 'D:\\Work\\Attendance\\media\\testing\\IMG_1607.JPG' )
return filename | [
"cv2.imwrite",
"sklearn.preprocessing.LabelEncoder",
"PIL.Image.open",
"PIL.Image.fromarray",
"numpy.asarray",
"cv2.putText",
"numpy.expand_dims",
"numpy.load",
"mtcnn.mtcnn.MTCNN"
] | [((490, 497), 'mtcnn.mtcnn.MTCNN', 'MTCNN', ([], {}), '()\n', (495, 497), False, 'from mtcnn.mtcnn import MTCNN\n'), ((605, 654), 'numpy.load', 'load', (["(EMBEDDINGS_PATH + '/Embeddings-dataset.npz')"], {}), "(EMBEDDINGS_PATH + '/Embeddings-dataset.npz')\n", (609, 654), False, 'from numpy import load\n'), ((697, 711), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (709, 711), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1015, 1035), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (1025, 1035), False, 'from PIL import Image\n'), ((1164, 1178), 'numpy.asarray', 'asarray', (['image'], {}), '(image)\n', (1171, 1178), False, 'from numpy import asarray\n'), ((2668, 2697), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'pixels'], {}), '(filename, pixels)\n', (2679, 2697), False, 'import cv2\n'), ((1631, 1652), 'PIL.Image.fromarray', 'Image.fromarray', (['face'], {}), '(face)\n', (1646, 1652), False, 'from PIL import Image\n'), ((1744, 1758), 'numpy.asarray', 'asarray', (['image'], {}), '(image)\n', (1751, 1758), False, 'from numpy import asarray\n'), ((1842, 1871), 'numpy.expand_dims', 'expand_dims', (['face_emb'], {'axis': '(0)'}), '(face_emb, axis=0)\n', (1853, 1871), False, 'from numpy import expand_dims\n'), ((2553, 2628), 'cv2.putText', 'cv2.putText', (['pixels', 'predict_name[0]', '(x1, y1)', 'font', '(0.7)', '(255, 255, 0)', '(2)'], {}), '(pixels, predict_name[0], (x1, y1), font, 0.7, (255, 255, 0), 2)\n', (2564, 2628), False, 'import cv2\n')] |
import pandas as pd
import numpy as np
import config
class Result:
    """
    A class used to represent a Result.

    Attributes
    ----------
    ticker : sequence
        The stock ticker.
    data : dataframe
        The historical data associated with the ticker.
    strategy : Strategy
        An instance of the Strategy class.
    buy_transactions: sequence
        List of buy transactions.
    sell_transactions: sequence
        List of sell transactions.
    buy_transaction_equity: sequence
        List of equity values corresponding to the buy transactions.
    sell_transaction_equity: sequence
        List of equity values corresponding to the sell transactions.
    Performance : Performance
        An instance of the Performance class.
    transactions : numeric
        The total number of buy and sell transactions.

    Methods
    -------
    performance_as_dict()
        Returns the performance results in a dictionary.
    tech_indicators()
        Augments the data attribute with columns for technical indicators.
    buy_and_sell_signals()
        Calculate signals where they can be vectorised.
    trade()
        Enters and exit positions based on buy/sell signals.
    calculate_returns()
        Calculate returns after the trade method has been executed.
    print_results()
        Print the performance results to the console.
    """

    def __init__(self, ticker, strategy, raw_data):
        self.ticker = ticker
        self.data = raw_data
        self.strategy = strategy
        # Order matters: indicators feed signals, signals feed the trade loop,
        # and the trade loop feeds the return calculation.
        self.tech_indicators()
        self.buy_and_sell_signals()
        self.buy_transactions, self.sell_transactions, self.buy_transaction_equity, self.sell_transaction_equity = self.trade()
        self.Performance = self.calculate_returns()
        self.transactions = len(self.buy_transactions + self.sell_transactions)
        self.print_results()

    def performance_as_dict(self):
        """Returns the performance results in a dictionary.

        Parameters
        ----------

        Raises
        ------
        """
        return {'ticker': self.ticker, 'strategy': "Strategy(" + str(self.strategy.required_profit) + ", " + str(
            self.strategy.required_pct_change_min) + ", " + str(self.strategy.required_pct_change_max) + ", " + str(
            self.strategy.required_volume) + ")",
                'annualised_return': self.Performance.annualised_return,
                'annualised_return_ref': self.Performance.annualised_return_ref,
                'end_date': self.Performance.end_date,
                'end_price': self.Performance.end_price,
                'gain': self.Performance.gain,
                'gain_ref': self.Performance.gain_ref,
                'start_date': self.Performance.start_date,
                'start_price': self.Performance.start_price}

    def tech_indicators(self):
        """Augments the data attribute with columns for technical indicators.

        Adds moving averages, buy-condition flags, MFI, RSI, stochastic
        oscillator and Bollinger Bands columns.

        Parameters
        ----------

        Raises
        ------
        """
        self.data = self.data.assign(close_MA_50=self.data[["close"]].ewm(span=50).mean())
        self.data = self.data.assign(close_MA_200=self.data[["close"]].ewm(span=200).mean())
        self.data = self.data.assign(volume_MA_20=self.data[["volume"]].rolling(20).mean())
        self.data = self.data.assign(
            price_change_buy=self.data['close'].pct_change().between(self.strategy.required_pct_change_min,
                                                                    self.strategy.required_pct_change_max))
        self.data = self.data.assign(
            volume_change_buy=(self.data["volume"] > self.strategy.required_volume * self.data["volume_MA_20"]))
        # Money Flow Index (MFI)
        typical_price = (self.data["high"] + self.data["low"] + self.data["close"]) / 3
        money_flow = typical_price * self.data["volume"]
        delta = money_flow - money_flow.shift(1)
        delta = pd.Series([0 if np.isnan(x) else x for x in delta])
        positive_money_flow = pd.Series([x if x > 0 else 0 for x in delta])
        negative_money_flow = pd.Series([abs(x) if x < 0 else 0 for x in delta])
        positive_money_flow_sum = positive_money_flow.rolling(window=14).sum().values
        negative_money_flow_sum = negative_money_flow.rolling(window=14).sum().values
        # Division by zero yields inf/NaN in the index; suppress the warnings.
        with np.errstate(divide='ignore', invalid='ignore'):
            money_ratio = positive_money_flow_sum / negative_money_flow_sum
        money_flow_index = 100 - 100 / (1 + money_ratio)
        self.data = self.data.assign(MFI=money_flow_index)
        # Relative Strength Index (RSI)
        delta = self.data["close"] - self.data["close"].shift(1)
        delta = pd.Series([0 if np.isnan(x) else x for x in delta])
        up = pd.Series([x if x > 0 else 0 for x in delta])
        down = pd.Series([abs(x) if x < 0 else 0 for x in delta])
        with np.errstate(divide='ignore', invalid='ignore'):
            rs = up.rolling(window=14).mean().values / down.rolling(window=14).mean().values
        relative_strength_index = 100 - 100 / (1 + rs)
        self.data = self.data.assign(RSI=relative_strength_index)
        # Stochastic Oscillator
        stochastic_oscillator = pd.Series(
            (self.data["close"] - self.data["close"].rolling(window=14, center=False).min()) / (
                    self.data["close"].rolling(window=14, center=False).max() - self.data["close"].rolling(window=14,
                                                                                                           center=False).min()))
        stochastic_oscillator = 100 * stochastic_oscillator.rolling(window=3).mean()
        self.data = self.data.assign(STO=stochastic_oscillator)
        # Bollinger Bands
        rolling_mean = self.data[["close"]].ewm(span=50).mean()
        rolling_std = self.data[["close"]].ewm(span=50).std()
        self.data = self.data.assign(BB_upper=rolling_mean + (rolling_std * 2))
        self.data = self.data.assign(BB_lower=rolling_mean - (rolling_std * 2))
        return

    def buy_and_sell_signals(self):
        """Calculate signals where they can be vectorised.

        Generation of sell signal requires iterating through the data which is done in the trade method.

        Parameters
        ----------

        Raises
        ------
        """
        self.data = self.data.assign(buy_signal=np.nan, sell_signal=np.nan, buy_signal_date=np.nan,
                                     sell_signal_date=np.nan)
        # A buy signal fires where both the volume and price-change conditions hold.
        buy_prices = self.data["close"].iloc[np.where(self.data["volume_change_buy"] & self.data["price_change_buy"])]
        buy_dates = self.data["date"].iloc[np.where(self.data["volume_change_buy"] & self.data["price_change_buy"])]
        self.data = self.data.assign(buy_signal=buy_prices)
        self.data = self.data.assign(buy_signal_date=buy_dates)
        return

    def trade(self):
        """Enters and exit positions based on buy/sell signals.

        Tracks both the strategy equity and a buy-and-hold reference equity.

        Parameters
        ----------

        Raises
        ------
        """
        buy_transactions, buy_transaction_equity, sell_transactions, sell_transaction_equity = ([] for _ in range(4))
        # Fixed: the original tuple assigned buy_and_hold and buy_and_hold_shares twice.
        open_long_position, buy_and_hold, buy_and_hold_shares, shares = (0, 0, 0, 0)
        buy_and_hold_position_array, open_long_position_array, strategy_equity_array, buy_and_hold_equity_array = (
            np.full(len(self.data["close"].values), np.nan) for _ in range(4))
        # Create buy signal and buy signal dates without NaN or NaT (NaN and NaT inclusive arrays required for plots)
        buy_signal_array_nonan = self.data["buy_signal"].values[~np.isnan(self.data["buy_signal"].values)]
        buy_signal_array_dates_nonat = self.data["buy_signal_date"].values[
            ~np.isnat(self.data["buy_signal_date"].values)]
        j = 0
        cash = config.cash
        buy_and_hold_cash = config.buy_and_hold_cash
        for i in range(0, len(self.data["close"].values)):
            # Handle buy
            if np.isfinite(self.data["buy_signal"].values[i]):
                if not open_long_position:
                    open_long_position = self.data["close"].values[i]
                    shares = (1 - config.transaction_fee) * (cash / open_long_position)
                    cash = 0
                    buy_transactions.append(pd.to_datetime(self.data["date"].values[i]).strftime("%d-%m-%Y"))
                    buy_transaction_equity.append(round(shares * self.data["close"].values[i] + cash, 2))
                if not buy_and_hold:
                    # The reference portfolio buys once, at the first buy signal.
                    buy_and_hold_shares = ((1 - config.transaction_fee) * buy_and_hold_cash) / \
                                          self.data["close"].values[i]
                    buy_and_hold_cash = 0
                    buy_and_hold = 1
            # Handle sell: exit when price exceeds the required profit over the tracked buy signal.
            elif (j < len(buy_signal_array_nonan) and self.data["date"].values[i] > buy_signal_array_dates_nonat[j] and
                  self.data["close"].values[
                      i] > self.strategy.required_profit *
                  buy_signal_array_nonan[j]):
                # Need to offset the index which is based on the original dataframe with all tickers
                self.data.at[self.data.index[0] + i, "sell_signal"] = self.data["close"].values[i]
                self.data.at[self.data.index[0] + i, "sell_signal_date"] = pd.to_datetime(self.data["date"].values[i])
                if open_long_position:
                    j = j + 1
                    cash = (1 - config.transaction_fee) * shares * self.data["close"].values[i]
                    shares = 0
                    open_long_position = 0
                    sell_transactions.append(pd.to_datetime(self.data["date"].values[i]).strftime("%d-%m-%Y"))
                    sell_transaction_equity.append(round(shares * self.data["close"].values[i] + cash, 2))
            # Record open positions
            open_long_position_array[i] = self.data["close"].values[i] if open_long_position else 0
            buy_and_hold_position_array[i] = self.data["close"].values[i] if buy_and_hold else 0
            # Record equity
            buy_and_hold_equity_array[i] = buy_and_hold_shares * buy_and_hold_position_array[
                i] + buy_and_hold_cash
            strategy_equity_array[i] = shares * open_long_position_array[i] + cash
        self.data.sell_signal_date = self.data.sell_signal_date.astype("datetime64[ns]", copy=False)
        self.data = self.data.assign(strategy_equity=strategy_equity_array,
                                     buy_and_hold_equity=buy_and_hold_equity_array,
                                     open_long_position=open_long_position_array,
                                     buy_and_hold_position=buy_and_hold_position_array)
        return buy_transactions, sell_transactions, buy_transaction_equity, sell_transaction_equity

    def calculate_returns(self):
        """Calculate returns after the trade method has been executed.

        Parameters
        ----------

        Raises
        ------
        """
        # Calculate returns using strategies and buy and hold
        date_index_long = np.isfinite(self.data["open_long_position"])
        date_index_buy_and_hold = np.isfinite(self.data["buy_and_hold_position"])
        # Handle case where there is no long position
        if self.data["date"][date_index_long].empty:
            performance = Performance(0, 0, 0, 0, 0, 0, 0, 0)
            return performance
        else:
            start_date = self.data["date"][date_index_long].iloc[0]
            start_date_ref = self.data["date"][date_index_buy_and_hold].iloc[0]
            start_price = self.data["strategy_equity"][date_index_long].iloc[0]
            start_price_ref = self.data["buy_and_hold_equity"][date_index_buy_and_hold].iloc[0]
            end_date = self.data["date"][date_index_long].iloc[-1]
            end_date_ref = self.data["date"][date_index_buy_and_hold].iloc[-1]
            end_price = self.data["strategy_equity"][date_index_long].iloc[-1]
            end_price_ref = self.data["buy_and_hold_equity"][date_index_buy_and_hold].iloc[-1]
            # Compute annualised returns (the +1 avoids a zero-day holding period)
            delta = 1 + (end_date - start_date).days
            delta_ref = 1 + (end_date_ref - start_date_ref).days
            annualised_return = 100 * (((end_price / start_price) ** (365 / delta)) - 1)
            annualised_return_ref = 100 * (((end_price_ref / start_price_ref) ** (365 / delta_ref)) - 1)
            gain = end_price / start_price
            gain_ref = end_price_ref / start_price_ref
            performance = Performance(annualised_return, annualised_return_ref, start_price, start_date, end_price,
                                      end_date, gain, gain_ref)
            return performance

    def print_results(self):
        """Print the performance results to the console.

        Parameters
        ----------

        Raises
        ------
        """
        print(str(self.ticker) + " Strategy Annual Return: " + str(self.Performance.annualised_return) + "%" + "\n" +
              str(self.ticker) + " Buy Signals: " + str(
            [pd.to_datetime(i).strftime("%d-%m-%Y") for i in self.data["buy_signal_date"].tolist() if
             not pd.isna(i)]) + "\n" +
              str(self.ticker) + " Buy Transactions: " + str(self.buy_transactions) + "\n" +
              str(self.ticker) + " Buy Transaction Equity: " + str(self.buy_transaction_equity) + "\n" +
              str(self.ticker) + " Position Start Date: " + str(
            pd.to_datetime(self.Performance.start_date).strftime("%d-%m-%Y")) + "\n" +
              str(self.ticker) + " Position Equity Start: " + str(self.Performance.start_price) + "\n" +
              str(self.ticker) + " Sell Signals: " + str(
            [pd.to_datetime(i).strftime("%d-%m-%Y") for i in self.data["sell_signal_date"].tolist() if
             not pd.isna(i)]) + "\n" +
              str(self.ticker) + " Sell Transactions: " + str(self.sell_transactions) + "\n" +
              str(self.ticker) + " Sell Transaction Equity: " + str(self.sell_transaction_equity) + "\n" +
              str(self.ticker) + " Position End Date: " + str(
            pd.to_datetime(self.Performance.end_date).strftime("%d-%m-%Y")) + "\n" +
              str(self.ticker) + " Position Equity End: " + str(self.Performance.end_price) + "\n" +
              str(self.ticker) + " Buy and Hold Annual Return: " + str(
            self.Performance.annualised_return_ref) + "%" + "\n" +
              str(self.ticker) + " Strategy Gain: " + str(self.Performance.gain) + "\n" +
              # Bug fix: this line previously printed self.Performance.gain.
              str(self.ticker) + " Buy and Hold Gain: " + str(self.Performance.gain_ref))
        return
class Performance:
"""
A class used to hold the performance for the Result.
Attributes
----------
annualised_return : numeric
The annualised return based on equity changes following the buy and sell transactions (based on the trading
strategy) in the trade method.
annualised_return_ref : numeric
The annualised return based on equity changes following the buy and hold transactions in the trade method.
start_price : numeric
The equity at the start of the strategy.
start_date : numeric
The date at the start of the strategy.
end_price : numeric
The equity at the end of the strategy.
end_date : numeric
The date at the end of the strategy.
gain : numeric
The raw gain (i.e. not annualised) based on equity changes following the buy and sell transactions (based on
the trading strategy) in the trade method.
gain_ref : numeric
The raw gain (i.e. not annualised) based on equity changes following the buy and hold transactions
in the trade method.
Methods
-------
"""
def __init__(self, annualised_return, annualised_return_ref, start_price, start_date, end_price, end_date, gain,
gain_ref):
self.annualised_return = np.round(annualised_return, 2)
self.annualised_return_ref = np.round(annualised_return_ref, 2)
self.start_price = np.round(start_price, 2)
self.start_date = start_date
self.end_price = np.round(end_price, 2)
self.end_date = end_date
self.gain = np.round(gain, 2)
self.gain_ref = np.round(gain_ref, 2)
return
| [
"pandas.Series",
"numpy.where",
"pandas.to_datetime",
"numpy.errstate",
"numpy.isfinite",
"numpy.isnan",
"pandas.isna",
"numpy.isnat",
"numpy.round"
] | [((4049, 4096), 'pandas.Series', 'pd.Series', (['[(x if x > 0 else 0) for x in delta]'], {}), '([(x if x > 0 else 0) for x in delta])\n', (4058, 4096), True, 'import pandas as pd\n'), ((4788, 4835), 'pandas.Series', 'pd.Series', (['[(x if x > 0 else 0) for x in delta]'], {}), '([(x if x > 0 else 0) for x in delta])\n', (4797, 4835), True, 'import pandas as pd\n'), ((11209, 11253), 'numpy.isfinite', 'np.isfinite', (["self.data['open_long_position']"], {}), "(self.data['open_long_position'])\n", (11220, 11253), True, 'import numpy as np\n'), ((11288, 11335), 'numpy.isfinite', 'np.isfinite', (["self.data['buy_and_hold_position']"], {}), "(self.data['buy_and_hold_position'])\n", (11299, 11335), True, 'import numpy as np\n'), ((16039, 16069), 'numpy.round', 'np.round', (['annualised_return', '(2)'], {}), '(annualised_return, 2)\n', (16047, 16069), True, 'import numpy as np\n'), ((16107, 16141), 'numpy.round', 'np.round', (['annualised_return_ref', '(2)'], {}), '(annualised_return_ref, 2)\n', (16115, 16141), True, 'import numpy as np\n'), ((16169, 16193), 'numpy.round', 'np.round', (['start_price', '(2)'], {}), '(start_price, 2)\n', (16177, 16193), True, 'import numpy as np\n'), ((16256, 16278), 'numpy.round', 'np.round', (['end_price', '(2)'], {}), '(end_price, 2)\n', (16264, 16278), True, 'import numpy as np\n'), ((16332, 16349), 'numpy.round', 'np.round', (['gain', '(2)'], {}), '(gain, 2)\n', (16340, 16349), True, 'import numpy as np\n'), ((16374, 16395), 'numpy.round', 'np.round', (['gain_ref', '(2)'], {}), '(gain_ref, 2)\n', (16382, 16395), True, 'import numpy as np\n'), ((4361, 4407), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (4372, 4407), True, 'import numpy as np\n'), ((4913, 4959), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (4924, 4959), True, 'import numpy as np\n'), ((6564, 
6636), 'numpy.where', 'np.where', (["(self.data['volume_change_buy'] & self.data['price_change_buy'])"], {}), "(self.data['volume_change_buy'] & self.data['price_change_buy'])\n", (6572, 6636), True, 'import numpy as np\n'), ((6681, 6753), 'numpy.where', 'np.where', (["(self.data['volume_change_buy'] & self.data['price_change_buy'])"], {}), "(self.data['volume_change_buy'] & self.data['price_change_buy'])\n", (6689, 6753), True, 'import numpy as np\n'), ((8071, 8117), 'numpy.isfinite', 'np.isfinite', (["self.data['buy_signal'].values[i]"], {}), "(self.data['buy_signal'].values[i])\n", (8082, 8117), True, 'import numpy as np\n'), ((7698, 7738), 'numpy.isnan', 'np.isnan', (["self.data['buy_signal'].values"], {}), "(self.data['buy_signal'].values)\n", (7706, 7738), True, 'import numpy as np\n'), ((7829, 7874), 'numpy.isnat', 'np.isnat', (["self.data['buy_signal_date'].values"], {}), "(self.data['buy_signal_date'].values)\n", (7837, 7874), True, 'import numpy as np\n'), ((3983, 3994), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (3991, 3994), True, 'import numpy as np\n'), ((4739, 4750), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (4747, 4750), True, 'import numpy as np\n'), ((9421, 9464), 'pandas.to_datetime', 'pd.to_datetime', (["self.data['date'].values[i]"], {}), "(self.data['date'].values[i])\n", (9435, 9464), True, 'import pandas as pd\n'), ((8393, 8436), 'pandas.to_datetime', 'pd.to_datetime', (["self.data['date'].values[i]"], {}), "(self.data['date'].values[i])\n", (8407, 8436), True, 'import pandas as pd\n'), ((9749, 9792), 'pandas.to_datetime', 'pd.to_datetime', (["self.data['date'].values[i]"], {}), "(self.data['date'].values[i])\n", (9763, 9792), True, 'import pandas as pd\n'), ((14236, 14277), 'pandas.to_datetime', 'pd.to_datetime', (['self.Performance.end_date'], {}), '(self.Performance.end_date)\n', (14250, 14277), True, 'import pandas as pd\n'), ((13830, 13847), 'pandas.to_datetime', 'pd.to_datetime', (['i'], {}), '(i)\n', (13844, 13847), True, 
'import pandas as pd\n'), ((13937, 13947), 'pandas.isna', 'pd.isna', (['i'], {}), '(i)\n', (13944, 13947), True, 'import pandas as pd\n'), ((13579, 13622), 'pandas.to_datetime', 'pd.to_datetime', (['self.Performance.start_date'], {}), '(self.Performance.start_date)\n', (13593, 13622), True, 'import pandas as pd\n'), ((13176, 13193), 'pandas.to_datetime', 'pd.to_datetime', (['i'], {}), '(i)\n', (13190, 13193), True, 'import pandas as pd\n'), ((13282, 13292), 'pandas.isna', 'pd.isna', (['i'], {}), '(i)\n', (13289, 13292), True, 'import pandas as pd\n')] |
"""
geoutils.georaster provides a toolset for working with raster data.
"""
from __future__ import annotations
import copy
import os
import warnings
from collections import abc
from numbers import Number
from typing import IO, Any, Callable, TypeVar, overload
import geopandas as gpd
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pyproj
import rasterio as rio
import rasterio.mask
import rasterio.transform
import rasterio.warp
import rasterio.windows
from affine import Affine
from matplotlib import cm, colors
from rasterio.crs import CRS
from rasterio.features import shapes
from rasterio.io import MemoryFile
from rasterio.plot import show as rshow
from rasterio.warp import Resampling
from scipy.ndimage import map_coordinates
from shapely.geometry.polygon import Polygon
import geoutils.geovector as gv
from geoutils.geovector import Vector
# If python38 or above, Literal is builtin. Otherwise, use typing_extensions
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal # type: ignore
try:
import rioxarray
except ImportError:
_has_rioxarray = False
else:
_has_rioxarray = True
RasterType = TypeVar("RasterType", bound="Raster")
def _resampling_from_str(resampling: str) -> Resampling:
    """
    Match a rio.warp.Resampling enum from a string representation.

    :param resampling: A case-sensitive string matching the resampling enum (e.g. 'cubic_spline')

    :raises ValueError: If no matching Resampling enum was found.

    :returns: A rio.warp.Resampling enum that matches the given string.
    """
    # Build a name -> enum lookup from the string form of every member
    # ("Resampling.cubic_spline" -> "cubic_spline").
    lookup = {str(method).replace("Resampling.", ""): method for method in rio.warp.Resampling}
    try:
        return lookup[resampling]
    except KeyError:
        raise ValueError(
            f"'{resampling}' is not a valid rasterio.warp.Resampling method. "
            f"Valid methods: {list(lookup.keys())}"
        ) from None
# Function to set the default nodata values for any given dtype
# Similar to GDAL for int types, but without absurdly long nodata values for floats.
# For unsigned types, the maximum value is chosen (with a max of 99999).
# For signed types, the minimum value is chosen (with a min of -99999).
def _default_ndv(dtype: str | np.dtype | type) -> int:
"""
Set the default nodata value for any given dtype, when this is not provided.
"""
default_ndv_lookup = {
"uint8": 255,
"int8": -128,
"uint16": 65535,
"int16": -32768,
"uint32": 99999,
"int32": -99999,
"float32": -99999,
"float64": -99999,
"float128": -99999,
}
# Check argument dtype is as expected
if not isinstance(dtype, (str, np.dtype, type)):
raise ValueError(f"dtype {dtype} not understood")
# Convert numpy types to string
if isinstance(dtype, type):
dtype = np.dtype(dtype).name
# Convert np.dtype to string
if isinstance(dtype, np.dtype):
dtype = dtype.name
if dtype in default_ndv_lookup.keys():
return default_ndv_lookup[dtype]
else:
raise NotImplementedError(f"No default nodata value set for dtype {dtype}")
class Raster:
"""
Create a Raster object from a rasterio-supported raster dataset.
If not otherwise specified below, attribute types and contents correspond
to the attributes defined by rasterio.
Attributes:
filename : str
The path/filename of the loaded, file, only set if a disk-based file is read in.
data : np.array
Loaded image. Dimensions correspond to (bands, height, width).
nbands : int
Number of bands loaded into .data
bands : tuple
The indexes of the opened dataset which correspond to the bands loaded into data.
is_loaded : bool
True if the image data have been loaded into this Raster.
ds : rio.io.DatasetReader
Link to underlying DatasetReader object.
bounds
count
crs
dataset_mask
driver
dtypes
height
indexes
name
nodata
res
shape
transform
width
"""
# This only gets set if a disk-based file is read in.
# If the Raster is created with from_array, from_mem etc, this stays as None.
filename = None
_is_modified: bool | None = None
_disk_hash: int | None = None
# Rasterio-inherited names and types are defined here to get proper type hints.
# Maybe these don't have to be hard-coded in the future?
_data: np.ndarray | np.ma.masked_array
transform: Affine
crs: CRS
nodata: int | float | None
res: tuple[float | int, float | int]
bounds: rio.coords.BoundingBox
height: int
width: int
shape: tuple[int, int]
indexes: list[int]
count: int
dataset_mask: np.ndarray | None
driver: str
dtypes: list[str]
name: str
def __init__(
    self,
    filename_or_dataset: str | RasterType | rio.io.DatasetReader | rio.io.MemoryFile,
    bands: None | int | list[int] = None,
    load_data: bool = True,
    downsample: int | float = 1,
    masked: bool = True,
    nodata: abc.Sequence[int | float] | int | float | None = None,
    attrs: list[str] | None = None,
    as_memfile: bool = False,
) -> None:
    """
    Load a rasterio-supported dataset, given a filename.

    :param filename_or_dataset: The filename of the dataset.

    :param bands: The band(s) to load into the object. Default is to load all bands.

    :param load_data: Load the raster data into the object. Default is True.

    :param downsample: Reduce the size of the image loaded by this factor. Default is 1

    :param masked: the data is loaded as a masked array, with no data values masked. Default is True.

    :param nodata: nodata to be used (overwrites the metadata). Default is None, i.e. reads from metadata.

    :param attrs: Additional attributes from rasterio's DataReader class to add to the Raster object.
        Default list is ['bounds', 'count', 'crs', 'dataset_mask', 'driver', 'dtypes', 'height', 'indexes',
        'name', 'nodata', 'res', 'shape', 'transform', 'width'] - if no attrs are specified, these will be added.

    :param as_memfile: open the dataset via a rio.MemoryFile.

    :return: A Raster object
    """
    # If Raster is passed, simply point back to Raster: all attributes are
    # shared with the source object (shallow copy of its __dict__).
    if isinstance(filename_or_dataset, Raster):
        for key in filename_or_dataset.__dict__:
            setattr(self, key, filename_or_dataset.__dict__[key])
        return
    # Image is a file on disk.
    elif isinstance(filename_or_dataset, str):
        # Save the absolute on-disk filename
        self.filename = os.path.abspath(filename_or_dataset)
        if as_memfile:
            # open the file in memory
            # NOTE(review): the file object passed to MemoryFile is never
            # explicitly closed — presumably released with the MemoryFile;
            # confirm there is no handle leak.
            memfile = MemoryFile(open(filename_or_dataset, "rb"))
            # Read the file as a rasterio dataset
            self.ds = memfile.open()
        else:
            self.ds = rio.open(filename_or_dataset, "r")
    # If rio.Dataset is passed
    elif isinstance(filename_or_dataset, rio.io.DatasetReader):
        self.filename = filename_or_dataset.files[0]
        self.ds = filename_or_dataset
    # Or, image is already a Memory File.
    elif isinstance(filename_or_dataset, rio.io.MemoryFile):
        self.ds = filename_or_dataset.open()
    # Provide a catch in case trying to load from data array
    elif isinstance(filename_or_dataset, np.ndarray):
        raise ValueError("np.array provided as filename. Did you mean to call Raster.from_array(...) instead? ")
    # Don't recognise the input, so stop here.
    else:
        raise ValueError("filename argument not recognised.")
    # Copy rasterio attributes (crs, transform, nodata, ...) onto self.
    self._read_attrs(attrs)
    # Save _masked attribute to be used by self.load()
    self._masked = masked
    # Check number of bands to be loaded
    # NOTE(review): nbands is unbound if bands is neither None, int, nor
    # iterable — a later NameError instead of a clear message; confirm.
    if bands is None:
        nbands = self.count
    elif isinstance(bands, int):
        nbands = 1
    elif isinstance(bands, abc.Iterable):
        nbands = len(bands)
    # Downsampled image size
    if not isinstance(downsample, (int, float)):
        raise ValueError("downsample must be of type int or float")
    if downsample == 1:
        out_shape = (nbands, self.height, self.width)
    else:
        # Ceil so the downsampled grid still covers the full extent.
        down_width = int(np.ceil(self.width / downsample))
        down_height = int(np.ceil(self.height / downsample))
        out_shape = (nbands, down_height, down_width)
    if load_data:
        self.load(bands=bands, out_shape=out_shape)
        self.nbands = self._data.shape[0]
        self.is_loaded = True
        # Hash the on-disk state so is_modified can detect later changes.
        if isinstance(filename_or_dataset, str):
            self._is_modified = False
            self._disk_hash = hash((self._data.tobytes(), self.transform, self.crs, self.nodata))
    else:
        self._data = None
        self.nbands = None
        self.is_loaded = False
    # update attributes when downsample is not 1
    # (down_width/down_height were set above under the same condition)
    if downsample != 1:
        # Original attributes
        meta = self.ds.meta
        # width and height must be same as data
        meta.update({"width": down_width, "height": down_height})
        # Resolution is set, transform must be updated accordingly
        res = tuple(np.asarray(self.res) * downsample)
        transform = rio.transform.from_origin(self.bounds.left, self.bounds.top, res[0], res[1])
        meta.update({"transform": transform})
        # Update metadata
        # NOTE(review): with load_data=False and downsample != 1, self.data
        # is None here and is passed to _update — looks like it would fail
        # inside ds.write; confirm this combination is supported.
        self._update(self.data, metadata=meta)
    # Set nodata
    if nodata is not None:
        self.set_ndv(nodata)
@classmethod
def from_array(
    cls: type[RasterType],
    data: np.ndarray | np.ma.masked_array,
    transform: tuple[float, ...] | Affine,
    crs: CRS | int,
    nodata: int | float | None = None,
) -> RasterType:
    """Create a Raster from a numpy array and some geo-referencing information.

    :param data: data array

    :param transform: the 2-D affine transform for the image mapping.
        Either a tuple(x_res, 0.0, top_left_x, 0.0, y_res, top_left_y) or
        an affine.Affine object.

    :param crs: Coordinate Reference System for image. Either a rasterio CRS,
        or the EPSG integer.

    :param nodata: nodata value

    :returns: A Raster object containing the provided data.

    Example:

        You have a data array in EPSG:32645. It has a spatial resolution of
        30 m in x and y, and its top left corner is X=478000, Y=3108140.

        >>> transform = (30.0, 0.0, 478000.0, 0.0, -30.0, 3108140.0)
        >>> myim = Raster.from_array(data, transform, 32645)
    """
    # Accept a plain 6-tuple in addition to an affine.Affine object.
    if not isinstance(transform, Affine):
        if not isinstance(transform, tuple):
            raise ValueError("transform argument needs to be Affine or tuple.")
        transform = Affine(*transform)

    # An integer CRS is interpreted as an EPSG code.
    if isinstance(crs, int):
        crs = CRS.from_epsg(crs)

    # Give a 2-D ('single-band') array a leading band dimension.
    if len(data.shape) < 3:
        data = np.expand_dims(data, 0)

    # For masked arrays, burn the nodata value into the masked cells
    # (a nodata value is mandatory whenever any cell is masked).
    if isinstance(data, np.ma.masked_array):
        if nodata is not None:
            data.data[data.mask] = nodata
        elif np.sum(data.mask) > 0:
            raise ValueError("For masked arrays, a nodata value must be set")

    # Write the array into an in-memory GeoTIFF, then build the Raster
    # from that handle (i.e., __init__ will now be run).
    memfile = MemoryFile()
    with rio.open(
        memfile,
        "w",
        driver="GTiff",
        height=data.shape[1],
        width=data.shape[2],
        count=data.shape[0],
        dtype=data.dtype,
        crs=crs,
        transform=transform,
        nodata=nodata,
    ) as ds:
        ds.write(data)

    return cls(memfile)
def __repr__(self) -> str:
    """Convert object to formal string representation."""
    # Render every saved rasterio attribute, in order, as the argument list.
    attr_values = (str(getattr(self, attr)) for attr in self._saved_attrs)
    return f"{type(self).__module__}.{type(self).__qualname__}({', '.join(attr_values)})"
def __str__(self) -> str:
    """Provide string of information about Raster (delegates to self.info())."""
    return self.info()
def __eq__(self, other: object) -> bool:
    """Check if a Raster's data and georeferencing is equal to another."""
    if not isinstance(other, type(self)):  # TODO: Possibly add equals to SatelliteImage?
        return NotImplemented
    # Equal when data (NaNs compare equal), transform, CRS and nodata all match.
    same_data = np.array_equal(self.data, other.data, equal_nan=True)
    same_transform = self.transform == other.transform
    same_crs = self.crs == other.crs
    same_nodata = self.nodata == other.nodata
    return same_data and same_transform and same_crs and same_nodata
def __ne__(self, other: object) -> bool:
    """Check if a Raster's data or georeferencing differs from another's."""
    return not self.__eq__(other)
def __add__(self: RasterType, other: RasterType | np.ndarray | Number) -> RasterType:
    """
    Sum up the data of two rasters or a raster and a numpy array, or a raster and single number.

    If other is a Raster, it must have the same data.shape, transform and crs as self.
    If other is a np.ndarray, it must have the same shape.
    Otherwise, other must be a single number.
    """
    if isinstance(other, Raster):
        # Raster + Raster: both must be loaded and share grid/georeferencing.
        if not (self.is_loaded and other.is_loaded):
            raise ValueError("Raster's data must be loaded with self.load().")
        same_grid = (
            self.data.shape == other.data.shape
            and self.transform == other.transform
            and self.crs == other.crs
        )
        if not same_grid:
            raise ValueError("Both rasters must have the same shape, transform and CRS.")
        other_data = other.data
    elif isinstance(other, np.ndarray):
        # Raster + array: shapes must match exactly.
        if self.data.shape != other.shape:
            raise ValueError("Both rasters must have the same shape.")
        other_data = other
    elif isinstance(other, Number):
        # Raster + scalar: broadcast over the whole array.
        other_data = other
    else:
        raise ValueError("Addition possible only with a Raster, np.ndarray or single number.")

    # Sum the arrays and wrap the result with self's georeferencing.
    summed = self.data + other_data
    return self.from_array(summed, self.transform, self.crs, nodata=self.nodata)
def __neg__(self: RasterType) -> RasterType:
    """Return a copy of self with self.data negated."""
    negated = self.copy()
    negated.data = -negated.data
    return negated
def __sub__(self, other: Raster | np.ndarray | Number) -> Raster:
    """
    Subtract two rasters. Both rasters must have the same data.shape, transform and crs.

    :param other: the Raster, numpy array or number to subtract from this one.

    :returns: a new Raster holding the difference.
    """
    if isinstance(other, Raster):
        # Cast both rasters to a common dtype before negating, so e.g.
        # unsigned integer data does not wrap around.
        # Fix: np.find_common_type is deprecated and removed in NumPy 2.0;
        # np.result_type applies the same promotion rules for dtype inputs.
        ctype: np.dtype = np.result_type(*self.dtypes, *other.dtypes)
        other = other.astype(ctype)  # type: ignore
    # Delegate to __add__ and __neg__ (which validate shapes/georeferencing).
    return self + -other  # type: ignore
# typing-only overloads for astype: with inplace=False a converted copy is
# returned; with inplace=True the raster is modified and None is returned.
@overload
def astype(self, dtype: np.dtype | type | str, inplace: Literal[False]) -> Raster:
    ...

@overload
def astype(self, dtype: np.dtype | type | str, inplace: Literal[True]) -> None:
    ...
def astype(self, dtype: np.dtype | type | str, inplace: bool = False) -> Raster | None:
    """
    Converts the data type of a Raster object.

    :param dtype: Any numpy dtype or string accepted by numpy.astype

    :param inplace: Set to True to modify the raster in place.

    :returns: the output Raster with dtype changed, or None when inplace=True.
    """
    # Refuse dtypes that rasterio cannot handle.
    if not rio.dtypes.check_dtype(dtype):
        raise TypeError(f"{dtype} is not supported by rasterio")

    # Warn (but proceed) when the cast cannot represent the data exactly.
    if not rio.dtypes.can_cast_dtype(self.data, dtype):
        warnings.warn(
            "dtype conversion will result in a loss of information. "
            f"{rio.dtypes.get_minimum_dtype(self.data)} is the minimum type to represent the data."
        )

    out_data = self.data.astype(dtype)

    if not inplace:
        return self.from_array(out_data, self.transform, self.crs, nodata=self.nodata)

    # In-place: rewrite the in-memory dataset with updated metadata.
    meta = self.ds.meta
    meta.update({"dtype": dtype})
    self._update(imgdata=out_data, metadata=meta)
    return None
def _get_rio_attrs(self) -> list[str]:
    """Get the attributes that have the same name in rio.DatasetReader and Raster."""
    # Keep annotated Raster attributes (skipping dunder-like names) that the
    # underlying dataset also exposes.
    ds_members = dir(self.ds)
    return [attr for attr in Raster.__annotations__.keys() if "__" not in attr and attr in ds_members]
def _read_attrs(self, attrs: list[str] | str | None = None) -> None:
    """
    Copy the most used rasterio dataset attributes onto this Raster.

    :param attrs: extra attribute names to copy from self.ds; the rasterio
        attributes shared with Raster (plus any subclass annotations found on
        the dataset) are always included. A single string is accepted.
    """
    # Attributes shared between rio.DatasetReader and the Raster class.
    rio_attrs = self._get_rio_attrs()
    # Also pick up annotations declared on subclasses (e.g. SatelliteImage).
    # Fix: for a plain Raster, self.__annotations__ resolves to the same
    # class annotations _get_rio_attrs already scanned, which previously
    # duplicated every entry in rio_attrs (and thus in _saved_attrs).
    for attr in self.__annotations__.keys():
        if "__" in attr or attr not in dir(self.ds):
            continue
        if attr not in rio_attrs:
            rio_attrs.append(attr)
    if attrs is None:
        self._saved_attrs = rio_attrs
        attrs = rio_attrs
    else:
        if isinstance(attrs, str):
            attrs = [attrs]
        for attr in rio_attrs:
            if attr not in attrs:
                attrs.append(attr)
        self._saved_attrs = attrs
    # Mirror each attribute from the dataset onto the Raster instance.
    for attr in attrs:
        setattr(self, attr, getattr(self.ds, attr))
@property
def is_modified(self) -> bool:
    """Check whether file has been modified since it was created/opened.

    :returns: True if Raster has been modified.
    """
    if self._is_modified:
        return True
    # Re-hash the data and georeferencing and compare against the hash
    # recorded when the file was read from disk.
    current_hash = hash((self._data.tobytes(), self.transform, self.crs, self.nodata))
    self._is_modified = current_hash != self._disk_hash
    return self._is_modified
@property
def data(self) -> np.ndarray | np.ma.masked_array:
    """
    Get data.

    :returns: data array (None until the data has been loaded, e.g. when
        the Raster was opened with load_data=False).
    """
    return self._data
@data.setter
def data(self, new_data: np.ndarray | np.ma.masked_array) -> None:
    """
    Set the contents of .data.

    new_data must have the same shape as existing data! (bands dimension included)

    :param new_data: New data to assign to this instance of Raster

    :raises ValueError: if new_data is not a numpy array, or its shape or
        dtype does not match the existing data.
    """
    # Check that new_data is a Numpy array
    if not isinstance(new_data, np.ndarray):
        raise ValueError("New data must be a numpy array.")

    # Check that new_data has correct shape; when no data is loaded, fall
    # back to the dataset's declared dimensions.
    if self.is_loaded:
        orig_shape = self._data.shape
    else:
        orig_shape = (self.count, self.height, self.width)

    if new_data.shape != orig_shape:
        raise ValueError(f"New data must be of the same shape as existing data: {orig_shape}.")

    # Check that new_data has the right type.
    # Fix: when no data is loaded self._data is None, and the dtype
    # comparison used to raise AttributeError; skip the check in that case.
    # Fix: the previous error message contained a broken backslash line
    # continuation that embedded raw indentation into the runtime string.
    if self._data is not None and new_data.dtype != self._data.dtype:
        raise ValueError(f"New data must be of the same type as existing data: {self.data.dtype}")

    self._data = new_data
def _update(
    self,
    imgdata: np.ndarray | None = None,
    metadata: dict[str, Any] | None = None,
    vrt_to_driver: str = "GTiff",
) -> None:
    """
    Update the object with a new image or metadata.

    :param imgdata: image data to update with. Defaults to the current data.

    :param metadata: metadata to update with. Defaults to the current metadata.

    :param vrt_to_driver: name of driver to coerce a VRT to. This is required
        because rasterio does not support writing to to a VRTSourcedRasterBand.
    """
    # Rewrite the dataset into a fresh in-memory file.
    memfile = MemoryFile()
    if imgdata is None:
        imgdata = self.data
    if metadata is None:
        metadata = self.ds.meta
    # VRTs cannot be written; coerce to a writable driver.
    if metadata["driver"] == "VRT":
        metadata["driver"] = vrt_to_driver
    with memfile.open(**metadata) as ds:
        ds.write(imgdata)
    # Re-point self.ds at the new in-memory dataset and refresh the
    # mirrored rasterio attributes (and the cached array, if loaded).
    self.ds = memfile.open()
    self._read_attrs()
    if self.is_loaded:
        self.load()
    # Any _update call marks the raster as diverged from the on-disk file.
    self._is_modified = True
def info(self, stats: bool = False) -> str:
    """
    Returns string of information about the raster (filename, coordinate system, number of columns/rows, etc.).

    :param stats: Add statistics for each band of the dataset (max, min, median, mean, std. dev.). Default is to
        not calculate statistics.

    :returns: text information about Raster attributes.
    """
    # Fixed header: georeferencing and file metadata.
    as_str = [
        f"Driver: {self.driver} \n",
        f"Opened from file: {self.filename} \n",
        f"Filename: {self.name} \n",
        f"Raster modified since disk load? {self._is_modified} \n",
        f"Size: {self.width}, {self.height}\n",
        f"Number of bands: {self.count:d}\n",
        f"Data types: {self.dtypes}\n",
        f"Coordinate System: EPSG:{self.crs.to_epsg()}\n",
        f"NoData Value: {self.nodata}\n",
        "Pixel Size: {}, {}\n".format(*self.res),
        "Upper Left Corner: {}, {}\n".format(*self.bounds[:2]),
        "Lower Right Corner: {}, {}\n".format(*self.bounds[2:]),
    ]
    # Optional per-band statistics (only computable once data is loaded).
    if stats:
        if self.data is not None:
            if self.nbands == 1:
                as_str.append(f"[MAXIMUM]: {np.nanmax(self.data):.2f}\n")
                as_str.append(f"[MINIMUM]: {np.nanmin(self.data):.2f}\n")
                as_str.append(f"[MEDIAN]: {np.ma.median(self.data):.2f}\n")
                as_str.append(f"[MEAN]: {np.nanmean(self.data):.2f}\n")
                as_str.append(f"[STD DEV]: {np.nanstd(self.data):.2f}\n")
            else:
                for b in range(self.nbands):
                    # try to keep with rasterio convention.
                    as_str.append(f"Band {b + 1}:")
                    as_str.append(f"[MAXIMUM]: {np.nanmax(self.data[b, :, :]):.2f}\n")
                    as_str.append(f"[MINIMUM]: {np.nanmin(self.data[b, :, :]):.2f}\n")
                    as_str.append(f"[MEDIAN]: {np.ma.median(self.data[b, :, :]):.2f}\n")
                    as_str.append(f"[MEAN]: {np.nanmean(self.data[b, :, :]):.2f}\n")
                    as_str.append(f"[STD DEV]: {np.nanstd(self.data[b, :, :]):.2f}\n")
    return "".join(as_str)
def copy(self: RasterType, new_array: np.ndarray | None = None) -> RasterType:
    """
    Copy the Raster object in memory

    :param new_array: New array to use for the copied Raster

    :return: a new Raster sharing this one's transform, CRS and nodata.
    """
    # Reuse the current data unless a replacement array was supplied.
    data = self.data if new_array is None else new_array
    return self.from_array(data=data, transform=self.transform, crs=self.crs, nodata=self.nodata)
@property
def __array_interface__(self) -> dict[str, Any]:
    # Expose the numpy array interface (so e.g. np.asarray(raster) works),
    # lazily loading the data on first access.
    if self._data is None:
        self.load()
    return self._data.__array_interface__  # type: ignore
def load(self, bands: int | list[int] | None = None, **kwargs: Any) -> None:
    r"""
    Load specific bands of the dataset, using rasterio.read().
    Ensure that self.data.ndim = 3 for ease of use (needed e.g. in show)

    :param bands: The band(s) to load. Note that rasterio begins counting at 1, not 0.

    \*\*kwargs: any additional arguments to rasterio.io.DatasetReader.read.
    Useful ones are:
    .. hlist::
    * out_shape : to load a subsampled version
    * window : to load a cropped version
    * resampling : to set the resampling algorithm
    """
    if bands is None:
        # Default: read every band of the dataset.
        self._data = self.ds.read(masked=self._masked, **kwargs)
        bands = self.ds.indexes
    else:
        self._data = self.ds.read(bands, masked=self._masked, **kwargs)
        # Fix: removed the dead no-op "if type(bands) is int: bands = bands"
        # that previously followed this read.

    # rasterio returns a 2-D array for a single band: add a band axis so
    # self.data is always (bands, height, width).
    if self._data.ndim == 2:
        self._data = np.expand_dims(self._data, 0)

    self.nbands = self._data.shape[0]
    self.is_loaded = True
    self.bands = bands
def crop(
    self: RasterType,
    cropGeom: Raster | Vector | list[float] | tuple[float, ...],
    mode: str = "match_pixel",
    inplace: bool = True,
) -> RasterType | None:
    """
    Crop the Raster to a given extent.

    :param cropGeom: Geometry to crop raster to, as either a Raster object, a Vector object, or a list of
        coordinates. If cropGeom is a Raster, crop() will crop to the boundary of the raster as returned by
        Raster.ds.bounds. If cropGeom is a Vector, crop() will crop to the bounding geometry. If cropGeom is a
        list of coordinates, the order is assumed to be [xmin, ymin, xmax, ymax].

    :param mode: one of 'match_pixel' (default) or 'match_extent'. 'match_pixel' will preserve the original pixel
        resolution, cropping to the extent that most closely aligns with the current coordinates. 'match_extent'
        will match the extent exactly, adjusting the pixel resolution to fit the extent.

    :param inplace: Update the raster inplace or return copy.

    :returns: None if inplace=True and a new Raster if inplace=False
    """
    assert mode in [
        "match_extent",
        "match_pixel",
    ], "mode must be one of 'match_pixel', 'match_extent'"
    # Resolve the crop extent from the supplied geometry.
    if isinstance(cropGeom, (Raster, Vector)):
        xmin, ymin, xmax, ymax = cropGeom.bounds
    elif isinstance(cropGeom, (list, tuple)):
        xmin, ymin, xmax, ymax = cropGeom
    else:
        raise ValueError("cropGeom must be a Raster, Vector, or list of coordinates.")
    meta = copy.copy(self.ds.meta)
    if mode == "match_pixel":
        # Mask the dataset to the bounding polygon, keeping the original
        # pixel grid (all_touched keeps partially covered pixels).
        crop_bbox = Polygon([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)])
        crop_img, tfm = rio.mask.mask(self.ds, [crop_bbox], crop=True, all_touched=True)
        meta.update(
            {
                "height": crop_img.shape[1],
                "width": crop_img.shape[2],
                "transform": tfm,
            }
        )
    else:
        # 'match_extent': keep the pixel count of the requested window and
        # reproject the data onto a grid that exactly spans the bounds.
        window = rio.windows.from_bounds(xmin, ymin, xmax, ymax, transform=self.transform)
        new_height = int(window.height)
        new_width = int(window.width)
        new_tfm = rio.transform.from_bounds(xmin, ymin, xmax, ymax, width=new_width, height=new_height)
        # NOTE(review): self.data.dtype is read here even when the raster was
        # opened with load_data=False (self.data is None) — confirm crop in
        # 'match_extent' mode requires loaded data.
        if self.is_loaded:
            new_img = np.zeros((self.nbands, new_height, new_width), dtype=self.data.dtype)
        else:
            new_img = np.zeros((self.count, new_height, new_width), dtype=self.data.dtype)
        crop_img, tfm = rio.warp.reproject(
            self.data,
            new_img,
            src_transform=self.transform,
            dst_transform=new_tfm,
            src_crs=self.crs,
            dst_crs=self.crs,
        )
        meta.update({"height": new_height, "width": new_width, "transform": tfm})
    if inplace:
        self._update(crop_img, meta)
        return None
    else:
        return self.from_array(crop_img, meta["transform"], meta["crs"], meta["nodata"])
def reproject(
    self: RasterType,
    dst_ref: RasterType | rio.io.Dataset | str | None = None,
    dst_crs: CRS | str | None = None,
    dst_size: tuple[int, int] | None = None,
    dst_bounds: dict[str, float] | rio.coords.BoundingBox | None = None,
    dst_res: float | abc.Iterable[float] | None = None,
    dst_nodata: int | float | None = None,
    src_nodata: int | float | None = None,
    dtype: np.dtype | None = None,
    resampling: Resampling | str = Resampling.nearest,
    silent: bool = False,
    n_threads: int = 0,
    memory_limit: int = 64,
) -> RasterType:
    """
    Reproject raster to a specified grid.

    The output grid can either be given by a reference Raster (using `dst_ref`),
    or by manually providing the output CRS (`dst_crs`), dimensions (`dst_size`),
    resolution (with `dst_size`) and/or bounds (`dst_bounds`).

    Any resampling algorithm implemented in rasterio can be used.

    Currently: requires image data to have been loaded into memory.
    NOT SUITABLE for large datasets yet! This requires work...

    To reproject a Raster with different source bounds, first run Raster.crop.

    :param dst_ref: a reference raster. If set will use the attributes of this
        raster for the output grid. Can be provided as Raster/rasterio data set or as path to the file.

    :param crs: Specify the Coordinate Reference System to reproject to. If dst_ref not set, defaults to self.crs.

    :param dst_size: Raster size to write to (x, y). Do not use with dst_res.

    :param dst_bounds: a BoundingBox object or a dictionary containing\
        left, bottom, right, top bounds in the source CRS.

    :param dst_res: Pixel size in units of target CRS. Either 1 value or (xres, yres). Do not use with dst_size.

    :param dst_nodata: nodata value of the destination. If set to None, will use the same as source, \
        and if source is None, will use GDAL's default.

    :param src_nodata: nodata value of the source. If set to None, will read from the metadata.

    :param resampling: A rasterio Resampling method

    :param silent: If True, will not print warning statements

    :param n_threads: The number of worker threads. Defaults to (os.cpu_count() - 1).

    :param memory_limit: The warp operation memory limit in MB. Larger values may perform better.

    :returns: Raster
    """
    # Check that either dst_ref or dst_crs is provided
    if dst_ref is not None:
        if dst_crs is not None:
            raise ValueError("Either of `dst_ref` or `dst_crs` must be set. Not both.")
    else:
        # In case dst_res or dst_size is set, use original CRS
        if dst_crs is None:
            dst_crs = self.crs
    # Case a raster is provided as reference
    if dst_ref is not None:
        # Check that dst_ref type is either str, Raster or rasterio data set
        # Preferably use Raster instance to avoid rasterio data set to remain open. See PR #45
        if isinstance(dst_ref, Raster):
            ds_ref = dst_ref
        elif isinstance(dst_ref, rio.io.MemoryFile) or isinstance(dst_ref, rasterio.io.DatasetReader):
            ds_ref = dst_ref
        elif isinstance(dst_ref, str):
            assert os.path.exists(dst_ref), "Reference raster does not exist"
            ds_ref = Raster(dst_ref, load_data=False)
        else:
            raise ValueError(
                "Type of dst_ref not understood, must be path to file (str), Raster or rasterio data set"
            )
        # Read reprojecting params from ref raster: the reference fully
        # determines the output CRS, size and bounds.
        dst_crs = ds_ref.crs
        dst_size = (ds_ref.width, ds_ref.height)
        dst_res = None
        dst_bounds = ds_ref.bounds
    else:
        # Determine target CRS
        dst_crs = CRS.from_user_input(dst_crs)
    # Set output dtype
    if dtype is None:
        # Warning: this will not work for multiple bands with different dtypes
        dtype = self.dtypes[0]
    # Set source nodata if provided
    if src_nodata is None:
        src_nodata = self.nodata
    # Set destination nodata if provided. This is needed in areas not covered by the input data.
    # If None, will use GeoUtils' default, as rasterio's default is unknown, hence cannot be handled properly.
    if dst_nodata is None:
        dst_nodata = self.nodata
        if dst_nodata is None:
            dst_nodata = _default_ndv(dtype)
    # Basic reprojection options, needed in all cases.
    reproj_kwargs = {
        "src_transform": self.transform,
        "src_crs": self.crs,
        "dst_crs": dst_crs,
        "resampling": resampling if isinstance(resampling, Resampling) else _resampling_from_str(resampling),
        "src_nodata": src_nodata,
        "dst_nodata": dst_nodata,
    }
    # If dst_ref is None, check other input arguments
    if dst_size is not None and dst_res is not None:
        raise ValueError("dst_size and dst_res both specified. Specify only one.")
    # Create a BoundingBox if required
    if dst_bounds is not None:
        if not isinstance(dst_bounds, rio.coords.BoundingBox):
            dst_bounds = rio.coords.BoundingBox(
                dst_bounds["left"],
                dst_bounds["bottom"],
                dst_bounds["right"],
                dst_bounds["top"],
            )
    # Determine target raster size/resolution
    dst_transform = None
    if dst_res is not None:
        if dst_bounds is None:
            # Let rasterio determine the maximum bounds of the new raster.
            reproj_kwargs.update({"dst_resolution": dst_res})
        else:
            # Bounds specified. First check if xres and yres are different.
            if isinstance(dst_res, tuple):
                xres = dst_res[0]
                yres = dst_res[1]
            else:
                xres = dst_res
                yres = dst_res
            # Calculate new raster size which ensures that pixels have
            # precisely the resolution specified.
            dst_width = np.ceil((dst_bounds.right - dst_bounds.left) / xres)
            dst_height = np.ceil(np.abs(dst_bounds.bottom - dst_bounds.top) / yres)
            dst_size = (int(dst_width), int(dst_height))
            # As a result of precise pixel size, the destination bounds may
            # have to be adjusted.
            x1 = dst_bounds.left + (xres * dst_width)
            y1 = dst_bounds.top - (yres * dst_height)
            dst_bounds = rio.coords.BoundingBox(top=dst_bounds.top, left=dst_bounds.left, bottom=y1, right=x1)
    # Set output shape (Note: dst_size is (ncol, nrow))
    if dst_size is not None:
        dst_shape = (self.count, dst_size[1], dst_size[0])
        dst_data = np.ones(dst_shape, dtype=dtype)
        reproj_kwargs.update({"destination": dst_data})
    else:
        dst_shape = (self.count, self.height, self.width)
    # If dst_bounds is set, will enforce dst_bounds
    if dst_bounds is not None:
        if dst_size is None:
            # Calculate new raster size which ensures that pixels resolution is as close as possible to original
            # Raster size is increased by up to one pixel if needed
            # NOTE(review): rasterio documents res as (x, y); this unpack
            # looks swapped (yres, xres) — harmless for square pixels,
            # confirm behavior for rectangular ones.
            yres, xres = self.res
            dst_width = int(np.ceil((dst_bounds.right - dst_bounds.left) / xres))
            dst_height = int(np.ceil(np.abs(dst_bounds.bottom - dst_bounds.top) / yres))
            dst_size = (dst_width, dst_height)
        # Calculate associated transform
        dst_transform = rio.transform.from_bounds(*dst_bounds, width=dst_size[0], height=dst_size[1])
        # Specify the output bounds and shape, let rasterio handle the rest
        reproj_kwargs.update({"dst_transform": dst_transform})
        # NOTE(review): this destination is 2-D, unlike the 3-D
        # (count, rows, cols) array allocated above — confirm multi-band
        # reprojection with dst_bounds set behaves as intended.
        dst_data = np.ones((dst_size[1], dst_size[0]), dtype=dtype)
        reproj_kwargs.update({"destination": dst_data})
    # Check that reprojection is actually needed
    # Caution, dst_size is (width, height) while shape is (height, width)
    if all(
        [
            (dst_transform == self.transform) or (dst_transform is None),
            (dst_crs == self.crs) or (dst_crs is None),
            (dst_size == self.shape[::-1]) or (dst_size is None),
            (dst_res == self.res) or (dst_res == self.res[0] == self.res[1]) or (dst_res is None),
        ]
    ):
        if (dst_nodata == self.nodata) or (dst_nodata is None):
            if not silent:
                warnings.warn("Output projection, bounds and size are identical -> return self (not a copy!)")
            return self
        elif dst_nodata is not None:
            if not silent:
                warnings.warn(
                    "Only nodata is different, consider using the 'set_ndv()' method instead'\
                    ' -> return self (not a copy!)"
                )
                return self
    # Set the performance keywords
    if n_threads == 0:
        # Default to cpu count minus one. If the cpu count is undefined, num_threads will be 1
        cpu_count = os.cpu_count() or 2
        num_threads = cpu_count - 1
    else:
        num_threads = n_threads
    reproj_kwargs.update({"num_threads": num_threads, "warp_mem_limit": memory_limit})
    # Currently reprojects all in-memory bands at once.
    # This may need to be improved to allow reprojecting from-disk.
    # See rio.warp.reproject docstring for more info.
    dst_data, dst_transformed = rio.warp.reproject(self.data, **reproj_kwargs)
    # Enforce output type
    dst_data = dst_data.astype(dtype)
    # Check for funny business.
    if dst_transform is not None:
        assert dst_transform == dst_transformed
    # Write results to a new Raster.
    dst_r = self.from_array(dst_data, dst_transformed, dst_crs, dst_nodata)
    return dst_r
def shift(self, xoff: float, yoff: float) -> None:
    """
    Translate the Raster by a given x,y offset.

    :param xoff: Translation x offset.

    :param yoff: Translation y offset.
    """
    # Data must be loaded, as _update() rewrites the in-memory dataset.
    assert self.is_loaded, "Data must be loaded, use self.load"

    # Shift the origin of the affine transform, keeping the remaining
    # coefficients (resolution and rotation/shear terms) untouched.
    dx, b, xmin, d, dy, ymax = list(self.transform)[:6]
    shifted = rio.transform.Affine(dx, b, xmin + xoff, d, dy, ymax + yoff)

    meta = self.ds.meta
    meta.update({"transform": shifted})
    self._update(metadata=meta)
    def set_ndv(self, ndv: abc.Sequence[int | float] | int | float, update_array: bool = False) -> None:
        """
        Set new nodata values for bands (and possibly update arrays).
        :param ndv: nodata values (a single value, or one per band)
        :param update_array: change the existing nodata in array
        """
        # Normalize ndv: a scalar on a multi-band raster is broadcast to all bands;
        # a sequence on a single-band raster collapses to its first value.
        if not isinstance(ndv, (abc.Sequence, int, float, np.integer, np.floating)):
            raise ValueError("Type of ndv not understood, must be list or float or int")
        elif (isinstance(ndv, (int, float, np.integer, np.floating))) and self.count > 1:
            print("Several raster band: using nodata value for all bands")
            ndv = [ndv] * self.count
        elif isinstance(ndv, abc.Sequence) and self.count == 1:
            print("Only one raster band: using first nodata value provided")
            ndv = list(ndv)[0]
        # Check that ndv has same length as number of bands in self
        if isinstance(ndv, abc.Sequence):
            if len(ndv) != self.count:
                raise ValueError(f"Length of ndv ({len(ndv)}) incompatible with number of bands ({self.count})")
            # Check that ndv value is compatible with dtype
            for k in range(len(ndv)):
                if not rio.dtypes.can_cast_dtype(ndv[k], self.dtypes[k]):
                    raise ValueError(f"ndv value {ndv[k]} incompatible with self.dtype {self.dtypes[k]}")
        else:
            if not rio.dtypes.can_cast_dtype(ndv, self.dtypes[0]):
                raise ValueError(f"ndv value {ndv} incompatible with self.dtype {self.dtypes[0]}")
        meta = self.ds.meta
        imgdata = self.data
        pre_ndv = self.nodata
        meta.update({"nodata": ndv})
        if update_array and pre_ndv is not None:
            # nodata values are specific to each band
            # let's do a loop then
            if self.count == 1:
                if np.ma.isMaskedArray(imgdata):
                    # Masked array: write the new nodata value under the mask
                    imgdata.data[imgdata.mask] = ndv  # type: ignore
                else:
                    # Plain array: replace occurrences of the previous nodata value
                    ind = imgdata[:] == pre_ndv
                    imgdata[ind] = ndv
            else:
                # At this point, ndv is definitely iterable, but mypy doesn't understand that.
                for i in range(self.count):
                    if np.ma.isMaskedArray(imgdata):
                        imgdata.data[i, imgdata.mask[i, :]] = ndv[i]  # type: ignore
                    else:
                        ind = imgdata[i, :] == pre_ndv[i]  # type: ignore
                        imgdata[i, ind] = ndv[i]  # type: ignore
        else:
            # No array rewrite requested (or no previous nodata): only metadata changes
            imgdata = None
        self._update(metadata=meta, imgdata=imgdata)
def save(
self,
filename: str | IO[bytes],
driver: str = "GTiff",
dtype: np.dtype | None = None,
compress: str = "deflate",
tiled: bool = False,
blank_value: None | int | float = None,
co_opts: dict[str, str] | None = None,
metadata: dict[str, Any] | None = None,
gcps: list[tuple[float, ...]] | None = None,
gcps_crs: CRS | None = None,
) -> None:
"""Write the Raster to a geo-referenced file.
Given a filename to save the Raster to, create a geo-referenced file
on disk which contains the contents of self.data.
If blank_value is set to an integer or float, then instead of writing
the contents of self.data to disk, write this provided value to every
pixel instead.
:param filename: Filename to write the file to.
:param driver: the 'GDAL' driver to use to write the file as.
:param dtype: Data Type to write the image as (defaults to dtype of image data)
:param compress: Compression type. Defaults to 'deflate' (equal to GDALs: COMPRESS=DEFLATE)
:param tiled: Whether to write blocks in tiles instead of strips. Improves read performance on large files,
but increases file size.
:param blank_value: Use to write an image out with every pixel's value
corresponding to this value, instead of writing the image data to disk.
:param co_opts: GDAL creation options provided as a dictionary,
e.g. {'TILED':'YES', 'COMPRESS':'LZW'}
:param metadata: pairs of metadata key, value
:param gcps: list of gcps, each gcp being [row, col, x, y, (z)]
:param gcps_crs: the CRS of the GCPS (Default is None)
:returns: None.
"""
dtype = self.data.dtype if dtype is None else dtype
if co_opts is None:
co_opts = {}
if metadata is None:
metadata = {}
if gcps is None:
gcps = []
if (self.data is None) & (blank_value is None):
raise AttributeError("No data loaded, and alternative blank_value not set.")
elif blank_value is not None:
if isinstance(blank_value, int) | isinstance(blank_value, float):
save_data = np.zeros((self.ds.count, self.ds.height, self.ds.width))
save_data[:, :, :] = blank_value
else:
raise ValueError("blank_values must be one of int, float (or None).")
else:
save_data = self.data
with rio.open(
filename,
"w",
driver=driver,
height=self.ds.height,
width=self.ds.width,
count=self.ds.count,
dtype=save_data.dtype,
crs=self.ds.crs,
transform=self.ds.transform,
nodata=self.ds.nodata,
compress=compress,
tiled=tiled,
**co_opts,
) as dst:
dst.write(save_data)
# Add metadata (tags in rio)
dst.update_tags(**metadata)
# Save GCPs
if not isinstance(gcps, list):
raise ValueError("gcps must be a list")
if len(gcps) > 0:
rio_gcps = []
for gcp in gcps:
rio_gcps.append(rio.control.GroundControlPoint(*gcp))
# Warning: this will overwrite the transform
if dst.transform != rio.transform.Affine(1, 0, 0, 0, 1, 0):
warnings.warn(
"A geotransform previously set is going \
to be cleared due to the setting of GCPs."
)
dst.gcps = (rio_gcps, gcps_crs)
def to_xarray(self, name: str | None = None) -> rioxarray.DataArray:
"""Convert this Raster into an xarray DataArray using rioxarray.
This method uses rioxarray to generate a DataArray with associated
geo-referencing information.
See the documentation of rioxarray and xarray for more information on
the methods and attributes of the resulting DataArray.
:param name: Set the name of the DataArray.
:returns: xarray DataArray
"""
if not _has_rioxarray:
raise ImportError("rioxarray is required for this functionality.")
xr = rioxarray.open_rasterio(self.ds)
if name is not None:
xr.name = name
return xr
def get_bounds_projected(self, out_crs: CRS, densify_pts_max: int = 5000) -> rio.coords.BoundingBox:
"""
Return self's bounds in the given CRS.
:param out_crs: Output CRS
:param densify_pts_max: Maximum points to be added between image corners to account for non linear edges.
Reduce if time computation is really critical (ms) or increase if extent is \
not accurate enough.
"""
# Max points to be added between image corners to account for non linear edges
# rasterio's default is a bit low for very large images
# instead, use image dimensions, with a maximum of 50000
densify_pts = min(max(self.width, self.height), densify_pts_max)
# Calculate new bounds
left, bottom, right, top = self.bounds
new_bounds = rio.warp.transform_bounds(self.crs, out_crs, left, bottom, right, top, densify_pts)
return new_bounds
def intersection(self, rst: str | Raster) -> tuple[float, float, float, float]:
"""
Returns the bounding box of intersection between this image and another.
If the rasters have different projections, the intersection extent is given in self's projection system.
:param rst : path to the second image (or another Raster instance)
:returns: extent of the intersection between the 2 images \
(xmin, ymin, xmax, ymax) in self's coordinate system.
"""
from geoutils import projtools
# If input rst is string, open as Raster
if isinstance(rst, str):
rst = Raster(rst, load_data=False)
# Check if both files have the same projection
# To be implemented
same_proj = True
# Find envelope of rasters' intersections
poly1 = projtools.bounds2poly(self.bounds)
# poly1.AssignSpatialReference(self.crs)
# Create a polygon of the envelope of the second image
poly2 = projtools.bounds2poly(rst.bounds)
# poly2.AssignSpatialReference(rst.srs)
# If coordinate system is different, reproject poly2 into poly1
if not same_proj:
raise NotImplementedError()
# Compute intersection envelope
intersect = poly1.intersection(poly2)
extent: tuple[float, float, float, float] = intersect.envelope.bounds
# check that intersection is not void
if intersect.area == 0:
warnings.warn("Warning: Intersection is void")
return (0.0, 0.0, 0.0, 0.0)
return extent
    def show(
        self,
        band: int | None = None,
        cmap: matplotlib.colors.Colormap | str | None = None,
        vmin: float | int | None = None,
        vmax: float | int | None = None,
        cb_title: str | None = None,
        add_cb: bool = True,
        ax: matplotlib.axes.Axes | None = None,
        **kwargs: Any,
    ) -> None | tuple[matplotlib.axes.Axes, matplotlib.colors.Colormap]:
        r"""Show/display the image, with axes in projection of image.
        This method is a wrapper to rasterio.plot.show. Any \*\*kwargs which
        you give this method will be passed to rasterio.plot.show.
        :param band: which band to plot, from 0 to self.count-1 (default is all)
        :param cmap: The figure's colormap. Default is plt.rcParams['image.cmap']
        :param vmin: Colorbar minimum value. Default is data min.
        :param vmax: Colorbar maximum value. Default is data max.
        :param cb_title: Colorbar label. Default is None.
        :param add_cb: Set to True to display a colorbar. Default is True.
        :param ax: A figure ax to be used for plotting. If None, will create default figure and axes,\
            and plot figure directly.
        :returns: if ax is not None, returns (ax, cbar) where cbar is the colorbar (None if add_cb is False)
        You can also pass in \*\*kwargs to be used by the underlying imshow or
        contour methods of matplotlib. The example below shows provision of
        a kwarg for rasterio.plot.show, and a kwarg for matplotlib as well::
            import matplotlib.pyplot as plt
            ax1 = plt.subplot(111)
            mpl_kws = {'cmap':'seismic'}
            myimage.show(ax=ax1, **mpl_kws)
        """
        # If data is not loaded, need to load it
        if not self.is_loaded:
            self.load()
        # Check if specific band selected, or take all
        # rshow takes care of image dimensions
        # if self.count=3 (4) => plotted as RGB(A)
        if band is None:
            band = np.arange(self.count)
        elif isinstance(band, int):
            if band >= self.count:
                raise ValueError(f"band must be in range 0-{self.count - 1:d}")
            pass
        else:
            raise ValueError("band must be int or None")
        # If multiple bands (RGB), cbar does not make sense
        # NOTE(review): when band is None it becomes an np.ndarray, which is not an
        # abc.Sequence, so this check only triggers for list/tuple inputs — confirm intent.
        if isinstance(band, abc.Sequence):
            if len(band) > 1:
                add_cb = False
        # Create colorbar
        # Use rcParam default
        if cmap is None:
            cmap = plt.get_cmap(plt.rcParams["image.cmap"])
        elif isinstance(cmap, str):
            cmap = plt.get_cmap(cmap)
        elif isinstance(cmap, matplotlib.colors.Colormap):
            # Already a Colormap instance: use as-is
            pass
        # Set colorbar min/max values (needed for ScalarMappable)
        if vmin is None:
            vmin = np.nanmin(self.data[band, :, :])
        if vmax is None:
            vmax = np.nanmax(self.data[band, :, :])
        # Make sure they are numbers, to avoid mpl error
        try:
            vmin = float(vmin)
            vmax = float(vmax)
        except ValueError:
            raise ValueError("vmin or vmax cannot be converted to float")
        # Create axes
        if ax is None:
            fig, ax0 = plt.subplots()
        elif isinstance(ax, matplotlib.axes.Axes):
            ax0 = ax
            fig = ax.figure
        else:
            raise ValueError("ax must be a matplotlib.axes.Axes instance or None")
        # Use data array directly, as rshow on self.ds will re-load data
        rshow(
            self.data[band, :, :],
            transform=self.transform,
            ax=ax0,
            cmap=cmap,
            vmin=vmin,
            vmax=vmax,
            **kwargs,
        )
        # Add colorbar
        if add_cb:
            cbar = fig.colorbar(
                cm.ScalarMappable(norm=colors.Normalize(vmin=vmin, vmax=vmax), cmap=cmap),
                ax=ax0,
            )
            if cb_title is not None:
                cbar.set_label(cb_title)
        else:
            cbar = None
        # If ax not set, figure should be plotted directly
        if ax is None:
            plt.show()
            return None
        return ax0, cbar
def value_at_coords(
self,
x: float | list[float],
y: float | list[float],
latlon: bool = False,
band: int | None = None,
masked: bool = False,
window: int | None = None,
return_window: bool = False,
boundless: bool = True,
reducer_function: Callable[[np.ndarray], float] = np.ma.mean,
) -> Any:
""" Extract the pixel value(s) at the nearest pixel(s) from the specified coordinates.
Extract pixel value of each band in dataset at the specified
coordinates. Alternatively, if band is specified, return only that
band's pixel value.
Optionally, return mean of pixels within a square window.
:param x: x (or longitude) coordinate.
:param y: y (or latitude) coordinate.
:param latlon: Set to True if coordinates provided as longitude/latitude.
:param band: the band number to extract from.
:param masked: If `masked` is `True` the return value will be a masked
array. Otherwise (the default) the return value will be a
regular array.
:param window: expand area around coordinate to dimensions \
window * window. window must be odd.
:param return_window: If True when window=int, returns (mean,array) \
where array is the dataset extracted via the specified window size.
:param boundless: If `True`, windows that extend beyond the dataset's extent
are permitted and partially or completely filled arrays (with self.nodata) will
be returned as appropriate.
:param reducer_function: a function to apply to the values in window.
:returns: When called on a Raster or with a specific band \
set, return value of pixel.
:returns: If multiple band Raster and the band is not specified, a \
dictionary containing the value of the pixel in each band.
:returns: In addition, if return_window=True, return tuple of \
(values, arrays)
:examples:
>>> self.value_at_coords(-48.125,67.8901,window=3)
Returns mean of a 3*3 window:
v v v \
v c v | = float(mean)
v v v /
(c = provided coordinate, v= value of surrounding coordinate)
"""
value: float | dict[int, float] | tuple[float | dict[int, float] | tuple[list[float], np.ndarray] | Any]
if window is not None:
if window % 2 != 1:
raise ValueError("Window must be an odd number.")
def format_value(value: Any) -> Any:
"""Check if valid value has been extracted"""
if type(value) in [np.ndarray, np.ma.core.MaskedArray]:
if window is not None:
value = reducer_function(value.flatten())
else:
value = value[0, 0]
else:
value = None
return value
# Need to implement latlon option later
if latlon:
from geoutils import projtools
x, y = projtools.reproject_from_latlon((y, x), self.crs)
# Convert coordinates to pixel space
row, col = self.ds.index(x, y, op=round)
# Decide what pixel coordinates to read:
if window is not None:
half_win = (window - 1) / 2
# Subtract start coordinates back to top left of window
col = col - half_win
row = row - half_win
# Offset to read to == window
width = window
height = window
else:
# Start reading at col,row and read 1px each way
width = 1
height = 1
# Make sure coordinates are int
col = int(col)
row = int(row)
# Create rasterio's window for reading
window = rio.windows.Window(col, row, width, height)
# Get values for all bands
if band is None:
# Deal with single band case
if self.nbands == 1:
data = self.ds.read(
window=window,
fill_value=self.nodata,
boundless=boundless,
masked=masked,
)
value = format_value(data)
win = data
# Deal with multiband case
else:
value = {}
win = {}
for b in self.indexes:
data = self.ds.read(
window=window,
fill_value=self.nodata,
boundless=boundless,
indexes=b,
masked=masked,
)
val = format_value(data)
# Store according to GDAL band numbers
value[b] = val
win[b] = data
# Or just for specified band in multiband case
elif isinstance(band, int):
data = self.ds.read(
window=window,
fill_value=self.nodata,
boundless=boundless,
indexes=band,
masked=masked,
)
value = format_value(data)
else:
raise ValueError("Value provided for band was not int or None.")
if return_window:
return (value, win)
return value
def coords(self, offset: str = "corner", grid: bool = True) -> tuple[np.ndarray, np.ndarray]:
"""
Get x,y coordinates of all pixels in the raster.
:param offset: coordinate type. If 'corner', returns corner coordinates of pixels.
If 'center', returns center coordinates. Default is corner.
:param grid: Return grid
:returns x,y: numpy arrays corresponding to the x,y coordinates of each pixel.
"""
assert offset in [
"corner",
"center",
], f"ctype is not one of 'corner', 'center': {offset}"
dx = self.res[0]
dy = self.res[1]
xx = np.linspace(self.bounds.left, self.bounds.right, self.width + 1)[:: int(np.sign(dx))]
yy = np.linspace(self.bounds.bottom, self.bounds.top, self.height + 1)[:: int(np.sign(dy))]
if offset == "center":
xx += dx / 2 # shift by half a pixel
yy += dy / 2
if grid:
meshgrid: tuple[np.ndarray, np.ndarray] = np.meshgrid(xx[:-1], yy[:-1]) # drop the last element
return meshgrid
else:
return xx[:-1], yy[:-1]
    def xy2ij(
        self,
        x: np.ndarray,
        y: np.ndarray,
        op: type = np.float32,
        area_or_point: str | None = None,
        precision: float | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        """
        Return row, column indices for a given x,y coordinate pair.
        :param x: x coordinates
        :param y: y coordinates
        :param op: operator to calculate index
        :param precision: precision for rio.Dataset.index
        :param area_or_point: shift index according to GDAL AREA_OR_POINT attribute (None) or \
        force position ('Point' or 'Area') of the interpretation of where the raster value \
        corresponds to in the pixel ('Area' = lower left or 'Point' = center)
        :returns i, j: indices of x,y in the image.
        """
        # Integer operators would round away the sub-pixel position needed below
        if op not in [np.float32, np.float64, float]:
            raise UserWarning(
                "Operator is not of type float: rio.Dataset.index might "
                "return unreliable indexes due to rounding issues."
            )
        if area_or_point not in [None, "Area", "Point"]:
            raise ValueError(
                'Argument "area_or_point" must be either None (falls back to GDAL metadata), "Point" or "Area".'
            )
        i, j = self.ds.index(x, y, op=op, precision=precision)
        # # necessary because rio.Dataset.index does not return abc.Iterable for a single point
        if not isinstance(i, abc.Iterable):
            # Wrap scalars into length-1 arrays for a uniform return type
            i, j = (
                np.asarray(
                    [
                        i,
                    ]
                ),
                np.asarray(
                    [
                        j,
                    ]
                ),
            )
        else:
            i, j = (np.asarray(i), np.asarray(j))
        # AREA_OR_POINT GDAL attribute, i.e. does the value refer to the upper left corner (AREA) or
        # the center of pixel (POINT)
        # This has no influence on georeferencing, it's only about the interpretation of the raster values,
        # and thus only affects sub-pixel interpolation
        # if input is None, default to GDAL METADATA
        if area_or_point is None:
            area_or_point = self.ds.tags()["AREA_OR_POINT"]
        if area_or_point == "Point":
            if not isinstance(i.flat[0], np.floating):
                raise ValueError(
                    "Operator must return np.floating values to perform AREA_OR_POINT subpixel index shifting"
                )
            # if point, shift index by half a pixel
            i += 0.5
            j += 0.5
        # otherwise, leave as is
        return i, j
def ij2xy(self, i: np.ndarray, j: np.ndarray, offset: str = "center") -> tuple[np.ndarray, np.ndarray]:
"""
Return x,y coordinates for a given row, column index pair.
:param i: row (i) index of pixel.
:param j: column (j) index of pixel.
:param offset: return coordinates as "corner" or "center" of pixel
:returns x, y: x,y coordinates of i,j in reference system.
"""
x, y = self.ds.xy(i, j, offset=offset)
return x, y
def outside_image(self, xi: np.ndarray, yj: np.ndarray, index: bool = True) -> bool:
"""
Check whether a given point falls outside of the raster.
:param xi: Indices (or coordinates) of x direction to check.
:param yj: Indices (or coordinates) of y direction to check.
:param index: Interpret ij as raster indices (default is True). If False, assumes ij is coordinates.
:returns is_outside: True if ij is outside of the image.
"""
if not index:
xi, xj = self.xy2ij(xi, yj)
if np.any(np.array((xi, yj)) < 0):
return True
elif xi > self.width or yj > self.height:
return True
else:
return False
    def interp_points(
        self,
        pts: np.ndarray,
        input_latlon: bool = False,
        mode: str = "linear",
        band: int = 1,
        area_or_point: str | None = None,
        **kwargs: Any,
    ) -> np.ndarray:
        """
        Interpolate raster values at a given point, or sets of points.
        :param pts: Point(s) at which to interpolate raster value. If points fall outside of image,
        value returned is nan. Shape should be (N,2)'
        :param input_latlon: Whether the input is in latlon, unregarding of Raster CRS
        :param mode: One of 'linear', 'cubic', or 'quintic'. Determines what type of spline is
        used to interpolate the raster value at each point. For more information, see
        scipy.interpolate.interp2d. Default is linear.
        :param band: Raster band to use
        :param area_or_point: shift index according to GDAL AREA_OR_POINT attribute (None) or force position\
        ('Point' or 'Area') of the interpretation of where the raster value corresponds to in the pixel\
        ('Area' = lower left or 'Point' = center)
        :returns rpts: Array of raster value(s) for the given points.
        """
        # NOTE(review): `mode` is validated here but the actual interpolation is delegated
        # to map_coordinates via **kwargs — confirm callers pass an `order` matching `mode`.
        assert mode in [
            "mean",
            "linear",
            "cubic",
            "quintic",
            "nearest",
        ], "mode must be mean, linear, cubic, quintic or nearest."
        # get coordinates
        x, y = list(zip(*pts))
        # if those are in latlon, convert to Raster crs
        if input_latlon:
            init_crs = pyproj.CRS(4326)
            dest_crs = pyproj.CRS(self.crs)
            transformer = pyproj.Transformer.from_crs(init_crs, dest_crs)
            x, y = transformer.transform(x, y)
        # Convert coordinates to fractional pixel indices (sub-pixel precision)
        i, j = self.xy2ij(x, y, op=np.float32, area_or_point=area_or_point)
        # Flag points falling outside the image so their result can be set to NaN
        ind_invalid = np.vectorize(lambda k1, k2: self.outside_image(k1, k2, index=True))(j, i)
        rpts = map_coordinates(self.data[band - 1, :, :].astype(np.float32), [i, j], **kwargs)
        rpts = np.array(rpts, dtype=np.float32)
        rpts[np.array(ind_invalid)] = np.nan
        return rpts
        # #TODO: right now it's a loop... could add multiprocessing parallel loop outside,
        # # but such a method probably exists already within scipy/other interpolation packages?
        # for pt in pts:
        #     i,j = self.xy2ij(pt[0],pt[1])
        #     if self.outside_image(i,j, index=True):
        #         rpts.append(np.nan)
        #         continue
        #     else:
        #         x = xx[j - nsize:j + nsize + 1]
        #         y = yy[i - nsize:i + nsize + 1]
        #
        #         #TODO: read only that window?
        #         z = self.data[band-1, i - nsize:i + nsize + 1, j - nsize:j + nsize + 1]
        #         if mode in ['linear', 'cubic', 'quintic', 'nearest']:
        #             X, Y = np.meshgrid(x, y)
        #             try:
        #                 zint = griddata((X.flatten(), Y.flatten()), z.flatten(), list(pt), method=mode)[0]
        #             except:
        #                 #TODO: currently fails when dealing with the edges
        #                 print('Interpolation failed for:')
        #                 print(pt)
        #                 print(i,j)
        #                 print(x)
        #                 print(y)
        #                 print(z)
        #                 zint = np.nan
        #         else:
        #             zint = np.nanmean(z.flatten())
        #         rpts.append(zint)
        # rpts = np.array(rpts)
def split_bands(self: RasterType, copy: bool = False, subset: list[int] | int | None = None) -> list[Raster]:
"""
Split the bands into separate copied rasters.
:param copy: Copy the bands or return slices of the original data.
:param subset: Optional. A subset of band indices to extract. Defaults to all.
:returns: A list of Rasters for each band.
"""
bands: list[Raster] = []
if subset is None:
indices = list(range(self.nbands))
elif isinstance(subset, int):
indices = [subset]
elif isinstance(subset, list):
indices = subset
else:
raise ValueError(f"'subset' got invalid type: {type(subset)}. Expected list[int], int or None")
if copy:
for band_n in indices:
# Generate a new Raster from a copy of the band's data
bands.append(
self.from_array(
self.data[band_n, :, :],
transform=self.transform,
crs=self.crs,
nodata=self.nodata,
)
)
else:
for band_n in indices:
# Generate a new instance with the same underlying values.
raster = Raster(self)
# Set the data to a slice of the original array
raster._data = self.data[band_n, :, :].reshape((1,) + self.data.shape[1:])
# Set the nbands
raster.nbands = 1
bands.append(raster)
return bands
    @overload
    def to_points(
        self, subset: float | int, as_frame: Literal[True], pixel_offset: Literal["center", "corner"]
    ) -> gpd.GeoDataFrame:
        # Typing overload only: as_frame=True yields a GeoDataFrame
        ...
    @overload
    def to_points(
        self, subset: float | int, as_frame: Literal[False], pixel_offset: Literal["center", "corner"]
    ) -> np.ndarray:
        # Typing overload only: as_frame=False yields an ndarray
        ...
def to_points(
self, subset: float | int = 1, as_frame: bool = False, pixel_offset: Literal["center", "corner"] = "center"
) -> np.ndarray:
"""
Subset a point cloud of the raster.
If 'subset' is either 1 or is equal to the pixel count, all points are returned in order.
If 'subset' is smaller than 1 (for fractions) or the pixel count, a random sample is returned.
If the raster is not loaded, sampling will be done from disk without loading the entire Raster.
Formats:
* `as_frame` == None | False: A numpy ndarray of shape (N, 2 + nbands) with the columns [x, y, b1, b2..].
* `as_frame` == True: A GeoPandas GeoDataFrame with the columns ["b1", "b2", ..., "geometry"]
:param subset: The point count or fraction. If 'subset' > 1, it's parsed as a count.
:param as_frame: Return a GeoDataFrame with a geometry column and crs instead of an ndarray.
:param pixel_offset: The point at which to associate the pixel coordinate with ('corner' == upper left).
:raises ValueError: If the subset count or fraction is poorly formatted.
:returns: An ndarray/GeoDataFrame of the shape (N, 2 + nbands) where N is the subset count.
"""
data_size = self.width * self.height
# Validate the subset argument.
if subset <= 0.0:
raise ValueError(f"Subset cannot be zero or negative (given value: {subset})")
# If the subset is equal to or less than 1, it is assumed to be a fraction.
if subset <= 1.0:
subset = int(data_size * subset)
else:
subset = int(subset)
if subset > data_size:
raise ValueError(f"Subset cannot exceed the size of the dataset ({subset} vs {data_size})")
# If the subset is smaller than the max size, take a random subset of indices, otherwise take the whole.
choice = np.random.randint(0, data_size - 1, subset) if subset != data_size else np.arange(data_size)
cols = choice % self.width
rows = (choice / self.width).astype(int)
# Extract the coordinates of the pixels and filter by the chosen pixels.
x_coords, y_coords = (np.array(a) for a in self.ij2xy(rows, cols, offset=pixel_offset))
# If the Raster is loaded, pick from the data, otherwise use the disk-sample method from rasterio.
if self.is_loaded:
pixel_data = self.data[:, rows, cols]
else:
pixel_data = np.array(list(self.ds.sample(zip(x_coords, y_coords)))).T
if isinstance(pixel_data, np.ma.masked_array):
pixel_data = np.where(pixel_data.mask, np.nan, pixel_data.data)
# Merge the coordinates and pixel data into a point cloud.
points = np.vstack((x_coords.reshape(1, -1), y_coords.reshape(1, -1), pixel_data)).T
if as_frame:
points = gpd.GeoDataFrame(
points[:, 2:],
columns=[f"b{i}" for i in range(1, pixel_data.shape[0] + 1)],
geometry=gpd.points_from_xy(points[:, 0], points[:, 1]),
crs=self.crs,
)
return points
    def polygonize(
        self, in_value: Number | tuple[Number, Number] | list[Number] | np.ndarray | Literal["all"] = 1
    ) -> Vector:
        """
        Return a GeoDataFrame polygonized from a raster.
        :param in_value: Value or range of values of the raster from which to
        create geometries (Default is 1). If 'all', all unique pixel values of the raster are used.
        :returns: Vector containing the polygonized geometries.
        """
        # mask a unique value set by a number
        if isinstance(in_value, Number):
            if np.sum(self.data == in_value) == 0:
                raise ValueError(f"no pixel with in_value {in_value}")
            bool_msk = np.array(self.data == in_value).astype(np.uint8)
        # mask values within boundaries set by a tuple (exclusive bounds)
        elif isinstance(in_value, tuple):
            if np.sum((self.data > in_value[0]) & (self.data < in_value[1])) == 0:
                raise ValueError(f"no pixel with in_value between {in_value[0]} and {in_value[1]}")
            bool_msk = ((self.data > in_value[0]) & (self.data < in_value[1])).astype(np.uint8)
        # mask specific values set by a sequence
        elif isinstance(in_value, list) or isinstance(in_value, np.ndarray):
            if np.sum(np.isin(self.data, in_value)) == 0:
                raise ValueError("no pixel with in_value " + ", ".join(map("{}".format, in_value)))
            bool_msk = np.isin(self.data, in_value).astype("uint8")
        # mask all valid values
        elif in_value == "all":
            vals_for_msk = list(set(self.data.flatten()))
            bool_msk = np.isin(self.data, vals_for_msk).astype("uint8")
        else:
            raise ValueError("in_value must be a number, a tuple or a sequence")
        # Lazily build one feature per polygonized region, tagged with its raster value
        results = (
            {"properties": {"raster_value": v}, "geometry": s}
            for i, (s, v) in enumerate(shapes(self.data, mask=bool_msk, transform=self.transform))
        )
        gdf = gpd.GeoDataFrame.from_features(list(results))
        # Sequential feature identifier, then attach geometry column and CRS in place
        gdf.insert(0, "New_ID", range(0, 0 + len(gdf)))
        gdf.set_geometry(col="geometry", inplace=True)
        gdf.set_crs(self.crs, inplace=True)
        return gv.Vector(gdf)
| [
"rasterio.windows.Window",
"rasterio.windows.from_bounds",
"rasterio.transform.from_bounds",
"numpy.isin",
"numpy.array",
"pyproj.Transformer.from_crs",
"numpy.nanmean",
"os.cpu_count",
"affine.Affine",
"numpy.nanmin",
"rasterio.mask.mask",
"copy.copy",
"rasterio.coords.BoundingBox",
"nump... | [((1192, 1229), 'typing.TypeVar', 'TypeVar', (['"""RasterType"""'], {'bound': '"""Raster"""'}), "('RasterType', bound='Raster')\n", (1199, 1229), False, 'from typing import IO, Any, Callable, TypeVar, overload\n'), ((12196, 12208), 'rasterio.io.MemoryFile', 'MemoryFile', ([], {}), '()\n', (12206, 12208), False, 'from rasterio.io import MemoryFile\n'), ((21100, 21112), 'rasterio.io.MemoryFile', 'MemoryFile', ([], {}), '()\n', (21110, 21112), False, 'from rasterio.io import MemoryFile\n'), ((27388, 27411), 'copy.copy', 'copy.copy', (['self.ds.meta'], {}), '(self.ds.meta)\n', (27397, 27411), False, 'import copy\n'), ((38930, 38976), 'rasterio.warp.reproject', 'rio.warp.reproject', (['self.data'], {}), '(self.data, **reproj_kwargs)\n', (38948, 38976), True, 'import rasterio as rio\n'), ((46943, 46975), 'rioxarray.open_rasterio', 'rioxarray.open_rasterio', (['self.ds'], {}), '(self.ds)\n', (46966, 46975), False, 'import rioxarray\n'), ((47939, 48026), 'rasterio.warp.transform_bounds', 'rio.warp.transform_bounds', (['self.crs', 'out_crs', 'left', 'bottom', 'right', 'top', 'densify_pts'], {}), '(self.crs, out_crs, left, bottom, right, top,\n densify_pts)\n', (47964, 48026), True, 'import rasterio as rio\n'), ((48907, 48941), 'geoutils.projtools.bounds2poly', 'projtools.bounds2poly', (['self.bounds'], {}), '(self.bounds)\n', (48928, 48941), False, 'from geoutils import projtools\n'), ((49071, 49104), 'geoutils.projtools.bounds2poly', 'projtools.bounds2poly', (['rst.bounds'], {}), '(rst.bounds)\n', (49092, 49104), False, 'from geoutils import projtools\n'), ((53221, 53330), 'rasterio.plot.show', 'rshow', (['self.data[band, :, :]'], {'transform': 'self.transform', 'ax': 'ax0', 'cmap': 'cmap', 'vmin': 'vmin', 'vmax': 'vmax'}), '(self.data[band, :, :], transform=self.transform, ax=ax0, cmap=cmap,\n vmin=vmin, vmax=vmax, **kwargs)\n', (53226, 53330), True, 'from rasterio.plot import show as rshow\n'), ((57786, 57829), 'rasterio.windows.Window', 'rio.windows.Window', 
(['col', 'row', 'width', 'height'], {}), '(col, row, width, height)\n', (57804, 57829), True, 'import rasterio as rio\n'), ((66485, 66517), 'numpy.array', 'np.array', (['rpts'], {'dtype': 'np.float32'}), '(rpts, dtype=np.float32)\n', (66493, 66517), True, 'import numpy as np\n'), ((75372, 75386), 'geoutils.geovector.Vector', 'gv.Vector', (['gdf'], {}), '(gdf)\n', (75381, 75386), True, 'import geoutils.geovector as gv\n'), ((3116, 3131), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (3124, 3131), True, 'import numpy as np\n'), ((9909, 9985), 'rasterio.transform.from_origin', 'rio.transform.from_origin', (['self.bounds.left', 'self.bounds.top', 'res[0]', 'res[1]'], {}), '(self.bounds.left, self.bounds.top, res[0], res[1])\n', (9934, 9985), True, 'import rasterio as rio\n'), ((11660, 11678), 'rasterio.crs.CRS.from_epsg', 'CRS.from_epsg', (['crs'], {}), '(crs)\n', (11673, 11678), False, 'from rasterio.crs import CRS\n'), ((11812, 11835), 'numpy.expand_dims', 'np.expand_dims', (['data', '(0)'], {}), '(data, 0)\n', (11826, 11835), True, 'import numpy as np\n'), ((12256, 12426), 'rasterio.open', 'rio.open', (['mfh', '"""w"""'], {'height': 'data.shape[1]', 'width': 'data.shape[2]', 'count': 'data.shape[0]', 'dtype': 'data.dtype', 'crs': 'crs', 'transform': 'transform', 'nodata': 'nodata', 'driver': '"""GTiff"""'}), "(mfh, 'w', height=data.shape[1], width=data.shape[2], count=data.\n shape[0], dtype=data.dtype, crs=crs, transform=transform, nodata=nodata,\n driver='GTiff')\n", (12264, 12426), True, 'import rasterio as rio\n'), ((16156, 16210), 'numpy.find_common_type', 'np.find_common_type', (['[*self.dtypes, *other.dtypes]', '[]'], {}), '([*self.dtypes, *other.dtypes], [])\n', (16175, 16210), True, 'import numpy as np\n'), ((16971, 17000), 'rasterio.dtypes.check_dtype', 'rio.dtypes.check_dtype', (['dtype'], {}), '(dtype)\n', (16993, 17000), True, 'import rasterio as rio\n'), ((17166, 17209), 'rasterio.dtypes.can_cast_dtype', 'rio.dtypes.can_cast_dtype', 
(['self.data', 'dtype'], {}), '(self.data, dtype)\n', (17191, 17209), True, 'import rasterio as rio\n'), ((25632, 25661), 'numpy.expand_dims', 'np.expand_dims', (['self._data', '(0)'], {}), '(self._data, 0)\n', (25646, 25661), True, 'import numpy as np\n'), ((27471, 27536), 'shapely.geometry.polygon.Polygon', 'Polygon', (['[(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]'], {}), '([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)])\n', (27478, 27536), False, 'from shapely.geometry.polygon import Polygon\n'), ((27566, 27630), 'rasterio.mask.mask', 'rio.mask.mask', (['self.ds', '[crop_bbox]'], {'crop': '(True)', 'all_touched': '(True)'}), '(self.ds, [crop_bbox], crop=True, all_touched=True)\n', (27579, 27630), True, 'import rasterio as rio\n'), ((27877, 27950), 'rasterio.windows.from_bounds', 'rio.windows.from_bounds', (['xmin', 'ymin', 'xmax', 'ymax'], {'transform': 'self.transform'}), '(xmin, ymin, xmax, ymax, transform=self.transform)\n', (27900, 27950), True, 'import rasterio as rio\n'), ((28059, 28149), 'rasterio.transform.from_bounds', 'rio.transform.from_bounds', (['xmin', 'ymin', 'xmax', 'ymax'], {'width': 'new_width', 'height': 'new_height'}), '(xmin, ymin, xmax, ymax, width=new_width, height=\n new_height)\n', (28084, 28149), True, 'import rasterio as rio\n'), ((28415, 28546), 'rasterio.warp.reproject', 'rio.warp.reproject', (['self.data', 'new_img'], {'src_transform': 'self.transform', 'dst_transform': 'new_tfm', 'src_crs': 'self.crs', 'dst_crs': 'self.crs'}), '(self.data, new_img, src_transform=self.transform,\n dst_transform=new_tfm, src_crs=self.crs, dst_crs=self.crs)\n', (28433, 28546), True, 'import rasterio as rio\n'), ((32926, 32954), 'rasterio.crs.CRS.from_user_input', 'CRS.from_user_input', (['dst_crs'], {}), '(dst_crs)\n', (32945, 32954), False, 'from rasterio.crs import CRS\n'), ((36080, 36111), 'numpy.ones', 'np.ones', (['dst_shape'], {'dtype': 'dtype'}), '(dst_shape, dtype=dtype)\n', (36087, 36111), True, 'import numpy as np\n'), 
((36905, 36982), 'rasterio.transform.from_bounds', 'rio.transform.from_bounds', (['*dst_bounds'], {'width': 'dst_size[0]', 'height': 'dst_size[1]'}), '(*dst_bounds, width=dst_size[0], height=dst_size[1])\n', (36930, 36982), True, 'import rasterio as rio\n'), ((37154, 37202), 'numpy.ones', 'np.ones', (['(dst_size[1], dst_size[0])'], {'dtype': 'dtype'}), '((dst_size[1], dst_size[0]), dtype=dtype)\n', (37161, 37202), True, 'import numpy as np\n'), ((45141, 45392), 'rasterio.open', 'rio.open', (['filename', '"""w"""'], {'driver': 'driver', 'height': 'self.ds.height', 'width': 'self.ds.width', 'count': 'self.ds.count', 'dtype': 'save_data.dtype', 'crs': 'self.ds.crs', 'transform': 'self.ds.transform', 'nodata': 'self.ds.nodata', 'compress': 'compress', 'tiled': 'tiled'}), "(filename, 'w', driver=driver, height=self.ds.height, width=self.ds\n .width, count=self.ds.count, dtype=save_data.dtype, crs=self.ds.crs,\n transform=self.ds.transform, nodata=self.ds.nodata, compress=compress,\n tiled=tiled, **co_opts)\n", (45149, 45392), True, 'import rasterio as rio\n'), ((49548, 49594), 'warnings.warn', 'warnings.warn', (['"""Warning: Intersection is void"""'], {}), "('Warning: Intersection is void')\n", (49561, 49594), False, 'import warnings\n'), ((51684, 51705), 'numpy.arange', 'np.arange', (['self.count'], {}), '(self.count)\n', (51693, 51705), True, 'import numpy as np\n'), ((52211, 52251), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (["plt.rcParams['image.cmap']"], {}), "(plt.rcParams['image.cmap'])\n", (52223, 52251), True, 'import matplotlib.pyplot as plt\n'), ((52513, 52545), 'numpy.nanmin', 'np.nanmin', (['self.data[band, :, :]'], {}), '(self.data[band, :, :])\n', (52522, 52545), True, 'import numpy as np\n'), ((52591, 52623), 'numpy.nanmax', 'np.nanmax', (['self.data[band, :, :]'], {}), '(self.data[band, :, :])\n', (52600, 52623), True, 'import numpy as np\n'), ((52927, 52941), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (52939, 52941), True, 
'import matplotlib.pyplot as plt\n'), ((53839, 53849), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (53847, 53849), True, 'import matplotlib.pyplot as plt\n'), ((57017, 57066), 'geoutils.projtools.reproject_from_latlon', 'projtools.reproject_from_latlon', (['(y, x)', 'self.crs'], {}), '((y, x), self.crs)\n', (57048, 57066), False, 'from geoutils import projtools\n'), ((60021, 60085), 'numpy.linspace', 'np.linspace', (['self.bounds.left', 'self.bounds.right', '(self.width + 1)'], {}), '(self.bounds.left, self.bounds.right, self.width + 1)\n', (60032, 60085), True, 'import numpy as np\n'), ((60120, 60185), 'numpy.linspace', 'np.linspace', (['self.bounds.bottom', 'self.bounds.top', '(self.height + 1)'], {}), '(self.bounds.bottom, self.bounds.top, self.height + 1)\n', (60131, 60185), True, 'import numpy as np\n'), ((60385, 60414), 'numpy.meshgrid', 'np.meshgrid', (['xx[:-1]', 'yy[:-1]'], {}), '(xx[:-1], yy[:-1])\n', (60396, 60414), True, 'import numpy as np\n'), ((66018, 66034), 'pyproj.CRS', 'pyproj.CRS', (['(4326)'], {}), '(4326)\n', (66028, 66034), False, 'import pyproj\n'), ((66058, 66078), 'pyproj.CRS', 'pyproj.CRS', (['self.crs'], {}), '(self.crs)\n', (66068, 66078), False, 'import pyproj\n'), ((66105, 66152), 'pyproj.Transformer.from_crs', 'pyproj.Transformer.from_crs', (['init_crs', 'dest_crs'], {}), '(init_crs, dest_crs)\n', (66132, 66152), False, 'import pyproj\n'), ((66531, 66552), 'numpy.array', 'np.array', (['ind_invalid'], {}), '(ind_invalid)\n', (66539, 66552), True, 'import numpy as np\n'), ((71926, 71969), 'numpy.random.randint', 'np.random.randint', (['(0)', '(data_size - 1)', 'subset'], {}), '(0, data_size - 1, subset)\n', (71943, 71969), True, 'import numpy as np\n'), ((71998, 72018), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (72007, 72018), True, 'import numpy as np\n'), ((72216, 72227), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (72224, 72227), True, 'import numpy as np\n'), ((72645, 72695), 'numpy.where', 
'np.where', (['pixel_data.mask', 'np.nan', 'pixel_data.data'], {}), '(pixel_data.mask, np.nan, pixel_data.data)\n', (72653, 72695), True, 'import numpy as np\n'), ((7119, 7155), 'os.path.abspath', 'os.path.abspath', (['filename_or_dataset'], {}), '(filename_or_dataset)\n', (7134, 7155), False, 'import os\n'), ((8859, 8891), 'numpy.ceil', 'np.ceil', (['(self.width / downsample)'], {}), '(self.width / downsample)\n', (8866, 8891), True, 'import numpy as np\n'), ((8923, 8956), 'numpy.ceil', 'np.ceil', (['(self.height / downsample)'], {}), '(self.height / downsample)\n', (8930, 8956), True, 'import numpy as np\n'), ((11430, 11448), 'affine.Affine', 'Affine', (['*transform'], {}), '(*transform)\n', (11436, 11448), False, 'from affine import Affine\n'), ((13418, 13471), 'numpy.array_equal', 'np.array_equal', (['self.data', 'other.data'], {'equal_nan': '(True)'}), '(self.data, other.data, equal_nan=True)\n', (13432, 13471), True, 'import numpy as np\n'), ((28203, 28272), 'numpy.zeros', 'np.zeros', (['(self.nbands, new_height, new_width)'], {'dtype': 'self.data.dtype'}), '((self.nbands, new_height, new_width), dtype=self.data.dtype)\n', (28211, 28272), True, 'import numpy as np\n'), ((28317, 28385), 'numpy.zeros', 'np.zeros', (['(self.count, new_height, new_width)'], {'dtype': 'self.data.dtype'}), '((self.count, new_height, new_width), dtype=self.data.dtype)\n', (28325, 28385), True, 'import numpy as np\n'), ((34379, 34488), 'rasterio.coords.BoundingBox', 'rio.coords.BoundingBox', (["dst_bounds['left']", "dst_bounds['bottom']", "dst_bounds['right']", "dst_bounds['top']"], {}), "(dst_bounds['left'], dst_bounds['bottom'], dst_bounds\n ['right'], dst_bounds['top'])\n", (34401, 34488), True, 'import rasterio as rio\n'), ((35347, 35399), 'numpy.ceil', 'np.ceil', (['((dst_bounds.right - dst_bounds.left) / xres)'], {}), '((dst_bounds.right - dst_bounds.left) / xres)\n', (35354, 35399), True, 'import numpy as np\n'), ((35814, 35903), 'rasterio.coords.BoundingBox', 
'rio.coords.BoundingBox', ([], {'top': 'dst_bounds.top', 'left': 'dst_bounds.left', 'bottom': 'y1', 'right': 'x1'}), '(top=dst_bounds.top, left=dst_bounds.left, bottom=y1,\n right=x1)\n', (35836, 35903), True, 'import rasterio as rio\n'), ((38502, 38516), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (38514, 38516), False, 'import os\n'), ((39806, 39866), 'rasterio.transform.Affine', 'rio.transform.Affine', (['dx', 'b', '(xmin + xoff)', 'd', 'dy', '(ymax + yoff)'], {}), '(dx, b, xmin + xoff, d, dy, ymax + yoff)\n', (39826, 39866), True, 'import rasterio as rio\n'), ((41330, 41376), 'rasterio.dtypes.can_cast_dtype', 'rio.dtypes.can_cast_dtype', (['ndv', 'self.dtypes[0]'], {}), '(ndv, self.dtypes[0])\n', (41355, 41376), True, 'import rasterio as rio\n'), ((41793, 41821), 'numpy.ma.isMaskedArray', 'np.ma.isMaskedArray', (['imgdata'], {}), '(imgdata)\n', (41812, 41821), True, 'import numpy as np\n'), ((52307, 52325), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (52319, 52325), True, 'import matplotlib.pyplot as plt\n'), ((62044, 62059), 'numpy.asarray', 'np.asarray', (['[i]'], {}), '([i])\n', (62054, 62059), True, 'import numpy as np\n'), ((62162, 62177), 'numpy.asarray', 'np.asarray', (['[j]'], {}), '([j])\n', (62172, 62177), True, 'import numpy as np\n'), ((62312, 62325), 'numpy.asarray', 'np.asarray', (['i'], {}), '(i)\n', (62322, 62325), True, 'import numpy as np\n'), ((62327, 62340), 'numpy.asarray', 'np.asarray', (['j'], {}), '(j)\n', (62337, 62340), True, 'import numpy as np\n'), ((64277, 64295), 'numpy.array', 'np.array', (['(xi, yj)'], {}), '((xi, yj))\n', (64285, 64295), True, 'import numpy as np\n'), ((73740, 73769), 'numpy.sum', 'np.sum', (['(self.data == in_value)'], {}), '(self.data == in_value)\n', (73746, 73769), True, 'import numpy as np\n'), ((7434, 7468), 'rasterio.open', 'rio.open', (['filename_or_dataset', '"""r"""'], {}), "(filename_or_dataset, 'r')\n", (7442, 7468), True, 'import rasterio as rio\n'), ((9850, 
9870), 'numpy.asarray', 'np.asarray', (['self.res'], {}), '(self.res)\n', (9860, 9870), True, 'import numpy as np\n'), ((11967, 11984), 'numpy.sum', 'np.sum', (['data.mask'], {}), '(data.mask)\n', (11973, 11984), True, 'import numpy as np\n'), ((36633, 36685), 'numpy.ceil', 'np.ceil', (['((dst_bounds.right - dst_bounds.left) / xres)'], {}), '((dst_bounds.right - dst_bounds.left) / xres)\n', (36640, 36685), True, 'import numpy as np\n'), ((37880, 37984), 'warnings.warn', 'warnings.warn', (['"""Output projection, bounds and size are identical -> return self (not a copy!)"""'], {}), "(\n 'Output projection, bounds and size are identical -> return self (not a copy!)'\n )\n", (37893, 37984), False, 'import warnings\n'), ((41140, 41189), 'rasterio.dtypes.can_cast_dtype', 'rio.dtypes.can_cast_dtype', (['ndv[k]', 'self.dtypes[k]'], {}), '(ndv[k], self.dtypes[k])\n', (41165, 41189), True, 'import rasterio as rio\n'), ((42181, 42209), 'numpy.ma.isMaskedArray', 'np.ma.isMaskedArray', (['imgdata'], {}), '(imgdata)\n', (42200, 42209), True, 'import numpy as np\n'), ((44869, 44925), 'numpy.zeros', 'np.zeros', (['(self.ds.count, self.ds.height, self.ds.width)'], {}), '((self.ds.count, self.ds.height, self.ds.width))\n', (44877, 44925), True, 'import numpy as np\n'), ((46061, 46099), 'rasterio.transform.Affine', 'rio.transform.Affine', (['(1)', '(0)', '(0)', '(0)', '(1)', '(0)'], {}), '(1, 0, 0, 0, 1, 0)\n', (46081, 46099), True, 'import rasterio as rio\n'), ((46121, 46228), 'warnings.warn', 'warnings.warn', (['"""A geotransform previously set is going to be cleared due to the setting of GCPs."""'], {}), "(\n 'A geotransform previously set is going to be cleared due to the setting of GCPs.'\n )\n", (46134, 46228), False, 'import warnings\n'), ((60093, 60104), 'numpy.sign', 'np.sign', (['dx'], {}), '(dx)\n', (60100, 60104), True, 'import numpy as np\n'), ((60193, 60204), 'numpy.sign', 'np.sign', (['dy'], {}), '(dy)\n', (60200, 60204), True, 'import numpy as np\n'), ((73052, 73098), 
'geopandas.points_from_xy', 'gpd.points_from_xy', (['points[:, 0]', 'points[:, 1]'], {}), '(points[:, 0], points[:, 1])\n', (73070, 73098), True, 'import geopandas as gpd\n'), ((73871, 73902), 'numpy.array', 'np.array', (['(self.data == in_value)'], {}), '(self.data == in_value)\n', (73879, 73902), True, 'import numpy as np\n'), ((74034, 74095), 'numpy.sum', 'np.sum', (['((self.data > in_value[0]) & (self.data < in_value[1]))'], {}), '((self.data > in_value[0]) & (self.data < in_value[1]))\n', (74040, 74095), True, 'import numpy as np\n'), ((75070, 75128), 'rasterio.features.shapes', 'shapes', (['self.data'], {'mask': 'bool_msk', 'transform': 'self.transform'}), '(self.data, mask=bool_msk, transform=self.transform)\n', (75076, 75128), False, 'from rasterio.features import shapes\n'), ((17331, 17370), 'rasterio.dtypes.get_minimum_dtype', 'rio.dtypes.get_minimum_dtype', (['self.data'], {}), '(self.data)\n', (17359, 17370), True, 'import rasterio as rio\n'), ((32350, 32373), 'os.path.exists', 'os.path.exists', (['dst_ref'], {}), '(dst_ref)\n', (32364, 32373), False, 'import os\n'), ((35437, 35479), 'numpy.abs', 'np.abs', (['(dst_bounds.bottom - dst_bounds.top)'], {}), '(dst_bounds.bottom - dst_bounds.top)\n', (35443, 35479), True, 'import numpy as np\n'), ((38096, 38245), 'warnings.warn', 'warnings.warn', (['"""Only nodata is different, consider using the \'set_ndv()\' method instead\' \' -> return self (not a copy!)"""'], {}), '(\n "Only nodata is different, consider using the \'set_ndv()\' method instead\' \' -> return self (not a copy!)"\n )\n', (38109, 38245), False, 'import warnings\n'), ((45925, 45961), 'rasterio.control.GroundControlPoint', 'rio.control.GroundControlPoint', (['*gcp'], {}), '(*gcp)\n', (45955, 45961), True, 'import rasterio as rio\n'), ((53537, 53575), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (53553, 53575), False, 'from matplotlib import cm, colors\n'), ((36728, 
36770), 'numpy.abs', 'np.abs', (['(dst_bounds.bottom - dst_bounds.top)'], {}), '(dst_bounds.bottom - dst_bounds.top)\n', (36734, 36770), True, 'import numpy as np\n'), ((74449, 74477), 'numpy.isin', 'np.isin', (['self.data', 'in_value'], {}), '(self.data, in_value)\n', (74456, 74477), True, 'import numpy as np\n'), ((74609, 74637), 'numpy.isin', 'np.isin', (['self.data', 'in_value'], {}), '(self.data, in_value)\n', (74616, 74637), True, 'import numpy as np\n'), ((22877, 22897), 'numpy.nanmax', 'np.nanmax', (['self.data'], {}), '(self.data)\n', (22886, 22897), True, 'import numpy as np\n'), ((22964, 22984), 'numpy.nanmin', 'np.nanmin', (['self.data'], {}), '(self.data)\n', (22973, 22984), True, 'import numpy as np\n'), ((23051, 23074), 'numpy.ma.median', 'np.ma.median', (['self.data'], {}), '(self.data)\n', (23063, 23074), True, 'import numpy as np\n'), ((23141, 23162), 'numpy.nanmean', 'np.nanmean', (['self.data'], {}), '(self.data)\n', (23151, 23162), True, 'import numpy as np\n'), ((23229, 23249), 'numpy.nanstd', 'np.nanstd', (['self.data'], {}), '(self.data)\n', (23238, 23249), True, 'import numpy as np\n'), ((74801, 74833), 'numpy.isin', 'np.isin', (['self.data', 'vals_for_msk'], {}), '(self.data, vals_for_msk)\n', (74808, 74833), True, 'import numpy as np\n'), ((23511, 23540), 'numpy.nanmax', 'np.nanmax', (['self.data[b, :, :]'], {}), '(self.data[b, :, :])\n', (23520, 23540), True, 'import numpy as np\n'), ((23611, 23640), 'numpy.nanmin', 'np.nanmin', (['self.data[b, :, :]'], {}), '(self.data[b, :, :])\n', (23620, 23640), True, 'import numpy as np\n'), ((23711, 23743), 'numpy.ma.median', 'np.ma.median', (['self.data[b, :, :]'], {}), '(self.data[b, :, :])\n', (23723, 23743), True, 'import numpy as np\n'), ((23814, 23844), 'numpy.nanmean', 'np.nanmean', (['self.data[b, :, :]'], {}), '(self.data[b, :, :])\n', (23824, 23844), True, 'import numpy as np\n'), ((23915, 23944), 'numpy.nanstd', 'np.nanstd', (['self.data[b, :, :]'], {}), '(self.data[b, :, :])\n', (23924, 
23944), True, 'import numpy as np\n')] |
import math
import pandas as pd
from hdmf.common.table import DynamicTable
from pynwb import ProcessingModule
from src.bsl_python.preprocessing.processor.processor import Processor
import numpy as np
class DeviantTone(Processor):
parameters = dict()
def __init__(self, window_spikes, trials_info, repetitions):
super(DeviantTone, self).__init__('deviant_tone_parameters',
'Firing rate per tone (standard and deviant) and per sequence length')
spikes = window_spikes.copy()
spikes_per_cond_seq_electrode = spikes.groupby(["electrodes", "freq", "seq_length"])
firing_rate_per_trial_electrode = spikes_per_cond_seq_electrode.count() / 0.06
for stim in repetitions.index.levels[0]:
for seq_length in repetitions.index.levels[1]:
locations = np.logical_and(firing_rate_per_trial_electrode.index.get_level_values(2) == seq_length,
firing_rate_per_trial_electrode.index.get_level_values(1) == stim)
if (stim, seq_length) in repetitions:
firing_rate_per_trial_electrode.loc[locations, "trial_time"] /= repetitions[(stim, seq_length)]
self.parameters["firing_rate"] = \
firing_rate_per_trial_electrode.reset_index().set_index(["electrodes", "freq"])[
["seq_length", "trial_time"]]
frequencies = [trial["freq"] for trial in trials_info]
unique_freq = np.unique(frequencies)
freq_1_rate = np.sum(frequencies == unique_freq[0]) / len(frequencies)
freq_2_rate = np.sum(frequencies == unique_freq[1]) / len(frequencies)
self.deviant_tone_freq = unique_freq[1] if freq_1_rate > freq_2_rate else unique_freq[0]
self.standard_tone_freq = unique_freq[1] if freq_1_rate < freq_2_rate else unique_freq[0]
self.parameters["deviant_tone_frequency"] = self.deviant_tone_freq
self.parameters["standard_tone_frequency"] = self.standard_tone_freq
def create_module(self):
module = ProcessingModule(name=self.name, description=self.description)
module.add_container(DynamicTable.from_dataframe(self.parameters, name="deviant_tone_parameters"))
return module
| [
"pynwb.ProcessingModule",
"numpy.sum",
"numpy.unique",
"hdmf.common.table.DynamicTable.from_dataframe"
] | [((1494, 1516), 'numpy.unique', 'np.unique', (['frequencies'], {}), '(frequencies)\n', (1503, 1516), True, 'import numpy as np\n'), ((2069, 2131), 'pynwb.ProcessingModule', 'ProcessingModule', ([], {'name': 'self.name', 'description': 'self.description'}), '(name=self.name, description=self.description)\n', (2085, 2131), False, 'from pynwb import ProcessingModule\n'), ((1539, 1576), 'numpy.sum', 'np.sum', (['(frequencies == unique_freq[0])'], {}), '(frequencies == unique_freq[0])\n', (1545, 1576), True, 'import numpy as np\n'), ((1618, 1655), 'numpy.sum', 'np.sum', (['(frequencies == unique_freq[1])'], {}), '(frequencies == unique_freq[1])\n', (1624, 1655), True, 'import numpy as np\n'), ((2161, 2237), 'hdmf.common.table.DynamicTable.from_dataframe', 'DynamicTable.from_dataframe', (['self.parameters'], {'name': '"""deviant_tone_parameters"""'}), "(self.parameters, name='deviant_tone_parameters')\n", (2188, 2237), False, 'from hdmf.common.table import DynamicTable\n')] |
import yt
import numpy as np
OCT_MASK_LIST = [8, 0, 0, 0, 0, 8, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
8, 0, 0, 0, 0, 0, 0, 0,
0]
def test_octree():
# See Issue #1272
octree_mask = np.array(OCT_MASK_LIST, dtype=np.uint8)
quantities = {}
quantities[('gas', 'density')] = np.ones((22, 1), dtype=float)
bbox = np.array([[-10., 10.], [-10., 10.], [-10., 10.]])
ds = yt.load_octree(octree_mask=octree_mask,
data=quantities,
bbox=bbox,
over_refine_factor=0,
partial_coverage=0)
proj = ds.proj('density', 'x')
proj['density']
| [
"numpy.array",
"yt.load_octree",
"numpy.ones"
] | [((234, 273), 'numpy.array', 'np.array', (['OCT_MASK_LIST'], {'dtype': 'np.uint8'}), '(OCT_MASK_LIST, dtype=np.uint8)\n', (242, 273), True, 'import numpy as np\n'), ((332, 361), 'numpy.ones', 'np.ones', (['(22, 1)'], {'dtype': 'float'}), '((22, 1), dtype=float)\n', (339, 361), True, 'import numpy as np\n'), ((374, 429), 'numpy.array', 'np.array', (['[[-10.0, 10.0], [-10.0, 10.0], [-10.0, 10.0]]'], {}), '([[-10.0, 10.0], [-10.0, 10.0], [-10.0, 10.0]])\n', (382, 429), True, 'import numpy as np\n'), ((434, 547), 'yt.load_octree', 'yt.load_octree', ([], {'octree_mask': 'octree_mask', 'data': 'quantities', 'bbox': 'bbox', 'over_refine_factor': '(0)', 'partial_coverage': '(0)'}), '(octree_mask=octree_mask, data=quantities, bbox=bbox,\n over_refine_factor=0, partial_coverage=0)\n', (448, 547), False, 'import yt\n')] |
from ceres import optimize
import numpy as np
def func(ps):
return np.sum(ps**2)
def grad(ps):
return 2 * ps
x0 = np.array([1,2,3,4,5,6,7,8,9,10], dtype=np.double)
print(optimize(func, grad, x0))
| [
"numpy.array",
"ceres.optimize",
"numpy.sum"
] | [((126, 184), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {'dtype': 'np.double'}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=np.double)\n', (134, 184), True, 'import numpy as np\n'), ((73, 88), 'numpy.sum', 'np.sum', (['(ps ** 2)'], {}), '(ps ** 2)\n', (79, 88), True, 'import numpy as np\n'), ((183, 207), 'ceres.optimize', 'optimize', (['func', 'grad', 'x0'], {}), '(func, grad, x0)\n', (191, 207), False, 'from ceres import optimize\n')] |
from typing import List
import numpy as np
from ..base import BaseAudioEncoder
from ...helper import batching
class MfccEncoder(BaseAudioEncoder):
batch_size = 64
def __init__(self, n_mfcc: int = 13, sample_rate: int = 16000, max_length: int = 100, *args, **kwargs):
super().__init__(*args, **kwargs)
self.n_mfcc = n_mfcc
self.sample_rate = sample_rate
self.max_length = max_length
@batching
def encode(self, data: List['np.array'], *args, **kwargs) -> np.ndarray:
import librosa
mfccs = [np.array(librosa.feature.mfcc(y=audio, sr=self.sample_rate, n_mfcc=self.n_mfcc).T)
for audio in data]
mfccs = [np.concatenate((mf, np.zeros((self.max_length - mf.shape[0], self.n_mfcc), dtype=np.float32)), axis=0)
if mf.shape[0] < self.max_length else mf[:self.max_length] for mf in mfccs]
mfccs = [mfcc.reshape((1, -1)) for mfcc in mfccs]
mfccs = np.squeeze(np.array(mfccs), axis=1)
return mfccs
| [
"numpy.array",
"numpy.zeros",
"librosa.feature.mfcc"
] | [((981, 996), 'numpy.array', 'np.array', (['mfccs'], {}), '(mfccs)\n', (989, 996), True, 'import numpy as np\n'), ((572, 642), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': 'audio', 'sr': 'self.sample_rate', 'n_mfcc': 'self.n_mfcc'}), '(y=audio, sr=self.sample_rate, n_mfcc=self.n_mfcc)\n', (592, 642), False, 'import librosa\n'), ((720, 792), 'numpy.zeros', 'np.zeros', (['(self.max_length - mf.shape[0], self.n_mfcc)'], {'dtype': 'np.float32'}), '((self.max_length - mf.shape[0], self.n_mfcc), dtype=np.float32)\n', (728, 792), True, 'import numpy as np\n')] |
# Here we will use the support vector machine
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
# Lets try to write the basic of support vector machine
# we will use a class of several objects and methods
class Support_Vector_Machine:
# when you call a class to run, none of the methods will run except the __init__ one
def __init__(self, visualization=True): # visualization is used with a boolean variable.
self.visualization = visualization
self.colors = {1: 'r', -1: 'b'} # Color of the classes, one red for +1 and blue of -1
if self.visualization:
self.fig = plt.figure()
self.ax = self.fig.add_subplot(1, 1, 1) # simply add a subplot as a grid to for the plotting
# now we will define the method of fitting (notice you can add pass in the end of the method
# in case you don't know what to add yet,
# This method (fit) actually is the training of the data
def fit(self, data):
self.data = data
# here we will create a dictionary with ||w||:[w,b] see the theory to understand more
# first we will create an empty dictionary and later we will populate it with these information
opt_dict = {}
# as we learn before this is the transforms to check read the theory
# each time a vector is created we check with the transform here
transforms = [[1,1],
[-1,1],
[-1,-1],
[1,-1]]
# lets check the maximum and minimum range of the data
all_data = []
# yi is the class name which is the output,
# yi is -1 or +1
for yi in self.data: # to iterate through classes
for featureset in self.data[yi]: # to iterate through features for e.g. [1,7] is 1 then [2,8] is 2
for feature in featureset: # to iterate through points 1,7
all_data.append(feature) # to append them to a list populated with the numbers
# Now we can use the max function to know largest value in our data
self.max_feature_value = max(all_data)
self.min_feature_value = min(all_data)
# now since you got these values they will be stored and you can now get ride of matrix
all_data = None
# Now recall the picture of the big U shape, first we take a large steps and the medium and later small once we
# reach to the optimum value we want
# we can also thread or multi-processed
step_size = [self.max_feature_value * 0.1, # Big Step
self.max_feature_value * 0.01, # Medium Steps
self.max_feature_value * 0.001] # Small (expensive) steps
b_range_multiple = 5 # extremely expensive, we dont care about b that much
#
b_multiple =5
latest_optimum =self.max_feature_value*10 # this the largest vector w will be equal to this number
# Now we will start the stepping
for step in step_size:
w = np.array([latest_optimum,latest_optimum])
# we can do this because convex
optimized = False # until we run out of our step_size
while not optimized:
pass
# now we will define a method to make the predication
def predict(self, features):
# should return the sign of the class, as sign(x.w+b)
# you can make a lambda expression for upper than 1 and lower than 1
# or you can simply use the numpy sign function,
classification = np.sign(np.dot(np.array(features), self.w) + self.b)
return classification
# now we will start with simple data and later we will extend
data_dict = {-1: np.array([[1, 7],
[2, 8],
[3, 8], ]),
1: np.array([[5, 1],
[6, -1],
[7, 3], ])}
| [
"matplotlib.use",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.style.use"
] | [((84, 107), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (98, 107), False, 'import matplotlib\n'), ((170, 189), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (179, 189), False, 'from matplotlib import style\n'), ((3875, 3909), 'numpy.array', 'np.array', (['[[1, 7], [2, 8], [3, 8]]'], {}), '([[1, 7], [2, 8], [3, 8]])\n', (3883, 3909), True, 'import numpy as np\n'), ((3983, 4018), 'numpy.array', 'np.array', (['[[5, 1], [6, -1], [7, 3]]'], {}), '([[5, 1], [6, -1], [7, 3]])\n', (3991, 4018), True, 'import numpy as np\n'), ((707, 719), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (717, 719), True, 'import matplotlib.pyplot as plt\n'), ((3166, 3208), 'numpy.array', 'np.array', (['[latest_optimum, latest_optimum]'], {}), '([latest_optimum, latest_optimum])\n', (3174, 3208), True, 'import numpy as np\n'), ((3725, 3743), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (3733, 3743), True, 'import numpy as np\n')] |
import random
from collections import defaultdict
import torch
from torch.utils.data import Dataset
from data.base_dataset import BaseDataset
from utils import load_json, compute_rel
import pickle
import networkx
import numpy as np
g_use_heuristic_relation_matrix = False
g_add_in_room_relation = False
g_add_random_parent_link = False
g_prepend_room = False
g_shuffle_subject_object = False
def suncg_collate_fn(batch):
"""
Collate function to be used when wrapping SuncgDataset in a
DataLoader. Returns a tuple of the following:
- objs: LongTensor of shape (O,) giving object categories
- boxes: FloatTensor of shape (O, 4)
- triples: LongTensor of shape (T, 3) giving triples
- obj_to_img: LongTensor of shape (O,) mapping objects to room
- triple_to_img: LongTensor of shape (T,) mapping triples to room
"""
all_ids, all_objs, all_boxes, all_triples, all_angles, all_attributes = [], [], [], [], [], []
all_obj_to_room, all_triple_to_room = [], []
obj_offset = 0
for i, (room_id, objs, boxes, triples, angles, attributes) in enumerate(batch):
if objs.dim() == 0 or triples.dim() == 0:
continue
O, T = objs.size(0), triples.size(0)
all_objs.append(objs)
all_angles.append(angles)
all_attributes.append(attributes)
all_boxes.append(boxes)
all_ids.append(room_id)
triples = triples.clone()
triples[:, 0] += obj_offset
triples[:, 2] += obj_offset
all_triples.append(triples)
all_obj_to_room.append(torch.LongTensor(O).fill_(i))
all_triple_to_room.append(torch.LongTensor(T).fill_(i))
obj_offset += O
all_ids = torch.LongTensor(all_ids)
all_objs = torch.cat(all_objs)
all_boxes = torch.cat(all_boxes)
all_triples = torch.cat(all_triples)
all_angles = torch.cat(all_angles)
all_attributes = torch.cat(all_attributes)
all_obj_to_room = torch.cat(all_obj_to_room)
all_triple_to_room = torch.cat(all_triple_to_room)
out = (all_ids, all_objs, all_boxes, all_triples, all_angles, all_attributes, all_obj_to_room, all_triple_to_room)
return out
class SuncgDataset(BaseDataset):
def __init__(self, data_dir, valid_types_dir = "metadata/valid_types.json", train_3d=True, touching_relations=True, use_attr_30=False):
super(Dataset, self).__init__()
self.train_3d = train_3d
assert self.train_3d
# Do we train using 3D coors? You want True.
self.use_attr_30 = use_attr_30
# Do we want to train on object attributes? Split by 70:30? Tall/Short & Large/Small & None?
print("Starting to read the json file for SUNCG")
self.data = load_json(data_dir)
# Json file for cleaned & normalized data
self.room_ids = [int(i) for i in list(self.data)]
self.touching_relations = touching_relations
# Do objects touch? Works either way
# Construction dict
# obj_name is object type (chair/table/sofa etc. etc.)
# pred_name is relation type (left/right etc.)
# idx_to_name maps respective index back to object type or relation name
valid_types = load_json(valid_types_dir)
self.vocab = {'object_idx_to_name': ['__room__'] + valid_types}
# map obj type to idx
self.vocab['object_name_to_idx'] = {}
for i, name in enumerate(self.vocab['object_idx_to_name']):
self.vocab['object_name_to_idx'][name] = i
# map idx to relation type
self.vocab['pred_idx_to_name'] = [
'__in_room__',
'left of',
'right of',
'behind',
'in front of',
'inside',
'surrounding',
'left touching',
'right touching',
'front touching',
'behind touching',
'front left',
'front right',
'back left',
'back right',
'on',
]
# We don't actually use the front left, front right, back left, back right
# map relation type to idx
self.vocab['pred_name_to_idx'] = {}
for idx, name in enumerate(self.vocab['pred_idx_to_name']):
self.vocab['pred_name_to_idx'][name] = idx
self.vocab['attrib_idx_to_name'] = [
'none',
'tall',
'short',
'large',
'small',
]
self.vocab['attrib_name_to_idx'] = {}
for idx, name in enumerate(self.vocab['attrib_idx_to_name']):
self.vocab['attrib_name_to_idx'][name] = idx
self.image_id_to_objects = defaultdict(list)
self.room_bboxes = {}
for room_id in self.data:
room = self.data[room_id]
room_id = int(room_id)
self.image_id_to_objects[room_id] = room["valid_objects"]
self.room_bboxes[room_id] = room["bbox"]
self.size_data = load_json(
"metadata/size_info_many.json")
self.size_data_30 = load_json(
"metadata/30_size_info_many.json")
if g_use_heuristic_relation_matrix:
self.relation_score_matrix = self.get_relation_score_matrix()
def total_objects(self):
total_objs = 0
for i, room_id in enumerate(self.room_ids):
num_objs = len(self.image_id_to_objects[room_id])
total_objs += num_objs
return total_objs
def __len__(self):
return len(self.room_ids)
def return_room_ids(self):
return self.room_ids
def get_by_room_id(self, room_id):
try:
idx = self.room_ids.index(int(room_id))
except:
print("Get by room id failed! Defaulting to 0.")
idx = 0
return self.__getitem__(idx)
# -------------------new------------------
def get_relation_score_matrix(self, path = "new/relation_graph_v1.p"):
vocab = self.vocab
print("loading relation score matrix from: ", path)
R_G = pickle.load(open(path,"rb"))
relation_score_matrix = np.zeros((len(vocab['object_idx_to_name']), len(vocab['object_idx_to_name']))) + 0.6
for i in range(len(vocab['object_idx_to_name'])):
obj1 = vocab['object_idx_to_name'][i]
if obj1 == "shower_curtain":
continue
if obj1 == "floor_mat":
obj1 = "floor"
if obj1 == "night_stand":
obj1 = "stand"
if obj1 not in R_G.nodes:
continue
max_count_obj = max([R_G.edges[edge]['count'] for edge in R_G.edges(obj1)])
for j in range(len(vocab['object_idx_to_name'])):
obj2 = vocab['object_idx_to_name'][j]
if obj2 == "shower_curtain":
continue
if obj2 == "floor_mat":
obj2 = "floor"
if obj2 == "night_stand":
obj2 = "stand"
if obj2 not in R_G.nodes:
continue
if (obj1, obj2) not in R_G.edges:
continue
relation_score_matrix[i][j] += np.log(R_G.edges[(obj1, obj2)]["count"]) / np.log(max_count_obj)
return relation_score_matrix
def __getitem__(self, index, shuffle_obj = True):
    """Assemble one training sample for the room at dataset position `index`.

    Returns ``(room_id, objs, boxes, triples, angles, attributes)`` where
    objs/angles/attributes are LongTensors (one entry per object plus the
    room node), boxes is a stacked float tensor of bounding boxes
    normalised by the last box, and triples is a LongTensor of
    (subject, predicate, object) relation rows.
    """
    room_id = self.room_ids[index]
    objs, boxes, angles = [], [], []
    if g_prepend_room:
        # Room node goes first in the sequence.
        objs.append(self.vocab['object_name_to_idx']['__room__'])
        room_bbox = self.room_bboxes[room_id]
        x0 = 0.0
        y0 = 0.0
        z0 = 0.0
        x1 = room_bbox[0]
        y1 = room_bbox[1]
        z1 = room_bbox[2]
        if self.train_3d:
            boxes.append(torch.FloatTensor([x0, y0, z0, x1, y1, z1]))
        else:
            boxes.append(torch.FloatTensor([x0, z0, x1, z1]))
        angles.append(0)
    obj_data_list = self.image_id_to_objects[room_id]
    if shuffle_obj:
        # NOTE(review): shuffles the cached per-room list in place, so the
        # stored object order mutates across epochs — confirm intended.
        random.shuffle(obj_data_list)
    for object_data in obj_data_list:
        obj_type = object_data["type"]
        objs.append(self.vocab['object_name_to_idx'][obj_type])
        bbox = object_data['new_bbox']
        # Get min/max of the bbox
        x0 = bbox[0][0]
        y0 = bbox[0][1]
        z0 = bbox[0][2]
        x1 = bbox[1][0]
        y1 = bbox[1][1]
        z1 = bbox[1][2]
        if self.train_3d:
            boxes.append(torch.FloatTensor([x0, y0, z0, x1, y1, z1]))
        else:
            boxes.append(torch.FloatTensor([x0, z0, x1, z1]))
        theta = object_data['rotation']
        angles.append(theta)
    if not g_prepend_room:
        # Room node goes last in the sequence.
        objs.append(self.vocab['object_name_to_idx']['__room__'])
        room_bbox = self.room_bboxes[room_id]
        x0 = 0.0
        y0 = 0.0
        z0 = 0.0
        x1 = room_bbox[0]
        y1 = room_bbox[1]
        z1 = room_bbox[2]
        if self.train_3d:
            boxes.append(torch.FloatTensor([x0, y0, z0, x1, y1, z1]))
        else:
            boxes.append(torch.FloatTensor([x0, z0, x1, z1]))
        angles.append(0)
    objs = torch.LongTensor(objs)
    boxes = torch.stack(boxes, dim=0)
    # Angles are discrete, so make it a long tensor
    angles = torch.LongTensor(angles)
    # # Compute centers of all objects
    # obj_centers = []
    # if self.train_3d:
    #     for i, obj_idx in enumerate(objs):
    #         x0, y0, z0, x1, y1, z1 = boxes[i]
    #         mean_x = 0.5 * (x0 + x1)
    #         mean_y = 0.5 * (y0 + y1)
    #         mean_z = 0.5 * (z0 + z1)
    #         obj_centers.append([mean_x, mean_y, mean_z])
    # else:
    #     for i, obj_idx in enumerate(objs):
    #         x0, z0, x1, z1 = boxes[i]
    #         mean_x = 0.5 * (x0 + x1)
    #         mean_z = 0.5 * (z0 + z1)
    #         obj_centers.append([mean_x, mean_z])
    # obj_centers = torch.FloatTensor(obj_centers)
    # Compute scene graphs
    triples = []
    num_objs = objs.size(0)
    __room__ = self.vocab['object_name_to_idx']['__room__']
    real_objs = []
    if num_objs > 1:
        # get non-room object indices
        real_objs = (objs != __room__).nonzero().squeeze(1)
    if self.train_3d:
        # special: "on" relationships
        on_rels = defaultdict(list)
        for cur in real_objs:
            choices = [obj for obj in real_objs if obj != cur]
            for other in choices:
                cur_box = boxes[cur]
                other_box = boxes[other]
                p = compute_rel(cur_box, other_box, None, None)
                if p == "on":
                    p = self.vocab['pred_name_to_idx']['on']
                    triples.append([cur, p, other])
                    on_rels[cur].append(other)
        # new: add random parent link
        if g_add_random_parent_link:
            for cur in real_objs:
                if cur in on_rels.keys():
                    # "on" relation is an absolute parent link
                    choices = on_rels[cur]
                    other = random.choice(choices)
                    p = len(self.vocab['pred_name_to_idx'])  # 16: parent link
                    triples.append([cur, p, other])
                else:
                    # random choose a parent
                    choices = [obj for obj in real_objs if obj != cur]
                    if g_prepend_room:
                        choices.append(0)
                    else:
                        choices.append(objs.size(0)- 1)
                    other = random.choice(choices)
                    if (g_prepend_room and other == 0) or (not g_prepend_room and other == objs.size(0)- 1):
                        p = self.vocab['pred_name_to_idx']["__in_room__"]
                        triples.append([cur, p, other])
                    else:
                        # real relation
                        p = compute_rel(boxes[cur], boxes[other], None, None)
                        p = self.vocab['pred_name_to_idx'][p]
                        triples.append([cur, p, other])
                        # add parent link
                        triples.append([cur, len(self.vocab['pred_name_to_idx']), other])
        else:
            # add random relationships
            for cur in real_objs:
                choices = [obj for obj in real_objs if obj != cur]
                # ---------- heuristic relation sampling ---------------
                if g_use_heuristic_relation_matrix:
                    prob = [self.relation_score_matrix[objs[cur], objs[otr]] for otr in real_objs if otr != cur]
                    prob = np.asarray(prob) / np.sum(prob)
                    other = np.random.choice(choices, p = prob)
                else:
                    other = random.choice(choices)
                if g_shuffle_subject_object and random.random() > 0.5:
                    s, o = cur, other
                else:
                    s, o = other, cur
                # skip pairs already linked by an "on" relation
                if s in on_rels[o] or o in on_rels[s]:
                    continue
                p = compute_rel(boxes[s], boxes[o], None, None)
                p = self.vocab['pred_name_to_idx'][p]
                triples.append([s, p, o])
    # Add __in_room__ triples
    if g_add_in_room_relation:
        O = objs.size(0)
        for i in range(O - 1):
            p = compute_rel(boxes[i], boxes[-1], None, "__room__")
            p = self.vocab['pred_name_to_idx'][p]
            triples.append([i, p, O - 1])
    triples = torch.LongTensor(triples)
    # normalize boxes, all in [0,1] relative to room
    # NOTE(review): divides by boxes[-1]; the room box is the last entry
    # only when g_prepend_room is False — confirm the prepended case.
    b = boxes.size(0)
    if self.train_3d:
        for i in range(b - 1):
            boxes[i][0] /= boxes[-1][3]
            boxes[i][3] /= boxes[-1][3]
            boxes[i][1] /= boxes[-1][4]
            boxes[i][4] /= boxes[-1][4]
            boxes[i][2] /= boxes[-1][5]
            boxes[i][5] /= boxes[-1][5]
    else:
        for i in range(b - 1):
            boxes[i][0] /= boxes[-1][2]
            boxes[i][2] /= boxes[-1][2]
            boxes[i][1] /= boxes[-1][3]
            boxes[i][3] /= boxes[-1][3]
    if not self.use_attr_30:
        # compute size attributes using normalized bboxes
        attributes = []
        for i in range(b - 1):
            obj_type = self.vocab['object_idx_to_name'][objs[i]]
            # Half the time (or for types without size stats) emit "none".
            if random.random() > 0.5 or (obj_type not in self.size_data):
                attributes.append("none")
            else:
                obj_type = self.vocab['object_idx_to_name'][objs[i]]
                if random.random() > 0.5:
                    # tall/short
                    obj_height = boxes[i][4] - boxes[i][1]
                    if obj_height > self.size_data[obj_type][0][1]:
                        attributes.append("tall")
                    else:
                        attributes.append("short")
                else:
                    # large/small
                    obj_volume = (boxes[i][3] - boxes[i][0]) * (boxes[i][4] - boxes[i][1]) * (
                        boxes[i][5] - boxes[i][2])
                    if obj_volume > self.size_data[obj_type][1]:
                        attributes.append("large")
                    else:
                        attributes.append("small")
    else:
        # compute size attributes using normalized bboxes, use 30/70 size
        attributes = []
        for i in range(b - 1):
            obj_type = self.vocab['object_idx_to_name'][objs[i]]
            if random.random() > 0.5 or (obj_type not in self.size_data_30):
                # if random.random() > 0.7:
                attributes.append("none")
            else:
                obj_type = self.vocab['object_idx_to_name'][objs[i]]
                if random.random() > 0.5:
                    # tall/short, middle 30/70 band maps to "none"
                    obj_height = boxes[i][4] - boxes[i][1]
                    if obj_height > self.size_data_30[obj_type]["height_7"]:
                        attributes.append("tall")
                    elif obj_height < self.size_data_30[obj_type]["height_3"]:
                        attributes.append("short")
                    else:
                        attributes.append("none")
                else:
                    # large/small, middle 30/70 band maps to "none"
                    obj_volume = (boxes[i][3] - boxes[i][0]) * (boxes[i][4] - boxes[i][1]) * (
                        boxes[i][5] - boxes[i][2])
                    if obj_volume > self.size_data_30[obj_type]["volume_7"]:
                        attributes.append("large")
                    elif obj_volume < self.size_data_30[obj_type]["volume_3"]:
                        attributes.append("small")
                    else:
                        attributes.append("none")
    # trailing "none" for the final (room) entry so lengths match objs
    attributes.append("none")
    attributes = [self.vocab["attrib_name_to_idx"][name] for name in attributes]
    attributes = torch.LongTensor(attributes)
    assert attributes.size(0) == objs.size(0)
    return room_id, objs, boxes, triples, angles, attributes
| [
"random.choice",
"random.shuffle",
"utils.load_json",
"torch.LongTensor",
"numpy.random.choice",
"torch.stack",
"numpy.log",
"numpy.asarray",
"utils.compute_rel",
"numpy.sum",
"collections.defaultdict",
"random.random",
"torch.FloatTensor",
"torch.cat"
] | [((1696, 1721), 'torch.LongTensor', 'torch.LongTensor', (['all_ids'], {}), '(all_ids)\n', (1712, 1721), False, 'import torch\n'), ((1737, 1756), 'torch.cat', 'torch.cat', (['all_objs'], {}), '(all_objs)\n', (1746, 1756), False, 'import torch\n'), ((1773, 1793), 'torch.cat', 'torch.cat', (['all_boxes'], {}), '(all_boxes)\n', (1782, 1793), False, 'import torch\n'), ((1812, 1834), 'torch.cat', 'torch.cat', (['all_triples'], {}), '(all_triples)\n', (1821, 1834), False, 'import torch\n'), ((1852, 1873), 'torch.cat', 'torch.cat', (['all_angles'], {}), '(all_angles)\n', (1861, 1873), False, 'import torch\n'), ((1895, 1920), 'torch.cat', 'torch.cat', (['all_attributes'], {}), '(all_attributes)\n', (1904, 1920), False, 'import torch\n'), ((1943, 1969), 'torch.cat', 'torch.cat', (['all_obj_to_room'], {}), '(all_obj_to_room)\n', (1952, 1969), False, 'import torch\n'), ((1995, 2024), 'torch.cat', 'torch.cat', (['all_triple_to_room'], {}), '(all_triple_to_room)\n', (2004, 2024), False, 'import torch\n'), ((2709, 2728), 'utils.load_json', 'load_json', (['data_dir'], {}), '(data_dir)\n', (2718, 2728), False, 'from utils import load_json, compute_rel\n'), ((3187, 3213), 'utils.load_json', 'load_json', (['valid_types_dir'], {}), '(valid_types_dir)\n', (3196, 3213), False, 'from utils import load_json, compute_rel\n'), ((4644, 4661), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4655, 4661), False, 'from collections import defaultdict\n'), ((4948, 4989), 'utils.load_json', 'load_json', (['"""metadata/size_info_many.json"""'], {}), "('metadata/size_info_many.json')\n", (4957, 4989), False, 'from utils import load_json, compute_rel\n'), ((5031, 5075), 'utils.load_json', 'load_json', (['"""metadata/30_size_info_many.json"""'], {}), "('metadata/30_size_info_many.json')\n", (5040, 5075), False, 'from utils import load_json, compute_rel\n'), ((9376, 9398), 'torch.LongTensor', 'torch.LongTensor', (['objs'], {}), '(objs)\n', (9392, 9398), False, 'import 
torch\n'), ((9415, 9440), 'torch.stack', 'torch.stack', (['boxes'], {'dim': '(0)'}), '(boxes, dim=0)\n', (9426, 9440), False, 'import torch\n'), ((9514, 9538), 'torch.LongTensor', 'torch.LongTensor', (['angles'], {}), '(angles)\n', (9530, 9538), False, 'import torch\n'), ((14132, 14157), 'torch.LongTensor', 'torch.LongTensor', (['triples'], {}), '(triples)\n', (14148, 14157), False, 'import torch\n'), ((17753, 17781), 'torch.LongTensor', 'torch.LongTensor', (['attributes'], {}), '(attributes)\n', (17769, 17781), False, 'import torch\n'), ((8140, 8169), 'random.shuffle', 'random.shuffle', (['obj_data_list'], {}), '(obj_data_list)\n', (8154, 8169), False, 'import random\n'), ((10631, 10648), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10642, 10648), False, 'from collections import defaultdict\n'), ((1563, 1582), 'torch.LongTensor', 'torch.LongTensor', (['O'], {}), '(O)\n', (1579, 1582), False, 'import torch\n'), ((1627, 1646), 'torch.LongTensor', 'torch.LongTensor', (['T'], {}), '(T)\n', (1643, 1646), False, 'import torch\n'), ((7289, 7327), 'numpy.log', 'np.log', (["R_G.edges[obj1, obj2]['count']"], {}), "(R_G.edges[obj1, obj2]['count'])\n", (7295, 7327), True, 'import numpy as np\n'), ((7332, 7353), 'numpy.log', 'np.log', (['max_count_obj'], {}), '(max_count_obj)\n', (7338, 7353), True, 'import numpy as np\n'), ((7887, 7930), 'torch.FloatTensor', 'torch.FloatTensor', (['[x0, y0, z0, x1, y1, z1]'], {}), '([x0, y0, z0, x1, y1, z1])\n', (7904, 7930), False, 'import torch\n'), ((7979, 8014), 'torch.FloatTensor', 'torch.FloatTensor', (['[x0, z0, x1, z1]'], {}), '([x0, z0, x1, z1])\n', (7996, 8014), False, 'import torch\n'), ((8631, 8674), 'torch.FloatTensor', 'torch.FloatTensor', (['[x0, y0, z0, x1, y1, z1]'], {}), '([x0, y0, z0, x1, y1, z1])\n', (8648, 8674), False, 'import torch\n'), ((8723, 8758), 'torch.FloatTensor', 'torch.FloatTensor', (['[x0, z0, x1, z1]'], {}), '([x0, z0, x1, z1])\n', (8740, 8758), False, 'import torch\n'), ((9202, 
9245), 'torch.FloatTensor', 'torch.FloatTensor', (['[x0, y0, z0, x1, y1, z1]'], {}), '([x0, y0, z0, x1, y1, z1])\n', (9219, 9245), False, 'import torch\n'), ((9294, 9329), 'torch.FloatTensor', 'torch.FloatTensor', (['[x0, z0, x1, z1]'], {}), '([x0, z0, x1, z1])\n', (9311, 9329), False, 'import torch\n'), ((10898, 10941), 'utils.compute_rel', 'compute_rel', (['cur_box', 'other_box', 'None', 'None'], {}), '(cur_box, other_box, None, None)\n', (10909, 10941), False, 'from utils import load_json, compute_rel\n'), ((13632, 13675), 'utils.compute_rel', 'compute_rel', (['boxes[s]', 'boxes[o]', 'None', 'None'], {}), '(boxes[s], boxes[o], None, None)\n', (13643, 13675), False, 'from utils import load_json, compute_rel\n'), ((13954, 14004), 'utils.compute_rel', 'compute_rel', (['boxes[i]', 'boxes[-1]', 'None', '"""__room__"""'], {}), "(boxes[i], boxes[-1], None, '__room__')\n", (13965, 14004), False, 'from utils import load_json, compute_rel\n'), ((11474, 11496), 'random.choice', 'random.choice', (['choices'], {}), '(choices)\n', (11487, 11496), False, 'import random\n'), ((11996, 12018), 'random.choice', 'random.choice', (['choices'], {}), '(choices)\n', (12009, 12018), False, 'import random\n'), ((13192, 13225), 'numpy.random.choice', 'np.random.choice', (['choices'], {'p': 'prob'}), '(choices, p=prob)\n', (13208, 13225), True, 'import numpy as np\n'), ((13286, 13308), 'random.choice', 'random.choice', (['choices'], {}), '(choices)\n', (13299, 13308), False, 'import random\n'), ((15039, 15054), 'random.random', 'random.random', ([], {}), '()\n', (15052, 15054), False, 'import random\n'), ((15262, 15277), 'random.random', 'random.random', ([], {}), '()\n', (15275, 15277), False, 'import random\n'), ((16274, 16289), 'random.random', 'random.random', ([], {}), '()\n', (16287, 16289), False, 'import random\n'), ((16548, 16563), 'random.random', 'random.random', ([], {}), '()\n', (16561, 16563), False, 'import random\n'), ((12377, 12426), 'utils.compute_rel', 'compute_rel', 
(['boxes[cur]', 'boxes[other]', 'None', 'None'], {}), '(boxes[cur], boxes[other], None, None)\n', (12388, 12426), False, 'from utils import load_json, compute_rel\n'), ((13128, 13144), 'numpy.asarray', 'np.asarray', (['prob'], {}), '(prob)\n', (13138, 13144), True, 'import numpy as np\n'), ((13147, 13159), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (13153, 13159), True, 'import numpy as np\n'), ((13382, 13397), 'random.random', 'random.random', ([], {}), '()\n', (13395, 13397), False, 'import random\n')] |
"""
Modeling Relational Data with Graph Convolutional Networks
Paper: https://arxiv.org/abs/1703.06103
Code: https://github.com/tkipf/relational-gcn
Difference compared to tkipf/relation-gcn
* l2norm applied to all weights
* remove nodes that won't be touched
"""
import argparse
import gc
import logging
from pathlib import Path
from types import SimpleNamespace
import numpy as np
import time
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.multiprocessing import Queue
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader
import dgl
from dgl.nn import RelGraphConv
# import sys
# import os
# dir_path = Path(os.path.dirname(__file__))
# sys.path.insert(0, dir_path.parent)
from .. import utils
# import utils
class EntityClassify(nn.Module):
    """R-GCN entity classification network.

    Stacks ``num_hidden_layers + 2`` basis-decomposition ``RelGraphConv``
    layers: one input-to-hidden, ``num_hidden_layers`` hidden-to-hidden and
    one hidden-to-output layer.  The first layer maps h_dim -> h_dim, so
    callers are expected to feed h_dim-dimensional node embeddings (see
    ``RelGraphEmbedLayer`` in the runner).

    Parameters
    ----------
    device : int
        CUDA device id; a negative value selects the CPU.
    num_nodes : int
        Number of nodes in the graph (stored for reference).
    h_dim : int
        Hidden feature size.
    out_dim : int
        Number of output classes.
    num_rels : int
        Number of relation types.
    num_bases : int or None
        Bases for the basis decomposition; ``None`` or a negative value
        means "use the RelGraphConv default" (one basis per relation).
    num_hidden_layers : int
        Number of hidden-to-hidden layers.
    dropout : float
        Dropout probability in each layer.
    use_self_loop : bool
        Add a self-loop transformation in each RelGraphConv.
    low_mem : bool
        Use RelGraphConv's low-memory message passing.
    layer_norm : bool
        Apply layer normalization inside each RelGraphConv.
    """
    def __init__(self,
                 device,
                 num_nodes,
                 h_dim,
                 out_dim,
                 num_rels,
                 num_bases=None,
                 num_hidden_layers=1,
                 dropout=0,
                 use_self_loop=False,
                 low_mem=True,
                 layer_norm=False):
        super(EntityClassify, self).__init__()
        self.device = th.device(device if device >= 0 else 'cpu')
        self.num_nodes = num_nodes
        self.h_dim = h_dim
        self.out_dim = out_dim
        self.num_rels = num_rels
        # Bug fix: the declared default num_bases=None previously crashed on
        # `num_bases < 0` (TypeError: '<' not supported between 'NoneType'
        # and 'int').  Treat None the same as a negative value.
        self.num_bases = None if num_bases is None or num_bases < 0 else num_bases
        self.num_hidden_layers = num_hidden_layers
        self.dropout = dropout
        self.use_self_loop = use_self_loop
        self.low_mem = low_mem
        self.layer_norm = layer_norm
        self.layers = nn.ModuleList()
        # i2h
        self.layers.append(RelGraphConv(
            self.h_dim, self.h_dim, self.num_rels, "basis",
            self.num_bases, activation=F.relu, self_loop=self.use_self_loop,
            low_mem=self.low_mem, dropout=self.dropout, layer_norm=layer_norm))
        # h2h
        for idx in range(self.num_hidden_layers):
            self.layers.append(RelGraphConv(
                self.h_dim, self.h_dim, self.num_rels, "basis",
                self.num_bases, activation=F.relu, self_loop=self.use_self_loop,
                low_mem=self.low_mem, dropout=self.dropout, layer_norm=layer_norm))
        # h2o
        self.layers.append(RelGraphConv(
            self.h_dim, self.out_dim, self.num_rels, "basis",
            self.num_bases, activation=None,
            self_loop=self.use_self_loop,
            low_mem=self.low_mem, layer_norm=layer_norm))

    def forward(self, blocks, feats, norm=None):
        """Propagate `feats` through the layer stack, one block per layer.

        `blocks` is a list of message-flow graphs (one per layer); passing
        None selects full-graph mode.
        """
        if blocks is None:
            # full graph training
            # NOTE(review): self.g is never assigned in __init__, so this
            # path requires the caller to attach `g` to the model first.
            blocks = [self.g] * len(self.layers)
        h = feats
        for layer, block in zip(self.layers, blocks):
            block = block.to(self.device)
            h = layer(block, h, block.edata['etype'], block.edata['norm'])
        return h
def gen_norm(g):
    """Store a per-edge normalisation factor 1/in-degree(dst) in g.edata['norm'].

    Each edge's weight is the reciprocal of its destination node's
    in-degree, shaped (num_edges, 1).
    """
    _, dst, eid = g.all_edges(form='all')
    _, inverse_index, counts = th.unique(dst, return_inverse=True,
                                          return_counts=True)
    in_degrees = counts[inverse_index]
    edge_norm = th.ones(eid.shape[0], device=eid.device) / in_degrees
    g.edata['norm'] = edge_norm.unsqueeze(1)
class NeighborSampler:
    """Multi-layer neighbour sampler that turns seed ids into DGL blocks.

    Used as a DataLoader collate_fn: maps a list of seed positions (into
    `target_idx`) to ``(seeds, blocks)`` where `blocks` is ordered from the
    outermost (input) layer to the innermost (output) layer.
    """
    def __init__(self, g, target_idx, fanouts):
        # fanouts: one entry per layer; None or -1 means take all in-edges.
        self.g = g
        self.target_idx = target_idx
        self.fanouts = fanouts

    def sample_blocks(self, seeds):
        blocks = []
        # NOTE(review): etypes/norms/ntypes are never filled or returned —
        # dead locals left over from a fuller implementation.
        etypes = []
        norms = []
        ntypes = []
        seeds = th.tensor(seeds).long()
        # Translate seed positions into node ids of the homogeneous graph.
        cur = self.target_idx[seeds]
        for fanout in self.fanouts:
            if fanout is None or fanout == -1:
                # take every in-edge of the current frontier
                frontier = dgl.in_subgraph(self.g, cur)
            else:
                frontier = dgl.sampling.sample_neighbors(self.g, cur, fanout)
            block = dgl.to_block(frontier, cur)
            # attach 1/in-degree edge normalisation used by RelGraphConv
            gen_norm(block)
            cur = block.srcdata[dgl.NID]
            # prepend so the outermost layer ends up first
            blocks.insert(0, block)
        return seeds, blocks
@utils.thread_wrapped_func
def run(proc_id, n_gpus, n_cpus, args, devices, dataset, split, queue=None):
    """Per-GPU training worker for the RGCN benchmark.

    Runs 4 warm-up minibatches, then times up to 11 minibatches of
    sampled RGCN training (forward, loss, backward, step) with a
    distributed barrier per iteration; rank 0 puts the per-iteration
    timings on `queue` as a numpy array.
    """
    from .rgcn_model import RelGraphEmbedLayer
    dev_id = devices[proc_id]
    g, node_feats, num_of_ntype, num_classes, num_rels, target_idx, \
        train_idx, val_idx, test_idx, labels = dataset
    labels = labels.cuda(dev_id)
    if split is not None:
        # each process trains on its own shard of the training indices
        train_seed, val_seed, test_seed = split
        train_idx = train_idx[train_seed]
        # val_idx = val_idx[val_seed]
        # test_idx = test_idx[test_seed]
    fanouts = args.fanout
    node_tids = g.ndata[dgl.NTYPE]
    sampler = NeighborSampler(g, target_idx, fanouts)
    loader = DataLoader(dataset=train_idx.numpy(),
                        batch_size=args.batch_size,
                        collate_fn=sampler.sample_blocks,
                        shuffle=True,
                        num_workers=args.num_workers)
    world_size = n_gpus
    dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
        master_ip='127.0.0.1', master_port='12345')
    backend = 'nccl'
    # using sparse embedding or using mix_cpu_gpu model (embedding model can not be stored in GPU)
    if args.dgl_sparse is False:
        backend = 'gloo'
    print("backend using {}".format(backend))
    th.distributed.init_process_group(backend=backend,
                                      init_method=dist_init_method,
                                      world_size=world_size,
                                      rank=dev_id)
    # node features
    # None for one-hot feature, if not none, it should be the feature tensor.
    #
    embed_layer = RelGraphEmbedLayer(dev_id,
                                   g.number_of_nodes(),
                                   node_tids,
                                   num_of_ntype,
                                   node_feats,
                                   args.n_hidden,
                                   dgl_sparse=args.dgl_sparse)
    # create model
    # all model params are in device.
    model = EntityClassify(dev_id,
                           g.number_of_nodes(),
                           args.n_hidden,
                           num_classes,
                           num_rels,
                           num_bases=args.n_bases,
                           num_hidden_layers=args.n_layers - 2,
                           dropout=args.dropout,
                           use_self_loop=args.use_self_loop,
                           low_mem=args.low_mem,
                           layer_norm=args.layer_norm)
    model.cuda(dev_id)
    model = DistributedDataParallel(
        model, device_ids=[dev_id], output_device=dev_id)
    if args.dgl_sparse:
        embed_layer.cuda(dev_id)
        if len(list(embed_layer.parameters())) > 0:
            embed_layer = DistributedDataParallel(
                embed_layer, device_ids=[dev_id], output_device=dev_id)
    else:
        # mix_cpu_gpu: embeddings stay on CPU, no device pinning
        if len(list(embed_layer.parameters())) > 0:
            embed_layer = DistributedDataParallel(
                embed_layer, device_ids=None, output_device=None)
    # optimizer
    dense_params = list(model.parameters())
    if args.node_feats:
        if n_gpus > 1:
            dense_params += list(embed_layer.module.embeds.parameters())
        else:
            dense_params += list(embed_layer.embeds.parameters())
    optimizer = th.optim.Adam(dense_params, lr=args.lr,
                              weight_decay=args.l2norm)
    if args.dgl_sparse:
        # dense Adam over everything plus a dedicated sparse optimizer
        all_params = list(model.parameters()) + list(embed_layer.parameters())
        optimizer = th.optim.Adam(
            all_params, lr=args.lr, weight_decay=args.l2norm)
        if n_gpus > 1 and isinstance(embed_layer, DistributedDataParallel):
            dgl_emb = embed_layer.module.dgl_emb
        else:
            dgl_emb = embed_layer.dgl_emb
        emb_optimizer = dgl.optim.SparseAdam(
            params=dgl_emb, lr=args.sparse_lr, eps=1e-8) if len(dgl_emb) > 0 else None
    else:
        if n_gpus > 1:
            embs = list(embed_layer.module.node_embeds.parameters())
        else:
            embs = list(embed_layer.node_embeds.parameters())
        emb_optimizer = th.optim.SparseAdam(
            embs, lr=args.sparse_lr) if len(embs) > 0 else None
    # training loop
    print("start training...")
    forward_time = []
    backward_time = []
    # NOTE(review): train_time/validation_time/test_time/last_val_acc/
    # do_test/steps are never updated below — leftovers from the full
    # training script this benchmark was cut from.
    train_time = 0
    validation_time = 0
    test_time = 0
    last_val_acc = 0.0
    do_test = False
    if n_gpus > 1 and n_cpus - args.num_workers > 0:
        th.set_num_threads(n_cpus-args.num_workers)
    steps = 0
    time_records = []
    model.train()
    embed_layer.train()
    # Warm up
    for i, sample_data in enumerate(loader):
        seeds, blocks = sample_data
        t0 = time.time()
        feats = embed_layer(blocks[0].srcdata[dgl.NID],
                            blocks[0].srcdata['ntype'],
                            blocks[0].srcdata['type_id'],
                            node_feats)
        logits = model(blocks, feats)
        loss = F.cross_entropy(logits, labels[seeds])
        t1 = time.time()
        optimizer.zero_grad()
        if emb_optimizer is not None:
            emb_optimizer.zero_grad()
        loss.backward()
        if emb_optimizer is not None:
            emb_optimizer.step()
        optimizer.step()
        gc.collect()
        if i >= 3:
            break
    # real timed iterations
    for i, sample_data in enumerate(loader):
        seeds, blocks = sample_data
        t0 = time.time()
        feats = embed_layer(blocks[0].srcdata[dgl.NID],
                            blocks[0].srcdata['ntype'],
                            blocks[0].srcdata['type_id'],
                            node_feats)
        logits = model(blocks, feats)
        loss = F.cross_entropy(logits, labels[seeds])
        t1 = time.time()
        optimizer.zero_grad()
        if emb_optimizer is not None:
            emb_optimizer.zero_grad()
        loss.backward()
        if emb_optimizer is not None:
            emb_optimizer.step()
        optimizer.step()
        # synchronise all ranks so the timing reflects the slowest worker
        th.distributed.barrier()
        t2 = time.time()
        forward_time.append(t1 - t0)
        backward_time.append(t2 - t1)
        time_records.append(t2 - t0)
        gc.collect()
        if i >= 10:
            break
    if proc_id == 0:
        queue.put(np.array(time_records))
@utils.skip_if_not_4gpu()
@utils.benchmark('time', timeout=600)
@utils.parametrize('data', ['am', 'ogbn-mag'])
@utils.parametrize('low_mem', [True, False])
@utils.parametrize('dgl_sparse', [True, False])
def track_time(data, low_mem, dgl_sparse):
    """Benchmark entry point: prepare the data, spawn one `run` worker per
    GPU, and return the mean per-iteration training time (with the first
    iterations excluded as warm-up)."""
    # load graph data
    dataset = utils.process_data(data)
    args = config()
    devices = [0, 1, 2, 3]
    args.low_mem = low_mem
    args.dgl_sparse = dgl_sparse
    args.dataset = dataset
    # NOTE(review): ogb_dataset is hard-coded False, so the OGB branch
    # below never runs — 'ogbn-mag' goes through the generic hetero path.
    ogb_dataset = False
    if data == 'am':
        args.n_bases = 40
        args.l2norm = 5e-4
    elif data == 'ogbn-mag':
        args.n_bases = 2
        args.l2norm = 0
    else:
        raise ValueError()
    if ogb_dataset is True:
        split_idx = dataset.get_idx_split()
        train_idx = split_idx["train"]['paper']
        val_idx = split_idx["valid"]['paper']
        test_idx = split_idx["test"]['paper']
        hg_orig, labels = dataset[0]
        # add a reverse edge type for every canonical edge type
        subgs = {}
        for etype in hg_orig.canonical_etypes:
            u, v = hg_orig.all_edges(etype=etype)
            subgs[etype] = (u, v)
            subgs[(etype[2], 'rev-'+etype[1], etype[0])] = (v, u)
        hg = dgl.heterograph(subgs)
        hg.nodes['paper'].data['feat'] = hg_orig.nodes['paper'].data['feat']
        labels = labels['paper'].squeeze()
        num_rels = len(hg.canonical_etypes)
        num_of_ntype = len(hg.ntypes)
        num_classes = dataset.num_classes
        if args.dataset == 'ogbn-mag':
            category = 'paper'
        print('Number of relations: {}'.format(num_rels))
        print('Number of class: {}'.format(num_classes))
        print('Number of train: {}'.format(len(train_idx)))
        print('Number of valid: {}'.format(len(val_idx)))
        print('Number of test: {}'.format(len(test_idx)))
    else:
        # Load from hetero-graph
        hg = dataset[0]
        num_rels = len(hg.canonical_etypes)
        num_of_ntype = len(hg.ntypes)
        category = dataset.predict_category
        num_classes = dataset.num_classes
        train_mask = hg.nodes[category].data.pop('train_mask')
        test_mask = hg.nodes[category].data.pop('test_mask')
        labels = hg.nodes[category].data.pop('labels')
        train_idx = th.nonzero(train_mask, as_tuple=False).squeeze()
        test_idx = th.nonzero(test_mask, as_tuple=False).squeeze()
        # AIFB, MUTAG, BGS and AM datasets do not provide validation set split.
        # Split train set into train and validation if args.validation is set
        # otherwise use train set as the validation set.
        if args.validation:
            val_idx = train_idx[:len(train_idx) // 5]
            train_idx = train_idx[len(train_idx) // 5:]
        else:
            val_idx = train_idx
    node_feats = []
    for ntype in hg.ntypes:
        # store the node count for featureless types (embedding lookup),
        # otherwise share the feature tensor across worker processes
        if len(hg.nodes[ntype].data) == 0 or args.node_feats is False:
            node_feats.append(hg.number_of_nodes(ntype))
        else:
            assert len(hg.nodes[ntype].data) == 1
            feat = hg.nodes[ntype].data.pop('feat')
            node_feats.append(feat.share_memory_())
    # get target category id
    category_id = len(hg.ntypes)
    for i, ntype in enumerate(hg.ntypes):
        if ntype == category:
            category_id = i
        print('{}:{}'.format(i, ntype))
    g = dgl.to_homogeneous(hg)
    g.ndata['ntype'] = g.ndata[dgl.NTYPE]
    g.ndata['ntype'].share_memory_()
    g.edata['etype'] = g.edata[dgl.ETYPE]
    g.edata['etype'].share_memory_()
    g.ndata['type_id'] = g.ndata[dgl.NID]
    g.ndata['type_id'].share_memory_()
    node_ids = th.arange(g.number_of_nodes())
    # find out the target node ids
    node_tids = g.ndata[dgl.NTYPE]
    loc = (node_tids == category_id)
    target_idx = node_ids[loc]
    target_idx.share_memory_()
    train_idx.share_memory_()
    val_idx.share_memory_()
    test_idx.share_memory_()
    # Create csr/coo/csc formats before launching training processes with multi-gpu.
    # This avoids creating certain formats in each sub-process, which saves memory and CPU.
    g.create_formats_()
    n_gpus = len(devices)
    n_cpus = mp.cpu_count()
    ctx = mp.get_context('fork')
    queue = ctx.Queue()
    procs = []
    num_train_seeds = train_idx.shape[0]
    num_valid_seeds = val_idx.shape[0]
    num_test_seeds = test_idx.shape[0]
    train_seeds = th.randperm(num_train_seeds)
    valid_seeds = th.randperm(num_valid_seeds)
    test_seeds = th.randperm(num_test_seeds)
    tseeds_per_proc = num_train_seeds // n_gpus
    vseeds_per_proc = num_valid_seeds // n_gpus
    tstseeds_per_proc = num_test_seeds // n_gpus
    for proc_id in range(n_gpus):
        # we have multi-gpu for training, evaluation and testing
        # so split train set, valid set and test set into num-of-gpu parts.
        proc_train_seeds = train_seeds[proc_id * tseeds_per_proc:
                                       (proc_id + 1) * tseeds_per_proc
                                       if (proc_id + 1) * tseeds_per_proc < num_train_seeds
                                       else num_train_seeds]
        proc_valid_seeds = valid_seeds[proc_id * vseeds_per_proc:
                                       (proc_id + 1) * vseeds_per_proc
                                       if (proc_id + 1) * vseeds_per_proc < num_valid_seeds
                                       else num_valid_seeds]
        proc_test_seeds = test_seeds[proc_id * tstseeds_per_proc:
                                     (proc_id + 1) * tstseeds_per_proc
                                     if (proc_id + 1) * tstseeds_per_proc < num_test_seeds
                                     else num_test_seeds]
        p = ctx.Process(target=run, args=(proc_id, n_gpus, n_cpus // n_gpus, args, devices,
                                          (g, node_feats, num_of_ntype, num_classes, num_rels, target_idx,
                                           train_idx, val_idx, test_idx, labels),
                                          (proc_train_seeds,
                                           proc_valid_seeds, proc_test_seeds),
                                          queue))
        p.start()
        procs.append(p)
    for p in procs:
        p.join()
    time_records = queue.get(block=False)
    num_exclude = 10  # exclude first 10 iterations
    if len(time_records) < 15:
        # exclude less if less records
        num_exclude = int(len(time_records)*0.3)
    return np.mean(time_records[num_exclude:])
def config():
    """Return the default hyper-parameter set for the RGCN benchmark.

    Mirrors the argparse defaults of the original training script as a
    plain namespace; callers (track_time) override dataset-specific
    fields such as n_bases, l2norm, low_mem and dgl_sparse afterwards.
    """
    defaults = dict(
        dropout=0,
        n_hidden=16,
        gpu="0,1,2,3",
        lr=1e-2,
        sparse_lr=2e-2,
        n_bases=-1,
        n_layers=2,
        dataset=None,
        l2norm=0,
        fanout=[10, 25],
        use_self_loop=True,
        batch_size=100,
        layer_norm=False,
        validation=False,
        node_feats=False,
        num_workers=0,
        dgl_sparse=False,
        low_mem=False,
    )
    return SimpleNamespace(**defaults)
if __name__ == '__main__':
    # Bug fix: track_time takes three parametrized arguments
    # (data, low_mem, dgl_sparse); calling it with only the dataset name
    # raised TypeError.  Use the config() defaults for the two flags.
    track_time('am', False, False)
| [
"dgl.heterograph",
"torch.randperm",
"dgl.in_subgraph",
"numpy.array",
"dgl.optim.SparseAdam",
"torch.multiprocessing.cpu_count",
"torch.distributed.barrier",
"numpy.mean",
"torch.unique",
"dgl.to_block",
"torch.optim.SparseAdam",
"torch.nn.ModuleList",
"dgl.nn.RelGraphConv",
"torch.set_nu... | [((3078, 3131), 'torch.unique', 'th.unique', (['v'], {'return_inverse': '(True)', 'return_counts': '(True)'}), '(v, return_inverse=True, return_counts=True)\n', (3087, 3131), True, 'import torch as th\n'), ((5341, 5462), 'torch.distributed.init_process_group', 'th.distributed.init_process_group', ([], {'backend': 'backend', 'init_method': 'dist_init_method', 'world_size': 'world_size', 'rank': 'dev_id'}), '(backend=backend, init_method=\n dist_init_method, world_size=world_size, rank=dev_id)\n', (5374, 5462), True, 'import torch as th\n'), ((6670, 6743), 'torch.nn.parallel.DistributedDataParallel', 'DistributedDataParallel', (['model'], {'device_ids': '[dev_id]', 'output_device': 'dev_id'}), '(model, device_ids=[dev_id], output_device=dev_id)\n', (6693, 6743), False, 'from torch.nn.parallel import DistributedDataParallel\n'), ((7441, 7506), 'torch.optim.Adam', 'th.optim.Adam', (['dense_params'], {'lr': 'args.lr', 'weight_decay': 'args.l2norm'}), '(dense_params, lr=args.lr, weight_decay=args.l2norm)\n', (7454, 7506), True, 'import torch as th\n'), ((13708, 13730), 'dgl.to_homogeneous', 'dgl.to_homogeneous', (['hg'], {}), '(hg)\n', (13726, 13730), False, 'import dgl\n'), ((14514, 14528), 'torch.multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (14526, 14528), True, 'import torch.multiprocessing as mp\n'), ((14540, 14562), 'torch.multiprocessing.get_context', 'mp.get_context', (['"""fork"""'], {}), "('fork')\n", (14554, 14562), True, 'import torch.multiprocessing as mp\n'), ((14739, 14767), 'torch.randperm', 'th.randperm', (['num_train_seeds'], {}), '(num_train_seeds)\n', (14750, 14767), True, 'import torch as th\n'), ((14786, 14814), 'torch.randperm', 'th.randperm', (['num_valid_seeds'], {}), '(num_valid_seeds)\n', (14797, 14814), True, 'import torch as th\n'), ((14832, 14859), 'torch.randperm', 'th.randperm', (['num_test_seeds'], {}), '(num_test_seeds)\n', (14843, 14859), True, 'import torch as th\n'), ((16822, 16857), 'numpy.mean', 
'np.mean', (['time_records[num_exclude:]'], {}), '(time_records[num_exclude:])\n', (16829, 16857), True, 'import numpy as np\n'), ((16944, 17239), 'types.SimpleNamespace', 'SimpleNamespace', ([], {'dropout': '(0)', 'n_hidden': '(16)', 'gpu': '"""0,1,2,3"""', 'lr': '(0.01)', 'sparse_lr': '(0.02)', 'n_bases': '(-1)', 'n_layers': '(2)', 'dataset': 'None', 'l2norm': '(0)', 'fanout': '[10, 25]', 'use_self_loop': '(True)', 'batch_size': '(100)', 'layer_norm': '(False)', 'validation': '(False)', 'node_feats': '(False)', 'num_workers': '(0)', 'dgl_sparse': '(False)', 'low_mem': '(False)'}), "(dropout=0, n_hidden=16, gpu='0,1,2,3', lr=0.01, sparse_lr=\n 0.02, n_bases=-1, n_layers=2, dataset=None, l2norm=0, fanout=[10, 25],\n use_self_loop=True, batch_size=100, layer_norm=False, validation=False,\n node_feats=False, num_workers=0, dgl_sparse=False, low_mem=False)\n", (16959, 17239), False, 'from types import SimpleNamespace\n'), ((1287, 1330), 'torch.device', 'th.device', (["(device if device >= 0 else 'cpu')"], {}), "(device if device >= 0 else 'cpu')\n", (1296, 1330), True, 'import torch as th\n'), ((1735, 1750), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1748, 1750), True, 'import torch.nn as nn\n'), ((3187, 3227), 'torch.ones', 'th.ones', (['eid.shape[0]'], {'device': 'eid.device'}), '(eid.shape[0], device=eid.device)\n', (3194, 3227), True, 'import torch as th\n'), ((7661, 7724), 'torch.optim.Adam', 'th.optim.Adam', (['all_params'], {'lr': 'args.lr', 'weight_decay': 'args.l2norm'}), '(all_params, lr=args.lr, weight_decay=args.l2norm)\n', (7674, 7724), True, 'import torch as th\n'), ((8602, 8647), 'torch.set_num_threads', 'th.set_num_threads', (['(n_cpus - args.num_workers)'], {}), '(n_cpus - args.num_workers)\n', (8620, 8647), True, 'import torch as th\n'), ((8833, 8844), 'time.time', 'time.time', ([], {}), '()\n', (8842, 8844), False, 'import time\n'), ((9108, 9146), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels[seeds]'], 
{}), '(logits, labels[seeds])\n', (9123, 9146), True, 'import torch.nn.functional as F\n'), ((9160, 9171), 'time.time', 'time.time', ([], {}), '()\n', (9169, 9171), False, 'import time\n'), ((9407, 9419), 'gc.collect', 'gc.collect', ([], {}), '()\n', (9417, 9419), False, 'import gc\n'), ((9572, 9583), 'time.time', 'time.time', ([], {}), '()\n', (9581, 9583), False, 'import time\n'), ((9847, 9885), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels[seeds]'], {}), '(logits, labels[seeds])\n', (9862, 9885), True, 'import torch.nn.functional as F\n'), ((9899, 9910), 'time.time', 'time.time', ([], {}), '()\n', (9908, 9910), False, 'import time\n'), ((10146, 10170), 'torch.distributed.barrier', 'th.distributed.barrier', ([], {}), '()\n', (10168, 10170), True, 'import torch as th\n'), ((10184, 10195), 'time.time', 'time.time', ([], {}), '()\n', (10193, 10195), False, 'import time\n'), ((10318, 10330), 'gc.collect', 'gc.collect', ([], {}), '()\n', (10328, 10330), False, 'import gc\n'), ((11570, 11592), 'dgl.heterograph', 'dgl.heterograph', (['subgs'], {}), '(subgs)\n', (11585, 11592), False, 'import dgl\n'), ((1792, 1992), 'dgl.nn.RelGraphConv', 'RelGraphConv', (['self.h_dim', 'self.h_dim', 'self.num_rels', '"""basis"""', 'self.num_bases'], {'activation': 'F.relu', 'self_loop': 'self.use_self_loop', 'low_mem': 'self.low_mem', 'dropout': 'self.dropout', 'layer_norm': 'layer_norm'}), "(self.h_dim, self.h_dim, self.num_rels, 'basis', self.num_bases,\n activation=F.relu, self_loop=self.use_self_loop, low_mem=self.low_mem,\n dropout=self.dropout, layer_norm=layer_norm)\n", (1804, 1992), False, 'from dgl.nn import RelGraphConv\n'), ((2402, 2582), 'dgl.nn.RelGraphConv', 'RelGraphConv', (['self.h_dim', 'self.out_dim', 'self.num_rels', '"""basis"""', 'self.num_bases'], {'activation': 'None', 'self_loop': 'self.use_self_loop', 'low_mem': 'self.low_mem', 'layer_norm': 'layer_norm'}), "(self.h_dim, self.out_dim, self.num_rels, 'basis', self.\n num_bases, 
activation=None, self_loop=self.use_self_loop, low_mem=self.\n low_mem, layer_norm=layer_norm)\n", (2414, 2582), False, 'from dgl.nn import RelGraphConv\n'), ((3903, 3930), 'dgl.to_block', 'dgl.to_block', (['frontier', 'cur'], {}), '(frontier, cur)\n', (3915, 3930), False, 'import dgl\n'), ((6888, 6967), 'torch.nn.parallel.DistributedDataParallel', 'DistributedDataParallel', (['embed_layer'], {'device_ids': '[dev_id]', 'output_device': 'dev_id'}), '(embed_layer, device_ids=[dev_id], output_device=dev_id)\n', (6911, 6967), False, 'from torch.nn.parallel import DistributedDataParallel\n'), ((7073, 7146), 'torch.nn.parallel.DistributedDataParallel', 'DistributedDataParallel', (['embed_layer'], {'device_ids': 'None', 'output_device': 'None'}), '(embed_layer, device_ids=None, output_device=None)\n', (7096, 7146), False, 'from torch.nn.parallel import DistributedDataParallel\n'), ((7943, 8009), 'dgl.optim.SparseAdam', 'dgl.optim.SparseAdam', ([], {'params': 'dgl_emb', 'lr': 'args.sparse_lr', 'eps': '(1e-08)'}), '(params=dgl_emb, lr=args.sparse_lr, eps=1e-08)\n', (7963, 8009), False, 'import dgl\n'), ((8254, 8298), 'torch.optim.SparseAdam', 'th.optim.SparseAdam', (['embs'], {'lr': 'args.sparse_lr'}), '(embs, lr=args.sparse_lr)\n', (8273, 8298), True, 'import torch as th\n'), ((10409, 10431), 'numpy.array', 'np.array', (['time_records'], {}), '(time_records)\n', (10417, 10431), True, 'import numpy as np\n'), ((2118, 2318), 'dgl.nn.RelGraphConv', 'RelGraphConv', (['self.h_dim', 'self.h_dim', 'self.num_rels', '"""basis"""', 'self.num_bases'], {'activation': 'F.relu', 'self_loop': 'self.use_self_loop', 'low_mem': 'self.low_mem', 'dropout': 'self.dropout', 'layer_norm': 'layer_norm'}), "(self.h_dim, self.h_dim, self.num_rels, 'basis', self.num_bases,\n activation=F.relu, self_loop=self.use_self_loop, low_mem=self.low_mem,\n dropout=self.dropout, layer_norm=layer_norm)\n", (2130, 2318), False, 'from dgl.nn import RelGraphConv\n'), ((3587, 3603), 'torch.tensor', 'th.tensor', 
(['seeds'], {}), '(seeds)\n', (3596, 3603), True, 'import torch as th\n'), ((3758, 3786), 'dgl.in_subgraph', 'dgl.in_subgraph', (['self.g', 'cur'], {}), '(self.g, cur)\n', (3773, 3786), False, 'import dgl\n'), ((3832, 3882), 'dgl.sampling.sample_neighbors', 'dgl.sampling.sample_neighbors', (['self.g', 'cur', 'fanout'], {}), '(self.g, cur, fanout)\n', (3861, 3882), False, 'import dgl\n'), ((12635, 12673), 'torch.nonzero', 'th.nonzero', (['train_mask'], {'as_tuple': '(False)'}), '(train_mask, as_tuple=False)\n', (12645, 12673), True, 'import torch as th\n'), ((12703, 12740), 'torch.nonzero', 'th.nonzero', (['test_mask'], {'as_tuple': '(False)'}), '(test_mask, as_tuple=False)\n', (12713, 12740), True, 'import torch as th\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
import numpy as np
import unittest
import faiss
import tempfile
import os
import io
import sys
import warnings
from multiprocessing.dummy import Pool as ThreadPool
from common import get_dataset, get_dataset_2
class TestIOVariants(unittest.TestCase):
    """Checks error handling when reading a corrupted serialized index."""

    def test_io_error(self):
        """Write an index to disk, truncate the file, and verify that reading
        it back raises a RuntimeError whose message mentions the filename."""
        d, n = 32, 1000
        x = np.random.uniform(size=(n, d)).astype('float32')
        index = faiss.IndexFlatL2(d)
        index.add(x)
        # BUG FIX: the original discarded the mkstemp file descriptor,
        # leaking it for the lifetime of the process.
        fd, fname = tempfile.mkstemp()
        os.close(fd)
        try:
            faiss.write_index(index, fname)

            # reading the intact file should be fine
            faiss.read_index(fname)

            # now damage the file by truncating it to half its size.
            # BUG FIX: use context managers so both file handles are closed
            # deterministically (the original opened them without closing).
            with open(fname, 'rb') as f:
                data = f.read()
            with open(fname, 'wb') as f:
                f.write(data[:len(data) // 2])

            # should make a nice readable exception that mentions the filename
            try:
                faiss.read_index(fname)
            except RuntimeError as e:
                if fname not in str(e):
                    raise
            else:
                # BUG FIX: the original used a bare `raise` here, which itself
                # raises "No active exception to re-raise" -- fail explicitly.
                self.fail('reading a damaged index did not raise')
        finally:
            if os.path.exists(fname):
                os.unlink(fname)
class TestCallbacks(unittest.TestCase):
    """Exercises the Python IO callback wrappers (PyCallbackIOReader/Writer),
    the buffered IO adaptors, and index transfer through a Unix pipe."""
    def do_write_callback(self, bsz):
        """Serialize an index through a Python write callback (optionally
        wrapped in a BufferedIOWriter with block size ``bsz``) and check
        that deserializing the bytes reproduces the index."""
        d, n = 32, 1000
        x = np.random.uniform(size=(n, d)).astype('float32')
        index = faiss.IndexFlatL2(d)
        index.add(x)
        f = io.BytesIO()
        # test with small block size
        writer = faiss.PyCallbackIOWriter(f.write, 1234)
        if bsz > 0:
            writer = faiss.BufferedIOWriter(writer, bsz)
        faiss.write_index(index, writer)
        del writer    # make sure all writes committed
        if sys.version_info[0] < 3:
            buf = f.getvalue()
        else:
            buf = f.getbuffer()
        index2 = faiss.deserialize_index(np.frombuffer(buf, dtype='uint8'))
        self.assertEqual(index.d, index2.d)
        self.assertTrue(np.all(
            faiss.vector_to_array(index.xb) == faiss.vector_to_array(index2.xb)
        ))
        # This is not a callable function: should raise an exception
        writer = faiss.PyCallbackIOWriter("blabla")
        self.assertRaises(
            Exception,
            faiss.write_index, index, writer
        )
    def test_buf_read(self):
        """Read raw float64 data back through a buffered Python read
        callback into a pre-allocated array via swig_ptr."""
        x = np.random.uniform(size=20)
        # NOTE(review): the mkstemp fd is discarded (leaked) and the `rb`
        # handle below is never closed explicitly -- relies on GC.
        _, fname = tempfile.mkstemp()
        try:
            x.tofile(fname)
            f = open(fname, 'rb')
            reader = faiss.PyCallbackIOReader(f.read, 1234)
            bsz = 123
            reader = faiss.BufferedIOReader(reader, bsz)
            y = np.zeros_like(x)
            print('nbytes=', y.nbytes)
            reader(faiss.swig_ptr(y), y.nbytes, 1)
            np.testing.assert_array_equal(x, y)
        finally:
            if os.path.exists(fname):
                os.unlink(fname)
    def do_read_callback(self, bsz):
        """Deserialize an index through a Python read callback (optionally
        wrapped in a BufferedIOReader with block size ``bsz``)."""
        d, n = 32, 1000
        x = np.random.uniform(size=(n, d)).astype('float32')
        index = faiss.IndexFlatL2(d)
        index.add(x)
        _, fname = tempfile.mkstemp()
        try:
            faiss.write_index(index, fname)
            f = open(fname, 'rb')
            reader = faiss.PyCallbackIOReader(f.read, 1234)
            if bsz > 0:
                reader = faiss.BufferedIOReader(reader, bsz)
            index2 = faiss.read_index(reader)
            self.assertEqual(index.d, index2.d)
            np.testing.assert_array_equal(
                faiss.vector_to_array(index.xb),
                faiss.vector_to_array(index2.xb)
            )
            # This is not a callable function: should raise an exception
            reader = faiss.PyCallbackIOReader("blabla")
            self.assertRaises(
                Exception,
                faiss.read_index, reader
            )
        finally:
            if os.path.exists(fname):
                os.unlink(fname)
    def test_write_callback(self):
        # unbuffered write path
        self.do_write_callback(0)
    def test_write_buffer(self):
        # buffered write path with two different block sizes
        self.do_write_callback(123)
        self.do_write_callback(2345)
    def test_read_callback(self):
        # unbuffered read path
        self.do_read_callback(0)
    def test_read_callback_buffered(self):
        # buffered read path with two different block sizes
        self.do_read_callback(123)
        self.do_read_callback(12345)
    def test_read_buffer(self):
        """Read an index through a BufferedIOReader wrapping a native
        FileIOReader (no Python callback involved)."""
        d, n = 32, 1000
        x = np.random.uniform(size=(n, d)).astype('float32')
        index = faiss.IndexFlatL2(d)
        index.add(x)
        _, fname = tempfile.mkstemp()
        try:
            faiss.write_index(index, fname)
            reader = faiss.BufferedIOReader(
                faiss.FileIOReader(fname), 1234)
            index2 = faiss.read_index(reader)
            self.assertEqual(index.d, index2.d)
            np.testing.assert_array_equal(
                faiss.vector_to_array(index.xb),
                faiss.vector_to_array(index2.xb)
            )
        finally:
            if os.path.exists(fname):
                os.unlink(fname)
    def test_transfer_pipe(self):
        """ transfer an index through a Unix pipe """
        d, n = 32, 1000
        x = np.random.uniform(size=(n, d)).astype('float32')
        index = faiss.IndexFlatL2(d)
        index.add(x)
        Dref, Iref = index.search(x, 10)
        rf, wf = os.pipe()
        # start thread that will decompress the index
        def index_from_pipe():
            reader = faiss.PyCallbackIOReader(lambda size: os.read(rf, size))
            return faiss.read_index(reader)
        # the reader must run concurrently, otherwise the pipe would fill
        # up and the writer below would block forever
        fut = ThreadPool(1).apply_async(index_from_pipe, ())
        # write to pipe
        writer = faiss.PyCallbackIOWriter(lambda b: os.write(wf, b))
        faiss.write_index(index, writer)
        index2 = fut.get()
        # closing is not really useful but it does not hurt
        os.close(wf)
        os.close(rf)
        # searching the transferred index must give identical results
        Dnew, Inew = index2.search(x, 10)
        np.testing.assert_array_equal(Iref, Inew)
        np.testing.assert_array_equal(Dref, Dnew)
| [
"faiss.swig_ptr",
"io.BytesIO",
"faiss.PyCallbackIOWriter",
"os.read",
"os.path.exists",
"faiss.write_index",
"faiss.BufferedIOWriter",
"os.unlink",
"numpy.frombuffer",
"numpy.testing.assert_array_equal",
"faiss.BufferedIOReader",
"faiss.FileIOReader",
"os.close",
"os.write",
"tempfile.m... | [((587, 607), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['d'], {}), '(d)\n', (604, 607), False, 'import faiss\n'), ((648, 666), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (664, 666), False, 'import tempfile\n'), ((1504, 1524), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['d'], {}), '(d)\n', (1521, 1524), False, 'import faiss\n'), ((1559, 1571), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1569, 1571), False, 'import io\n'), ((1626, 1665), 'faiss.PyCallbackIOWriter', 'faiss.PyCallbackIOWriter', (['f.write', '(1234)'], {}), '(f.write, 1234)\n', (1650, 1665), False, 'import faiss\n'), ((1753, 1785), 'faiss.write_index', 'faiss.write_index', (['index', 'writer'], {}), '(index, writer)\n', (1770, 1785), False, 'import faiss\n'), ((2286, 2320), 'faiss.PyCallbackIOWriter', 'faiss.PyCallbackIOWriter', (['"""blabla"""'], {}), "('blabla')\n", (2310, 2320), False, 'import faiss\n'), ((2468, 2494), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(20)'}), '(size=20)\n', (2485, 2494), True, 'import numpy as np\n'), ((2515, 2533), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (2531, 2533), False, 'import tempfile\n'), ((3150, 3170), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['d'], {}), '(d)\n', (3167, 3170), False, 'import faiss\n'), ((3212, 3230), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (3228, 3230), False, 'import tempfile\n'), ((4547, 4567), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['d'], {}), '(d)\n', (4564, 4567), False, 'import faiss\n'), ((4609, 4627), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (4625, 4627), False, 'import tempfile\n'), ((5312, 5332), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['d'], {}), '(d)\n', (5329, 5332), False, 'import faiss\n'), ((5413, 5422), 'os.pipe', 'os.pipe', ([], {}), '()\n', (5420, 5422), False, 'import os\n'), ((5796, 5828), 'faiss.write_index', 'faiss.write_index', (['index', 'writer'], {}), '(index, writer)\n', (5813, 5828), False, 'import 
faiss\n'), ((5926, 5938), 'os.close', 'os.close', (['wf'], {}), '(wf)\n', (5934, 5938), False, 'import os\n'), ((5947, 5959), 'os.close', 'os.close', (['rf'], {}), '(rf)\n', (5955, 5959), False, 'import os\n'), ((6012, 6053), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['Iref', 'Inew'], {}), '(Iref, Inew)\n', (6041, 6053), True, 'import numpy as np\n'), ((6062, 6103), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['Dref', 'Dnew'], {}), '(Dref, Dnew)\n', (6091, 6103), True, 'import numpy as np\n'), ((692, 723), 'faiss.write_index', 'faiss.write_index', (['index', 'fname'], {}), '(index, fname)\n', (709, 723), False, 'import faiss\n'), ((766, 789), 'faiss.read_index', 'faiss.read_index', (['fname'], {}), '(fname)\n', (782, 789), False, 'import faiss\n'), ((1266, 1287), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (1280, 1287), False, 'import os\n'), ((1708, 1743), 'faiss.BufferedIOWriter', 'faiss.BufferedIOWriter', (['writer', 'bsz'], {}), '(writer, bsz)\n', (1730, 1743), False, 'import faiss\n'), ((1996, 2029), 'numpy.frombuffer', 'np.frombuffer', (['buf'], {'dtype': '"""uint8"""'}), "(buf, dtype='uint8')\n", (2009, 2029), True, 'import numpy as np\n'), ((2631, 2669), 'faiss.PyCallbackIOReader', 'faiss.PyCallbackIOReader', (['f.read', '(1234)'], {}), '(f.read, 1234)\n', (2655, 2669), False, 'import faiss\n'), ((2714, 2749), 'faiss.BufferedIOReader', 'faiss.BufferedIOReader', (['reader', 'bsz'], {}), '(reader, bsz)\n', (2736, 2749), False, 'import faiss\n'), ((2767, 2783), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (2780, 2783), True, 'import numpy as np\n'), ((2887, 2922), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['x', 'y'], {}), '(x, y)\n', (2916, 2922), True, 'import numpy as np\n'), ((2955, 2976), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (2969, 2976), False, 'import os\n'), ((3256, 3287), 'faiss.write_index', 
'faiss.write_index', (['index', 'fname'], {}), '(index, fname)\n', (3273, 3287), False, 'import faiss\n'), ((3345, 3383), 'faiss.PyCallbackIOReader', 'faiss.PyCallbackIOReader', (['f.read', '(1234)'], {}), '(f.read, 1234)\n', (3369, 3383), False, 'import faiss\n'), ((3492, 3516), 'faiss.read_index', 'faiss.read_index', (['reader'], {}), '(reader)\n', (3508, 3516), False, 'import faiss\n'), ((3816, 3850), 'faiss.PyCallbackIOReader', 'faiss.PyCallbackIOReader', (['"""blabla"""'], {}), "('blabla')\n", (3840, 3850), False, 'import faiss\n'), ((3996, 4017), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (4010, 4017), False, 'import os\n'), ((4653, 4684), 'faiss.write_index', 'faiss.write_index', (['index', 'fname'], {}), '(index, fname)\n', (4670, 4684), False, 'import faiss\n'), ((4802, 4826), 'faiss.read_index', 'faiss.read_index', (['reader'], {}), '(reader)\n', (4818, 4826), False, 'import faiss\n'), ((5064, 5085), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (5078, 5085), False, 'import os\n'), ((5607, 5631), 'faiss.read_index', 'faiss.read_index', (['reader'], {}), '(reader)\n', (5623, 5631), False, 'import faiss\n'), ((522, 552), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n, d)'}), '(size=(n, d))\n', (539, 552), True, 'import numpy as np\n'), ((1065, 1088), 'faiss.read_index', 'faiss.read_index', (['fname'], {}), '(fname)\n', (1081, 1088), False, 'import faiss\n'), ((1305, 1321), 'os.unlink', 'os.unlink', (['fname'], {}), '(fname)\n', (1314, 1321), False, 'import os\n'), ((1439, 1469), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n, d)'}), '(size=(n, d))\n', (1456, 1469), True, 'import numpy as np\n'), ((2842, 2859), 'faiss.swig_ptr', 'faiss.swig_ptr', (['y'], {}), '(y)\n', (2856, 2859), False, 'import faiss\n'), ((2994, 3010), 'os.unlink', 'os.unlink', (['fname'], {}), '(fname)\n', (3003, 3010), False, 'import os\n'), ((3085, 3115), 'numpy.random.uniform', 'np.random.uniform', ([], 
{'size': '(n, d)'}), '(size=(n, d))\n', (3102, 3115), True, 'import numpy as np\n'), ((3434, 3469), 'faiss.BufferedIOReader', 'faiss.BufferedIOReader', (['reader', 'bsz'], {}), '(reader, bsz)\n', (3456, 3469), False, 'import faiss\n'), ((3625, 3656), 'faiss.vector_to_array', 'faiss.vector_to_array', (['index.xb'], {}), '(index.xb)\n', (3646, 3656), False, 'import faiss\n'), ((3674, 3706), 'faiss.vector_to_array', 'faiss.vector_to_array', (['index2.xb'], {}), '(index2.xb)\n', (3695, 3706), False, 'import faiss\n'), ((4035, 4051), 'os.unlink', 'os.unlink', (['fname'], {}), '(fname)\n', (4044, 4051), False, 'import os\n'), ((4482, 4512), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n, d)'}), '(size=(n, d))\n', (4499, 4512), True, 'import numpy as np\n'), ((4747, 4772), 'faiss.FileIOReader', 'faiss.FileIOReader', (['fname'], {}), '(fname)\n', (4765, 4772), False, 'import faiss\n'), ((4935, 4966), 'faiss.vector_to_array', 'faiss.vector_to_array', (['index.xb'], {}), '(index.xb)\n', (4956, 4966), False, 'import faiss\n'), ((4984, 5016), 'faiss.vector_to_array', 'faiss.vector_to_array', (['index2.xb'], {}), '(index2.xb)\n', (5005, 5016), False, 'import faiss\n'), ((5103, 5119), 'os.unlink', 'os.unlink', (['fname'], {}), '(fname)\n', (5112, 5119), False, 'import os\n'), ((5247, 5277), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n, d)'}), '(size=(n, d))\n', (5264, 5277), True, 'import numpy as np\n'), ((5647, 5660), 'multiprocessing.dummy.Pool', 'ThreadPool', (['(1)'], {}), '(1)\n', (5657, 5660), True, 'from multiprocessing.dummy import Pool as ThreadPool\n'), ((5771, 5786), 'os.write', 'os.write', (['wf', 'b'], {}), '(wf, b)\n', (5779, 5786), False, 'import os\n'), ((2120, 2151), 'faiss.vector_to_array', 'faiss.vector_to_array', (['index.xb'], {}), '(index.xb)\n', (2141, 2151), False, 'import faiss\n'), ((2155, 2187), 'faiss.vector_to_array', 'faiss.vector_to_array', (['index2.xb'], {}), '(index2.xb)\n', (2176, 2187), False, 'import 
faiss\n'), ((5569, 5586), 'os.read', 'os.read', (['rf', 'size'], {}), '(rf, size)\n', (5576, 5586), False, 'import os\n')] |
# coding=utf-8
import math
import numpy as np
from torch.autograd import Variable
class GloveHelper(object):
    """Reads GloVe word vectors from a text file and loads them into a
    torch embedding layer.

    The empirical mean/std of (up to) the first 5000 vectors are used to
    sample embeddings for words that are missing from the GloVe file.
    """

    def __init__(self, glove_file):
        self.glove_file = glove_file
        # Estimate embedding statistics from the first 5000 vectors.
        # NOTE(review): assumes 100-dimensional GloVe vectors -- TODO confirm.
        embeds = np.zeros((5000, 100), dtype='float32')
        for i, (word, embed) in enumerate(self.embeddings):
            if i == 5000: break
            embeds[i] = embed
        self.mean = np.mean(embeds)
        self.std = np.std(embeds)

    @property
    def embeddings(self):
        """Yield ``(word, vector)`` pairs from the GloVe file."""
        with open(self.glove_file, 'r', encoding='utf-8') as f:
            for line in f:
                tokens = line.split()
                word, embed = tokens[0], np.array([float(tok) for tok in tokens[1:]])
                yield word, embed

    def emulate_embeddings(self, shape):
        """Sample random embeddings matching the GloVe mean/std statistics."""
        samples = np.random.normal(self.mean, self.std, size=shape)
        return samples

    def load_to(self, embed_layer, vocab):
        """Copy GloVe vectors into ``embed_layer`` for every word in ``vocab``;
        rows for out-of-GloVe words are filled with sampled random vectors.

        BUG FIX: the original wrote ``embed_layer.weight[word_id].data = ...``,
        which only rebinds the ``.data`` attribute of a temporary view (or, for
        the list-index variant, of a copy) and never modifies the parameter
        storage -- i.e. the load was a silent no-op. Write into
        ``embed_layer.weight.data`` directly instead.
        """
        weight = embed_layer.weight.data
        word_ids = set(range(embed_layer.num_embeddings))
        for word, embed in self.embeddings:
            if word in vocab:
                word_id = vocab[word]
                if word_id not in word_ids:
                    continue  # duplicate entry in the GloVe file; keep the first
                word_ids.remove(word_id)
                weight[word_id] = weight.new(embed)
        word_ids = list(word_ids)
        weight[word_ids] = weight.new(
            self.emulate_embeddings(shape=(len(word_ids), embed_layer.embedding_dim)))

    @property
    def words(self):
        """Yield just the words (first token of each line) from the GloVe file."""
        with open(self.glove_file, 'r') as f:
            for line in f:
                tokens = line.split()
                yield tokens[0]
def batch_iter(examples, batch_size, shuffle=False):
    """Yield successive batches of ``examples``.

    Each batch is a list of at most ``batch_size`` items; the last batch may
    be smaller. When ``shuffle`` is true, the iteration order is randomized.
    """
    order = list(range(len(examples)))
    if shuffle:
        np.random.shuffle(order)
    num_batches = int(math.ceil(len(examples) / float(batch_size)))
    for b in range(num_batches):
        chunk = order[b * batch_size: (b + 1) * batch_size]
        yield [examples[idx] for idx in chunk]
def get_parser_class(lang):
    """Return the parser class registered for language ``lang``.

    Imports are deferred to call time so that unused backends are not loaded.
    Raises ValueError for an unrecognized language name.
    """
    if lang == 'wikisql':
        from model.wikisql.parser import WikiSqlParser
        return WikiSqlParser
    if lang in ('python', 'lambda_dcs', 'prolog', 'python3'):
        from model.parser import Parser
        return Parser
    raise ValueError('unknown parser class for %s' % lang)
| [
"numpy.random.normal",
"numpy.mean",
"numpy.zeros",
"numpy.std",
"numpy.random.shuffle"
] | [((203, 241), 'numpy.zeros', 'np.zeros', (['(5000, 100)'], {'dtype': '"""float32"""'}), "((5000, 100), dtype='float32')\n", (211, 241), True, 'import numpy as np\n'), ((385, 400), 'numpy.mean', 'np.mean', (['embeds'], {}), '(embeds)\n', (392, 400), True, 'import numpy as np\n'), ((420, 434), 'numpy.std', 'np.std', (['embeds'], {}), '(embeds)\n', (426, 434), True, 'import numpy as np\n'), ((784, 833), 'numpy.random.normal', 'np.random.normal', (['self.mean', 'self.std'], {'size': 'shape'}), '(self.mean, self.std, size=shape)\n', (800, 833), True, 'import numpy as np\n'), ((1766, 1796), 'numpy.random.shuffle', 'np.random.shuffle', (['index_array'], {}), '(index_array)\n', (1783, 1796), True, 'import numpy as np\n')] |
'''
=======================================================================
2016 Changes by ARM (abhilashreddy.com)
- made to work with Python 3+
- made to work with recent versions of matplotlib
=======================================================================
Author: <NAME> (<EMAIL> where cu=columbia.edu)
copyright (c) 2010
liscence: BSD style
======================================================================
'''
import numpy as np
import matplotlib.tri as tri
def get_points():
    """
    Return the 12 unit-length vertices of the base icosahedron as a
    (12, 3) array, rotated so the first vertex lies on the +z axis and
    reordered into a downward spiral.
    """
    phi = (1. + np.sqrt(5.)) / 2.0  # golden ratio
    coords = np.array([
        [phi, -phi, -phi, phi, 1, 1, -1, -1, 0, 0, 0, 0],
        [0, 0, 0, 0, phi, -phi, -phi, phi, 1, 1, -1, -1],
        [1, 1, -1, -1, 0, 0, 0, 0, phi, -phi, -phi, phi],
    ]).transpose()
    # normalize to the unit sphere (all rows share the same norm)
    coords = coords / np.sqrt((coords ** 2).sum(1))[0]
    # rotate so the first point lands on the z-axis
    ang = np.arctan(coords[0, 0] / coords[0, 2])
    c, s = np.cos(ang), np.sin(ang)
    rot = np.array([[c, 0.0, -s], [0.0, 1.0, 0.0], [s, 0.0, c]])
    coords = np.inner(rot, coords).transpose()
    # reorder in a downward spiral
    spiral = [0, 3, 4, 8, -1, 5, -2, -3, 7, 1, 6, 2]
    return coords[spiral, :]
def get_barymat(n):
    """
    Return the (n*(n+1)/2, 3) matrix of barycentric coordinates that
    refines a triangle into an nth-order barycentric grid; each row sums
    to one.
    """
    num_rows = n * (n + 1) // 2
    # the n evenly spaced fractions 0, 1/(n-1), ..., 1
    frac = np.arange(n) / float(n - 1)
    bcmat = np.zeros((num_rows, 3))
    # fill one horizontal level of the triangle per iteration
    row = 0
    for level in range(n):
        width = n - level  # number of points on this level
        bcmat[row:row + width, 0] = frac[width - 1::-1]
        bcmat[row:row + width, 1] = frac[:width]
        bcmat[row:row + width, 2] = frac[level]
        row += width
    return bcmat
class icosahedron(object):
    """
    The verticies of an icosahedron, together with triangles, edges, and
    triangle midpoints and edge midpoints. The class data stores the base
    icosahedral mesh that get_triangulation() refines into a sphere mesh.
    All attributes are class-level and computed once at class definition
    time, so instances share them.
    """
    # define points (verticies): (12, 3) unit vectors from get_points()
    p = get_points()
    px, py, pz = p[:,0], p[:,1], p[:,2]
    # define triangles (faces): 20 faces, each a row of 3 vertex indices
    tri = np.array([[1,2,3,4,5,6,2,7,2,8,3, 9,10,10,6, 6, 7, 8, 9,10],
                      [0,0,0,0,0,1,7,2,3,3,4, 4, 4, 5,5, 7, 8, 9,10, 6],
                      [2,3,4,5,1,7,1,8,8,9,9,10, 5, 6,1,11,11,11,11,11]]).transpose()
    # centroid of each face
    trimids = (p[tri[:,0]] + p[tri[:,1]] + p[tri[:,2]]) / 3.0
    # define bars (edges): keep each undirected edge once via the j > i filter
    bar = list()
    for t in tri:
        bar += [np.array([i,j]) for i, j in [t[0:2], t[1:], t[[2, 0]]] if j>i]
    bar = np.array(bar)
    # midpoint of each edge
    barmids = (p[bar[:,0]] + p[bar[:,1]]) / 2.0
def triangulate_bary(bary):
    """
    Triangulate a barycentric triangle using matplotlib's Delaunay
    triangulation. Returns the pair (edges, triangles).
    """
    # project the barycentric points onto a 2D plane before triangulating
    angle = -np.pi / 4.
    x = np.cos(angle) * bary[:, 0] + np.sin(angle) * bary[:, 1]
    y = bary[:, 2]
    delaunay = tri.Triangulation(x, y)
    return delaunay.edges, delaunay.triangles
def get_triangulation(n, ico=icosahedron()):
    """
    Compute the triangulation of the sphere by refineing each face of the
    icosahedron to an nth order barycentric triangle. There are two key issues
    that this routine addresses.
    1) calculate the triangles (unique by construction)
    2) remove non-unique nodes and edges
    """
    # NOTE: the default `ico` is built once at function-definition time; it is
    # only read here, so the shared default is harmless.
    # Stack the three corner points of every face: shape (3, 20, 3).
    verts = np.array([ico.p[ico.tri[:,0]],
                      ico.p[ico.tri[:,1]],
                      ico.p[ico.tri[:,2]]])
    bary = get_barymat(n)
    # Barycentric interpolation of every face: (20, numverts, 3) after transpose.
    newverts = np.tensordot(verts, bary, axes=[(0,), (-1,)]).transpose(0,2,1)
    numverts = newverts.shape[1]
    if newverts.size/3 > 1e6: print("newverts.size/3 is high: {0}".format(
        newverts.size/3))
    # Flat vertex ids, one row per face. NOTE(review): np.arange gets a float
    # argument here (newverts.size/3); the values are still exact integers.
    flat_coordinates = np.arange(newverts.size/3).reshape(20, numverts)
    barbary, tribary = triangulate_bary(bary)
    # Lift the per-face (barycentric) triangulation to global vertex ids.
    newtri = np.zeros((20, tribary.shape[0], 3), dtype=int)
    newbar = np.zeros((20, barbary.shape[0], 2), dtype=int)
    for i in range(20):
        for j in range(3):
            newtri[i, :, j] = flat_coordinates[i, tribary[:,j]]
            if j < 2: newbar[i, :, j] = flat_coordinates[i, barbary[:,j]]
    newverts = newverts.reshape(newverts.size//3, 3)
    newtri = newtri.reshape(newtri.size//3, 3)
    newbar = newbar.reshape(newbar.size//2, 2)
    # normalize verticies
    scalars = np.sqrt((newverts**2).sum(-1))
    newverts = (newverts.T / scalars).T
    # remove repeated verticies
    # Quantize each coordinate to a 1e-8 grid and hash each point to a scalar
    # via a dot product with (100, 200, 300); np.unique on the hashes detects
    # duplicated vertices. NOTE(review): assumes no hash collisions at this
    # tolerance -- presumably fine for unit-sphere data, but worth verifying.
    aux, iunique, irepeat = np.unique(np.dot(newverts//1e-8, 100*np.arange(1,4,1)),
                                        return_index=True, return_inverse=True)
    univerts = newverts[iunique]
    unitri = irepeat[newtri]
    unibar = irepeat[newbar]
    # Deduplicate edges by their midpoints using the same quantize-and-hash trick.
    mid = .5 * (univerts[unibar[:,0]] + univerts[unibar[:,1]])
    aux, iu = np.unique(np.dot(mid//1e-8, 100*np.arange(1,4,1)), return_index=True)
    unimid = mid[iu]
    unibar = unibar[iu,:]
    return univerts, unitri, unibar
class icosphere(icosahedron):
    """
    An icosahedron-based discretization of the sphere: vertices `p`,
    triangles `tri`, and edges `bar` produced by get_triangulation().
    """
    def __init__(self, n):
        """
        define an icosahedron based discretization of the sphere
        n is the order of barycentric triangles used to refine each
        face of the icosahedral base mesh.
        """
        # Passing the class (not an instance) works because all icosahedron
        # attributes are class-level.
        self.p, self.tri, self.bar = get_triangulation(n+1, icosahedron)
'''
if __name__ == "__main__":
import numpy as np
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
isph=icosphere(15)
npt=isph.p.shape[0]
nelem=isph.tri.shape[0]
vertices=isph.p
faces=isph.tri
fig = pyplot.figure(figsize=(10,10))
ax = fig.gca(projection='3d')
ax.plot_trisurf(vertices[:,0],vertices[:,1],vertices[:,2],cmap='viridis', triangles=faces, linewidth=0.10,edgecolor="black",alpha=1.0)
pyplot.show()
''' | [
"numpy.sqrt",
"matplotlib.tri.Triangulation",
"numpy.tensordot",
"numpy.inner",
"numpy.array",
"numpy.zeros",
"numpy.cos",
"numpy.sin",
"numpy.cumsum",
"numpy.arange",
"numpy.arctan"
] | [((955, 983), 'numpy.arctan', 'np.arctan', (['(p[0, 0] / p[0, 2])'], {}), '(p[0, 0] / p[0, 2])\n', (964, 983), True, 'import numpy as np\n'), ((1035, 1085), 'numpy.array', 'np.array', (['[[ca, 0, -sa], [0, 1.0, 0], [sa, 0, ca]]'], {}), '([[ca, 0, -sa], [0, 1.0, 0], [sa, 0, ca]])\n', (1043, 1085), True, 'import numpy as np\n'), ((1430, 1442), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1439, 1442), True, 'import numpy as np\n'), ((1506, 1528), 'numpy.zeros', 'np.zeros', (['(numrows, 3)'], {}), '((numrows, 3))\n', (1514, 1528), True, 'import numpy as np\n'), ((1631, 1650), 'numpy.arange', 'np.arange', (['n', '(0)', '(-1)'], {}), '(n, 0, -1)\n', (1640, 1650), True, 'import numpy as np\n'), ((1662, 1684), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'int'}), '(n, dtype=int)\n', (1670, 1684), True, 'import numpy as np\n'), ((1704, 1726), 'numpy.cumsum', 'np.cumsum', (['shifts[:-1]'], {}), '(shifts[:-1])\n', (1713, 1726), True, 'import numpy as np\n'), ((2760, 2773), 'numpy.array', 'np.array', (['bar'], {}), '(bar)\n', (2768, 2773), True, 'import numpy as np\n'), ((3046, 3069), 'matplotlib.tri.Triangulation', 'tri.Triangulation', (['x', 'y'], {}), '(x, y)\n', (3063, 3069), True, 'import matplotlib.tri as tri\n'), ((3467, 3543), 'numpy.array', 'np.array', (['[ico.p[ico.tri[:, 0]], ico.p[ico.tri[:, 1]], ico.p[ico.tri[:, 2]]]'], {}), '([ico.p[ico.tri[:, 0]], ico.p[ico.tri[:, 1]], ico.p[ico.tri[:, 2]]])\n', (3475, 3543), True, 'import numpy as np\n'), ((3961, 4007), 'numpy.zeros', 'np.zeros', (['(20, tribary.shape[0], 3)'], {'dtype': 'int'}), '((20, tribary.shape[0], 3), dtype=int)\n', (3969, 4007), True, 'import numpy as np\n'), ((4021, 4067), 'numpy.zeros', 'np.zeros', (['(20, barbary.shape[0], 2)'], {'dtype': 'int'}), '((20, barbary.shape[0], 2), dtype=int)\n', (4029, 4067), True, 'import numpy as np\n'), ((995, 1006), 'numpy.cos', 'np.cos', (['ang'], {}), '(ang)\n', (1001, 1006), True, 'import numpy as np\n'), ((1008, 1019), 'numpy.sin', 'np.sin', (['ang'], 
{}), '(ang)\n', (1014, 1019), True, 'import numpy as np\n'), ((654, 666), 'numpy.sqrt', 'np.sqrt', (['(5.0)'], {}), '(5.0)\n', (661, 666), True, 'import numpy as np\n'), ((696, 836), 'numpy.array', 'np.array', (['[[a, -a, -a, a, 1, 1, -1, -1, 0, 0, 0, 0], [0, 0, 0, 0, a, -a, -a, a, 1, 1,\n -1, -1], [1, 1, -1, -1, 0, 0, 0, 0, a, -a, -a, a]]'], {}), '([[a, -a, -a, a, 1, 1, -1, -1, 0, 0, 0, 0], [0, 0, 0, 0, a, -a, -a,\n a, 1, 1, -1, -1], [1, 1, -1, -1, 0, 0, 0, 0, a, -a, -a, a]])\n', (704, 836), True, 'import numpy as np\n'), ((1094, 1115), 'numpy.inner', 'np.inner', (['rotation', 'p'], {}), '(rotation, p)\n', (1102, 1115), True, 'import numpy as np\n'), ((2330, 2544), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5, 6, 2, 7, 2, 8, 3, 9, 10, 10, 6, 6, 7, 8, 9, 10], [0, 0, 0,\n 0, 0, 1, 7, 2, 3, 3, 4, 4, 4, 5, 5, 7, 8, 9, 10, 6], [2, 3, 4, 5, 1, 7,\n 1, 8, 8, 9, 9, 10, 5, 6, 1, 11, 11, 11, 11, 11]]'], {}), '([[1, 2, 3, 4, 5, 6, 2, 7, 2, 8, 3, 9, 10, 10, 6, 6, 7, 8, 9, 10],\n [0, 0, 0, 0, 0, 1, 7, 2, 3, 3, 4, 4, 4, 5, 5, 7, 8, 9, 10, 6], [2, 3, 4,\n 5, 1, 7, 1, 8, 8, 9, 9, 10, 5, 6, 1, 11, 11, 11, 11, 11]])\n', (2338, 2544), True, 'import numpy as np\n'), ((2687, 2703), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (2695, 2703), True, 'import numpy as np\n'), ((3626, 3671), 'numpy.tensordot', 'np.tensordot', (['verts', 'bary'], {'axes': '[(0,), (-1,)]'}), '(verts, bary, axes=[(0,), (-1,)])\n', (3638, 3671), True, 'import numpy as np\n'), ((3851, 3879), 'numpy.arange', 'np.arange', (['(newverts.size / 3)'], {}), '(newverts.size / 3)\n', (3860, 3879), True, 'import numpy as np\n'), ((2965, 2985), 'numpy.cos', 'np.cos', (['(-np.pi / 4.0)'], {}), '(-np.pi / 4.0)\n', (2971, 2985), True, 'import numpy as np\n'), ((2995, 3015), 'numpy.sin', 'np.sin', (['(-np.pi / 4.0)'], {}), '(-np.pi / 4.0)\n', (3001, 3015), True, 'import numpy as np\n'), ((4614, 4632), 'numpy.arange', 'np.arange', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (4623, 4632), True, 'import numpy as 
np\n'), ((4913, 4931), 'numpy.arange', 'np.arange', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (4922, 4931), True, 'import numpy as np\n')] |
import torch
import sys, math
import numpy as np
import torch.jit as jit
import warnings
import copy
import torch.nn.init as init
import torch.nn as nn
from distutils.util import strtobool
from models.base import Model
from models.utils import *
from models.multi_head_att import MultiHeadedAttention
from models.ssm.inference import RNN_STInf, Attention_STInf
from models.iefs.gated import GatedTransition
from models.iefs.att_iefs import AttentionIEFTransition
from models.iefs.moe import MofE
from pyro.distributions import Normal, Independent, Categorical, LogNormal
from typing import List, Tuple
from torch import Tensor
from collections import namedtuple
from typing import List, Tuple
from torch.autograd import Variable
from argparse import ArgumentParser
class SSM(Model):
    """Deep state space model for longitudinal data with treatments.

    Latent states ``Z_t`` evolve via a configurable transition function
    (linear / nonlinear / heterogeneous / gated / attention IEF / MoE),
    observations ``X_t`` are produced by an emission function, and an
    amortized inference network (RNN- or attention-based) approximates the
    posterior over ``Z``. Training maximizes a masked ELBO (``get_loss``);
    ``imp_sampling`` gives an importance-sampled NLL estimate.
    """
    def __init__(self, trial, **kwargs):
        super(SSM, self).__init__(trial)
        self.save_hyperparameters()
    def init_model(self):
        """Instantiate inference network, emission, transition function and
        the prior over Z1 from ``self.hparams``."""
        ttype = self.hparams['ttype']; etype = self.hparams['etype']
        dim_hidden = self.hparams['dim_hidden']
        num_heads = self.hparams['nheads']
        dim_stochastic = self.hparams['dim_stochastic']
        dim_data = self.hparams['dim_data']
        dim_base = self.hparams['dim_base']
        dim_treat = self.hparams['dim_treat']
        post_approx = self.hparams['post_approx']
        inftype = self.hparams['inftype']; etype = self.hparams['etype']; ttype = self.hparams['ttype']
        augmented = self.hparams['augmented']; alpha1_type = self.hparams['alpha1_type']
        rank = self.hparams['rank']; combiner_type = self.hparams['combiner_type']; nheads = self.hparams['nheads']
        add_stochastic = self.hparams['add_stochastic']
        zmatrix = self.hparams['zmatrix']
        # Inference network; inf_noise adds Gaussian input noise as a
        # regularizer in get_loss (abs() guards against negative hparams).
        self.inf_noise = np.abs(self.hparams['inf_noise'])
        if inftype == 'rnn':
            self.inf_network = RNN_STInf(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic, post_approx = post_approx, rank = rank, combiner_type = combiner_type)
        elif inftype == 'rnn_bn':
            self.inf_network = RNN_STInf(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic, post_approx = post_approx, rank = rank, use_bn=True, combiner_type = combiner_type)
        elif inftype == 'rnn_relu':
            self.inf_network = RNN_STInf(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic, post_approx = post_approx, rank = rank, nl='relu', combiner_type = combiner_type)
        elif inftype == 'att':
            self.inf_network = Attention_STInf(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic, nheads = num_heads, post_approx = post_approx, rank = rank)
        else:
            raise ValueError('Bad inference type')
        # Emission function p(X_t | Z_t): linear or single-hidden-layer MLP.
        if etype == 'lin':
            self.e_mu = nn.Linear(dim_stochastic, dim_data)
            self.e_sigma = nn.Linear(dim_stochastic, dim_data)
        elif etype == 'nl':
            dim_hidden = self.trial.suggest_int('dim_hidden',100,500)
            # Shared trunk; mu/sigma heads differ only in the final layer.
            emodel = nn.Sequential(nn.Linear(dim_stochastic, dim_hidden), nn.ReLU(True))
            self.e_mu = nn.Sequential(emodel, nn.Linear(dim_hidden, dim_data))
            self.e_sigma = nn.Sequential(emodel, nn.Linear(dim_hidden, dim_data))
        else:
            raise ValueError('bad etype')
        # Transition function p(Z_t | Z_{t-1}, A_t [, B]); when baselines are
        # included the treatment dimension grows by dim_base.
        if self.hparams['include_baseline'] != 'none':
            self.transition_fxn = TransitionFunction(dim_stochastic, dim_data, dim_treat+dim_base, dim_hidden, ttype, \
                augmented=augmented, alpha1_type=alpha1_type, add_stochastic=add_stochastic, num_heads=num_heads, zmatrix=zmatrix)
        else:
            self.transition_fxn = TransitionFunction(dim_stochastic, dim_data, dim_treat, dim_hidden, ttype, \
                augmented=augmented, alpha1_type=alpha1_type, add_stochastic=add_stochastic, num_heads=num_heads, zmatrix=zmatrix)
        # Prior over Z1 conditioned on [B, X0, A0].
        self.prior_W = nn.Linear(dim_treat+dim_data+dim_base, dim_stochastic)
        self.prior_sigma = nn.Linear(dim_treat+dim_data+dim_base, dim_stochastic)
    def p_Z1(self, B, X0, A0):
        """Prior over the first latent state given baseline ``B``, first
        observation ``X0`` and first treatment ``A0``."""
        inp_cat = torch.cat([B, X0, A0], -1)
        mu = self.prior_W(inp_cat)
        sigma = torch.nn.functional.softplus(self.prior_sigma(inp_cat))
        p_z_bxa = Independent(Normal(mu, sigma), 1)
        return p_z_bxa
    def p_X_Z(self, Zt, Tval):
        """Emission distribution parameters (mu, sigma) of X given Z.

        ``Tval`` (time values) is only consumed by spiral-style emission
        functions; otherwise the emission depends on ``Zt`` alone.
        """
        if 'spiral' in self.hparams['etype']:
            mu = self.e_mu(Zt, Tval)
            if 'Spiral' in self.e_sigma.__class__.__name__:
                sigma = torch.nn.functional.softplus(self.e_sigma(Zt, Tval))
            else:
                sigma = torch.nn.functional.softplus(self.e_sigma(Zt))
        else:
            mu = self.e_mu(Zt)
            sigma = torch.nn.functional.softplus(self.e_sigma(Zt))
        return mu, sigma
    def p_Zt_Ztm1(self, Zt, A, B, X, A0, eps = 0.):
        """Transition distribution p(Z_t | Z_{t-1}); the first time step uses
        the Z1 prior and subsequent steps use the transition function."""
        X0 = X[:,0,:]; Xt = X[:,1:,:]
        inp_cat = torch.cat([B, X0, A0], -1)
        mu1 = self.prior_W(inp_cat)[:,None,:]
        sig1 = torch.nn.functional.softplus(self.prior_sigma(inp_cat))[:,None,:]
        Tmax = Zt.shape[1]
        if self.hparams['augmented']:
            # Augmented transitions also condition on the previous observation.
            Zinp = torch.cat([Zt[:,:-1,:], Xt[:,:-1,:]], -1)
        else:
            Zinp = Zt[:,:-1,:]
        if self.hparams['include_baseline'] != 'none':
            Aval = A[:,1:Tmax,:]
            # Splice baseline B between the clock channel A[...,0] and the
            # remaining treatment channels, matching TransitionFunction's input.
            Acat = torch.cat([Aval[...,[0]],B[:,None,:].repeat(1,Aval.shape[1],1), Aval[...,1:]],-1)
            mu2T, sig2T = self.transition_fxn(Zinp, Acat, eps = eps)
        else:
            mu2T, sig2T = self.transition_fxn(Zinp, A[:,1:Tmax,:], eps = eps)
        mu, sig = torch.cat([mu1,mu2T],1), torch.cat([sig1,sig2T],1)
        return Independent(Normal(mu, sig), 1)
    def get_loss(self, B, X, A, M, Y, CE, anneal = 1., return_reconstruction = False, with_pred = False):
        """Masked negative ELBO.

        Returns (neg_elbo, masked_nll, masked_kl_t, ones) and, when
        ``return_reconstruction``, the masked emission mu/std as well.
        ``anneal`` scales the KL term; ``with_pred`` averages in the NLL of
        one-step-ahead predictions from the prior mean.
        """
        _, _, lens = get_masks(M)
        # Drop sequences with fewer than two observations.
        B, X, A, M, Y, CE = B[lens>1], X[lens>1], A[lens>1], M[lens>1], Y[lens>1], CE[lens>1]
        m_t, m_g_t, _ = get_masks(M[:,1:,:])
        Xnew = X + torch.randn(X.shape).to(X.device)*self.inf_noise
        Z_t, q_zt = self.inf_network(Xnew, A, M, B)
        Tmax = Z_t.shape[1]
        p_x_mu, p_x_std = self.p_X_Z(Z_t, A[:,1:Tmax+1,[0]])
        p_zt = self.p_Zt_Ztm1(Z_t, A, B, X, A[:,0,:])
        masked_nll = masked_gaussian_nll_3d(X[:,1:Tmax+1,:], p_x_mu, p_x_std, M[:,1:Tmax+1,:])
        full_masked_nll = masked_nll
        masked_nll = masked_nll.sum(-1).sum(-1)
        if with_pred:
            p_x_mu_pred, p_x_std_pred = self.p_X_Z(p_zt.mean, A[:,:Z_t.shape[1],[0]])
            masked_nll_pred = masked_gaussian_nll_3d(X[:,1:Tmax+1,:], p_x_mu_pred, p_x_std_pred, M[:,1:Tmax+1,:])
            masked_nll_pred = masked_nll_pred.sum(-1).sum(-1)
            masked_nll = (masked_nll+masked_nll_pred)*0.5
        kl_t = q_zt.log_prob(Z_t)-p_zt.log_prob(Z_t)
        masked_kl_t= (m_t[:,:Tmax]*kl_t).sum(-1)
        neg_elbo = masked_nll + anneal*masked_kl_t
        if return_reconstruction:
            return (neg_elbo, masked_nll, masked_kl_t, torch.ones_like(masked_kl_t), p_x_mu*M[:,1:,:], p_x_std*M[:,1:,:])
        else:
            return (neg_elbo, masked_nll, masked_kl_t, torch.ones_like(masked_kl_t))
    def imp_sampling(self, B, X, A, M, Y, CE, anneal = 1., imp_samples=100, idx = -1, mask = None):
        """Importance-sampling estimate of the per-sequence NLL.

        ``idx`` restricts the likelihood to a single feature; ``mask``
        restricts it over time. Returns (per-sequence NLL, mean NLL).
        """
        _, _, lens = get_masks(M)
        B, X, A, M, Y, CE = B[lens>1], X[lens>1], A[lens>1], M[lens>1], Y[lens>1], CE[lens>1]
        m_t, m_g_t, _ = get_masks(M[:,1:,:])
        ll_estimates = torch.zeros((imp_samples,X.shape[0])).to(X.device)
        ll_priors = torch.zeros((imp_samples,X.shape[0])).to(X.device)
        ll_posteriors = torch.zeros((imp_samples,X.shape[0])).to(X.device)
        X0 = X[:,0,:]; Xt = X[:,1:,:]; A0 = A[:,0,:]
        inp_cat = torch.cat([B, X0, A0], -1)
        mu1 = self.prior_W(inp_cat)[:,None,:]
        sig1 = torch.nn.functional.softplus(self.prior_sigma(inp_cat))[:,None,:]
        for sample in range(imp_samples):
            Z_s, q_zt = self.inf_network(X, A, M, B)
            Tmax = Z_s.shape[-2]
            p_x_mu, p_x_std = self.p_X_Z(Z_s, A[:,1:Tmax+1,[0]])
            masked_nll = masked_gaussian_nll_3d(X[:,1:Tmax+1,:], p_x_mu, p_x_std, M[:,1:Tmax+1,:]) # (bs,T,D)
            if idx != -1:
                masked_ll = -1*masked_nll[...,[idx]].sum(-1).sum(-1)
            elif mask is not None:
                mask = mask[...,:Tmax]
                masked_ll = -1*(masked_nll.sum(-1)*mask).sum(-1)
            else:
                masked_ll = -1*masked_nll.sum(-1).sum(-1)
            # prior
            if self.hparams['augmented']:
                Zinp = torch.cat([Z_s[:,:-1,:], Xt[:,:-1,:]], -1)
            else:
                Zinp = Z_s[:,:-1,:]
            # Compare against 'none' explicitly: the hparam is a string, so a
            # bare truthiness test would wrongly treat 'none' as enabled and
            # feed the no-baseline transition a baseline-augmented input.
            if self.hparams['include_baseline'] != 'none':
                Aval = A[:,1:Tmax,:]
                Acat = torch.cat([Aval[...,[0]],B[:,None,:].repeat(1,Aval.shape[1],1), Aval[...,1:]],-1)
                mu2T, sig2T = self.transition_fxn(Zinp, Acat)
            else:
                mu2T, sig2T = self.transition_fxn(Zinp, A[:,1:Tmax,:])
            mu_prior, std_prior = torch.cat([mu1,mu2T],1), torch.cat([sig1,sig2T],1)
            ll_prior = -1*masked_gaussian_nll_3d(Z_s, mu_prior, std_prior, m_t[:,:Tmax,None])
            # posterior
            ll_posterior = -1*masked_gaussian_nll_3d(Z_s, q_zt.mean, q_zt.stddev, m_t[:,:Tmax,None])
            # store values
            ll_estimates[sample] = masked_ll
            if idx != -1:
                ll_priors[sample] = ll_prior[...,[idx]].sum(-1).sum(-1)
                ll_posteriors[sample] = ll_posterior[...,[idx]].sum(-1).sum(-1)
            elif mask is not None:
                mask = mask[...,:Tmax]
                ll_priors[sample] = (ll_prior.sum(-1)*mask).sum(-1)
                ll_posteriors[sample] = (ll_posterior.sum(-1)*mask).sum(-1)
            else:
                ll_priors[sample] = ll_prior.sum(-1).sum(-1)
                ll_posteriors[sample] = ll_posterior.sum(-1).sum(-1)
        # log-mean-exp over importance weights.
        nll_estimate = -1*(torch.logsumexp(ll_estimates + ll_priors - ll_posteriors, dim=0) - np.log(imp_samples))
        return nll_estimate, torch.mean(nll_estimate)
    def forward(self, B, X, A, M, Y, CE, anneal = 1., full_ret_loss=False):
        """Training/eval forward pass: ELBO plus weight regularization.

        Returns ((mean neg_elbo, mean nll, mean kl, ones), loss).
        """
        if self.hparams.clock_ablation:
            # Ablation: replace the local clock channel with a constant.
            A[...,0] = torch.ones(A.shape[1])
        if self.training:
            if self.hparams['elbo_samples']>1:
                # Replicate each sequence to average the ELBO over multiple
                # posterior samples.
                B, X = torch.repeat_interleave(B, repeats=self.elbo_samples, dim=0), torch.repeat_interleave(X, repeats=self.elbo_samples, dim=0)
                A, M = torch.repeat_interleave(A, repeats=self.elbo_samples, dim=0), torch.repeat_interleave(M, repeats=self.elbo_samples, dim=0)
                Y, CE= torch.repeat_interleave(Y, repeats=self.elbo_samples, dim=0), torch.repeat_interleave(CE, repeats=self.elbo_samples, dim=0)
            neg_elbo, masked_nll, kl, _ = self.get_loss(B, X, A, M, Y, CE, anneal = anneal, with_pred = True)
        else:
            neg_elbo, masked_nll, kl, _ = self.get_loss(B, X, A, M, Y, CE, anneal = anneal, with_pred = False)
        reg_loss = torch.mean(neg_elbo)
        for name,param in self.named_parameters():
            if self.reg_all == 'all':
                reg_loss += self.C*apply_reg(param, reg_type=self.reg_type)
            elif self.reg_all == 'except_multi_head':
                # Regularize everything except the multi-headed attention weights.
                if 'attn' not in name:
                    reg_loss += self.C*apply_reg(param, reg_type=self.reg_type)
            elif self.reg_all == 'except_multi_head_ief':
                if 'attn' not in name and 'logcell' not in name \
                    and 'treatment_exp' not in name and 'control_layer' not in name:
                    reg_loss += self.C*apply_reg(param, reg_type=self.reg_type)
        loss = torch.mean(reg_loss)
        return (torch.mean(neg_elbo), torch.mean(masked_nll), torch.mean(kl), torch.ones_like(kl)), loss
    def forward_sample(self, A, T_forward, Z_start = None, B=None, X0=None, A0=None, eps = 0.):
        """Roll the generative model forward for ``T_forward`` steps.

        Starts from ``Z_start`` if given, otherwise samples Z1 from the prior
        (requires ``B``, ``X0``, ``A0``). Returns (X sample, (Z_t, mu, sigma)).
        """
        if Z_start is None:
            inp_cat = torch.cat([B, X0, A0], -1)
            mu1 = self.prior_W(inp_cat)
            sig1 = torch.nn.functional.softplus(self.prior_sigma(inp_cat))
            Z_start = torch.squeeze(Independent(Normal(mu1, sig1), 1).sample((1,)))
        Zlist = [Z_start]
        for t in range(1, T_forward):
            Ztm1 = Zlist[t-1]
            if self.hparams.include_baseline != 'none':
                Aval = A[:,t-1,:]
                Acat = torch.cat([Aval[...,[0]], B, Aval[...,1:]], -1)
                mut, sigmat= self.transition_fxn(Ztm1, Acat, eps = eps)
            else:
                mut, sigmat= self.transition_fxn(Ztm1, A[:,t-1,:], eps = eps)
            sample = torch.squeeze(Independent(Normal(mut, sigmat), 1).sample((1,)))
            if len(sample.shape) == 1:
                # squeeze() collapsed the batch dim for batch size 1; restore it.
                sample = sample[None,...]
            Zlist.append(sample)
        Z_t = torch.cat([k[:,None,:] for k in Zlist], 1)
        p_x_mu, p_x_sigma = self.p_X_Z(Z_t, A[:,:Z_t.shape[1],[0]])
        sample = torch.squeeze(Independent(Normal(p_x_mu, p_x_sigma), 1).sample((1,)))
        return sample, (Z_t, p_x_mu, p_x_sigma)
    def inspect(self, T_forward, T_condition, B, X, A, M, Y, CE, restrict_lens = False, nsamples = 1, eps = 0.):
        """Diagnostics: per-feature reconstruction MSE, unconditional forward
        samples, and (when ``T_condition`` != -1) samples conditioned on the
        first ``T_condition`` observed steps."""
        self.eval()
        m_t, _, lens = get_masks(M)
        idx_select = lens>1
        B, X, A, M, Y, CE = B[lens>1], X[lens>1], A[lens>1], M[lens>1], Y[lens>1], CE[lens>1]
        m_t, m_g_t, lens = get_masks(M[:,1:,:])
        Z_t, q_zt = self.inf_network(X, A, M, B)
        p_x_mu, p_x_std = self.p_X_Z(Z_t, A[:,:Z_t.shape[1],[0]])
        p_zt = self.p_Zt_Ztm1(Z_t, A, B, X, A[:,0,:], eps = eps)
        Tmax = Z_t.shape[1]
        masked_nll = masked_gaussian_nll_3d(X[:,1:Tmax+1,:], p_x_mu, p_x_std, M[:,1:Tmax+1,:])
        kl_t = q_zt.log_prob(Z_t)-p_zt.log_prob(Z_t)
        masked_kl_t = (m_t[:,:Tmax]*kl_t).sum(-1)
        per_feat_nelbo = (masked_nll.sum(1) + masked_kl_t[...,None]).mean(0)
        # Report per-feature masked MSE rather than the per-feature NELBO.
        mse = (((p_x_mu-X[:,1:Tmax+1])**2)*M[:,1:Tmax+1]).sum(0).sum(0)
        vals= M[:,1:Tmax+1].sum(0).sum(0)
        per_feat_nelbo = mse/vals
        neg_elbo = torch.mean(masked_nll.sum(-1).sum(-1)+masked_kl_t)
        if restrict_lens:
            # Keep only sequences long enough to both condition and forecast.
            _, _, lens = get_masks(M)
            idx_select = lens>(T_forward+T_condition)
            B, X, A, M, Y, CE = B[idx_select], X[idx_select], A[idx_select], M[idx_select], Y[idx_select], CE[idx_select]
        x_forward_list = []
        for n in range(nsamples):
            _,(_,x_forward,_) = self.forward_sample(A[:,1:T_forward+1,:], T_forward-1, B = B, X0=X[:,0,:], A0=A[:,0,:], eps = eps)
            x_forward_list.append(x_forward[...,None])
        x_forward = torch.cat(x_forward_list,-1).mean(-1)
        x_forward = torch.cat([X[:,[0],:], x_forward], 1)
        if T_condition != -1:
            x_forward_conditional_list = []
            for n in range(nsamples):
                Z_t_cond, _ = self.inf_network(X[:,:T_condition,:], A[:,:T_condition,:], M[:,:T_condition,:], B)
                _,(_,x_forward_conditional,_) = self.forward_sample(A[:,T_condition:,:], T_forward, Z_start = Z_t_cond[:,-1,:], B = B, eps = eps)
                x_forward_conditional_list.append(x_forward_conditional[...,None])
            x_forward_conditional = torch.cat(x_forward_conditional_list, -1).mean(-1)
            x_sample_conditional = torch.cat([X[:,:T_condition,:], x_forward_conditional],1)
            return neg_elbo, per_feat_nelbo, torch.ones_like(masked_kl_t), torch.ones_like(masked_kl_t), x_sample_conditional, x_forward, (B,X,A,M,Y,CE), idx_select
        return neg_elbo, per_feat_nelbo, torch.ones_like(masked_kl_t), torch.ones_like(masked_kl_t), x_forward, (B,X,A,M,Y,CE), idx_select
    def inspect_trt(self, B, X, A, M, Y, CE, nsamples=3):
        """Per-patient counterfactual-style rollout: condition on observations
        up to the treatment marker, then sample forward to the end marker."""
        self.eval()
        m_t, _, lens = get_masks(M)
        idx_select = lens>1
        B, X, A, M, Y, CE = B[lens>1], X[lens>1], A[lens>1], M[lens>1], Y[lens>1], CE[lens>1]
        x_conditional_list = []
        for n in range(nsamples):
            x_conditionals_per_pt = []
            for i in range(X.shape[0]):
                # Condition up to (and including) the last step flagged in
                # treatment channel -3.
                T_condition = np.max(np.where(pt_numpy(A[i,:,-3]) == 1.)[0])+1
                l = np.where(pt_numpy(A[i,:,-1]) == 1.)[0]
                if len(l) == 0:
                    # NOTE(review): A[i,:-2] drops the last two *time* steps
                    # rather than selecting a treatment channel (cf. A[i,:,-1]
                    # above) — looks like it should be A[i,:,-2]; confirm.
                    T_total = np.max(np.where(pt_numpy(A[i,:-2] == 1.))[0])+1
                else:
                    T_total = np.max(l)+1
                T_forward = T_total - T_condition
                Z_t_cond, _ = self.inf_network(X[[i],:T_condition,:], A[[i],:T_condition,:], M[[i],:T_condition,:], B[[i]])
                _, (_, x_forward_conditional, _) = self.forward_sample(A[[i],T_condition:,:], T_forward, Z_start=Z_t_cond[:,-1,:], B = B[[i]])
                x_conditional = torch.cat((X[[i],:T_condition], x_forward_conditional, X[[i],T_total:]),1)
                x_conditionals_per_pt.append(x_conditional)
            x_conditional_list.append(torch.cat(x_conditionals_per_pt,0)[...,None])
        x_final_conditional = torch.cat(x_conditional_list, -1).mean(-1)
        return x_final_conditional, (B,X,A,M,Y,CE), idx_select
    def predict(self, **kwargs):
        # NotImplementedError (not the NotImplemented singleton) is the
        # correct exception for an unimplemented method.
        raise NotImplementedError()
    @staticmethod
    def add_model_specific_args(parent_parser):
        """Attach SSM-specific CLI arguments to ``parent_parser``."""
        parser = ArgumentParser(parents = [parent_parser], add_help=False)
        parser.add_argument('--dim_stochastic', type=int, default=48, help='stochastic dimension of state space model')
        parser.add_argument('--dim_hidden', type=int, default=300, help='hidden dimension for nonlinear model')
        parser.add_argument('--etype', type=str, default='lin', help='SSM emission function')
        parser.add_argument('--ttype', type=str, default='lin', help='SSM transition function')
        parser.add_argument('--inftype', type=str, default='rnn_relu', help='inference network type')
        parser.add_argument('--post_approx', type=str, default='diag', help='inference of approximate posterior distribution')
        parser.add_argument('--elbo_samples', type=int, default=1, help='number of samples to run through inference network')
        parser.add_argument('--augmented', type=strtobool, default=False, help='SSM augmented')
        parser.add_argument('--C', type=float, default=.01, help='regularization strength')
        parser.add_argument('--inf_noise', type=float, default=0., help='noise parameter on input')
        parser.add_argument('--nheads', type=int, default=1, help='number of heads for attention inference network and generative model')
        parser.add_argument('--rank', type=int, default=5, help='rank of matrix for low_rank posterior approximation')
        parser.add_argument('--combiner_type', type=str, default='pog', help='combiner function used in inference network')
        parser.add_argument('--reg_all', type=str, default='all', help='regularize all weights or only subset')
        parser.add_argument('--reg_type', type=str, default='l2', help='regularization type (l1 or l2)')
        parser.add_argument('--alpha1_type', type=str, default='linear', help='alpha1 parameterization in TreatExp IEF')
        parser.add_argument('--zmatrix', type=str, default='identity')
        parser.add_argument('--otype', type=str, default='linear', help='final layer of GroMOdE IEF (linear, identity, nl)')
        parser.add_argument('--add_stochastic', type=strtobool, default=False, help='conditioning alpha-1 of TEXP on S_[t-1]')
        parser.add_argument('--clock_ablation', type=strtobool, default=False, help='set to true to run without local clock')
        return parser
class SSMAtt(SSM):
    """SSM variant whose transition attends over the treatment history via
    multi-headed attention (see ``p_Zt_Ztm1``). Inherits sampling/inspection
    behavior from ``SSM`` except where overridden below."""
    def __init__(self, trial, **kwargs):
        super(SSMAtt, self).__init__(trial)
        self.save_hyperparameters()
    def init_model(self):
        """Build inference network, emission, attention transition and Z1
        prior from ``self.hparams`` (dim_stochastic is tuned via Optuna)."""
        ttype = 'attn_transition'; etype = self.hparams['etype']
        dim_hidden = self.hparams['dim_hidden']
        # dim_stochastic = self.hparams['dim_stochastic']
        dim_stochastic = self.trial.suggest_int('dim_stochastic',16,64)
        num_heads = self.hparams['nheads']
        dim_data = self.hparams['dim_data']
        dim_base = self.hparams['dim_base']
        dim_treat = self.hparams['dim_treat']
        post_approx = self.hparams['post_approx']
        # NOTE(review): this line overwrites the hard-coded
        # ttype='attn_transition' above with hparams['ttype'] — confirm which
        # transition type is actually intended for this subclass.
        inftype = self.hparams['inftype']; etype = self.hparams['etype']; ttype = self.hparams['ttype']
        augmented = self.hparams['augmented']; alpha1_type = self.hparams['alpha1_type']
        rank = self.hparams['rank']; combiner_type = self.hparams['combiner_type']; nheads = self.hparams['nheads']
        add_stochastic = self.hparams['add_stochastic']
        # Inference Network
        if inftype == 'rnn':
            self.inf_network = RNN_STInf(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic, post_approx = post_approx, rank = rank, combiner_type = combiner_type)
        elif inftype == 'rnn_bn':
            self.inf_network = RNN_STInf(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic, post_approx = post_approx, rank = rank, use_bn=True, combiner_type = combiner_type)
        elif inftype == 'rnn_relu':
            self.inf_network = RNN_STInf(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic, post_approx = post_approx, rank = rank, nl='relu', combiner_type = combiner_type)
        elif inftype == 'att':
            self.inf_network = Attention_STInf(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic, nheads = num_heads, post_approx = post_approx, rank = rank)
        else:
            raise ValueError('Bad inference type')
        # Emission Function: linear or shared-trunk MLP heads for mu/sigma.
        if etype == 'lin':
            self.e_mu = nn.Linear(dim_stochastic, dim_data)
            self.e_sigma = nn.Linear(dim_stochastic, dim_data)
        elif etype == 'nl':
            dim_hidden = self.trial.suggest_int('dim_hidden',100,500)
            emodel = nn.Sequential(nn.Linear(dim_stochastic, dim_hidden), nn.ReLU(True))
            self.e_mu = nn.Sequential(emodel, nn.Linear(dim_hidden, dim_data))
            self.e_sigma = nn.Sequential(emodel, nn.Linear(dim_hidden, dim_data))
        else:
            raise ValueError('bad etype')
        # Transition Function
        if self.hparams['include_baseline'] == 'all':
            self.transition_fxn = TransitionFunction(dim_stochastic, dim_data, dim_treat+dim_base, dim_hidden, ttype, \
                augmented=augmented, alpha1_type=alpha1_type, add_stochastic=add_stochastic, num_heads=num_heads)
        elif self.hparams['include_baseline'] == 'none':
            self.transition_fxn = TransitionFunction(dim_stochastic, dim_data, dim_treat, dim_hidden, ttype, \
                augmented=augmented, alpha1_type=alpha1_type, add_stochastic=add_stochastic, num_heads=num_heads)
        else:
            # NOTE(review): for any other include_baseline value,
            # self.transition_fxn is never created and p_Zt_Ztm1 will raise
            # AttributeError — confirm the intended set of values.
            pass
        # Prior over Z1, conditioned on [B, X0, A0].
        self.prior_W = nn.Linear(dim_treat+dim_data+dim_base, dim_stochastic)
        self.prior_sigma = nn.Linear(dim_treat+dim_data+dim_base, dim_stochastic)
        # Attention over the treatment sequence; attn_lin projects Z into the
        # treatment(+baseline) space used as the attention query.
        self.attn = MultiHeadedAttention(num_heads, dim_treat+dim_base)
        self.attn_lin = nn.Linear(dim_stochastic, dim_treat+dim_base)
    def p_Zt_Ztm1(self, Zt, A, B, X, A0, Am, eps = 0.):
        """Transition distribution; like SSM.p_Zt_Ztm1 but the treatment input
        is replaced by an attention readout over the treatment history,
        masked by ``Am``."""
        X0 = X[:,0,:]; Xt = X[:,1:,:]
        inp_cat = torch.cat([B, X0, A0], -1)
        mu1 = self.prior_W(inp_cat)[:,None,:]
        sig1 = torch.nn.functional.softplus(self.prior_sigma(inp_cat))[:,None,:]
        Tmax = Zt.shape[1]
        if self.hparams['augmented']:
            # Augmented transitions also condition on the previous observation.
            Zinp = torch.cat([Zt[:,:-1,:], Xt[:,:-1,:]], -1)
        else:
            Zinp = Zt[:,:-1,:]
        Aval = A[:,1:Tmax,:]; Am_res = Am[:,1:Tmax,1:Tmax]
        # NOTE(review): truthiness test — any non-empty string (including
        # 'none') is True here, unlike the explicit == 'all' / == 'none'
        # branches in init_model; confirm intended behavior.
        if self.hparams['include_baseline']:
            Acat = torch.cat([Aval[...,[0]],B[:,None,:].repeat(1,Aval.shape[1],1), Aval[...,1:]],-1)
            res = self.attn(self.attn_lin(Zinp), Acat, Acat, mask=Am_res, use_matmul=True)
            mu2T, sig2T = self.transition_fxn(Zinp, res, eps = eps)
        else:
            res = self.attn(self.attn_lin(Zinp), Aval, Aval, mask=Am_res, use_matmul=True) # res
            mu2T, sig2T = self.transition_fxn(Zinp, res, eps = eps)
        mu, sig = torch.cat([mu1,mu2T],1), torch.cat([sig1,sig2T],1)
        return Independent(Normal(mu, sig), 1)
    def get_loss(self, B, X, A, M, Y, CE, Am, anneal = 1., return_reconstruction = False, with_pred = False):
        """Masked negative ELBO; identical to SSM.get_loss except that the
        attention mask ``Am`` is threaded through to p_Zt_Ztm1 and no input
        noise is added before inference."""
        _, _, lens = get_masks(M)
        # Drop sequences with fewer than two observations.
        B, X, A, M, Y, CE, Am = B[lens>1], X[lens>1], A[lens>1], M[lens>1], Y[lens>1], CE[lens>1], Am[lens>1]
        m_t, m_g_t, _ = get_masks(M[:,1:,:])
        Z_t, q_zt = self.inf_network(X, A, M, B)
        Tmax = Z_t.shape[1]
        p_x_mu, p_x_std = self.p_X_Z(Z_t, A[:,1:Tmax+1,[0]])
        p_zt = self.p_Zt_Ztm1(Z_t, A, B, X, A[:,0,:], Am)
        masked_nll = masked_gaussian_nll_3d(X[:,1:Tmax+1,:], p_x_mu, p_x_std, M[:,1:Tmax+1,:])
        full_masked_nll = masked_nll
        masked_nll = masked_nll.sum(-1).sum(-1)
        if with_pred:
            # Also score one-step-ahead predictions from the prior mean and
            # average the two NLLs.
            p_x_mu_pred, p_x_std_pred = self.p_X_Z(p_zt.mean, A[:,:Z_t.shape[1],[0]])
            masked_nll_pred = masked_gaussian_nll_3d(X[:,1:Tmax+1,:], p_x_mu_pred, p_x_std_pred, M[:,1:Tmax+1,:])
            masked_nll_pred = masked_nll_pred.sum(-1).sum(-1)
            masked_nll = (masked_nll+masked_nll_pred)*0.5
        kl_t = q_zt.log_prob(Z_t)-p_zt.log_prob(Z_t)
        masked_kl_t= (m_t[:,:Tmax]*kl_t).sum(-1)
        neg_elbo = masked_nll + anneal*masked_kl_t
        if return_reconstruction:
            return (neg_elbo, masked_nll, masked_kl_t, torch.ones_like(masked_kl_t), p_x_mu*M[:,1:,:], p_x_std*M[:,1:,:])
        else:
            return (neg_elbo, masked_nll, masked_kl_t, torch.ones_like(masked_kl_t))
    def forward(self, B, X, A, M, Y, CE, Am, anneal = 1.):
        """Training/eval forward pass: ELBO (with attention mask Am) plus
        weight regularization. Returns ((means...), loss)."""
        if self.training:
            if self.hparams['elbo_samples']>1:
                # Replicate sequences to average the ELBO over several
                # posterior samples.
                B, X = torch.repeat_interleave(B, repeats=self.elbo_samples, dim=0), torch.repeat_interleave(X, repeats=self.elbo_samples, dim=0)
                A, M = torch.repeat_interleave(A, repeats=self.elbo_samples, dim=0), torch.repeat_interleave(M, repeats=self.elbo_samples, dim=0)
                Y, CE= torch.repeat_interleave(Y, repeats=self.elbo_samples, dim=0), torch.repeat_interleave(CE, repeats=self.elbo_samples, dim=0)
            neg_elbo, masked_nll, kl, _ = self.get_loss(B, X, A, M, Y, CE, Am, anneal = anneal, with_pred = True)
        else:
            neg_elbo, masked_nll, kl, _ = self.get_loss(B, X, A, M, Y, CE, Am, anneal = anneal, with_pred = False)
        reg_loss = torch.mean(neg_elbo)
        for name,param in self.named_parameters():
            # NOTE(review): reg_all is a string hparam ('all', ...), so this
            # truthiness test regularizes everything for any non-empty value;
            # SSM.forward compares against specific values instead.
            if self.reg_all:
                reg_loss += self.C*apply_reg(param, reg_type=self.reg_type)
            else:
                if 'attn' not in name:
                    reg_loss += self.C*apply_reg(param, reg_type=self.reg_type)
        loss = torch.mean(reg_loss)
        return (torch.mean(neg_elbo), torch.mean(masked_nll), torch.mean(kl), torch.ones_like(kl)), loss
    # The following overrides are stubs: they disable the parent's sampling
    # and inspection utilities for this subclass (each returns None).
    def forward_sample(self, A, T_forward, Z_start = None, B=None, X0=None, A0=None, eps = 0.):
        pass
    def inspect(self, T_forward, T_condition, B, X, A, M, Y, CE, Am, restrict_lens = False, nsamples = 1, eps = 0.):
        pass
    def inspect_trt(self, B, X, A, M, Y, CE, Am, nsamples=3):
        pass
class TransitionFunction(nn.Module):
    """Factory/wrapper around the SSM transition p(Z_t | Z_{t-1}, U_t).

    ``ttype`` selects the parameterization: 'lin', 'nl', 'het_lin'/'het_nl'
    (one expert per line of therapy, mixed by a one-hot suffix of U),
    'gated', 'attn_transition', or 'moe'. ``forward`` returns (mu, sigma).
    """
    def __init__(self,
                 dim_stochastic,
                 dim_data,
                 dim_treat,
                 dim_hidden,
                 ttype,
                 augmented: bool = False,
                 alpha1_type: str = 'linear',
                 otype: str = 'linear',
                 add_stochastic: bool = False,
                 num_heads: int = 1,
                 zmatrix: str = 'identity'):
        super(TransitionFunction, self).__init__()
        self.dim_stochastic = dim_stochastic
        self.dim_treat = dim_treat
        self.dim_hidden = dim_hidden
        self.dim_data = dim_data
        # Number of different lines of therapy to multiplex on (only for heterogenous models)
        self.K = 3
        self.ttype = ttype
        # Heterogeneous models strip the K-dim one-hot suffix from U.
        dim_treat_mK = dim_treat-self.K
        if augmented: # augmented does not completely work for transition function other than ('gated','lin'), ('lin','lin')
            dim_input = dim_stochastic+dim_data
        else:
            dim_input = dim_stochastic
        if self.ttype == 'lin':
            self.t_mu = nn.Linear(dim_input+dim_treat, dim_stochastic)
            self.t_sigma = nn.Linear(dim_input+dim_treat, dim_stochastic)
        elif self.ttype == 'nl':
            # Shared trunk; mu/sigma heads differ only in the last layer.
            tmodel = nn.Sequential(nn.Linear(dim_input+dim_treat, dim_hidden),nn.ReLU(True))
            self.t_mu = nn.Sequential(tmodel, nn.Linear(dim_hidden, dim_stochastic))
            self.t_sigma = nn.Sequential(tmodel, nn.Linear(dim_hidden, dim_stochastic))
        elif self.ttype == 'het_lin':
            # One linear expert per line of therapy.
            self.t_mu = nn.ModuleList([nn.Linear(dim_input+dim_treat_mK, dim_stochastic) for k in range(self.K)])
            self.t_sigma = nn.ModuleList([nn.Linear(dim_input+dim_treat_mK, dim_stochastic) for k in range(self.K)])
        elif self.ttype == 'het_nl':
            # One MLP expert per line of therapy (trunk shared within each expert).
            t_mu, t_sigma = [],[]
            for k in range(self.K):
                tmodel = nn.Sequential(nn.Linear(dim_input+dim_treat_mK, dim_hidden), nn.ReLU(True))
                t_mu.append(nn.Sequential(tmodel, nn.Linear(dim_hidden, dim_stochastic)))
                t_sigma.append(nn.Sequential(tmodel, nn.Linear(dim_hidden, dim_stochastic)))
            self.t_mu = nn.ModuleList(t_mu)
            self.t_sigma = nn.ModuleList(t_sigma)
        elif self.ttype == 'gated':
            # avoid_init skips the hand-tuned initialization unless the dims
            # match the configuration it was designed for (16 data / 9 treat).
            avoid_init = False
            if self.dim_data != 16 or self.dim_treat != 9:
                avoid_init = True
            self.t_mu = GatedTransition(dim_input, dim_treat, avoid_init = avoid_init, dim_output=dim_stochastic, alpha1_type=alpha1_type, otype=otype, add_stochastic=add_stochastic)
            self.t_sigma = nn.Linear(dim_input+dim_treat, dim_stochastic)
        elif self.ttype == 'attn_transition':
            avoid_init = False
            if self.dim_data != 16 or self.dim_treat != 9:
                avoid_init = True
            self.t_mu = AttentionIEFTransition(dim_input, dim_treat, avoid_init = avoid_init, dim_output=dim_stochastic, alpha1_type=alpha1_type, otype=otype, add_stochastic=add_stochastic, num_heads=num_heads, zmatrix=zmatrix)
            self.t_sigma = nn.Linear(dim_input+dim_treat, dim_stochastic)
        elif self.ttype == 'moe':
            self.t_mu = MofE(dim_input, dim_treat, dim_output=dim_stochastic, eclass='nl', num_experts=3)
            self.t_sigma = nn.Linear(dim_input+dim_treat, dim_stochastic)
        else:
            raise ValueError('bad ttype')
    # NOTE(review): this overrides nn.Module.apply (which normally maps a fn
    # over submodules, e.g. for weight init) — callers relying on the stock
    # Module.apply on this class would hit this method instead.
    def apply(self, fxn, z, u, eps=0.):
        """Call transition head ``fxn`` with (z, u): IEF-style heads take
        (z, u, eps) separately; plain nn modules take the concatenation."""
        if 'Monotonic' in fxn.__class__.__name__ or 'LogCellTransition' in fxn.__class__.__name__ or 'LogCellKill' in fxn.__class__.__name__ \
            or 'TreatmentExp' in fxn.__class__.__name__ or 'GatedTransition' in fxn.__class__.__name__ or 'Synthetic' in fxn.__class__.__name__ \
            or 'MofE' in fxn.__class__.__name__ or 'Ablation1' in fxn.__class__.__name__ or 'Ablation2' in fxn.__class__.__name__ \
            or 'AttentionIEFTransition' in fxn.__class__.__name__:
            return fxn(z, u, eps)
        else:
            return fxn(torch.cat([z, u],-1))
    def forward(self, z, u, eps=0.):
        """Return (mu, sigma) of the transition given state ``z`` and input
        ``u``; heterogeneous types mix K experts by the one-hot tail of u."""
        if 'het_' in self.ttype:
            # Split u into treatment features and the K-dim one-hot selector.
            treat = u[...,:-self.K]
            lot_oh = u[...,-self.K:]
            mul, sigl = [], []
            for t_mu, t_sigma in zip(self.t_mu, self.t_sigma):
                mu = self.apply(t_mu, z, treat)[...,None]
                sig = torch.nn.functional.softplus(self.apply(t_sigma, z, treat))[...,None]
                mul.append(mu)
                sigl.append(sig)
            mu = torch.cat(mul,-1)
            sig= torch.cat(sigl,-1)
            # Select each sample's expert via the one-hot; +0.05 floors sigma.
            mu = torch.sum(mu*lot_oh.unsqueeze(-2),-1)
            sig= torch.sum(sig*lot_oh.unsqueeze(-2),-1)+0.05
        else:
            mu = self.apply(self.t_mu, z, u, eps)
            sig = torch.nn.functional.softplus(self.apply(self.t_sigma, z, u))
        return mu, sig
| [
"models.iefs.moe.MofE",
"torch.nn.ReLU",
"numpy.log",
"models.ssm.inference.Attention_STInf",
"torch.repeat_interleave",
"models.iefs.gated.GatedTransition",
"argparse.ArgumentParser",
"torch.mean",
"torch.nn.ModuleList",
"numpy.max",
"torch.randn",
"numpy.abs",
"torch.ones_like",
"models.... | [((1903, 1936), 'numpy.abs', 'np.abs', (["self.hparams['inf_noise']"], {}), "(self.hparams['inf_noise'])\n", (1909, 1936), True, 'import numpy as np\n'), ((4105, 4163), 'torch.nn.Linear', 'nn.Linear', (['(dim_treat + dim_data + dim_base)', 'dim_stochastic'], {}), '(dim_treat + dim_data + dim_base, dim_stochastic)\n', (4114, 4163), True, 'import torch.nn as nn\n'), ((4190, 4248), 'torch.nn.Linear', 'nn.Linear', (['(dim_treat + dim_data + dim_base)', 'dim_stochastic'], {}), '(dim_treat + dim_data + dim_base, dim_stochastic)\n', (4199, 4248), True, 'import torch.nn as nn\n'), ((4295, 4321), 'torch.cat', 'torch.cat', (['[B, X0, A0]', '(-1)'], {}), '([B, X0, A0], -1)\n', (4304, 4321), False, 'import torch\n'), ((5156, 5182), 'torch.cat', 'torch.cat', (['[B, X0, A0]', '(-1)'], {}), '([B, X0, A0], -1)\n', (5165, 5182), False, 'import torch\n'), ((8598, 8624), 'torch.cat', 'torch.cat', (['[B, X0, A0]', '(-1)'], {}), '([B, X0, A0], -1)\n', (8607, 8624), False, 'import torch\n'), ((11979, 11999), 'torch.mean', 'torch.mean', (['neg_elbo'], {}), '(neg_elbo)\n', (11989, 11999), False, 'import torch\n'), ((12838, 12858), 'torch.mean', 'torch.mean', (['reg_loss'], {}), '(reg_loss)\n', (12848, 12858), False, 'import torch\n'), ((14170, 14214), 'torch.cat', 'torch.cat', (['[k[:, None, :] for k in Zlist]', '(1)'], {}), '([k[:, None, :] for k in Zlist], 1)\n', (14179, 14214), False, 'import torch\n'), ((16239, 16278), 'torch.cat', 'torch.cat', (['[X[:, [0], :], x_forward]', '(1)'], {}), '([X[:, [0], :], x_forward], 1)\n', (16248, 16278), False, 'import torch\n'), ((19031, 19086), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'parents': '[parent_parser]', 'add_help': '(False)'}), '(parents=[parent_parser], add_help=False)\n', (19045, 19086), False, 'from argparse import ArgumentParser\n'), ((24660, 24718), 'torch.nn.Linear', 'nn.Linear', (['(dim_treat + dim_data + dim_base)', 'dim_stochastic'], {}), '(dim_treat + dim_data + dim_base, dim_stochastic)\n', (24669, 
24718), True, 'import torch.nn as nn\n'), ((24745, 24803), 'torch.nn.Linear', 'nn.Linear', (['(dim_treat + dim_data + dim_base)', 'dim_stochastic'], {}), '(dim_treat + dim_data + dim_base, dim_stochastic)\n', (24754, 24803), True, 'import torch.nn as nn\n'), ((24846, 24899), 'models.multi_head_att.MultiHeadedAttention', 'MultiHeadedAttention', (['num_heads', '(dim_treat + dim_base)'], {}), '(num_heads, dim_treat + dim_base)\n', (24866, 24899), False, 'from models.multi_head_att import MultiHeadedAttention\n'), ((24922, 24969), 'torch.nn.Linear', 'nn.Linear', (['dim_stochastic', '(dim_treat + dim_base)'], {}), '(dim_stochastic, dim_treat + dim_base)\n', (24931, 24969), True, 'import torch.nn as nn\n'), ((25082, 25108), 'torch.cat', 'torch.cat', (['[B, X0, A0]', '(-1)'], {}), '([B, X0, A0], -1)\n', (25091, 25108), False, 'import torch\n'), ((28470, 28490), 'torch.mean', 'torch.mean', (['neg_elbo'], {}), '(neg_elbo)\n', (28480, 28490), False, 'import torch\n'), ((28899, 28919), 'torch.mean', 'torch.mean', (['reg_loss'], {}), '(reg_loss)\n', (28909, 28919), False, 'import torch\n'), ((2000, 2137), 'models.ssm.inference.RNN_STInf', 'RNN_STInf', (['dim_base', 'dim_data', 'dim_treat', 'dim_hidden', 'dim_stochastic'], {'post_approx': 'post_approx', 'rank': 'rank', 'combiner_type': 'combiner_type'}), '(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic,\n post_approx=post_approx, rank=rank, combiner_type=combiner_type)\n', (2009, 2137), False, 'from models.ssm.inference import RNN_STInf, Attention_STInf\n'), ((2930, 2965), 'torch.nn.Linear', 'nn.Linear', (['dim_stochastic', 'dim_data'], {}), '(dim_stochastic, dim_data)\n', (2939, 2965), True, 'import torch.nn as nn\n'), ((2993, 3028), 'torch.nn.Linear', 'nn.Linear', (['dim_stochastic', 'dim_data'], {}), '(dim_stochastic, dim_data)\n', (3002, 3028), True, 'import torch.nn as nn\n'), ((4466, 4483), 'pyro.distributions.Normal', 'Normal', (['mu', 'sigma'], {}), '(mu, sigma)\n', (4472, 4483), False, 'from 
pyro.distributions import Normal, Independent, Categorical, LogNormal\n'), ((5477, 5522), 'torch.cat', 'torch.cat', (['[Zt[:, :-1, :], Xt[:, :-1, :]]', '(-1)'], {}), '([Zt[:, :-1, :], Xt[:, :-1, :]], -1)\n', (5486, 5522), False, 'import torch\n'), ((5937, 5962), 'torch.cat', 'torch.cat', (['[mu1, mu2T]', '(1)'], {}), '([mu1, mu2T], 1)\n', (5946, 5962), False, 'import torch\n'), ((5962, 5989), 'torch.cat', 'torch.cat', (['[sig1, sig2T]', '(1)'], {}), '([sig1, sig2T], 1)\n', (5971, 5989), False, 'import torch\n'), ((6015, 6030), 'pyro.distributions.Normal', 'Normal', (['mu', 'sig'], {}), '(mu, sig)\n', (6021, 6030), False, 'from pyro.distributions import Normal, Independent, Categorical, LogNormal\n'), ((11016, 11040), 'torch.mean', 'torch.mean', (['nll_estimate'], {}), '(nll_estimate)\n', (11026, 11040), False, 'import torch\n'), ((11186, 11208), 'torch.ones', 'torch.ones', (['A.shape[1]'], {}), '(A.shape[1])\n', (11196, 11208), False, 'import torch\n'), ((13277, 13303), 'torch.cat', 'torch.cat', (['[B, X0, A0]', '(-1)'], {}), '([B, X0, A0], -1)\n', (13286, 13303), False, 'import torch\n'), ((16897, 16957), 'torch.cat', 'torch.cat', (['[X[:, :T_condition, :], x_forward_conditional]', '(1)'], {}), '([X[:, :T_condition, :], x_forward_conditional], 1)\n', (16906, 16957), False, 'import torch\n'), ((17171, 17199), 'torch.ones_like', 'torch.ones_like', (['masked_kl_t'], {}), '(masked_kl_t)\n', (17186, 17199), False, 'import torch\n'), ((17201, 17229), 'torch.ones_like', 'torch.ones_like', (['masked_kl_t'], {}), '(masked_kl_t)\n', (17216, 17229), False, 'import torch\n'), ((22502, 22639), 'models.ssm.inference.RNN_STInf', 'RNN_STInf', (['dim_base', 'dim_data', 'dim_treat', 'dim_hidden', 'dim_stochastic'], {'post_approx': 'post_approx', 'rank': 'rank', 'combiner_type': 'combiner_type'}), '(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic,\n post_approx=post_approx, rank=rank, combiner_type=combiner_type)\n', (22511, 22639), False, 'from models.ssm.inference import 
RNN_STInf, Attention_STInf\n'), ((23432, 23467), 'torch.nn.Linear', 'nn.Linear', (['dim_stochastic', 'dim_data'], {}), '(dim_stochastic, dim_data)\n', (23441, 23467), True, 'import torch.nn as nn\n'), ((23495, 23530), 'torch.nn.Linear', 'nn.Linear', (['dim_stochastic', 'dim_data'], {}), '(dim_stochastic, dim_data)\n', (23504, 23530), True, 'import torch.nn as nn\n'), ((25343, 25388), 'torch.cat', 'torch.cat', (['[Zt[:, :-1, :], Xt[:, :-1, :]]', '(-1)'], {}), '([Zt[:, :-1, :], Xt[:, :-1, :]], -1)\n', (25352, 25388), False, 'import torch\n'), ((25998, 26023), 'torch.cat', 'torch.cat', (['[mu1, mu2T]', '(1)'], {}), '([mu1, mu2T], 1)\n', (26007, 26023), False, 'import torch\n'), ((26023, 26050), 'torch.cat', 'torch.cat', (['[sig1, sig2T]', '(1)'], {}), '([sig1, sig2T], 1)\n', (26032, 26050), False, 'import torch\n'), ((26076, 26091), 'pyro.distributions.Normal', 'Normal', (['mu', 'sig'], {}), '(mu, sig)\n', (26082, 26091), False, 'from pyro.distributions import Normal, Independent, Categorical, LogNormal\n'), ((30547, 30595), 'torch.nn.Linear', 'nn.Linear', (['(dim_input + dim_treat)', 'dim_stochastic'], {}), '(dim_input + dim_treat, dim_stochastic)\n', (30556, 30595), True, 'import torch.nn as nn\n'), ((30633, 30681), 'torch.nn.Linear', 'nn.Linear', (['(dim_input + dim_treat)', 'dim_stochastic'], {}), '(dim_input + dim_treat, dim_stochastic)\n', (30642, 30681), True, 'import torch.nn as nn\n'), ((34172, 34190), 'torch.cat', 'torch.cat', (['mul', '(-1)'], {}), '(mul, -1)\n', (34181, 34190), False, 'import torch\n'), ((34207, 34226), 'torch.cat', 'torch.cat', (['sigl', '(-1)'], {}), '(sigl, -1)\n', (34216, 34226), False, 'import torch\n'), ((2208, 2363), 'models.ssm.inference.RNN_STInf', 'RNN_STInf', (['dim_base', 'dim_data', 'dim_treat', 'dim_hidden', 'dim_stochastic'], {'post_approx': 'post_approx', 'rank': 'rank', 'use_bn': '(True)', 'combiner_type': 'combiner_type'}), '(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic,\n post_approx=post_approx, rank=rank, 
use_bn=True, combiner_type=\n combiner_type)\n', (2217, 2363), False, 'from models.ssm.inference import RNN_STInf, Attention_STInf\n'), ((7575, 7603), 'torch.ones_like', 'torch.ones_like', (['masked_kl_t'], {}), '(masked_kl_t)\n', (7590, 7603), False, 'import torch\n'), ((7711, 7739), 'torch.ones_like', 'torch.ones_like', (['masked_kl_t'], {}), '(masked_kl_t)\n', (7726, 7739), False, 'import torch\n'), ((8322, 8360), 'torch.zeros', 'torch.zeros', (['(imp_samples, X.shape[0])'], {}), '((imp_samples, X.shape[0]))\n', (8333, 8360), False, 'import torch\n'), ((8398, 8436), 'torch.zeros', 'torch.zeros', (['(imp_samples, X.shape[0])'], {}), '((imp_samples, X.shape[0]))\n', (8409, 8436), False, 'import torch\n'), ((8474, 8512), 'torch.zeros', 'torch.zeros', (['(imp_samples, X.shape[0])'], {}), '((imp_samples, X.shape[0]))\n', (8485, 8512), False, 'import torch\n'), ((9495, 9541), 'torch.cat', 'torch.cat', (['[Z_s[:, :-1, :], Xt[:, :-1, :]]', '(-1)'], {}), '([Z_s[:, :-1, :], Xt[:, :-1, :]], -1)\n', (9504, 9541), False, 'import torch\n'), ((9973, 9998), 'torch.cat', 'torch.cat', (['[mu1, mu2T]', '(1)'], {}), '([mu1, mu2T], 1)\n', (9982, 9998), False, 'import torch\n'), ((9998, 10025), 'torch.cat', 'torch.cat', (['[sig1, sig2T]', '(1)'], {}), '([sig1, sig2T], 1)\n', (10007, 10025), False, 'import torch\n'), ((10899, 10963), 'torch.logsumexp', 'torch.logsumexp', (['(ll_estimates + ll_priors - ll_posteriors)'], {'dim': '(0)'}), '(ll_estimates + ll_priors - ll_posteriors, dim=0)\n', (10914, 10963), False, 'import torch\n'), ((10966, 10985), 'numpy.log', 'np.log', (['imp_samples'], {}), '(imp_samples)\n', (10972, 10985), True, 'import numpy as np\n'), ((13036, 13056), 'torch.mean', 'torch.mean', (['neg_elbo'], {}), '(neg_elbo)\n', (13046, 13056), False, 'import torch\n'), ((13058, 13080), 'torch.mean', 'torch.mean', (['masked_nll'], {}), '(masked_nll)\n', (13068, 13080), False, 'import torch\n'), ((13082, 13096), 'torch.mean', 'torch.mean', (['kl'], {}), '(kl)\n', (13092, 
13096), False, 'import torch\n'), ((13098, 13117), 'torch.ones_like', 'torch.ones_like', (['kl'], {}), '(kl)\n', (13113, 13117), False, 'import torch\n'), ((13726, 13775), 'torch.cat', 'torch.cat', (['[Aval[..., [0]], B, Aval[..., 1:]]', '(-1)'], {}), '([Aval[..., [0]], B, Aval[..., 1:]], -1)\n', (13735, 13775), False, 'import torch\n'), ((16160, 16189), 'torch.cat', 'torch.cat', (['x_forward_list', '(-1)'], {}), '(x_forward_list, -1)\n', (16169, 16189), False, 'import torch\n'), ((17009, 17037), 'torch.ones_like', 'torch.ones_like', (['masked_kl_t'], {}), '(masked_kl_t)\n', (17024, 17037), False, 'import torch\n'), ((17039, 17067), 'torch.ones_like', 'torch.ones_like', (['masked_kl_t'], {}), '(masked_kl_t)\n', (17054, 17067), False, 'import torch\n'), ((18520, 18597), 'torch.cat', 'torch.cat', (['(X[[i], :T_condition], x_forward_conditional, X[[i], T_total:])', '(1)'], {}), '((X[[i], :T_condition], x_forward_conditional, X[[i], T_total:]), 1)\n', (18529, 18597), False, 'import torch\n'), ((18770, 18803), 'torch.cat', 'torch.cat', (['x_conditional_list', '(-1)'], {}), '(x_conditional_list, -1)\n', (18779, 18803), False, 'import torch\n'), ((22710, 22865), 'models.ssm.inference.RNN_STInf', 'RNN_STInf', (['dim_base', 'dim_data', 'dim_treat', 'dim_hidden', 'dim_stochastic'], {'post_approx': 'post_approx', 'rank': 'rank', 'use_bn': '(True)', 'combiner_type': 'combiner_type'}), '(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic,\n post_approx=post_approx, rank=rank, use_bn=True, combiner_type=\n combiner_type)\n', (22719, 22865), False, 'from models.ssm.inference import RNN_STInf, Attention_STInf\n'), ((27466, 27494), 'torch.ones_like', 'torch.ones_like', (['masked_kl_t'], {}), '(masked_kl_t)\n', (27481, 27494), False, 'import torch\n'), ((27602, 27630), 'torch.ones_like', 'torch.ones_like', (['masked_kl_t'], {}), '(masked_kl_t)\n', (27617, 27630), False, 'import torch\n'), ((28936, 28956), 'torch.mean', 'torch.mean', (['neg_elbo'], {}), '(neg_elbo)\n', (28946, 
28956), False, 'import torch\n'), ((28958, 28980), 'torch.mean', 'torch.mean', (['masked_nll'], {}), '(masked_nll)\n', (28968, 28980), False, 'import torch\n'), ((28982, 28996), 'torch.mean', 'torch.mean', (['kl'], {}), '(kl)\n', (28992, 28996), False, 'import torch\n'), ((28998, 29017), 'torch.ones_like', 'torch.ones_like', (['kl'], {}), '(kl)\n', (29013, 29017), False, 'import torch\n'), ((33672, 33693), 'torch.cat', 'torch.cat', (['[z, u]', '(-1)'], {}), '([z, u], -1)\n', (33681, 33693), False, 'import torch\n'), ((2431, 2579), 'models.ssm.inference.RNN_STInf', 'RNN_STInf', (['dim_base', 'dim_data', 'dim_treat', 'dim_hidden', 'dim_stochastic'], {'post_approx': 'post_approx', 'rank': 'rank', 'nl': '"""relu"""', 'combiner_type': 'combiner_type'}), "(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic,\n post_approx=post_approx, rank=rank, nl='relu', combiner_type=combiner_type)\n", (2440, 2579), False, 'from models.ssm.inference import RNN_STInf, Attention_STInf\n'), ((3171, 3208), 'torch.nn.Linear', 'nn.Linear', (['dim_stochastic', 'dim_hidden'], {}), '(dim_stochastic, dim_hidden)\n', (3180, 3208), True, 'import torch.nn as nn\n'), ((3210, 3223), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (3217, 3223), True, 'import torch.nn as nn\n'), ((3274, 3305), 'torch.nn.Linear', 'nn.Linear', (['dim_hidden', 'dim_data'], {}), '(dim_hidden, dim_data)\n', (3283, 3305), True, 'import torch.nn as nn\n'), ((3356, 3387), 'torch.nn.Linear', 'nn.Linear', (['dim_hidden', 'dim_data'], {}), '(dim_hidden, dim_data)\n', (3365, 3387), True, 'import torch.nn as nn\n'), ((11305, 11365), 'torch.repeat_interleave', 'torch.repeat_interleave', (['B'], {'repeats': 'self.elbo_samples', 'dim': '(0)'}), '(B, repeats=self.elbo_samples, dim=0)\n', (11328, 11365), False, 'import torch\n'), ((11367, 11427), 'torch.repeat_interleave', 'torch.repeat_interleave', (['X'], {'repeats': 'self.elbo_samples', 'dim': '(0)'}), '(X, repeats=self.elbo_samples, dim=0)\n', (11390, 11427), False, 
'import torch\n'), ((11451, 11511), 'torch.repeat_interleave', 'torch.repeat_interleave', (['A'], {'repeats': 'self.elbo_samples', 'dim': '(0)'}), '(A, repeats=self.elbo_samples, dim=0)\n', (11474, 11511), False, 'import torch\n'), ((11513, 11573), 'torch.repeat_interleave', 'torch.repeat_interleave', (['M'], {'repeats': 'self.elbo_samples', 'dim': '(0)'}), '(M, repeats=self.elbo_samples, dim=0)\n', (11536, 11573), False, 'import torch\n'), ((11597, 11657), 'torch.repeat_interleave', 'torch.repeat_interleave', (['Y'], {'repeats': 'self.elbo_samples', 'dim': '(0)'}), '(Y, repeats=self.elbo_samples, dim=0)\n', (11620, 11657), False, 'import torch\n'), ((11659, 11720), 'torch.repeat_interleave', 'torch.repeat_interleave', (['CE'], {'repeats': 'self.elbo_samples', 'dim': '(0)'}), '(CE, repeats=self.elbo_samples, dim=0)\n', (11682, 11720), False, 'import torch\n'), ((16810, 16851), 'torch.cat', 'torch.cat', (['x_forward_conditional_list', '(-1)'], {}), '(x_forward_conditional_list, -1)\n', (16819, 16851), False, 'import torch\n'), ((17908, 17923), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (17921, 17923), False, 'import pdb\n'), ((18693, 18728), 'torch.cat', 'torch.cat', (['x_conditionals_per_pt', '(0)'], {}), '(x_conditionals_per_pt, 0)\n', (18702, 18728), False, 'import torch\n'), ((22933, 23081), 'models.ssm.inference.RNN_STInf', 'RNN_STInf', (['dim_base', 'dim_data', 'dim_treat', 'dim_hidden', 'dim_stochastic'], {'post_approx': 'post_approx', 'rank': 'rank', 'nl': '"""relu"""', 'combiner_type': 'combiner_type'}), "(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic,\n post_approx=post_approx, rank=rank, nl='relu', combiner_type=combiner_type)\n", (22942, 23081), False, 'from models.ssm.inference import RNN_STInf, Attention_STInf\n'), ((23673, 23710), 'torch.nn.Linear', 'nn.Linear', (['dim_stochastic', 'dim_hidden'], {}), '(dim_stochastic, dim_hidden)\n', (23682, 23710), True, 'import torch.nn as nn\n'), ((23712, 23725), 'torch.nn.ReLU', 'nn.ReLU', 
(['(True)'], {}), '(True)\n', (23719, 23725), True, 'import torch.nn as nn\n'), ((23776, 23807), 'torch.nn.Linear', 'nn.Linear', (['dim_hidden', 'dim_data'], {}), '(dim_hidden, dim_data)\n', (23785, 23807), True, 'import torch.nn as nn\n'), ((23858, 23889), 'torch.nn.Linear', 'nn.Linear', (['dim_hidden', 'dim_data'], {}), '(dim_hidden, dim_data)\n', (23867, 23889), True, 'import torch.nn as nn\n'), ((27788, 27848), 'torch.repeat_interleave', 'torch.repeat_interleave', (['B'], {'repeats': 'self.elbo_samples', 'dim': '(0)'}), '(B, repeats=self.elbo_samples, dim=0)\n', (27811, 27848), False, 'import torch\n'), ((27850, 27910), 'torch.repeat_interleave', 'torch.repeat_interleave', (['X'], {'repeats': 'self.elbo_samples', 'dim': '(0)'}), '(X, repeats=self.elbo_samples, dim=0)\n', (27873, 27910), False, 'import torch\n'), ((27934, 27994), 'torch.repeat_interleave', 'torch.repeat_interleave', (['A'], {'repeats': 'self.elbo_samples', 'dim': '(0)'}), '(A, repeats=self.elbo_samples, dim=0)\n', (27957, 27994), False, 'import torch\n'), ((27996, 28056), 'torch.repeat_interleave', 'torch.repeat_interleave', (['M'], {'repeats': 'self.elbo_samples', 'dim': '(0)'}), '(M, repeats=self.elbo_samples, dim=0)\n', (28019, 28056), False, 'import torch\n'), ((28080, 28140), 'torch.repeat_interleave', 'torch.repeat_interleave', (['Y'], {'repeats': 'self.elbo_samples', 'dim': '(0)'}), '(Y, repeats=self.elbo_samples, dim=0)\n', (28103, 28140), False, 'import torch\n'), ((28142, 28203), 'torch.repeat_interleave', 'torch.repeat_interleave', (['CE'], {'repeats': 'self.elbo_samples', 'dim': '(0)'}), '(CE, repeats=self.elbo_samples, dim=0)\n', (28165, 28203), False, 'import torch\n'), ((30765, 30809), 'torch.nn.Linear', 'nn.Linear', (['(dim_input + dim_treat)', 'dim_hidden'], {}), '(dim_input + dim_treat, dim_hidden)\n', (30774, 30809), True, 'import torch.nn as nn\n'), ((30808, 30821), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (30815, 30821), True, 'import torch.nn as nn\n'), 
((30883, 30920), 'torch.nn.Linear', 'nn.Linear', (['dim_hidden', 'dim_stochastic'], {}), '(dim_hidden, dim_stochastic)\n', (30892, 30920), True, 'import torch.nn as nn\n'), ((30983, 31020), 'torch.nn.Linear', 'nn.Linear', (['dim_hidden', 'dim_stochastic'], {}), '(dim_hidden, dim_stochastic)\n', (30992, 31020), True, 'import torch.nn as nn\n'), ((2647, 2779), 'models.ssm.inference.Attention_STInf', 'Attention_STInf', (['dim_base', 'dim_data', 'dim_treat', 'dim_hidden', 'dim_stochastic'], {'nheads': 'num_heads', 'post_approx': 'post_approx', 'rank': 'rank'}), '(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic,\n nheads=num_heads, post_approx=post_approx, rank=rank)\n', (2662, 2779), False, 'from models.ssm.inference import RNN_STInf, Attention_STInf\n'), ((6352, 6372), 'torch.randn', 'torch.randn', (['X.shape'], {}), '(X.shape)\n', (6363, 6372), False, 'import torch\n'), ((14324, 14349), 'pyro.distributions.Normal', 'Normal', (['p_x_mu', 'p_x_sigma'], {}), '(p_x_mu, p_x_sigma)\n', (14330, 14349), False, 'from pyro.distributions import Normal, Independent, Categorical, LogNormal\n'), ((18157, 18166), 'numpy.max', 'np.max', (['l'], {}), '(l)\n', (18163, 18166), True, 'import numpy as np\n'), ((23149, 23281), 'models.ssm.inference.Attention_STInf', 'Attention_STInf', (['dim_base', 'dim_data', 'dim_treat', 'dim_hidden', 'dim_stochastic'], {'nheads': 'num_heads', 'post_approx': 'post_approx', 'rank': 'rank'}), '(dim_base, dim_data, dim_treat, dim_hidden, dim_stochastic,\n nheads=num_heads, post_approx=post_approx, rank=rank)\n', (23164, 23281), False, 'from models.ssm.inference import RNN_STInf, Attention_STInf\n'), ((31754, 31773), 'torch.nn.ModuleList', 'nn.ModuleList', (['t_mu'], {}), '(t_mu)\n', (31767, 31773), True, 'import torch.nn as nn\n'), ((31808, 31830), 'torch.nn.ModuleList', 'nn.ModuleList', (['t_sigma'], {}), '(t_sigma)\n', (31821, 31830), True, 'import torch.nn as nn\n'), ((13477, 13494), 'pyro.distributions.Normal', 'Normal', (['mu1', 'sig1'], 
{}), '(mu1, sig1)\n', (13483, 13494), False, 'from pyro.distributions import Normal, Independent, Categorical, LogNormal\n'), ((13989, 14008), 'pyro.distributions.Normal', 'Normal', (['mut', 'sigmat'], {}), '(mut, sigmat)\n', (13995, 14008), False, 'from pyro.distributions import Normal, Independent, Categorical, LogNormal\n'), ((31113, 31164), 'torch.nn.Linear', 'nn.Linear', (['(dim_input + dim_treat_mK)', 'dim_stochastic'], {}), '(dim_input + dim_treat_mK, dim_stochastic)\n', (31122, 31164), True, 'import torch.nn as nn\n'), ((31241, 31292), 'torch.nn.Linear', 'nn.Linear', (['(dim_input + dim_treat_mK)', 'dim_stochastic'], {}), '(dim_input + dim_treat_mK, dim_stochastic)\n', (31250, 31292), True, 'import torch.nn as nn\n'), ((32029, 32195), 'models.iefs.gated.GatedTransition', 'GatedTransition', (['dim_input', 'dim_treat'], {'avoid_init': 'avoid_init', 'dim_output': 'dim_stochastic', 'alpha1_type': 'alpha1_type', 'otype': 'otype', 'add_stochastic': 'add_stochastic'}), '(dim_input, dim_treat, avoid_init=avoid_init, dim_output=\n dim_stochastic, alpha1_type=alpha1_type, otype=otype, add_stochastic=\n add_stochastic)\n', (32044, 32195), False, 'from models.iefs.gated import GatedTransition\n'), ((32226, 32274), 'torch.nn.Linear', 'nn.Linear', (['(dim_input + dim_treat)', 'dim_stochastic'], {}), '(dim_input + dim_treat, dim_stochastic)\n', (32235, 32274), True, 'import torch.nn as nn\n'), ((31475, 31522), 'torch.nn.Linear', 'nn.Linear', (['(dim_input + dim_treat_mK)', 'dim_hidden'], {}), '(dim_input + dim_treat_mK, dim_hidden)\n', (31484, 31522), True, 'import torch.nn as nn\n'), ((31522, 31535), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (31529, 31535), True, 'import torch.nn as nn\n'), ((32482, 32691), 'models.iefs.att_iefs.AttentionIEFTransition', 'AttentionIEFTransition', (['dim_input', 'dim_treat'], {'avoid_init': 'avoid_init', 'dim_output': 'dim_stochastic', 'alpha1_type': 'alpha1_type', 'otype': 'otype', 'add_stochastic': 'add_stochastic', 
'num_heads': 'num_heads', 'zmatrix': 'zmatrix'}), '(dim_input, dim_treat, avoid_init=avoid_init,\n dim_output=dim_stochastic, alpha1_type=alpha1_type, otype=otype,\n add_stochastic=add_stochastic, num_heads=num_heads, zmatrix=zmatrix)\n', (32504, 32691), False, 'from models.iefs.att_iefs import AttentionIEFTransition\n'), ((32724, 32772), 'torch.nn.Linear', 'nn.Linear', (['(dim_input + dim_treat)', 'dim_stochastic'], {}), '(dim_input + dim_treat, dim_stochastic)\n', (32733, 32772), True, 'import torch.nn as nn\n'), ((31587, 31624), 'torch.nn.Linear', 'nn.Linear', (['dim_hidden', 'dim_stochastic'], {}), '(dim_hidden, dim_stochastic)\n', (31596, 31624), True, 'import torch.nn as nn\n'), ((31680, 31717), 'torch.nn.Linear', 'nn.Linear', (['dim_hidden', 'dim_stochastic'], {}), '(dim_hidden, dim_stochastic)\n', (31689, 31717), True, 'import torch.nn as nn\n'), ((32844, 32929), 'models.iefs.moe.MofE', 'MofE', (['dim_input', 'dim_treat'], {'dim_output': 'dim_stochastic', 'eclass': '"""nl"""', 'num_experts': '(3)'}), "(dim_input, dim_treat, dim_output=dim_stochastic, eclass='nl',\n num_experts=3)\n", (32848, 32929), False, 'from models.iefs.moe import MofE\n'), ((32965, 33013), 'torch.nn.Linear', 'nn.Linear', (['(dim_input + dim_treat)', 'dim_stochastic'], {}), '(dim_input + dim_treat, dim_stochastic)\n', (32974, 33013), True, 'import torch.nn as nn\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.