text stringlengths 0 27.1M | meta dict |
|---|---|
#
import aei
import gdal
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib tk
# set the working directories
base = '/home/cba/Downloads/scale-conceptual/'
plots = base + 'plots/'
tch_file = base + 'marin_tch_byte.tif'
gd_file = base + 'marin_tch_mask.tif'
rnd_data = base + 'random-sampling-data.pck'
# set flag for whether to calculate or load the random sampling
calc_rs = False
# set the number of resolutions to assess
#res = [20, 30, 50, 100, 250, 500, 1000, 1250, 1500, 1750, 2000]
res = [20, 30, 50, 100, 250, 500, 1000]  # grain sizes (m) to assess
# set the number of pixels to sample
fullres = 2 * 2                 # pixel area of one full-resolution (2 m) cell
testres = min(res) * min(res)   # pixel area of the smallest test grain
# use floor division: these are counts used as array dimensions and loop
# bounds, and '/' yields a float under Python 3
n_var = testres // fullres      # number of samples per grain
n_loc = n_var                   # number of random locations per repetition
n_rep = 1000                    # bootstrap repetitions
if calc_rs:
    # open the suitability raster and read in good indices
    gdref = gdal.Open(gd_file)
    gd = gdref.ReadAsArray()
    #gd_ext1 = gd == 1
    gd_ext1 = np.where(gd == 1)   # (row, col) indices of usable pixels
    bd = gd == 0                  # boolean mask of bad (nodata) pixels
    #gd_ext2 = np.where(gd > 1)
    # release the mask raster before reading the data raster
    gd = None
    gdref = None
    # read the tch (tree canopy height) data into memory
    tchref = gdal.Open(tch_file)
    #tchb1 = tchref.GetRasterBand(1)
    #ndval = tchb1.GetNoDataValue()
    # np.float was removed in numpy 1.24; the builtin float is equivalent
    tch = tchref.ReadAsArray().astype(float)
    tchb1 = None
    tchref = None
    # set the nodata values to nan so nanmean/nanvar ignore them
    tch[bd] = np.nan
    # create a set of arrays to store the data
    # (cast the count to int so the shape is valid under Python 3 division)
    wg_var = np.zeros((len(res), n_rep, int(n_loc)))
    bg_var = np.zeros((len(res), n_rep, int(n_loc)))
    # loop through each resolution, and calculate within/between grain variance
    for i in range(len(res)):
        # integer half-width of the sampling window so the slice bounds
        # below stay ints (res values are all even, so '//' == '/' here)
        rdif = res[i] // 2
        for j in range(n_rep):
            # find the random pixel locations to select
            loc_ext1 = np.random.choice(len(gd_ext1[0]), int(n_loc))
            #loc_ext2 = np.random.choice(len(gd_ext2[0]), n_loc)
            # loop through each random location
            for k in range(int(n_loc)):
                # sample the x/y space around this pixel
                xmin = gd_ext1[0][loc_ext1[k]] - rdif
                xmax = gd_ext1[0][loc_ext1[k]] + rdif
                ymin = gd_ext1[1][loc_ext1[k]] - rdif
                ymax = gd_ext1[1][loc_ext1[k]] + rdif
                subset = tch[xmin:xmax, ymin:ymax]
                avg_ext1 = np.nanmean(subset)   # grain mean height
                var_ext1 = np.nanvar(subset)    # within-grain variance
                wg_var[i,j,k] = var_ext1
                bg_var[i,j,k] = avg_ext1
    # save this loop to a data file
    # (pickle requires a binary-mode file handle; 'w+' breaks under Python 3)
    with open(rnd_data, 'wb') as f:
        pickle.dump([wg_var, bg_var], f)
else:
    with open(rnd_data, 'rb') as f:
        wg_var, bg_var = pickle.load(f)
# now that we have the data, reduce it to plot the within/between grain variance
wg_mean_loc = np.nanmean(wg_var, axis=2)       # mean across all locations per rep
wg_mean_all = np.nanmean(wg_mean_loc, axis=1)  # mean of all locs, all reps
wg_std = np.nanstd(wg_mean_loc, axis=1)        # stdev of within grain variation across reps
# NOTE: a stray trailing comma previously wrapped this array in a 1-tuple,
# which forced '[0]' indexing on the two lines below; removed
bg_var_loc = np.nanvar(bg_var, axis=2)         # variance of between-grain measurements
bg_mean = np.nanmean(bg_var_loc, axis=1)       # mean of between-grain variance
bg_std = np.nanstd(bg_var_loc, axis=1)         # stdev of between-grain variance
# plot these results
cols = aei.color.color_blind(5)
col = '#E3C16D'
#col=cols[4]
fill_alpha = 0.7
plt.figure(figsize=(4,3), dpi=200)
# plot the standard deviations for each
plt.fill_between(res, wg_mean_all-wg_std, wg_mean_all+wg_std,
                 color=col, alpha=fill_alpha, label='Bootstrapped\nstandard deviation')
plt.fill_between(res, bg_mean-bg_std, bg_mean+bg_std,
                 color=col, alpha=fill_alpha)
# set the line plots
plt.plot(res, wg_mean_all, color='black', linewidth=1.5, linestyle = '-',
         label='Within-grain\nvariance')
plt.plot(res, bg_mean, color='black', linewidth=1.5, linestyle = '--',
         label='Between-grain\nvariance')
# set the labels
plt.xlabel('Log grain size (m)')
plt.ylabel('Spatial\nvariance (m{})'.format(r'$^2$'))
plt.title('Scale-dependence in\nspatial tree height patterns')
# log transform the x axis
plt.xscale('log')
#plt.yscale('log')
# replace the axis labels
yticks = [25, 50, 75, 100]
plt.yticks(yticks, ['{}'.format(f) for f in yticks])
plt.xticks(res, res)
# set the custom legend outside the axes on the right
lgd = plt.legend(loc='right', bbox_to_anchor=(1.7, 0.5), fancybox=True)
# save the figure
plt.tight_layout()
# 'bbox_extra_artists' is the savefig keyword that makes the tight bounding
# box include the off-axes legend; 'additional_artists' is not a savefig
# parameter and was silently ignored
plt.savefig('{}{}.png'.format(plots, 'Variance-plots'), dpi=300,
            bbox_extra_artists=[lgd], bbox_inches="tight")
plt.savefig('{}{}.svg'.format(plots, 'Variance-plots'),
            bbox_extra_artists=[lgd], bbox_inches="tight")
"alphanum_fraction": 0.6294256491,
"author": null,
"avg_line_length": 33.4473684211,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f42ecba628c1621be6f014aa7951f72152e9670d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-04-08T05:41:45.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-04-08T05:41:45.000Z",
"max_forks_repo_head_hexsha": "ea5be566d810ca9d792625b2e97acd2065c9d09a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "christobal54/aei-grad-school",
"max_forks_repo_path": "projects/scale/scale-plot-spatial-variance.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ea5be566d810ca9d792625b2e97acd2065c9d09a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "christobal54/aei-grad-school",
"max_issues_repo_path": "projects/scale/scale-plot-spatial-variance.py",
"max_line_length": 85,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "ea5be566d810ca9d792625b2e97acd2065c9d09a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "christobal54/aei-grad-school",
"max_stars_repo_path": "projects/scale/scale-plot-spatial-variance.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1543,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5084
} |
import pandas as pd
import numpy as np
#https://iq-inc.com/importerror-attempted-relative-import/
import data.data_retrieve as dr
import features.data_preprocessing as pp
import models.train_model as tm
import models.train_model_FTR as tmFTR
#could make it object oriented so that different models can be different objects
#could make Preprocess Class
# fetch the raw match data and engineer model features
data = dr.get_data()
data = pp.feature_engineering(data)
#data, X, y = pp.get_X_and_y_over_under(data,number_goals = 2.5)
# target variable is the full-time result (FTR)
data, X, y = pp.get_X_and_y_FTR(data)
# NOTE(review): unpacking assumes pp.train_test_split returns in
# (X_train, y_train, X_test, y_test) order -- confirm against its definition
X_train, y_train, X_test, y_test = pp.train_test_split(data,X,y)
print(data.shape)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
#optimal_params = tm.get_optimal_parameters(X_train,y_train,n_trials=35)
#print(optimal_params)
# hard-coded hyperparameters, presumably from an earlier tuning run
# (see the commented-out get_optimal_parameters call above)
optimal_params = {
    'n_estimators': 286,
    'max_depth': 6,
    'num_leaves': 18,
    'min_data_in_leaf': 70,
    'feature_fraction': 0.4,
    'lambda_l1': 10,
    'lambda_l2': 55,
    'bagging_fraction': 0.5,
    'learning_rate': 0.03438248498061834,
    'min_gain_to_split': 0.1030288398937825,
    'bagging_freq': 1,
}
# train on the training split, predict on the test split, then evaluate
preds, preds_class = tmFTR.train_and_predict(X_train, y_train,X_test, y_test,optimal_params)
evaluation_metrics = tmFTR.evaluate_predictions(y_test,preds, preds_class)
# cross-validated scores plus SHAP feature attributions on the training set
cv_scores, shap_values, df_shaps= tm.get_cross_validated_scores_and_shap_values(X_train,y_train,optimal_params)
print(df_shaps)
#show performance of model on different teams (perhaps certain teams are predicted better)
"alphanum_fraction": 0.7020884521,
"author": null,
"avg_line_length": 33.2244897959,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "3fd22b434f4f7aa3afeef1945640e691b4f570aa",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e860b021d2e02d72f5223aee7ca4e33dacf4aa32",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "mkennealy/football-ML-models",
"max_forks_repo_path": "src/main.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e860b021d2e02d72f5223aee7ca4e33dacf4aa32",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "mkennealy/football-ML-models",
"max_issues_repo_path": "src/main.py",
"max_line_length": 111,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e860b021d2e02d72f5223aee7ca4e33dacf4aa32",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "mkennealy/football-ML-models",
"max_stars_repo_path": "src/main.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 411,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1628
} |
"""
Provides a subclass to DSNFITSexaminer for plotting data.
Possible symbols::
================ ===============================
character description
================ ===============================
``'o'`` circle marker
``'v'`` triangle_down marker
``'^'`` triangle_up marker
``'<'`` triangle_left marker
``'>'`` triangle_right marker
``'s'`` square marker
``'p'`` pentagon marker
``'D'`` diamond marker
``'h'`` hexagon1 marker
``'H'`` hexagon2 marker
``'1'`` tri_down marker
``'2'`` tri_up marker
``'3'`` tri_left marker
``'4'`` tri_right marker
``'*'`` star marker
``'+'`` plus marker
``'x'`` x marker
``'d'`` thin_diamond marker
``'|'`` vline marker
``'_'`` hline marker
``'.'`` point marker
``','`` pixel marker
``'-'`` solid line style
``'--'`` dashed line style
``'-.'`` dash-dot line style
``':'`` dotted line style
================ ===============================
"""
import logging
import matplotlib as MPL
import matplotlib.font_manager as MPLfont
import numpy
import os.path
import pylab as PL
import re
import Data_Reduction.tipping as DRtip
import Data_Reduction.FITS.SDFITSexaminer as FITSexam
import support
import support.graphics as GR
# module-level logger for this plotting module
logger = logging.getLogger(__name__)
# cycle of line colors used when plotting multiple series
plotcolors = ['b','g','r','m','c']
# cycle of marker symbols, indexed modulo 20 by record number
plotsymbols = ['o','v','^','<','>',
               's','p','D','h','H',
               '1','2','3','4','*',
               '+',"x","d","|","_"]
# shared font for figure legends
fontP = MPLfont.FontProperties()
fontP.set_size('xx-small')
# formatter for time axes showing HH:MM:SS
seconds_formatter = MPL.dates.DateFormatter("%H:%M:%S")
class DSNFITSplotter(FITSexam.DSNFITSexaminer):
"""
"""
def __init__(self, parent=None, FITSfile=None, hdulist=None):
    """
    Create a new DSNFITSplotter object from an HDU list

    If invoked from within another object, then parent should be 'self'.
    Either a FITS file or an HDU list must be provided.

    @param parent : enclosing object, if invoked from within one
    @param FITSfile : path to a FITS file (alternative to hdulist)
    @type  FITSfile : str
    @param hdulist : an already-opened HDU list (alternative to FITSfile)
    """
    mylogger = logging.getLogger(logger.name+".DSNFITSplotter")
    mylogger.debug("__init__: initializing superclass")
    # superclass populates self.tables from the file or HDU list
    FITSexam.DSNFITSexaminer.__init__(self, parent=parent, FITSfile=FITSfile,
                                      hdulist=hdulist)
    self.logger = mylogger
    # one Plotter per examiner table, keyed the same as self.tables
    self.plotter = {}
    for key in list(self.tables.keys()):
        table = self.tables[key]
        self.logger.debug("__init__: processing %s", table)
        self.plotter[key] = self.Plotter(self, table)
    self.logger.debug("__init__: completed")
class Plotter(FITSexam.DSNFITSexaminer.Table):
"""
"""
def __init__(self, parent, table):
    """
    Wrap an existing Table in a Plotter.

    Initialization was already done for Table superclass, so instead of
    calling the superclass initializer again, every attribute of the
    given table is copied onto this instance.

    @param parent : the DSNFITSplotter that owns this Plotter
    @param table : an already-initialized DSNFITSexaminer.Table
    """
    self.logger = logging.getLogger(parent.logger.name+".Plotter")
    self.logger.debug("__init__: subclass of %s", table)
    # shallow-copy the table's attributes; the Plotter shares the
    # table's data rather than re-reading it
    for attr in list(table.__dict__.keys()):
        self.__setattr__(attr, table.__getattribute__(attr))
        self.logger.debug("__init__: copying %s", attr)
#------------------------ Plotter methods ---------------------------------
def figure_rows_and_columns(self, naxes, **kwargs):
    """
    Computes number of figures, rows and columns for 'subplots'

    The available drawing area is derived from the screen resolution
    (assumed 100 px/in, with a 5% margin).  If more subplots are requested
    than fit on one screen-sized figure, multiple figures are used.

    @param naxes : number of subplots (axes) needed
    @type  naxes : int

    @param kwargs : optional "width" and "height" of one subplot in inches
                    (default 4 each)

    @return: (num_figs, num_rows, num_cols, (width, height))
    """
    # get maximum number of rows and columns that fit on the screen
    screensize = GR.get_screen_resolution()
    widthIN, heightIN = screensize['width']/100., screensize['height']/100.
    freewidth, freeheight = 0.95*widthIN, 0.95*heightIN
    self.logger.debug("figure_rows_and_columns: width available = %f in",
                      freewidth)
    self.logger.debug("figure_rows_and_columns: height available = %f in",
                      freeheight)
    # per-subplot size in inches
    # (the old code tested for a misspelled "heigth" key and then read
    # kwargs["width"] for the height; both fixed here)
    width = kwargs.get("width", 4)
    height = kwargs.get("height", 4)
    max_cols = int(round(freewidth/width))
    max_rows = int(round(freeheight/height))
    self.logger.debug("figure_rows_and_columns: max cols and rows: %d,%d",
                      max_cols, max_rows)
    max_graphs = max_cols*max_rows
    aspect = float(max_cols)/max_rows
    # how many figures do we need?  ceil(naxes/max_graphs), at least one.
    # (the old '1 + naxes/max_graphs' created an extra empty figure when
    # naxes was an exact multiple of max_graphs, and produced a float
    # under Python 3)
    num_figs = max(1, int(numpy.ceil(float(naxes)/max_graphs)))
    if num_figs > 1:
        # multiple figures: fill each screen-sized grid completely
        num_cols = max_cols
        num_rows = max_rows
    else:
        # single figure: pick a grid near the screen's aspect ratio
        # (bare 'ceil'/'sqrt' were undefined names; use numpy's)
        num_cols = int(numpy.ceil(numpy.sqrt(aspect*naxes)))
        num_rows = int(numpy.ceil(float(naxes)/num_cols))
    self.logger.debug("figure_rows_and_columns: %d rows, %d columns",
                      num_rows, num_cols)
    return num_figs, num_rows, num_cols, (width,height)
def init_multiplot(self, title, nrows, ncols, size=(4,4), sharey=False):
    """
    create a figure with multiple plots sharing common X and Y axis

    The subplots have no space between them.

    When the number of graphs is large then multiple figures need to be
    created with 'init_multiplots'

    @param title : figure title
    @type  title : str

    @param nrows : number of rows of subplots
    @type  nrows : int

    @param ncols : number of columns of subplots
    @type  ncols : int

    @param size : width, height of one subplot (in)
    @type  size : tuple of float

    @param sharey : same range on all Y axes
    @type  sharey : bool

    @return: (figure, axes array from pylab.subplots)
    """
    width, height = size
    fig, ax = PL.subplots(nrows=nrows, ncols=ncols, sharey=sharey)
    self.logger.debug("init_multiplot: %d rows, %d columns, size: %s",
                      nrows, ncols, size)
    # could also be fig.set_size_inches(size, forward=True)
    fig.set_size_inches(ncols*width, nrows*height, forward=True)
    fig.subplots_adjust(wspace=0, hspace=0) # no space between plots in a row
    fig.suptitle(title)
    # (removed three dead locals -- subplot_width/margin/offset -- that were
    # computed for legend spacing but never used)
    return fig, ax
def init_multiplots(self, title, nfigs, nrows, ncols, size=(4,4),
                    sharey=False):
    """
    create multiple figures, each holding a grid of subplots

    Each figure is produced by 'init_multiplot', so the subplots within a
    figure have no space between their axes.  Use figure_rows_and_columns()
    to compute the number of figures, rows, columns and figure size.

    @param title : figure title
    @type  title : str

    @param nfigs : number of figures
    @type  nfigs : int

    @param nrows : number of rows of subplots per figure
    @type  nrows : int

    @param ncols : number of columns of subplots per figure
    @type  ncols : int

    @param size : figure width, figure height (in)
    @type  size : tuple of float

    @param sharey : same range on all Y axes
    @type  sharey : bool

    @return: (dict of figures, dict of axes) both keyed 0..nfigs-1
    """
    self.logger.debug("init_multiplots: %d figs, %d rows, %d columns",
                      nfigs, nrows, ncols)
    figs = {}
    axs = {}
    for index in range(nfigs):
        new_fig, new_axes = self.init_multiplot(title, nrows, ncols,
                                                size=size, sharey=sharey)
        figs[index] = new_fig
        axs[index] = new_axes
    return figs, axs
def show_passband(self, figtitle=None, rows=None, savepath=None):
    """
    Plots the passbands from this file as dynamic spectra

    If there are multiple beams, there will be a figure column for each beam
    and each pol.  Else, if there is only one beam but multiple subchannels
    then there will be a figure column for each subchannel and each pol.
    Otherwise there is just a column for each pol.

    Image array structure
    ---------------------
    There is an image array for each subchannel, beam and pol combination.
    The data for each is a 2D nparray with dimensions (num_scans, num_chans).

    Initialization
    --------------
    The spectra for each scan, subchannel, beam and pol combination are
    initialized as zeros with shape (32768,).  The zeros are replaced with
    data from the FITS table DATA column.

    The images for each subchannel, beam and pol combination are initialized
    as numpy.arrays with shape (32768, 1).  There is a flag dict
    'start_image' which is initialized as True.  When it is True, the
    initial image (see above) is replaced with data for the records in the
    first scan.  The flag is then set to False.  After that, the sub-images
    for each scan are appended.  The final image will have dimensions
    (num_scans*num_records, 32768).

    @param figtitle : optional figure title; default: derived from file name
    @type  figtitle : str

    @param rows : optional list of table rows; default: all ACS rows
    @type  rows : list of int

    @param savepath : optional path for the saved figure; default: no save
    @type  savepath : str
    """
    if rows == None:
        spectra = self.get_spectra(self.acs_rows)
    else:
        spectra = self.get_spectra(rows)
    # lay out the figure: one column per beam+pol, or subchannel+pol, or pol
    if self.props['num beams'] > 1:
        ncols = self.props['num beams']*self.props['num IFs']
    elif self.props['num cycles'] > 1:
        ncols = self.props['num cycles']*self.props['num IFs']
    else:
        ncols = self.props['num IFs']
    # collect the diagnostic spectra
    if self.props["full Stokes"]:
        # when SPECTRA are Stokes parameters, plot IF power for two pol modes
        if self.props["num IFs"] == 2:
            datasource = "IFSPECTR"
            num_chans = self.props['num IFspec chans']
            IFspectra = True
        # NOTE(review): if "full Stokes" is set but num IFs != 2,
        # 'datasource' is never assigned -- confirm this cannot happen
    else:
        if "SPECTRUM" in self.data.columns.names:
            datasource = "SPECTRUM"
        else:
            datasource = "DATA"
        IFspectra = False
        num_chans = self.props['num chans']
    self.logger.debug("show_passband: data source is %s", datasource)
    # prepare empty images
    images = self.prepare_summary_images(num_chans)
    # get data statistics for scaling plots
    ymin, ymax, ymean, ystd = self.get_data_stats()
    # spectra are 2D arrays with frequency and scan as axes
    # images, also 2D arrays, are only needed if there is a TIME axis in
    # the data and then each record is a row in the array.
    cycles = self.cycle_keys
    # images with SCAN on the time axis have sub-images over record;
    # start_image[subch][beam][IF] is True until the first sub-image arrives
    start_image = {}
    for cycle in cycles:
        subch_idx = cycle - 1
        start_image[subch_idx] = {}
        for beam_idx in range(self.props["num beams"]):
            start_image[subch_idx][beam_idx] = {}
            for IF_idx in range(self.props["num IFs"]):
                start_image[subch_idx][beam_idx][IF_idx] = True
    # get the data
    for scan in self.scan_keys:
        scan_idx = self.scan_keys.index(scan) # scan numbers can start anywhere
        for cycle in cycles:
            subch_idx = cycle - 1
            for beam_idx in range(self.props["num beams"]):
                beam = beam_idx+1
                for IF_idx in range(self.props["num IFs"]):
                    pol = IF_idx+1
                    #self.logger.debug("plot_passband: processing scan %d, subch %d, beam %d, pol %d",
                    #                  scan, cycle, beam, pol)
                    if self.props["time axis"]:
                        # average scan spectrum and include record spectra in image
                        # assume there is a scan number equal to the cycle number
                        image, spectrum = \
                            self.average_records(scan, cycle, beam, pol)
                        # this is the all-record sub-image for the scan
                        if start_image[subch_idx][beam_idx][IF_idx]:
                            images[subch_idx][beam_idx][IF_idx] = image
                            start_image[subch_idx][beam_idx][IF_idx] = False
                        else:
                            images[subch_idx][beam_idx][IF_idx] = \
                                numpy.append(images[subch_idx][beam_idx][IF_idx], image,
                                             axis=0)
                    else:
                        # no time axis; each scan contributes a single row
                        spec_indices = self.get_indices(scan=scan, cycle=cycle,
                                                        beam=beam, pol=pol)
                        image_line = self.data[datasource][spec_indices].reshape(
                                                        num_chans,1).transpose()
                        if start_image[subch_idx][beam_idx][IF_idx]:
                            images[subch_idx][beam_idx][IF_idx] = image_line
                            start_image[subch_idx][beam_idx][IF_idx] = False
                        else:
                            images[subch_idx][beam_idx][IF_idx] = \
                                numpy.append(images[subch_idx][beam_idx][IF_idx],
                                             image_line, axis=0)
    if figtitle:
        pass
    else:
        # default title from the file name, stripping prefix and extension
        figtitle = os.path.basename(self.parent.file)[4:-5].replace('_',' ')
    fig, ax = self.init_multiplot(figtitle+" Spectrogram",
                                  nrows=1, ncols=ncols)
    # display the data
    # labels are updated only in the first time around a loop
    labels = {}
    num_beams = self.props['num beams']
    num_cycles = len(self.cycle_keys)
    num_pols = self.props["num IFs"]
    # half the bandwidth in MHz, for the frequency extent of the images
    halfband = self.get_first_good_value('BANDWIDT')/2.e6
    for subch in range(num_cycles):
        for beam in range(self.props["num beams"]):
            for pol in range(self.props["num IFs"]):
                label = make_legend_labels(sckeys=list(range(num_cycles)),
                                           bmkeys=list(range(num_beams)),
                                           plkeys=list(range(num_pols)),
                                           sckey=subch,
                                           bmkey=beam,
                                           plkey=pol)
                # same column layout rule as at the top of the method
                if self.props['num beams'] > 1:
                    col = 2*beam + pol
                elif self.props['num cycles'] > 1:
                    col = 2*subch + pol
                else:
                    col = pol
                # dynamic spectra of IF power
                ax[col].set_title(label)
                height, width = images[subch][beam][pol].shape
                ax[col].imshow(images[subch][beam][pol], aspect="auto",
                               extent=(-halfband,halfband,0,height))
                if col == 0:
                    ax[col].set_ylabel("Cumulative record number")
                ax[col].set_xlabel("Frequency (MHz)")
    fig.subplots_adjust(top=0.88)
    fig.subplots_adjust(bottom=0.15)
    last_col = len(ax)-1
    lines, labels = ax[last_col].get_legend_handles_labels()
    fig.legend(lines, labels, loc="upper right", ncol=2, prop = fontP)
    if savepath:
        fig.savefig(savepath)
    #fig.show()
def show_all_spectra(self, rows=None, IFspectra=False, sharey=False):
    """
    Plot spectra from a Table

    In each subplot are all the spectra for each beam and polarization from
    one row.  If there are multiple records in a row, all records are
    plotted (not the average over records).

    @param rows : optional list of table rows; default: all
    @type  rows : list of int

    @param IFspectra : plot IFSPECTR if there is one; default: False
    @type  IFspectra : bool

    @param sharey : same range on all Y axes
    @type  sharey : bool
    """
    # gets spectra from SPECTRUM column with RA and dec indices removed
    if rows == None:
        spectra = self.get_spectra(self.acs_rows)
    else:
        spectra = self.get_spectra(rows)
    # NOTE(review): '/' yields a float under Python 3; num_graphs is then
    # passed to figure_rows_and_columns -- should probably be '//'
    num_graphs = len(spectra)/self.props['num cycles']
    nfigs, nrows, ncols, size = self.figure_rows_and_columns(num_graphs)
    figs, axs = self.init_multiplots("Basic Spectra", nfigs, nrows, ncols,
                                     size, sharey=sharey)
    self.logger.debug("show_all_spectra: figure keys: %s", list(figs.keys()))
    self.logger.debug("show_all_spectra: axes keys: %s", list(axs.keys()))
    for fignum in list(figs.keys()):
        fig = figs[fignum]
        ax = axs[fignum]
        for row in range(nrows):
            for col in range(ncols):
                # flat spectrum index across figures, rows and columns
                specidx = nrows*ncols*fignum + ncols*row + col
                if specidx >= len(spectra):
                    break
                scan = self.data['SCAN'][specidx]
                cycle = self.data['CYCLE'][specidx]
                cycle_idx = self.cycle_keys.index(cycle)
                self.logger.debug(
                       "show_all_spectra: doing row %d column %d spectrum %d",
                       row, col, specidx)
                spec = spectra[specidx] # returns spectra from one row
                # work out channel and pol counts for this spectrum
                if self.props['full Stokes']:
                    if "IFSPECTR" in self.data.columns.names and IFspectra:
                        nchans = self.props['num IFspec chans']
                        npols = self.props['num IFs']
                    else:
                        nchans = self.props['num chans']
                        npols = spec.shape[0]
                else:
                    nchans = self.props['num chans']
                    npols = self.props['num IFs']
                nbeams = self.props['num beams']
                nrecs = self.props['num records'][cycle]
                self.logger.debug(
                          "show_all_spectra: %d channels, %d pols, %d records",
                          nchans, npols, nrecs)
                for rec in range(nrecs):
                    record = rec+1
                    symbol = plotsymbols[rec % 20]
                    for beam_idx in range(nbeams):
                        beam = beam_idx+1
                        for pol_idx in range(npols):
                            pol = pol_idx+1
                            # indices without row (0), RA (-1), dec (-2)
                            # NOTE(review): 'indices' is unbound if
                            # spec.shape has a rank other than 2, 3 or 4
                            if len(spec.shape) == 4:
                                # with beam and time axis
                                indices = self.get_indices(scan=scan, cycle=cycle, pol=pol,
                                                           beam=beam, record=record,
                                                           trimmed=True)[1:]
                            elif len(spec.shape) == 3:
                                # with time axis
                                indices = self.get_indices(scan=scan, cycle=cycle, pol=pol,
                                                           record=record,
                                                           trimmed=True)[1:]
                            elif len(spec.shape) == 2:
                                indices = self.get_indices(scan=scan, cycle=cycle, pol=pol,
                                                           trimmed=True)[1:]
                            self.logger.debug("show_all_spectra: indices: %s", indices)
                            # '%' binds tighter than '+', so this is
                            # beam_idx*npols + (pol_idx % 5)
                            color = plotcolors[beam_idx*npols+pol_idx % 5]
                            trimmed = spec[indices]
                            label = self.make_legend_labels(sckey=cycle_idx,
                                                            bmkey=beam_idx,
                                                            plkey=pol_idx)
                            ax[row][col].plot(trimmed, color+',', label=label)
                # end loop over records
                ax[row][col].grid(True)
                ax[row][col].text(0.5, 0.95, 'scan '+str(scan),
                                  transform=ax[row][col].transAxes,
                                  horizontalalignment='center',
                                  verticalalignment='top')
                if row == nrows-1:
                    # rotate crowded X tick labels on the bottom row
                    for tick in ax[row][col].get_xticklabels():
                        tick.set_rotation(45)
                if col == 0:
                    ax[row][col].set_ylabel("Power (counts)")
            # end loop over col
        # end loop over row
        lines, labels = ax[0][0].get_legend_handles_labels()
        fig.legend(lines, labels, loc="upper right", ncol=2, prop = fontP)
    # end loop over fig
    #fig.show()
def make_legend_labels(self, dskey=None, tbkey=None, sckey=None,
                       bmkey=None, plkey=None):
    """
    Build a legend label for one subchannel/beam/pol combination.

    Delegates to the module-level make_legend_labels(), supplying the
    full key ranges from this table's properties.  Four polarization
    keys are used for full-Stokes data, otherwise one per IF.
    """
    if self.props['full Stokes']:
        pol_range = list(range(4))
    else:
        pol_range = list(range(self.props['num IFs']))
    cycle_range = list(range(self.props['num cycles']))
    beam_range = list(range(self.props['num beams']))
    return make_legend_labels(dskeys=[], tbkeys=[], sckeys=cycle_range,
                              bmkeys=beam_range, plkeys=pol_range,
                              dskey=dskey, tbkey=tbkey, sckey=sckey,
                              bmkey=bmkey, plkey=plkey)
def plot_BPSW_spectra(self, spectra=None, rows=None):
    """
    plot reduced beam and position switched spectra

    @param spectra : optional dict of reduced spectra from 'BPSW_spectra()'
    @type  spectra : numpy array

    @param rows : optional list of table rows to compute spectra; default: all
    @type  rows : list of int
    """
    if spectra:
        npairs = len(spectra)
    else:
        # compute the spectra; the old 'elif rows:' branch discarded the
        # caller's row list by overwriting it with self.acs_rows
        if rows is None:
            rows = self.acs_rows
        spectra = self.BPSW_spectra(rows)
        npairs = len(spectra)
    pairs = list(range(npairs))
    # figure_rows_and_columns() returns four values; the old two-value
    # unpacking raised ValueError
    nfigs, nrows, ncols, size = self.figure_rows_and_columns(len(spectra))
    fig, ax = self.init_multiplot("BPSW Spectra", nrows, ncols)
    for row in range(nrows):
        for col in range(ncols):
            pair = row*ncols + col
            if pair >= npairs:
                break     # grid may be larger than the number of pairs
            spec = spectra[pair]
            nrecs, npols, nchans = spec.shape
            for rec in range(nrecs):
                symbol = plotsymbols[rec % 20]
                for pol in range(npols):
                    color = plotcolors[pol % 5]
                    trimmed = support.trim_extremes(spec[rec, pol])
                    ax[row][col].plot(trimmed, color+',',
                                      label="rec"+str(rec)+" P"+str(pol+1))
            ax[row][col].grid(True)
            ax[row][col].text(0.5, 0.95, 'pair '+str(pair+1),
                              transform=ax[row][col].transAxes,
                              horizontalalignment='center',
                              verticalalignment='top')
            if row == nrows-1:
                # rotate crowded X tick labels on the bottom row
                for tick in ax[row][col].get_xticklabels():
                    tick.set_rotation(45)
            if col == 0:
                ax[row][col].set_ylabel("Normalized Power")
    lines, labels = ax[0][0].get_legend_handles_labels()
    fig.legend(lines, labels, loc="upper right", ncol=2, prop = fontP)
    # bare 'show()' was an undefined name; use the pylab module alias
    PL.show()
def plot_PSSW_spectra(self, figtitle=None, scans=[], savepath=None):
    """
    Plot position switched spectra

    For each ON/OFF scan pair, the plotted quantity is (sig-ref)/ref.
    We need to check self.data['OBSMODE']

    @param figtitle : optional figure title; default: derived from file name
    @type  figtitle : str

    @param scans : list of scan numbers, ons and offs
    @type  scans : list of int

    @param savepath : optional path for the saved figure; default: no save
    @type  savepath : str
    """
    if scans == []:
        scans = self.scan_keys
    # lay out the figure: one column per beam+pol, subchannel+pol, or pol
    if self.props['num beams'] > 1:
        ncols = self.props['num beams']*self.props['num IFs']
    elif self.props['num cycles'] > 1:
        ncols = self.props['num cycles']*self.props['num IFs']
    else:
        ncols = self.props['num IFs']
    if figtitle:
        pass
    else:
        # default title: station number plus file name after the first '_'
        first = os.path.basename(self.parent.file).index('_')+1
        figtitle = "DSS-"+str(self.dss) +" "+ \
                   os.path.basename(self.parent.file)[first:-5]
    fig, ax = self.init_multiplot(figtitle+" (Sig-Ref)/Ref",
                                  nrows=1, ncols=ncols)
    # data source: prefer IFSPECTR (Stokes), then SPECTRUM, then DATA
    if 'IFSPECTR' in self.data.names:
        # this is for Stokes spectra
        datasource = 'IFSPECTR'
    elif 'SPECTRUM' in self.data.names:
        datasource = 'SPECTRUM'
    else:
        datasource = 'DATA'
    self.logger.debug("plot_PSSW_spectra: data column is %s", datasource)
    labels = {}
    num_beams = self.props['num beams']
    num_cycles = len(self.cycle_keys)
    num_pols = self.props["num IFs"]
    for subch_idx in range(num_cycles):
        cycle = subch_idx + 1
        for beam_idx in range(self.props["num beams"]):
            beam = beam_idx + 1
            for pol_idx in range(self.props["num IFs"]):
                pol = pol_idx+1
                label = make_legend_labels(sckeys=list(range(num_cycles)),
                                           bmkeys=list(range(num_beams)),
                                           plkeys=list(range(num_pols)),
                                           sckey=subch_idx,
                                           bmkey=beam_idx,
                                           plkey=pol_idx)
                # same column layout rule as above
                if self.props['num beams'] > 1:
                    col = 2*beam_idx + pol_idx
                elif self.props['num cycles'] > 1:
                    col = 2*subch_idx + pol_idx
                else:
                    col = pol_idx
                # dynamic spectra of IF power
                ax[col].set_title(label)
                self.logger.debug("plot_PSSW_spectra: subch %d beam %d pol %d",
                                  cycle, beam, pol)
                #on_scans = numpy.unique(self.data['SCAN'][numpy.where(
                #                self.data['SIG'][self.acs_rows] == True)[0]])
                #of_scans = numpy.unique(self.data['SCAN'][numpy.where(
                #                self.data['SIG'][self.acs_rows] == False)[0]])
                # pair up the ON (SIG True) and OFF (SIG False) scans in order
                on_indices = numpy.where(self.data['SIG'] == True)[0]
                of_indices = numpy.where(self.data['SIG'] == False)[0]
                on_scans = numpy.unique(self.data['SCAN'][numpy.intersect1d(on_indices, self.rows)])
                of_scans = numpy.unique(self.data['SCAN'][numpy.intersect1d(of_indices, self.rows)])
                num_pairs = min(len(on_scans), len(of_scans))
                for index in range(num_pairs):
                    on_scan = on_scans[index]
                    of_scan = of_scans[index]
                    # get the indices for the DATA cell in the ON position
                    try:
                        indices = self.get_indices(scan=on_scan, cycle=cycle, pol=pol,
                                                   beam=beam)
                        self.logger.debug("plot_PSSW_spectra: scan %d on indices: %s",
                                          on_scan, indices)
                    except ValueError as details:
                        # skip pairs with no matching ON data
                        self.logger.warning("plot_PSSW_spectra: %s", str(details))
                        continue
                    row = indices[0]
                    # get the X-axis units for the ON position
                    if datasource == 'IFSPECTR':
                        v = self.compute_X_axis(row, frame='DELF-OBS',
                                                num_chans=self.props['num IFspec chans'])
                    else:
                        v = self.compute_X_axis(row, frame='DELF-OBS',
                                                num_chans=self.props['num chans'])
                    on = self.data[datasource][indices]
                    # get the indices for the DATA cell in the OFF position
                    try:
                        ref_indices = self.get_indices(scan=of_scan, cycle=cycle,
                                                       pol=pol, beam=beam)
                        self.logger.debug("plot_PSSW_spectra: scan %d off indices: %s",
                                          of_scan, ref_indices)
                        off = self.data[datasource][ref_indices]
                    except ValueError as details:
                        # skip pairs with no matching OFF data
                        self.logger.warning("plot_PSSW_spectra: %s", str(details))
                        continue
                    # position-switched spectrum for this pair
                    spectrum = (on-off)/off
                    label = "("+str(on_scan)+"-"+str(of_scan)+")/"+str(of_scan)
                    ax[col].plot(v, spectrum, label=label)
                    if index == 0:
                        ax[col].grid()
                        ax[col].set_xlabel("Frequency (MHz)")
                #heading = "%8.3f MHz" % (self.data['OBSFREQ'][row]/1e6)
                #heading += " P"+str(pol)
                #ax[col].set_title(heading)
    last_col = len(ax)-1
    lines, labels = ax[last_col].get_legend_handles_labels()
    fig.legend(lines, labels, loc="upper right", ncol=2, prop = fontP)
    if savepath:
        # NOTE(review): bare 'savefig' and 'show' are undefined names here
        # unless a star import exists elsewhere -- likely should be
        # fig.savefig(...)/PL.show(); confirm before changing
        savefig(savepath)
    show()
def plot_line(self, rows=[], window=(-100,100),
              frame="RADI-OBJ", source='67P', savepath=None):
    """
    plot reduced averaged spectral lines (from 'reduce_line()') for both pols

    @param rows : optional rows to include; default: all
    @type  rows : list of int

    @param window : optional left and right limits of the X axis (-100,100)
    @type  window : tuple of float

    @param frame : optional reference frame for Doppler calc ("RADI-OBJ")
    @type  frame : str

    @param source : source name for Doppler calculation; default: 67P
    @type  source : str

    @param savepath : optional path for saved image; default: no save
    @type  savepath : str

    @return: dict with keys "rms", "Tsys", "intgr" (None when nothing plotted)
    """
    try:
        x, y, rms, Tsys, intgr = self.reduce_line(rows=rows, window=window,
                                                  frame=frame, source=source)
    except TypeError:
        # reduce_line() returned nothing unpackable
        self.logger.error("plot_line: nothing to plot")
    else:
        # bare pylab names (figure, plot, ...) were undefined; use the
        # module alias imported at the top of the file
        PL.figure()
        PL.plot(x, y[0], label="Pol1")
        PL.plot(x, y[1], label="Pol2")
        PL.grid()
        PL.xlim(*window)
        PL.legend(prop=fontP)
        titlestr = \
              source+" DSS-"+str(self.dss)+" "+self.datestr+" "+self.timestr
        PL.title(titlestr)
        # closing parenthesis was missing from the axis label
        PL.xlabel("$V_{LSR}$ (km s$^{-1}$)")
        PL.ylabel("$T_{ant}$ (K)")
        if savepath:
            fname = titlestr.replace("/","-").replace(" ","_")+".png"
            PL.savefig(savepath+fname)
        return {"rms": rms, "Tsys": Tsys, "intgr": intgr}
def plot_all_Tsys(self):
    """
    Displays all Tsys values so user can select row range

    This works for WVSR data but needs to be elaborated for SAO data
    """
    # bare pylab names (figure, plot, ...) were undefined; use the module
    # alias imported at the top of the file
    PL.figure()
    # row stride between successive samples of the same subchannel/pol
    stride = self.props['num cycles']*self.props['num IFs']
    for cycle_idx in range(self.props['num cycles']):
        for beam_idx in range(self.props['num beams']):
            for pol_idx in range(self.props['num IFs']):
                label = self.make_legend_labels(sckey=cycle_idx, bmkey=beam_idx,
                                                plkey=pol_idx)
                for sig in [True, False]:
                    # ON rows start at cycle_idx; OFF rows are offset by 2
                    if sig:
                        lbl = label+" on"
                        ls = "-"
                        index = cycle_idx
                    else:
                        lbl = label+" off"
                        ls = "--"
                        index = 2+cycle_idx
                    PL.plot(self.data['TSYS'][index:len(self.acs_rows):stride,
                                              pol_idx,0,0,0], ls=ls, label=lbl)
    PL.grid()
    PL.legend(loc='best')
    PL.xlabel("row")
    PL.ylabel("average power")
    PL.title(self.datestr)
    #show()
def plot_Tsys(self, good_wx_data=None, X=None):
  """
  Plot average power versus time or airmass or list index.

  Options for X are::
    "time"    - time of measurement
    "airmass" - 1/sin(elev)
    None      - list index

  @param good_wx_data : output of get_wx_datacubes(); computed if not given
  @param X : X-axis mode (see above)
  """
  if not good_wx_data:
    good_wx_data = self.get_wx_datacubes()
  heading = "DSS-%2d %s" % (self.dss, self.datestr)
  fig, ax = self.init_multiplot(heading, 1, self.props['num cycles'])
  for subch in self.cycle_keys:
    cycle_idx = subch - 1
    axis = ax[cycle_idx]
    for beam in range(self.props['num beams']):
      for pol in range(self.props['num IFs']):
        # plot on-source ("sig") then off-source ("ref") data
        for sig in [True, False]:
          unixtimes = good_wx_data['UNIXtime'][sig]
          # convert UNIX seconds to matplotlib date numbers
          plottimes = MPL.dates.epoch2num(unixtimes)
          tsys = good_wx_data['TSYS'][sig][:,cycle_idx,beam,pol]
          elevations = good_wx_data['ELEVATIO'][sig]
          base_label = self.make_legend_labels(bmkey=beam, plkey=pol)
          color_idx = 2*int(sig) + pol
          if sig:
            lbl = base_label+" sig"
            ls = "-"
          else:
            lbl = base_label+" ref"
            ls = "--"
          if X == "time":
            axis.plot_date(plottimes, tsys, linestyle=ls, marker='.',
                           color=plotcolors[color_idx], label=lbl)
          elif X == "airmass":
            # airmass = 1/sin(elevation in degrees)
            airmass = 1/sin(pi*array(elevations)/180.)
            axis.plot(airmass, tsys, color=plotcolors[color_idx], marker='.',
                      ls=ls, label=lbl)
          else:
            axis.plot(tsys, color=plotcolors[color_idx], marker='.',
                      ls=ls, label=lbl)
    axis.grid(True)
    axis.legend(loc='best', fontsize='xx-small', numpoints=1)
    if X == "time":
      axis.xaxis.set_major_formatter(seconds_formatter)
      fig.autofmt_xdate()
    elif X == "airmass":
      axis.set_xlabel("Airmass")
    else:
      axis.set_xlabel("index")
    axis.set_title("subchannel "+str(subch))
  #fig.show()
#--------------------------- DSNFITSplotter methods -------------------------
def plot_average(self, frame='RADI-LSR', source=None,
                 xlimits=None, ylimits=None,
                 spectrum=None, row=0, average=True):
  """
  plot an averaged DSN FITS spectrum

  A DSN FITS spectrum is multi-dimensional with axes::
    [[beam,] [time,]], pol, [dec, RA], frequency-like
  [] indicates the axes may not be present. During data manipulations the
  [ra, dec,] axes are the first to be eliminated.

  Note that the frame of the provided spectrum is not changed. Only the
  X-axis is recomputed before plotting.

  @param frame : frame in which to plot the data
  @type  frame : str

  @param source : source name (unused; kept for API compatibility)
  @type  source : str

  @param xlimits : minimum and maximum X-axis values
  @type  xlimits : (float, float)

  @param ylimits : minimum and maximum Y-axis values
  @type  ylimits : (float, float)

  @param spectrum : optional data to plot; default: taken from 'row'
  @type  spectrum : FITSexam.DSNFITSexaminer.Table.Data

  @param row : row to be used to get observing parameters
  @type  row : int

  @param average : combine the two polarizations
  @type  average : bool

  @return: dict of RMS noise per pol (and 'avg' when averaged)
  """
  # BUGFIX: 'spectrum', 'row' and 'average' were documented and used but
  # missing from the signature, so this method always raised NameError.
  if type(spectrum) == FITSexam.DSNFITSexaminer.Table.Data:
    # this is a window into the original spectrum
    x = spectrum.x
    y = spectrum.y
    # BUGFIX: 'frame' was overwritten with spectrum.frame before this
    # comparison, which made the frame-change branch unreachable.
    if frame == spectrum.frame:
      self.logger.debug("plot_spectrum: keeping frame %s", frame)
    else:
      # change to the requested frame
      self.logger.debug("plot_spectrum: changing to frame %s", frame)
      new_x = spectrum.compute_X_axis(row, frame)
      # FIXME: 'window' is not defined in this scope -- presumably the
      # Data instance's channel window was intended; confirm.
      x = new_x[window.channels[0]:window.channels[1]]
    self.logger.debug("plot_spectrum: plotting in frame %s", self.frame)
  else:
    # get SPECTRUM from current row
    self.logger.info("plot_spectrum: data is not a class Data instance")
    spectrum = self.get_spectra(row)
    # NOTE(review): ch1/ch2 are unbound below when xlimits is None --
    # confirm that callers always pass xlimits on this path.
    if xlimits:
      ch1, ch2 = xlimits[0], xlimits[1]
    if frame == "RADI-OBJ" and self.frame != "RADI-OBJ":
      # Change to the object's rest frame
      source = self.dataset[row]["OBJECT"]
      if re.match('67P', source):
        self.logger.debug("plot_spectrum: using 67P frame")
        vspline = self.load_ephemeris('67P')
        x = spectrum.compute_X_axis(row=row, frame="RADI-OBJ",
                                    vspline=vspline)[ch1:ch2]
        y = spectrum[ch1:ch2]
      else:
        self.logger.error("plot_spectrum: no ephemeris for %s", self.object)
        # BUGFIX: was 'RuntimeException', which does not exist in Python
        raise RuntimeError("cannot compute velocity of %s" % source)
    else:
      x = spectrum.compute_X_axis(row=row)[ch1:ch2]
      y = spectrum[ch1:ch2]
  self.logger.debug("plot_spectrum: x = %s", x)
  self.logger.debug("plot_spectrum: y = %s", y)
  # make the plot
  figure()
  rms = {}
  for pol in [0,1]:
    plot(x, y[pol], label="pol "+str(pol)+" ("+("%d"%self.tau[pol])+"s)")
    rms[pol] = y[pol].std()
  if average:
    ave = (y[0]+y[1])/2
    plot(x, ave, label="both ("+("%d"%(self.tau[0]+self.tau[1]))+"s)")
    rms['avg'] = ave.std()
  if xlimits:
    xlim(*xlimits)
  if ylimits:
    ylim(*ylimits)
  if self.frame == "CHAN-OBS":
    xlabel("Channel")
  # BUGFIX: the original compared against ("OPTI-" or "RADI-" or "RELA-"),
  # which evaluates to just "OPTI-"; a membership test was intended.
  elif self.frame[:5] in ("OPTI-", "RADI-", "RELA-"):
    if self.frame[4:] == "-OBS":
      xlabel(r"$V_{obs} (\mbox{km s}^{-1})$")
  else:
    xlabel("Frequency (MHz)")
  ylabel(r"Antenna Temperature (K)")
  grid()
  legend()
  # BUGFIX: the original referenced an undefined 'ds0'; use the session
  # date string instead -- confirm the desired title format.
  titlestr = support.text.clean_TeX(self.datestr)
  title(titlestr)
  show()
  self.logger.info("plot_spectrum: pol0, pol1, avg: %s", rms)
  return rms
#------------------- class for plotting TIPPING CURVE extension data ----------
class TidTipPlotter(FITSexam.TidTipAnalyzer):
  """
  class for plotting data from the TIPPING CURVE extensions
  """
  def __init__(self, extension):
    """
    Initialize the analyzer base class with a TIPPING CURVE extension.
    """
    FITSexam.TidTipAnalyzer.__init__(self, extension)

  def plot_data(self):
    """
    Plot system temperature versus airmass for each IF, with fit results.
    """
    # BUGFIX: 'self' was missing from the signature, so calling this
    # method on an instance raised TypeError.
    fig = figure()
    for IF in range(4):
      PM = IF+1
      rows = where(self.data['CYCLE'] == PM)
      tsys = self.data['TSYS'][rows]
      am = DRtip.airmass(self.data['ELEVATIO'][rows])
      plot(am, tsys, plotcolors[IF]+'-')
      plot(am, tsys, plotcolors[IF]+'.', label="IF%d" % PM)
      # add fit to legend
      # NOTE(review): 'handles', 'Trx', 'sigTrx', 'tau' and 'sigtau' are
      # not defined in this scope -- they presumably come from a fitting
      # step that is missing here; confirm before relying on this.
      label = r"IF%d %5.1f$\pm$%3.1f %5.3f$\pm$%5.3f" % (
                                 IF, Trx[IF],sigTrx[IF], tau[IF],sigtau[IF])
      handles[IF].set_label(label)
    grid()
    title(self.header['DATE-OBS'])
    xlabel("airmass")
    # BUGFIX: legend() was called twice with identical arguments
    legend(loc="upper left", numpoints=1)
    show()
    # NOTE(review): 'project', 'projects_dir' and 'index' are undefined in
    # this scope -- confirm where they should come from.
    if project:
      sessiondir = projects_dir+project+"/Observations/dss43/%4d/%03d/" % (
                                                       self.year, self.DOY)
      fig.savefig(sessiondir+"tipdatafit-"+str(index+1)+".png")
#----------------------------------- module functions -------------------------
def make_legend_labels(dskeys=(), tbkeys=(), sckeys=(), bmkeys=(), plkeys=(),
                       dskey=None, tbkey=None, sckey=None, bmkey=None,
                       plkey=None):
  """
  Compose a compact legend label from the keys in use.

  A component appears in the label only when its key is given AND there is
  more than one possible value for it (a single-valued component carries no
  information).  Keys are 0-based; displayed numbers are 1-based.

  @param dskeys : all datafile or examiner keys
  @param tbkeys : all table keys
  @param sckeys : all subchannel keys
  @param bmkeys : all beam keys
  @param plkeys : all polarization keys
  @param dskey : datafile or examiner key
  @param tbkey : table key
  @param sckey : subchannel key
  @param bmkey : beam key
  @param plkey : polarization key

  @return: str
  """
  # NOTE: defaults changed from mutable lists to tuples (safer, same len()
  # behavior); '!= None' replaced with the idiomatic 'is not None'.
  label = ""
  if dskey is not None and len(dskeys) > 1:
    label += "ds"+str(dskey+1)
  if tbkey is not None and len(tbkeys) > 1:
    label += " tb"+str(tbkey+1)
  if sckey is not None and len(sckeys) > 1:
    label += " sc"+str(sckey+1)
  if bmkey is not None and len(bmkeys) > 1:
    label += " B"+str(bmkey+1)
  if plkey is not None and len(plkeys) > 1:
    label += "P"+str(plkey+1)
  return label
def get_power_range(table, subch, beam, pol):
  """
  Calculate display limits for power levels.

  The idea is to keep a huge spike from compressing the rest of the
  spectra: when the extreme value lies more than four standard deviations
  from the mean, the corresponding limit is pulled in to one standard
  deviation.  Will need some tuning.

  @param table : table object with 'get_indices' and a SPECTRUM data column
  @param subch : sub-channel number
  @param beam : beam number (1-based)
  @param pol : polarization number (not NRAO pol code)

  @return: (ymin, ymax)
  """
  # gets the indices for scan 1
  row_indices = table.get_indices(cycle=subch, beam=beam, IF=pol)
  logger.debug("get_power_range: indices: %s", row_indices)
  # gets spectra from all rows
  spectra = table.data['SPECTRUM'][:, row_indices[1:]]
  logger.debug("get_power_range: spectrum shape is %s", spectra.shape)
  mean_pwr = spectra.mean()
  sigma = spectra.std()
  if spectra.max() > mean_pwr + 4*sigma:
    ymax = mean_pwr + sigma
  else:
    ymax = mean_pwr + 4*sigma
  if spectra.min() < mean_pwr - 4*sigma:
    ymin = mean_pwr - sigma
  else:
    ymin = mean_pwr - 4*sigma
  return ymin, ymax
| {
"alphanum_fraction": 0.5367081632,
"author": null,
"avg_line_length": 40.4488348531,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6facd4eefdba2e2ac4c0df75b4116e52909233dd",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f007d716b5c28c086910a81206cffaf37ff6368c",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "SDRAST/Data_Reduction",
"max_forks_repo_path": "FITS/SDFITSplotter.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f007d716b5c28c086910a81206cffaf37ff6368c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "SDRAST/Data_Reduction",
"max_issues_repo_path": "FITS/SDFITSplotter.py",
"max_line_length": 96,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f007d716b5c28c086910a81206cffaf37ff6368c",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "SDRAST/Data_Reduction",
"max_stars_repo_path": "FITS/SDFITSplotter.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 9638,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 39923
} |
import argparse
import os
import pandas as pd
import imagesize
import nltk
from nltk.corpus import wordnet as wn
import numpy as np
import re
from ..create.scads_classes import Node, Image, Clip
from ..create.create_scads import add_conceptnet
from ..create.add_datasets import add_dataset
from .wnids_to_concept import SYNSET_TO_CONCEPTNET_ID
class DatasetInstaller:
    """Abstract base for dataset installers.

    Subclasses supply the dataset name and the per-dataset logic for
    collecting rows to insert into the SCADS database.
    """

    def get_name(self):
        """Return the human-readable dataset name (must be overridden)."""
        raise NotImplementedError()

    def get_data(self, dataset, session, root):
        """Collect the dataset's database rows (must be overridden)."""
        raise NotImplementedError()

    def get_conceptnet_id(self, label):
        """Map a class label to a ConceptNet concept id.

        Lower-cases the label and normalizes spaces and hyphens to
        underscores, e.g. "Fire Hydrant" -> "/c/en/fire_hydrant".
        """
        normalized = label.lower()
        for ch in (" ", "-"):
            normalized = normalized.replace(ch, "_")
        return "/c/en/" + normalized
class ImageClassificationInstaller(DatasetInstaller):
    """Installer for image-classification datasets laid out as
    <dataset>/<dataset>_full/{train,test} with feather label files."""

    def get_name(self):
        raise NotImplementedError()

    def get_data(self, dataset, session, root):
        """Return Image rows for every labeled train/test image whose label
        maps to a known ConceptNet node."""
        size = "full"
        node_cache = {}   # label -> node id (None if unknown to SCADS)
        images = []
        for mode in ('train', 'test'):
            label_df = pd.read_feather(
                os.path.join(root, dataset.path, "labels_" + size, 'labels_' + mode + '.feather'))
            # one row per image id, one column per class; idxmax picks the class
            class_table = pd.crosstab(label_df['id'], label_df['class'])
            mode_dir = os.path.join(dataset.path, f'{dataset.path}_' + size, mode)
            for fname in os.listdir(os.path.join(root, mode_dir)):
                if fname.startswith('.'):
                    continue  # skip hidden files such as .DS_Store
                label = class_table.loc[fname].idxmax()
                if label not in node_cache:
                    node = session.query(Node).filter_by(
                        conceptnet_id=self.get_conceptnet_id(label)).first()
                    node_cache[label] = node.id if node else None
                node_id = node_cache[label]
                if node_id is None:
                    # SCADS has no node for this concept; skip the image
                    continue
                images.append(Image(dataset_id=dataset.id,
                                    node_id=node_id,
                                    path=os.path.join(mode_dir, fname)))
        return images
class ObjectDetectionInstaller(DatasetInstaller):
    """Installer for object-detection datasets.

    An image is kept only when the largest bounding box of its dominant
    class covers more than 20% of the image area.
    """
    def get_name(self):
        raise NotImplementedError()
    def get_data(self, dataset, session, root):
        """Return Image rows for train/test images whose dominant object
        maps to a known ConceptNet node."""
        size = "full"
        modes = ['train', 'test']
        # cache: label -> node id (or None when SCADS has no such concept)
        label_to_node_id = {}
        all_images = []
        for mode in modes:
            df_label = pd.read_feather(
                os.path.join(root, dataset.path, "labels_" + size, 'labels_' + mode + '.feather'))
            # replace each "x_min, y_min, x_max, y_max" bbox string by its area
            bbox = list(df_label.loc[:, 'bbox'].copy())
            for i in range(len(bbox)):
                bbx = bbox[i].split(',')
                x_min = float(bbx[0].strip())
                y_min = float(bbx[1].strip())
                x_max = float(bbx[2].strip())
                y_max = float(bbx[3].strip())
                w = x_max - x_min
                h = y_max - y_min
                area = w * h
                bbox[i] = area
            df_label.loc[:, 'bbox'] = bbox
            # rows: image id, columns: class, values: largest bbox area
            pt = df_label.pivot_table(index='id', columns='class', values='bbox', aggfunc=np.max)
            mode_dir = os.path.join(dataset.path, f'{dataset.path}_' + size, mode)
            for image in os.listdir(os.path.join(root, mode_dir)):
                if image.startswith('.'):
                    # skip hidden files such as .DS_Store
                    continue
                width, height = imagesize.get(os.path.join(root, mode_dir) + '/' + image)
                img_size = width * height
                # dominant class = the one with the biggest box in this image
                label = pt.loc[image].dropna().idxmax()
                bbox_area = pt.loc[image, label]
                # skip images whose dominant object is small (<= 20% of frame)
                if bbox_area <= img_size * .2:
                    continue
                # Get node_id
                if label in label_to_node_id:
                    node_id = label_to_node_id[label]
                else:
                    node = session.query(Node).filter_by(conceptnet_id=self.get_conceptnet_id(label)).first()
                    node_id = node.id if node else None
                    label_to_node_id[label] = node_id
                # skip labels for which SCADS has no ConceptNet node
                if not node_id:
                    continue
                img = Image(dataset_id=dataset.id,
                            node_id=node_id,
                            path=os.path.join(mode_dir, image))
                all_images.append(img)
        return all_images
class VideoClassificationInstaller(DatasetInstaller):
    """Installer for video-classification datasets (HMDB, UCF101).

    Produces Clip rows.  A class name with no direct ConceptNet node is
    decomposed into component words, and one Clip is created per word that
    maps to a node.
    """

    def get_name(self):
        raise NotImplementedError()

    def composed_labels(self, string, dataset):
        """Split a composite class name into its component words.

        HMDB uses underscore-separated names; UCF101 uses CamelCase.
        NOTE(review): returns None for any other dataset name, which would
        crash callers -- confirm only HMDB/UCF101 reach this path.
        """
        if dataset.name == 'HMDB':
            return [w.strip() for w in string.split('_')]
        elif dataset.name == 'UCF101':
            list_words = re.findall('[A-Z][^A-Z]*', string)
            return [w.lower().strip() for w in list_words]

    def _make_clip(self, row, base_path, dataset, node_id, name_label):
        """Build one Clip row; real_label is the last path segment of the
        ConceptNet id for name_label.  (Extracted: this construction was
        duplicated three times in get_data.)"""
        return Clip(
            clip_id=row['id'],
            video_id=row['video_id'],
            base_path=base_path,
            start_frame=row['start_frame'],
            end_frame=row['end_frame'],
            real_label=self.get_conceptnet_id(name_label).split('/')[-1],
            dataset_id=dataset.id,
            node_id=node_id
        )

    def get_data(self, dataset, session, root):
        """Return Clip rows for all train/test clips whose label (or a word
        of the label) maps to a known ConceptNet node."""
        size = "full"
        modes = ['train', 'test']
        label_to_node_id = {}
        all_clips = []
        for mode in modes:
            base_path = os.path.join(dataset.path, dataset.path + '_' + size, mode)
            df = pd.read_feather(
                os.path.join(root, dataset.path, "labels_" + size, 'labels_' + mode + '.feather'))
            if mode == "test":
                # test labels live in a separate metadata file; keep the
                # id -> class table to look labels up per row
                df_label = pd.crosstab(df['id'], df['class'])
                df = pd.read_feather(
                    os.path.join(root, dataset.path, "labels_" + size, "meta_" + mode + ".feather")
                )
            for _, row in df.iterrows():
                row = row.astype("object")
                if mode == "test":
                    label = df_label.loc[row['id']].idxmax()
                else:
                    label = row['class']
                if label in label_to_node_id:
                    node_id = label_to_node_id[label]
                    all_clips.append(
                        self._make_clip(row, base_path, dataset, node_id, label))
                else:
                    node = session.query(Node).filter_by(conceptnet_id=self.get_conceptnet_id(label)).first()
                    if node:
                        node_id = node.id
                        label_to_node_id[label] = node_id
                        all_clips.append(
                            self._make_clip(row, base_path, dataset, node_id, label))
                    else:
                        # The class has no node of its own: decompose the
                        # name and check for a concept matching each word.
                        labels = self.composed_labels(label, dataset)
                        for l in labels:
                            if l in label_to_node_id:
                                node_id = label_to_node_id[l]
                            else:
                                node = session.query(Node).filter_by(conceptnet_id=self.get_conceptnet_id(l)).first()
                                node_id = node.id if node else None
                                label_to_node_id[l] = node_id
                            # skip words that map to no concept at all
                            if not node_id:
                                continue
                            real = '_'.join(labels)
                            all_clips.append(
                                self._make_clip(row, base_path, dataset, node_id, real))
        return all_clips
class CifarInstallation(ImageClassificationInstaller):
    """Installer for the CIFAR100 image-classification dataset."""
    def get_name(self):
        """Return the dataset name."""
        return "CIFAR100"
class MnistInstallation(ImageClassificationInstaller):
    """Installer for MNIST; digit labels map to spelled-out number concepts."""

    # digit character -> ConceptNet id of the spelled-out number.
    # Hoisted to a class attribute so the table is not rebuilt on every
    # get_conceptnet_id() call.
    _DIGIT_CONCEPTS = {
        '0': '/c/en/zero',
        '1': '/c/en/one',
        '2': '/c/en/two',
        '3': '/c/en/three',
        '4': '/c/en/four',
        '5': '/c/en/five',
        '6': '/c/en/six',
        '7': '/c/en/seven',
        '8': '/c/en/eight',
        '9': '/c/en/nine',
    }

    def get_name(self):
        """Return the dataset name."""
        return "MNIST"

    def get_conceptnet_id(self, label):
        """Return the ConceptNet id for a digit label ('0'..'9').

        Raises KeyError for any other label, as before.
        """
        return MnistInstallation._DIGIT_CONCEPTS[label]
class ImageNetInstallation(ImageClassificationInstaller):
    """Installer for ImageNet; labels are WordNet synset ids (wnids)."""
    def get_name(self):
        """Return the dataset name."""
        return "ImageNet"
    def get_conceptnet_id(self, label):
        """Map a wnid via the curated synset -> ConceptNet table.

        Raises KeyError for wnids missing from the table.
        """
        return SYNSET_TO_CONCEPTNET_ID[label]
class ImageNet22kInstallation(ImageClassificationInstaller):
    """Installer for ImageNet-22k; image directories are named by WordNet
    noun ids (e.g. 'n02084071')."""
    def wnid_to_name(self, wnid):
        """Return the first lemma name of the synset for a noun wnid."""
        synset = wn.synset_from_pos_and_offset('n', int(wnid[1:]))
        return synset.lemmas()[0].name()
    def get_name(self):
        """Return the dataset name."""
        return "ImageNet22k"
    def get_data(self, dataset, session, root):
        """Return Image rows for every image under a wnid directory whose
        concept exists in SCADS."""
        nltk.download('wordnet')  # ensure the WordNet corpus is available
        all_images = []
        all_wnids = os.listdir(os.path.join(root, dataset.path))
        for wnid in all_wnids:
            label = self.wnid_to_name(wnid)
            node = session.query(Node).filter_by(conceptnet_id=self.get_conceptnet_id(label)).first()
            node_id = node.id if node else None
            # skip wnids whose concept is unknown to SCADS
            if not node_id:
                continue
            for image in os.listdir(os.path.join(root, dataset.path, wnid)):
                img = Image(dataset_id=dataset.id,
                            node_id=node_id,
                            path=os.path.join(dataset.path, wnid, image))
                all_images.append(img)
        return all_images
    def get_conceptnet_id(self, label):
        """Prefer the curated synset mapping; fall back to the generic
        normalization rule from the base class."""
        if label in SYNSET_TO_CONCEPTNET_ID:
            return SYNSET_TO_CONCEPTNET_ID[label]
        else:
            return super().get_conceptnet_id(label)
class COCO2014Installation(ObjectDetectionInstaller):
    """Installer for COCO 2014 object detection; labels are COCO category
    ids (ints)."""

    # COCO category id -> human-readable name; '-' marks ids unused by COCO.
    # Hoisted to a class attribute so the table is built once rather than
    # on every get_conceptnet_id() call.
    _ID_TO_NAME = {1: 'person', 2: 'bicycle', 3: 'car', 4: 'motorcycle', 5: 'airplane', 6: 'bus', 7: 'train',
                   8: 'truck', 9: 'boat', 10: 'traffic light', 11: 'fire hydrant', 12: '-', 13: 'stop sign',
                   14: 'parking meter', 15: 'bench', 16: 'bird', 17: 'cat', 18: 'dog', 19: 'horse', 20: 'sheep',
                   21: 'cow', 22: 'elephant', 23: 'bear', 24: 'zebra', 25: 'giraffe', 26: '-', 27: 'backpack',
                   28: 'umbrella', 29: '-', 30: '-', 31: 'handbag', 32: 'tie', 33: 'suitcase', 34: 'frisbee',
                   35: 'skis', 36: 'snowboard', 37: 'sports ball', 38: 'kite', 39: 'baseball bat',
                   40: 'baseball glove', 41: 'skateboard', 42: 'surfboard', 43: 'tennis racket', 44: 'bottle',
                   45: '-', 46: 'wine glass', 47: 'cup', 48: 'fork', 49: 'knife', 50: 'spoon', 51: 'bowl',
                   52: 'banana', 53: 'apple', 54: 'sandwich', 55: 'orange', 56: 'broccoli', 57: 'carrot',
                   58: 'hot dog', 59: 'pizza', 60: 'donut', 61: 'cake', 62: 'chair', 63: 'couch',
                   64: 'potted plant', 65: 'bed', 66: '-', 67: 'dining table', 68: '-', 69: '-', 70: 'toilet',
                   71: '-', 72: 'tv', 73: 'laptop', 74: 'mouse', 75: 'remote', 76: 'keyboard', 77: 'cell phone',
                   78: 'microwave', 79: 'oven', 80: 'toaster', 81: 'sink', 82: 'refrigerator', 83: '-',
                   84: 'book', 85: 'clock', 86: 'vase', 87: 'scissors', 88: 'teddy bear', 89: 'hair drier',
                   90: 'toothbrush', 91: '-', 92: ''}

    def get_name(self):
        """Return the dataset name."""
        return "COCO2014"

    def get_conceptnet_id(self, label):
        """Map a COCO category id (int) to a ConceptNet concept id.

        Unused ids ('-') normalize to "/c/en/_", as before.
        """
        name = COCO2014Installation._ID_TO_NAME[label]
        return "/c/en/" + name.lower().replace(" ", "_").replace("-", "_")
class DomainNetInstallation(ImageClassificationInstaller):
    """Installer for a single DomainNet domain (e.g. clipart, sketch)."""

    def __init__(self, domain_name):
        # the domain only affects the reported dataset name
        self.domain = domain_name

    def get_name(self):
        """Return the dataset name, qualified by domain."""
        return "DomainNet: " + self.domain

    def get_conceptnet_id(self, label):
        """Map a DomainNet class name to a ConceptNet id, honoring a few
        hand-curated exceptions whose canonical concept differs."""
        special = {'paint_can': 'can_of_paint',
                   'The_Eiffel_Tower': 'eiffel_tower',
                   'animal_migration': 'migration',
                   'teddy-bear': 'teddy_bear',
                   'The_Mona_Lisa': 'mona_lisa',
                   't-shirt': 't_shirt',
                   'The_Great_Wall_of_China': 'great_wall_of_china'}.get(label)
        if special is not None:
            return "/c/en/" + special
        return "/c/en/" + label.lower().replace(" ", "_").replace("-", "_")
class MslCuriosityInstallation(ImageClassificationInstaller):
    """Installer for the MSL Curiosity rover image dataset."""
    def get_name(self):
        """Return the dataset name."""
        return "MslCuriosity"
    def get_conceptnet_id(self, label):
        """Map a label to a ConceptNet id, shortening a few multi-word
        rover-specific labels to their head noun."""
        # NOTE: this mapping is duplicated in MarsSurfaceInstallation --
        # keep the two in sync
        exceptions = {'drill holes' : 'holes',
                      'observation tray' : 'tray',
                      'rover rear deck' : 'deck'}
        if label in exceptions:
            return "/c/en/" + exceptions[label]
        return "/c/en/" + label.lower().replace(" ", "_").replace("-", "_")
class MarsSurfaceInstallation(ImageClassificationInstaller):
    """Installer for the Mars surface image dataset."""
    def get_name(self):
        """Return the dataset name."""
        return "MarsSurface"
    def get_conceptnet_id(self, label):
        """Map a label to a ConceptNet id, shortening a few multi-word
        rover-specific labels to their head noun."""
        # NOTE: this mapping is duplicated in MslCuriosityInstallation --
        # keep the two in sync
        exceptions = {'drill holes' : 'holes',
                      'observation tray' : 'tray',
                      'rover rear deck' : 'deck'}
        if label in exceptions:
            return "/c/en/" + exceptions[label]
        return "/c/en/" + label.lower().replace(" ", "_").replace("-", "_")
class VOC2009Installation(ObjectDetectionInstaller):
    """Installer for PASCAL VOC 2009 object detection."""

    def get_name(self):
        """Return the dataset name."""
        return "VOC2009"

    def get_conceptnet_id(self, label):
        """Map a VOC class name to a ConceptNet id; the three run-together
        VOC names are expanded to their underscore forms."""
        special = {'pottedplant': 'potted_plant',
                   'tvmonitor': 'tv_monitor',
                   'diningtable': 'dining_table'}.get(label)
        if special is not None:
            return "/c/en/" + special
        return "/c/en/" + label.lower().replace(" ", "_").replace("-", "_")
class GoogleOpenImageInstallation(ObjectDetectionInstaller):
    """Installer for the Google Open Images detection dataset."""
    def get_name(self):
        """Return the dataset name."""
        return "GoogleOpenImage"
class HMDBInstallation(VideoClassificationInstaller):
    """Installer for the HMDB video-classification dataset."""
    def get_name(self):
        """Return the dataset name."""
        return "HMDB"
class UCF101Installation(VideoClassificationInstaller):
    """Installer for UCF101; CamelCase class names become underscore
    concepts."""

    def get_name(self):
        """Return the dataset name."""
        return "UCF101"

    def get_conceptnet_id(self, label):
        """Map a UCF101 CamelCase class name to a ConceptNet id.

        'Skijet' is special-cased to the 'jet_ski' concept.  Otherwise the
        name is split at capital letters and joined with underscores; a
        name with no capital letters is simply lower-cased.
        """
        if label == 'Skijet':
            return "/c/en/jet_ski"
        words = re.findall('[A-Z][^A-Z]*', label)
        cleaned = '_'.join(w.lower() for w in words)
        if cleaned:
            return "/c/en/" + cleaned
        return "/c/en/" + label.lower()
class Installer:
    """Thin facade over the SCADS database-creation helpers."""
    def __init__(self, path_to_database):
        # path to the database the helpers will create/populate
        self.db = path_to_database
    def install_conceptnet(self, path_to_conceptnet):
        """Load the ConceptNet graph into the database."""
        add_conceptnet(self.db, path_to_conceptnet)
    def install_dataset(self, root, path_to_dataset, dataset_installer):
        """Install one dataset using the given DatasetInstaller."""
        add_dataset(self.db, root, path_to_dataset, dataset_installer)
if __name__ == "__main__":
    # Get arguments
    parser = argparse.ArgumentParser(description='Scads')
    parser.add_argument("--db", type=str, help="Path to database", required=True)
    parser.add_argument("--conceptnet", type=str, help="Path to ConceptNet directory")
    parser.add_argument("--root", type=str, help="Root containing dataset directories")
    parser.add_argument("--cifar100", type=str, help="Path to CIFAR100 directory from the root")
    parser.add_argument("--mnist", type=str, help="Path to MNIST directory from the root")
    parser.add_argument("--imagenet", type=str, help="Path to ImageNet directory from the root")
    parser.add_argument("--imagenet22k", type=str, help="Path to ImageNet22k directory from the root")
    parser.add_argument("--coco2014", type=str, help="Path to COCO2014 directory from the root")
    parser.add_argument("--voc2009", type=str, help="Path to voc2009 directory from the root")
    parser.add_argument("--googleopenimage", type=str, help="Path to googleopenimage directory from the root")
    parser.add_argument("--domainnet", nargs="+")
    parser.add_argument("--hmdb", type=str, help="Path to hmdb directory from the root")
    # BUGFIX: help text previously said "ufc101"
    parser.add_argument("--ucf101", type=str, help="Path to ucf101 directory from the root")
    parser.add_argument("--msl_curiosity", type=str, help="Path to msl_curiosity directory from the root")
    parser.add_argument("--mars_surface_imgs", type=str, help="Path to mars_surface_imgs directory from the root")
    args = parser.parse_args()

    # Install SCADS
    installer = Installer(args.db)
    if args.conceptnet:
        installer.install_conceptnet(args.conceptnet)
    # --root is validated once here; the former per-dataset re-checks were
    # applied inconsistently (only for ucf101/msl/mars) and are redundant.
    if not args.root:
        raise RuntimeError("Must specify root directory.")
    if args.cifar100:
        installer.install_dataset(args.root, args.cifar100, CifarInstallation())
    if args.mnist:
        installer.install_dataset(args.root, args.mnist, MnistInstallation())
    if args.imagenet:
        installer.install_dataset(args.root, args.imagenet, ImageNetInstallation())
    if args.imagenet22k:
        installer.install_dataset(args.root, args.imagenet22k, ImageNet22kInstallation())
    if args.coco2014:
        installer.install_dataset(args.root, args.coco2014, COCO2014Installation())
    if args.voc2009:
        installer.install_dataset(args.root, args.voc2009, VOC2009Installation())
    if args.googleopenimage:
        installer.install_dataset(args.root, args.googleopenimage, GoogleOpenImageInstallation())
    if args.domainnet:
        for domain in args.domainnet:
            # directory names look like "domainnet-<domain>"
            name = domain.split("-")[1].capitalize()
            installer.install_dataset(args.root, domain, DomainNetInstallation(name))
    if args.hmdb:
        installer.install_dataset(args.root, args.hmdb, HMDBInstallation())
    if args.ucf101:
        installer.install_dataset(args.root, args.ucf101, UCF101Installation())
    if args.msl_curiosity:
        installer.install_dataset(args.root, args.msl_curiosity, MslCuriosityInstallation())
    if args.mars_surface_imgs:
        installer.install_dataset(args.root, args.mars_surface_imgs, MarsSurfaceInstallation())
| {
"alphanum_fraction": 0.5398881218,
"author": null,
"avg_line_length": 42.5815450644,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7e161ca862e61e36df6c1b3e4d05470e87a9edb5",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2022-02-27T04:27:48.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-02-14T22:40:29.000Z",
"max_forks_repo_head_hexsha": "0fa9ebeccc9177069aa09b2da84746b7532e3495",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "BatsResearch/taglets",
"max_forks_repo_path": "taglets/scads/create/install.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "0fa9ebeccc9177069aa09b2da84746b7532e3495",
"max_issues_repo_issues_event_max_datetime": "2021-11-10T16:01:47.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-11-10T16:01:47.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "BatsResearch/taglets",
"max_issues_repo_path": "taglets/scads/create/install.py",
"max_line_length": 119,
"max_stars_count": 13,
"max_stars_repo_head_hexsha": "0fa9ebeccc9177069aa09b2da84746b7532e3495",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "BatsResearch/taglets",
"max_stars_repo_path": "taglets/scads/create/install.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-30T22:56:52.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-10T13:17:10.000Z",
"num_tokens": 4389,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 19843
} |
import csv
import numpy as np
import cv2
def read_data(file_path, header=True, delimiter=','):
    """Read a delimited text file of floats.

    The file is expected to hold an N*W matrix, where N is the length of
    the time sequences and W is the number of sensors/data features.

    Replaces the original incremental np.vstack (O(n^2)) and the manual
    line-counter bookkeeping with a single stack at the end.

    Parameters:
        file_path: path of the file to read
        header: when True, the first line is skipped
        delimiter: field separator

    Returns:
        [] when there are no data rows; a 1-D float array for a single
        data row; otherwise an (N, W) float array (matching the original
        behavior in all three cases).
    """
    rows = []
    with open(file_path, 'r') as file:
        reader = csv.reader(file, delimiter=delimiter)
        for line_no, line in enumerate(reader):
            if line_no == 0 and header:
                continue  # skip the header row
            rows.append(np.array(line, dtype='float'))  # str2float
    if not rows:
        return []
    if len(rows) == 1:
        # preserve historical behavior: a single data row comes back 1-D
        return rows[0]
    return np.vstack(rows)
def read_binary(file_path, header=True, delimiter=','):
    """Read a delimited text file of 'True'/'False' cells as booleans.

    The file is expected to hold an N*W matrix, where N is the length of
    the time sequences and W is the number of sensors/data features.

    Replaces the original incremental np.vstack (O(n^2)) and manual line
    counter with a single stack at the end.

    Parameters:
        file_path: path of the file to read
        header: when True, the first line is skipped
        delimiter: field separator

    Returns:
        [] when there are no data rows; a 1-D bool array for a single data
        row; otherwise an (N, W) bool array.

    Raises:
        ValueError: when any cell is neither 'True' nor 'False'.
    """
    rows = []
    with open(file_path, 'r') as file:
        reader = csv.reader(file, delimiter=delimiter)
        for line_no, line in enumerate(reader):
            if line_no == 0 and header:
                continue  # skip the header row
            bools = []
            for element in line:
                if element == 'True':
                    bools.append(True)
                elif element == 'False':
                    bools.append(False)
                else:
                    raise ValueError("Data type is not boolean!!")
            rows.append(np.array(bools))
    if not rows:
        return []
    if len(rows) == 1:
        # preserve historical behavior: a single data row comes back 1-D
        return rows[0]
    return np.vstack(rows)
def read_human_data(file_path, num_header=2, delimiter='\t'):
    """Read human-trajectory rows as (act, x, y, vx, ax) per line.

    Skips the first `num_header` lines; columns 1, 3, 5, 7 and 9 of each
    remaining line are parsed as act (int), x, y, vx and ax (floats).
    The intervening columns appear to hold field names -- TODO confirm
    against the data format.

    Replaces the original incremental np.vstack (O(n^2)) with a single
    stack at the end.

    Returns:
        [] when there are no data rows; a 1-D array of 5 floats for a
        single row; otherwise an (N, 5) float array.
    """
    rows = []
    with open(file_path, 'r') as file:
        reader = csv.reader(file, delimiter=delimiter)
        for i, line in enumerate(reader):
            if i < num_header:
                continue  # skip header lines
            act = int(line[1])
            x = float(line[3])
            y = float(line[5])
            vx = float(line[7])
            # vy = float(line[8])
            ax = float(line[9])
            # ay = float(line[11])
            rows.append(np.array([act, x, y, vx, ax]))
    if not rows:
        return []
    if len(rows) == 1:
        # preserve historical behavior: a single data row comes back 1-D
        return rows[0]
    return np.vstack(rows)
| {
"alphanum_fraction": 0.4574383453,
"author": null,
"avg_line_length": 34.4383561644,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "0081ce0f5a205f67c9645abccd2a6851af09777a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-02-20T04:12:32.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-02-20T04:12:32.000Z",
"max_forks_repo_head_hexsha": "b5a720d59ef6243e32e97c544755fca555e713bc",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "MoonBlvd/Mask_RCNN",
"max_forks_repo_path": "data_reader.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b5a720d59ef6243e32e97c544755fca555e713bc",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "MoonBlvd/Mask_RCNN",
"max_issues_repo_path": "data_reader.py",
"max_line_length": 71,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b5a720d59ef6243e32e97c544755fca555e713bc",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "MoonBlvd/Mask_RCNN",
"max_stars_repo_path": "data_reader.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 579,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2514
} |
\section{EDA Figures}
\begin{figure}[!h]
\begin{minipage}{.45\textwidth}
\caption{Date of birth frequency.}
\label{fig:birth_date_freq}
\centering
\scalebox{0.5}{\input{./img/birth_date_freq.pgf}}
% \includegraphics[width=10]{./img/birth_date_freq.pgf}
\end{minipage}
%\end{figure}
%\begin{figure}[!h]
\begin{minipage}{.45\textwidth}
\caption{Interest earned frequency (logarithmic scale).}
\label{fig:interest_earned_freq}
\centering
% \input{./img/interest_earned_freq.pgf}
\includegraphics[width=1.2\textwidth]{./img/interest_earned_freq.png}
% \includegraphics{./img/interest_earned_freq.png}
\end{minipage}
%\end{figure}
%\begin{figure}[!h]
\begin{minipage}{.45\textwidth}
\caption{Monthly work frequency.}
\label{fig:monthly_work_freq}
\centering
\scalebox{0.5}{\input{./img/monthly_work_freq.pgf}}
\end{minipage}
\end{figure}
\begin{figure}[!h]
\caption{Sections of the cross-correlation matrix.}
\label{fig:cross-matrix}
% \begin{subfigure}[b]{\linewidth}
\begin{minipage}{.5\textwidth}
\centering
% \caption{A subfigure}\label{fig:1a}
\scalebox{0.22}{\input{./img/cross-matrix-0-0.pgf}}
\end{minipage}
% \end{subfigure}
% \begin{subfigure}[b]{\linewidth}
\begin{minipage}{.5\textwidth}
\centering
% \caption{A subfigure}\label{fig:1a}
\scalebox{0.22}{\input{./img/cross-matrix-0-1.pgf}}
\end{minipage}
% \end{subfigure}
\end{figure}
\begin{figure}[!h]
\ContinuedFloat
% \begin{subfigure}[b]{\linewidth}
\begin{minipage}{.5\textwidth}
\centering
    % \caption{A subfigure}\label{fig:1a}
    % NOTE(review): this tile duplicates cross-matrix-0-1 from the previous
    % figure; the missing upper-triangle tile is likely cross-matrix-0-2.pgf
    % -- confirm against the generated images before changing.
    \scalebox{0.22}{\input{./img/cross-matrix-0-1.pgf}}
\end{minipage}
% \end{subfigure}
% \begin{subfigure}[b]{\linewidth}
\begin{minipage}{.5\textwidth}
\centering
% \caption{A subfigure}\label{fig:1a}
\scalebox{0.22}{\input{./img/cross-matrix-1-1.pgf}}
\end{minipage}
% \end{subfigure}
\end{figure}
\begin{figure}[!h]
\ContinuedFloat
% \begin{subfigure}[b]{\linewidth}
\begin{minipage}{.5\textwidth}
\centering
% \caption{A subfigure}\label{fig:1a}
\scalebox{0.22}{\input{./img/cross-matrix-1-2.pgf}}
\end{minipage}
% \end{subfigure}
% \begin{subfigure}[b]{\linewidth}
\begin{minipage}{.5\textwidth}
\centering
% \caption{A subfigure}\label{fig:1a}
\scalebox{0.22}{\input{./img/cross-matrix-2-2.pgf}}
\end{minipage}
% \end{subfigure}
\end{figure}
| {
"alphanum_fraction": 0.634525661,
"author": null,
"avg_line_length": 29.2272727273,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "7578443ac5fa1d95d18d3905f5dfb926aa84a540",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "cc8ad76f139b3b2b92238b52f5cd2e8b7223e854",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "hcastilho/hack6",
"max_forks_repo_path": "doc/report/tex/eda_figures.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "cc8ad76f139b3b2b92238b52f5cd2e8b7223e854",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "hcastilho/hack6",
"max_issues_repo_path": "doc/report/tex/eda_figures.tex",
"max_line_length": 73,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "cc8ad76f139b3b2b92238b52f5cd2e8b7223e854",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "hcastilho/hack6",
"max_stars_repo_path": "doc/report/tex/eda_figures.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 908,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2572
} |
import random
import numpy as np
import geomstats.backend as gs
import geomstats.tests
from geomstats.geometry.poincare_half_space import PoincareHalfSpace
from tests.data_generation import _OpenSetTestData, _RiemannianMetricTestData
class PoincareHalfSpaceTestData(_OpenSetTestData):
    """Parametrised test data for the Poincare half-space manifold."""

    dim_list = random.sample(range(2, 5), 2)
    space_args_list = [(d,) for d in dim_list]
    shape_list = [(d,) for d in dim_list]
    n_points_list = random.sample(range(2, 5), 2)
    n_vecs_list = random.sample(range(2, 5), 2)

    def belongs_test_data(self):
        """Hand-picked points together with their expected membership flags."""
        data = [
            dict(dim=2, vec=[1.5, 2.3], expected=True),
            dict(dim=2, vec=[[1.5, 2.0], [2.5, -0.3]], expected=[True, False]),
        ]
        return self.generate_tests(data)

    def half_space_to_ball_coordinates_test_data(self):
        """Half-space points and their expected ball coordinates."""
        data = [
            dict(dim=2, point=[0.0, 1.0], expected=gs.zeros(2)),
            dict(
                dim=2,
                point=[[0.0, 1.0], [0.0, 2.0]],
                expected=[[0.0, 0.0], [0.0, 1.0 / 3.0]],
            ),
        ]
        return self.generate_tests(data)

    def ball_half_plane_tangent_are_inverse_test_data(self):
        """One tangent-vector/base-point pair for the inverse-map check."""
        data = [
            dict(
                dim=2,
                tangent_vec=gs.array([0.5, 1.0]),
                base_point=gs.array([1.5, 2.3]),
            )
        ]
        return self.generate_tests(data)

    def ball_to_half_space_coordinates_test_data(self):
        """A single ball point for the coordinate-change check."""
        data = [dict(dim=2, point_ball=gs.array([-0.3, 0.7]))]
        return self.generate_tests(data)

    def half_space_coordinates_ball_coordinates_composition_test_data(self):
        """A single half-space point for the round-trip composition check."""
        data = [dict(dim=2, point_half_space=gs.array([1.5, 2.3]))]
        return self.generate_tests(data)

    def random_point_belongs_test_data(self):
        """Smoke and randomly-sampled arguments for random_point/belongs."""
        smoke_args = [(2,), (3,)]
        smoke_counts = [1, 2]
        return self._random_point_belongs_test_data(
            smoke_args, smoke_counts, self.space_args_list, self.n_points_list
        )

    def projection_belongs_test_data(self):
        """Delegate to the generic projection/belongs data builder."""
        return self._projection_belongs_test_data(
            self.space_args_list, self.shape_list, self.n_points_list
        )

    def to_tangent_is_tangent_test_data(self):
        """Delegate to the generic to_tangent/is_tangent data builder."""
        return self._to_tangent_is_tangent_test_data(
            PoincareHalfSpace, self.space_args_list, self.shape_list, self.n_vecs_list
        )

    def random_tangent_vec_is_tangent_test_data(self):
        """Delegate to the generic random-tangent-vector data builder."""
        return self._random_tangent_vec_is_tangent_test_data(
            PoincareHalfSpace, self.space_args_list, self.n_vecs_list
        )

    def to_tangent_is_tangent_in_ambient_space_test_data(self):
        """Delegate to the generic ambient-space tangency data builder."""
        return self._to_tangent_is_tangent_in_ambient_space_test_data(
            PoincareHalfSpace, self.space_args_list, self.shape_list
        )
class PoincareHalfSpaceMetricTestData(_RiemannianMetricTestData):
    """Parametrised test data for the Poincare half-space metric.

    Fix: ``parallel_transport_bvp_is_isometry_test_data`` referenced a
    non-existent attribute ``self.n_tangent_list`` and would raise
    ``AttributeError`` when called; it now uses ``self.n_tangent_vecs_list``
    like every sibling method.
    """

    dim_list = random.sample(range(2, 5), 2)
    metric_args_list = [(dim,) for dim in dim_list]
    shape_list = [(dim,) for dim in dim_list]
    space_list = [PoincareHalfSpace(dim) for dim in dim_list]
    n_points_list = random.sample(range(1, 5), 2)
    n_tangent_vecs_list = random.sample(range(1, 5), 2)
    n_points_a_list = random.sample(range(1, 5), 2)
    n_points_b_list = [1]
    alpha_list = [1] * 2
    n_rungs_list = [1] * 2
    scheme_list = ["pole"] * 2

    def inner_product_test_data(self):
        """Hand-computed inner products of tangent vectors at base points."""
        smoke_data = [
            dict(
                dim=2,
                tangent_vec_a=[[1.0, 2.0], [3.0, 4.0]],
                tangent_vec_b=[[1.0, 2.0], [3.0, 4.0]],
                base_point=[[0.0, 1.0], [0.0, 5.0]],
                expected=[5.0, 1.0],
            )
        ]
        return self.generate_tests(smoke_data)

    def exp_and_coordinates_tangent_test_data(self):
        """A vertical tangent vector for the exp-in-coordinates check."""
        smoke_data = [
            dict(
                dim=2,
                tangent_vec=gs.array([0.0, 1.0]),
                base_point=gs.array([1.5, 2.3]),
            )
        ]
        return self.generate_tests(smoke_data)

    def exp_test_data(self):
        """Compare exp against a closed-form upper half-plane computation."""

        def _exp(tangent_vec, base_point):
            # Closed-form geodesic exponential in the hyperbolic upper
            # half-plane (dim=2 only): the geodesic circle through the base
            # point is straightened by a Moebius map, the exponential is
            # taken there, and the result is mapped back.
            circle_center = (
                base_point[0] + base_point[1] * tangent_vec[1] / tangent_vec[0]
            )
            circle_radius = gs.sqrt(
                (circle_center - base_point[0]) ** 2 + base_point[1] ** 2
            )
            moebius_d = 1
            moebius_c = 1 / (2 * circle_radius)
            moebius_b = circle_center - circle_radius
            moebius_a = (circle_center + circle_radius) * moebius_c
            point_complex = base_point[0] + 1j * base_point[1]
            tangent_vec_complex = tangent_vec[0] + 1j * tangent_vec[1]
            point_moebius = (
                1j
                * (moebius_d * point_complex - moebius_b)
                / (moebius_c * point_complex - moebius_a)
            )
            tangent_vec_moebius = (
                -1j
                * tangent_vec_complex
                * (1j * moebius_c * point_moebius + moebius_d) ** 2
            )
            end_point_moebius = point_moebius * gs.exp(
                tangent_vec_moebius / point_moebius
            )
            end_point_complex = (moebius_a * 1j * end_point_moebius + moebius_b) / (
                moebius_c * 1j * end_point_moebius + moebius_d
            )
            end_point_expected = gs.hstack(
                [np.real(end_point_complex), np.imag(end_point_complex)]
            )
            return end_point_expected

        inputs_to_exp = [(gs.array([2.0, 1.0]), gs.array([1.0, 1.0]))]
        smoke_data = []
        # The complex-arithmetic reference is skipped on the TF backend.
        if not geomstats.tests.tf_backend():
            for tangent_vec, base_point in inputs_to_exp:
                smoke_data.append(
                    dict(
                        dim=2,
                        tangent_vec=tangent_vec,
                        base_point=base_point,
                        expected=_exp(tangent_vec, base_point),
                    )
                )
        return self.generate_tests(smoke_data)

    def exp_shape_test_data(self):
        """Delegate to the generic exp-shape data builder."""
        return self._exp_shape_test_data(
            self.metric_args_list, self.space_list, self.shape_list
        )

    def log_shape_test_data(self):
        """Delegate to the generic log-shape data builder."""
        return self._log_shape_test_data(
            self.metric_args_list,
            self.space_list,
        )

    def squared_dist_is_symmetric_test_data(self):
        """Delegate to the generic symmetry check with a loosened tolerance."""
        return self._squared_dist_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
            atol=gs.atol * 1000,
        )

    def exp_belongs_test_data(self):
        """Delegate to the generic exp-belongs check with a loosened tolerance."""
        return self._exp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            belongs_atol=gs.atol * 10000,
        )

    def log_is_tangent_test_data(self):
        """Delegate to the generic log-is-tangent check."""
        return self._log_is_tangent_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            # NOTE(review): 10900 looks like a typo for 10000 — confirm
            # against the sibling tolerances before changing.
            is_tangent_atol=gs.atol * 10900,
        )

    def geodesic_ivp_belongs_test_data(self):
        """Delegate to the generic geodesic-IVP-belongs check."""
        return self._geodesic_ivp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_points_list,
            belongs_atol=gs.atol * 1000,
        )

    def geodesic_bvp_belongs_test_data(self):
        """Delegate to the generic geodesic-BVP-belongs check."""
        return self._geodesic_bvp_belongs_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            belongs_atol=gs.atol * 1000,
        )

    def exp_after_log_test_data(self):
        """Delegate to the generic exp∘log identity check."""
        return self._exp_after_log_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_list,
            rtol=gs.rtol * 100,
            atol=gs.atol * 10000,
        )

    def log_after_exp_test_data(self):
        """Delegate to the generic log∘exp identity check."""
        return self._log_after_exp_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            rtol=gs.rtol * 100,
            atol=gs.atol * 10000,
        )

    def exp_ladder_parallel_transport_test_data(self):
        """Delegate to the generic ladder parallel-transport check."""
        return self._exp_ladder_parallel_transport_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            self.n_rungs_list,
            self.alpha_list,
            self.scheme_list,
        )

    def exp_geodesic_ivp_test_data(self):
        """Delegate to the generic exp-vs-geodesic-IVP check."""
        return self._exp_geodesic_ivp_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            self.n_points_list,
            rtol=gs.rtol * 100000,
            atol=gs.atol * 100000,
        )

    def parallel_transport_ivp_is_isometry_test_data(self):
        """Delegate to the generic IVP parallel-transport isometry check."""
        return self._parallel_transport_ivp_is_isometry_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            is_tangent_atol=gs.atol * 1000,
            atol=gs.atol * 1000,
        )

    def parallel_transport_bvp_is_isometry_test_data(self):
        """Delegate to the generic BVP parallel-transport isometry check."""
        return self._parallel_transport_bvp_is_isometry_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            # Fixed: was ``self.n_tangent_list`` (undefined attribute),
            # which raised AttributeError whenever this method was called.
            self.n_tangent_vecs_list,
            is_tangent_atol=gs.atol * 1000,
            atol=gs.atol * 1000,
        )

    def dist_is_symmetric_test_data(self):
        """Delegate to the generic distance-symmetry check."""
        return self._dist_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )

    def dist_is_positive_test_data(self):
        """Delegate to the generic distance-positivity check."""
        return self._dist_is_positive_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )

    def squared_dist_is_positive_test_data(self):
        """Delegate to the generic squared-distance-positivity check."""
        return self._squared_dist_is_positive_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )

    def dist_is_norm_of_log_test_data(self):
        """Delegate to the generic dist == ||log|| check."""
        return self._dist_is_norm_of_log_test_data(
            self.metric_args_list,
            self.space_list,
            self.n_points_a_list,
            self.n_points_b_list,
        )

    def dist_point_to_itself_is_zero_test_data(self):
        """Delegate to the generic d(x, x) == 0 check."""
        return self._dist_point_to_itself_is_zero_test_data(
            self.metric_args_list, self.space_list, self.n_points_list
        )

    def triangle_inequality_of_dist_test_data(self):
        """Delegate to the generic triangle-inequality check."""
        return self._triangle_inequality_of_dist_test_data(
            self.metric_args_list, self.space_list, self.n_points_list
        )

    def inner_product_is_symmetric_test_data(self):
        """Delegate to the generic inner-product symmetry check."""
        return self._inner_product_is_symmetric_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
        )

    def retraction_lifting_test_data(self):
        """Reuse the log∘exp data for the retraction/lifting check."""
        return self._log_after_exp_test_data(
            self.metric_args_list,
            self.space_list,
            self.shape_list,
            self.n_tangent_vecs_list,
            rtol=gs.rtol * 100,
            atol=gs.atol * 10000,
        )
| {
"alphanum_fraction": 0.5928700594,
"author": null,
"avg_line_length": 33.18,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "540e863defc5ae7b6956a8d726f17b832b4c3c3f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "414b6832c63df60788eda934c694d68fc939708f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "chicken-biryani/geomstats",
"max_forks_repo_path": "tests/data/poincare_half_space_data.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "414b6832c63df60788eda934c694d68fc939708f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "chicken-biryani/geomstats",
"max_issues_repo_path": "tests/data/poincare_half_space_data.py",
"max_line_length": 84,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "414b6832c63df60788eda934c694d68fc939708f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "chicken-biryani/geomstats",
"max_stars_repo_path": "tests/data/poincare_half_space_data.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2795,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 11613
} |
[STATEMENT]
lemma naturality_hom_induced:
assumes "continuous_map X Y f" "f ` S \<subseteq> T"
shows "hom_boundary q Y T \<circ> hom_induced q X S Y T f
= hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. hom_boundary q Y T \<circ> hom_induced q X S Y T f = hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S
[PROOF STEP]
proof (cases "q \<le> 0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. q \<le> 0 \<Longrightarrow> hom_boundary q Y T \<circ> hom_induced q X S Y T f = hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S
2. \<not> q \<le> 0 \<Longrightarrow> hom_boundary q Y T \<circ> hom_induced q X S Y T f = hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> q \<le> 0
goal (2 subgoals):
1. q \<le> 0 \<Longrightarrow> hom_boundary q Y T \<circ> hom_induced q X S Y T f = hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S
2. \<not> q \<le> 0 \<Longrightarrow> hom_boundary q Y T \<circ> hom_induced q X S Y T f = hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<not> q \<le> 0
[PROOF STEP]
obtain p where p1: "p \<ge> Suc 0" and q: "q = int p"
[PROOF STATE]
proof (prove)
using this:
\<not> q \<le> 0
goal (1 subgoal):
1. (\<And>p. \<lbrakk>Suc 0 \<le> p; q = int p\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using zero_le_imp_eq_int
[PROOF STATE]
proof (prove)
using this:
\<not> q \<le> 0
0 \<le> ?k \<Longrightarrow> \<exists>n. ?k = int n
goal (1 subgoal):
1. (\<And>p. \<lbrakk>Suc 0 \<le> p; q = int p\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
Suc 0 \<le> p
q = int p
goal (2 subgoals):
1. q \<le> 0 \<Longrightarrow> hom_boundary q Y T \<circ> hom_induced q X S Y T f = hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S
2. \<not> q \<le> 0 \<Longrightarrow> hom_boundary q Y T \<circ> hom_induced q X S Y T f = hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. hom_boundary q Y T \<circ> hom_induced q X S Y T f = hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. (hom_boundary q Y T \<circ> hom_induced q X S Y T f) x = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) x
[PROOF STEP]
fix c
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. (hom_boundary q Y T \<circ> hom_induced q X S Y T f) x = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) x
[PROOF STEP]
show "(hom_boundary q Y T \<circ> hom_induced q X S Y T f) c =
(hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
[PROOF STEP]
proof (cases "c \<in> carrier(relative_homology_group p X S)")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. c \<in> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
2. c \<notin> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
c \<in> carrier (relative_homology_group (int p) X S)
goal (2 subgoals):
1. c \<in> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
2. c \<notin> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
c \<in> carrier (relative_homology_group (int p) X S)
[PROOF STEP]
obtain a where ceq: "c = homologous_rel_set p X S a" and a: "singular_relcycle p X S a"
[PROOF STATE]
proof (prove)
using this:
c \<in> carrier (relative_homology_group (int p) X S)
goal (1 subgoal):
1. (\<And>a. \<lbrakk>c = homologous_rel_set p X S a; singular_relcycle p X S a\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (force simp: carrier_relative_homology_group)
[PROOF STATE]
proof (state)
this:
c = homologous_rel_set p X S a
singular_relcycle p X S a
goal (2 subgoals):
1. c \<in> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
2. c \<notin> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
c = homologous_rel_set p X S a
singular_relcycle p X S a
[PROOF STEP]
have sr: "singular_relcycle p Y T (chain_map p f a)"
[PROOF STATE]
proof (prove)
using this:
c = homologous_rel_set p X S a
singular_relcycle p X S a
goal (1 subgoal):
1. singular_relcycle p Y T (chain_map p f a)
[PROOF STEP]
using assms singular_relcycle_chain_map
[PROOF STATE]
proof (prove)
using this:
c = homologous_rel_set p X S a
singular_relcycle p X S a
continuous_map X Y f
f ` S \<subseteq> T
\<lbrakk>singular_relcycle ?p ?X ?S ?c; continuous_map ?X ?X' ?g; ?g ` ?S \<subseteq> ?T\<rbrakk> \<Longrightarrow> singular_relcycle ?p ?X' ?T (chain_map ?p ?g ?c)
goal (1 subgoal):
1. singular_relcycle p Y T (chain_map p f a)
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
singular_relcycle p Y T (chain_map p f a)
goal (2 subgoals):
1. c \<in> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
2. c \<notin> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
singular_relcycle p Y T (chain_map p f a)
[PROOF STEP]
have sb: "singular_relcycle (p - Suc 0) (subtopology X S) {} (chain_boundary p a)"
[PROOF STATE]
proof (prove)
using this:
singular_relcycle p Y T (chain_map p f a)
goal (1 subgoal):
1. singular_relcycle (p - Suc 0) (subtopology X S) {} (chain_boundary p a)
[PROOF STEP]
by (metis One_nat_def a chain_boundary_boundary singular_chain_0 singular_relcycle)
[PROOF STATE]
proof (state)
this:
singular_relcycle (p - Suc 0) (subtopology X S) {} (chain_boundary p a)
goal (2 subgoals):
1. c \<in> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
2. c \<notin> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
[PROOF STEP]
have p1_eq: "int p - 1 = int (p - Suc 0)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. int p - 1 = int (p - Suc 0)
[PROOF STEP]
using p1
[PROOF STATE]
proof (prove)
using this:
Suc 0 \<le> p
goal (1 subgoal):
1. int p - 1 = int (p - Suc 0)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
int p - 1 = int (p - Suc 0)
goal (2 subgoals):
1. c \<in> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
2. c \<notin> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
[PROOF STEP]
have cbm: "(chain_boundary p (chain_map p f a))
= (chain_map (p - Suc 0) f (chain_boundary p a))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. chain_boundary p (chain_map p f a) = chain_map (p - Suc 0) f (chain_boundary p a)
[PROOF STEP]
using a chain_boundary_chain_map singular_relcycle
[PROOF STATE]
proof (prove)
using this:
singular_relcycle p X S a
singular_chain ?p ?X ?c \<Longrightarrow> chain_boundary ?p (chain_map ?p ?g ?c) = chain_map (?p - Suc 0) ?g (chain_boundary ?p ?c)
singular_relcycle ?p ?X ?S ?c = (singular_chain ?p ?X ?c \<and> singular_chain (?p - 1) (subtopology ?X ?S) (chain_boundary ?p ?c))
goal (1 subgoal):
1. chain_boundary p (chain_map p f a) = chain_map (p - Suc 0) f (chain_boundary p a)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
chain_boundary p (chain_map p f a) = chain_map (p - Suc 0) f (chain_boundary p a)
goal (2 subgoals):
1. c \<in> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
2. c \<notin> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
[PROOF STEP]
have contf: "continuous_map (subtopology X S) (subtopology Y T) f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. continuous_map (subtopology X S) (subtopology Y T) f
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
continuous_map X Y f
f ` S \<subseteq> T
goal (1 subgoal):
1. continuous_map (subtopology X S) (subtopology Y T) f
[PROOF STEP]
by (auto simp: continuous_map_in_subtopology topspace_subtopology
continuous_map_from_subtopology)
[PROOF STATE]
proof (state)
this:
continuous_map (subtopology X S) (subtopology Y T) f
goal (2 subgoals):
1. c \<in> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
2. c \<notin> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
[PROOF STEP]
unfolding q
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (hom_boundary (int p) Y T \<circ> hom_induced (int p) X S Y T f) c = (hom_induced (int p - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary (int p) X S) c
[PROOF STEP]
using assms p1 a
[PROOF STATE]
proof (prove)
using this:
continuous_map X Y f
f ` S \<subseteq> T
Suc 0 \<le> p
singular_relcycle p X S a
goal (1 subgoal):
1. (hom_boundary (int p) Y T \<circ> hom_induced (int p) X S Y T f) c = (hom_induced (int p - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary (int p) X S) c
[PROOF STEP]
apply (simp add: ceq assms hom_induced_chain_map hom_boundary_chain_boundary
hom_boundary_chain_boundary [OF sr] singular_relcycle_def mod_subset_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>Suc 0 \<le> p; singular_chain p X a \<and> singular_chain (p - Suc 0) (subtopology X S) (chain_boundary p a)\<rbrakk> \<Longrightarrow> homologous_rel_set (p - Suc 0) (subtopology Y T) {} (chain_boundary p (chain_map p f a)) = hom_induced (int p - 1) (subtopology X S) {} (subtopology Y T) {} f (homologous_rel_set (p - Suc 0) (subtopology X S) {} (chain_boundary p a))
[PROOF STEP]
apply (simp add: p1_eq contf sb cbm hom_induced_chain_map)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
(hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
goal (1 subgoal):
1. c \<notin> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. c \<notin> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
c \<notin> carrier (relative_homology_group (int p) X S)
goal (1 subgoal):
1. c \<notin> carrier (relative_homology_group (int p) X S) \<Longrightarrow> (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
[PROOF STEP]
with assms
[PROOF STATE]
proof (chain)
picking this:
continuous_map X Y f
f ` S \<subseteq> T
c \<notin> carrier (relative_homology_group (int p) X S)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
continuous_map X Y f
f ` S \<subseteq> T
c \<notin> carrier (relative_homology_group (int p) X S)
goal (1 subgoal):
1. (hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
[PROOF STEP]
unfolding q o_def
[PROOF STATE]
proof (prove)
using this:
continuous_map X Y f
f ` S \<subseteq> T
c \<notin> carrier (relative_homology_group (int p) X S)
goal (1 subgoal):
1. hom_boundary (int p) Y T (hom_induced (int p) X S Y T f c) = hom_induced (int p - 1) (subtopology X S) {} (subtopology Y T) {} f (hom_boundary (int p) X S c)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
continuous_map X Y f
f ` S \<subseteq> T
c \<notin> carrier (relative_homology_group (int p) X S)
continuous_map X Y f
f ` S \<subseteq> T
goal (1 subgoal):
1. hom_boundary (int p) Y T (hom_induced (int p) X S Y T f c) = hom_induced (int p - 1) (subtopology X S) {} (subtopology Y T) {} f (hom_boundary (int p) X S c)
[PROOF STEP]
apply (simp add: hom_induced_default hom_boundary_default)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>c \<notin> carrier (relative_homology_group (int p) X S); continuous_map X Y f; f ` S \<subseteq> T\<rbrakk> \<Longrightarrow> hom_boundary (int p) Y T (singular_relboundary_set p Y T) = hom_induced (int p - 1) (subtopology X S) {} (subtopology Y T) {} f \<one>\<^bsub>homology_group (int p - 1) (subtopology X S)\<^esub>
[PROOF STEP]
by (metis group_relative_homology_group hom_boundary hom_induced hom_one one_relative_homology_group)
[PROOF STATE]
proof (state)
this:
(hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(hom_boundary q Y T \<circ> hom_induced q X S Y T f) c = (hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S) c
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
hom_boundary q Y T \<circ> hom_induced q X S Y T f = hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S
goal (1 subgoal):
1. q \<le> 0 \<Longrightarrow> hom_boundary q Y T \<circ> hom_induced q X S Y T f = hom_induced (q - 1) (subtopology X S) {} (subtopology Y T) {} f \<circ> hom_boundary q X S
[PROOF STEP]
qed (force simp: hom_induced_trivial hom_boundary_trivial) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": 48,
"llama_tokens": 6985,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
import datetime
import logging
import sys
from pytorch_lightning.core import datamodule
from torch.utils import data
#cd /home/dcast/adversarial_project ; /usr/bin/env /home/dcast/anaconda3/envs/deep_learning_torch/bin/python -- /home/dcast/adversarial_project/irt_to_nlp/train.py
# sys.path.append("/content/adversarial_project") #to work in colab
sys.path.append("/home/dcast/adversarial_project")
import os
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.loggers import WandbLogger
import wandb
from openml.autotune import autotune_lr
from irt_to_nlp.builders import build_dataset, get_callbacks, get_trainer,get_system
from irt_to_nlp.config import CONFIG, create_config_dict
import pandas as pd
import uuid
def apply_train_test():
    """Configure wandb, build the data module and run training.

    When ``config.num_fold`` is unset or 0, a single train+test run is
    executed; otherwise k-fold training is repeated ``config.repetitions``
    times, re-creating the wandb run for each fold.

    Relies on module-level project helpers (CONFIG, create_config_dict,
    build_dataset, get_callbacks, get_trainer, get_system) plus wandb,
    torch and pytorch_lightning.
    """

    def get_new_system_and_do_training_and_test(config, data_module, wandb_logger,
                                                callbacks, num_repeat=None,
                                                num_fold=None, run_test: bool = False):
        """Build a fresh model, fit it and optionally run the test loop.

        Returns the output of ``trainer.test`` when ``run_test`` is true,
        otherwise whatever ``trainer.fit`` returned.
        """
        model = get_system(config, num_fold, num_repeat=num_repeat)
        # Reset the first callback's best score so each fold starts fresh.
        callbacks[0].best_score = torch.tensor(np.inf)
        trainer = get_trainer(wandb_logger, callbacks, config)
        result = trainer.fit(model, data_module)
        if run_test:
            result = trainer.test(model, test_dataloaders=data_module.test_dataloader())
        return result

    init_id = uuid.uuid1().int
    config = CONFIG()
    config_dict = create_config_dict(config)
    config_dict["id_group"] = init_id
    wandb.init(
        project='IRT-project-NLP',
        entity='dcastf01',
        config=config_dict)
    wandb_logger = WandbLogger(
        # offline=True,
        log_model=False
    )
    config = wandb.config
    data_module = build_dataset(path_data_csv=config.path_data,
                                dataset_name=config.dataset_name,
                                batch_size=config.batch_size,
                                model_name=config.model_name
                                )
    callbacks = get_callbacks(config, data_module)
    num_fold = config.num_fold
    if num_fold is None or num_fold == 0:
        # Single run: train once, then evaluate on the test set.
        wandb.run.name = config.model_name[:5] + " " + \
            datetime.datetime.utcnow().strftime("%Y-%m-%d %X")
        get_new_system_and_do_training_and_test(
            config, data_module, wandb_logger, callbacks,
            num_fold=num_fold, run_test=True)
    else:
        results = []
        for num_repeat in range(config.repetitions):
            for fold in range(num_fold):
                print(f"Repeticion {num_repeat}, fold {fold}")
                # Re-create the wandb run for every fold except the very
                # first (which was initialised above).
                if not num_repeat == 0 or not fold == 0:
                    config = CONFIG()
                    config_dict = create_config_dict(config)
                    config_dict["id_group"] = init_id
                    wandb.init(
                        project='IRT-project-NLP',
                        entity='dcastf01',
                        config=config_dict)
                    wandb_logger = WandbLogger(
                        # offline=True,
                        log_model=False
                    )
                    config = wandb.config
                wandb.run.name = str(num_repeat) + "_" + str(fold) + " " + config.model_name[:5] + " " + \
                    datetime.datetime.utcnow().strftime("%Y-%m-%d %X")
                result = get_new_system_and_do_training_and_test(
                    config, data_module,
                    wandb_logger, callbacks,
                    num_repeat=num_repeat,
                    num_fold=fold,
                    run_test=False)
                # BUG FIX: the original tested ``if results:`` — always falsy
                # for the (never-populated) accumulator, so no fold result was
                # ever collected. Test the fold result instead.
                # assumes result is a one-element sequence when truthy — TODO
                # confirm against get_trainer/trainer.fit return value.
                if result:
                    results.append(*result)
                wandb.finish()
        # TODO(review): a final full-data training run and a CSV export of
        # per-fold results existed here only as commented-out code; restore
        # and wire up if that reporting is still wanted.
def main():
    """Entry point: seed, configure the environment and launch training."""
    # Keep checkpoint files out of the wandb artifact upload.
    os.environ["WANDB_IGNORE_GLOBS"] = "*.ckpt"
    torch.manual_seed(0)
    print("empezando setup del experimento")
    torch.backends.cudnn.benchmark = True
    # Everything fold-related is applied from here on.
    apply_train_test()


if __name__ == "__main__":
    main()
| {
"alphanum_fraction": 0.539257981,
"author": null,
"avg_line_length": 37.6298701299,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "52171e245698182223244a2290fe4a266c17a6f7",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "01564f7b4ff9f19021986e57f5bfad827213c8a6",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "dcastf01/creating_adversarial_images",
"max_forks_repo_path": "irt_to_nlp/train.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "01564f7b4ff9f19021986e57f5bfad827213c8a6",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "dcastf01/creating_adversarial_images",
"max_issues_repo_path": "irt_to_nlp/train.py",
"max_line_length": 164,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "7b34cc3556a1b99ac67cb155fba8d0837c9b7b10",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "cmonserr/Why_Difficulty",
"max_stars_repo_path": "irt_to_nlp/train.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-04T11:33:41.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-02-04T11:33:41.000Z",
"num_tokens": 1121,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5795
} |
[STATEMENT]
lemma OrdP_linear_lemma:
assumes j: "atom j \<sharp> i"
shows "{ OrdP (Var i) } \<turnstile> All j (OrdP (Var j) IMP (Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))"
(is "_ \<turnstile> ?scheme")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {OrdP (Var i)} \<turnstile> SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. {OrdP (Var i)} \<turnstile> SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i)
[PROOF STEP]
obtain k::name and l::name and m::name
where k: "atom k \<sharp> (i,j)" and l: "atom l \<sharp> (i,j,k)" and m: "atom m \<sharp> (i,j)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>k l m. \<lbrakk>atom k \<sharp> (i, j); atom l \<sharp> (i, j, k); atom m \<sharp> (i, j)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (metis obtain_fresh)
[PROOF STATE]
proof (state)
this:
atom k \<sharp> (i, j)
atom l \<sharp> (i, j, k)
atom m \<sharp> (i, j)
goal (1 subgoal):
1. {OrdP (Var i)} \<turnstile> SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {OrdP (Var i)} \<turnstile> SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i)
[PROOF STEP]
proof (rule OrdIndH [where i=i and j=k])
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. atom k \<sharp> (i, SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))
2. {} \<turnstile> SyntaxN.All i (OrdP (Var i) IMP All2 k (Var i) ((SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))(i::=Var k)) IMP SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))
[PROOF STEP]
show "atom k \<sharp> (i, ?scheme)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. atom k \<sharp> (i, SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))
[PROOF STEP]
using k
[PROOF STATE]
proof (prove)
using this:
atom k \<sharp> (i, j)
goal (1 subgoal):
1. atom k \<sharp> (i, SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))
[PROOF STEP]
by (force simp add: fresh_Pair)
[PROOF STATE]
proof (state)
this:
atom k \<sharp> (i, SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))
goal (1 subgoal):
1. {} \<turnstile> SyntaxN.All i (OrdP (Var i) IMP All2 k (Var i) ((SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))(i::=Var k)) IMP SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. {} \<turnstile> SyntaxN.All i (OrdP (Var i) IMP All2 k (Var i) ((SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))(i::=Var k)) IMP SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))
[PROOF STEP]
show "{} \<turnstile> All i (OrdP (Var i) IMP (All2 k (Var i) (?scheme(i::= Var k)) IMP ?scheme))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {} \<turnstile> SyntaxN.All i (OrdP (Var i) IMP All2 k (Var i) ((SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))(i::=Var k)) IMP SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))
[PROOF STEP]
using j k
[PROOF STATE]
proof (prove)
using this:
atom j \<sharp> i
atom k \<sharp> (i, j)
goal (1 subgoal):
1. {} \<turnstile> SyntaxN.All i (OrdP (Var i) IMP All2 k (Var i) ((SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))(i::=Var k)) IMP SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {} \<turnstile> SyntaxN.All i (OrdP (Var i) IMP All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)) IMP SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))
[PROOF STEP]
apply (rule All_I Imp_I)+
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var i IN Var j OR Var i EQ Var j OR Var j IN Var i
2. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> \<forall>C\<in>{All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)}. atom j \<sharp> C
3. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> \<forall>C\<in>{}. atom i \<sharp> C
[PROOF STEP]
defer 1
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> \<forall>C\<in>{All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)}. atom j \<sharp> C
2. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> \<forall>C\<in>{}. atom i \<sharp> C
3. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var i IN Var j OR Var i EQ Var j OR Var j IN Var i
[PROOF STEP]
apply auto [2]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var i IN Var j OR Var i EQ Var j OR Var j IN Var i
[PROOF STEP]
apply (rule OrdIndH [where i=j and j=l])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> atom l \<sharp> (j, Var i IN Var j OR Var i EQ Var j OR Var j IN Var i)
2. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> SyntaxN.All j (OrdP (Var j) IMP All2 l (Var j) ((Var i IN Var j OR Var i EQ Var j OR Var j IN Var i)(j::=Var l)) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i)
[PROOF STEP]
using l
\<comment> \<open>nested induction\<close>
[PROOF STATE]
proof (prove)
using this:
atom l \<sharp> (i, j, k)
goal (2 subgoals):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> atom l \<sharp> (j, Var i IN Var j OR Var i EQ Var j OR Var j IN Var i)
2. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> SyntaxN.All j (OrdP (Var j) IMP All2 l (Var j) ((Var i IN Var j OR Var i EQ Var j OR Var j IN Var i)(j::=Var l)) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i)
[PROOF STEP]
apply (force simp add: fresh_Pair)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> SyntaxN.All j (OrdP (Var j) IMP All2 l (Var j) ((Var i IN Var j OR Var i EQ Var j OR Var j IN Var i)(j::=Var l)) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i)
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> SyntaxN.All j (OrdP (Var j) IMP All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i)
[PROOF STEP]
apply (rule All_I Imp_I)+
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var i IN Var j OR Var i EQ Var j OR Var j IN Var i
2. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> \<forall>C\<in>{All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)}. atom j \<sharp> C
[PROOF STEP]
prefer 2
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> \<forall>C\<in>{All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)}. atom j \<sharp> C
2. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var i IN Var j OR Var i EQ Var j OR Var j IN Var i
[PROOF STEP]
apply force
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var i IN Var j OR Var i EQ Var j OR Var j IN Var i
[PROOF STEP]
apply (rule Disj_3I)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var i EQ Var j
[PROOF STEP]
apply (rule Equality_I)
\<comment> \<open>Now the opposite inclusion, @{term"Var j SUBS Var i"}\<close>
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var j SUBS Var i
2. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var i SUBS Var j
[PROOF STEP]
apply (rule Subset_I [where i=m])
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Var m IN Var j, Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var m IN Var i
2. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> atom m \<sharp> (Var j, Var i)
3. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> \<forall>B\<in>{Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)}. atom m \<sharp> B
4. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var i SUBS Var j
[PROOF STEP]
apply (rule All2_E [THEN rotate4])
[PROOF STATE]
proof (prove)
goal (6 subgoals):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> atom l \<sharp> Var j
2. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Var m IN Var j, Neg (Var i IN Var j), Neg (Var j IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> ?x46 IN Var j
3. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {(Var i IN Var l OR Var i EQ Var l OR Var l IN Var i)(l::=?x46), Var m IN Var j, Neg (Var i IN Var j), Neg (Var j IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var m IN Var i
4. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> atom m \<sharp> (Var j, Var i)
5. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> \<forall>B\<in>{Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)}. atom m \<sharp> B
6. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var i SUBS Var j
[PROOF STEP]
using l m
[PROOF STATE]
proof (prove)
using this:
atom l \<sharp> (i, j, k)
atom m \<sharp> (i, j)
goal (6 subgoals):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> atom l \<sharp> Var j
2. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Var m IN Var j, Neg (Var i IN Var j), Neg (Var j IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> ?x46 IN Var j
3. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {(Var i IN Var l OR Var i EQ Var l OR Var l IN Var i)(l::=?x46), Var m IN Var j, Neg (Var i IN Var j), Neg (Var j IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var m IN Var i
4. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> atom m \<sharp> (Var j, Var i)
5. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> \<forall>B\<in>{Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)}. atom m \<sharp> B
6. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var i SUBS Var j
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j); atom l \<sharp> (i, j, k); atom m \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Var i IN Var m, Var m IN Var j, Neg (Var i IN Var j), Neg (Var j IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var m IN Var i
2. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j); atom l \<sharp> (i, j, k); atom m \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Var i EQ Var m, Var m IN Var j, Neg (Var i IN Var j), Neg (Var j IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var m IN Var i
3. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j); atom l \<sharp> (i, j, k); atom m \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var i SUBS Var j
[PROOF STEP]
apply (blast intro: ContraProve [THEN rotate3] OrdP_Trans)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j); atom l \<sharp> (i, j, k); atom m \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Var i EQ Var m, Var m IN Var j, Neg (Var i IN Var j), Neg (Var j IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var m IN Var i
2. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j); atom l \<sharp> (i, j, k); atom m \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var i SUBS Var j
[PROOF STEP]
apply (blast intro: ContraProve [THEN rotate3] Mem_cong [OF Hyp Refl, THEN Iff_MP2_same])
\<comment> \<open>Now the opposite inclusion, @{term"Var i SUBS Var j"}\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j); atom l \<sharp> (i, j, k); atom m \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var i SUBS Var j
[PROOF STEP]
apply (rule Subset_I [where i=m])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j); atom l \<sharp> (i, j, k); atom m \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Var m IN Var i, Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)} \<turnstile> Var m IN Var j
2. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j); atom l \<sharp> (i, j, k); atom m \<sharp> (i, j)\<rbrakk> \<Longrightarrow> atom m \<sharp> (Var i, Var j)
3. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j); atom l \<sharp> (i, j, k); atom m \<sharp> (i, j)\<rbrakk> \<Longrightarrow> \<forall>B\<in>{Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), All2 k (Var i) (SyntaxN.All j (OrdP (Var j) IMP Var k IN Var j OR Var k EQ Var j OR Var j IN Var k)), OrdP (Var i)}. atom m \<sharp> B
[PROOF STEP]
apply (rule All2_E [THEN rotate6], auto)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j); atom l \<sharp> (i, j, k); atom m \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {SyntaxN.All j (OrdP (Var j) IMP Var m IN Var j OR Var m EQ Var j OR Var j IN Var m), Var m IN Var i, Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), OrdP (Var i)} \<turnstile> Var m IN Var j
[PROOF STEP]
apply (rule All_E [where x = "Var j"], auto)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j); atom l \<sharp> (i, j, k); atom m \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Var m EQ Var j, Var m IN Var i, Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), OrdP (Var i)} \<turnstile> Var m IN Var j
2. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j); atom l \<sharp> (i, j, k); atom m \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Var j IN Var m, Var m IN Var i, Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), OrdP (Var i)} \<turnstile> Var m IN Var j
[PROOF STEP]
apply (blast intro: ContraProve [THEN rotate4] Mem_cong [OF Hyp Refl, THEN Iff_MP_same])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>atom j \<sharp> i; atom k \<sharp> (i, j); atom l \<sharp> (i, j, k); atom m \<sharp> (i, j)\<rbrakk> \<Longrightarrow> {Var j IN Var m, Var m IN Var i, Neg (Var i IN Var j), Neg (Var j IN Var i), All2 l (Var j) (Var i IN Var l OR Var i EQ Var l OR Var l IN Var i), OrdP (Var j), OrdP (Var i)} \<turnstile> Var m IN Var j
[PROOF STEP]
apply (blast intro: ContraProve [THEN rotate4] OrdP_Trans)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
{} \<turnstile> SyntaxN.All i (OrdP (Var i) IMP All2 k (Var i) ((SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))(i::=Var k)) IMP SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
{OrdP (Var i)} \<turnstile> SyntaxN.All j (OrdP (Var j) IMP Var i IN Var j OR Var i EQ Var j OR Var j IN Var i)
goal:
No subgoals!
[PROOF STEP]
qed | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Incompleteness_Predicates",
"hexsha": null,
"include": null,
"lang": null,
"length": 38,
"llama_tokens": 9154,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
# Problem: https://projecteuler.net/problem=367
# To run the code: `./problem367.sh`
"""
* Define notations as follows:
. let e(i) represent the element at position i of a permutation
. a permutation p of n elements is represented as follows
p = [e(1) e(2) e(3) ... e(n)]
. a permutation p of n elements is called n-cyclic if:
e(e(e(...e(1)))) = 1
^^^^^^^^^^
`e` appears n times
. let (e(1) e(2) ... e(n)) represent an n-cyclic permutation.
. define a histogram as a function f: N -> N such that:
f(i) = j means the element i appears j times in the data
. define a `permutation cycle histogram` of permutation p of length n as a function h: N -> N such that
h(i) = j means the (i-cyclic) sub-permutations appears j times in the permutation p
where the domain of h is {0, 1, ..., n}
* Let us classify a permutation p using the "permutation cycle histogram".
* Observation:
Each permutation p is a collections of cyclic sub-permutations.
For example,
permutation | cyclic subpermutations | permutation cycle histogram
[3 1 2 4 5] | (1 2 3) (4 5) | [0 0 1 1 0 0] -> one 2-cyclic subpermutation, one 3 cyclic subpermutation
[1 2 3 4 5] | (1) (2) (3) (4) (5) | [0 5 0 0 0 0] -> five 1-cyclic subpermutation
[2 3 4 5 1] | (1 2 3 4 5) | [0 0 0 0 0 1] -> one 5-cyclic subpermutation
* Let hash_p(histogram) be a bijection function between all "permutation cycle histograms" and their corresponding hash.
Let hash(permutation) be hash_p(histogram) where histogram is the "permutation cycle histogram" of the permutation.
* Consider permutations of N elements.
Let X = [0 1 ... N-1]
* The code is as follows,
- problem367.sh: compiles and runs the solver.
- problem367.cpp: generates all permutations of X and counts the number of appearances of each possible hash of permutation cycle histograms.
- problem367.py: it does 3 things
* generates the transition matrix:
- for each possible permutation cycle histogram, it generates a representative permutation that maps to such the histogram
- for all ways of picking 3 elements and shuffling them, it determines the probability of transitioning to other permutation cycle histograms
* uses a linear solver to solve the absorbing Markov chain where:
- the sorted permutation is the absorbing state
* calculates the expectation:
- The linear solver above returns the expected number of steps to reach the absorbing state for all possible starting states.
- We have the distribution of starting states from problem367.cpp, so we can use the linearity of expectation to calculate the answer to the problem:
Expectation = sum{(probability of starting at state S) * (the expected number of steps to reach the absorbing state from S) | for all S in all possible starting states}
"""
from sage.all import *
import itertools
from collections import defaultdict
import numpy as np
def round_up_to_nearest_log_2(n):
    """Return the smallest exponent e with 2**e >= n (0 when n <= 1)."""
    power, exponent = 1, 0
    while power < n:
        power <<= 1
        exponent += 1
    return exponent
def mask(n_bits):
    """Return an integer with the low ``n_bits`` bits all set."""
    full_range = 2 ** n_bits
    return full_range - 1
# Problem size: permutations of N elements; M elements are shuffled per step.
N = 11
M = 3
# Number of bits needed to store one frequency value (each count is <= N).
BITS_PER_ELEMENT = round_up_to_nearest_log_2(N)
# Bit mask selecting a single BITS_PER_ELEMENT-wide frequency field.
ELEMENT_MASK = mask(BITS_PER_ELEMENT)
def dfs1(curr_node, first_node, seq, visited):
    """Walk the permutation ``seq`` from ``curr_node``, marking nodes as
    visited, and return how many previously-unvisited nodes were seen
    (i.e. the cycle length, or 0 if ``curr_node`` was already visited).
    """
    count = 0
    node = curr_node
    while not visited[node]:
        visited[node] = True
        count += 1
        node = seq[node]
    return count
def find_cycle_length_started_at(first_node, seq, visited):
    """Length of the cycle of ``seq`` containing ``first_node``
    (0 when that node was already visited)."""
    cycle_length = dfs1(first_node, first_node, seq, visited)
    return cycle_length
def find_cycle_length_freqs(seq):
    """Histogram of cycle lengths of the permutation ``seq``: index i
    holds the number of i-cycles, for i in 0..N."""
    freqs = [0] * (N + 1)
    seen = [False] * N
    for start in range(N):
        if seen[start]:
            continue
        cycle_len = find_cycle_length_started_at(start, seq, seen)
        freqs[cycle_len] += 1
    return freqs
def hash_freq(freq):
    """Pack a cycle-length histogram into a single integer,
    BITS_PER_ELEMENT bits per entry, length-0 entry in the lowest bits."""
    packed = 0
    for length in range(N + 1):
        packed |= freq[length] << (length * BITS_PER_ELEMENT)
    return packed
def hash_seq(seq):
    """Hash a permutation via its cycle-length histogram."""
    freqs = find_cycle_length_freqs(seq)
    return hash_freq(freqs)
def decode_hash(h):
    """Invert ``hash_freq``: unpack a packed histogram hash back into a
    cycle-length frequency list of N+1 entries.

    Bug fix: the original masked each field with ``mask(ELEMENT_MASK)``
    (a 15-bit mask, since ELEMENT_MASK is already a mask value), which
    pulled in bits from neighbouring fields; each field is only
    BITS_PER_ELEMENT bits wide, so the correct mask is ELEMENT_MASK itself.
    """
    length_freqs = []
    for _ in range(N + 1):
        length_freqs.append(h & ELEMENT_MASK)
        h >>= BITS_PER_ELEMENT
    return length_freqs
def dfs2(curr_node, depth, curr_sum, target_sum, path, ans):
    """Enumerate integer partitions of ``target_sum`` into non-decreasing
    parts, appending each one to ``ans`` as a cycle-length frequency list
    of size N+1.

    ``curr_node`` is the minimum allowed next part, ``path[:depth]`` the
    parts chosen so far, ``curr_sum`` their running total.

    Fix: removed the unused local ``next_node_lowerbound``.
    """
    if curr_sum > target_sum:
        return
    elif curr_sum == target_sum:
        freq = [0] * (N+1)
        for length in path[:depth]:
            freq[length] += 1
        ans.append(freq)
        return
    # Generate parts in non-decreasing order (>= curr_node) so each
    # partition is produced exactly once.
    for i in range(max(curr_node, 1), target_sum - curr_sum+1):
        path[depth] = i
        dfs2(i, depth+1, curr_sum + i, target_sum, path, ans)
def get_permutation(seq, permutation_index):
    """Return a copy of ``seq`` rearranged so that output position i
    holds ``seq[permutation_index[i]]``; unmapped positions stay 0."""
    result = [0] * len(seq)
    for out_pos, src_pos in enumerate(permutation_index):
        result[out_pos] = seq[src_pos]
    return result
def left_rotate_by_k(seq, k):
    """Rotate ``seq`` left by ``k`` positions."""
    head, tail = seq[:k], seq[k:]
    return tail + head
def construct_representative_seq(cycle_length_freq):
    """Build one concrete permutation whose cycle-length histogram equals
    ``cycle_length_freq``: consecutive index blocks, each rotated left by
    one so the block forms a single cycle of that length."""
    lengths = []
    for length, count in enumerate(cycle_length_freq):
        lengths.extend([length] * count)
    seq = []
    start = 0
    for length in lengths:
        block = list(range(start, start + length))
        seq += get_permutation(block, left_rotate_by_k(list(range(length)), k=1))
        start += length
    return seq
def find_next_states(curr_length_freq):
    """Return a dict mapping next-state hash -> number of ways to reach it.

    Builds a representative permutation for ``curr_length_freq``, then for
    every choice of M positions and every rearrangement of the picked
    elements, counts the hash of the resulting permutation's cycle
    histogram.

    Bug fix: the original read the leaked global loop variable
    ``cycle_length_freq`` instead of the ``curr_length_freq`` parameter;
    it only worked because the caller's loop variable happened to hold
    the same list at call time.
    """
    next_states = defaultdict(lambda: 0)
    seq = construct_representative_seq(curr_length_freq)
    for picked_idx in itertools.combinations(list(range(N)), M):
        for permutation in itertools.permutations(list(range(M))):
            new_seq = seq[:]
            # Rearrange the M picked elements according to `permutation`.
            for i in range(M):
                new_seq[picked_idx[i]] = seq[picked_idx[permutation[i]]]
            h = hash_seq(new_seq)
            next_states[h] += 1
    return next_states
if __name__ == "__main__":
    # Enumerate every cycle-length histogram whose lengths sum to N
    # (i.e. every integer partition of N) via dfs2.
    curr_path = [0] * N
    cycle_length_freqs_sum_to_N = []
    dfs2(0, 0, 0, N, curr_path, cycle_length_freqs_sum_to_N)
    # Map each histogram's hash to a dense matrix index; remember the index
    # of the fully-sorted permutation (N fixed points) — the absorbing state.
    hash_to_index = {}
    max_index = 0
    stopping_state_index = 0
    for cycle_length_freq in cycle_length_freqs_sum_to_N:
        h = hash_freq(cycle_length_freq)
        hash_to_index[h] = max_index
        if cycle_length_freq[1] == N:
            stopping_state_index = max_index
        max_index += 1
    # Build the transition matrix T over histogram states using exact
    # rationals (matrix/QQ come from `from sage.all import *`).
    n = len(cycle_length_freqs_sum_to_N)
    T = matrix(QQ, n, n)
    for cycle_length_freq in cycle_length_freqs_sum_to_N:
        curr_state_hash = hash_freq(cycle_length_freq)
        next_states = find_next_states(cycle_length_freq)
        n_next_states = sum(next_states.values())
        if hash_to_index[curr_state_hash] == stopping_state_index:
            # Absorbing state: leave its row zero so (I - T) is invertible.
            #T[stopping_state_index, stopping_state_index] = 0
            continue
        for next_state_hash, freq in next_states.items():
            T[hash_to_index[curr_state_hash], hash_to_index[next_state_hash]] += QQ("{}/{}".format(freq, n_next_states))
    # Starting-state distribution S, read from the counts produced by
    # problem367.cpp; each line is "<hash> <count>" out of N! permutations.
    S = matrix(QQ, 1, n)
    with open("p367_count.txt") as f:
        for line in f.readlines():
            h, freq = list(map(int, line.strip().split(" ")))
            S[0, hash_to_index[h]] = QQ("{}/{}".format(freq, factorial(N)))
    # Absorbing Markov Chain: solve (I - T) x = 1 (with 0 at the absorbing
    # row) for the expected number of steps to absorption from each state.
    ABSORBING_STATE = stopping_state_index
    rhs = matrix(QQ, n, 1, lambda i, j: 1)
    rhs[ABSORBING_STATE, 0] = 0
    A = matrix(QQ, np.eye(n)) - T
    x = A.solve_right(rhs)
    # Linearity of expectation: weight each state's expected step count by
    # the probability of starting in that state.
    expectation = sum(S * x)[0]
    print(ceil(expectation))
| {
"alphanum_fraction": 0.621148632,
"author": null,
"avg_line_length": 37.2201834862,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "283e73b0ce691c24c813b7de22fb52a1f8e34669",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-11-02T12:08:46.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-11-02T12:08:46.000Z",
"max_forks_repo_head_hexsha": "6f434be429bd26f5d0f84f5ab0f5fa2bd677c790",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "takekoputa/project-euler",
"max_forks_repo_path": "4th_100/problem367.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6f434be429bd26f5d0f84f5ab0f5fa2bd677c790",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "takekoputa/project-euler",
"max_issues_repo_path": "4th_100/problem367.py",
"max_line_length": 189,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "6f434be429bd26f5d0f84f5ab0f5fa2bd677c790",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "takekoputa/project-euler",
"max_stars_repo_path": "4th_100/problem367.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2060,
"path": null,
"reason": "import numpy,from sage",
"repo": null,
"save_path": null,
"sha": null,
"size": 8114
} |
/-
Copyright (c) 2018 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Jens Wagemaker
-/
import algebra.big_operators.basic
import algebra.divisibility
import algebra.invertible
/-!
# Associated, prime, and irreducible elements.
-/
variables {α : Type*} {β : Type*} {γ : Type*} {δ : Type*}
/-- In a commutative monoid, `x` is a unit iff `x` divides `1`. -/
theorem is_unit_iff_dvd_one [comm_monoid α] {x : α} : is_unit x ↔ x ∣ 1 :=
⟨by rintro ⟨u, rfl⟩; exact ⟨_, u.mul_inv.symm⟩,
 λ ⟨y, h⟩, ⟨⟨x, y, h.symm, by rw [h, mul_comm]⟩, rfl⟩⟩
theorem is_unit_iff_forall_dvd [comm_monoid α] {x : α} :
is_unit x ↔ ∀ y, x ∣ y :=
is_unit_iff_dvd_one.trans ⟨λ h y, h.trans (one_dvd _), λ h, h _⟩
theorem is_unit_of_dvd_unit {α} [comm_monoid α] {x y : α}
(xy : x ∣ y) (hu : is_unit y) : is_unit x :=
is_unit_iff_dvd_one.2 $ xy.trans $ is_unit_iff_dvd_one.1 hu
lemma is_unit_of_dvd_one [comm_monoid α] : ∀a ∣ 1, is_unit (a:α)
| a ⟨b, eq⟩ := ⟨units.mk_of_mul_eq_one a b eq.symm, rfl⟩
lemma dvd_and_not_dvd_iff [comm_cancel_monoid_with_zero α] {x y : α} :
x ∣ y ∧ ¬y ∣ x ↔ dvd_not_unit x y :=
⟨λ ⟨⟨d, hd⟩, hyx⟩, ⟨λ hx0, by simpa [hx0] using hyx, ⟨d,
mt is_unit_iff_dvd_one.1 (λ ⟨e, he⟩, hyx ⟨e, by rw [hd, mul_assoc, ← he, mul_one]⟩), hd⟩⟩,
λ ⟨hx0, d, hdu, hdx⟩, ⟨⟨d, hdx⟩, λ ⟨e, he⟩, hdu (is_unit_of_dvd_one _
⟨e, mul_left_cancel₀ hx0 $ by conv {to_lhs, rw [he, hdx]};simp [mul_assoc]⟩)⟩⟩
lemma pow_dvd_pow_iff [comm_cancel_monoid_with_zero α]
{x : α} {n m : ℕ} (h0 : x ≠ 0) (h1 : ¬ is_unit x) :
x ^ n ∣ x ^ m ↔ n ≤ m :=
begin
split,
{ intro h, rw [← not_lt], intro hmn, apply h1,
have : x ^ m * x ∣ x ^ m * 1,
{ rw [← pow_succ', mul_one], exact (pow_dvd_pow _ (nat.succ_le_of_lt hmn)).trans h },
rwa [mul_dvd_mul_iff_left, ← is_unit_iff_dvd_one] at this, apply pow_ne_zero m h0 },
{ apply pow_dvd_pow }
end
section prime
variables [comm_monoid_with_zero α]
/-- An element `p` of a `comm_monoid_with_zero` is *prime* if it is nonzero,
is not a unit, and divides one of the factors of every product it divides. -/
def prime (p : α) : Prop :=
p ≠ 0 ∧ ¬ is_unit p ∧ (∀a b, p ∣ a * b → p ∣ a ∨ p ∣ b)
namespace prime
variables {p : α} (hp : prime p)
include hp
lemma ne_zero : p ≠ 0 :=
hp.1
lemma not_unit : ¬ is_unit p :=
hp.2.1
lemma not_dvd_one : ¬ p ∣ 1 :=
mt (is_unit_of_dvd_one _) hp.not_unit
lemma ne_one : p ≠ 1 :=
λ h, hp.2.1 (h.symm ▸ is_unit_one)
lemma dvd_or_dvd (hp : prime p) {a b : α} (h : p ∣ a * b) :
p ∣ a ∨ p ∣ b :=
hp.2.2 a b h
lemma dvd_of_dvd_pow (hp : prime p) {a : α} {n : ℕ} (h : p ∣ a^n) :
p ∣ a :=
begin
induction n with n ih,
{ rw pow_zero at h,
have := is_unit_of_dvd_one _ h,
have := not_unit hp,
contradiction },
rw pow_succ at h,
cases dvd_or_dvd hp h with dvd_a dvd_pow,
{ assumption },
exact ih dvd_pow
end
lemma exists_mem_multiset_dvd {s : multiset α} :
p ∣ s.prod → ∃ a ∈ s, p ∣ a :=
multiset.induction_on s (λ h, (hp.not_dvd_one h).elim) $
λ a s ih h,
have p ∣ a * s.prod, by simpa using h,
match hp.dvd_or_dvd this with
| or.inl h := ⟨a, multiset.mem_cons_self a s, h⟩
| or.inr h := let ⟨a, has, h⟩ := ih h in ⟨a, multiset.mem_cons_of_mem has, h⟩
end
lemma exists_mem_multiset_map_dvd {s : multiset β} {f : β → α} :
p ∣ (s.map f).prod → ∃ a ∈ s, p ∣ f a :=
λ h, by simpa only [exists_prop, multiset.mem_map, exists_exists_and_eq_and]
using hp.exists_mem_multiset_dvd h
lemma exists_mem_finset_dvd {s : finset β} {f : β → α} :
p ∣ s.prod f → ∃ i ∈ s, p ∣ f i :=
hp.exists_mem_multiset_map_dvd
end prime
/-- `0` is never prime: primality requires `p ≠ 0`. -/
@[simp] lemma not_prime_zero : ¬ prime (0 : α) :=
λ h, h.ne_zero rfl
/-- `1` is never prime: primality requires `p` not to be a unit. -/
@[simp] lemma not_prime_one : ¬ prime (1 : α) :=
λ h, h.not_unit is_unit_one
end prime
lemma prime.left_dvd_or_dvd_right_of_dvd_mul [comm_cancel_monoid_with_zero α] {p : α}
(hp : prime p) {a b : α} : a ∣ p * b → p ∣ a ∨ a ∣ b :=
begin
rintro ⟨c, hc⟩,
rcases hp.2.2 a c (hc ▸ dvd_mul_right _ _) with h | ⟨x, rfl⟩,
{ exact or.inl h },
{ rw [mul_left_comm, mul_right_inj' hp.ne_zero] at hc,
exact or.inr (hc.symm ▸ dvd_mul_right _ _) }
end
/-- `irreducible p` states that `p` is non-unit and only factors into units.
We explicitly avoid stating that `p` is non-zero, this would require a semiring. Assuming only a
monoid allows us to reuse irreducible for associated elements.
-/
class irreducible [monoid α] (p : α) : Prop :=
(not_unit' : ¬ is_unit p)
(is_unit_or_is_unit' : ∀a b, p = a * b → is_unit a ∨ is_unit b)
namespace irreducible
lemma not_unit [monoid α] {p : α} (hp : irreducible p) : ¬ is_unit p :=
hp.1
lemma is_unit_or_is_unit [monoid α] {p : α} (hp : irreducible p) {a b : α} (h : p = a * b) :
is_unit a ∨ is_unit b :=
irreducible.is_unit_or_is_unit' a b h
end irreducible
lemma irreducible_iff [monoid α] {p : α} :
irreducible p ↔ ¬ is_unit p ∧ ∀a b, p = a * b → is_unit a ∨ is_unit b :=
⟨λ h, ⟨h.1, h.2⟩, λ h, ⟨h.1, h.2⟩⟩
@[simp] theorem not_irreducible_one [monoid α] : ¬ irreducible (1 : α) :=
by simp [irreducible_iff]
@[simp] theorem not_irreducible_zero [monoid_with_zero α] : ¬ irreducible (0 : α)
| ⟨hn0, h⟩ := have is_unit (0:α) ∨ is_unit (0:α), from h 0 0 ((mul_zero 0).symm),
this.elim hn0 hn0
theorem irreducible.ne_zero [monoid_with_zero α] : ∀ {p:α}, irreducible p → p ≠ 0
| _ hp rfl := not_irreducible_zero hp
theorem of_irreducible_mul {α} [monoid α] {x y : α} :
irreducible (x * y) → is_unit x ∨ is_unit y
| ⟨_, h⟩ := h _ _ rfl
theorem irreducible_or_factor {α} [monoid α] (x : α) (h : ¬ is_unit x) :
irreducible x ∨ ∃ a b, ¬ is_unit a ∧ ¬ is_unit b ∧ a * b = x :=
begin
haveI := classical.dec,
refine or_iff_not_imp_right.2 (λ H, _),
simp [h, irreducible_iff] at H ⊢,
refine λ a b h, classical.by_contradiction $ λ o, _,
simp [not_or_distrib] at o,
exact H _ o.1 _ o.2 h.symm
end
protected lemma prime.irreducible [comm_cancel_monoid_with_zero α] {p : α} (hp : prime p) :
irreducible p :=
⟨hp.not_unit, λ a b hab,
(show a * b ∣ a ∨ a * b ∣ b, from hab ▸ hp.dvd_or_dvd (hab ▸ dvd_rfl)).elim
(λ ⟨x, hx⟩, or.inr (is_unit_iff_dvd_one.2
⟨x, mul_right_cancel₀ (show a ≠ 0, from λ h, by simp [*, prime] at *)
$ by conv {to_lhs, rw hx}; simp [mul_comm, mul_assoc, mul_left_comm]⟩))
(λ ⟨x, hx⟩, or.inl (is_unit_iff_dvd_one.2
⟨x, mul_right_cancel₀ (show b ≠ 0, from λ h, by simp [*, prime] at *)
$ by conv {to_lhs, rw hx}; simp [mul_comm, mul_assoc, mul_left_comm]⟩))⟩
lemma succ_dvd_or_succ_dvd_of_succ_sum_dvd_mul [comm_cancel_monoid_with_zero α]
{p : α} (hp : prime p) {a b : α} {k l : ℕ} :
p ^ k ∣ a → p ^ l ∣ b → p ^ ((k + l) + 1) ∣ a * b → p ^ (k + 1) ∣ a ∨ p ^ (l + 1) ∣ b :=
λ ⟨x, hx⟩ ⟨y, hy⟩ ⟨z, hz⟩,
have h : p ^ (k + l) * (x * y) = p ^ (k + l) * (p * z),
by simpa [mul_comm, pow_add, hx, hy, mul_assoc, mul_left_comm] using hz,
have hp0: p ^ (k + l) ≠ 0, from pow_ne_zero _ hp.ne_zero,
have hpd : p ∣ x * y, from ⟨z, by rwa [mul_right_inj' hp0] at h⟩,
(hp.dvd_or_dvd hpd).elim
(λ ⟨d, hd⟩, or.inl ⟨d, by simp [*, pow_succ, mul_comm, mul_left_comm, mul_assoc]⟩)
(λ ⟨d, hd⟩, or.inr ⟨d, by simp [*, pow_succ, mul_comm, mul_left_comm, mul_assoc]⟩)
/-- If `p` and `q` are irreducible, then `p ∣ q` implies `q ∣ p`. -/
lemma irreducible.dvd_symm [monoid α] {p q : α}
(hp : irreducible p) (hq : irreducible q) : p ∣ q → q ∣ p :=
begin
tactic.unfreeze_local_instances,
rintros ⟨q', rfl⟩,
rw is_unit.mul_right_dvd (or.resolve_left (of_irreducible_mul hq) hp.not_unit),
end
lemma irreducible.dvd_comm [monoid α] {p q : α}
(hp : irreducible p) (hq : irreducible q) : p ∣ q ↔ q ∣ p :=
⟨hp.dvd_symm hq, hq.dvd_symm hp⟩
/-- Two elements of a `monoid` are `associated` if one of them is another one
multiplied by a unit on the right. -/
def associated [monoid α] (x y : α) : Prop := ∃u:units α, x * u = y
local infix ` ~ᵤ ` : 50 := associated
namespace associated
@[refl] protected theorem refl [monoid α] (x : α) : x ~ᵤ x := ⟨1, by simp⟩
@[symm] protected theorem symm [monoid α] : ∀{x y : α}, x ~ᵤ y → y ~ᵤ x
| x _ ⟨u, rfl⟩ := ⟨u⁻¹, by rw [mul_assoc, units.mul_inv, mul_one]⟩
@[trans] protected theorem trans [monoid α] : ∀{x y z : α}, x ~ᵤ y → y ~ᵤ z → x ~ᵤ z
| x _ _ ⟨u, rfl⟩ ⟨v, rfl⟩ := ⟨u * v, by rw [units.coe_mul, mul_assoc]⟩
/-- The setoid of the relation `x ~ᵤ y` iff there is a unit `u` such that `x * u = y` -/
protected def setoid (α : Type*) [monoid α] : setoid α :=
{ r := associated, iseqv := ⟨associated.refl, λa b, associated.symm, λa b c, associated.trans⟩ }
end associated
local attribute [instance] associated.setoid
theorem unit_associated_one [monoid α] {u : units α} : (u : α) ~ᵤ 1 := ⟨u⁻¹, units.mul_inv u⟩
theorem associated_one_iff_is_unit [monoid α] {a : α} : (a : α) ~ᵤ 1 ↔ is_unit a :=
iff.intro
(assume h, let ⟨c, h⟩ := h.symm in h ▸ ⟨c, (one_mul _).symm⟩)
(assume ⟨c, h⟩, associated.symm ⟨c, by simp [h]⟩)
theorem associated_zero_iff_eq_zero [monoid_with_zero α] (a : α) : a ~ᵤ 0 ↔ a = 0 :=
iff.intro
(assume h, let ⟨u, h⟩ := h.symm in by simpa using h.symm)
(assume h, h ▸ associated.refl a)
theorem associated_one_of_mul_eq_one [comm_monoid α] {a : α} (b : α) (hab : a * b = 1) : a ~ᵤ 1 :=
show (units.mk_of_mul_eq_one a b hab : α) ~ᵤ 1, from unit_associated_one
theorem associated_one_of_associated_mul_one [comm_monoid α] {a b : α} :
a * b ~ᵤ 1 → a ~ᵤ 1
| ⟨u, h⟩ := associated_one_of_mul_eq_one (b * u) $ by simpa [mul_assoc] using h
lemma associated_mul_unit_left {β : Type*} [monoid β] (a u : β) (hu : is_unit u) :
associated (a * u) a :=
let ⟨u', hu⟩ := hu in ⟨u'⁻¹, hu ▸ units.mul_inv_cancel_right _ _⟩
lemma associated_unit_mul_left {β : Type*} [comm_monoid β] (a u : β) (hu : is_unit u) :
associated (u * a) a :=
begin
rw mul_comm,
exact associated_mul_unit_left _ _ hu
end
lemma associated_mul_unit_right {β : Type*} [monoid β] (a u : β) (hu : is_unit u) :
associated a (a * u) :=
(associated_mul_unit_left a u hu).symm
lemma associated_unit_mul_right {β : Type*} [comm_monoid β] (a u : β) (hu : is_unit u) :
associated a (u * a) :=
(associated_unit_mul_left a u hu).symm
lemma associated.mul_mul [comm_monoid α] {a₁ a₂ b₁ b₂ : α} :
a₁ ~ᵤ b₁ → a₂ ~ᵤ b₂ → (a₁ * a₂) ~ᵤ (b₁ * b₂)
| ⟨c₁, h₁⟩ ⟨c₂, h₂⟩ := ⟨c₁ * c₂, by simp [h₁.symm, h₂.symm, mul_assoc, mul_comm, mul_left_comm]⟩
lemma associated.mul_left [comm_monoid α] (a : α) {b c : α} (h : b ~ᵤ c) :
(a * b) ~ᵤ (a * c) :=
(associated.refl a).mul_mul h
lemma associated.mul_right [comm_monoid α] {a b : α} (h : a ~ᵤ b) (c : α) :
(a * c) ~ᵤ (b * c) :=
h.mul_mul (associated.refl c)
lemma associated.pow_pow [comm_monoid α] {a b : α} {n : ℕ} (h : a ~ᵤ b) :
a ^ n ~ᵤ b ^ n :=
begin
induction n with n ih, { simp [h] },
convert h.mul_mul ih;
rw pow_succ
end
protected lemma associated.dvd [monoid α] {a b : α} : a ~ᵤ b → a ∣ b := λ ⟨u, hu⟩, ⟨u, hu.symm⟩
protected lemma associated.dvd_dvd [monoid α] {a b : α} (h : a ~ᵤ b) : a ∣ b ∧ b ∣ a :=
⟨h.dvd, h.symm.dvd⟩
theorem associated_of_dvd_dvd [cancel_monoid_with_zero α]
{a b : α} (hab : a ∣ b) (hba : b ∣ a) : a ~ᵤ b :=
begin
rcases hab with ⟨c, rfl⟩,
rcases hba with ⟨d, a_eq⟩,
by_cases ha0 : a = 0,
{ simp [*] at * },
have hac0 : a * c ≠ 0,
{ intro con, rw [con, zero_mul] at a_eq, apply ha0 a_eq, },
have : a * (c * d) = a * 1 := by rw [← mul_assoc, ← a_eq, mul_one],
have hcd : (c * d) = 1, from mul_left_cancel₀ ha0 this,
have : a * c * (d * c) = a * c * 1 := by rw [← mul_assoc, ← a_eq, mul_one],
have hdc : d * c = 1, from mul_left_cancel₀ hac0 this,
exact ⟨⟨c, d, hcd, hdc⟩, rfl⟩
end
theorem dvd_dvd_iff_associated [cancel_monoid_with_zero α] {a b : α} : a ∣ b ∧ b ∣ a ↔ a ~ᵤ b :=
⟨λ ⟨h1, h2⟩, associated_of_dvd_dvd h1 h2, associated.dvd_dvd⟩
lemma exists_associated_mem_of_dvd_prod [comm_cancel_monoid_with_zero α] {p : α}
(hp : prime p) {s : multiset α} : (∀ r ∈ s, prime r) → p ∣ s.prod → ∃ q ∈ s, p ~ᵤ q :=
multiset.induction_on s (by simp [mt is_unit_iff_dvd_one.2 hp.not_unit])
(λ a s ih hs hps, begin
rw [multiset.prod_cons] at hps,
cases hp.dvd_or_dvd hps with h h,
{ use [a, by simp],
cases h with u hu,
cases (((hs a (multiset.mem_cons.2 (or.inl rfl))).irreducible)
.is_unit_or_is_unit hu).resolve_left hp.not_unit with v hv,
exact ⟨v, by simp [hu, hv]⟩ },
{ rcases ih (λ r hr, hs _ (multiset.mem_cons.2 (or.inr hr))) h with ⟨q, hq₁, hq₂⟩,
exact ⟨q, multiset.mem_cons.2 (or.inr hq₁), hq₂⟩ }
end)
lemma associated.dvd_iff_dvd_left [monoid α] {a b c : α} (h : a ~ᵤ b) : a ∣ c ↔ b ∣ c :=
let ⟨u, hu⟩ := h in hu ▸ units.mul_right_dvd.symm
lemma associated.dvd_iff_dvd_right [monoid α] {a b c : α} (h : b ~ᵤ c) : a ∣ b ↔ a ∣ c :=
let ⟨u, hu⟩ := h in hu ▸ units.dvd_mul_right.symm
lemma associated.eq_zero_iff [monoid_with_zero α] {a b : α} (h : a ~ᵤ b) : a = 0 ↔ b = 0 :=
⟨λ ha, let ⟨u, hu⟩ := h in by simp [hu.symm, ha],
λ hb, let ⟨u, hu⟩ := h.symm in by simp [hu.symm, hb]⟩
lemma associated.ne_zero_iff [monoid_with_zero α] {a b : α} (h : a ~ᵤ b) : a ≠ 0 ↔ b ≠ 0 :=
not_congr h.eq_zero_iff
protected lemma associated.prime [comm_monoid_with_zero α] {p q : α} (h : p ~ᵤ q) (hp : prime p) :
prime q :=
⟨h.ne_zero_iff.1 hp.ne_zero,
let ⟨u, hu⟩ := h in
⟨λ ⟨v, hv⟩, hp.not_unit ⟨v * u⁻¹, by simp [hv, hu.symm]⟩,
hu ▸ by { simp [units.mul_right_dvd], intros a b, exact hp.dvd_or_dvd }⟩⟩
lemma irreducible.associated_of_dvd [cancel_monoid_with_zero α] {p q : α}
(p_irr : irreducible p) (q_irr : irreducible q) (dvd : p ∣ q) : associated p q :=
associated_of_dvd_dvd dvd (p_irr.dvd_symm q_irr dvd)
lemma irreducible.dvd_irreducible_iff_associated [cancel_monoid_with_zero α]
{p q : α} (pp : irreducible p) (qp : irreducible q) :
p ∣ q ↔ associated p q :=
⟨irreducible.associated_of_dvd pp qp, associated.dvd⟩
lemma prime.associated_of_dvd [comm_cancel_monoid_with_zero α] {p q : α}
(p_prime : prime p) (q_prime : prime q) (dvd : p ∣ q) : associated p q :=
p_prime.irreducible.associated_of_dvd q_prime.irreducible dvd
theorem prime.dvd_prime_iff_associated [comm_cancel_monoid_with_zero α]
{p q : α} (pp : prime p) (qp : prime q) :
p ∣ q ↔ associated p q :=
pp.irreducible.dvd_irreducible_iff_associated qp.irreducible
lemma associated.prime_iff [comm_monoid_with_zero α] {p q : α}
(h : p ~ᵤ q) : prime p ↔ prime q :=
⟨h.prime, h.symm.prime⟩
protected lemma associated.is_unit [monoid α] {a b : α} (h : a ~ᵤ b) : is_unit a → is_unit b :=
let ⟨u, hu⟩ := h in λ ⟨v, hv⟩, ⟨v * u, by simp [hv, hu.symm]⟩
lemma associated.is_unit_iff [monoid α] {a b : α} (h : a ~ᵤ b) : is_unit a ↔ is_unit b :=
⟨h.is_unit, h.symm.is_unit⟩
protected lemma associated.irreducible [monoid α] {p q : α} (h : p ~ᵤ q)
(hp : irreducible p) : irreducible q :=
⟨mt h.symm.is_unit hp.1,
let ⟨u, hu⟩ := h in λ a b hab,
have hpab : p = a * (b * (u⁻¹ : units α)),
from calc p = (p * u) * (u ⁻¹ : units α) : by simp
... = _ : by rw hu; simp [hab, mul_assoc],
(hp.is_unit_or_is_unit hpab).elim or.inl (λ ⟨v, hv⟩, or.inr ⟨v * u, by simp [hv]⟩)⟩
protected lemma associated.irreducible_iff [monoid α] {p q : α} (h : p ~ᵤ q) :
irreducible p ↔ irreducible q :=
⟨h.irreducible, h.symm.irreducible⟩
lemma associated.of_mul_left [comm_cancel_monoid_with_zero α] {a b c d : α}
(h : a * b ~ᵤ c * d) (h₁ : a ~ᵤ c) (ha : a ≠ 0) : b ~ᵤ d :=
let ⟨u, hu⟩ := h in let ⟨v, hv⟩ := associated.symm h₁ in
⟨u * (v : units α), mul_left_cancel₀ ha
begin
rw [← hv, mul_assoc c (v : α) d, mul_left_comm c, ← hu],
simp [hv.symm, mul_assoc, mul_comm, mul_left_comm]
end⟩
lemma associated.of_mul_right [comm_cancel_monoid_with_zero α] {a b c d : α} :
a * b ~ᵤ c * d → b ~ᵤ d → b ≠ 0 → a ~ᵤ c :=
by rw [mul_comm a, mul_comm c]; exact associated.of_mul_left
section unique_units
variables [monoid α] [unique (units α)]
/-- When `units α` is a subsingleton (`unique`), every unit equals `1`. -/
lemma units_eq_one (u : units α) : u = 1 := subsingleton.elim u 1
theorem associated_iff_eq {x y : α} : x ~ᵤ y ↔ x = y :=
begin
split,
{ rintro ⟨c, rfl⟩, rw [units_eq_one c, units.coe_one, mul_one] },
{ rintro rfl, refl },
end
theorem associated_eq_eq : (associated : α → α → Prop) = eq :=
by { ext, rw associated_iff_eq }
end unique_units
/-- The quotient of a monoid by the `associated` relation. Two elements `x` and `y`
are associated iff there is a unit `u` such that `x * u = y`. There is a natural
monoid structure on `associates α`. -/
def associates (α : Type*) [monoid α] : Type* :=
quotient (associated.setoid α)
namespace associates
open associated
/-- The canonical quotient map from a monoid `α` into the `associates` of `α` -/
protected def mk {α : Type*} [monoid α] (a : α) : associates α :=
⟦ a ⟧
instance [monoid α] : inhabited (associates α) := ⟨⟦1⟧⟩
theorem mk_eq_mk_iff_associated [monoid α] {a b : α} :
associates.mk a = associates.mk b ↔ a ~ᵤ b :=
iff.intro quotient.exact quot.sound
theorem quotient_mk_eq_mk [monoid α] (a : α) : ⟦ a ⟧ = associates.mk a := rfl
theorem quot_mk_eq_mk [monoid α] (a : α) : quot.mk setoid.r a = associates.mk a := rfl
theorem forall_associated [monoid α] {p : associates α → Prop} :
(∀a, p a) ↔ (∀a, p (associates.mk a)) :=
iff.intro
(assume h a, h _)
(assume h a, quotient.induction_on a h)
theorem mk_surjective [monoid α] : function.surjective (@associates.mk α _) :=
forall_associated.2 (λ a, ⟨a, rfl⟩)
instance [monoid α] : has_one (associates α) := ⟨⟦ 1 ⟧⟩
theorem one_eq_mk_one [monoid α] : (1 : associates α) = associates.mk 1 := rfl
instance [monoid α] : has_bot (associates α) := ⟨1⟩
lemma exists_rep [monoid α] (a : associates α) : ∃ a0 : α, associates.mk a0 = a :=
quot.exists_rep a
section comm_monoid
variable [comm_monoid α]
instance : has_mul (associates α) :=
⟨λa' b', quotient.lift_on₂ a' b' (λa b, ⟦ a * b ⟧) $
assume a₁ a₂ b₁ b₂ ⟨c₁, h₁⟩ ⟨c₂, h₂⟩,
quotient.sound $ ⟨c₁ * c₂, by simp [h₁.symm, h₂.symm, mul_assoc, mul_comm, mul_left_comm]⟩⟩
theorem mk_mul_mk {x y : α} : associates.mk x * associates.mk y = associates.mk (x * y) :=
rfl
instance : comm_monoid (associates α) :=
{ one := 1,
mul := (*),
mul_one := assume a', quotient.induction_on a' $
assume a, show ⟦a * 1⟧ = ⟦ a ⟧, by simp,
one_mul := assume a', quotient.induction_on a' $
assume a, show ⟦1 * a⟧ = ⟦ a ⟧, by simp,
mul_assoc := assume a' b' c', quotient.induction_on₃ a' b' c' $
assume a b c, show ⟦a * b * c⟧ = ⟦a * (b * c)⟧, by rw [mul_assoc],
mul_comm := assume a' b', quotient.induction_on₂ a' b' $
assume a b, show ⟦a * b⟧ = ⟦b * a⟧, by rw [mul_comm] }
instance : preorder (associates α) :=
{ le := has_dvd.dvd,
le_refl := dvd_refl,
le_trans := λ a b c, dvd_trans}
@[simp] lemma mk_one : associates.mk (1 : α) = 1 := rfl
/-- `associates.mk` as a `monoid_hom`. -/
protected def mk_monoid_hom : α →* (associates α) := ⟨associates.mk, mk_one, λ x y, mk_mul_mk⟩
@[simp] lemma mk_monoid_hom_apply (a : α) : associates.mk_monoid_hom a = associates.mk a := rfl
lemma associated_map_mk {f : associates α →* α}
(hinv : function.right_inverse f associates.mk) (a : α) :
a ~ᵤ f (associates.mk a) :=
associates.mk_eq_mk_iff_associated.1 (hinv (associates.mk a)).symm
lemma mk_pow (a : α) (n : ℕ) : associates.mk (a ^ n) = (associates.mk a) ^ n :=
by induction n; simp [*, pow_succ, associates.mk_mul_mk.symm]
lemma dvd_eq_le : ((∣) : associates α → associates α → Prop) = (≤) := rfl
theorem prod_mk {p : multiset α} : (p.map associates.mk).prod = associates.mk p.prod :=
multiset.induction_on p (by simp; refl) $ assume a s ih, by simp [ih]; refl
theorem rel_associated_iff_map_eq_map {p q : multiset α} :
multiset.rel associated p q ↔ p.map associates.mk = q.map associates.mk :=
by { rw [← multiset.rel_eq, multiset.rel_map], simp only [mk_eq_mk_iff_associated] }
theorem mul_eq_one_iff {x y : associates α} : x * y = 1 ↔ (x = 1 ∧ y = 1) :=
iff.intro
(quotient.induction_on₂ x y $ assume a b h,
have a * b ~ᵤ 1, from quotient.exact h,
⟨quotient.sound $ associated_one_of_associated_mul_one this,
quotient.sound $ associated_one_of_associated_mul_one $ by rwa [mul_comm] at this⟩)
(by simp {contextual := tt})
theorem prod_eq_one_iff {p : multiset (associates α)} :
p.prod = 1 ↔ (∀a ∈ p, (a:associates α) = 1) :=
multiset.induction_on p
(by simp)
(by simp [mul_eq_one_iff, or_imp_distrib, forall_and_distrib] {contextual := tt})
theorem units_eq_one (u : units (associates α)) : u = 1 :=
units.ext (mul_eq_one_iff.1 u.val_inv).1
instance unique_units : unique (units (associates α)) :=
{ default := 1, uniq := associates.units_eq_one }
theorem coe_unit_eq_one (u : units (associates α)): (u : associates α) = 1 :=
by simp
theorem is_unit_iff_eq_one (a : associates α) : is_unit a ↔ a = 1 :=
iff.intro
(assume ⟨u, h⟩, h ▸ coe_unit_eq_one _)
(assume h, h.symm ▸ is_unit_one)
theorem is_unit_mk {a : α} : is_unit (associates.mk a) ↔ is_unit a :=
calc is_unit (associates.mk a) ↔ a ~ᵤ 1 :
by rw [is_unit_iff_eq_one, one_eq_mk_one, mk_eq_mk_iff_associated]
... ↔ is_unit a : associated_one_iff_is_unit
section order
theorem mul_mono {a b c d : associates α} (h₁ : a ≤ b) (h₂ : c ≤ d) :
a * c ≤ b * d :=
let ⟨x, hx⟩ := h₁, ⟨y, hy⟩ := h₂ in
⟨x * y, by simp [hx, hy, mul_comm, mul_assoc, mul_left_comm]⟩
theorem one_le {a : associates α} : 1 ≤ a :=
dvd.intro _ (one_mul a)
theorem prod_le_prod {p q : multiset (associates α)} (h : p ≤ q) : p.prod ≤ q.prod :=
begin
haveI := classical.dec_eq (associates α),
haveI := classical.dec_eq α,
suffices : p.prod ≤ (p + (q - p)).prod, { rwa [add_tsub_cancel_of_le h] at this },
suffices : p.prod * 1 ≤ p.prod * (q - p).prod, { simpa },
exact mul_mono (le_refl p.prod) one_le
end
theorem le_mul_right {a b : associates α} : a ≤ a * b := ⟨b, rfl⟩
theorem le_mul_left {a b : associates α} : a ≤ b * a :=
by rw [mul_comm]; exact le_mul_right
instance : order_bot (associates α) :=
{ bot := 1,
bot_le := assume a, one_le }
end order
end comm_monoid
instance [has_zero α] [monoid α] : has_zero (associates α) := ⟨⟦ 0 ⟧⟩
instance [has_zero α] [monoid α] : has_top (associates α) := ⟨0⟩
section comm_monoid_with_zero
variables [comm_monoid_with_zero α]
@[simp] theorem mk_eq_zero {a : α} : associates.mk a = 0 ↔ a = 0 :=
⟨assume h, (associated_zero_iff_eq_zero a).1 $ quotient.exact h, assume h, h.symm ▸ rfl⟩
theorem mk_ne_zero {a : α} : associates.mk a ≠ 0 ↔ a ≠ 0 :=
not_congr mk_eq_zero
instance : comm_monoid_with_zero (associates α) :=
{ zero_mul := by { rintro ⟨a⟩, show associates.mk (0 * a) = associates.mk 0, rw [zero_mul] },
mul_zero := by { rintro ⟨a⟩, show associates.mk (a * 0) = associates.mk 0, rw [mul_zero] },
.. associates.comm_monoid, .. associates.has_zero }
instance : order_top (associates α) :=
{ top := 0,
le_top := assume a, ⟨0, (mul_zero a).symm⟩ }
instance : bounded_order (associates α) :=
{ .. associates.order_top,
.. associates.order_bot }
instance [nontrivial α] : nontrivial (associates α) :=
⟨⟨0, 1,
assume h,
have (0 : α) ~ᵤ 1, from quotient.exact h,
have (0 : α) = 1, from ((associated_zero_iff_eq_zero 1).1 this.symm).symm,
zero_ne_one this⟩⟩
lemma exists_non_zero_rep {a : associates α} : a ≠ 0 → ∃ a0 : α, a0 ≠ 0 ∧ associates.mk a0 = a :=
quotient.induction_on a (λ b nz, ⟨b, mt (congr_arg quotient.mk) nz, rfl⟩)
theorem dvd_of_mk_le_mk {a b : α} : associates.mk a ≤ associates.mk b → a ∣ b
| ⟨c', hc'⟩ := (quotient.induction_on c' $ assume c hc,
let ⟨d, hd⟩ := (quotient.exact hc).symm in
⟨(↑d) * c,
calc b = (a * c) * ↑d : hd.symm
... = a * (↑d * c) : by ac_refl⟩) hc'
theorem mk_le_mk_of_dvd {a b : α} : a ∣ b → associates.mk a ≤ associates.mk b :=
assume ⟨c, hc⟩, ⟨associates.mk c, by simp [hc]; refl⟩
theorem mk_le_mk_iff_dvd_iff {a b : α} : associates.mk a ≤ associates.mk b ↔ a ∣ b :=
iff.intro dvd_of_mk_le_mk mk_le_mk_of_dvd
theorem mk_dvd_mk {a b : α} : associates.mk a ∣ associates.mk b ↔ a ∣ b :=
iff.intro dvd_of_mk_le_mk mk_le_mk_of_dvd
lemma prime.le_or_le {p : associates α} (hp : prime p) {a b : associates α} (h : p ≤ a * b) :
p ≤ a ∨ p ≤ b :=
hp.2.2 a b h
lemma exists_mem_multiset_le_of_prime {s : multiset (associates α)} {p : associates α}
(hp : prime p) :
p ≤ s.prod → ∃a∈s, p ≤ a :=
multiset.induction_on s (assume ⟨d, eq⟩, (hp.ne_one (mul_eq_one_iff.1 eq.symm).1).elim) $
assume a s ih h,
have p ≤ a * s.prod, by simpa using h,
match prime.le_or_le hp this with
| or.inl h := ⟨a, multiset.mem_cons_self a s, h⟩
| or.inr h := let ⟨a, has, h⟩ := ih h in ⟨a, multiset.mem_cons_of_mem has, h⟩
end
lemma prime_mk (p : α) : prime (associates.mk p) ↔ _root_.prime p :=
begin
rw [prime, _root_.prime, forall_associated],
transitivity,
{ apply and_congr, refl,
apply and_congr, refl,
apply forall_congr, assume a,
exact forall_associated },
apply and_congr mk_ne_zero,
apply and_congr,
{ rw [is_unit_mk], },
apply forall_congr, assume a,
apply forall_congr, assume b,
rw [mk_mul_mk, mk_dvd_mk, mk_dvd_mk, mk_dvd_mk],
end
theorem irreducible_mk (a : α) : irreducible (associates.mk a) ↔ irreducible a :=
begin
simp only [irreducible_iff, is_unit_mk],
apply and_congr iff.rfl,
split,
{ rintro h x y rfl,
simpa [is_unit_mk] using h (associates.mk x) (associates.mk y) rfl },
{ intros h x y,
refine quotient.induction_on₂ x y (assume x y a_eq, _),
rcases quotient.exact a_eq.symm with ⟨u, a_eq⟩,
rw mul_assoc at a_eq,
show is_unit (associates.mk x) ∨ is_unit (associates.mk y),
simpa [is_unit_mk] using h _ _ a_eq.symm }
end
theorem mk_dvd_not_unit_mk_iff {a b : α} :
dvd_not_unit (associates.mk a) (associates.mk b) ↔
dvd_not_unit a b :=
begin
rw [dvd_not_unit, dvd_not_unit, mk_ne_zero],
apply and_congr_right, intro ane0,
split,
{ contrapose!, rw forall_associated,
intros h x hx hbax,
rw [mk_mul_mk, mk_eq_mk_iff_associated] at hbax,
cases hbax with u hu,
apply h (x * ↑u⁻¹),
{ rw is_unit_mk at hx,
rw associated.is_unit_iff,
apply hx,
use u,
simp, },
simp [← mul_assoc, ← hu] },
{ rintro ⟨x, ⟨hx, rfl⟩⟩,
use associates.mk x,
simp [is_unit_mk, mk_mul_mk, hx], }
end
theorem dvd_not_unit_of_lt {a b : associates α} (hlt : a < b) :
dvd_not_unit a b :=
begin
split, { rintro rfl, apply not_lt_of_le _ hlt, apply dvd_zero },
rcases hlt with ⟨⟨x, rfl⟩, ndvd⟩,
refine ⟨x, _, rfl⟩,
contrapose! ndvd,
rcases ndvd with ⟨u, rfl⟩,
simp,
end
end comm_monoid_with_zero
section comm_cancel_monoid_with_zero
variable [comm_cancel_monoid_with_zero α]
instance : partial_order (associates α) :=
{ le_antisymm := λ a' b', quotient.induction_on₂ a' b' (λ a b hab hba,
quot.sound $ associated_of_dvd_dvd (dvd_of_mk_le_mk hab) (dvd_of_mk_le_mk hba))
.. associates.preorder }
instance : no_zero_divisors (associates α) :=
⟨λ x y,
(quotient.induction_on₂ x y $ assume a b h,
have a * b = 0, from (associated_zero_iff_eq_zero _).1 (quotient.exact h),
have a = 0 ∨ b = 0, from mul_eq_zero.1 this,
this.imp (assume h, h.symm ▸ rfl) (assume h, h.symm ▸ rfl))⟩
theorem irreducible_iff_prime_iff :
(∀ a : α, irreducible a ↔ prime a) ↔ (∀ a : (associates α), irreducible a ↔ prime a) :=
begin
rw forall_associated, split;
intros h a; have ha := h a; rw irreducible_mk at *; rw prime_mk at *; exact ha,
end
lemma eq_of_mul_eq_mul_left :
∀(a b c : associates α), a ≠ 0 → a * b = a * c → b = c :=
begin
rintros ⟨a⟩ ⟨b⟩ ⟨c⟩ ha h,
rcases quotient.exact' h with ⟨u, hu⟩,
have hu : a * (b * ↑u) = a * c, { rwa [← mul_assoc] },
exact quotient.sound' ⟨u, mul_left_cancel₀ (mk_ne_zero.1 ha) hu⟩
end
lemma eq_of_mul_eq_mul_right :
∀(a b c : associates α), b ≠ 0 → a * b = c * b → a = c :=
λ a b c bne0, (mul_comm b a) ▸ (mul_comm b c) ▸ (eq_of_mul_eq_mul_left b a c bne0)
lemma le_of_mul_le_mul_left (a b c : associates α) (ha : a ≠ 0) :
a * b ≤ a * c → b ≤ c
| ⟨d, hd⟩ := ⟨d, eq_of_mul_eq_mul_left a _ _ ha $ by rwa ← mul_assoc⟩
lemma one_or_eq_of_le_of_prime :
∀(p m : associates α), prime p → m ≤ p → (m = 1 ∨ m = p)
| _ m ⟨hp0, hp1, h⟩ ⟨d, rfl⟩ :=
match h m d dvd_rfl with
| or.inl h := classical.by_cases (assume : m = 0, by simp [this]) $
assume : m ≠ 0,
have m * d ≤ m * 1, by simpa using h,
have d ≤ 1, from associates.le_of_mul_le_mul_left m d 1 ‹m ≠ 0› this,
have d = 1, from bot_unique this,
by simp [this]
| or.inr h := classical.by_cases (assume : d = 0, by simp [this] at hp0; contradiction) $
assume : d ≠ 0,
have d * m ≤ d * 1, by simpa [mul_comm] using h,
or.inl $ bot_unique $ associates.le_of_mul_le_mul_left d m 1 ‹d ≠ 0› this
end
instance : comm_cancel_monoid_with_zero (associates α) :=
{ mul_left_cancel_of_ne_zero := eq_of_mul_eq_mul_left,
mul_right_cancel_of_ne_zero := eq_of_mul_eq_mul_right,
.. (infer_instance : comm_monoid_with_zero (associates α)) }
theorem dvd_not_unit_iff_lt {a b : associates α} :
dvd_not_unit a b ↔ a < b :=
dvd_and_not_dvd_iff.symm
end comm_cancel_monoid_with_zero
end associates
namespace multiset
lemma prod_ne_zero_of_prime [comm_cancel_monoid_with_zero α] [nontrivial α]
(s : multiset α) (h : ∀ x ∈ s, prime x) : s.prod ≠ 0 :=
multiset.prod_ne_zero (λ h0, prime.ne_zero (h 0 h0) rfl)
end multiset
| {
"alphanum_fraction": null,
"author": "jjaassoonn",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/lean/jjaassoonn-projective_space/projective_space-11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce/src/algebra/associated.lean",
"reason": null,
"repo": "projective_space",
"save_path": "github-repos/lean/jjaassoonn-projective_space",
"sha": "11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce",
"size": null
} |
# Fit a regularized, linear time-invariant (LTI) model.
println("Fitting linear, time-invariant model")
# n: model order (number of coefficients); λ: regularization weight.
# NOTE(review): values look hand-tuned — confirm against the test data.
n,λ = 150,2.48
# N1, p1, Q1 and estfun are defined elsewhere in this test suite — TODO confirm.
M2 = RegularizedLTIModel(n,N1,λ)
lti = estfun(M2,p1,Q1)
| {
"alphanum_fraction": 0.7166666667,
"author": null,
"avg_line_length": 20,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "7afba9996c1ecfd503c9b4e359e971734b956aab",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "649709a3feb8caed4e630a21f7b3b7349a93e9f0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "wkearn/TidalDischargeModels.jl",
"max_forks_repo_path": "test/lti.jl",
"max_issues_count": 14,
"max_issues_repo_head_hexsha": "649709a3feb8caed4e630a21f7b3b7349a93e9f0",
"max_issues_repo_issues_event_max_datetime": "2017-10-18T16:43:55.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-03-11T19:24:39.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "wkearn/TidalDischargeModels.jl",
"max_issues_repo_path": "test/lti.jl",
"max_line_length": 47,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "649709a3feb8caed4e630a21f7b3b7349a93e9f0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "wkearn/TidalDischargeModels.jl",
"max_stars_repo_path": "test/lti.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 48,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 120
} |
"""
Utiltiy helper functions of Machine Learning
"""
import numpy as np
def sigmoid(val: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid.

    .. math::
        f(z) = \\frac{1}{1 + e^{-z}}

    Args:
        val (ndarray): input value

    Returns:
        np.ndarray: sigmoid value, each entry mapped into (0, 1)
    """
    exp_neg = np.exp(-val)
    return np.reciprocal(1.0 + exp_neg)
def moving_window_matrix(arr: np.ndarray, window: int, lag: int = 1) -> np.ndarray:
    """Create Moving Window matrix for 1D data.

    Each output row is one window of length ``window``; consecutive windows
    start ``lag`` samples apart.

    More details on this function.
    https://machinelearningexploration.readthedocs.io/en/latest/MathExploration/MovingWindow.html

    Args:
        arr (np.ndarray): input 1D array.
        window (int): window/ number of columns.
        lag (int, optional): lag count for moving. Defaults to 1.

    Returns:
        np.ndarray: transformed matrix of shape
            (ceil((len(arr) - window + 1) / lag), window).

    Raises:
        AssertionError: input array shape should be 1D like (m,).
        AssertionError: length of array should be greater than window size and lag.

    Example:
        >>> a = np.random.rand(100)
        >>> print(moving_window_matrix(a, 20, 2))
    """
    assert len(np.shape(arr)) == 1, 'input array shape should be 1D like (m,).'
    size = arr.shape[0]
    assert size > window and size > lag, \
        'length of array should be greater than window size and lag.'
    span = size - window + 1                    # number of windows at lag == 1
    n_windows = int(np.ceil(span / lag))        # windows kept after striding by lag
    out = np.empty(shape=(n_windows, window))   # float64, matching np.empty default
    for col in range(window):
        # Column `col` holds the col-th element of every strided window.
        out[:, col] = arr[col: col + span: lag]
    return out
if __name__ == "__main__":
    # Manual smoke-test snippets kept for reference; the module does nothing
    # when executed directly. `trend` and `polynomial_regression` below are
    # presumably siblings elsewhere in the package — confirm before reviving.
    # a = np.random.rand(100)
    # print(moving_window_matrix(a, 20, 2))
    # import matplotlib.pyplot as plt
    # x = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    # y = np.array([1, 2, 3, 3, 4, 5, 7, 10])
    # s, r, t = trend(x, y)
    # plt.plot(x, y, 'o', label='original')
    # plt.plot(x, t, '.-', label='regression line')
    # plt.legend()
    # plt.show()
    # x = np.arange(-10, 10)
    # y = x**2 + x**3
    # s, r, l = polynomial_regression(x, y, 3)
    # plt.plot(x, y, 'ko', label='original')
    # plt.plot(x, l, '.-', label='regression line')
    # plt.legend()
    # plt.show()
    pass
| {
"alphanum_fraction": 0.5892696123,
"author": null,
"avg_line_length": 24.9213483146,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "699de411495f604474cfae0460531563b0b026bf",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8219ae5cfc462e02f04bec6bdd7e3751b57d2a25",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "NishantBaheti/mightypy",
"max_forks_repo_path": "src/mightypy/ml/_utils.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8219ae5cfc462e02f04bec6bdd7e3751b57d2a25",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "NishantBaheti/mightypy",
"max_issues_repo_path": "src/mightypy/ml/_utils.py",
"max_line_length": 97,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "8219ae5cfc462e02f04bec6bdd7e3751b57d2a25",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "NishantBaheti/mightypy",
"max_stars_repo_path": "src/mightypy/ml/_utils.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-03T19:32:45.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-02-03T19:32:45.000Z",
"num_tokens": 644,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2218
} |
"""
Result container for a constrained least-squares fit.
"""
struct LSQOutput
    β::Vector{Float64}    # estimated unknown parameters
    ζ::Vector{Float64}    # parameters associated with `zindex` (measured side)
    Σβ::Matrix{Float64}   # covariance of β
    Σζ::Matrix{Float64}   # covariance of ζ
    Σβζ::Matrix{Float64}  # cross-covariance between β and ζ
    χ²::Float64           # fit residual statistic
    dof::Int              # degrees of freedom
    βindex::Dict{Symbol, Union{Int, UnitRange{Int}}}  # name → position(s) within β
    zindex::Dict{Symbol, Union{Int, UnitRange{Int}}}  # name → position(s) within z
end
# Return the keys of `index` sorted by their stored positions (values).
_names_ordered(index) = map(x->x[1], sort(collect(index), by=x->x[2]))
"""Return names of measured parameters `z`, ordered by position."""
names_z(output::LSQOutput) = _names_ordered(output.zindex)
"""Return names of unknown parameters `β`, ordered by position."""
names_β(output::LSQOutput) = _names_ordered(output.βindex)
"""Return standard uncertainty of unknown parameters `β` (square roots of the diagonal of `Σβ`)."""
uncertainty_β(output::LSQOutput) = [sqrt(output.Σβ[i, i]) for i in 1:length(output.β)]
"""Return covariance matrix of unknown parameters `β`, labelled with parameter names."""
function covariance_β(output::LSQOutput)
    names = names_β(output)
    # NamedArray (NamedArrays.jl, imported elsewhere) attaches row/column labels.
    return NamedArray(output.Σβ, (names, names))
end
"""Return correlation matrix of unknown parameters `β`."""
function correlation_β(output::LSQOutput)
    # Normalize every covariance entry by the geometric mean of the
    # corresponding variances: corr_ij = cov_ij / sqrt(cov_ii * cov_jj).
    cov = covariance_β(output)
    n = size(cov)[1]
    corr = similar(cov)
    for j in 1:n, i in 1:n
        corr[i, j] = cov[i, j] / sqrt(cov[i, i] * cov[j, j])
    end
    return corr
end
"""Display covariance matrix of unknown parameters `β` as a labelled table."""
function print_cov_β(output::LSQOutput)
    # pretty_table presumably comes from PrettyTables.jl (imported elsewhere).
    pretty_table(
        covariance_β(output);
        row_names=names_β(output),
        header=names_β(output),
        title="covariance of β parameters"
    )
end
"""Display correlation matrix of unknown parameters `β` as a labelled table."""
function print_corr_β(output::LSQOutput)
    # pretty_table presumably comes from PrettyTables.jl (imported elsewhere).
    pretty_table(
        correlation_β(output);
        row_names=names_β(output),
        header=names_β(output),
        title="correlation of β parameters"
    )
end
"alphanum_fraction": 0.6678141136,
"author": null,
"avg_line_length": 28.5737704918,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "3c6c4a0adabf3ce867c9ca4467a2902aac1d73f2",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "147f711ea957170ddb4e4d92a979c0887be9e252",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "nluetts/ConstrainedLeastSquares.jl",
"max_forks_repo_path": "src/output.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "147f711ea957170ddb4e4d92a979c0887be9e252",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "nluetts/ConstrainedLeastSquares.jl",
"max_issues_repo_path": "src/output.jl",
"max_line_length": 86,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "147f711ea957170ddb4e4d92a979c0887be9e252",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "nluetts/ConstrainedLeastSquares.jl",
"max_stars_repo_path": "src/output.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 494,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1743
} |
"""
This is an example of the application of DeepESN model for multivariate time-series prediction task
on Piano-midi.de (see http://www-etud.iro.umontreal.ca/~boulanni/icml2012) dataset.
The dataset is a polyphonic music task characterized by 88-dimensional sequences representing musical compositions.
Starting from played notes at time t, the aim is to predict the played notes at time t+1.
Reference paper for DeepESN model:
C. Gallicchio, A. Micheli, L. Pedrelli, "Deep Reservoir Computing: A Critical Experimental Analysis",
Neurocomputing, 2017, vol. 268, pp. 87-99
In this Example we consider the hyper-parameters designed in the following paper:
C. Gallicchio, A. Micheli, L. Pedrelli, "Design of deep echo state networks",
Neural Networks, 2018, vol. 108, pp. 33-47
"""
from pathlib import Path
import time
import numpy as np
from DeepESN import DeepESN
from utils import computeMusicAccuracy, config_pianomidi, load_pianomidi, select_indexes
class Struct:
    """Empty attribute bag: instances hold whatever attributes callers assign."""
    pass
# sistemare indici per IP in config_pianomidi, mettere da un'altra parte
# translation: fix the intrinsic-plasticity (IP) index handling in config_pianomidi and move it elsewhere
# this probably refers to confusion about how intrinsic plasticity is controlled
# sistema selezione indici con transiente messi all'interno della rete
# translation: rework index selection so the transient is handled inside the network
# this probably means the explicit transient handling in main() would then be redundant
def main():
    """Train a DeepESN on Piano-midi.de and report next-step prediction accuracy."""
    # measure time for this code section
    t0 = time.perf_counter()
    # fix a seed for the reproducibility of results
    np.random.seed(7)
    # dataset path
    path = Path("data")
    (dataset,
    Nu, # dimension of a single data point
        # for example 88 for piano-midi.de
        # where 88 corresponds the number of keys on a piano
    error_function,
    optimization_problem,
    TR_indexes, # train set indices
    VL_indexes, # validation set indices
    TS_indexes # test set indices
    ) = load_pianomidi(path, computeMusicAccuracy)
    # load configuration for pianomidi task
    configs = config_pianomidi(list(TR_indexes) + list(VL_indexes))
    # Be careful with memory usage
    # TODO: What does careful with memory usage mean?
    # What are the limits?
    Nr = 50 # number of recurrent units
    Nl = 1 # number of recurrent layers
    reg = 10.0**-2 # probably refers to lambda_r, readout regularization
    # BUG: however we also set regularization in the config file
    transient = 5 # presumably a washout: steps dropped by select_indexes for training — confirm
    # initialize the ESN
    deepESN = DeepESN(Nu, Nr, Nl, configs, verbose=1)
    # compute reservoir states over the full dataset (IP flag taken from config)
    states = deepESN.computeState(dataset.inputs, deepESN.IPconf.DeepIP, verbose=1)
    # train+validation states/targets with the first `transient` steps dropped
    train_states = select_indexes(states, list(TR_indexes) + list(VL_indexes), transient)
    train_targets = select_indexes(dataset.targets, list(TR_indexes) + list(VL_indexes), transient)
    # test split keeps all time steps (no transient argument)
    test_states = select_indexes(states, TS_indexes)
    test_targets = select_indexes(dataset.targets, TS_indexes)
    # fit the linear readout with regularization `reg`
    deepESN.trainReadout(train_states, train_targets, reg)
    train_outputs = deepESN.computeOutput(train_states)
    train_error = error_function(train_outputs, train_targets)
    print(f"Training ACC: {np.mean(train_error):0.5f} \n")
    test_outputs = deepESN.computeOutput(test_states)
    test_error = error_function(test_outputs, test_targets)
    print(f"Test ACC: {np.mean(test_error):0.5f} \n")
    # duration is difference between end time and start time
    t1 = time.perf_counter()
    print(f"Time elapsed: {t1-t0:0.5f} s")
# Run the experiment only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| {
"alphanum_fraction": 0.7298127969,
"author": null,
"avg_line_length": 38.4838709677,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "380269179ae781b7f8743a2681449f105fc5f807",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "40593b083c3b2ea9a547f4c925d66a50039f7340",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "Wehzie/master-thesis",
"max_forks_repo_path": "src/main_MultivariatePolyphonic.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "40593b083c3b2ea9a547f4c925d66a50039f7340",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "Wehzie/master-thesis",
"max_issues_repo_path": "src/main_MultivariatePolyphonic.py",
"max_line_length": 115,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "40593b083c3b2ea9a547f4c925d66a50039f7340",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "Wehzie/master-thesis",
"max_stars_repo_path": "src/main_MultivariatePolyphonic.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 901,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3579
} |
'''
#Requirement: opencv-python, tkinter
# Usage instruction:
Choose points (double-click the left mouse button) in the order: LeftUpper, RightUpper, LeftLower, RightLower
Press "c" on the keyboard for confirmation
This is for a two-chamber acrylic transfer box; the dimensions are
two-chamber place preference
_________445mm___________
| |
| |
295mm |
| |
|________________________|
fish tank
_________500mm___________
| |
| |
250mm |
| |
|________________________|
water sink tank
_________480mm___________
| |
| |
| |
| |
480mm |
| |
| |
| |
|________________________|
self stim
_________250mm___________
| |
| |
180mm |
| |
|________________________|
Cross Maze Test
610 x 610 mm
_
| |
| |
| |
___________| |___________
|__________ ___________|
| |
| |
| |
|_|
water_searching_form_board
_________640mm___________
| |
| |
430mm |
| |
|________________________|
homeCage
_________280mm___________
| |
| |
180mm |
| |
|________________________|
The output video is 445 * 295 at 30fps, thus 1mm/pixel
Changes can be made, for "out" and the "pts2" for other behavior test
'''
import cv2
import numpy as np
#import matplotlib.pyplot as plt
import os
from math import hypot
from tkinter import Tk
from tkinter.filedialog import askopenfilenames
# Target pixel dimensions of the rectified output video. These match the
# "fish tank" (500 x 250 mm) layout from the header — presumably 1 mm/pixel;
# change them (and pts2 below) for other arenas.
box_length = 500
box_width = 250
posList = []  # clicked corner points (x, y); four per video, appended in click order
def draw_circle(event,x,y,flags,param):
    # Mouse callback: on a left double-click, mark the point on the global
    # preview frame `img_raw` and record its coordinates in `posList`.
    global mouseX,mouseY
    if event == cv2.EVENT_LBUTTONDBLCLK:
        # small blue dot so the user can see the selected corner
        cv2.circle(img_raw,(x,y),1,(255,0,0),-1)
        mouseX,mouseY = x,y
        posList.append((x, y))
# --- Pass 1: interactively collect four corner points for every selected video ---
root1 = Tk()
root1.withdraw()  # hide the empty Tk root window; only the file dialog is needed
filez = askopenfilenames(parent = root1, title = 'Choose file')
fourcc = cv2.VideoWriter_fourcc('m','p','4','v')
file_order = 0;
for fullFileName in root1.tk.splitlist(filez):
    print(fullFileName)
    filename = fullFileName
    (root, ext) =os.path.splitext(filename)
    duration = 1 # second  (unused in this pass; kept for the optional beep command)
    freq = 440 # Hz
    file_order +=1
    cap = cv2.VideoCapture(filename)
    i=0
    while i<2:
        i+=1
        ret, img_raw =cap.read() #start capture images from webcam
        if ret == False:
            break
        # Calibration runs only on the first iteration (i == 1): double-click
        # the four corners (LeftUpper, RightUpper, LeftLower, RightLower),
        # press "r" to advance a frame, "c" to confirm and move on.
        while i==1:
            cv2.namedWindow("image")
            cv2.setMouseCallback("image", draw_circle)
            #print(posList)
            posNp = np.array(posList)  # NOTE(review): computed but never used
            cv2.imshow('image',img_raw)
            k = cv2.waitKey(20) & 0xFF
            if k == ord("r"):
                ret, img_raw =cap.read()
                image = img_raw  # NOTE(review): `image` is never read afterwards
            if k == ord("c"):
                break
    cap.release()
    cv2.destroyAllWindows()
# --- Pass 2: perspective-correct each video using its own four recorded corners ---
j=0
for fullFileName in root1.tk.splitlist(filez):
    j+=1
    filename = fullFileName
    print(filename)
    (root, ext) =os.path.splitext(filename)
    cap = cv2.VideoCapture(filename)
    # NOTE(review): this "retry" loop breaks after a single attempt, so it
    # only prints an error when the file cannot be opened.
    while not cap.isOpened():
        cap = cv2.VideoCapture(filename)
        #os.system('play --no-show-progress --null --channels 1 synth %s sine %f' % (duration, freq))
        print("Can't load the file")
        break
    fps = cap.get(cv2.CAP_PROP_FPS)
    #print(fps)
    duration = 1 # second
    freq = 440 # Hz
    cap = cv2.VideoCapture(filename)  # reopen to rewind to the first frame
    width = int(cap.get(3))  # 3 == CAP_PROP_FRAME_WIDTH
    height = int(cap.get(4))  # 4 == CAP_PROP_FRAME_HEIGHT
    font = cv2.FONT_HERSHEY_SIMPLEX
    out = cv2.VideoWriter(root+'_GeoTran.mp4',fourcc,fps,(box_length,box_width))
    while True:
        ret, img_raw =cap.read() #start capture images from webcam
        if ret == False:
            break
        img = img_raw
        rows,cols,ch = img.shape
        # The j-th video uses the j-th group of four clicked corners.
        pts1 = np.float32(posList[(j-1)*4:(j*4)])
        pts2 = np.float32([[0,0],[box_length,0],[0,box_width],[box_length,box_width]])
        M = cv2.getPerspectiveTransform(pts1,pts2)
        dst = cv2.warpPerspective(img,M,(box_length,box_width))
        out.write(dst)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    out.release()
cv2.destroyAllWindows()
"alphanum_fraction": 0.4995901639,
"author": null,
"avg_line_length": 26.2365591398,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "55c392905745884931fe9fccacd3dea0f1f826f2",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7e82025aff0691c7f36bbea38a97349468bc3c4e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "li-shen-amy/behavior",
"max_forks_repo_path": "mouse_tracking/python_code/Batch_GeoTran.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "7e82025aff0691c7f36bbea38a97349468bc3c4e",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T08:11:19.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-03-31T08:11:19.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "li-shen-amy/lick_detect",
"max_issues_repo_path": "mouse_tracking/python_code/Batch_GeoTran.py",
"max_line_length": 222,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "7e82025aff0691c7f36bbea38a97349468bc3c4e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "li-shen-amy/lick_detect",
"max_stars_repo_path": "mouse_tracking/python_code/Batch_GeoTran.py",
"max_stars_repo_stars_event_max_datetime": "2021-02-26T00:02:24.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-02-26T00:02:24.000Z",
"num_tokens": 1117,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4880
} |
"""
# WordSlots are the main objects of the grid, they are the word "spaces" to be filled
"""
import numpy as np
class WordSlot():
    """A word "space" of the grid: a horizontal or vertical run of squares to be filled."""

    def __init__(self, identifiant, length, first_letter_position, direction, initial_letters):
        self.identifiant = identifiant                      # unique ID of this slot
        self.length = length                                # number of letters in the slot
        self.first_letter_position = first_letter_position  # [row, col] of the first square
        self.direction = direction                          # "horizontal" or "vertical"
        self.slots = self.compute_slots()                   # [row, col] of every square spanned
        self.dic_crosses = {}                               # {other slot id: [pos here, pos there]}
        self.current_letters = initial_letters              # list of chars, "." = empty square
        self.number_of_possible_words = -1                  # -1 until candidates are computed
        self.current_possible_words = []

    def set_current_possible_words(self, possible_words):
        """ Sets the list of words which can fit in the word slot given the letters already in place
        :param possible_words: [string] list of words which can fit in the word slot given the letters already in place
        :return: None
        """
        self.current_possible_words = possible_words
        self.number_of_possible_words = len(possible_words)

    def what_current_word_would_be_with_an_added_letter(self,letter, position):
        """ Helper function to simulate the placement of a new letter
        :param letter: (char) letter we want to add to current word
        :param position: (int) position we want to add the letter to
        :return: ([char]) current letters with the new letter added (the slot itself is unchanged)
        """
        new_word = self.current_letters.copy()
        new_word[position] = letter
        return new_word

    def check_if_word_is_filled(self):
        """ Helper function to check if current word is filled or if there are still some empty letters
        :return: (bool) True when no "." placeholder remains
        """
        # "." marks an empty square; the slot is filled when none remain.
        return "." not in self.current_letters

    def propose_a_word_with_current_situation(self):
        """ Given the current letters, choose a possible word at random
        :return: (string) a possible word
        """
        return np.random.choice(self.current_possible_words)

    def set_a_word(self,word):
        """ Set a given word to be the word of this word slot
        :param word: (string) word to set
        :return: None
        """
        self.current_letters = list(word)
        # NOTE(review): this only recounts the previously stored candidates;
        # the candidate list itself is not refreshed here — confirm callers
        # re-run set_current_possible_words afterwards.
        self.number_of_possible_words = len(self.current_possible_words)

    @staticmethod
    def tab2string(tab):
        """ Helper function to convert an array of characters to a string
        :param tab: array to convert
        :return: (string) concatenation of the items
        """
        return "".join(tab)

    def compute_crosses(self, temp_horizontal_slots_grid, temp_vertical_slots_grid):
        """ Compute crosses with other word slots (updates self.dic_crosses)
        :param temp_horizontal_slots_grid: ([string]) "id.pos" of horizontal words for each square ("0" = none)
        :param temp_vertical_slots_grid: ([string]) "id.pos" of vertical words for each square ("0" = none)
        :return: None
        """
        # A vertical slot can only cross horizontal slots, and vice versa.
        relevant_grid_to_check = temp_horizontal_slots_grid if self.direction == "vertical" else \
            temp_vertical_slots_grid
        for e,slot in enumerate(self.slots):
            corresponding_word_slot_id = relevant_grid_to_check[slot[0]][slot[1]]
            if corresponding_word_slot_id != "0":
                # Grid cells encode "<slot id>.<letter index within that slot>".
                other_id, other_pos = corresponding_word_slot_id.split(".")
                self.dic_crosses[int(other_id)] = [e, int(other_pos)]

    def compute_slots(self):
        """ Compute position of spaces spanned by the word slot
        :return: ([[int, int]]) [row, col] of each square spanned by the word slot
        """
        row0 = int(self.first_letter_position[0])
        col0 = int(self.first_letter_position[1])
        if self.direction == "horizontal":
            return [[row0, col0 + i] for i in range(self.length)]
        return [[row0 + i, col0] for i in range(self.length)]
"alphanum_fraction": 0.6315142576,
"author": null,
"avg_line_length": 38.3773584906,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "fa1ddf57466635b41ffb1203374fb3db16de4c85",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a53907be76a65553ed3682f46e9a1b928ae48d75",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "clementdouarre/crosswords",
"max_forks_repo_path": "src/WordSlot.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a53907be76a65553ed3682f46e9a1b928ae48d75",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "clementdouarre/crosswords",
"max_issues_repo_path": "src/WordSlot.py",
"max_line_length": 132,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a53907be76a65553ed3682f46e9a1b928ae48d75",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "clementdouarre/crosswords",
"max_stars_repo_path": "src/WordSlot.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 850,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4068
} |
import argparse
import json
from pathlib import Path
import numpy as np
import torch
import yaml
from .binary_tree import BinaryTree
from .graph import Graph
from .random_walker import RandomWalker
from .skipgram import SkipGram
def run(args):
    """Train DeepWalk embeddings and write results under ``args.output_root``.

    Loads hyper-parameters from a YAML config, builds the graph / random
    walker / binary tree, trains a SkipGram model for ``walks_per_node``
    epochs, and saves the final embeddings plus the loss history.

    Raises:
        FileNotFoundError: if ``args.config_file`` does not exist.
    """
    if args.config_file.absolute().exists():
        with open(args.config_file.absolute(), 'r') as config_io:
            hparams = yaml.load(config_io, Loader=yaml.FullLoader)
    else:
        raise FileNotFoundError(f"Config file not found. {args.config_file.absolute()}")
    graph = Graph(
        data_root=args.data_root,
    )
    random_walker = RandomWalker(
        graph=graph,
        **hparams["random_walker"],
    )
    # Binary tree over the graph's vertices; holds the node embeddings.
    binary_tree = BinaryTree(
        V=graph.V,
        n_dims=hparams["n_dims"]
    )
    device = torch.device('cuda') if args.gpu else torch.device('cpu')
    skipgram = SkipGram(
        binary_tree=binary_tree,
        random_walker=random_walker,
        device=device,
        ** hparams["skipgram"],
    )
    # One training epoch per walk-per-node; optionally checkpoint before each epoch.
    for epoch in range(hparams["random_walker"]["walks_per_node"]):
        if args.checkpoint_period > 0 and epoch % args.checkpoint_period == 0:
            checkpoint_root = args.output_root.joinpath('checkpoint', f'{epoch}')
            checkpoint_root.mkdir(parents=True, exist_ok=True)
            # save embeddings
            np.save(
                file=checkpoint_root.joinpath('Z.npy'),
                arr=binary_tree.get_node_embeddings().numpy(),
            )
            # save context (optimizer + model state, for resuming)
            context = {
                'optimizer': skipgram.optimizer.state_dict(),
                'model': binary_tree.state_dict(),
                'epoch': epoch,
            }
            torch.save(context, checkpoint_root.joinpath(f'{epoch}.pt'))
        skipgram.train()
    # Final outputs: embedding matrix and the recorded loss curve.
    embeddings = binary_tree.get_node_embeddings()
    if not args.output_root.exists():
        args.output_root.mkdir()
    np.save(Path(args.output_root).joinpath('Z.npy'), embeddings.cpu().numpy())
    with open(args.output_root.joinpath('loss.json'), 'w') as io:
        json.dump(skipgram.loss_history, io)
def get_parser():
    """Build and return the command-line parser for the ``deepwalk`` program."""
    parser = argparse.ArgumentParser(prog="deepwalk")
    # Input/output locations are parsed straight into pathlib.Path objects.
    parser.add_argument("--data_root", type=Path,
                        help="Path to the data root directory.")
    parser.add_argument("--config_file", type=Path,
                        help="Path to the config file.")
    parser.add_argument("--output_root", type=Path,
                        help="Path to the output root directory.")
    parser.add_argument("--checkpoint_period", type=int, default=0,
                        help="Period of making checkpoint in epoch. 0 for no checkpoints.")
    parser.add_argument("--gpu", action="store_true")
    return parser
def main():
    """CLI entry point: parse command-line arguments and start a run."""
    cli_args = get_parser().parse_args()
    run(cli_args)


if __name__ == "__main__":
    main()
| {
"alphanum_fraction": 0.6199236376,
"author": null,
"avg_line_length": 27.7019230769,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "fd9e05b85996163e5cd68c3fdcdb8a89297ce51e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "58325e000732e7588694a8ce97d1843e0192ba2c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "helloybz/deepwalk_helloybz",
"max_forks_repo_path": "deepwalk/__main__.py",
"max_issues_count": 18,
"max_issues_repo_head_hexsha": "58325e000732e7588694a8ce97d1843e0192ba2c",
"max_issues_repo_issues_event_max_datetime": "2021-11-30T07:23:43.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-11-20T17:20:45.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "helloybz/deepwalk_helloybz",
"max_issues_repo_path": "deepwalk/__main__.py",
"max_line_length": 88,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "58325e000732e7588694a8ce97d1843e0192ba2c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "helloybz/deepwalk_helloybz",
"max_stars_repo_path": "deepwalk/__main__.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-23T12:34:32.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-23T12:34:32.000Z",
"num_tokens": 631,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2881
} |
import matplotlib
matplotlib.use('pdf')
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial.distance import is_valid_dm
import numpy as np
import general_scripts as gs
# --- Global matplotlib / LaTeX styling ---------------------------------------
matplotlib.rcParams['font.family'] = ['sans-serif']
matplotlib.rcParams['font.size'] = 10
#matplotlib.rcParams['mathtext.default'] = ['regular']
matplotlib.rcParams['text.usetex'] = True
#params = {'mathtext.default': 'bf' }
#plt.rcParams.update(params)
# NOTE(review): recent matplotlib expects 'text.latex.preamble' to be a single
# string rather than a list — confirm against the installed version.
matplotlib.rcParams['text.latex.preamble'] = [
       r'\usepackage{siunitx}',   # i need upright \micro symbols, but you need...
       r'\sisetup{detect-all}',   # ...this to force siunitx to actually use your fonts
       r'\usepackage{helvet}',    # set the normal font here
       r'\usepackage{sansmath}',  # load up the sansmath so that math -> helvet
       r'\sansmath'               # <- tricky! -- gotta actually tell tex to use!
]
# Monospace label presets: small / normal / bold.
label_font0 = {'fontname':'Nimbus Mono L', 'fontsize':6, 'fontweight':'normal' }
label_font1 = {'fontname':'Nimbus Mono L', 'fontsize':10, 'fontweight':'normal' }
label_font2 = {'fontname':'Nimbus Mono L', 'fontsize':14, 'fontweight':'bold' }
# Colour-scale limits: full range for per-concentration maps,
# tighter (colorbar_max2) for the averaged aggregate map.
colorbar_min=0.0
colorbar_max=1.0
colorbar_max2=0.5
metric='V_R'
# Adopted from gnuplot schemes
#set palette defined (0 "black", 1 '#2233bb', 2 "yellow", 3 "red", 4 "pink", 5 "white")
# i.e. #00-00-00 , #22-33-bb, #FF-FF-00, #FF-00-00, #FF-C0-CB, #FF-FF-FF
cdict = {'red':   [(0.0, 0.000, 0.000),
                   (0.2, 0.133, 0.133),
                   (0.4, 1.000, 1.000),
                   (0.6, 0.800, 0.800),
                   (0.8, 1.000, 1.000),
                   (1.0, 1.000, 1.000)],
         'green': [(0.0, 0.000, 0.000),
                   (0.2, 0.199, 0.199),
                   (0.4, 1.000, 1.000),
                   (0.6, 0.000, 0.000),
                   (0.8, 0.750, 0.750),
                   (1.0, 1.000, 1.000)],
         'blue':  [(0.0, 0.000, 0.000),
                   (0.2, 0.730, 0.730),
                   (0.4, 0.000, 0.000),
                   (0.6, 0.000, 0.000),
                   (0.8, 0.793, 0.793),
                   (1.0, 1.000, 1.000)]
        }
#cdict = {'red': [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)], 'green': [(0.0, 1.0, 1.0), (1.0, 0.0, 0.0)], 'blue': [(0.0, 1.0, 1.0), (1.0, 0.0, 0.0)]}
cmap_segmented = matplotlib.colors.LinearSegmentedColormap('Gnuplot', cdict)
fig_dims=(5,5)
dpi=300
#box_style = dict(boxstyle='round', facecolor='#FFEEDD')
# place a text box in upper left in axes coords
#ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=14,
#        verticalalignment='top', bbox=props)
rna_font = {'fontname':'Nimbus Mono L'}
#plt.close('all')
# LaTeX-formatted labels for the 13 RNA constructs (matrix rows/columns).
rna_labels=['A$_{10}$','U$_{10}$','U$_5$C$_5$','C$_5$U$_5$','C$_{10}$','U$_3$C$_4$U$_3$','C$_3$U$_4$C$_3$','U$_9$','U$_8$','U$_7$','U$_6$','U$_5$','UGU$_8$']
rna_labels_pos=range(len(rna_labels))
# Print matrix
#figC, (ax1, ax2) = plt.subplots( 1, 2, figsize=(6, 3), dpi=300 )
figC = plt.figure( figsize=(5, 3), dpi=300 )
Xaverage=[]
# Protein:RNA concentration ratios to process (one figure each).
conc_list=['0.0','0.1','0.2','0.4','0.6','0.8','0.9','1.0']
#conc_list=['1.0']
numConc=len(conc_list)
bFirst=True
# Per-concentration figures: heat map of the pairwise metric next to its
# hierarchical-clustering dendrogram; also accumulate the symmetrized
# matrices for the aggregate figure below.
for e in range(numConc):
    in_file='./fitted_'+metric+'_'+conc_list[e]+'_matrix.dat'
    out_file='matrix_'+metric+'_'+conc_list[e]+'.pdf'
    X = gs.load_matrix(in_file)
    # Rescale and symmetrize matrix: clamp negatives to zero, then average
    # with the transpose so the result is a symmetric distance matrix.
    Xp = np.maximum( np.zeros( X.shape ), X )
    Xp = 0.5*( Xp + Xp.T )
    print( is_valid_dm(Xp) )
    if bFirst:
        bFirst=False
        Xaverage=Xp
    else:
        Xaverage=Xaverage+Xp
    plt.clf()
    # Left 5/7 of the figure: the matrix; right 2/7: the dendrogram.
    ax1 = plt.subplot2grid((1,7), (0,0), colspan=5)
    ax2 = plt.subplot2grid((1,7), (0,5), colspan=2)
    plt.subplots_adjust(left=0.04, right=0.99, bottom=0.03, top=0.76, wspace=3.0, hspace=None)
    plt.figtext(s='Prot:RNA-ratio', x=0.68, y=0.95,
        horizontalalignment='center', verticalalignment='center', **label_font1 )
    plt.figtext(s='1.0:%s' % conc_list[e], x=0.68, y=0.90,
        horizontalalignment='center', verticalalignment='center', **label_font2 )
    #fig1 = plt.figure(figsize=(5,5), dpi=300 )
    plt.sca(ax1)
    plt.cla()
    ax1.xaxis.tick_top()
    plt.xticks(rna_labels_pos, rna_labels, rotation='vertical', **rna_font)
    plt.yticks(rna_labels_pos, rna_labels, rotation='horizontal', **rna_font)
    # NOTE(review): the heat map shows the raw X, not the symmetrized Xp used
    # for clustering — confirm that is intentional.
    graph = ax1.imshow(X,interpolation='none',cmap=cmap_segmented, vmin=colorbar_min, vmax=colorbar_max)
    divider = make_axes_locatable(ax1)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    cbar = figC.colorbar(graph, cax=cax)
    cbar.ax.set_ylabel('pairwise reduced-$\chi$')
    #plt.margins(1.0, tight=False)
    plt.sca(ax2)
    plt.cla()
    ax2.xaxis.tick_top()
    ax2.xaxis.set_label_position('top')
#    plt.xlabel('cluster distance')
    plt.xlabel('$d_{ab}=0.5(\chi_{ab}+\chi_{ba})$', **label_font0)
    ax2.spines['left'].set_visible(False)
    ax2.spines['bottom'].set_visible(False)
    ax2.spines['right'].set_visible(False)
    # generate the linkage matrix for the dendrogram.
    # NOTE(review): linkage() receives a square matrix, which scipy treats as
    # raw observation vectors rather than a condensed distance matrix — confirm.
    Z = linkage(Xp, 'average')
    dendrogram(Z,
        labels=rna_labels,
        color_threshold=0.5*max(Z[:,2]),
        orientation='right', #leaf_rotation=90., # rotates the x axis labels
        leaf_font_size=10
        )
    figC.savefig(out_file)
    #print( inconsistent(Z) )
    print( "= = Output for %s is complete." % conc_list[e] )
# Do final plot with aggregate data: the symmetrized matrices averaged over
# all concentration ratios, rendered with the same matrix + dendrogram layout.
Xaverage /= numConc
out_file='matrix_'+metric+'_aggregate.pdf'
plt.clf()
ax1 = plt.subplot2grid((1,7), (0,0), colspan=5)
ax2 = plt.subplot2grid((1,7), (0,5), colspan=2)
plt.subplots_adjust(left=0.04, right=0.99, bottom=0.03, top=0.76, wspace=3.0, hspace=None)
plt.figtext(s='Aggregate', x=0.68, y=0.95,
    horizontalalignment='center', verticalalignment='center', **label_font1 )
#fig1 = plt.figure(figsize=(5,5), dpi=300 )
plt.sca(ax1)
plt.cla()
ax1.xaxis.tick_top()
plt.xticks(rna_labels_pos, rna_labels, rotation='vertical', **rna_font)
plt.yticks(rna_labels_pos, rna_labels, rotation='horizontal', **rna_font)
# Tighter colour range (colorbar_max2) since averaging shrinks the values.
graph = ax1.imshow(Xaverage,interpolation='none',cmap=cmap_segmented, vmin=colorbar_min, vmax=colorbar_max2)
#plt.margins(1.0, tight=False)
divider = make_axes_locatable(ax1)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = figC.colorbar(graph, cax=cax)
cbar.ax.set_ylabel('pairwise reduced-$\chi$')
plt.sca(ax2)
plt.cla()
ax2.xaxis.tick_top()
ax2.xaxis.set_label_position('top')
# NOTE(review): the next xlabel call immediately overwrites this one.
plt.xlabel('cluster distance')
plt.xlabel('$d_{ab}=0.5(\chi_{ab}+\chi_{ba})$', **label_font0)
ax2.spines['left'].set_visible(False)
ax2.spines['bottom'].set_visible(False)
ax2.spines['right'].set_visible(False)
# generate the linkage matrix for the dendrogram.
Z = linkage(Xaverage, 'average')
dendrogram(Z,
    labels=rna_labels,
    color_threshold=0.5*max(Z[:,2]),
    orientation='right', #leaf_rotation=90., # rotates the x axis labels
    leaf_font_size=10
    )
figC.savefig(out_file)
| {
"alphanum_fraction": 0.6265841014,
"author": null,
"avg_line_length": 37.3333333333,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "31403b1302787ab14d366253681a621ce85ecd15",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "160666f51f024243b16774f2cfa3e130bcb5b1c8",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "zharmad/SAXScreen",
"max_forks_repo_path": "scripts/plot-dendrogram-VR-example.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "160666f51f024243b16774f2cfa3e130bcb5b1c8",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "zharmad/SAXScreen",
"max_issues_repo_path": "scripts/plot-dendrogram-VR-example.py",
"max_line_length": 157,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "160666f51f024243b16774f2cfa3e130bcb5b1c8",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "zharmad/SAXScreen",
"max_stars_repo_path": "scripts/plot-dendrogram-VR-example.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2345,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6944
} |
!
! -- LAPACK95 interface driver routine (version 3.0) --
! UNI-C, Denmark; Univ. of Tennessee, USA; NAG Ltd., UK
! September, 2000
!
! .. USE STATEMENTS ..
USE LA_PRECISION, ONLY: WP => SP
USE LA_AUXMOD, ONLY: ERINFO, LSAME
! .. IMPLICIT STATEMENT ..
IMPLICIT NONE
! .. SCALAR ARGUMENTS ..
CHARACTER(LEN=1), INTENT(IN), OPTIONAL :: JOBZ, UPLO
INTEGER, INTENT(IN), OPTIONAL :: ITYPE
INTEGER, INTENT(OUT), OPTIONAL :: INFO
! .. ARRAY ARGUMENTS ..
COMPLEX(WP), INTENT(INOUT) :: A(:,:), B(:,:)
REAL(WP), INTENT(OUT) :: W(:)
!----------------------------------------------------------------------
!
! Purpose
! =======
!
! LA_SYGV, LA_SYGVD, LA_HEGV and LA_HEGVD compute all eigenvalues
! and, optionally, all eigenvectors of generalized eigenvalue problems of
! the form A*z = lambda*B*z, A*B*z = lambda*z, and B*A*z = lambda*z,
! where A and B are real symmetric in the cases of LA_SYGV and LA_SYGVD
! and complex Hermitian in the cases of LA_HEGV and LA_HEGVD. In all four
! cases B is positive definite.
! LA_SYGVD and LA_HEGVD use a divide and conquer algorithm. If
! eigenvectors are desired, they can be much faster than LA_SYGV and
! LA_HEGV for large matrices but use more workspace.
!
! =========
!
! SUBROUTINE LA_SYGV / LA_SYGVD / LA_HEGV / LA_HEGVD( A, B, &
! W, ITYPE=itype, JOBZ=jobz, UPLO=uplo, INFO=info )
! <type>(<wp>), INTENT(INOUT) :: A(:,:), B(:,:)
! REAL(<wp>), INTENT(OUT) :: W(:)
! INTEGER, INTENT(IN), OPTIONAL :: ITYPE
! CHARACTER(LEN=1), INTENT(IN), OPTIONAL :: JOBZ, UPLO
! INTEGER, INTENT(OUT), OPTIONAL :: INFO
! where
! <type> ::= REAL | COMPLEX
! <wp> ::= KIND(1.0) | KIND(1.0D0)
!
! Arguments
! =========
!
! A (input/output) REAL or COMPLEX square array, shape (:,:).
! On entry, the matrix A.
! If UPLO = 'U', the upper triangular part of A contains the
! upper triangular part of matrix A. If UPLO = 'L', the lower
! triangular part of A contains the lower triangular part of
! matrix A.
! On exit, if JOBZ = 'V', then the columns of A contain the
! eigenvectors, normalized as follows:
! if ITYPE = 1 or 2: Z^H*B*Z = I ,
! if ITYPE = 3: Z^H*B^-1*Z = I .
! If JOBZ = 'N', then the upper triangle (if UPLO = 'U') or the
! lower triangle (if UPLO = 'L') of A, including the diagonal,
! is destroyed.
! B (input/output) REAL or COMPLEX square array, shape (:,:) with
! size(B,1) = size(A,1).
! On entry, the matrix B. If UPLO = 'U', the upper triangular
! part of B contains the upper triangular part of matrix B. If
! UPLO = 'L', the lower triangular part of B contains the lower
! triangular part of matrix B.
! On exit, if the part of B containing the matrix is overwritten
! by the triangular factor U or L of the Cholesky factorization
! B = U^H*U or B = L*L^H , respectively.
! W (output) REAL array, shape (:) with size(W) = size(A,1).
! The eigenvalues in ascending order.
! ITYPE Optional (input) INTEGER.
! Specifies the problem type to be solved:
! = 1: A*z = lambda*B*z
! = 2: A*B*z = lambda*z
! = 3: B*A*z = lambda*z
! Default value: 1.
! JOBZ Optional (input) CHARACTER(LEN=1).
! = 'N': Compute eigenvalues only;
! = 'V': Compute eigenvalues and eigenvectors.
! Default value: 'N'.
! UPLO Optional (input) CHARACTER(LEN=1).
! = 'U': Upper triangles of A and B are stored;
! = 'L': Lower triangles of A and B are stored.
! Default value: 'U'.
! INFO Optional (output) INTEGER.
! = 0: successful exit.
! < 0: if INFO = -i, the i-th argument had an illegal value.
! > 0: the algorithm failed to converge or matrix B is not
! positive definite:
! <= n: if INFO = i, i off-diagonal elements of an
! intermediate tridiagonal form did not converge to
! zero.
! > n: if INFO = n+i, for 1 <= i <= n, then the leading minor
! of order i of B is not positive definite. The
! factorization of B could not be completed and no
! eigenvalues or eigenvectors were computed.
! n is the order of A.
! If INFO is not present and an error occurs, then the program is
! terminated with an error message.
!------------------------------------------------------------------------
! .. LOCAL PARAMETERS ..
! .. LOCAL SCALARS ..
CHARACTER(LEN=1) :: LJOBZ, LUPLO
! .. LOCAL ARRAYS ..
COMPLEX(WP), POINTER :: WORK(:)
INTEGER, POINTER :: IWORK(:)
COMPLEX(WP) :: WORKMIN(1)
INTEGER :: IWORKMIN(1)
! .. INTRINSIC FUNCTIONS ..
INTRINSIC SIZE, MAX, PRESENT
! .. EXECUTABLE STATEMENTS ..
LINFO = 0; N = SIZE(A,1); LD = MAX(1,N); ISTAT = 0
IF( PRESENT(ITYPE) )THEN
LITYPE = ITYPE
ELSE
LITYPE = 1
END IF
IF( PRESENT(JOBZ) ) THEN
LJOBZ = JOBZ
ELSE
LJOBZ = 'N'
END IF
IF( PRESENT(UPLO) ) THEN
LUPLO = UPLO
ELSE
LUPLO = 'U'
END IF
! .. TEST THE ARGUMENTS ..
IF( SIZE( A, 2 ) /= N .OR. N < 0 )THEN
LINFO = -1
ELSE IF( SIZE( B, 1 ) /= N .OR. SIZE( B, 2 ) /= N )THEN
LINFO = -2
ELSE IF( SIZE( W ) /= N )THEN
LINFO = -3
ELSE IF( LITYPE < 1 .OR. LITYPE > 3 )THEN
LINFO = -4
ELSE IF( .NOT.LSAME(LJOBZ,'N') .AND. .NOT.LSAME(LJOBZ,'V') )THEN
LINFO = -5
ELSE IF( .NOT.LSAME(LUPLO,'U') .AND. .NOT.LSAME(LUPLO,'L') )THEN
LINFO = -6
ELSE IF( N > 0 )THEN
! .. DETERMINE THE WORKSPACE ..
! .. QUERYING THE SIZE OF WORKSPACE ..
ENDIF
CALL ERINFO(LINFO, SRNAME, INFO, ISTAT)
| {
"alphanum_fraction": 0.5439239873,
"author": null,
"avg_line_length": 40.5337837838,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "f06960500fd06d00e7c57a75bfb2690b2f41cd58",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 7,
"max_forks_repo_forks_event_max_datetime": "2021-04-15T07:12:40.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-12-29T15:34:01.000Z",
"max_forks_repo_head_hexsha": "bcd9d4b706f4213a6a4c0ebb4521754ffeff3752",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "MattBurn/LAPACK95",
"max_forks_repo_path": "src/la_csygvd.f90",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "bcd9d4b706f4213a6a4c0ebb4521754ffeff3752",
"max_issues_repo_issues_event_max_datetime": "2018-12-31T06:45:11.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-12-30T15:38:47.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "MattBurn/LAPACK95",
"max_issues_repo_path": "src/la_csygvd.f90",
"max_line_length": 74,
"max_stars_count": 8,
"max_stars_repo_head_hexsha": "bcd9d4b706f4213a6a4c0ebb4521754ffeff3752",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "MattBurn/LAPACK95",
"max_stars_repo_path": "src/la_csygvd.f90",
"max_stars_repo_stars_event_max_datetime": "2022-03-02T10:09:22.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-12-29T15:07:54.000Z",
"num_tokens": 1792,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 5999
} |
# This contains inherited classes from detectron2, which were defined with the detectron2 version on January 21, 2019.
# the changes are made to allow for logging validation set metrics during training and to allow for a custom data loader for tif imagery.
import copy
import logging
import numpy as np
import os
import operator
import torch.utils.data
import time
from detectron2.utils.comm import get_world_size
from detectron2.utils.env import seed_all_rng
from detectron2.utils.logger import log_first_n
from detectron2.data import samplers
from detectron2.data.common import (
AspectRatioGroupedDataset,
DatasetFromList,
MapDataset,
)
from detectron2.data.dataset_mapper import DatasetMapper
import detectron2.data.detection_utils as utils
import detectron2.utils.comm as comm
from detectron2.data.build import *
from detectron2.data.build import (
worker_init_reset_seed,
trivial_batch_collator
) # it's hidden by _all_ in build.py :(
from detectron2.data import transforms as T
from detectron2.engine import DefaultTrainer, hooks
from detectron2.evaluation import COCOEvaluator, DatasetEvaluators
import torch
def tif_dataset_mapper(dataset_dict):
    """Map one dataset dict into the model's input format, reading tif imagery.

    A custom replacement for detectron2's default DatasetMapper: it reads the
    image via the project's patched ``utils.read_image`` (which supports tif
    files through imageio), converts it to a CHW float32 tensor, drops crowd
    annotations, and attaches an ``Instances`` object. No augmentation or
    resizing is applied.
    """
    record = copy.deepcopy(dataset_dict)
    # Relies on the project's edit of read_image that adds tif support.
    img = utils.read_image(record["file_name"])
    chw = img.transpose(2, 0, 1).astype("float32")
    record["image"] = torch.as_tensor(chw)
    # Keep only non-crowd annotations.
    kept = []
    for obj in record.pop("annotations"):
        if obj.get("iscrowd", 0) == 0:
            kept.append(obj)
    inst = utils.annotations_to_instances(kept, img.shape[:2])
    record["instances"] = utils.filter_empty_instances(inst)
    return record
def build_detection_validation_loader(cfg, mapper=None):
    """
    Build an infinite data loader over cfg.DATASETS.VALIDATION.

    This mirrors detectron2's build_detection_train_loader; the only
    intended difference is that the dataset names come from
    ``cfg.DATASETS.VALIDATION`` instead of ``cfg.DATASETS.TRAIN``, so
    validation batches match the training batching/sampling scheme.

    A data loader is created by the following steps:
    1. Use the dataset names in config to query :class:`DatasetCatalog`, and obtain a list of dicts.
    2. Start workers to work on the dicts. Each worker will:
        * Map each metadata dict into another format to be consumed by the model.
        * Batch them by simply putting dicts into a list.
    The batched ``list[mapped_dict]`` is what this dataloader will return.
    Args:
        cfg (CfgNode): the config
        mapper (callable): a callable which takes a sample (dict) from dataset and
            returns the format to be consumed by the model.
            By default it will be `DatasetMapper(cfg, True)` — note the
            train-mode mapper is used on purpose so the model returns losses.
    Returns:
        an infinite iterator of validation data
    """
    # Batch size must split evenly across distributed workers.
    num_workers = get_world_size()
    images_per_batch = cfg.SOLVER.IMS_PER_BATCH
    assert (
        images_per_batch % num_workers == 0
    ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format(
        images_per_batch, num_workers
    )
    assert (
        images_per_batch >= num_workers
    ), "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format(
        images_per_batch, num_workers
    )
    images_per_worker = images_per_batch // num_workers
    dataset_dicts = get_detection_dataset_dicts(
        # this is the only difference between the validation loader and train loader.
        cfg.DATASETS.VALIDATION,
        filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
        min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
        if cfg.MODEL.KEYPOINT_ON
        else 0,
        # NOTE(review): still reads PROPOSAL_FILES_TRAIN — per the original
        # author's comment this must change if precomputed proposals are used
        # for validation.
        proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
        if cfg.MODEL.LOAD_PROPOSALS
        # won't use this I think but if I do, needs to be changed for validation set.
        else None,
    )
    dataset = DatasetFromList(dataset_dicts, copy=False)
    if mapper is None:
        # Train-mode mapper (is_train=True) so augmentations/targets match training.
        mapper = DatasetMapper(cfg, True)
    dataset = MapDataset(dataset, mapper)
    sampler_name = (
        cfg.DATALOADER.SAMPLER_TRAIN
    )  # validation should use same batching and sampling scheme as training (rather than batch of size 1)
    logger = logging.getLogger(__name__)
    logger.info("Using training sampler {}".format(sampler_name))
    if sampler_name == "TrainingSampler":
        sampler = samplers.TrainingSampler(len(dataset))
    elif sampler_name == "RepeatFactorTrainingSampler":
        sampler = samplers.RepeatFactorTrainingSampler(
            dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
        )
    else:
        raise ValueError("Unknown training sampler: {}".format(sampler_name))
    if cfg.DATALOADER.ASPECT_RATIO_GROUPING:
        # Yield individual mapped dicts and let AspectRatioGroupedDataset
        # re-batch them by aspect ratio.
        data_loader = torch.utils.data.DataLoader(
            dataset,
            sampler=sampler,
            num_workers=cfg.DATALOADER.NUM_WORKERS,
            batch_sampler=None,
            collate_fn=operator.itemgetter(
                0
            ),  # don't batch, but yield individual elements
            worker_init_fn=worker_init_reset_seed,
        )  # yield individual mapped dict
        data_loader = AspectRatioGroupedDataset(data_loader, images_per_worker)
    else:
        batch_sampler = torch.utils.data.sampler.BatchSampler(
            sampler, images_per_worker, drop_last=True
        )
        # drop_last so the batch always have the same size
        data_loader = torch.utils.data.DataLoader(
            dataset,
            num_workers=cfg.DATALOADER.NUM_WORKERS,
            batch_sampler=batch_sampler,
            collate_fn=trivial_batch_collator,
            worker_init_fn=worker_init_reset_seed,
        )
    return data_loader
def run_validation_step(self):
    """Log validation-set losses every ``cfg.VALIDATION_PERIOD`` iterations.

    Intended to be attached as a CallbackHook running in the after_step
    stage of Trainer.build_hooks(). On non-trigger iterations it does
    nothing; otherwise it pulls one batch from the validation iterator,
    runs a forward pass, checks for anomalous losses, and writes the
    metrics via ``_write_validation_metrics``.
    """
    # Guard clause: only act on the periodic trigger iterations.
    if self.iter % self.cfg.VALIDATION_PERIOD != 0:
        return
    batch = next(self.validation_data_loader_iter)
    loss_dict = self.model(batch)
    total_loss = sum(loss_dict.values())
    self._detect_anomaly(total_loss, loss_dict)
    self._write_validation_metrics(loss_dict)
class Trainer(DefaultTrainer):
    """
    Subclass of DefaultTrainer that also logs a validation loss curve
    alongside the train loss curve in tensorboard.
    This requires a train/validation/test split upfront: the validation
    datasets are read from ``cfg.DATASETS.VALIDATION`` by
    ``build_detection_validation_loader``.
    """
    def __init__(self, cfg):
        """
        Args:
            cfg (CfgNode): the detectron2 config; also used to build the
                extra validation data loader kept on this instance.
        """
        super().__init__(cfg)
        # Keep an infinite iterator over validation data so run_step can
        # draw one validation batch per training step.
        validation_data_loader = self.build_validation_loader(cfg)
        self.validation_data_loader_iter = iter(validation_data_loader)
    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        # COCO-style evaluation, writing results under OUTPUT_DIR/inference.
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluators = [COCOEvaluator(dataset_name, cfg, True, output_folder)]
        return DatasetEvaluators(evaluators)
    @classmethod
    def build_test_loader(cls, cfg, dataset_name):
        # Use the tif-aware mapper for test data as well.
        return build_detection_test_loader(cfg, dataset_name, mapper=tif_dataset_mapper)
    @classmethod
    def build_validation_loader(cls, cfg):
        # Validation loader mirrors the train loader but reads DATASETS.VALIDATION.
        return build_detection_validation_loader(cfg, mapper=tif_dataset_mapper)
    @classmethod
    def build_train_loader(cls, cfg):
        return build_detection_train_loader(cfg, mapper=tif_dataset_mapper)
    def run_step(self):
        """
        One training step: forward/backward on a train batch, plus a
        validation forward pass whose losses are logged separately.
        """
        assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
        start = time.perf_counter()
        """
        If your want to do something with the data, you can wrap the dataloader.
        """
        data = next(self._data_loader_iter)
        data_time = time.perf_counter() - start
        """
        If your want to do something with the losses, you can wrap the model.
        """
        loss_dict = self.model(data)
        losses = sum(loss for loss in loss_dict.values())
        self._detect_anomaly(losses, loss_dict)
        # NOTE: metrics_dict aliases loss_dict, so adding "data_time" below
        # also mutates loss_dict (harmless here since losses was computed first).
        metrics_dict = loss_dict
        metrics_dict["data_time"] = data_time
        self._write_metrics(metrics_dict)
        # Validation forward pass on one batch, every step.
        # NOTE(review): this runs with the model in training mode and without
        # torch.no_grad(), so a full autograd graph is built for the unused
        # validation losses — confirm this memory cost is intended.
        validation_data = next(self.validation_data_loader_iter)
        val_losses_dict = self.model(validation_data)
        val_losses = sum(loss for loss in val_losses_dict.values())
        self._detect_anomaly(val_losses, val_losses_dict)
        val_metrics_dict = val_losses_dict
        # data_time here is the *training* batch load time, reused for the
        # validation metrics row.
        val_metrics_dict["data_time"] = data_time
        self._write_validation_metrics(val_metrics_dict)
        """
        If you need accumulate gradients or something similar, you can
        wrap the optimizer with your custom `zero_grad()` method.
        """
        self.optimizer.zero_grad()
        losses.backward()
        """
        If you need gradient clipping/scaling or other processing, you can
        wrap the optimizer with your custom `step()` method.
        """
        self.optimizer.step()
    def _write_validation_metrics(self, metrics_dict: dict):
        """
        Gather validation metrics across workers and log them to storage
        with a ``val_`` prefix plus an aggregate ``total_loss/val`` scalar.

        Args:
            metrics_dict (dict): dict of scalar metrics
        """
        # Detach tensors to plain floats before gathering across processes.
        metrics_dict = {
            k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)
            for k, v in metrics_dict.items()
        }
        # gather metrics among all workers for logging
        # This assumes we do DDP-style training, which is currently the only
        # supported method in detectron2.
        all_metrics_dict = comm.gather(metrics_dict)
        if comm.is_main_process():
            if "data_time" in all_metrics_dict[0]:
                # data_time among workers can have high variance. The actual latency
                # caused by data_time is the maximum among workers.
                data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
                self.storage.put_scalar("data_time", data_time)
            # average the rest metrics. val added to metric key names
            # compared to original _write_metrics func in train_loop.py
            metrics_dict = {
                "val_"+k: np.mean([x[k] for x in all_metrics_dict])
                for k in all_metrics_dict[0].keys()
            }
            total_losses_reduced = sum(loss for loss in metrics_dict.values())
            self.storage.put_scalar("total_loss/val", total_losses_reduced)
            if len(metrics_dict) > 1:
                self.storage.put_scalars(**metrics_dict)
    def _write_metrics(self, metrics_dict: dict):
        """
        Gather training metrics across workers and log them to storage
        with a ``train_`` prefix plus an aggregate ``total_loss/train`` scalar.

        Args:
            metrics_dict (dict): dict of scalar metrics
        """
        metrics_dict = {
            k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)
            for k, v in metrics_dict.items()
        }
        # gather metrics among all workers for logging
        # This assumes we do DDP-style training, which is currently the only
        # supported method in detectron2.
        all_metrics_dict = comm.gather(metrics_dict)
        if comm.is_main_process():
            if "data_time" in all_metrics_dict[0]:
                # data_time among workers can have high variance. The actual latency
                # caused by data_time is the maximum among workers.
                data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
                self.storage.put_scalar("data_time", data_time)
            # average the rest metrics
            metrics_dict = {
                "train_"+k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()
            }
            total_losses_reduced = sum(loss for loss in metrics_dict.values())
            self.storage.put_scalar("total_loss/train", total_losses_reduced)
            if len(metrics_dict) > 1:
                self.storage.put_scalars(**metrics_dict)
| {
"alphanum_fraction": 0.6674586269,
"author": null,
"avg_line_length": 39.3741935484,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "bb194a4b4af081a7c8b6a84ec28706d6772b9301",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2021-12-01T15:52:06.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-11-19T23:02:01.000Z",
"max_forks_repo_head_hexsha": "4657ed1d103acb37dc974aa6af2f0d3a3398e987",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ecohydro/CropMask_RCNN",
"max_forks_repo_path": "cropmask/detectron2_reclass.py",
"max_issues_count": 32,
"max_issues_repo_head_hexsha": "4657ed1d103acb37dc974aa6af2f0d3a3398e987",
"max_issues_repo_issues_event_max_datetime": "2020-12-31T19:48:41.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-02-21T21:14:18.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ecohydro/CropMask_RCNN",
"max_issues_repo_path": "cropmask/detectron2_reclass.py",
"max_line_length": 137,
"max_stars_count": 13,
"max_stars_repo_head_hexsha": "4657ed1d103acb37dc974aa6af2f0d3a3398e987",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ecohydro/CropMask_RCNN",
"max_stars_repo_path": "cropmask/detectron2_reclass.py",
"max_stars_repo_stars_event_max_datetime": "2021-07-12T06:28:31.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-03-01T23:41:27.000Z",
"num_tokens": 2659,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 12206
} |
import numpy as np
from ssr.ext.plyfile import PlyData, PlyElement
from ssr.utility.logging_extension import logger
from ssr.ssr_types.point import Point, Measurement
class Face:
    """A triangle face stored purely as three vertex indices.

    Do NOT confuse this class with a real triangle: a face saves ONLY
    VERTEX INDICES, NO 3D information. We also do NOT define a color per
    face, since the color of a face is determined by the corresponding
    vertex colors.
    """

    def __init__(self, initial_indices=(-1, -1, -1)):
        """Create a face from three vertex indices (default: unset, i.e. -1).

        The default is an immutable tuple rather than the original mutable
        ``np.array`` default argument (shared-mutable-default anti-pattern);
        ``np.array(...)`` below copies it, so behavior is unchanged.
        """
        self.vertex_indices = np.array(initial_indices, dtype=int)

    def __str__(self):
        return str(self.vertex_indices)
class PLYFileHandler:
    """Read and write PLY point clouds / meshes using the project's plyfile fork.

    Vertices are represented as ``Point`` objects (coordinates, optional
    color/normal, arbitrary per-vertex scalars and optional measurement
    lists); faces as ``Face`` objects holding three vertex indices.
    """

    @staticmethod
    def __ply_data_vertices_to_vertex_list(ply_data):
        """Convert the 'vertex' element of *ply_data* into a list of Points.

        Returns a 3-tuple ``(vertices, vertex_dtype, vertex_data_dtype)`` so
        callers can reproduce the original property layout when writing.
        """
        vertex_data_type_names = ply_data["vertex"].data.dtype.names
        # Colors are only read when all three channels are present.
        use_color = False
        if (
            "red" in vertex_data_type_names
            and "green" in vertex_data_type_names
            and "blue" in vertex_data_type_names
        ):
            use_color = True
        vertices = []
        # Property names ordered by their offset within the record dtype.
        value_keys = [
            x
            for x, y in sorted(
                ply_data["vertex"].data.dtype.fields.items(),
                key=lambda k: k[1],
            )
        ]
        non_scalar_value_keys = [
            "x",
            "y",
            "z",
            "red",
            "green",
            "blue",
            "nx",
            "ny",
            "nz",
            "measurements",
        ]
        # Any property that is not a coordinate/color/normal/measurement is
        # treated as a generic per-vertex scalar.
        scalar_value_keys = [
            value_key
            for value_key in value_keys
            if value_key not in non_scalar_value_keys
        ]
        logger.info(
            "Found the following vertex properties: " + str(value_keys)
        )
        logger.info("Found " + str(len(ply_data["vertex"].data)) + " vertices")
        for point_index, line in enumerate(ply_data["vertex"].data):
            current_point = Point()
            current_point.coord = np.array([line["x"], line["y"], line["z"]])
            if use_color:
                current_point.color = np.array(
                    [line["red"], line["green"], line["blue"]]
                )
            current_point.id = point_index
            for scalar_value_key in scalar_value_keys:
                current_point.scalars[scalar_value_key] = line[
                    scalar_value_key
                ]
            if "measurements" in line.dtype.names:
                # Measurements are stored flat; each consecutive group of 4
                # values encodes one Measurement.
                elements_per_measurement = 4
                current_point.measurements = []
                # BUGFIX: integer division is required here — true division
                # ("/") produces a float in Python 3 and range() raises
                # "TypeError: 'float' object cannot be interpreted as an integer".
                for measurement_idx in range(
                    len(line["measurements"]) // elements_per_measurement
                ):
                    array_idx = measurement_idx * elements_per_measurement
                    # Renamed from "slice" to avoid shadowing the builtin.
                    chunk = line["measurements"][
                        array_idx : array_idx + elements_per_measurement
                    ]
                    current_point.measurements.append(
                        Measurement.init_from_list(chunk)
                    )
            vertices.append(current_point)
        ply_data_vertex_dtype = ply_data["vertex"].dtype
        ply_data_vertex_data_dtype = ply_data["vertex"].data.dtype
        return vertices, ply_data_vertex_dtype, ply_data_vertex_data_dtype

    @staticmethod
    def __ply_data_faces_to_face_list(ply_data):
        """Convert the optional 'face' element of *ply_data* into Faces.

        Returns ``(faces, face_dtype, face_data_dtype)``; the list is empty
        and both dtypes are None when the file has no face element.
        """
        faces = []
        ply_data_face_type = None
        ply_data_face_data_type = None
        if "face" in ply_data:
            # read faces
            ply_data_face_type = ply_data["face"].dtype
            logger.info("Found " + str(len(ply_data["face"].data)) + " faces")
            for line in ply_data["face"].data["vertex_indices"]:
                current_face = Face()
                current_face.vertex_indices = np.array(
                    [line[0], line[1], line[2]]
                )
                faces.append(current_face)
            ply_data_face_data_type = [("vertex_indices", "i4", (3,))]
            face_names = ply_data["face"].data.dtype.names
            if (
                "red" in face_names
                and "green" in face_names
                and "blue" in face_names
            ):
                ply_data_face_data_type = [
                    ("vertex_indices", "i4", (3,)),
                    ("red", "u1"),
                    ("green", "u1"),
                    ("blue", "u1"),
                ]
        return faces, ply_data_face_type, ply_data_face_data_type

    @staticmethod
    def __vertices_to_ply_vertex_element(
        point_list, ply_data_vertex_data_dtype_list
    ):
        """Build a PlyElement named 'vertex' from a list of Points.

        *ply_data_vertex_data_dtype_list* determines which optional
        properties (color, normals, scalars, measurements) are written.
        """
        ply_data_vertex_data_dtype = np.dtype(ply_data_vertex_data_dtype_list)
        # if measurements are used, then we do not know one dimension of the array
        vertex_output_array = np.empty(
            (len(point_list),), dtype=ply_data_vertex_data_dtype
        )
        with_color = False
        if (
            "red" in ply_data_vertex_data_dtype.names
            and "green" in ply_data_vertex_data_dtype.names
            and "blue" in ply_data_vertex_data_dtype.names
        ):
            with_color = True
        with_normals = False
        if (
            "nx" in ply_data_vertex_data_dtype.names
            and "ny" in ply_data_vertex_data_dtype.names
            and "nz" in ply_data_vertex_data_dtype.names
        ):
            with_normals = True
        with_measurements = "measurements" in ply_data_vertex_data_dtype.names
        # set all the values, offered / defined by property_type_list
        for index, point in enumerate(point_list):
            vertex_output_array[index]["x"] = point.coord[0]
            vertex_output_array[index]["y"] = point.coord[1]
            vertex_output_array[index]["z"] = point.coord[2]
            if with_color:
                vertex_output_array[index]["red"] = point.color[0]
                vertex_output_array[index]["green"] = point.color[1]
                vertex_output_array[index]["blue"] = point.color[2]
            if with_normals:
                vertex_output_array[index]["nx"] = point.normal[0]
                vertex_output_array[index]["ny"] = point.normal[1]
                vertex_output_array[index]["nz"] = point.normal[2]
            for scalar_key in point.scalars:
                vertex_output_array[index][scalar_key] = point.scalars[
                    scalar_key
                ]
            if with_measurements:
                # Flatten each point's Measurement objects to one list.
                measurements = []
                for measurement in point.measurements:
                    measurements += measurement.to_list()
                vertex_output_array[index]["measurements"] = measurements
        description = PlyElement.describe(
            vertex_output_array,
            name="vertex",
            # possible values for val_types
            # ['int8', 'i1', 'char', 'uint8', 'u1', 'uchar', 'b1',
            # 'int16', 'i2', 'short', 'uint16', 'u2', 'ushort',
            # 'int32', 'i4', 'int', 'uint32', 'u4', 'uint',
            # 'float32', 'f4', 'float', 'float64', 'f8', 'double']
            val_types={"measurements": "float"},
        )
        return description

    @staticmethod
    def __faces_to_ply_face_element(face_list, property_type_list):
        """Build a PlyElement named 'face' from a list of Face objects."""
        face_output_array = np.empty(len(face_list), dtype=property_type_list)
        for index, face in enumerate(face_list):
            row = np.empty(1, dtype=property_type_list)
            # We don't use face colors, the color of the faces is defined using
            # the vertex colors!
            row[
                "vertex_indices"
            ] = face.vertex_indices  # face.vertex_indices is a np.array
            face_output_array[index] = row
        output_ply_data_face_element = PlyElement.describe(
            face_output_array, "face"
        )
        return output_ply_data_face_element

    @staticmethod
    def __cameras_2_ply_vertex_element(camera_list, property_type_list):
        """Build a 'vertex' PlyElement from camera centers (+ color/normal)."""
        camera_output_array = np.empty(
            len(camera_list), dtype=property_type_list
        )
        for index, camera in enumerate(camera_list):
            row = np.empty(1, dtype=property_type_list)
            row["x"] = camera.get_camera_center()[0]
            row["y"] = camera.get_camera_center()[1]
            row["z"] = camera.get_camera_center()[2]
            row["red"] = camera.color[0]
            row["green"] = camera.color[1]
            row["blue"] = camera.color[2]
            row["nx"] = camera.normal[0]
            row["ny"] = camera.normal[1]
            row["nz"] = camera.normal[2]
            camera_output_array[index] = row
        return PlyElement.describe(camera_output_array, "vertex")

    @staticmethod
    def parse_ply_file_extended(ifp):
        """Parse *ifp* and return vertices, faces AND their dtype metadata."""
        logger.info("Parse PLY File: ...")
        ply_data = PlyData.read(ifp)
        (
            vertices,
            ply_data_vertex_dtype,
            ply_data_vertex_data_dtype,
        ) = PLYFileHandler.__ply_data_vertices_to_vertex_list(ply_data)
        (
            faces,
            ply_data_face_type,
            ply_data_face_data_type,
        ) = PLYFileHandler.__ply_data_faces_to_face_list(ply_data)
        logger.info("Parse PLY File: Done")
        # return always 6 arguments. However, the latter may be empty
        return (
            vertices,
            ply_data_vertex_dtype,
            ply_data_vertex_data_dtype,
            faces,
            ply_data_face_type,
            ply_data_face_data_type,
        )

    @staticmethod
    def parse_ply_file(ifp):
        """Parse *ifp* and return ``(vertices, faces)`` only."""
        logger.info("Parse PLY File: ...")
        logger.vinfo("ifp", ifp)
        ply_data = PlyData.read(ifp)
        vertices, _, _ = PLYFileHandler.__ply_data_vertices_to_vertex_list(
            ply_data
        )
        faces, _, _ = PLYFileHandler.__ply_data_faces_to_face_list(ply_data)
        logger.info("Parse PLY File: Done")
        return vertices, faces

    @staticmethod
    def write_ply_file_from_vertex_mat(ofp, vertex_mat):
        """Write a coordinates-only point cloud from an (n, 3) matrix."""
        vertices = []
        for entry in vertex_mat:
            vertices.append(Point(coord=entry))
        PLYFileHandler.write_ply_file(ofp, vertices)

    @staticmethod
    def build_type_list(
        vertices, with_colors, with_normals, with_measurements
    ):
        """Assemble the numpy record-dtype list describing vertex properties."""
        ply_data_vertex_data_dtype_list = [
            ("x", "<f4"),
            ("y", "<f4"),
            ("z", "<f4"),
        ]
        if with_colors:
            ply_data_vertex_data_dtype_list += [
                ("red", "u1"),
                ("green", "u1"),
                ("blue", "u1"),
            ]
        if with_normals:
            ply_data_vertex_data_dtype_list += [
                ("nx", "<f4"),
                ("ny", "<f4"),
                ("nz", "<f4"),
            ]
        if len(vertices) > 0:
            # Assumes all vertices share the scalar keys of the first one.
            for scalar_key in vertices[0].scalars:
                ply_data_vertex_data_dtype_list += [(scalar_key, "<f4")]
        if with_measurements:
            # since the length of the measurements varies, we use an object data type here
            ply_data_vertex_data_dtype_list += [("measurements", object)]
        return ply_data_vertex_data_dtype_list

    @staticmethod
    def write_ply_file(
        ofp,
        vertices,
        with_colors=True,
        with_normals=False,
        faces=None,
        plain_text_output=False,
        with_measurements=False,
    ):
        """Write *vertices* (and optionally *faces*) to the PLY file *ofp*.

        Args:
            ofp: output file path.
            vertices: list of Point objects.
            with_colors / with_normals / with_measurements: which optional
                per-vertex properties to include.
            faces: optional list of Face objects.
            plain_text_output: write ASCII instead of binary PLY.
        """
        logger.info("write_ply_file: " + ofp)
        ply_data_vertex_data_dtype_list = PLYFileHandler.build_type_list(
            vertices, with_colors, with_normals, with_measurements
        )
        logger.vinfo(
            "ply_data_vertex_data_dtype_list", ply_data_vertex_data_dtype_list
        )
        output_ply_data_vertex_element = (
            PLYFileHandler.__vertices_to_ply_vertex_element(
                vertices, ply_data_vertex_data_dtype_list
            )
        )
        if faces is None or len(faces) == 0:
            logger.info("Write File With Vertices Only (no faces)")
            output_data = PlyData(
                [output_ply_data_vertex_element], text=plain_text_output
            )
        else:
            logger.info("Write File With Faces")
            logger.info("Number of faces: " + str(len(faces)))
            ply_data_face_data_type = [("vertex_indices", "i4", (3,))]
            # we do not define colors for faces,
            # since we use the vertex colors to colorize the face
            output_ply_data_face_element = (
                PLYFileHandler.__faces_to_ply_face_element(
                    faces, ply_data_face_data_type
                )
            )
            output_data = PlyData(
                [output_ply_data_vertex_element, output_ply_data_face_element],
                text=plain_text_output,
            )
        output_data.write(ofp)

    @staticmethod
    def write_camera_ply_file(ofp, cameras, plain_text_output=True):
        """Write camera centers (with color and viewing normal) as a PLY cloud."""
        ply_data_vertex_data_dtype_list = [
            ("x", "<f4"),
            ("y", "<f4"),
            ("z", "<f4"),
        ]
        ply_data_vertex_data_dtype_list += [
            ("red", "u1"),
            ("green", "u1"),
            ("blue", "u1"),
        ]
        ply_data_vertex_data_dtype_list += [
            ("nx", "<f4"),
            ("ny", "<f4"),
            ("nz", "<f4"),
        ]
        ply_data_vertex_data_dtype = np.dtype(ply_data_vertex_data_dtype_list)
        output_ply_data_vertex_element = (
            PLYFileHandler.__cameras_2_ply_vertex_element(
                cameras, ply_data_vertex_data_dtype
            )
        )
        # [('x', '<f4'), ('y', '<f4'), ('z', '<f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
        logger.info("Write (Camera) File With Vertices Only (no faces)")
        output_data = PlyData(
            [output_ply_data_vertex_element], text=plain_text_output
        )
        output_data.write(ofp)

    @staticmethod
    def read_and_write_test(ifp, ofp):
        """Round-trip helper: parse *ifp* and write the result to *ofp*."""
        vertices, faces = PLYFileHandler.parse_ply_file(ifp)
        PLYFileHandler.write_ply_file(ofp, vertices, faces=faces)
if __name__ == "__main__":
# Demo: build a handful of points with attached measurements, write
# them to a ply file, then parse the file back and log the result.
ply_ofp = "out.ply"
measurements1 = [
Measurement(1222.428, 2, 3, 4),
Measurement(8, 9, 10.1, 11.2),
Measurement(28, 29, 30, 31),
Measurement(12213, 29, 30, 31),
]
measurements2 = [Measurement(11, 12, 13, 14), Measurement(18, 19, 20, 21)]
p_1 = Point([0, 0, 0])
p_1.measurements = measurements1
p_2 = Point([0, 0, 1])
p_2.measurements = measurements1
p_3 = Point([0, 1, 1])
p_3.measurements = measurements2
p_4 = Point([1, 0, 0])
p_4.measurements = measurements2
p_5 = Point([1, 1, 1])
p_5.measurements = measurements2
points = [p_1, p_2, p_3, p_4, p_5]
PLYFileHandler.write_ply_file(
ply_ofp, points, plain_text_output=True, with_measurements=True
)
# Read the file back; this rebinds p_1..p_5 to the parsed vertices.
vertices, faces = PLYFileHandler.parse_ply_file(ply_ofp)
p_1, p_2, p_3, p_4, p_5 = vertices
for p in vertices:
# NOTE(review): logger.info is called with multiple positional
# arguments — assumes a custom logger that accepts varargs; confirm.
logger.info(p.coord, p.color, p.measurements)
| {
"alphanum_fraction": 0.5634531229,
"author": null,
"avg_line_length": 33.0021881838,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ad78c1a9c9506221724b8eddba8c38c68f33a474",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 6,
"max_forks_repo_forks_event_max_datetime": "2022-03-18T14:09:16.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-05-11T12:18:07.000Z",
"max_forks_repo_head_hexsha": "24aa67ace67e69568cbc7e01a9e8b407463366f4",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "jieeeeeeeeeee/SatelliteSurfaceReconstruction",
"max_forks_repo_path": "ssr/file_handler/ply_file_handler.py",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "24aa67ace67e69568cbc7e01a9e8b407463366f4",
"max_issues_repo_issues_event_max_datetime": "2021-05-25T08:40:13.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-04-06T14:01:43.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "jieeeeeeeeeee/SatelliteSurfaceReconstruction",
"max_issues_repo_path": "ssr/file_handler/ply_file_handler.py",
"max_line_length": 100,
"max_stars_count": 34,
"max_stars_repo_head_hexsha": "7127bc0fb36155e31ae2928c18a65d562d7d29ba",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "SBCV/SatelliteSurfaceReconstruction",
"max_stars_repo_path": "ssr/file_handler/ply_file_handler.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-27T14:12:34.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-04-03T13:12:34.000Z",
"num_tokens": 3447,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 15082
} |
// Copyright (c) 2007-2017 Hartmut Kaiser
// Copyright (c) 2008-2009 Chirag Dekate, Anshul Tandon
// Copyright (c) 2012-2013 Thomas Heller
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <hpx/runtime/threads/topology.hpp>
#include <hpx/compat/thread.hpp>
#include <hpx/error_code.hpp>
#include <hpx/exception.hpp>
#include <hpx/throw_exception.hpp>
#include <hpx/util/assert.hpp>
#include <hpx/util/format.hpp>
#include <hpx/util/logging.hpp>
#include <hpx/util/spinlock.hpp>
#include <hpx/runtime.hpp>
#include <hpx/runtime/naming/address.hpp>
#include <hpx/runtime/threads/cpu_mask.hpp>
#include <hpx/runtime/threads/topology.hpp>
#include <boost/io/ios_state.hpp>
#include <boost/scoped_ptr.hpp>
#include <cstddef>
#include <cstring>
#include <errno.h>
#include <iomanip>
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <vector>
#include <hwloc.h>
#if HWLOC_API_VERSION < 0x00010b00
# define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
#endif
#if defined(__ANDROID__) && defined(ANDROID)
#include <cpu-features.h>
#endif
#if defined(__bgq__)
#include <hwi/include/bqc/A2_inlines.h>
#endif
#if defined(_POSIX_VERSION)
#include <sys/syscall.h>
#include <sys/resource.h>
#endif
namespace hpx { namespace threads { namespace detail
{
// Return the number of processing units (hardware threads) reported
// by the (lazily created) hwloc topology.
std::size_t hwloc_hardware_concurrency()
{
threads::topology& top = threads::create_topology();
return top.get_number_of_pus();
}
// Debug-log one named scalar value.
void write_to_log(char const* valuename, std::size_t value)
{
LTM_(debug) << "topology: "
<< valuename << ": " << value; //-V128
}
// Debug-log one named CPU mask (printed in hex).
void write_to_log_mask(char const* valuename, mask_cref_type value)
{
LTM_(debug) << "topology: " << valuename
<< ": " HPX_CPU_MASK_PREFIX
<< std::hex << value;
}
// Debug-log a vector of scalar values, one log line per element.
void write_to_log(char const* valuename,
std::vector<std::size_t> const& values)
{
LTM_(debug) << "topology: "
<< valuename << "s, size: " //-V128
<< values.size();
std::size_t i = 0;
for (std::size_t value : values)
{
LTM_(debug) << "topology: " << valuename //-V128
<< "(" << i++ << "): " << value;
}
}
// Debug-log a vector of CPU masks, one hex log line per element.
void write_to_log_mask(char const* valuename,
std::vector<mask_type> const& values)
{
LTM_(debug) << "topology: "
<< valuename << "s, size: " //-V128
<< values.size();
std::size_t i = 0;
for (mask_cref_type value : values)
{
LTM_(debug) << "topology: " << valuename //-V128
<< "(" << i++ << "): " HPX_CPU_MASK_PREFIX
<< std::hex << value;
}
}
// Return the logical index of an hwloc object, falling back to the
// OS index when no logical index is available.
std::size_t get_index(hwloc_obj_t obj)
{
// on Windows logical_index is always -1
if (obj->logical_index == ~0x0u)
return static_cast<std::size_t>(obj->os_index);
return static_cast<std::size_t>(obj->logical_index);
}
// Map an hwloc (NUMA) node object to its nearest non-memory ancestor;
// needed because hwloc >= 2.0 hangs NUMA nodes off the main tree as
// memory children.
hwloc_obj_t adjust_node_obj(hwloc_obj_t node) noexcept
{
#if HWLOC_API_VERSION >= 0x00020000
// www.open-mpi.org/projects/hwloc/doc/hwloc-v2.0.0-letter.pdf:
// Starting with hwloc v2.0, NUMA nodes are not in the main tree
// anymore. They are attached under objects as Memory Children
// on the side of normal children.
while (hwloc_obj_type_is_memory(node->type))
node = node->parent;
HPX_ASSERT(node);
#endif
return node;
}
}}}
namespace hpx { namespace threads
{
///////////////////////////////////////////////////////////////////////////
// Stream the wrapped hwloc bitmap in hwloc's textual representation.
std::ostream& operator<<(std::ostream& os, hpx_hwloc_bitmap_wrapper const* bmp)
{
    char text[256];
    hwloc_bitmap_snprintf(text, sizeof(text), bmp->bmp_);
    return os << text;
}
///////////////////////////////////////////////////////////////////////////
// Compute the mask for service (background) threads: PUs of the first
// NUMA domain that are not already used for worker threads; if all of
// them are used, the whole first NUMA domain is returned.
mask_type topology::get_service_affinity_mask(
mask_cref_type used_processing_units, error_code& ec) const
{
// We bind the service threads to the first NUMA domain. This is useful
// as the first NUMA domain is likely to have the PCI controllers etc.
mask_cref_type machine_mask = this->get_numa_node_affinity_mask(0, ec);
if (ec || !any(machine_mask))
return mask_type();
if (&ec != &throws)
ec = make_success_code();
// Prefer PUs that are not occupied by worker threads.
mask_type res = ~used_processing_units & machine_mask;
return (!any(res)) ? machine_mask : res;
}
// Lower the OS scheduling priority of the calling thread (platform
// specific; only active when HPX_HAVE_NICE_THREADLEVEL is defined).
// Returns false (and reports via ec) when the OS call fails.
bool topology::reduce_thread_priority(error_code& ec) const
{
#ifdef HPX_HAVE_NICE_THREADLEVEL
#if defined(__linux__) && !defined(__ANDROID__) && !defined(__bgq__)
// Linux: nice the individual kernel thread (gettid), not the process.
pid_t tid;
tid = syscall(SYS_gettid);
if (setpriority(PRIO_PROCESS, tid, 19))
{
HPX_THROWS_IF(ec, no_success, "topology::reduce_thread_priority",
"setpriority returned an error");
return false;
}
#elif defined(WIN32) || defined(_WIN32) || defined(__WIN32__)
if (!SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_LOWEST))
{
HPX_THROWS_IF(ec, no_success, "topology::reduce_thread_priority",
"SetThreadPriority returned an error");
return false;
}
#elif defined(__bgq__)
ThreadPriority_Low();
#endif
#endif
return true;
}
///////////////////////////////////////////////////////////////////////////
// Shared default-constructed mask returned on error paths.
mask_type topology::empty_mask = mask_type();
// Build the full topology description: load the hwloc tree, then
// precompute per-PU socket/NUMA/core numbers and affinity masks so
// later queries are simple vector lookups.
topology::topology()
: topo(nullptr), machine_affinity_mask_(0)
{ // {{{
int err = hwloc_topology_init(&topo);
if (err != 0)
{
HPX_THROW_EXCEPTION(no_success,
"topology::topology",
"Failed to init hwloc topology");
}
err = hwloc_topology_load(topo);
if (err != 0)
{
HPX_THROW_EXCEPTION(no_success,
"topology::topology",
"Failed to load hwloc topology");
}
init_num_of_pus();
socket_numbers_.reserve(num_of_pus_);
numa_node_numbers_.reserve(num_of_pus_);
core_numbers_.reserve(num_of_pus_);
// Initialize each set of data entirely, as some of the initialization
// routines rely on access to other pieces of topology data. The
// compiler will optimize the loops where possible anyways.
std::size_t num_of_sockets = get_number_of_sockets();
if (num_of_sockets == 0) num_of_sockets = 1;
for (std::size_t i = 0; i < num_of_pus_; ++i)
{
std::size_t socket = init_socket_number(i);
HPX_ASSERT(socket < num_of_sockets);
socket_numbers_.push_back(socket);
}
std::size_t num_of_nodes = get_number_of_numa_nodes();
if (num_of_nodes == 0) num_of_nodes = 1;
for (std::size_t i = 0; i < num_of_pus_; ++i)
{
std::size_t numa_node = init_numa_node_number(i);
HPX_ASSERT(numa_node < num_of_nodes);
numa_node_numbers_.push_back(numa_node);
}
std::size_t num_of_cores = get_number_of_cores();
if (num_of_cores == 0) num_of_cores = 1;
for (std::size_t i = 0; i < num_of_pus_; ++i)
{
std::size_t core_number = init_core_number(i);
HPX_ASSERT(core_number < num_of_cores);
core_numbers_.push_back(core_number);
}
// Affinity masks are derived last; they rely on the numbers above.
machine_affinity_mask_ = init_machine_affinity_mask();
socket_affinity_masks_.reserve(num_of_pus_);
numa_node_affinity_masks_.reserve(num_of_pus_);
core_affinity_masks_.reserve(num_of_pus_);
thread_affinity_masks_.reserve(num_of_pus_);
for (std::size_t i = 0; i < num_of_pus_; ++i)
{
socket_affinity_masks_.push_back(init_socket_affinity_mask(i));
}
for (std::size_t i = 0; i < num_of_pus_; ++i)
{
numa_node_affinity_masks_.push_back(init_numa_node_affinity_mask(i));
}
for (std::size_t i = 0; i < num_of_pus_; ++i)
{
core_affinity_masks_.push_back(init_core_affinity_mask(i));
}
for (std::size_t i = 0; i < num_of_pus_; ++i)
{
thread_affinity_masks_.push_back(init_thread_affinity_mask(i));
}
} // }}}
// Dump the complete precomputed topology state to the debug log.
void topology::write_to_log() const
{
std::size_t num_of_sockets = get_number_of_sockets();
if (num_of_sockets == 0) num_of_sockets = 1;
detail::write_to_log("num_sockets", num_of_sockets);
std::size_t num_of_nodes = get_number_of_numa_nodes();
if (num_of_nodes == 0) num_of_nodes = 1;
detail::write_to_log("num_of_nodes", num_of_nodes);
std::size_t num_of_cores = get_number_of_cores();
if (num_of_cores == 0) num_of_cores = 1;
detail::write_to_log("num_of_cores", num_of_cores);
detail::write_to_log("num_of_pus", num_of_pus_);
detail::write_to_log("socket_number", socket_numbers_);
detail::write_to_log("numa_node_number", numa_node_numbers_);
detail::write_to_log("core_number", core_numbers_);
detail::write_to_log_mask("machine_affinity_mask", machine_affinity_mask_);
detail::write_to_log_mask("socket_affinity_mask", socket_affinity_masks_);
detail::write_to_log_mask("numa_node_affinity_mask", numa_node_affinity_masks_);
detail::write_to_log_mask("core_affinity_mask", core_affinity_masks_);
detail::write_to_log_mask("thread_affinity_mask", thread_affinity_masks_);
}
// Release the hwloc topology, provided construction got far enough
// to create one.
topology::~topology()
{
    if (topo != nullptr)
        hwloc_topology_destroy(topo);
}
// Map a (core, pu-on-core) pair to the PU's logical index; both inputs
// wrap modulo the available counts so out-of-range values are tolerated.
std::size_t topology::get_pu_number(
std::size_t num_core
, std::size_t num_pu
, error_code& ec
) const
{ // {{{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
int num_cores = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_CORE);
// If num_cores is smaller 0, we have an error, it should never be zero
// either to avoid division by zero, we should always have at least one
// core
if(num_cores <= 0)
{
HPX_THROWS_IF(ec, no_success,
"topology::hwloc_get_nobjs_by_type",
"Failed to get number of cores");
return std::size_t(-1);
}
num_core %= num_cores; //-V101 //-V104 //-V107
hwloc_obj_t core_obj;
core_obj = hwloc_get_obj_by_type(topo,
HWLOC_OBJ_CORE, static_cast<unsigned>(num_core));
// Wrap the PU index into the number of children of this core.
num_pu %= core_obj->arity; //-V101 //-V104
return std::size_t(core_obj->children[num_pu]->logical_index);
} // }}}
///////////////////////////////////////////////////////////////////////////
// Return the precomputed mask covering every PU of the machine.
mask_cref_type topology::get_machine_affinity_mask(
error_code& ec
) const
{
if (&ec != &throws)
ec = make_success_code();
return machine_affinity_mask_;
}
// Return the precomputed socket mask for the PU assigned to the given
// thread number (thread numbers wrap modulo the number of PUs).
mask_cref_type topology::get_socket_affinity_mask(
std::size_t num_thread
, error_code& ec
) const
{ // {{{
std::size_t num_pu = num_thread % num_of_pus_;
if (num_pu < socket_affinity_masks_.size())
{
if (&ec != &throws)
ec = make_success_code();
return socket_affinity_masks_[num_pu];
}
HPX_THROWS_IF(ec, bad_parameter
, "hpx::threads::topology::get_socket_affinity_mask"
, hpx::util::format(
"thread number %1% is out of range",
num_thread));
return empty_mask;
} // }}}
// Return the precomputed NUMA-node mask for the PU assigned to the
// given thread number (thread numbers wrap modulo the number of PUs).
mask_cref_type topology::get_numa_node_affinity_mask(
std::size_t num_thread
, error_code& ec
) const
{ // {{{
std::size_t num_pu = num_thread % num_of_pus_;
if (num_pu < numa_node_affinity_masks_.size())
{
if (&ec != &throws)
ec = make_success_code();
return numa_node_affinity_masks_[num_pu];
}
HPX_THROWS_IF(ec, bad_parameter
, "hpx::threads::topology::get_numa_node_affinity_mask"
, hpx::util::format(
"thread number %1% is out of range",
num_thread));
return empty_mask;
} // }}}
// Return the precomputed core mask for the PU assigned to the given
// thread number (thread numbers wrap modulo the number of PUs).
mask_cref_type topology::get_core_affinity_mask(
std::size_t num_thread
, error_code& ec
) const
{
std::size_t num_pu = num_thread % num_of_pus_;
if (num_pu < core_affinity_masks_.size())
{
if (&ec != &throws)
ec = make_success_code();
return core_affinity_masks_[num_pu];
}
HPX_THROWS_IF(ec, bad_parameter
, "hpx::threads::topology::get_core_affinity_mask"
, hpx::util::format(
"thread number %1% is out of range",
num_thread));
return empty_mask;
}
// Return the precomputed single-PU mask for the PU assigned to the
// given thread number (thread numbers wrap modulo the number of PUs).
mask_cref_type topology::get_thread_affinity_mask(
std::size_t num_thread
, error_code& ec
) const
{ // {{{
std::size_t num_pu = num_thread % num_of_pus_;
if (num_pu < thread_affinity_masks_.size())
{
if (&ec != &throws)
ec = make_success_code();
return thread_affinity_masks_[num_pu];
}
HPX_THROWS_IF(ec, bad_parameter
, "hpx::threads::topology::get_thread_affinity_mask"
, hpx::util::format(
"thread number %1% is out of range",
num_thread));
return empty_mask;
} // }}}
///////////////////////////////////////////////////////////////////////////
// Bind the calling OS thread to the PUs selected in 'mask'.
// The HPX mask (logical PU indices) is translated into an hwloc cpuset
// (OS indices); a strict binding is attempted first, then a weak one.
// Reports failure via 'ec' (or throws when ec == throws).
void topology::set_thread_affinity_mask(
    mask_cref_type mask
  , error_code& ec
) const
{ // {{{
#if !defined(__APPLE__)
    // setting thread affinities is not supported by OSX
    hwloc_cpuset_t cpuset = hwloc_bitmap_alloc();
    int const pu_depth =
        hwloc_get_type_or_below_depth(topo, HWLOC_OBJ_PU);

    for (std::size_t i = 0; i != mask_size(mask); ++i)
    {
        if (test(mask, i))
        {
            hwloc_obj_t const pu_obj =
                hwloc_get_obj_by_depth(topo, pu_depth, unsigned(i));
            HPX_ASSERT(i == detail::get_index(pu_obj));
            // hwloc binds by OS index, not logical index.
            hwloc_bitmap_set(cpuset,
                static_cast<unsigned int>(pu_obj->os_index));
        }
    }

    {
        std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
        if (hwloc_set_cpubind(topo, cpuset,
              HWLOC_CPUBIND_STRICT | HWLOC_CPUBIND_THREAD))
        {
            // Strict binding not supported or failed, try weak binding.
            if (hwloc_set_cpubind(topo, cpuset, HWLOC_CPUBIND_THREAD))
            {
                // unique_ptr<char[]> (not a scalar smart pointer): the
                // buffer comes from new[], so it must be released with
                // delete[] -- a scalar delete would be undefined behavior.
                std::unique_ptr<char[]> buffer(new char[1024]);

                hwloc_bitmap_snprintf(buffer.get(), 1024, cpuset);
                hwloc_bitmap_free(cpuset);

                HPX_THROWS_IF(ec, kernel_error
                  , "hpx::threads::topology::set_thread_affinity_mask"
                  , hpx::util::format(
                        "failed to set thread affinity mask ("
                        HPX_CPU_MASK_PREFIX "%x) for cpuset %s",
                        mask, buffer.get()));
                return;
            }
        }
    }

#if defined(__linux) || defined(linux) || defined(__linux__) || defined(__FreeBSD__)
    sleep(0);   // Allow the OS to pick up the change.
#endif

    hwloc_bitmap_free(cpuset);
#endif  // __APPLE__

    if (&ec != &throws)
        ec = make_success_code();
} // }}}
///////////////////////////////////////////////////////////////////////////
mask_type topology::get_thread_affinity_mask_from_lva(
naming::address_type lva
, error_code& ec
) const
{ // {{{
if (&ec != &throws)
ec = make_success_code();
hwloc_membind_policy_t policy = ::HWLOC_MEMBIND_DEFAULT;
hwloc_nodeset_t nodeset = hwloc_bitmap_alloc();
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
int ret = hwloc_get_area_membind_nodeset(topo,
reinterpret_cast<void const*>(lva), 1, nodeset, &policy, 0);
if (-1 != ret)
{
hwloc_cpuset_t cpuset = hwloc_bitmap_alloc();
hwloc_cpuset_from_nodeset(topo, cpuset, nodeset);
lk.unlock();
hwloc_bitmap_free(nodeset);
mask_type mask = mask_type();
resize(mask, get_number_of_pus());
int const pu_depth =
hwloc_get_type_or_below_depth(topo, HWLOC_OBJ_PU);
for (unsigned int i = 0; std::size_t(i) != num_of_pus_; ++i)
{
hwloc_obj_t const pu_obj =
hwloc_get_obj_by_depth(topo, pu_depth, i);
unsigned idx = static_cast<unsigned>(pu_obj->os_index);
if (hwloc_bitmap_isset(cpuset, idx) != 0)
set(mask, detail::get_index(pu_obj));
}
hwloc_bitmap_free(cpuset);
return mask;
}
}
hwloc_bitmap_free(nodeset);
return empty_mask;
} // }}}
// Walk up the hwloc tree from the PU assigned to 'num_thread' until an
// ancestor of the requested type is found; return its logical index
// (0 when no such ancestor exists, size_t(-1) for an invalid thread).
std::size_t topology::init_node_number(
std::size_t num_thread, hwloc_obj_type_t type
)
{ // {{{
if (std::size_t(-1) == num_thread)
return std::size_t(-1);
std::size_t num_pu = (num_thread + pu_offset) % num_of_pus_;
{
hwloc_obj_t obj;
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
obj = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU,
static_cast<unsigned>(num_pu));
HPX_ASSERT(num_pu == detail::get_index(obj));
}
while (obj)
{
if (hwloc_compare_types(obj->type, type) == 0)
{
return detail::get_index(obj);
}
obj = obj->parent;
}
}
return 0;
} // }}}
// Recursively set, in 'mask', the bit of every PU found beneath
// 'parent'. The topology lock is taken only around each individual
// hwloc_get_next_child call, never across the recursion.
void topology::extract_node_mask(
hwloc_obj_t parent
, mask_type& mask
) const
{ // {{{
hwloc_obj_t obj;
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
obj = hwloc_get_next_child(topo, parent, nullptr);
}
while (obj)
{
if (hwloc_compare_types(HWLOC_OBJ_PU, obj->type) == 0)
{
// Fast path: consume the contiguous run of PU siblings
// without recursing.
do {
set(mask, detail::get_index(obj)); //-V106
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
obj = hwloc_get_next_child(topo, parent, obj);
}
} while (obj != nullptr &&
hwloc_compare_types(HWLOC_OBJ_PU, obj->type) == 0);
return;
}
extract_node_mask(obj, mask);
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
obj = hwloc_get_next_child(topo, parent, obj);
}
} // }}}
// Recursively count all objects of the given type beneath 'parent',
// accumulating into 'count' (which is threaded through the recursion).
// The topology lock is taken only around each hwloc_get_next_child
// call, never across the recursion.
std::size_t topology::extract_node_count(
    hwloc_obj_t parent
  , hwloc_obj_type_t type
  , std::size_t count
) const
{ // {{{
    if (parent == nullptr)
        return count;

    hwloc_obj_t obj;
    {
        std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
        obj = hwloc_get_next_child(topo, parent, nullptr);
    }

    while (obj)
    {
        if (hwloc_compare_types(type, obj->type) == 0)
        {
            ++count;
        }

        // A matching object may itself contain further matches
        // (e.g. PUs below a core), so always descend.
        count = extract_node_count(obj, type, count);

        std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
        obj = hwloc_get_next_child(topo, parent, obj);
    }

    return count;
} // }}}
// Return the number of sockets reported by hwloc; throws on an hwloc
// error (the return after the throw only silences compiler warnings).
std::size_t topology::get_number_of_sockets() const
{
int nobjs = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_SOCKET);
if(0 > nobjs)
{
HPX_THROW_EXCEPTION(kernel_error
, "hpx::threads::topology::get_number_of_sockets"
, "hwloc_get_nbobjs_by_type failed");
return std::size_t(nobjs);
}
return std::size_t(nobjs);
}
// Return the number of NUMA nodes reported by hwloc; throws on an
// hwloc error (the return after the throw only silences warnings).
std::size_t topology::get_number_of_numa_nodes() const
{
int nobjs = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_NODE);
if(0 > nobjs)
{
HPX_THROW_EXCEPTION(kernel_error
, "hpx::threads::topology::get_number_of_numa_nodes"
, "hwloc_get_nbobjs_by_type failed");
return std::size_t(nobjs);
}
return std::size_t(nobjs);
}
// Return the number of cores reported by hwloc, falling back to the
// number of PUs on platforms that report zero cores. A final result of
// zero is treated as a fatal error (it would cause divisions by zero
// elsewhere).
std::size_t topology::get_number_of_cores() const
{
int nobjs = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_CORE);
// If num_cores is smaller 0, we have an error
if (0 > nobjs)
{
HPX_THROW_EXCEPTION(kernel_error
, "hpx::threads::topology::get_number_of_cores"
, "hwloc_get_nbobjs_by_type(HWLOC_OBJ_CORE) failed");
return std::size_t(nobjs);
}
else if (0 == nobjs)
{
// some platforms report zero cores but might still report the
// number of PUs
nobjs = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_PU);
if (0 > nobjs)
{
HPX_THROW_EXCEPTION(kernel_error
, "hpx::threads::topology::get_number_of_cores"
, "hwloc_get_nbobjs_by_type(HWLOC_OBJ_PU) failed");
return std::size_t(nobjs);
}
}
// the number of reported cores/pus should never be zero either to
// avoid division by zero, we should always have at least one core
if (0 == nobjs)
{
HPX_THROW_EXCEPTION(kernel_error
, "hpx::threads::topology::get_number_of_cores"
, "hwloc_get_nbobjs_by_type reports zero cores/pus");
return std::size_t(nobjs);
}
return std::size_t(nobjs);
}
// Count the PUs below the given socket; falls back to the machine-wide
// PU count when the socket object cannot be found.
std::size_t topology::get_number_of_socket_pus(
std::size_t num_socket
) const
{
hwloc_obj_t socket_obj = nullptr;
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
socket_obj = hwloc_get_obj_by_type(topo,
HWLOC_OBJ_SOCKET, static_cast<unsigned>(num_socket));
}
if (socket_obj)
{
HPX_ASSERT(num_socket == detail::get_index(socket_obj));
std::size_t pu_count = 0;
return extract_node_count(socket_obj, HWLOC_OBJ_PU, pu_count);
}
return num_of_pus_;
}
// Count the PUs below the given NUMA node; falls back to the
// machine-wide PU count when the node object cannot be found.
std::size_t topology::get_number_of_numa_node_pus(
std::size_t numa_node
) const
{
hwloc_obj_t node_obj = nullptr;
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
node_obj = hwloc_get_obj_by_type(topo,
HWLOC_OBJ_NODE, static_cast<unsigned>(numa_node));
}
if (node_obj)
{
HPX_ASSERT(numa_node == detail::get_index(node_obj));
std::size_t pu_count = 0;
// hwloc >= 2.0 hangs NUMA nodes off the tree as memory children;
// walk up to the enclosing normal object first.
node_obj = detail::adjust_node_obj(node_obj);
return extract_node_count(node_obj, HWLOC_OBJ_PU, pu_count);
}
return num_of_pus_;
}
// Count the PUs below the given core; falls back to the machine-wide
// PU count when the core object cannot be found.
std::size_t topology::get_number_of_core_pus(
std::size_t core
) const
{
hwloc_obj_t core_obj = nullptr;
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
core_obj = hwloc_get_obj_by_type(topo,
HWLOC_OBJ_CORE, static_cast<unsigned>(core));
}
if (core_obj)
{
HPX_ASSERT(core == detail::get_index(core_obj));
std::size_t pu_count = 0;
return extract_node_count(core_obj, HWLOC_OBJ_PU, pu_count);
}
return num_of_pus_;
}
// Count the cores below the given socket; falls back to the
// machine-wide core count when the socket object cannot be found.
std::size_t topology::get_number_of_socket_cores(
std::size_t num_socket
) const
{
hwloc_obj_t socket_obj = nullptr;
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
socket_obj = hwloc_get_obj_by_type(topo,
HWLOC_OBJ_SOCKET, static_cast<unsigned>(num_socket));
}
if (socket_obj)
{
HPX_ASSERT(num_socket == detail::get_index(socket_obj));
std::size_t pu_count = 0;
return extract_node_count(socket_obj, HWLOC_OBJ_CORE, pu_count);
}
return get_number_of_cores();
}
// Count the cores below the given NUMA node; falls back to the
// machine-wide core count when the node object cannot be found.
std::size_t topology::get_number_of_numa_node_cores(
std::size_t numa_node
) const
{
hwloc_obj_t node_obj = nullptr;
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
node_obj = hwloc_get_obj_by_type(topo,
HWLOC_OBJ_NODE, static_cast<unsigned>(numa_node));
}
if (node_obj)
{
HPX_ASSERT(numa_node == detail::get_index(node_obj));
std::size_t pu_count = 0;
// See adjust_node_obj: hwloc >= 2.0 keeps NUMA nodes off-tree.
node_obj = detail::adjust_node_obj(node_obj);
return extract_node_count(node_obj, HWLOC_OBJ_CORE, pu_count);
}
return get_number_of_cores();
}
// Convert an HPX PU mask to an hwloc nodeset wrapped in a shared
// bitmap wrapper; the intermediate cpuset is freed before returning.
hwloc_bitmap_ptr topology::cpuset_to_nodeset(
mask_cref_type mask) const
{
hwloc_bitmap_t cpuset = mask_to_bitmap(mask, HWLOC_OBJ_PU);
hwloc_bitmap_t nodeset = hwloc_bitmap_alloc();
hwloc_cpuset_to_nodeset_strict(topo, cpuset, nodeset);
hwloc_bitmap_free(cpuset);
return std::make_shared<hpx::threads::hpx_hwloc_bitmap_wrapper>(nodeset);
}
namespace detail
{
// Print one hwloc object as "<name>L#<logical>(P#<os>)", prefixed
// with ", " when it follows another entry on the same line.
void print_info(std::ostream& os, hwloc_obj_t obj, char const* name,
bool comma)
{
if (comma)
os << ", ";
os << name;
if (obj->logical_index != ~0x0u)
os << "L#" << obj->logical_index;
if (obj->os_index != ~0x0u)
os << "(P#" << obj->os_index << ")";
}
// Dispatch on the object type; types other than PU/Core/Socket/Node
// are silently skipped.
void print_info(std::ostream& os, hwloc_obj_t obj, bool comma = false)
{
switch (obj->type) {
case HWLOC_OBJ_PU:
print_info(os, obj, "PU ", comma);
break;
case HWLOC_OBJ_CORE:
print_info(os, obj, "Core ", comma);
break;
case HWLOC_OBJ_SOCKET:
print_info(os, obj, "Socket ", comma);
break;
case HWLOC_OBJ_NODE:
print_info(os, obj, "Node ", comma);
break;
default:
break;
}
}
}
// Print, for thread 'num_thread', every PU selected in mask 'm'
// together with its ancestor chain (core/socket/node) and pool name.
void topology::print_affinity_mask(std::ostream& os,
std::size_t num_thread, mask_cref_type m, const std::string &pool_name) const
{
// Restore the stream's formatting flags on exit (std::hex etc.).
boost::io::ios_flags_saver ifs(os);
bool first = true;
for(std::size_t i = 0; i != num_of_pus_; ++i)
{
hwloc_obj_t obj = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, unsigned(i));
if (!obj)
{
HPX_THROW_EXCEPTION(kernel_error
, "hpx::threads::topology::print_affinity_mask"
, "object not found");
return;
}
if(!test(m, detail::get_index(obj))) //-V106
continue;
// Prefix the first entry with the thread number, align the rest.
if (first) {
first = false;
os << std::setw(4) << num_thread << ": "; //-V112 //-V128
}
else {
os << "      ";
}
detail::print_info(os, obj);
while(obj->parent)
{
detail::print_info(os, obj->parent, true);
obj = obj->parent;
}
os << ", on pool \"" << pool_name << "\"";
os << std::endl;
}
}
// Build the mask covering every PU of the machine by walking the
// hwloc tree below the machine object; throws when it is missing.
mask_type topology::init_machine_affinity_mask() const
{ // {{{
mask_type machine_affinity_mask = mask_type();
resize(machine_affinity_mask, get_number_of_pus());
hwloc_obj_t machine_obj;
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
machine_obj = hwloc_get_obj_by_type(topo, HWLOC_OBJ_MACHINE, 0);
}
if (machine_obj)
{
extract_node_mask(machine_obj, machine_affinity_mask);
return machine_affinity_mask;
}
HPX_THROW_EXCEPTION(kernel_error
, "hpx::threads::topology::init_machine_affinity_mask"
, "failed to initialize machine affinity mask");
return empty_mask;
} // }}}
// Build the PU mask for one socket; size_t(-1) or an unknown socket
// yields the machine-wide mask.
mask_type topology::init_socket_affinity_mask_from_socket(
std::size_t num_socket
) const
{ // {{{
// If we have only one or no socket, the socket affinity mask
// spans all processors
if (std::size_t(-1) == num_socket)
return machine_affinity_mask_;
hwloc_obj_t socket_obj = nullptr;
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
socket_obj = hwloc_get_obj_by_type(topo, HWLOC_OBJ_SOCKET,
static_cast<unsigned>(num_socket));
}
if (socket_obj)
{
HPX_ASSERT(num_socket == detail::get_index(socket_obj));
mask_type socket_affinity_mask = mask_type();
resize(socket_affinity_mask, get_number_of_pus());
extract_node_mask(socket_obj, socket_affinity_mask);
return socket_affinity_mask;
}
return machine_affinity_mask_;
} // }}}
// Build the PU mask for one NUMA node; size_t(-1) or an unknown node
// yields the machine-wide mask.
mask_type topology::init_numa_node_affinity_mask_from_numa_node(
std::size_t numa_node
) const
{ // {{{
// If we have only one or no NUMA domain, the NUMA affinity mask
// spans all processors
if (std::size_t(-1) == numa_node)
{
return machine_affinity_mask_;
}
hwloc_obj_t numa_node_obj = nullptr;
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
numa_node_obj = hwloc_get_obj_by_type(topo, HWLOC_OBJ_NODE,
static_cast<unsigned>(numa_node));
}
if (numa_node_obj)
{
HPX_ASSERT(numa_node == detail::get_index(numa_node_obj));
mask_type node_affinity_mask = mask_type();
resize(node_affinity_mask, get_number_of_pus());
// See adjust_node_obj: hwloc >= 2.0 keeps NUMA nodes off-tree.
numa_node_obj = detail::adjust_node_obj(numa_node_obj);
extract_node_mask(numa_node_obj, node_affinity_mask);
return node_affinity_mask;
}
return machine_affinity_mask_;
} // }}}
// Build the PU mask for one core (core index wraps modulo the core
// count, offset by core_offset); size_t(-1) or an unknown core yields
// the supplied default mask.
mask_type topology::init_core_affinity_mask_from_core(
std::size_t core, mask_cref_type default_mask
) const
{ // {{{
if (std::size_t(-1) == core)
return default_mask;
hwloc_obj_t core_obj = nullptr;
std::size_t num_core = (core + core_offset) % get_number_of_cores();
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
core_obj = hwloc_get_obj_by_type(topo,
HWLOC_OBJ_CORE, static_cast<unsigned>(num_core));
}
if (core_obj)
{
HPX_ASSERT(num_core == detail::get_index(core_obj));
mask_type core_affinity_mask = mask_type();
resize(core_affinity_mask, get_number_of_pus());
extract_node_mask(core_obj, core_affinity_mask);
return core_affinity_mask;
}
return default_mask;
} // }}}
// Build the single-PU mask for one thread number; invalid thread
// numbers or missing PU objects fall back to the core affinity mask.
mask_type topology::init_thread_affinity_mask(
std::size_t num_thread
) const
{ // {{{
if (std::size_t(-1) == num_thread)
{
return get_core_affinity_mask(num_thread);
}
std::size_t num_pu = (num_thread + pu_offset) % num_of_pus_;
hwloc_obj_t obj = nullptr;
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
obj = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU,
static_cast<unsigned>(num_pu));
}
if (!obj)
{
return get_core_affinity_mask(num_thread);
}
HPX_ASSERT(num_pu == detail::get_index(obj));
mask_type mask = mask_type();
resize(mask, get_number_of_pus());
set(mask, detail::get_index(obj)); //-V106
return mask;
} // }}}
// Build the single-PU mask for a (core, pu-on-core) pair; both inputs
// wrap modulo the available counts. Returns empty_mask when hwloc
// cannot report cores or the core object is missing.
mask_type topology::init_thread_affinity_mask(
std::size_t num_core,
std::size_t num_pu
) const
{ // {{{
hwloc_obj_t obj = nullptr;
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
int num_cores = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_CORE);
// If num_cores is smaller 0, we have an error, it should never be zero
// either to avoid division by zero, we should always have at least one
// core
if (num_cores <= 0) {
HPX_THROW_EXCEPTION(kernel_error
, "hpx::threads::topology::init_thread_affinity_mask"
, "hwloc_get_nbobjs_by_type failed");
return empty_mask;
}
num_core = (num_core + core_offset) % std::size_t(num_cores);
obj = hwloc_get_obj_by_type(topo, HWLOC_OBJ_CORE,
static_cast<unsigned>(num_core));
}
if (!obj)
return empty_mask;//get_core_affinity_mask(num_thread, false);
HPX_ASSERT(num_core == detail::get_index(obj));
// Select the requested PU among this core's children.
num_pu %= obj->arity; //-V101 //-V104
mask_type mask = mask_type();
resize(mask, get_number_of_pus());
set(mask, detail::get_index(obj->children[num_pu])); //-V106
return mask;
} // }}}
///////////////////////////////////////////////////////////////////////////
void topology::init_num_of_pus()
{
num_of_pus_ = 1;
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
int num_of_pus = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_PU);
if (num_of_pus > 0)
{
num_of_pus_ = static_cast<std::size_t>(num_of_pus);
}
}
}
// Return the cached PU count (set once by init_num_of_pus).
std::size_t topology::get_number_of_pus() const
{
return num_of_pus_;
}
///////////////////////////////////////////////////////////////////////////
// Query the current CPU binding of the calling thread and convert it
// into an HPX mask (logical PU indices). Returns empty_mask and
// reports via ec when hwloc_get_cpubind fails.
mask_type topology::get_cpubind_mask(error_code& ec) const
{
hwloc_cpuset_t cpuset = hwloc_bitmap_alloc();
mask_type mask = mask_type();
resize(mask, get_number_of_pus());
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
if (hwloc_get_cpubind(topo, cpuset, HWLOC_CPUBIND_THREAD))
{
hwloc_bitmap_free(cpuset);
HPX_THROWS_IF(ec, kernel_error
, "hpx::threads::topology::get_cpubind_mask"
, "hwloc_get_cpubind failed");
return empty_mask;
}
int const pu_depth = hwloc_get_type_or_below_depth(topo, HWLOC_OBJ_PU);
for (unsigned int i = 0; i != num_of_pus_; ++i) //-V104
{
hwloc_obj_t const pu_obj = hwloc_get_obj_by_depth(topo, pu_depth, i);
// cpuset is keyed by OS index; the HPX mask by logical index.
unsigned idx = static_cast<unsigned>(pu_obj->os_index);
if (hwloc_bitmap_isset(cpuset, idx) != 0)
set(mask, detail::get_index(pu_obj));
}
}
hwloc_bitmap_free(cpuset);
if (&ec != &throws)
ec = make_success_code();
return mask;
}
// Query the current CPU binding of the given std::thread-compatible
// handle and convert it into an HPX mask. Returns empty_mask and
// reports via ec when hwloc_get_thread_cpubind fails.
mask_type topology::get_cpubind_mask(compat::thread& handle,
error_code& ec) const
{
hwloc_cpuset_t cpuset = hwloc_bitmap_alloc();
mask_type mask = mask_type();
resize(mask, get_number_of_pus());
{
std::unique_lock<hpx::util::spinlock> lk(topo_mtx);
#if defined(HPX_MINGW)
// MinGW native handles are pthread handles; unwrap to a HANDLE.
if (hwloc_get_thread_cpubind(topo,
pthread_gethandle(handle.native_handle()), cpuset,
HWLOC_CPUBIND_THREAD))
#else
if (hwloc_get_thread_cpubind(topo, handle.native_handle(), cpuset,
HWLOC_CPUBIND_THREAD))
#endif
{
hwloc_bitmap_free(cpuset);
HPX_THROWS_IF(ec, kernel_error
, "hpx::threads::topology::get_cpubind_mask"
, "hwloc_get_cpubind failed");
return empty_mask;
}
int const pu_depth = hwloc_get_type_or_below_depth(topo, HWLOC_OBJ_PU);
for (unsigned int i = 0; i != num_of_pus_; ++i) //-V104
{
hwloc_obj_t const pu_obj =
hwloc_get_obj_by_depth(topo, pu_depth, i);
// cpuset is keyed by OS index; the HPX mask by logical index.
unsigned idx = static_cast<unsigned>(pu_obj->os_index);
if (hwloc_bitmap_isset(cpuset, idx) != 0)
set(mask, detail::get_index(pu_obj));
}
}
hwloc_bitmap_free(cpuset);
if (&ec != &throws)
ec = make_success_code();
return mask;
}
///////////////////////////////////////////////////////////////////////////
/// This is equivalent to malloc(), except that it tries to allocate
/// page-aligned memory from the OS.
void* topology::allocate(std::size_t len) const
{
return hwloc_alloc(topo, len);
}
///////////////////////////////////////////////////////////////////////////
/// Allocate some memory on NUMA memory nodes specified by nodeset
/// as specified by the hwloc hwloc_alloc_membind_nodeset call
void* topology::allocate_membind(std::size_t len,
hwloc_bitmap_ptr bitmap,
hpx_hwloc_membind_policy policy, int flags) const
{
// The HPX policy enum mirrors hwloc_membind_policy_t values.
return hwloc_alloc_membind_nodeset(topo, len, bitmap->get_bmp(),
(hwloc_membind_policy_t)(policy), flags);
}
// Bind the memory range [addr, addr+len) to the NUMA nodes in
// 'nodeset' with the BIND policy; throws with a decoded errno message
// on failure, returns true on success.
bool topology::set_area_membind_nodeset(
const void *addr, std::size_t len, void *nodeset) const
{
hwloc_membind_policy_t policy = ::HWLOC_MEMBIND_BIND;
hwloc_nodeset_t ns = reinterpret_cast<hwloc_nodeset_t>(nodeset);
int ret = hwloc_set_area_membind_nodeset(topo, addr, len, ns, policy, 0);
if (ret<0) {
// Translate the documented hwloc errno values into readable text.
std::string msg = std::strerror(errno);
if (errno == ENOSYS) msg = "the action is not supported";
if (errno == EXDEV) msg = "the binding cannot be enforced";
HPX_THROW_EXCEPTION(kernel_error
, "hpx::threads::topology::set_area_membind_nodeset"
, "hwloc_set_area_membind_nodeset failed : " + msg);
return false;
}
return true;
}
// Thread-local scratch bitmap, lazily allocated by the membind queries
// below so each thread reuses a single hwloc bitmap allocation.
util::thread_specific_ptr<hpx_hwloc_bitmap_wrapper, topology::tls_tag>
    topology::bitmap_storage_;
// Query which NUMA nodes the memory area [addr, addr + len) is bound to
// and return the result as a mask over NUMANODE objects.
// Throws kernel_error if the hwloc query fails.
threads::mask_type topology::get_area_membind_nodeset(
    const void *addr, std::size_t len) const
{
    // lazily create the thread-local scratch bitmap on first use
    hpx_hwloc_bitmap_wrapper *nodeset = topology::bitmap_storage_.get();
    if (nullptr == nodeset)
    {
        hwloc_bitmap_t nodeset_ = hwloc_bitmap_alloc();
        topology::bitmap_storage_.reset(new hpx_hwloc_bitmap_wrapper(nodeset_));
        nodeset = topology::bitmap_storage_.get();
    }

    hwloc_membind_policy_t policy;
    hwloc_nodeset_t ns = reinterpret_cast<hwloc_nodeset_t>(nodeset->get_bmp());

    if (hwloc_get_area_membind_nodeset(topo, addr, len, ns, &policy, 0) == -1)
    {
        // HPX_THROW_EXCEPTION leaves this function; the return below is a
        // safeguard for builds where throwing is suppressed. (A stray
        // unreachable debug print after the return has been removed.)
        HPX_THROW_EXCEPTION(kernel_error
          , "hpx::threads::topology::get_area_membind_nodeset"
          , "hwloc_get_area_membind_nodeset failed");
        return -1;
    }

    return bitmap_to_mask(ns, HWLOC_OBJ_NUMANODE);
}
// Return the index of the NUMA domain on which the first page of addr
// currently resides. Requires hwloc >= 1.11.3 (hwloc_get_area_memlocation);
// with older hwloc versions this always reports domain 0.
int topology::get_numa_domain(const void *addr) const
{
#if HWLOC_API_VERSION >= 0x00010b03
    // lazily create the thread-local scratch bitmap on first use
    hpx_hwloc_bitmap_wrapper *nodeset = topology::bitmap_storage_.get();
    if (nullptr == nodeset)
    {
        hwloc_bitmap_t nodeset_ = hwloc_bitmap_alloc();
        topology::bitmap_storage_.reset(new hpx_hwloc_bitmap_wrapper(nodeset_));
        nodeset = topology::bitmap_storage_.get();
    }

    hwloc_nodeset_t ns = reinterpret_cast<hwloc_nodeset_t>(nodeset->get_bmp());

    // ask hwloc where (which node set) one byte at addr is located
    int ret = hwloc_get_area_memlocation(topo, addr, 1, ns,
        HWLOC_MEMBIND_BYNODESET);
    if (ret < 0) {
        std::string msg(strerror(errno));
        HPX_THROW_EXCEPTION(kernel_error
          , "hpx::threads::topology::get_numa_domain"
          , "hwloc_get_area_memlocation failed " + msg);
        return -1;
    }

    // convert the node set into a mask and return its first set bit
    threads::mask_type mask = bitmap_to_mask(ns, HWLOC_OBJ_NUMANODE);
    return threads::find_first(mask);
#else
    return 0;
#endif
}
/// Free memory that was previously allocated by allocate
/// \param addr  pointer returned by allocate()/allocate_membind()
/// \param len   the length that was passed at allocation time
void topology::deallocate(void* addr, std::size_t len) const
{
    hwloc_free(topo, addr, len);
}
///////////////////////////////////////////////////////////////////////////
// Translate an hpx mask into a freshly allocated hwloc bitmap: for every
// bit set in the mask, set the os_index of the matching hwloc object of
// type htype. Ownership of the returned bitmap passes to the caller.
hwloc_bitmap_t topology::mask_to_bitmap(mask_cref_type mask,
    hwloc_obj_type_t htype) const
{
    hwloc_bitmap_t result = hwloc_bitmap_alloc();
    hwloc_bitmap_zero(result);

    int const obj_depth = hwloc_get_type_or_below_depth(topo, htype);
    std::size_t const num_bits = mask_size(mask);

    for (std::size_t bit = 0; bit != num_bits; ++bit)
    {
        if (!test(mask, bit))
            continue;

        hwloc_obj_t const obj =
            hwloc_get_obj_by_depth(topo, obj_depth, unsigned(bit));
        HPX_ASSERT(bit == detail::get_index(obj));
        hwloc_bitmap_set(result,
            static_cast<unsigned int>(obj->os_index));
    }
    return result;
}
///////////////////////////////////////////////////////////////////////////
// Inverse of mask_to_bitmap: build an hpx mask with a bit set for every
// object of type htype whose os_index appears in the given hwloc bitmap.
mask_type topology::bitmap_to_mask(hwloc_bitmap_t bitmap,
    hwloc_obj_type_t htype) const
{
    mask_type result = mask_type();
    std::size_t const num_objs = hwloc_get_nbobjs_by_type(topo, htype);

    int const obj_depth = hwloc_get_type_or_below_depth(topo, htype);
    for (std::size_t i = 0; i != num_objs; ++i) //-V104
    {
        hwloc_obj_t const obj = hwloc_get_obj_by_depth(
            topo, obj_depth, static_cast<unsigned>(i));
        if (hwloc_bitmap_isset(bitmap,
                static_cast<unsigned>(obj->os_index)) != 0)
        {
            set(result, detail::get_index(obj));
        }
    }
    return result;
}
///////////////////////////////////////////////////////////////////////////
// Write one affinity mask per line (hex, prefixed with
// HPX_CPU_MASK_PREFIX); writes "(empty)" when the vector holds no masks.
void topology::print_mask_vector(std::ostream& os,
    std::vector<mask_type> const& v) const
{
    if (v.empty())
    {
        os << "(empty)\n";
        return;
    }

    for (mask_type const& m : v)
    {
        os << std::hex << HPX_CPU_MASK_PREFIX << m << "\n";
    }
    os << "\n";
}
// Write the numbers as a single comma separated decimal line; writes
// "(empty)" when the vector holds no entries.
void topology::print_vector(
    std::ostream& os, std::vector<std::size_t> const& v) const
{
    if (v.empty())
    {
        os << "(empty)\n";
        return;
    }

    // first element is printed without the std::dec manipulator, matching
    // the historic output format
    os << v[0];
    for (std::size_t i = 1; i != v.size(); ++i)
    {
        os << ", " << std::dec << v[i];
    }
    os << "\n";
}
// Dump a human readable summary of the detected hardware topology:
// object counts, the cached affinity masks and the cached resource
// numbers, in that order.
void topology::print_hwloc(std::ostream& os) const
{
    os << "[HWLOC topology info] number of ...\n" << std::dec
       << "number of sockets : " << get_number_of_sockets()
       << "\n"
       << "number of numa nodes : " << get_number_of_numa_nodes()
       << "\n"
       << "number of cores : " << get_number_of_cores() << "\n"
       << "number of PUs : " << get_number_of_pus() << "\n"
       << "hardware concurrency : "
       << hpx::threads::hardware_concurrency() << "\n" << std::endl;

    //! -------------------------------------- topology (affinity masks)
    os << "[HWLOC topology info] affinity masks :\n"
       << "machine : \n"
       << std::hex << HPX_CPU_MASK_PREFIX
       << machine_affinity_mask_ << "\n";

    os << "socket : \n";
    print_mask_vector(os, socket_affinity_masks_);
    os << "numa node : \n";
    print_mask_vector(os, numa_node_affinity_masks_);
    os << "core : \n";
    print_mask_vector(os, core_affinity_masks_);
    os << "PUs (/threads) : \n";
    print_mask_vector(os, thread_affinity_masks_);

    //! -------------------------------------- topology (numbers)
    os << "[HWLOC topology info] resource numbers :\n";
    os << "socket : \n";
    print_vector(os, socket_numbers_);
    os << "numa node : \n";
    print_vector(os, numa_node_numbers_);
    os << "core : \n";
    print_vector(os, core_numbers_);
    //os << "PUs (/threads) : \n";
    //print_vector(os, pu_numbers_);
}
// Access the topology instance owned by the running hpx runtime; throws
// invalid_status when the runtime has not been initialized yet.
topology const& get_topology()
{
    hpx::runtime* runtime_ptr = hpx::get_runtime_ptr();
    if (nullptr == runtime_ptr)
    {
        HPX_THROW_EXCEPTION(invalid_status, "hpx::threads::get_topology",
            "the hpx runtime system has not been initialized yet");
    }
    return runtime_ptr->get_topology();
}
///////////////////////////////////////////////////////////////////////////
// Tag type used as the key of the static_ singleton below.
struct hardware_concurrency_tag {};

// Detects the number of hardware threads once, at construction time;
// falls back to 1 when detection reports zero.
struct hw_concurrency
{
    hw_concurrency()
#if defined(__ANDROID__) && defined(ANDROID)
      : num_of_cores_(::android_getCpuCount())
#else
      : num_of_cores_(detail::hwloc_hardware_concurrency())
#endif
    {
        // guarantee at least one processing unit
        if (num_of_cores_ == 0)
            num_of_cores_ = 1;
    }

    // cached hardware thread count
    std::size_t num_of_cores_;
};
// Return the number of hardware threads; detected once and cached in a
// process-wide singleton keyed by hardware_concurrency_tag.
std::size_t hardware_concurrency()
{
    util::static_<hw_concurrency, hardware_concurrency_tag> hwc;
    return hwc.get().num_of_cores_;
}
}}
| {
"alphanum_fraction": 0.5428243935,
"author": null,
"avg_line_length": 32.0482993197,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "4f6c540155e5f21f22a67428977b3c0c7838c89c",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d1db0def60687a662e4e25a909550f08eaf1a18a",
"max_forks_repo_licenses": [
"BSL-1.0"
],
"max_forks_repo_name": "victor-ludorum/hpx",
"max_forks_repo_path": "src/runtime/threads/topology.cpp",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "a1621a0cfa58a884b03bc8557d4f5ad896297f14",
"max_issues_repo_issues_event_max_datetime": "2018-04-20T14:17:33.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-04-20T14:17:33.000Z",
"max_issues_repo_licenses": [
"BSL-1.0"
],
"max_issues_repo_name": "ShmuelLevine/hpx",
"max_issues_repo_path": "src/runtime/threads/topology.cpp",
"max_line_length": 88,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a1621a0cfa58a884b03bc8557d4f5ad896297f14",
"max_stars_repo_licenses": [
"BSL-1.0"
],
"max_stars_repo_name": "ShmuelLevine/hpx",
"max_stars_repo_path": "src/runtime/threads/topology.cpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 11159,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 47111
} |
# coding: utf-8

# In[7]:

# SageMaker training entry point: fine-tune a pre-trained VGG19 network on
# the seedling image dataset (12 classes, 128x128 RGB inputs).

from __future__ import absolute_import
from __future__ import print_function

import glob
import os
import pickle
import urllib

import keras
import numpy as np
import pandas as pd
from keras import applications
from keras import backend as k
from keras import optimizers
from keras.callbacks import (EarlyStopping, LearningRateScheduler,
                             ModelCheckpoint, TensorBoard)
from keras.layers import (Activation, Conv2D, Dense, Dropout, Flatten,
                          GlobalAveragePooling2D, MaxPooling2D)
from keras.models import Model, Sequential
from keras.preprocessing.image import ImageDataGenerator

# sklearn.cross_validation was removed in scikit-learn 0.20; prefer the
# modern module path but keep a fallback for very old environments.
try:
    from sklearn.model_selection import train_test_split
except ImportError:
    from sklearn.cross_validation import train_test_split

from trainer.environment import create_trainer_environment


def _load_channel_pickle(channel_dir, filename):
    """Load one pickled object from a SageMaker data channel.

    Uses a context manager so the file handle is closed (the original
    left the handles open).
    """
    with open(os.path.join(channel_dir, filename), 'rb') as fh:
        return pickle.load(fh)


# the trainer environment contains useful information about the job:
# data channel directories, hyperparameters and output locations
env = create_trainer_environment()
print('creating SageMaker trainer environment:\n%s' % str(env))

# LOAD the data from the train channel
npX_keras = _load_channel_pickle(env.channel_dirs['train'], 'npX_keras.pkl')
oh_npY = _load_channel_pickle(env.channel_dirs['train'], 'oh_npY.pkl')

# Split the data into train and validation sets (80/20, fixed seed for
# reproducibility)
train_X, validation_X, train_y, validation_y = train_test_split(
    npX_keras, oh_npY, test_size=0.2, random_state=1001)

batch_size = 32
epochs = 1

# VGG19 convolutional base pre-trained on ImageNet, without its dense head
model = applications.VGG19(weights="imagenet", include_top=False,
                           input_shape=(128, 128, 3))

# Freeze the layers which we don't want to train (the first 5 layers)
for layer in model.layers[:5]:
    layer.trainable = False

# Add custom classification layers on top of the frozen base
x = model.output
x = Flatten()(x)
x = Dense(1024, activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(1024, activation="relu")(x)
predictions = Dense(12, activation="softmax")(x)

# creating the final model; Keras 2 spells these arguments inputs=/outputs=
# (the old input=/output= keywords were removed)
model_final = Model(inputs=model.input, outputs=predictions)


# In[57]:

# Initiate the train and test generators with data augmentation
img_width, img_height = 128, 128

train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    horizontal_flip=True,
    fill_mode="nearest",
    zoom_range=0.3,
    width_shift_range=0.3,
    height_shift_range=0.3,
    rotation_range=30)

test_datagen = ImageDataGenerator(
    rescale=1. / 255,
    horizontal_flip=True,
    fill_mode="nearest",
    zoom_range=0.3,
    width_shift_range=0.3,
    height_shift_range=0.3,
    rotation_range=30)

train_generator = train_datagen.flow(train_X, train_y, batch_size=batch_size)
validation_generator = test_datagen.flow(validation_X, validation_y)

# Save the model according to the conditions
#checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', verbose=1,
#    save_best_only=True, save_weights_only=False, mode='auto', period=1)
#early = EarlyStopping(monitor='val_acc', min_delta=0, patience=10,
#    verbose=0, mode='auto')


# ## PREP for AWS Sagemaker - start.py

# In[30]:

MODEL_NAME = 'seedling_model.h5'

# getting the hyperparameters; default batch_size matches the value above
# so a missing hyperparameter no longer yields None
batch_size = env.hyperparameters.get('batch_size', default=32, object_type=int)
learning_rate = env.hyperparameters.get('learning_rate', default=.0001,
                                        object_type=float)
EPOCHS = env.hyperparameters.get('epochs', default=10, object_type=int)

# TRAIN Model
n_train_samples = train_X.shape[0]
n_validation_samples = validation_X.shape[0]

# number of generator batches per epoch; at least 1 so tiny datasets train
steps_per_epoch = max(1, n_train_samples // batch_size)
validation_steps = max(1, n_validation_samples // batch_size)

# compile the model
#model_final.compile(loss="categorical_crossentropy",
#    optimizer=optimizers.Adam(lr=learning_rate), metrics=["accuracy"])
model_final.compile(loss="categorical_crossentropy",
                    optimizer=optimizers.SGD(lr=learning_rate, momentum=0.9),
                    metrics=["accuracy"])

# Keras 2 expects steps_per_epoch (batches per epoch); the legacy
# samples_per_epoch keyword would be divided by the batch size again,
# which is wrong since the value was already samples/batch_size
model_final.fit_generator(
    train_generator,
    steps_per_epoch=steps_per_epoch,
    epochs=EPOCHS,
    validation_data=validation_generator,
    validation_steps=validation_steps)

# Save model and weights
model_path = os.path.join(env.model_dir, MODEL_NAME)
model_final.save(model_path)
print('Saved trained model at %s ' % model_path)

# Score trained model (verbose= is not accepted by evaluate_generator in
# older Keras 2 releases, so it is not passed)
scores = model_final.evaluate_generator(validation_generator, validation_steps)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
| {
"alphanum_fraction": 0.7049956934,
"author": null,
"avg_line_length": 32.9361702128,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "759774f3d3d6a3fc8d8140381570bd61e664f568",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-03-04T18:16:57.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-03-04T18:16:57.000Z",
"max_forks_repo_head_hexsha": "6ce1ad1acc80420b15ef043b6f18b9a6bca8e737",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "ThePrecious/ml_projects",
"max_forks_repo_path": "3_Sagemaker_Seedling/trainer/start.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6ce1ad1acc80420b15ef043b6f18b9a6bca8e737",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "ThePrecious/ml_projects",
"max_issues_repo_path": "3_Sagemaker_Seedling/trainer/start.py",
"max_line_length": 116,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "6ce1ad1acc80420b15ef043b6f18b9a6bca8e737",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "ThePrecious/ml_projects",
"max_stars_repo_path": "3_Sagemaker_Seedling/trainer/start.py",
"max_stars_repo_stars_event_max_datetime": "2019-06-22T17:48:38.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-10-10T20:32:11.000Z",
"num_tokens": 1054,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4644
} |
#!/usr/bin/env python
# coding: utf-8

# Draw a seaborn clustermap of Mariana Trench bathymetry observations
# (25 cross-section profiles, 518 observations each — per the title below)
# and save it as a 300 dpi PNG.

import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sb

# global seaborn styling for a clean, paper-ready figure
sb.set(style="white")
sb.set(color_codes=True)
sb.set_context('paper')

# NOTE(review): hard-coded absolute path — the script only runs with this
# machine/layout as-is
os.chdir('/Users/pauline/Documents/Python')
df = pd.read_csv("Tab-Bathy.csv")

# define variables and plotting
g = sb.clustermap(df, cmap="BuPu")

# rotate the x tick labels on every axes of the clustermap figure
rotation = 45
for i, ax in enumerate(g.fig.axes):  # getting all axes of the fig object
    ax.set_xticklabels(ax.get_xticklabels(),
                       rotation = rotation
                       )

g.fig.suptitle('Mariana Trench: Clustermap of the bathymetric observations. \nDataset: 25 cross-section profiles, 518 observations in each')

# leave margins for the rotated labels and the suptitle
plt.subplots_adjust(bottom=0.20, top=0.90,
                    right=0.90, left=0.10
                    )

# printing and saving
plt.savefig('plot_Clust3.png', dpi=300)
plt.show()
| {
"alphanum_fraction": 0.6756756757,
"author": null,
"avg_line_length": 28.6451612903,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d5c887b723027fd64248cc2f672296905ecffd1e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b305f4b344f631d1c459d4f62bbd9587cc51c91f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "paulinelemenkova/Python-script-011-Clustermap",
"max_forks_repo_path": "Script-011-Clustermap-3.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b305f4b344f631d1c459d4f62bbd9587cc51c91f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "paulinelemenkova/Python-script-011-Clustermap",
"max_issues_repo_path": "Script-011-Clustermap-3.py",
"max_line_length": 140,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b305f4b344f631d1c459d4f62bbd9587cc51c91f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "paulinelemenkova/Python-script-011-Clustermap",
"max_stars_repo_path": "Script-011-Clustermap-3.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 228,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 888
} |
module numz
!*************************************
! Shared kind parameters and I/O unit numbers for the program.
    integer, parameter:: b8 = selected_real_kind(14) ! basic real types
    integer, parameter:: b4 = selected_real_kind(4)  ! single precision kind
!    integer, parameter:: i8 = selected_int_kind(14)
    integer, parameter:: out1 = 11 ! output file number for writes
    integer, parameter:: out2 = 20 ! second output unit
    integer, parameter:: in1 = 13  ! first input unit
    integer, parameter:: in2 = 14  ! second input unit
!    real(b8), parameter :: pi = 3.141592653589793239_b8
end module
| {
"alphanum_fraction": 0.6316916488,
"author": null,
"avg_line_length": 35.9230769231,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "446922b4774ae04c8bf460371ec6e22897976767",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "04c162ec890a1c9ba83498b275fbdc81a4704062",
"max_forks_repo_licenses": [
"Unlicense"
],
"max_forks_repo_name": "timkphd/examples",
"max_forks_repo_path": "darwin19/numz.f90",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "04c162ec890a1c9ba83498b275fbdc81a4704062",
"max_issues_repo_issues_event_max_datetime": "2022-02-09T01:59:47.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-02-09T01:59:47.000Z",
"max_issues_repo_licenses": [
"Unlicense"
],
"max_issues_repo_name": "timkphd/examples",
"max_issues_repo_path": "darwin19/numz.f90",
"max_line_length": 71,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "04c162ec890a1c9ba83498b275fbdc81a4704062",
"max_stars_repo_licenses": [
"Unlicense"
],
"max_stars_repo_name": "timkphd/examples",
"max_stars_repo_path": "darwin19/numz.f90",
"max_stars_repo_stars_event_max_datetime": "2022-01-24T19:09:47.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-11-01T00:29:22.000Z",
"num_tokens": 130,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 467
} |
"""
Set of functions for measuring the spectral properties of galaxies.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..plot import Plot
from astropy.stats import sigma_clip
from astropy.constants import c
from astropy.units import km, s, erg, cm, angstrom
from astropy.modeling.models import Gaussian1D, GaussianAbsorption1D
from astropy.modeling.fitting import LevMarLSQFitter
from math import fabs
import matplotlib.pyplot as pl
import numpy as np
import random
__all__ = ['line_measurements']

# Speed of light in km/s, used to convert velocity bounds to wavelength.
c_kms = c.to(km / s).value

# Vacuum rest wavelengths (Angstrom) of the measured transitions.
transition_wavelengths = {'OII': 3728.484,
                          'Hd': 4102.890,
                          'Hg': 4341.680,
                          'Hb': 4862.680,
                          'OIIIB': 4960.295,
                          'OIIIR': 5008.240,
                          'NIIB': 6549.840,
                          'Ha': 6564.610,
                          'NIIR': 6585.230,
                          'SIIB': 6718.320,
                          'SIIR': 6732.710}  # Vacuum wavelengths

# Rest wavelengths of the two [OII] doublet components (Angstrom).
o2_doublet = (3727.09, 3729.88)

# Observed-frame range of the O2 telluric absorption band (flag 4).
o2 = (7580, 7680)
# Observed-frame range of the OH sky-line forest (flag 1).
oh = (8600, 8700)

# Maximum intrinsic Gaussian line width, rest-frame Angstrom.
sigma_max = 10

# Annotation box style used when plotting.
bbox = dict(facecolor='w', edgecolor='none')

# Compound astropy models for multi-component Gaussian fits.
TwoGaussians = (Gaussian1D + Gaussian1D).rename('TwoGaussians')
ThreeGaussians = (Gaussian1D + Gaussian1D +
                  Gaussian1D).rename('ThreeGaussians')
def tie_sigma(model):
    """Tie a component's width to the first component's stddev."""
    return model.stddev_0
def tie_sii(model):
    """Tie a component's amplitude to 1/1.4 of the first component's."""
    return model.amplitude_0 / 1.4
def tie_nii(model):
    """Tie a component's amplitude to 1/2.95 of the first component's."""
    return model.amplitude_0 / 2.95
def tie_oiii(model):
    """Tie a component's amplitude to 1/2.98 of the first component's."""
    return model.amplitude_0 / 2.98
def clean_spectrum(wavelength, flux, error):
    """Drop pixels with infinite error, zero flux, or NaN flux.

    Returns the filtered wavelength/flux/error arrays and the boolean
    mask of pixels that were kept.
    """
    # note: ~isinf (not isfinite) on the error array, so NaN errors pass
    keep = ~np.isinf(error) & (flux != 0) & ~np.isnan(flux)
    return wavelength[keep], flux[keep], error[keep], keep
def monte_carlo_error(wavelength, flux, error, continuum, fitter, init,
                      absorption=False, n=100):
    """Estimate 1-sigma errors on fitted parameters by Monte Carlo.

    Refits ``n`` noise realisations of the spectrum — each pixel is
    perturbed by a Gaussian draw scaled by the error array — and returns
    a dict mapping every parameter name of ``init`` to the standard
    deviation of its fitted values across the realisations.
    """
    npix = len(error)
    fits = []
    for _ in range(n):
        if absorption:
            # work on the continuum-normalised spectrum
            noise = np.array([random.gauss(0, 1) * error[j] / continuum[j]
                              for j in range(npix)])
            realisation = (flux / continuum) + noise
        else:
            # work on the continuum-subtracted spectrum
            noise = np.array([random.gauss(0, 1) * error[j]
                              for j in range(npix)])
            realisation = flux - continuum + noise
        fits.append(fitter(init, wavelength, realisation))

    # collect the fitted value of each parameter across all realisations
    samples = {name: [] for name in init.param_names}
    for fit in fits:
        for name, value in zip(fit.param_names, fit.parameters):
            samples[name].append(value)

    # the scatter of each parameter is its Monte Carlo error estimate
    return {name: np.std(np.array(values))
            for name, values in samples.items()}
def line_measurements(name, spec1d, z, sky=None, spec2d=None, resolution=200,
yposition=None, sky_threshold=None, fit_o2_doublet=False,
plot=False, show_plot=False, plot_directory=None,
sky_in_counts=False):
"""
Measures emission line fluxes and equivalent widths from a galaxy
spectrum with redshift z.
Parameters
----------
name : str
Object ID.
spec1d : `igmtools.data.Spectrum1D`
1D spectrum.
z : float
Galaxy redshift.
sky : array
1D sky spectrum.
spec2d : `igmtools.data.Spectrum2D`
2D spectrum.
R : int, optional
Spectral resolution (default = 200).
yposition : tuple, optional
y-coordinates of the object on the 2D spectrum
(edge 1, centre, edge 2).
sky_threshold : float, optional
Sky counts/flux value above which flag 3 is raised (useful for
eliminating zero-order contamination and regions of bad sky
subtraction).
fit_o2_doublet : bool, optional
Option to fit two Gaussian components to the OII line
(default = False).
plot : bool, optional
Option to plot continuum estimates across bands where a measurement
is executed, and the Gaussian fit, if performed (default = False).
show_plot : bool, optional
Option to show each plot in an interactive window (default = False).
plot_directory : str, optional
If specified, plots will be saved in this directory as PNG files.
sky_in_counts : bool, optional
Set to True if the sky spectrum is in counts rather than flux units.
Returns
-------
measurements : `astropy.table.Table`
The line measurements.
Notes
-----
Measurements are made using the following line indicies:
OII : (3655.0, 3705.0, 3708.5, 3748.5, 3750.0, 3800.0)
Hd : (4030.0, 4080.0, 4082.0, 4122.0, 4125.0, 4170.0)
Hg : (4230.0, 4270.0, 4321.5, 4361.5, 4365.0, 4400.0)
Hb : (4785.0, 4820.0, 4842.5, 4882.5, 5030.0, 5100.0)
OIII : (4785.0, 4820.0, 4988.0, 5028.0, 5030.0, 5100.0)
Ha : (6460.0, 6520.0, 6544.5, 6584.5, 6610.0, 6670.0),
SII : (6640.0, 6700.0, 6713.0, 6753.0, 6760.0, 6810.0)
These line indicies are optimised for spectra with R ~ 200, and have
measurement windows 20 Angstroms wide. We assume the maximum intrinsic
line width to be that of a Gaussian with a standard deviation of 10
Angstroms in the rest frame. Convolved with the instrument line spread
function, this corresponds to measurement windows of width close to
the maximum expected standard deviation of the Gaussian line profile.
For specified instrument resolutions different to the default value of
200, measurement windows are scaled to preserve this feature.
Lines with integrated fluxes measured at greater than 3 sigma
significance are fitted with Gaussians. The measurements are then
taken from the Gaussian fitting parameters and their errors computed
from a Monte-Carlo type estimation. The maximum allowed Gaussian
standard deviation corresponds to 10 Angstroms in the intrinsic line
profile, and the minimum to 0.5 times that of that instrumental line
spread function.
If Hdelta is absorption dominated at a greater than 3 sigma level,
a Gaussian absorption profile is fitted. This is motivated by the idea
that Hdelta may be used as a proxy for the Balmer absorption correction.
Equivalent widths are positive for emission lines, and negative for
absorption lines.
Warning flags are defined as follows:
0 : No warnings.
1 : Measurement may be affected by the OH forest between 8600 and 8700
Angstrom.
2 : Line was fit with the maximum/minimum allowed Gaussian standard
deviation.
3 : Line coincides with region above the specified sky threshold.
4 : Line may be affected by O2 telluric absorption (7580 - 7680 Angstrom).
5 : Bad continuum reduced chi squared (> 10).
6 : No spectral coverage, or human verification failed.
No measurement is recorded for flag 6 - all values are set to -99.0.
"""
plot = True if show_plot else plot
bands = {'OII': [3655.0, 3705.0, 3708.5, 3748.5, 3750.0, 3800.0],
'Hd': [4030.0, 4080.0, 4082.0, 4122.0, 4125.0, 4170.0],
'Hg': [4230.0, 4270.0, 4321.5, 4361.5, 4365.0, 4400.0],
'Hb': [4785.0, 4820.0, 4842.5, 4882.5, 5030.0, 5100.0],
'OIII': [4785.0, 4820.0, 4988.0, 5028.0, 5030.0, 5100.0],
'Ha': [6460.0, 6520.0, 6544.5, 6584.5, 6610.0, 6670.0],
'SII': [6640.0, 6700.0, 6713.0, 6753.0, 6760.0, 6810.0]}
# Modify measurement windows if appropriate:
if resolution != 200:
sigma_max200 = 18.8 # Max Gaussian sigma for R = 200 (approx)
dlambda = 7500 / resolution
sigma_lsf = dlambda / 2.35482
sigma_max_convolved = np.sqrt(sigma_lsf ** 2 + sigma_max ** 2)
scale_factor = sigma_max_convolved / sigma_max200
for key in bands.keys():
window = bands[key][3] - bands[key][2]
window0 = window * scale_factor
bands[key][1] += (window - window0) / 2
bands[key][2] += (window - window0) / 2
bands[key][3] -= (window - window0) / 2
bands[key][4] -= (window - window0) / 2
# Initialise dictionaries:
(line_flux, continuum_flux, eqw, sn,
continuum_params, line_params, flags) = {}, {}, {}, {}, {}, {}, {}
# 1D spectrum arrays:
wavelength = spec1d.wavelength.value
flux = spec1d.flux.value
error = spec1d.flux.uncertainty.value
# Clean the spectrum:
wavelength, flux, error, cond = clean_spectrum(wavelength, flux, error)
if sky is not None:
sky = sky[cond]
# Do measurements:
for key in bands.keys():
# Initialise dictionary for continuum parameters:
continuum_params[key] = {}
# Line groupings:
if (key == 'OII') and fit_o2_doublet:
lines = ['OIIR', 'OIIB']
rest_wavelengths = o2_doublet
elif key == 'OIII':
lines = ['OIIIR', 'OIIIB']
rest_wavelengths = [transition_wavelengths['OIIIR'],
transition_wavelengths['OIIIB']]
elif key == 'Ha':
lines = ['NIIR', 'Ha', 'NIIB']
rest_wavelengths = [transition_wavelengths['NIIR'],
transition_wavelengths['Ha'],
transition_wavelengths['NIIB']]
elif key == 'SII':
lines = ['SIIR', 'SIIB']
rest_wavelengths = [transition_wavelengths['SIIR'],
transition_wavelengths['SIIB']]
else:
lines = [key]
rest_wavelengths = [transition_wavelengths[key]]
# Initialise dictionaries for line parameters:
for line in lines:
line_params[line] = {}
# Observed wavelengths of the lines:
observed_wavelengths = [item * (1 + z) for item in rest_wavelengths]
# Fitting/measurement regions:
co_blue = ((wavelength >= bands[key][0] * (1 + z)) &
(wavelength < bands[key][1] * (1 + z)))
co_red = ((wavelength >= bands[key][4] * (1 + z)) &
(wavelength < bands[key][5] * (1 + z)))
co_region = co_red | co_blue
line_region = ((wavelength >= bands[key][2] * (1 + z)) &
(wavelength <= bands[key][3] * (1 + z)))
# Extended region around the measurement. Used for excluding
# measurements affected by zero orders:
centre = ((bands[key][2] * (1 + z)) + (bands[key][3] * (1 + z))) / 2
centre_band = ((wavelength >= centre - 100) &
(wavelength <= centre + 100))
# The full fitting region:
region = ((wavelength >= bands[key][0] * (1 + z)) &
(wavelength < bands[key][5] * (1 + z)))
# Masks to identify regions potentially affected by 7600A O2 telluric
# absorption:
o2_blue = ((wavelength[co_blue] >= o2[0]) &
(wavelength[co_blue] <= o2[1]))
o2_red = (wavelength[co_red] >= o2[0]) & (wavelength[co_red] <= o2[1])
o2_line = ((wavelength[line_region] >= o2[0]) &
(wavelength[line_region] <= o2[1]))
# Masks to identify regions potentially affected by the OH forest:
oh_blue = ((wavelength[co_blue] >= oh[0]) &
(wavelength[co_blue] <= oh[1]))
oh_red = (wavelength[co_red] >= oh[0]) & (wavelength[co_red] <= oh[1])
oh_line = ((wavelength[line_region] >= oh[0]) &
(wavelength[line_region] <= oh[1]))
# Assume the measurement will be good at first:
flags[key] = 0
# Check that we have spectral coverage:
if ((np.sum(co_blue) < 5) | (np.sum(co_red) < 5) |
(flux[co_blue] == 0).all() | (flux[co_red] == 0).all()):
# If no coverage, mark all measurements as -99.0 and assign flag
# 6, then go to next iteration of the loop:
for line in lines:
line_flux[line] = (-99.0, -99.0)
continuum_flux[line] = (-99.0, -99.0)
eqw[line] = (-99.0, -99.0)
line_params[line]['amplitude'] = -99.0
line_params[line]['mean'] = -99.0
line_params[line]['stddev'] = -99.0
continuum_params[key]['gradient'] = -99.0
continuum_params[key]['intercept'] = -99.0
continuum_params[key]['chi2norm'] = -99.0
sn[key] = -99.0
flags[key] = 6
continue
# See if we're affected by 7600A O2 telluric absorption:
if ((np.sum(o2_blue) > 0) | (np.sum(o2_red) > 0) |
(np.sum(o2_line) > 0)):
flags[key] = 4
# See if we're affected by OH forest:
if ((np.sum(oh_blue) > 0) | (np.sum(oh_red) > 0) |
(np.sum(oh_line) > 0)):
flags[key] = 1
# Assign sky threshold flag if a value is specified and it exceeds
# this:
if sky_threshold is not None:
if any(sky0 > sky_threshold for sky0 in sky[centre_band]):
flags[key] = 3
# Sigma clip the continuum, to ensure it's not affected by nearby
# absorption features:
filtered_blue = sigma_clip(flux[co_blue], 1.5)
filtered_red = sigma_clip(flux[co_red], 1.5)
# Take the mean value of the sigma clipped continuum either side of the
# line:
co_level1 = np.mean(filtered_blue)
co_level1_error = np.std(filtered_blue)
co_level2 = np.mean(filtered_red)
co_level2_error = np.std(filtered_red)
# Linearly interpolate between these values:
continuum = ((co_level2 - co_level1) /
(np.mean(wavelength[co_red]) -
np.mean(wavelength[co_blue])) *
(wavelength - np.mean(wavelength[co_blue])) + co_level1)
continuum_error = np.sqrt(
co_level1_error ** 2 + co_level2_error ** 2) / 2
# Continuum gradient:
gradient = ((continuum[1] - continuum[0]) /
(wavelength[1] - wavelength[0]))
continuum_params[key]['gradient'] = gradient
# Continuum intercept:
intercept = continuum[0] - gradient * wavelength[0]
continuum_params[key]['intercept'] = intercept
# Flag if normalised continuum chi squared > 10:
cont_chi2norm = (np.sum(
(flux[co_region] - continuum[co_region]) ** 2 /
error[co_region] ** 2) / (len(flux[co_region]) - 3))
if cont_chi2norm > 10:
flags[key] = 5
continuum_params[key]['chi2norm'] = cont_chi2norm
# Estimate integrated line flux and equivalent width (observed,
# not rest frame):
dl = np.mean(wavelength[line_region][1:] -
wavelength[line_region][:-1])
n = np.sum(line_region)
line_flux_value = dl * np.sum(
flux[line_region] - continuum[line_region])
line_flux_error = dl * np.sqrt(
np.sum(error[line_region] ** 2) + n * continuum_error ** 2)
eqw_value = dl * np.sum(
flux[line_region] / continuum[line_region]) - n
eqw_error = dl * np.sqrt(
np.sum(error[line_region] ** 2 / continuum[line_region] ** 2) +
n * continuum_error ** 2 *
np.sum(flux[line_region] ** 2 / continuum[line_region] ** 4))
# Continuum flux at the line centre:
ind = np.abs(wavelength - observed_wavelengths[0]).argmin()
centre_flux = continuum[ind]
centre_flux_error = error[ind]
# Estimate signal-to-noise ratio around the line:
sn_blue = (filtered_blue[~filtered_blue.mask] /
error[co_blue][~filtered_blue.mask])
sn_red = (filtered_red[~filtered_red.mask] /
error[co_red][~filtered_red.mask])
sn_value = np.average(np.concatenate([sn_blue, sn_red]))
# Calculate minimum and maximum allowed Gaussian standard
# deviations:
dlambda = rest_wavelengths[0] / resolution
sigma_lsf = dlambda / 2.35482
min_stddev = sigma_lsf / 2
max_stddev = np.sqrt(sigma_lsf ** 2 + sigma_max ** 2) * (1 + z)
# Fit Gaussian component(s) if the integrated line flux is
# positive and has greater than 3 sigma significance:
if (line_flux_value > 0) & ((line_flux_value / line_flux_error) > 3):
amplitude = np.max(flux[line_region] - continuum[line_region])
if ((key in ('Hg', 'Hd')) |
((key == 'OII') and not fit_o2_doublet)):
# One component Gaussian fit for Hg, Hd, OII:
# -------------------------------------------
mean = observed_wavelengths[0]
g_init = Gaussian1D(amplitude, mean, min_stddev)
g_init.amplitude.min = 0.0
g_init.mean.min = mean - (1000 * mean / c_kms)
g_init.mean.max = mean + (1000 * mean / c_kms)
g_init.stddev.min = min_stddev
g_init.stddev.max = max_stddev
# -------------------------------------------
elif (key == 'OII') and fit_o2_doublet:
# Optional two component Gaussian fit for OII:
# --------------------------------------------
mean_0 = observed_wavelengths[0]
mean_1 = observed_wavelengths[1]
tied_params = {'stddev_1': tie_sigma}
g_init = TwoGaussians(
amplitude, mean_0, min_stddev, amplitude, mean_1,
min_stddev, tied=tied_params)
g_init.amplitude_0.min = 0.0
g_init.amplitude_1.min = 0.0
g_init.mean_0.min = mean_0 - (1000 * mean_0 / c_kms)
g_init.mean_0.max = mean_0 + (1000 * mean_0 / c_kms)
g_init.mean_1.min = mean_1 - (1000 * mean_1 / c_kms)
g_init.mean_1.max = mean_1 + (1000 * mean_1 / c_kms)
g_init.stddev_0.min = min_stddev
g_init.stddev_0.max = max_stddev
g_init.stddev_1.min = min_stddev
g_init.stddev_1.max = max_stddev
# --------------------------------------------
elif key == 'SII':
# Two component Gaussian fit for SII:
# -----------------------------------
mean_0 = observed_wavelengths[0]
mean_1 = observed_wavelengths[1]
tied_params = {'amplitude_1': tie_sii,
'stddev_1': tie_sigma}
g_init = TwoGaussians(
amplitude, mean_0, min_stddev, amplitude, mean_1,
min_stddev, tied=tied_params)
g_init.amplitude_0.min = 0.0
g_init.amplitude_1.min = 0.0
g_init.mean_0.min = mean_0 - (1000 * mean_0 / c_kms)
g_init.mean_0.max = mean_0 + (1000 * mean_0 / c_kms)
g_init.mean_1.min = mean_1 - (1000 * mean_1 / c_kms)
g_init.mean_1.max = mean_1 + (1000 * mean_1 / c_kms)
g_init.stddev_0.min = min_stddev
g_init.stddev_0.max = max_stddev
g_init.stddev_1.min = min_stddev
g_init.stddev_1.max = max_stddev
# -----------------------------------
elif key in ('Hb', 'OIII'):
# Three component fit over Hb/OIII region:
# ----------------------------------------
mean_0 = observed_wavelengths[0]
if key == 'Hb':
mean_1 = transition_wavelengths['OIIIB'] * (1 + z)
mean_2 = transition_wavelengths['OIIIR'] * (1 + z)
tied_params = {'stddev_1': tie_sigma,
'stddev_2': tie_sigma}
else:
mean_1 = transition_wavelengths['OIIIB'] * (1 + z)
mean_2 = transition_wavelengths['Hb'] * (1 + z)
tied_params = {'amplitude_1': tie_oiii,
'stddev_1': tie_sigma,
'stddev_2': tie_sigma}
g_init = ThreeGaussians(
amplitude, mean_0, min_stddev, amplitude, mean_1,
min_stddev, amplitude, mean_2, min_stddev,
tied=tied_params)
g_init.amplitude_0.min = 0.0
g_init.amplitude_1.min = 0.0
g_init.amplitude_2.min = 0.0
g_init.mean_0.min = mean_0 - (1000 * mean_0 / c_kms)
g_init.mean_0.max = mean_0 + (1000 * mean_0 / c_kms)
g_init.mean_1.min = mean_1 - (1000 * mean_1 / c_kms)
g_init.mean_1.max = mean_1 + (1000 * mean_1 / c_kms)
g_init.mean_2.min = mean_2 - (1000 * mean_2 / c_kms)
g_init.mean_2.max = mean_2 + (1000 * mean_2 / c_kms)
g_init.stddev_0.min = min_stddev
g_init.stddev_0.max = max_stddev
g_init.stddev_1.min = min_stddev
g_init.stddev_1.max = max_stddev
g_init.stddev_2.min = min_stddev
g_init.stddev_2.max = max_stddev
# ----------------------------------------
else:
# Try one and three component fit over Ha/NII region:
# ---------------------------------------------------
mean_1 = observed_wavelengths[1]
g_init = Gaussian1D(amplitude, mean_1, min_stddev)
g_init.amplitude.min = 0.0
g_init.mean.min = mean_1 - (1000 * mean_1 / c_kms)
g_init.mean.max = mean_1 + (1000 * mean_1 / c_kms)
g_init.stddev.min = min_stddev
g_init.stddev.max = max_stddev
mean_0 = observed_wavelengths[0]
mean_2 = observed_wavelengths[2]
tied_params = {'amplitude_2': tie_nii,
'stddev_1': tie_sigma,
'stddev_2': tie_sigma}
g_init2 = ThreeGaussians(
amplitude, mean_0, min_stddev, amplitude, mean_1,
min_stddev, amplitude, mean_2, min_stddev,
tied=tied_params)
g_init2.amplitude_0.min = 0.0
g_init2.amplitude_1.min = 0.0
g_init2.amplitude_2.min = 0.0
g_init2.mean_0.min = mean_0 - (1000 * mean_0 / c_kms)
g_init2.mean_0.max = mean_0 + (1000 * mean_0 / c_kms)
g_init2.mean_1.min = mean_1 - (1000 * mean_1 / c_kms)
g_init2.mean_1.max = mean_1 + (1000 * mean_1 / c_kms)
g_init2.mean_2.min = mean_2 - (1000 * mean_2 / c_kms)
g_init2.mean_2.max = mean_2 + (1000 * mean_2 / c_kms)
g_init2.stddev_0.min = min_stddev
g_init2.stddev_0.max = max_stddev
g_init2.stddev_1.min = min_stddev
g_init2.stddev_1.max = max_stddev
g_init2.stddev_2.min = min_stddev
g_init2.stddev_2.max = max_stddev
# ---------------------------------------------------
# Do the fitting:
fit_g = LevMarLSQFitter()
g = fit_g(g_init, wavelength[region],
flux[region] - continuum[region])
# Chi2 on the fit:
line_chi2 = np.sum(
(flux[region] - continuum[region] -
g(wavelength)[region]) ** 2 / error[region] ** 2)
# Monte carlo error estimation:
g_errors = monte_carlo_error(
wavelength[region], flux[region], error[region],
continuum[region], fit_g, g_init)
ha_3comp = False
# Compare chi squared values for the two Ha/NII fits and adopt
# the one that has the minimum chi squared:
if key == 'Ha':
# Three component fit of Ha/NII region:
fit_g2 = LevMarLSQFitter()
g2 = fit_g2(g_init2, wavelength[region],
flux[region] - continuum[region])
# Monte carlo error estimation:
g2_errors = monte_carlo_error(
wavelength[region], flux[region], error[region],
continuum[region], fit_g2, g_init2)
# Chi2 on the fit:
line_chi2_2 = np.sum(
(flux[region] - continuum[region] -
g2(wavelength[region])) ** 2 /
(g2(wavelength[region]) + continuum[region]))
# Compare chi2:
if line_chi2 > line_chi2_2:
g = g2
g_errors = g2_errors
ha_3comp = True
# Get lists of best-fit Gaussian parameters:
if ((key in ('Hg', 'Hd')) |
((key == 'OII') and not fit_o2_doublet)):
amplitudes = [g.amplitude.value]
amplitude_errors = [g_errors['amplitude']]
means = [g.mean.value]
stddevs = [g.stddev.value]
stddev_errors = [g_errors['stddev']]
elif ((key == 'OII') and fit_o2_doublet) | (key == 'SII'):
amplitudes = [g.amplitude_0.value, g.amplitude_1.value]
amplitude_errors = [g_errors['amplitude_0'],
g_errors['amplitude_1']]
means = [g.mean_0.value, g.mean_1.value]
stddevs = [g.stddev_0.value, g.stddev_1.value]
stddev_errors = [g_errors['stddev_0'],
g_errors['stddev_1']]
elif ((key == 'Ha') and ha_3comp) | (key in ('Hb', 'OIII')):
amplitudes = [g.amplitude_0.value, g.amplitude_1.value,
g.amplitude_2.value]
amplitude_errors = [g_errors['amplitude_0'],
g_errors['amplitude_1'],
g_errors['amplitude_2']]
means = [g.mean_0.value, g.mean_1.value, g.mean_2.value]
stddevs = [g.stddev_0.value, g.stddev_1.value,
g.mean_2.value]
stddev_errors = [g_errors['stddev_0'],
g_errors['stddev_1'],
g_errors['stddev_2']]
else:
amplitudes = [g.amplitude.value, -99.0, -99.0]
amplitude_errors = [g_errors['amplitude'], -99.0, -99.0]
means = [g.mean.value, -99.0, -99.0]
stddevs = [g.stddev.value, -99.0, -99.0]
stddev_errors = [g_errors['stddev'], -99.0, -99.0]
# Log these line by line:
for i, line in enumerate(lines):
# Log the line fitting parameters:
line_params[line]['amplitude'] = amplitudes[i]
line_params[line]['mean'] = means[i]
line_params[line]['stddev'] = stddevs[i]
# Only adopt the measurements if the fitted amplitude is
# non-zero, otherwise, measurements from direct integration
# of pixels are retained:
if amplitudes[i] != 0:
# Integrated line flux:
line_flux_value = (amplitudes[i] * stddevs[i] *
np.sqrt(2 * np.pi))
# Error on the integrated line flux:
line_flux_error = line_flux_value * np.sqrt(
(amplitude_errors[i] / amplitudes[i]) ** 2 +
(stddev_errors[i] / stddevs[i]) ** 2)
# Re-evaluate the continuum flux at the line centre:
ind = np.abs(wavelength - means[i]).argmin()
centre_flux = continuum[ind]
centre_flux_error = error[ind]
# Equivalent width:
eqw_value = line_flux_value / centre_flux
# Error on the equivalent width:
eqw_error = eqw_value * np.sqrt(
(line_flux_error / line_flux_value) ** 2 +
(centre_flux_error / centre_flux) ** 2)
# Log the line flux, continuum flux and equivalent width:
line_flux[line] = (line_flux_value, line_flux_error)
continuum_flux[line] = (centre_flux, centre_flux_error)
eqw[line] = (eqw_value, eqw_error)
fit = True
fit_hd = False
# Fit single Gaussian absorption component to Hd if the integrated
# line flux is negative and has greater than 3 sigma significance:
elif ((key == 'Hd') & (line_flux_value < 0) &
((line_flux_value / line_flux_error) < -3)):
amplitude = np.max(1 - flux[line_region] / continuum[line_region])
mean = transition_wavelengths[key] * (1 + z)
dm = 1000 * mean / c_kms
g_init = GaussianAbsorption1D(amplitude, mean, min_stddev)
g_init.mean.min = mean - dm
g_init.mean.max = mean + dm
g_init.stddev.min = min_stddev
g_init.stddev.max = max_stddev
# Do the fitting:
fit_g = LevMarLSQFitter()
g = fit_g(g_init, wavelength[region],
flux[region] / continuum[region])
# Monte carlo error estimation:
g_errors = monte_carlo_error(
wavelength[region], flux[region], error[region],
continuum[region], fit_g, g_init, absorption=True)
# Equivalent width:
eqw_value = (-g.amplitude.value * g.stddev.value *
np.sqrt(2 * np.pi) / (1 + z))
# Error on the equivalent width:
eqw_error = fabs(eqw_value) * np.sqrt(
(g_errors['amplitude'] / g.amplitude.value) ** 2 +
(g_errors['stddev'] / g.stddev.value) ** 2)
for line in lines:
# Log the line fitting parameters:
line_params[line]['amplitude'] = g.amplitude.value
line_params[line]['mean'] = g.mean.value
line_params[line]['stddev'] = g.stddev.value
# Log the line flux, continuum flux and equivalent width:
line_flux[line] = (line_flux_value, line_flux_error)
continuum_flux[line] = (centre_flux, centre_flux_error)
eqw[line] = (eqw_value, eqw_error)
fit = False
fit_hd = True
# Otherwise we won't do any line fitting:
else:
for line in lines:
# Set all line fitting parameters to -99:
line_params[line]['amplitude'] = -99.0
line_params[line]['mean'] = -99.0
line_params[line]['stddev'] = -99.0
# Log the line flux, continuum flux and equivalent width:
line_flux[line] = (line_flux_value, line_flux_error)
continuum_flux[line] = (centre_flux, centre_flux_error)
eqw[line] = (eqw_value, eqw_error)
fit = False
fit_hd = False
sn[key] = sn_value
# Make plots if that option is turned on:
if plot:
if sky is not None and spec2d is not None:
n = 3
p = Plot(n, 1, n, aspect=1, width=5.9, fontsize=12)
elif ((sky is not None and spec2d is None) |
(spec2d is not None and sky is None)):
n = 2
p = Plot(n, 1, n, aspect=0.8, width=5.9, fontsize=12)
else:
n = 1
p = Plot(n, 1, n, aspect=0.6, width=5.9, fontsize=12)
centre = (bands[key][0] * (1 + z) + bands[key][5] * (1 + z)) / 2
cond = (wavelength > centre - 250) & (wavelength < centre + 250)
if spec2d is not None:
cond2 = ((spec2d.wavelength.value > centre - 250) &
(spec2d.wavelength.value < centre + 250))
# 2D spectrum plot:
if spec2d is not None:
n = 3 if n == 3 else 2
# 2D spectrum parameters for plotting:
i = min(spec2d.data.shape[0] // 2, 3)
v1 = np.percentile(spec2d.data[i:-1, :].ravel(), 90)
wdelt = spec2d.wavelength.value[1] - spec2d.wavelength.value[0]
yvals = np.arange(spec2d.data.shape[0]) * wdelt
p.axes[n - n].pcolormesh(
spec2d.wavelength.value[cond2], yvals,
spec2d.data[:, cond2], vmin=-v1 / 5, vmax=2 * v1,
cmap=pl.cm.hot)
if yposition is not None:
p.axes[n - n].axhline(
wdelt * yposition[0], ls='--', lw=2, color='LawnGreen')
p.axes[n - n].axhline(
wdelt * yposition[2], ls='--', lw=2, color='LawnGreen')
# 1D spectrum plot:
if (n == 3) | ((n == 2) & (sky is not None and spec2d is None)):
n = 2
else:
n = 1
p.axes[n - n].plot(
wavelength[cond], flux[cond] / 1e-16, drawstyle='steps-mid',
color='k')
p.axes[n - n].plot(
wavelength[cond], error[cond] / 1e-16, drawstyle='steps-mid',
color='r')
p.axes[n - n].plot(
wavelength[region], continuum[region] / 1e-16, lw=3,
color='RoyalBlue')
if fit:
p.axes[n - n].plot(
wavelength[region],
(g(wavelength[region]) + continuum[region]) / 1e-16,
color='m', lw=2)
if fit_hd:
p.axes[n - n].plot(
wavelength[region],
(g(wavelength[region]) * continuum[region]) / 1e-16,
color='m', lw=2)
p.axes[n - n].axvspan(
bands[key][2] * (1 + z), bands[key][3] * (1 + z),
facecolor='g', edgecolor='none', alpha=0.5)
p.axes[n - n].annotate(
key, xy=(0.05, 0.8),
xycoords='axes fraction', horizontalalignment='left',
fontsize=12, bbox=bbox, color='k')
p.axes[n - n].annotate(
name, xy=(0.95, 0.8), xycoords='axes fraction',
horizontalalignment='right', fontsize=12, bbox=bbox, color='k')
# Sky spectrum plot:
if sky is not None:
if sky_in_counts:
p.axes[n - 1].plot(
wavelength[cond], sky[cond], drawstyle='steps-mid',
color='MidnightBlue')
else:
p.axes[n - 1].plot(
wavelength[cond], sky[cond] / 1e-16,
drawstyle='steps-mid', color='MidnightBlue')
p.axes[n - 1].annotate(
'sky', xy=(0.05, 0.8), xycoords='axes fraction',
horizontalalignment='left', fontsize=12, bbox=bbox,
color='k')
# Axis limits and labels, tidy up and display:
region_min = np.min(error[region])
region_max = np.max(flux[region])
sn_spec = np.median(flux / error)
for i in range(0, n):
p.axes[i].set_xlim(wavelength[cond][0], wavelength[cond][-1])
p.axes[n - 2].set_ylim(
(region_min - 0.2 * sn_spec * region_min) / 1e-16,
(region_max + 0.8 * region_max) / 1e-16)
xlabel = 'Wavelength ($\AA$)'
if spec1d.flux.unit == erg / s / cm ** 2 / angstrom:
ylabel = 'Flux ($10^{-16}$ erg s$^{-1}$ cm$^{-2}$ $\AA$)'
else:
ylabel = 'Flux ({0})'.format(
spec1d.flux.unit.to_string(format='latex'))
p.tidy(shared_axes=True)
p.labels(xlabel, ylabel)
if plot_directory is not None:
p.savefig('{0}/{1}_{2}.png'.format(plot_directory, name, key))
if show_plot:
p.display()
return (line_flux, continuum_flux, eqw, sn, continuum_params, line_params,
flags)
| {
"alphanum_fraction": 0.5232984649,
"author": null,
"avg_line_length": 38.4185803758,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e127479671e43975de018607f981b85443308ffd",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-11-19T04:45:38.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-11-19T04:45:38.000Z",
"max_forks_repo_head_hexsha": "6e14973fd1e69d5e7bd7c40f93ffe11e2cd41990",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "cwfinn/igmtools",
"max_forks_repo_path": "igmtools/modeling/galaxy.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6e14973fd1e69d5e7bd7c40f93ffe11e2cd41990",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "cwfinn/igmtools",
"max_issues_repo_path": "igmtools/modeling/galaxy.py",
"max_line_length": 79,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "6e14973fd1e69d5e7bd7c40f93ffe11e2cd41990",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "cwfinn/igmtools",
"max_stars_repo_path": "igmtools/modeling/galaxy.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 9417,
"path": null,
"reason": "import numpy,from astropy",
"repo": null,
"save_path": null,
"sha": null,
"size": 36805
} |
#include <turbodbc/field_translators/timestamp_translator.h>
#include <boost/variant/get.hpp>
#include <sql.h>
namespace turbodbc { namespace field_translators {

timestamp_translator::timestamp_translator() = default;

timestamp_translator::~timestamp_translator() = default;

field timestamp_translator::do_make_field(char const * data_pointer) const
{
	auto const * timestamp = reinterpret_cast<SQL_TIMESTAMP_STRUCT const *>(data_pointer);
	// SQL_TIMESTAMP_STRUCT carries the fraction in nanoseconds, while
	// boost::posix_time resolves microseconds, hence the division by 1000.
	int64_t const fraction_micro = timestamp->fraction / 1000;
	boost::posix_time::ptime const timestamp_value{
		{static_cast<short unsigned int>(timestamp->year), timestamp->month, timestamp->day},
		{timestamp->hour, timestamp->minute, timestamp->second, fraction_micro}
	};
	return {timestamp_value};
}

} }
| {
"alphanum_fraction": 0.741130092,
"author": null,
"avg_line_length": 28.1851851852,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "fb8a4e01d2f1faa4950c1586e9f2d6e0f72beadc",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 83,
"max_forks_repo_forks_event_max_datetime": "2022-03-26T02:32:26.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-06-15T13:55:44.000Z",
"max_forks_repo_head_hexsha": "80a29a7edfbdabf12410af01c0c0ae74bfc3aab4",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "arikfr/turbodbc",
"max_forks_repo_path": "cpp/turbodbc/Library/src/field_translators/timestamp_translator.cpp",
"max_issues_count": 325,
"max_issues_repo_head_hexsha": "80a29a7edfbdabf12410af01c0c0ae74bfc3aab4",
"max_issues_repo_issues_event_max_datetime": "2022-03-21T23:58:42.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-04-08T11:54:41.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "arikfr/turbodbc",
"max_issues_repo_path": "cpp/turbodbc/Library/src/field_translators/timestamp_translator.cpp",
"max_line_length": 78,
"max_stars_count": 537,
"max_stars_repo_head_hexsha": "80a29a7edfbdabf12410af01c0c0ae74bfc3aab4",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "arikfr/turbodbc",
"max_stars_repo_path": "cpp/turbodbc/Library/src/field_translators/timestamp_translator.cpp",
"max_stars_repo_stars_event_max_datetime": "2022-03-29T04:43:17.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-03-18T21:46:05.000Z",
"num_tokens": 183,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 761
} |
"""
Metadata describing one field of a generated type.

`ltype`/`rtype` hold the left/right-hand types as a `DataType`, `String`,
or `Symbol`; `rtype` defaults to the same value as `ltype`.
"""
Base.@kwdef struct FieldInfo
    name::String
    ltype::Union{DataType,String,Symbol}
    # BUG FIX: the original default was `= rtype`, which references the
    # field being defined and raises UndefVarError whenever the keyword is
    # omitted.  In @kwdef a default may reference *earlier* fields, so the
    # intended behavior is to mirror `ltype`.
    rtype::Union{DataType,String,Symbol} = ltype
    get_returns_reference::Bool = true
end
"""
Pairs a generated type's name with the metadata of all its fields.
"""
struct TypeInfo
    name::String
    fields::Vector{FieldInfo}
end
| {
"alphanum_fraction": 0.7217741935,
"author": null,
"avg_line_length": 19.0769230769,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "bfc9a32bc789087a238cc615b203ff17549c939e",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "53b5fa074338a5eea096d6736453c5692faf7368",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "colinxs/Emporos.jl",
"max_forks_repo_path": "archive/CodeGen/types.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "53b5fa074338a5eea096d6736453c5692faf7368",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "colinxs/Emporos.jl",
"max_issues_repo_path": "archive/CodeGen/types.jl",
"max_line_length": 48,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "53b5fa074338a5eea096d6736453c5692faf7368",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "colinxs/Emporos.jl",
"max_stars_repo_path": "archive/CodeGen/types.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 68,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 248
} |
"""
Kernel PCA object
"""
struct kPCA{T <: Real}
    X::Matrix{Union{T, Missing}} # original data
    κ::Kernel{T} # kernel function (from MLKernels)
    μ::Vector{T} # row/col means of the kernel matrix
    μ2::T # mean of the kernel matrix
    λ::Vector{T} # eigenvalues in feature space
    α::Matrix{T} # eigenvectors in feature space
    # Inner constructor: sanity-check dimensions before constructing.
    function kPCA(
        X :: Matrix{Union{T, Missing}},
        κ :: Kernel{T},
        μ :: Vector{T},
        μ2 :: T,
        λ :: Vector{T},
        α :: Matrix{T}
    ) where T <: Real
        # one eigenvalue per eigenvector column
        @assert length(λ) == size(α, 2)
        # one kernel-matrix mean per observation (observations are columns of X)
        @assert length(μ) == size(X, 2)
        new{T}(X, κ, μ, μ2, λ, α)
    end
end
# Input dimensionality (rows of X) and output dimensionality (number of
# retained components).
indim(M::kPCA) = size(M.X, 1)
outdim(M::kPCA) = length(M.λ)
# BUG FIX: the original read `M.V`, but kPCA has no field `V`; the
# eigenvectors are stored in `M.α`.  Each column is rescaled by 1/√λ
# (transposed broadcast so the length-ncomp vector divides the columns),
# matching MultivariateStats' KernelPCA projection.
projection(M::kPCA) = M.α ./ sqrt.(M.λ)'
principalvars(M::kPCA) = M.λ
"""
fit a kernel PCA.
κ :: Kernel{T} the kernel function, see MLKernels.jl for documentation.
X :: Array{Union{T, Missing}} the data to embed, with observations ins columns.
"""
function fit(
:: Type{kPCA},
κ :: Kernel{T},
X :: Matrix{Union{T, Missing}},
ncomp = 2
) where T
K = kernelmatrix(Val(:col), κ, X, true)
K2, μ, μ2 = missing_to_mean(K)
K2 .= K2 .- μ .- μ' .+ μ2
e = K2 |> Hermitian |> eigen
e_ev_perm = sortperm(e.values, rev = true)[1:ncomp]
λ = e.values[e_ev_perm]::Vector{T}
α = e.vectors[:, e_ev_perm]::Matrix{T}
return kPCA(X, κ, μ, μ2, λ, α)
end
"""Calculate transformation to kernel space"""
# function transform(M::kPCA{T}, x::AbstractMatrix{mT}) where mT <: Union{T, Missing} where T <: Real
function transform(M::kPCA{T}, x::AbstractMatrix{mT}) where mT where T
##### There is some typeinference bug that leads to illegal instructions and
##### reaches the unreachable...
K = kernelmatrix(Val(:col), M.κ, M.X, x)
# ##### instead of `kernelmatrix`:
# # K = allocate_basematrix(Val(:col), M.X, x)
# K = Array{T}(undef, size(M.X, 2), size(x, 2))
# f = basefunction(M.κ)
# # basematrix!(Val(:col), K, f, M.X, x)
# ##### instead of `basematrix!`:
# n, m = checkdimensions(Val(:col), K, M.X, x)
# @inbounds for j = 1:m
# yj = subvector(Val(:col), x, j)
# for i = 1:n
# xi = subvector(Val(:col), M.X, i)
# ##### this causes slightly less allocations but far worse performance at least it doesn't crash the julia session
# # # unsafe_base_evaluate
# # s, nn = base_initiate(f, T), 0
# # # s, nn = base_initiate(f, promote_type(eltype(x), eltype(y))), 0
# # for i in eachindex(xi, yj)
# # s, nn = base_aggregate(f, s, nn, xi[i], yj[i])
# # end
# # K[i, j] = nn > 0 ? base_return(f, s / nn) : missing
# # # end unsafe_base_evaluate
# K[i,j] = unsafe_base_evaluate(f, xi, yj)
# end
# end
# ##### end instead of `basematrix!`
# kappamatrix!(M.κ, K)
# ##### end instead of `kernelmatrix`
K .= K .- M.μ .- mean(K, dims = 1) .+ M.μ2
return (M.α' ./ sqrt.(M.λ)) * K
end
# Embedding of the training data itself: √λ .* α' (outdim × n), consistent
# with transform(M, M.X).
transform(M::kPCA) = sqrt.(M.λ) .* M.α'
function Base.show(io::IO, ::MIME"text/plain", M::kPCA{T}) where T
    # One-line summary: element type, kernel, and input→output dimensions.
    summary_line = string("kPCA{", T, "}(κ: ", M.κ, ", dims: ", indim(M), "─→", outdim(M), ")")
    println(io, summary_line)
end
# """Kernel PCA type"""
# struct KernelPCA{T<:Real}
# X::AbstractMatrix{T} # fitted data or precomputed kernel
# ker::Union{Nothing, Function} # kernel function
# center::KernelCenter # kernel center
# λ::AbstractVector{T} # eigenvalues in feature space
# α::AbstractMatrix{T} # eigenvectors in feature space
# inv::AbstractMatrix{T} # inverse transform coefficients
# end
# struct KernelCenter{T<:Real}
# means::AbstractVector{T}
# total::T
# end
# import MultivariateStats: KernelPCA, KernelCenter, transform!
# import Base.convert
# """Center kernel matrix."""
# function transform!(C::KernelCenter{T}, K::AbstractMatrix{T}) where {T<:Real}
# r, c = size(K)
# tot = C.total
# means = mean(K, dims=1)
# K .= K .- C.means .- means .+ tot
# return K
# end
# TODO: does not work
# function convert(::Type{KernelPCA{T}}, M::kPCA) where T
# KernelPCA(missing_to_mean_slice(M.X, dims = 2),
# (x, y) -> kappa(M.κ, unsafe_base_evaluate(basefunction(M.κ), x, y)),
# KernelCenter(M.μ, M.μ2),
# M.λ,
# M.α,
# zeros(T, 0, 0))
# end
| {
"alphanum_fraction": 0.5645454545,
"author": null,
"avg_line_length": 30.5555555556,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "37caff6d0cddd649c7ed5fdb50bd4c496000b1d2",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "fe6b98d48be7be86d2f7f000cfa8407d37f43199",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "gdkrmr/MLKernelsMissing.jl",
"max_forks_repo_path": "src/kpca.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "fe6b98d48be7be86d2f7f000cfa8407d37f43199",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "gdkrmr/MLKernelsMissing.jl",
"max_issues_repo_path": "src/kpca.jl",
"max_line_length": 115,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "fe6b98d48be7be86d2f7f000cfa8407d37f43199",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "gdkrmr/MLKernelsMissing.jl",
"max_stars_repo_path": "src/kpca.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1416,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 4400
} |
from keras.models import load_model
from load_face_dataset import load_dataset, IMAGE_SIZE, resize_image
import numpy as np
from fr_utils import img_to_encoding
from sklearn.model_selection import cross_val_score, ShuffleSplit, KFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.externals import joblib
import matplotlib.pyplot as plt
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
from keras.utils import CustomObjectScope
import tensorflow as tf
# Load the pre-trained FaceNet embedding model once at import time.
# CustomObjectScope binds the name `tf` that the serialized model refers to.
with CustomObjectScope({'tf': tf}):
    facenet = load_model('./model/nn4.small2.v1.h5')
class Dataset:
    """Holds face-image embeddings (X_train) and labels (y_train) loaded from a folder."""
    def __init__(self, path_name):
        # Filled in by load().
        self.X_train = None
        self.y_train = None
        # Root folder passed to load_dataset().
        self.path_name = path_name
    def load(self, img_rows = IMAGE_SIZE, img_cols = IMAGE_SIZE, img_channels = 3, model = facenet):
        """Read images under self.path_name and embed them with `model`.

        NOTE(review): img_rows/img_cols/img_channels are accepted but never
        used in this body -- presumably load_dataset does the resizing;
        confirm before removing them.
        """
        images, labels = load_dataset(self.path_name)
        # One embedding row per image, aligned with `labels`.
        X_embedding = img_to_encoding(images, model)
        print('X_train shape', X_embedding.shape)
        print('y_train shape', labels.shape)
        print(X_embedding.shape[0], 'train samples')
        self.X_train = X_embedding
        self.y_train = labels
class Knn_Model:
    """k-nearest-neighbour classifier over facenet embeddings.

    The number of neighbours k is chosen by 10-fold cross-validation
    over k = 1..30.
    """
    def __init__(self):
        # The sklearn KNeighborsClassifier; set by cross_val_and_build_model()
        # or load_model().
        self.model = None
    def cross_val_and_build_model(self, dataset):
        """Select the best k by cross-validation and build the classifier.

        Prints per-k accuracies, plots accuracy vs. k, and stores an
        (unfitted) KNeighborsClassifier with the best k in self.model.
        """
        k_range = range(1, 31)
        k_scores = []
        print("k vs accuracy:")
        # BUG FIX: the original constructed this shuffled, seeded splitter
        # but then passed cv=10 to cross_val_score, so it was never used.
        # Passing it explicitly makes the fold assignment reproducible.
        cv = KFold(n_splits = 10, shuffle = True, random_state = 0)
        for k in k_range:
            knn = KNeighborsClassifier(n_neighbors = k)
            score = cross_val_score(knn, dataset.X_train, dataset.y_train,
                                    cv = cv, scoring = 'accuracy').mean()
            k_scores.append(score)
            print(k, ":", score)
        plt.plot(k_range, k_scores)
        plt.title("KNN")
        plt.xlabel('Value of K for KNN')
        plt.ylabel('Cross-Validated Accuracy')
        plt.tick_params(axis='both')
        plt.show()
        # argmax is a 0-based index; k values start at 1.
        n_neighbors_max = np.argmax(k_scores) + 1
        print("The best k is: ", n_neighbors_max)
        print("The accuracy is: ", k_scores[n_neighbors_max - 1], "When n_neighbor is: ", n_neighbors_max)
        self.model = KNeighborsClassifier(n_neighbors = n_neighbors_max)
    def train(self, dataset):
        """Fit the classifier on the embedded training data."""
        self.model.fit(dataset.X_train, dataset.y_train)
    def save_model(self, file_path):
        """Persist the classifier to `file_path` via joblib."""
        joblib.dump(self.model, file_path)
    def load_model(self, file_path):
        """Load a previously saved classifier from `file_path`."""
        self.model = joblib.load(file_path)
    def predict(self, image):
        """Return the predicted label for one image (resized, then embedded)."""
        image = resize_image(image)
        image_embedding = img_to_encoding(np.array([image]), facenet)
        label = self.model.predict(image_embedding)
        return label[0]
if __name__ == "__main__":
dataset = Dataset('dataset')
dataset.load()
model = Knn_Model()
model.cross_val_and_build_model(dataset)
model.train(dataset)
model.save_model('model/knn_classifier.model')
| {
"alphanum_fraction": 0.6368642695,
"author": null,
"avg_line_length": 29.1226415094,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "70ef1ecf78fd65ee39c652583df96736a0de9d0d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "ecb1fed88ab5bbb969446266f3326ce12ac086ee",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "donghu123/face_recognition_using_opencv_keras_scikit-learn-master",
"max_forks_repo_path": "face_knn_classifier.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ecb1fed88ab5bbb969446266f3326ce12ac086ee",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "donghu123/face_recognition_using_opencv_keras_scikit-learn-master",
"max_issues_repo_path": "face_knn_classifier.py",
"max_line_length": 112,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "ecb1fed88ab5bbb969446266f3326ce12ac086ee",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "donghu123/face_recognition_using_opencv_keras_scikit-learn-master",
"max_stars_repo_path": "face_knn_classifier.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 713,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3087
} |
import os
import numpy as np
from pwtools import io
from pwtools.test import tools
from .testenv import testdir
rand = np.random.rand
def test_h5():
    """Round-trip dicts through HDF5 files via io.write_h5/io.read_h5.

    Covers keys with and without a leading slash (read back always *with*
    one), append mode (the default), and overwrite with mode='w'.
    Skipped when h5py is not importable.
    """
    # Keep the try body minimal: the original wrapped the *whole* test in
    # try/except ImportError, so an ImportError raised anywhere inside
    # io/tools silently skipped the test instead of failing it.
    try:
        import h5py  # noqa: F401 -- availability probe; io.write_h5 needs it
    except ImportError:
        tools.skip("skipping test_h5, no h5py importable")
        return
    dct1 = \
        {'/a': 'abcgs',
         '/b/c/x1': 3,
         '/b/c/x2': rand(2,3),
         }
    # writing a dct w/o leading slash will always be read back in *with*
    # leading slash
    dct2 = \
        {'a': 'abciqo4iki',
         'b/c/x1': 3,
         'b/c/x2': rand(2,3),
         }
    for idx, dct in enumerate([dct1, dct2]):
        h5fn = os.path.join(testdir, 'test_%i.h5' % idx)
        io.write_h5(h5fn, dct)
        read_dct = io.read_h5(h5fn)
        for kk in list(read_dct.keys()):
            assert kk.startswith('/')
        for kk in list(dct.keys()):
            key = '/' + kk if not kk.startswith('/') else kk
            tools.assert_all_types_equal(dct[kk], read_dct[key])
    # write mode='a' is default, test appending
    h5fn = os.path.join(testdir, 'test_append.h5')
    io.write_h5(h5fn, {'/a': 1.0})
    read_dct = io.read_h5(h5fn)
    assert list(read_dct.keys()) == ['/a']
    assert read_dct['/a'] == 1.0
    # append '/b', using {'/a': 1.0, '/b': 2.0} would be an error since /a
    # already exists, use mode='w' then, but this overwrites all!
    io.write_h5(h5fn, {'/b': 2.0})
    read_dct2 = io.read_h5(h5fn)
    # sort(...): sort possible [/b, /a] -> [/a, /b]
    assert np.sort(np.array(list(read_dct2.keys()))).tolist() == ['/a', '/b']
    assert read_dct2['/a'] == 1.0
    assert read_dct2['/b'] == 2.0
    # overwrite everything with mode='w'
    io.write_h5(h5fn, {'/b': 22.0, '/c': 33.0}, mode='w')
    read_dct3 = io.read_h5(h5fn)
    assert np.sort(np.array(list(read_dct3.keys()))).tolist() == ['/b', '/c']
| {
"alphanum_fraction": 0.5114854518,
"author": null,
"avg_line_length": 36.2777777778,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "19e08c50ec4f9d13ec3ea8cb5a1b03fd9284a0ae",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 9,
"max_forks_repo_forks_event_max_datetime": "2022-02-20T01:52:03.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-06-01T12:57:57.000Z",
"max_forks_repo_head_hexsha": "cee068d1c7984d85e94ace243f86de350d3a1dba",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "elcorto/pwtools",
"max_forks_repo_path": "pwtools/test/test_h5.py",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "cee068d1c7984d85e94ace243f86de350d3a1dba",
"max_issues_repo_issues_event_max_datetime": "2021-10-30T21:12:53.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-02-27T09:14:17.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "elcorto/pwtools",
"max_issues_repo_path": "pwtools/test/test_h5.py",
"max_line_length": 81,
"max_stars_count": 41,
"max_stars_repo_head_hexsha": "cee068d1c7984d85e94ace243f86de350d3a1dba",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "elcorto/pwtools",
"max_stars_repo_path": "pwtools/test/test_h5.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-24T16:08:47.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-06-25T13:17:57.000Z",
"num_tokens": 604,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1959
} |
import time
import math
import numpy as np
from itertools import product
from collections import deque, defaultdict, namedtuple
#TODO #4 make 2nd part of the task
def load_data():
    """Read the real puzzle input and the sample input, reporting line counts.

    Returns (data, testdata) as raw strings.
    """
    contents = []
    for fname in ("AoC2020/aoc20-20-data.txt", "AoC2020/aoc20-20-testdata.txt"):
        with open(fname, "r") as handle:
            text = handle.read()
        print("Loaded lines:", len(text.splitlines()))
        contents.append(text)
    return tuple(contents)
# A tile orientation: the four border codes plus the pixel array itself.
Shape = namedtuple('Shape',['N','E','S','W','arr'])

def make_shape(arr):
    """Encode the four borders of a square tile as integers.

    Borders are read in N, E, S, W order; each border's 0/1 pixels are
    interpreted as a binary number.  Uses -1 indexing instead of the
    original hard-coded index 9, so any square tile size works (the
    puzzle tiles are 10x10).
    """
    borders = (arr[0, :], arr[:, -1], arr[-1, :], arr[:, 0])
    codes = [int(''.join(map(str, border)), 2) for border in borders]
    # Copy the array so later rotations/flips don't alias this Shape.
    return Shape(*codes, np.copy(arr))
def get_shapes(array):
    """Return all 8 dihedral orientations of `array`, each as a Shape.

    Four rotations of the original, then four rotations of the mirror.
    """
    variants = []
    current = array
    for _side in range(2):
        for _turn in range(4):
            variants.append(make_shape(current))
            current = np.rot90(current)
        current = np.fliplr(current)
    return tuple(variants)
Tile = namedtuple('Tile',['id','array','shapes','edges'])

def parse_data(p_data):
    """Parse raw puzzle text into a list of Tile records.

    Each blank-line-separated segment is "Tile <id>:" followed by a
    10x10 grid of '#'/'.' characters, stored as a 0/1 int8 array.

    BUG FIX: the original split the module-level global `data` instead of
    the `p_data` argument, silently ignoring its input.
    """
    tiles = []
    for segment in p_data.split('\n\n'):
        header, pattern = segment.split(':\n')
        tile_id = int(header[5:])  # strip the "Tile " prefix
        bits = list(pattern.replace('\n','').replace('#','1').replace('.','0'))
        arr = np.array(bits, dtype=np.int8).reshape(10, 10)
        # edges is a placeholder (None) -- filled in elsewhere if needed.
        tiles.append(Tile(tile_id, arr, get_shapes(arr), None))
    return tiles
def fit(mosaic, shape, px, py):
    """Check whether *shape* can occupy grid cell (px, py).

    Occupied neighbour cells hold ``(tile_id, Shape)`` tuples; empty cells
    are falsy (0).  A placement fits when every occupied neighbour's facing
    border equals the corresponding border of *shape*.
    """
    size = len(mosaic)
    if not (0 <= px < size and 0 <= py < size):
        return False
    above = mosaic[px - 1][py] if px > 0 else None
    if above and above[1].E != shape.W:
        return False
    below = mosaic[px + 1][py] if px < size - 1 else None
    if below and below[1].W != shape.E:
        return False
    left = mosaic[px][py - 1] if py > 0 else None
    if left and left[1].N != shape.S:
        return False
    right = mosaic[px][py + 1] if py < size - 1 else None
    if right and right[1].S != shape.N:
        return False
    return True
def dfs_helper(mosaic, graf, px, py, pstack, tile, edge_value):
    """Try every tile that shares *edge_value* (other than *tile*) at (px, py).

    Bug fix: the original kept iterating after a successful placement, so a
    later failing candidate overwrote ``result`` and a solved mosaic could be
    reported as a failure.  Return True as soon as any candidate succeeds.
    """
    matching_tiles = graf.get(edge_value)
    if not matching_tiles:
        # no tile exposes this border value -> dead end
        return False
    for item in matching_tiles.values():
        if item != tile:
            if dfs(mosaic, graf, item, px, py, pstack):
                return True
    return False
def dfs(mosaic, graf, tile, px, py, pstack):
    # Depth-first search: try to place `tile` at grid cell (px, py) in each of
    # its 8 orientations and recursively extend the mosaic via edge-matching
    # neighbours.  `pstack` holds the tiles placed on the current search path.
    if not 0<=px<len(mosaic) or not 0<=py<len(mosaic) or mosaic[px][py]:
        return False
    if tile not in pstack:
        pstack.append(tile)
        for shape in tile.shapes:
            # try this orientation only if all placed neighbours match
            if fit(mosaic, shape, px, py):
                mosaic[px][py] = (tile.id, shape)
                if len(pstack)==len(tiles):
                    # every tile placed -> complete mosaic found
                    return True
                result = False
                # NOTE(review): only ONE free neighbour direction is explored
                # per placement (if/elif chain) and the py-1 branch is disabled;
                # presumably sufficient for the sweep order used -- confirm.
                if px>0 and not mosaic[px-1][py]:
                    edge_value = shape.W
                    result = dfs_helper(mosaic, graf, px-1, py, pstack, tile, shape.W)
                elif px<len(mosaic)-1 and not mosaic[px+1][py]:
                    result = dfs_helper(mosaic, graf, px+1, py, pstack, tile, shape.E)
                elif py<len(mosaic)-1 and not mosaic[px][py+1]:
                    result = dfs_helper(mosaic, graf, px, py+1, pstack, tile, shape.N)
                # elif py>0 and not mosaic[px][py-1]:
                #     result = dfs_helper(mosaic, graf, px, py-1, pstack, tile, shape.S)
                if result:
                    return result
                # backtrack: undo this placement and try the next orientation
                pstack.pop()
                mosaic[px][py] = 0
    return False
data, test_data = load_data()
# uncomment the below line to use test data
# data = test_data

#part one
# [id, [lines], [shapes],[matching tiles]]
tiles = parse_data(data)
# the finished mosaic is a square of edge_len x edge_len tiles
edge_len = int(math.sqrt(len(tiles)))
# graf maps border-value -> {tile_id: Tile} over every tile orientation,
# so candidate neighbours can be looked up by a shared border value
graf = defaultdict(dict)
for tile in tiles:
    for shape in tile.shapes:
        for ival in shape:
            #todo: iterates over different types. fixit
            if isinstance(ival,int):
                graf[ival][tile.id]=tile
mosaic = [[0 for _ in range(edge_len)] for _ in range(edge_len)]
found = False
# try each tile as the top-left corner until a complete tiling is found
for i,tile in enumerate(tiles):
    found = dfs(mosaic, graf, tile,0,0,[])
    if found:
        break
if found:
    # part-one answer: product of the four corner tile ids
    result1 = mosaic[0][0][0]\
        * mosaic[0][edge_len-1][0]\
        * mosaic[edge_len-1][0][0]\
        * mosaic[edge_len-1][edge_len-1][0]
else:
    result1 = 'Not found.'
print(f'result 1 = {result1}')
# regression check against the author's known-good answer
if result1==13224049461431:
    print(' -=OK=- ')
else:
    print(' --err-- ')

#part two
t1 = time.perf_counter()
result2 = None
#print(f'result 2 = {result2}')
t2 = time.perf_counter()
#print(f'TIME = {(t2-t1):.2f}')
| {
"alphanum_fraction": 0.5621552821,
"author": null,
"avg_line_length": 27.4069767442,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "c63fc3a03d060d3f2f77d8c1204846eb72c77a85",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "82558aeb073efd475943da0040bbd538e7f75eff",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "pascalddg/AoC",
"max_forks_repo_path": "AoC2020/aoc20-20-code.py",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "82558aeb073efd475943da0040bbd538e7f75eff",
"max_issues_repo_issues_event_max_datetime": "2021-01-06T22:02:28.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-01-06T10:19:10.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "pascalddg/AoC",
"max_issues_repo_path": "AoC2020/aoc20-20-code.py",
"max_line_length": 88,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "82558aeb073efd475943da0040bbd538e7f75eff",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "pascalddg/AoC",
"max_stars_repo_path": "AoC2020/aoc20-20-code.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1328,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4714
} |
from kivy.config import Config
# Window size must be configured before the rest of kivy is imported.
# Config.set('kivy', 'exit_on_escape', '0')
# Config.set('graphics', 'resizable', '0')
Config.set('graphics', 'width', '640')
Config.set('graphics', 'height', '480')

import os
import cv2
from detection import Face_Detector, Landmark_Detector
from faceswap_cam import face_swap
from kivy.app import App
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.uix.label import Label
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from kivy.uix.screenmanager import Screen, ScreenManager
# from kivy.properties import ObjectProperty
import numpy as np
class MyScreenManager(ScreenManager):
    """Root screen manager; its screens are declared in the kv string below."""
    pass
class PreScreen(Screen):
    """Splash screen with a single GO button that switches to the front screen."""
    pass
class FrontScreen(Screen):
    """Main screen: pick a portrait image, then run the webcam face-swap loop."""

    def __init__(self, **kwargs):
        super(FrontScreen, self).__init__(**kwargs)
        # seconds between swap_face() ticks (~20 fps)
        self.refresh_dt = 0.05

    def on_enter(self, *args):  # only works for multiple screens?
        """Build detectors and the portrait drop-down when the screen is shown."""
        self.face_detector = Face_Detector()
        self.lmk_detector = Landmark_Detector()
        self.portraits = os.listdir('./portraits/')
        # drop-down menu listing the available portrait images
        self.dropdown = DropDown()
        for face in self.portraits:
            btn = Button(text=face, size_hint_y=None, height=32)
            # btn is bound as the lambda parameter, so each button selects itself
            btn.bind(on_release=lambda btn: self.dropdown.select(btn.text))
            self.dropdown.add_widget(btn)
        self.ids.face_selection.bind(on_release=self.dropdown.open)
        self.dropdown.bind(on_select=lambda instance, x: setattr(self.ids.face_selection, 'text', x))

    def initialize(self, target_face):
        """Open the capture device and load the selected portrait.

        :param target_face: filename of the portrait inside ./portraits/
        """
        try:
            _source = int(self.ids.cam.text)  # numeric camera index
        except ValueError:  # narrowed from a bare `except Exception`
            _source = self.ids.cam.text  # otherwise treat as a device path/URL
        self.cap = cv2.VideoCapture(_source)
        self.FaceSwap = face_swap(os.path.join('./portraits', target_face))

    def swap_face(self, *args):
        """Grab one frame, swap the first detected face, show the result."""
        ret, frame = self.cap.read()
        if not ret:
            # camera produced no frame; skip this tick instead of crashing
            return
        frame = cv2.resize(frame, (480, 640))
        bboxes, _ = self.face_detector.detect(frame)  # get faces
        if len(bboxes) != 0:
            bbox = bboxes[0]  # get the first
            # np.int was removed in NumPy 1.24 -- use the builtin int dtype
            bbox = bbox.astype(int)
            lmks, PRY_3d = self.lmk_detector.detect(frame, bbox)  # get landmarks
            lmks = lmks.astype(int)
            frame = self.FaceSwap.run(frame, lmks)
        cv2.imshow("Face Swap", frame)

    def update(self, *args):
        """Start the periodic face-swap loop."""
        Clock.schedule_interval(self.swap_face, self.refresh_dt)

    def stop(self):
        """Stop the loop and close the preview window."""
        Clock.unschedule(self.swap_face)
        cv2.destroyWindow('Face Swap')
root_widget = Builder.load_string('''
MyScreenManager:
PreScreen:
FrontScreen:
<PreScreen>:
Image:
source: ''
allow_stretch: True
keep_ratio: False
size: root.size
Button:
text: 'GO'
font_size:40
center: root.center
color: 1,0,1,1
background_color: 0,0,0,0
on_release: app.root.current = 'front'
<FrontScreen>:
name: 'front'
Image:
source: ''
allow_stretch: True
keep_ratio: False
size: root.size
Button:
id: face_selection
center: root.center
text: 'Select a face'
size: 0.25*root.width, root.height//13
# on_press: print(root.portraits)
Label:
text: 'Camera'
color: (1, 0.6, 0, 1)
font_size: 24
center: 0.2*root.width , 0.65*root.height
TextInput:
id: cam
text: '0'
font_size: 12
multiline: False
center: 0.52*root.width , 0.625*root.height
size: (0.3*root.width, root.height//16)
padding: [0.02*root.width,self.height // 2 - (self.line_height//2) * len(self._lines), 0, 0]
font_size: dp(18)
color:(0.11, 0.17, 0.44, 1.0)
Button:
id: start
text: 'START'
center: root.width//2, 0.3*root.height
height: root.height//13
on_release: root.initialize(face_selection.text)
on_release: root.update()
Button:
id: reset
text: 'RESET'
center: 1.5*root.width//2, 0.47*root.height
height: root.height//13
on_release: root.stop()
''')
class faceApp(App):
    """Application entry point; the window shows the kv-defined root widget."""
    def build(self):
        self.title = 'Face Swap'
        return root_widget
faceApp().run() | {
"alphanum_fraction": 0.5767217919,
"author": null,
"avg_line_length": 29.487654321,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d2de28cea11632f26f235750e1c4ad63996da8ba",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2022-02-21T07:10:45.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-01-12T15:22:51.000Z",
"max_forks_repo_head_hexsha": "c4680fa24d809539507aa49bcd24e22723bfdf3f",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "dykuang/Real-Time-FaceSwap-with-ONNX",
"max_forks_repo_path": "faceswap_kivy.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c4680fa24d809539507aa49bcd24e22723bfdf3f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "dykuang/Real-Time-FaceSwap-with-ONNX",
"max_issues_repo_path": "faceswap_kivy.py",
"max_line_length": 104,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "c4680fa24d809539507aa49bcd24e22723bfdf3f",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "dykuang/Real-Time-FaceSwap-with-ONNX",
"max_stars_repo_path": "faceswap_kivy.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-21T07:10:43.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-01-12T15:22:49.000Z",
"num_tokens": 1198,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4777
} |
#include <boost/gil/extension/dynamic_image/algorithm.hpp>
| {
"alphanum_fraction": 0.8305084746,
"author": null,
"avg_line_length": 29.5,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "c585151cc6fe9f7cccb1e1e59821a4a3ded82ad2",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z",
"max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e",
"max_forks_repo_licenses": [
"BSL-1.0"
],
"max_forks_repo_name": "miathedev/BoostForArduino",
"max_forks_repo_path": "src/boost_gil_extension_dynamic_image_algorithm.hpp",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e",
"max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z",
"max_issues_repo_licenses": [
"BSL-1.0"
],
"max_issues_repo_name": "miathedev/BoostForArduino",
"max_issues_repo_path": "src/boost_gil_extension_dynamic_image_algorithm.hpp",
"max_line_length": 58,
"max_stars_count": 10,
"max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e",
"max_stars_repo_licenses": [
"BSL-1.0"
],
"max_stars_repo_name": "miathedev/BoostForArduino",
"max_stars_repo_path": "src/boost_gil_extension_dynamic_image_algorithm.hpp",
"max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z",
"num_tokens": 14,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 59
} |
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Spin-free lambda equation of UHF-CCSD(T)
'''
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.cc import ccsd_lambda
from pyscf.cc import uccsd_lambda
def kernel(mycc, eris=None, t1=None, t2=None, l1=None, l2=None,
           max_cycle=50, tol=1e-8, verbose=logger.INFO):
    # Solve the UHF-CCSD(T) lambda equations by delegating to the generic
    # ccsd_lambda driver, supplying this module's intermediates builder and
    # amplitude-update function.
    return ccsd_lambda.kernel(mycc, eris, t1, t2, l1, l2, max_cycle, tol,
                              verbose, make_intermediates, update_lambda)
def make_intermediates(mycc, t1, t2, eris):
    """Build UCCSD(T) lambda intermediates.

    Extends the plain UCCSD intermediates from uccsd_lambda with the
    perturbative-triples corrections, stored on the returned object as
    l1a_t/l1b_t and l2aa_t/l2ab_t/l2bb_t, one spin case at a time
    (aaa, bbb, baa, bba).
    """
    def p6(t):
        # sum over the 6 simultaneous permutations of (ijk) and (abc)
        return (t + t.transpose(1,2,0,4,5,3) +
                t.transpose(2,0,1,5,3,4) + t.transpose(0,2,1,3,5,4) +
                t.transpose(2,1,0,5,4,3) + t.transpose(1,0,2,4,3,5))
    def r6(w):
        # antisymmetrise over permutations of the occupied indices (ijk)
        return (w + w.transpose(2,0,1,3,4,5) + w.transpose(1,2,0,3,4,5)
                - w.transpose(2,1,0,3,4,5) - w.transpose(0,2,1,3,4,5)
                - w.transpose(1,0,2,3,4,5))
    imds = uccsd_lambda.make_intermediates(mycc, t1, t2, eris)

    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    nocca, noccb = t2ab.shape[:2]
    mo_ea, mo_eb = eris.mo_energy
    # occupied-minus-virtual orbital-energy denominators per spin
    eia = mo_ea[:nocca,None] - mo_ea[nocca:]
    eIA = mo_eb[:noccb,None] - mo_eb[noccb:]
    # virtual-occupied Fock blocks per spin
    fvo = eris.focka[nocca:,:nocca]
    fVO = eris.fockb[noccb:,:noccb]

    # aaa
    d3 = lib.direct_sum('ia+jb+kc->ijkabc', eia, eia, eia)
    w = numpy.einsum('ijae,kceb->ijkabc', t2aa, numpy.asarray(eris.get_ovvv()).conj())
    w-= numpy.einsum('mkbc,iajm->ijkabc', t2aa, numpy.asarray(eris.ovoo.conj()))
    v = numpy.einsum('jbkc,ia->ijkabc', numpy.asarray(eris.ovov).conj(), t1a)
    v+= numpy.einsum('jkbc,ai->ijkabc', t2aa, fvo) * .5
    rw = r6(p6(w)) / d3
    imds.l1a_t = numpy.einsum('ijkabc,jbkc->ia', rw.conj(),
                              numpy.asarray(eris.ovov)) / eia * .25
    wvd = r6(p6(w * 2 + v)) / d3
    l2_t = numpy.einsum('ijkabc,kceb->ijae', wvd, numpy.asarray(eris.get_ovvv()).conj())
    l2_t -= numpy.einsum('ijkabc,iajm->mkbc', wvd, numpy.asarray(eris.ovoo.conj()))
    l2_t = l2_t + l2_t.transpose(1,0,3,2)
    l2_t += numpy.einsum('ijkabc,ai->jkbc', rw, fvo)
    imds.l2aa_t = l2_t.conj() / lib.direct_sum('ia+jb->ijab', eia, eia) * .5

    # bbb
    d3 = lib.direct_sum('ia+jb+kc->ijkabc', eIA, eIA, eIA)
    w = numpy.einsum('ijae,kceb->ijkabc', t2bb, numpy.asarray(eris.get_OVVV()).conj())
    w-= numpy.einsum('imab,kcjm->ijkabc', t2bb, numpy.asarray(eris.OVOO.conj()))
    v = numpy.einsum('jbkc,ia->ijkabc', numpy.asarray(eris.OVOV).conj(), t1b)
    v+= numpy.einsum('jkbc,ai->ijkabc', t2bb, fVO) * .5
    rw = r6(p6(w)) / d3
    imds.l1b_t = numpy.einsum('ijkabc,jbkc->ia', rw.conj(),
                              numpy.asarray(eris.OVOV)) / eIA * .25
    wvd = r6(p6(w * 2 + v)) / d3
    l2_t = numpy.einsum('ijkabc,kceb->ijae', wvd, numpy.asarray(eris.get_OVVV()).conj())
    l2_t -= numpy.einsum('ijkabc,iajm->mkbc', wvd, numpy.asarray(eris.OVOO.conj()))
    l2_t = l2_t + l2_t.transpose(1,0,3,2)
    l2_t += numpy.einsum('ijkabc,ai->jkbc', rw, fVO)
    imds.l2bb_t = l2_t.conj() / lib.direct_sum('ia+jb->ijab', eIA, eIA) * .5

    # baa
    def r4(w):
        # antisymmetrise the two same-spin (alpha) occupied/virtual pairs
        w = w - w.transpose(0,2,1,3,4,5)
        w = w + w.transpose(0,2,1,3,5,4)
        return w
    d3 = lib.direct_sum('ia+jb+kc->ijkabc', eIA, eia, eia)
    w = numpy.einsum('jIeA,kceb->IjkAbc', t2ab, numpy.asarray(eris.get_ovvv()).conj()) * 2
    w += numpy.einsum('jIbE,kcEA->IjkAbc', t2ab, numpy.asarray(eris.get_ovVV()).conj()) * 2
    w += numpy.einsum('jkbe,IAec->IjkAbc', t2aa, numpy.asarray(eris.get_OVvv()).conj())
    w -= numpy.einsum('mIbA,kcjm->IjkAbc', t2ab, numpy.asarray(eris.ovoo).conj()) * 2
    w -= numpy.einsum('jMbA,kcIM->IjkAbc', t2ab, numpy.asarray(eris.ovOO).conj()) * 2
    w -= numpy.einsum('jmbc,IAkm->IjkAbc', t2aa, numpy.asarray(eris.OVoo).conj())
    v = numpy.einsum('jbkc,IA->IjkAbc', numpy.asarray(eris.ovov).conj(), t1b)
    # NOTE(review): the next line appears twice, i.e. this term enters with an
    # effective factor of 2 -- presumably intentional (matching the explicit
    # *2 weights above); confirm against the spin-adapted derivation.
    v += numpy.einsum('kcIA,jb->IjkAbc', numpy.asarray(eris.ovOV).conj(), t1a)
    v += numpy.einsum('kcIA,jb->IjkAbc', numpy.asarray(eris.ovOV).conj(), t1a)
    v += numpy.einsum('jkbc,AI->IjkAbc', t2aa, fVO) * .5
    v += numpy.einsum('kIcA,bj->IjkAbc', t2ab, fvo) * 2
    rw = r4(w) / d3
    imds.l1a_t += numpy.einsum('ijkabc,kcia->jb', rw.conj(),
                               numpy.asarray(eris.ovOV)) / eia * .5
    imds.l1b_t += numpy.einsum('ijkabc,jbkc->ia', rw.conj(),
                               numpy.asarray(eris.ovov)) / eIA * .25
    wvd = r4(w * 2 + v) / d3
    l2_t = numpy.einsum('ijkabc,iaec->jkbe', wvd, numpy.asarray(eris.get_OVvv()).conj())
    l2_t -= numpy.einsum('ijkabc,iakm->jmbc', wvd, numpy.asarray(eris.OVoo).conj())
    l2_t = l2_t + l2_t.transpose(1,0,3,2)
    l2_t += numpy.einsum('ijkabc,ai->jkbc', rw, fVO)
    imds.l2aa_t += l2_t.conj() / lib.direct_sum('ia+jb->ijab', eia, eia) * .5
    l2_t = numpy.einsum('ijkabc,kceb->jiea', wvd, numpy.asarray(eris.get_ovvv()).conj())
    l2_t += numpy.einsum('ijkabc,kcea->jibe', wvd, numpy.asarray(eris.get_ovVV()).conj())
    l2_t -= numpy.einsum('ijkabc,kcjm->miba', wvd, numpy.asarray(eris.ovoo).conj())
    l2_t -= numpy.einsum('ijkabc,kcim->jmba', wvd, numpy.asarray(eris.ovOO).conj())
    l2_t += numpy.einsum('ijkabc,bj->kica', rw, fvo)
    imds.l2ab_t = l2_t.conj() / lib.direct_sum('ia+jb->ijab', eia, eIA) * .5

    # bba
    d3 = lib.direct_sum('ia+jb+kc->ijkabc', eia, eIA, eIA)
    w = numpy.einsum('ijae,kceb->ijkabc', t2ab, numpy.asarray(eris.get_OVVV()).conj()) * 2
    w += numpy.einsum('ijeb,kcea->ijkabc', t2ab, numpy.asarray(eris.get_OVvv()).conj()) * 2
    w += numpy.einsum('jkbe,iaec->ijkabc', t2bb, numpy.asarray(eris.get_ovVV()).conj())
    w -= numpy.einsum('imab,kcjm->ijkabc', t2ab, numpy.asarray(eris.OVOO).conj()) * 2
    w -= numpy.einsum('mjab,kcim->ijkabc', t2ab, numpy.asarray(eris.OVoo).conj()) * 2
    w -= numpy.einsum('jmbc,iakm->ijkabc', t2bb, numpy.asarray(eris.ovOO).conj())
    v = numpy.einsum('jbkc,ia->ijkabc', numpy.asarray(eris.OVOV).conj(), t1a)
    # NOTE(review): duplicated line again -- same factor-of-2 pattern as in
    # the baa block above; confirm.
    v += numpy.einsum('iakc,jb->ijkabc', numpy.asarray(eris.ovOV).conj(), t1b)
    v += numpy.einsum('iakc,jb->ijkabc', numpy.asarray(eris.ovOV).conj(), t1b)
    v += numpy.einsum('JKBC,ai->iJKaBC', t2bb, fvo) * .5
    v += numpy.einsum('iKaC,BJ->iJKaBC', t2ab, fVO) * 2
    rw = r4(w) / d3
    imds.l1a_t += numpy.einsum('ijkabc,jbkc->ia', rw.conj(),
                               numpy.asarray(eris.OVOV)) / eia * .25
    imds.l1b_t += numpy.einsum('ijkabc,iakc->jb', rw.conj(),
                               numpy.asarray(eris.ovOV)) / eIA * .5
    wvd = r4(w * 2 + v) / d3
    l2_t = numpy.einsum('ijkabc,iaec->jkbe', wvd, numpy.asarray(eris.get_ovVV()).conj())
    l2_t -= numpy.einsum('ijkabc,iakm->jmbc', wvd, numpy.asarray(eris.ovOO).conj())
    l2_t = l2_t + l2_t.transpose(1,0,3,2)
    l2_t += numpy.einsum('ijkabc,ai->jkbc', rw, fvo)
    imds.l2bb_t += l2_t.conj() / lib.direct_sum('ia+jb->ijab', eIA, eIA) * .5
    l2_t = numpy.einsum('ijkabc,kceb->ijae', wvd, numpy.asarray(eris.get_OVVV()).conj())
    l2_t += numpy.einsum('ijkabc,kcea->ijeb', wvd, numpy.asarray(eris.get_OVvv()).conj())
    l2_t -= numpy.einsum('ijkabc,kcjm->imab', wvd, numpy.asarray(eris.OVOO).conj())
    l2_t -= numpy.einsum('ijkabc,kcim->mjab', wvd, numpy.asarray(eris.OVoo).conj())
    l2_t += numpy.einsum('ijkabc,bj->ikac', rw, fVO)
    imds.l2ab_t += l2_t.conj() / lib.direct_sum('ia+jb->ijab', eia, eIA) * .5
    return imds
def update_lambda(mycc, t1, t2, l1, l2, eris=None, imds=None):
    """Update the UCCSD lambda amplitudes and fold in the (T) corrections.

    The corrections are added in place to the arrays returned by the plain
    UCCSD update; the (l1, l2) spin-block tuples are returned.
    """
    if eris is None:
        eris = mycc.ao2mo()
    if imds is None:
        imds = make_intermediates(mycc, t1, t2, eris)
    l1, l2 = uccsd_lambda.update_lambda(mycc, t1, t2, l1, l2, eris, imds)
    # add the perturbative-triples corrections to each spin block (in place)
    for amp, corr in zip(l1, (imds.l1a_t, imds.l1b_t)):
        amp += corr
    for amp, corr in zip(l2, (imds.l2aa_t, imds.l2ab_t, imds.l2bb_t)):
        amp += corr
    return tuple(l1), tuple(l2)
if __name__ == '__main__':
    from pyscf import gto
    from pyscf import scf
    from pyscf import cc

    # Small H2O/6-31G sanity check: the spin-adapted UHF-CCSD(T) lambda code
    # must reproduce the closed-shell (RHF) reference implementation.
    mol = gto.Mole()
    mol.atom = [
        [8 , (0. , 0. , 0.)],
        [1 , (0. , -0.757 , 0.587)],
        [1 , (0. , 0.757 , 0.587)]]
    mol.basis = '631g'
    mol.spin = 0
    mol.build()
    # conv_tol=1 deliberately stops SCF after essentially one cycle; the
    # comparison below is still valid because both paths share these orbitals
    mf0 = mf = scf.RHF(mol).run(conv_tol=1)
    mf = scf.addons.convert_to_uhf(mf)
    mycc = cc.UCCSD(mf)
    eris = mycc.ao2mo()

    from pyscf.cc import ccsd_t_lambda_slow as ccsd_t_lambda
    mycc0 = cc.CCSD(mf0)
    eris0 = mycc0.ao2mo()
    mycc0.kernel(eris=eris0)
    t1 = mycc0.t1
    t2 = mycc0.t2
    imds = ccsd_t_lambda.make_intermediates(mycc0, t1, t2, eris0)
    l1, l2 = ccsd_t_lambda.update_lambda(mycc0, t1, t2, t1, t2, eris0, imds)
    l1ref, l2ref = ccsd_t_lambda.update_lambda(mycc0, t1, t2, l1, l2, eris0, imds)
    # spin-expand the RHF amplitudes to UHF (aa, ab, bb) form
    t1 = (t1, t1)
    t2aa = t2 - t2.transpose(1,0,2,3)
    t2 = (t2aa, t2, t2aa)
    l1 = (l1, l1)
    l2aa = l2 - l2.transpose(1,0,2,3)
    l2 = (l2aa, l2, l2aa)
    imds = make_intermediates(mycc, t1, t2, eris)
    l1, l2 = update_lambda(mycc, t1, t2, l1, l2, eris, imds)
    # antisymmetry consistency checks, then deviation from the RHF reference
    # (all printed values should be ~0)
    print(abs(l2[1]-l2[1].transpose(1,0,2,3)-l2[0]).max())
    print(abs(l2[1]-l2[1].transpose(0,1,3,2)-l2[2]).max())
    print(abs(l1[0]-l1ref).max())
    print(abs(l2[1]-l2ref).max())
| {
"alphanum_fraction": 0.6080818414,
"author": null,
"avg_line_length": 45.4651162791,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "86e1293f2c61d1a13edf3dcf1e7a5ed2f5b43296",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 273,
"max_forks_repo_forks_event_max_datetime": "2022-03-30T12:25:28.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-11-26T10:10:24.000Z",
"max_forks_repo_head_hexsha": "0ed03633b699505c7278f1eb501342667d0aa910",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "QuESt-Calculator/pyscf",
"max_forks_repo_path": "pyscf/cc/uccsd_t_lambda.py",
"max_issues_count": 710,
"max_issues_repo_head_hexsha": "0ed03633b699505c7278f1eb501342667d0aa910",
"max_issues_repo_issues_event_max_datetime": "2022-03-30T03:53:12.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-11-26T22:04:52.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "QuESt-Calculator/pyscf",
"max_issues_repo_path": "pyscf/cc/uccsd_t_lambda.py",
"max_line_length": 91,
"max_stars_count": 501,
"max_stars_repo_head_hexsha": "0ed03633b699505c7278f1eb501342667d0aa910",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "QuESt-Calculator/pyscf",
"max_stars_repo_path": "pyscf/cc/uccsd_t_lambda.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T11:53:18.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-12-06T23:48:17.000Z",
"num_tokens": 3910,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 9775
} |
import logging
from typing import Dict
from typing import Sequence
from typing import Tuple
import yaml
import argparse
import torch
import torch.nn.functional as F
from espnet2.tts.abs_tts import AbsTTS
from espnet.nets.pytorch_backend.gradtts.diffusion import Diffusion
from espnet2.torch_utils.device_funcs import force_gatherable
from espnet2.tts.fastspeech2 import FastSpeech2
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
class GradFastSpeech2(AbsTTS):
    """FastSpeech2 acoustic model with a Grad-TTS style diffusion decoder.

    A pretrained (frozen) FastSpeech2 predicts the mel prior ``mu``; only the
    Diffusion module is trained, via a denoising (noise-estimation) loss.
    """

    def __init__(
        self,
        idim,
        odim,
        config_file=None,
        model_file=None,
        ddim: int = 64,
        beta_min: float = 0.05,
        beta_max: float = 20.0,
        pe_scale: int = 1000,
    ) -> None:
        # Load the FastSpeech2 training config and checkpoint, freeze its
        # weights, and attach the trainable diffusion decoder.
        super().__init__()
        with open(config_file, "r", encoding="utf-8") as f:
            args = yaml.safe_load(f)
        args = argparse.Namespace(**args)
        self.idim = idim
        self.odim = odim
        self.padding_idx = 0
        self.eos = idim - 1
        self.fastspeech2 = FastSpeech2(idim=len(args.token_list), odim=odim, **args.tts_conf)
        # NOTE(review): torch.load without map_location assumes checkpoint and
        # runtime devices are compatible -- confirm for CPU-only inference.
        tmp = torch.load(model_file)
        # strip the leading "tts." prefix from checkpoint keys
        d = {}
        for key, value in tmp.items():
            if key.startswith("tts"):
                d[key[4:]] = value
        self.fastspeech2.load_state_dict(d)
        for p in self.fastspeech2.parameters():
            p.requires_grad = False
        self.diffusion = Diffusion(ddim, beta_min, beta_max, pe_scale)
        self.criterion = GradFastSpeech2Loss(odim)

    def forward(
        self,
        text: torch.Tensor,
        text_lengths: torch.Tensor,
        speech: torch.Tensor,
        speech_lengths: torch.Tensor,
        durations: torch.Tensor,
        durations_lengths: torch.Tensor,
        pitch: torch.Tensor,
        pitch_lengths: torch.Tensor,
        energy: torch.Tensor,
        energy_lengths: torch.Tensor,
        spembs: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]:
        """Compute the diffusion training loss for one padded batch.

        Returns (loss, stats, weight) prepared for data-parallel gathering.
        """
        text = text[:, : text_lengths.max()]  # for data-parallel
        speech = speech[:, : speech_lengths.max()]  # for data-parallel
        durations = durations[:, : durations_lengths.max()]  # for data-parallel
        pitch = pitch[:, : pitch_lengths.max()]  # for data-parallel
        energy = energy[:, : energy_lengths.max()]  # for data-parallel
        batch_size = text.size(0)

        # Add eos at the last of sequence
        xs = F.pad(text, [0, 1], "constant", self.padding_idx)
        for i, l in enumerate(text_lengths):
            xs[i, l] = self.eos
        ilens = text_lengths + 1

        ys, ds, ps, es = speech, durations, pitch, energy
        olens = speech_lengths

        # teacher-forced pass through the frozen FastSpeech2 to get the prior
        before_outs, after_outs, d_outs, p_outs, e_outs = self.fastspeech2._forward(
            xs, ilens, ys, olens, ds, ps, es, spembs=spembs, is_inference=False
        )

        ys = speech.transpose(1, 2)
        y_masks = self._source_mask(olens)
        mu = after_outs.transpose(1, 2)
        # Zero-pad the time axis to a multiple of 4; presumably required by
        # the downsampling path of the diffusion network -- confirm.
        if ys.size(2) % 4 != 0:
            ys = torch.cat([ys, torch.zeros([batch_size, self.odim, 4 - ys.size(2) % 4], dtype=ys.dtype, device=ys.device)], dim=2)
            mu = torch.cat([mu, torch.zeros([mu.size(0), self.odim, 4 - mu.size(2) % 4], dtype=mu.dtype, device=mu.device)], dim=2)
            y_masks = torch.cat([y_masks, torch.zeros([y_masks.size(0), 1, 4 - y_masks.size(2) % 4], dtype=y_masks.dtype, device=y_masks.device)], dim=2)
        noise_estimation, z = self.diffusion(ys, y_masks, mu)
        diff_loss = self.criterion(noise_estimation, z, y_masks)
        loss = diff_loss

        stats = dict(
            diff_loss=diff_loss.item(),
            loss=loss.item(),
        )
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight

    def inference(
        self,
        text: torch.Tensor,
        timesteps: int = 50,
        spembs: torch.Tensor = None,
        temperature: float = 1.0,
        alpha: float = 1.03,
        use_teacher_forcing: bool = False,
    ):
        """Synthesise a mel-spectrogram for one utterance.

        Returns (mel, None, None), mel trimmed back to the unpadded length.
        """
        mu, _, _ = self.fastspeech2.inference(text, alpha=alpha, use_teacher_forcing=use_teacher_forcing)
        # import numpy as np
        # np.save("/nolan/inference/gradtts_pre.mel.npy", mu.data.cpu().numpy())
        length = mu.shape[0]
        olens = torch.tensor([length], dtype=torch.long, device=mu.device)
        y_masks = self._source_mask(olens)
        mu = mu.unsqueeze(0).transpose(1, 2)
        # pad the time axis to a multiple of 4 (same constraint as in forward)
        if mu.size(2) % 4 != 0:
            mu = torch.cat([mu, torch.zeros([1, self.odim, 4 - mu.size(2) % 4], dtype=mu.dtype, device=mu.device)], dim=2)
            y_masks = torch.cat([y_masks, torch.zeros([1, 1, 4 - y_masks.size(2) % 4], dtype=y_masks.dtype, device=y_masks.device)], dim=2)
        # start from the prior plus temperature-scaled Gaussian noise, denoise
        z = mu + torch.randn_like(mu, device=mu.device) / temperature
        out = self.diffusion.inference(z, y_masks, mu, timesteps).transpose(1, 2)
        return out[0, :length, :], None, None

    def _source_mask(self, ilens: torch.Tensor) -> torch.Tensor:
        # non-padding mask with a singleton axis inserted before the time axis
        x_masks = make_non_pad_mask(ilens).to(next(self.parameters()).device)
        return x_masks.unsqueeze(-2)
class GradFastSpeech2Loss(torch.nn.Module):
    """Diffusion noise-estimation loss.

    Sum of squared (estimate + noise) residuals, normalised by the number of
    unmasked frames times the mel dimension.
    """

    def __init__(self, odim):
        super().__init__()
        # output (mel) dimension used in the normalisation denominator
        self.odim = odim

    def forward(
        self,
        noise_estimation: torch.Tensor,
        z: torch.Tensor,
        y_masks: torch.Tensor,
    ):
        """Return sum((noise_estimation + z)**2) / (sum(y_masks) * odim)."""
        residual = noise_estimation + z
        denominator = torch.sum(y_masks) * self.odim
        return torch.sum(residual * residual) / denominator
| {
"alphanum_fraction": 0.6089778259,
"author": null,
"avg_line_length": 37.2281879195,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "213686a082af73aa65210e99ccede9eeda2ab854",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-10-18T08:15:31.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-10-18T08:15:31.000Z",
"max_forks_repo_head_hexsha": "73c43ac034d7a44a3bf1a10dc4079884df57de43",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "NolanYuu/espnet",
"max_forks_repo_path": "espnet2/tts/gradfastspeech2.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "73c43ac034d7a44a3bf1a10dc4079884df57de43",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "NolanYuu/espnet",
"max_issues_repo_path": "espnet2/tts/gradfastspeech2.py",
"max_line_length": 153,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "73c43ac034d7a44a3bf1a10dc4079884df57de43",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "NolanYuu/espnet",
"max_stars_repo_path": "espnet2/tts/gradfastspeech2.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1495,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5547
} |
# quantum solver
import numpy as np
from scipy.linalg import eigh
def solver(nk, nbasis, mu, hamiton_mat):
    """Solve a 1D periodic single-particle problem in a plane-wave basis.

    Parameters
    ----------
    nk : int
        Number of k-points sampled uniformly on [0, pi].
    nbasis : int
        Number of plane-wave basis functions.
    mu : float
        Chemical potential, measured from the band minimum at k = 0.
    hamiton_mat : (nbasis, nbasis) ndarray
        Potential part of the Hamiltonian; its diagonal is replaced by the
        kinetic term at each k.  The caller's array is no longer mutated.

    Returns
    -------
    (float, ndarray)
        k-averaged kinetic energy and k-averaged density components.
    """
    kpoints = np.linspace(0, np.pi, nk)
    T = 0.0
    density = np.zeros(nbasis, dtype=np.complex64)
    b = 0.0  # band-energy offset, fixed at the first k-point (k = 0)
    offsets = np.arange(nbasis) - nbasis // 2
    for k in kpoints:
        # Bug fix: the original wrote the kinetic diagonal into the caller's
        # matrix in place and then diagonalised it with overwrite_a=True, so
        # eigh destroyed the off-diagonal (potential) entries and every
        # k-point after the first operated on garbage.  Work on a fresh copy.
        ham_k = np.array(hamiton_mat, dtype=float)
        kinetic_term = 0.5 * (k + offsets * 2 * np.pi) ** 2
        np.fill_diagonal(ham_k, kinetic_term)
        En_k, Uq_k = eigh(ham_k, overwrite_a=True)
        if k == 0:
            b = En_k[0]  # set the minimum of band energy to 0
        # occupation matrix in the eigenbasis: fill states with E <= mu + b
        # (eigh returns eigenvalues in ascending order, so we can stop early)
        num_mat_eigspace = np.zeros((nbasis, nbasis))
        for i in range(nbasis):
            if En_k[i] <= mu + b:
                num_mat_eigspace[i, i] = 1
            else:
                break
        # density matrix back in the plane-wave basis
        density_mat_kspace = Uq_k @ (num_mat_eigspace @ (Uq_k.T).conj())
        for i in range(nbasis):
            # density component i is the i-th off-diagonal trace
            density[i] += np.trace(density_mat_kspace, offset=i)
            T += kinetic_term[i] * (density_mat_kspace[i, i]).real
    return T / nk, density / nk
| {
"alphanum_fraction": 0.5734317343,
"author": null,
"avg_line_length": 33.0487804878,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6fc2f97757e6319b9f7efd431ff80db0d256bc74",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a8ebd46ec0442a1fcbfa7e0571f43550e1faaa70",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "HamletWantToCode/machine_learning_kinetic_energy",
"max_forks_repo_path": "main/solver.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a8ebd46ec0442a1fcbfa7e0571f43550e1faaa70",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "HamletWantToCode/machine_learning_kinetic_energy",
"max_issues_repo_path": "main/solver.py",
"max_line_length": 90,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "a8ebd46ec0442a1fcbfa7e0571f43550e1faaa70",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "HamletWantToCode/machine_learning_kinetic_energy",
"max_stars_repo_path": "main/solver.py",
"max_stars_repo_stars_event_max_datetime": "2019-02-24T09:22:37.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-12-17T01:11:21.000Z",
"num_tokens": 401,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1355
} |
import oemof.solph as solph
from .component_electrolyzer import Electrolyzer
import pyomo.environ as po
class ElectrolyzerWasteHeat(Electrolyzer):
""" Electrolyzer agents with waste heat model are created through this subclass of the
Electrolyzer class """
    def __init__(self, params):
        """Constructor.

        :param params: dict of simulation parameters; must contain "bus_th"
            in addition to the base Electrolyzer parameters.
        """
        # Split the params dict
        param_bus_th = {"bus_th": params.pop("bus_th")}
        # Call the init function of the mother class.
        Electrolyzer.__init__(self, params)

        """ PARAMETERS """
        # Define the additional thermal bus
        self.bus_th = None

        """ UPDATE PARAMETER DEFAULT VALUES """
        self.set_parameters(param_bus_th)

        # Interval time [min].
        self.interval_time = self.sim_params.interval_time
        # Calculate the max. energy the electrolyzer can use in one time step [Wh].
        self.energy_max = self.power_max * self.interval_time / 60

        """ CONSTANT PARAMETERS (PHYSICS) """
        # constant parameters for calculating sensible heat:
        # specific heat at constant pressure [J/(kg*K)]
        self.c_p_H2 = 14304
        self.c_p_O2 = 920
        self.c_p_H2O = 4183

        """ ELECTROLYZER GEOMETRY PARAMETERS """
        # cell diameter [m] derived from the cell area (4*A/pi)**0.5, cm -> m
        self.diameter_cell = (4 * self.area_cell / 3.14) ** 0.5 / 100  # m
        # The height of the end of the stack which is not part of the cells is assumed to have a
        # dependence on the diameter of the cell. The ratio is taken as 7 : 120
        # (stack_end_height : diameter_cell), which is based on De Silva, Y.S.K. (2017). Design
        # of an Alkaline Electrolysis Stack, University of Agder.
        self.stack_end_height = 0.058 * self.diameter_cell
        # The height of an individual cell in relation to cell diameter is calculated using example
        # data from Vogt, U.F. et al. (2014). Novel Developments in Alkaline Water Electrolysis,
        # Empa Laboratory of Hydrogen and Energy. The individual cell height is estimated and
        # compared with the given cell radius, and a ratio of 1 : 75.5 is obtained.
        self.height_cell = self.diameter_cell / 75.5
        # The total stack height is calculated by taking the cell stack and the two ends of the
        # stack into consideration
        self.height_stack = (self.height_cell * self.z_cell) + (2 * self.stack_end_height)
        # The external surface area of the electrolysis stack is calculated assuming that it is
        # cylindrical
        self.area_stack = (
            2 * self.area_cell / 10000 + 3.14 * self.diameter_cell * self.height_stack
        )  # [m^2]
        # The overall surface area exposed by the gas separators and the pipe communicating
        # them is assumed to be in a ratio of 1 : 0.42 with the area of the stack (taken from
        # Dieguez et al)
        self.area_separator = 2.38 * self.area_stack
        # Save the two models to set constraints later.
        self.model_h2 = None
        self.model_th = None
def conversion_fun_ely(self, ely_energy):
    """Piecewise-linear conversion for the hydrogen branch.

    Maps an electric energy breakpoint [Wh] to the hydrogen mass produced
    at that breakpoint [kg] by looking it up in the supporting points.

    :param ely_energy: a value from supporting_points["energy_halved"]
    :return: the matching entry of supporting_points["h2_produced"]
    """
    breakpoints = self.supporting_points["energy_halved"]
    hydrogen_masses = self.supporting_points["h2_produced"]
    return hydrogen_masses[breakpoints.index(ely_energy)]
def conversion_fun_thermal(self, ely_energy):
    """Piecewise-linear conversion for the thermal branch.

    Maps an electric energy breakpoint [Wh] to the waste heat produced
    at that breakpoint [Wh] by looking it up in the supporting points.

    :param ely_energy: a value from supporting_points["energy_halved"]
    :return: the matching entry of supporting_points["thermal_energy"]
    """
    breakpoints = self.supporting_points["energy_halved"]
    thermal_energies = self.supporting_points["thermal_energy"]
    return thermal_energies[breakpoints.index(ely_energy)]
def create_oemof_model(self, busses, model):
    """Add the electrolyzer to the oemof model as two piecewise linear
    transformers: one producing hydrogen, one producing the waste heat.

    The two components share the same electric input bus; they are kept
    separate because a PiecewiseLinearTransformer cannot have two outputs.

    :param busses: dict of oemof busses, keyed by bus name
    :param model: the oemof energy system model to add the components to
    """
    # Refresh the breakpoints before building the transformers.
    self.update_nonlinear_behaviour()

    def _piecewise_transformer(label, output_bus, conversion_function):
        # Both transformers are identical apart from label, output bus and
        # conversion function, so build them from one template.
        return solph.custom.PiecewiseLinearTransformer(
            label=label,
            inputs={
                busses[self.bus_el]: solph.Flow(
                    nominal_value=self.energy_max / 2, variable_costs=0
                )
            },
            outputs={output_bus: solph.Flow()},
            in_breakpoints=self.supporting_points["energy_halved"],
            conversion_function=conversion_function,
            pw_repn="CC",
        )

    electrolyzer = _piecewise_transformer(
        self.name, busses[self.bus_h2], self.conversion_fun_ely
    )
    electrolyzer_thermal = _piecewise_transformer(
        self.name + "_thermal", busses[self.bus_th], self.conversion_fun_thermal
    )
    model.add(electrolyzer, electrolyzer_thermal)
    # Keep references so update_constraints can couple the two inflows later.
    self.model_h2 = electrolyzer
    self.model_th = electrolyzer_thermal
    return None
def update_nonlinear_behaviour(self):
    """Recompute the supporting points (breakpoints) for the piecewise
    linear conversion of electricity into hydrogen and waste heat.

    Fills self.supporting_points with the breakpoint lists for energy [Wh],
    hydrogen produced [kg], stack temperature [K], thermal energy [Wh] and
    the halved energies used as the shared input breakpoints.
    """
    n_supporting_point = 10
    energies = []
    hydrogen_masses = []
    temperatures = []
    waste_heats = []
    for step in range(n_supporting_point + 1):
        # Energy at this breakpoint [Wh], evenly spaced up to energy_max.
        this_energy = step / n_supporting_point * self.energy_max
        energies.append(this_energy)
        # Hydrogen mass [kg] and resulting temperature [K] for this energy
        # (helper expects kWh, hence the / 1000).
        this_mass, this_temp = self.get_mass_and_temp(this_energy / 1000)
        hydrogen_masses.append(this_mass)
        temperatures.append(this_temp)
        # Waste heat for this breakpoint, converted back from kWh to Wh.
        this_waste_heat = (
            self.get_waste_heat(this_energy / 1000, this_mass, this_temp) * 1000
        )
        waste_heats.append(this_waste_heat)
    self.supporting_points["temperature"] = temperatures
    self.supporting_points["h2_produced"] = hydrogen_masses
    self.supporting_points["energy"] = energies
    self.supporting_points["thermal_energy"] = waste_heats
    # The transformers each receive half the inflow, so the breakpoints on
    # the input side are halved as well.
    self.supporting_points["energy_halved"] = [e / 2 for e in energies]
def get_waste_heat(self, energy_used, h2_produced, new_ely_temp):
    """Return the usable waste heat [kWh] of one time step.

    Source: Dieguez et al., 'Thermal Performance of a commercial alkaline
    water electrolyzer: Experimental study and mathematical modeling',
    Int. J. Hydrogen Energy, 2008.

    :param energy_used: electric energy consumed [kWh]
    :param h2_produced: hydrogen produced [kg]
    :param new_ely_temp: resulting stack temperature [K]
    :return: waste heat [kWh]; 0 while the stack is still heating up
    """
    # Internal heat generation: electric energy minus the chemical energy
    # bound in the produced hydrogen (upper heating value) [kWh].
    internal_heat_generation = (
        energy_used - h2_produced * self.upp_heat_val * 1e6 / 3600 / 1000
    )
    # Temperature difference driving the losses [K].
    dT = new_ely_temp - self.temp_min
    # Convection correlation from Dieguez et al. [W/(m^2*K)].
    heat_transfer_coefficient = 1.32 * (dT / self.diameter_cell) ** 0.25
    # Losses over stack and gas separator surfaces for one interval [kWh].
    heat_losses = (
        heat_transfer_coefficient
        * (self.area_stack + self.area_separator)
        * dT
        * self.interval_time
        / 60
        / 1000
    )
    sensible_heat, latent_heat = self.sensible_and_latent_heats(
        h2_produced, new_ely_temp
    )  # [kWh]
    # Waste heat is only extracted once the stack is (practically) at its
    # maximum operating temperature; below that the generated heat goes
    # into warming the stack itself.
    if new_ely_temp >= (0.999 * self.temp_max):
        return internal_heat_generation - heat_losses + sensible_heat
    return 0
def sensible_and_latent_heats(self, mass_H2, new_ely_temp):
    """Return [sensible_heat, latent_heat] in kWh for one time step.

    Covers the sensible heat carried away by the H2 and O2 streams and the
    heat needed to warm the feed water from room temperature to stack
    temperature. Latent heat is neglected (vapour mass unknown).

    :param mass_H2: hydrogen produced [kg]
    :param new_ely_temp: stack temperature [K]
    """
    # Water-splitting stoichiometry: half a mol O2 per mol H2.
    mass_O2 = mass_H2 * 0.5 * self.molar_mass_O2 / self.molarity
    # Water vapour leaving with the gases is neglected (condenser state
    # unknown), so consumed water equals the sum of the product gases.
    mass_H2O = mass_H2 + mass_O2
    # Warming the feed water (negative contribution) and cooling the H2/O2
    # streams from stack temperature; converted with 1 J = 1/3.6e6 kWh.
    heat_joules = (
        mass_H2O * self.c_p_H2O * (self.temp_min - new_ely_temp)
        - mass_H2 * self.c_p_H2 * (new_ely_temp - self.temp_min)
        - mass_O2 * self.c_p_O2 * (new_ely_temp - self.temp_min)
    )
    sensible_heat = heat_joules / 3.6e6
    # Latent heat is neglected along with the vapour mass.
    latent_heat = 0
    return [sensible_heat, latent_heat]
def update_constraints(self, busses, model_to_solve):
    # Set a constraint so that the electric inflow of the hydrogen producing and the
    # thermal part are always the same (which is necessary while the piecewise linear
    # transformer cannot have two outputs yet and therefore the two parts need to be
    # separate components).
    def electrolyzer_ratio_rule(model, t):
        # Electric inflow of the thermal sub-component at time step t.
        expr = 0
        expr += model.flow[busses[self.bus_el], self.model_th, t]
        # Subtract the electric inflow of the hydrogen sub-component; the
        # constraint forces both inflows to be equal.
        expr += -model.flow[busses[self.bus_el], self.model_h2, t]
        return expr == 0
    # Register the coupling constraint over all time steps (po = pyomo).
    model_to_solve.electrolyzer_flow_ratio_fix = po.Constraint(
        model_to_solve.TIMESTEPS, rule=electrolyzer_ratio_rule
    )
def update_flows(self, results, sim_params):
    # Track the flows of both sub-components (the hydrogen part under
    # self.name and the thermal part under self.name + "_thermal") by
    # delegating to the parent implementation once per component name.
    # NOTE(review): the explicit name argument suggests the parent
    # Electrolyzer.update_flows accepts an optional component name —
    # confirm against the base class signature.
    Electrolyzer.update_flows(self, results, sim_params, self.name)
    Electrolyzer.update_flows(
        self, results, sim_params, self.name + "_thermal"
    )
| {
"alphanum_fraction": 0.6440026889,
"author": null,
"avg_line_length": 46.0752212389,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1f1ae7c604af497219008ebfbf892eec12275e4a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "06f0c9a6af51be73ef1d5eaaeed9946be672e51e",
"max_forks_repo_licenses": [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
],
"max_forks_repo_name": "morrme/smooth",
"max_forks_repo_path": "smooth/components/component_electrolyzer_waste_heat.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "06f0c9a6af51be73ef1d5eaaeed9946be672e51e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
],
"max_issues_repo_name": "morrme/smooth",
"max_issues_repo_path": "smooth/components/component_electrolyzer_waste_heat.py",
"max_line_length": 99,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "06f0c9a6af51be73ef1d5eaaeed9946be672e51e",
"max_stars_repo_licenses": [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
],
"max_stars_repo_name": "morrme/smooth",
"max_stars_repo_path": "smooth/components/component_electrolyzer_waste_heat.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2466,
"path": null,
"reason": "import pyomo",
"repo": null,
"save_path": null,
"sha": null,
"size": 10413
} |
from efficientnet_pytorch import EfficientNet
import torch
from torch import nn
from torch import optim
from torch.utils.data import TensorDataset, DataLoader, Dataset
import tokenizers
from torchvision import transforms
from PIL import Image
from tqdm import tqdm
import numpy as np
import pandas as pd
# Load a pretrained EfficientNet-B7 backbone (standard weights, not AdvProp).
model = EfficientNet.from_pretrained('efficientnet-b7', advprop=False)
# Prefer GPU when available; keep a CPU handle for moving results off-device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cpu = torch.device('cpu')
print(f"使用デバイス: {device}")
model.to(device)
class ThumbnailDataset(Dataset):
    """Dataset yielding one normalized thumbnail tensor per dataframe row.

    Each row must have a ``video_id``; the image is loaded from
    ``./data/thumbnail/<video_id>.jpg``.
    """

    def __init__(self, df):
        self.df = df
        # Resize to 90x120 and normalize with the ImageNet statistics the
        # pretrained backbone expects.
        self.tfms = transforms.Compose(
            [
                transforms.Resize((90, 120)),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            ]
        )

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        record = self.df.iloc[index]
        return {'images': self.get_image_data(record)}

    def get_image_data(self, row):
        # Force RGB in case of grayscale or RGBA thumbnails.
        image = Image.open(f"./data/thumbnail/{row.video_id}.jpg").convert('RGB')
        return self.tfms(image)
def get_features(df):
    """Extract EfficientNet-B7 global-average-pooled features for every row.

    :param df: dataframe with a ``video_id`` column (see ThumbnailDataset)
    :return: np.ndarray of shape (len(df), n_features)
    """
    ds = ThumbnailDataset(df)
    dl = DataLoader(ds, batch_size=64, shuffle=False)
    # Inference mode: freeze batch-norm/dropout statistics and skip
    # autograd bookkeeping (the original ran the model in training mode).
    model.eval()
    preds = []
    with torch.no_grad():
        for batch in tqdm(dl):
            # Bug fix: the inputs must live on the same device as the model
            # (the model was moved to `device` at module level; on CUDA the
            # original crashed with a device mismatch).
            input_images = batch["images"].to(device)
            bs = input_images.size(0)
            output = model.extract_features(input_images)
            # Global average pool to one flat feature vector per image.
            output = nn.AdaptiveAvgPool2d(1)(output)
            output = output.view(bs, -1)
            preds.append(output.to(cpu).detach().numpy())
    return np.concatenate(preds, axis=0)
def main():
    """Extract thumbnail features for the train/test splits and save CSVs."""
    train_df = pd.read_csv("./data/input/train_data.csv")
    test_df = pd.read_csv("./data/input/test_data.csv")
    train_feats = get_features(train_df)
    test_feats = get_features(test_df)
    # Both splits share the same feature dimensionality and column names.
    feature_cols = [f"image_{i}" for i in range(train_feats.shape[1])]
    pd.DataFrame(train_feats, columns=feature_cols).to_csv(
        "./data/input/train_image_features.csv", index=False
    )
    pd.DataFrame(test_feats, columns=feature_cols).to_csv(
        "./data/input/test_image_features.csv", index=False
    )
| {
"alphanum_fraction": 0.6686877436,
"author": null,
"avg_line_length": 31.2027027027,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b8c2869fd005364d7008bc2f6c4240bdf1c8e376",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "04740862fb28fb9a38131554369d6c54eb560fc5",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Lain-progressivehouse/probspace-youtube",
"max_forks_repo_path": "src/efficient_net_features.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "04740862fb28fb9a38131554369d6c54eb560fc5",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Lain-progressivehouse/probspace-youtube",
"max_issues_repo_path": "src/efficient_net_features.py",
"max_line_length": 99,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "04740862fb28fb9a38131554369d6c54eb560fc5",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Lain-progressivehouse/probspace-youtube",
"max_stars_repo_path": "src/efficient_net_features.py",
"max_stars_repo_stars_event_max_datetime": "2021-02-08T03:54:29.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-06-29T04:32:07.000Z",
"num_tokens": 544,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2309
} |
[STATEMENT]
lemma sH_seq: "\<^bold>{P\<^bold>} X;Y \<^bold>{Q\<^bold>} = \<^bold>{P\<^bold>} X \<^bold>{\<lambda>s. \<forall>s'. (s, s') \<in> Y \<longrightarrow> Q s'\<^bold>}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<^bold>{P\<^bold>}X ; Y\<^bold>{Q\<^bold>} = \<^bold>{P\<^bold>}X\<^bold>{\<lambda>s. \<forall>s'. (s, s') \<in> Y \<longrightarrow> Q s'\<^bold>}
[PROOF STEP]
unfolding rel_kat_H
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<forall>s s'. P s \<longrightarrow> (s, s') \<in> X ; Y \<longrightarrow> Q s') = (\<forall>s s'. P s \<longrightarrow> (s, s') \<in> X \<longrightarrow> (\<forall>s'a. (s', s'a) \<in> Y \<longrightarrow> Q s'a))
[PROOF STEP]
by auto | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Hybrid_Systems_VCs_KleeneAlgebraTests_HS_VC_KAT_rel",
"hexsha": null,
"include": null,
"lang": null,
"length": 2,
"llama_tokens": 296,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
def getting_and_knowing_your_data(chipo):
    """Explore the chipotle order dataframe (exercise steps 4-17).

    Prints the answer to each step, converts ``item_price`` from a string
    like ``"$2.39 "`` to float in place, and returns the dataframe.

    :param chipo: dataframe with columns order_id, quantity, item_name,
        choice_description and item_price
    :return: the same dataframe with ``item_price`` as float
    """
    #Step 4. See the first 10 entries
    print(chipo.head(10))
    #Step 5. What is the number of observations in the dataset?
    print(len(chipo.index))
    #Step 6. What is the number of columns in the dataset?
    print(len(chipo.columns))
    #Step 7. Print the name of all the columns.
    for feature in chipo.columns:
        print(feature)
    #Step 8. How is the dataset indexed?
    # Bug fix: the original printed `chipo.info` (the bound method object
    # itself); the index answers the question.
    print(chipo.index)
    #Step 9. Which was the most ordered item?
    #Step 10. How many items were ordered?
    itemByOrderQuantity = chipo['item_name'].value_counts()
    print("{} with {} orders".format(itemByOrderQuantity.index[0],
                                     itemByOrderQuantity.iloc[0]))
    #Step 11. What was the most ordered item in the choice_description column?
    choiceDescriptionByQuantity = chipo['choice_description'].value_counts()
    print("{} with {} orders".format(choiceDescriptionByQuantity.index[0],
                                     choiceDescriptionByQuantity.iloc[0]))
    #Step 12. How many items were ordered in total?
    print(chipo['quantity'].sum())
    #Step 13. Turn the item price into a float.
    # Fix: vectorized strip of the dollar sign with a raw-string pattern
    # (the original used the invalid escape '\$' in a per-row loop).
    chipo['item_price'] = (
        chipo['item_price'].astype(str).str.replace(r'\$', '', regex=True).astype(float)
    )
    print(chipo['item_price'].dtypes)
    #Step 14. How much was the revenue for the period in the dataset?
    print(chipo['item_price'].sum(), '$')
    #Step 15. How many orders were made in the period?
    print(chipo['order_id'].max())
    #Step 16. What is the average amount per order?
    # Bug fix: group the two-column frame that was built for this purpose;
    # the original grouped the full dataframe (fails on string columns).
    chipoByOrder = pd.DataFrame(chipo, columns=['order_id', 'item_price'])
    chipoByOrderMean = chipoByOrder.groupby(by='order_id').mean()
    print(chipoByOrderMean['item_price'].sum() / len(chipoByOrderMean.index))
    #Step 17. How many different items are sold?
    # Bug fix: the original counted the unique *counts* of value_counts,
    # not the number of distinct items.
    print(chipo['item_name'].nunique())
    return chipo
def filtering_and_sorting(chipo):
    """Filtering/sorting exercise steps 4-10 on the chipotle dataframe.

    Expects ``item_price`` to already be numeric (see
    getting_and_knowing_your_data). Only prints results; returns None.

    Bug fix: the original ``def`` line was missing its colon (SyntaxError).
    """
    #Step 4. How many products cost more than $10.00?
    #Step 5. What is the price of each item?
    counterProducts = pd.DataFrame(chipo, columns=['item_name', 'item_price'])
    counterProductsByType = counterProducts.groupby(by='item_name').mean()
    totalOverTenUSD = 0
    itemNamePrice = []
    for currentPrice in counterProductsByType['item_price']:
        if currentPrice > 10:
            totalOverTenUSD += 1
        itemNamePrice.append(currentPrice)
    # Bug fix: the counter holds items costing MORE than $10; the original
    # message said "under".
    print('Total items over $10:', totalOverTenUSD)
    print('Prices list:', itemNamePrice)
    #Step 6. Print a data frame with only two columns item_name and item_price
    print(counterProducts)
    #Step 7. Sort by the name of the item
    counterProductsSortedByName = counterProducts.sort_values(by='item_name')
    print(counterProductsSortedByName)
    #Step 8. What was the quantity of the most expensive item ordered?
    # NOTE(review): .max() returns the column-wise maxima of the frame, not
    # the quantity of the priciest item — kept as in the original solution.
    print(counterProducts.sort_values(by='item_price').max())
    #Step 9. How many times were a Veggie Salad Bowl ordered?
    counterProductsSortedByName = pd.DataFrame(chipo, columns=['item_name'])
    counterProductsSortedByName = counterProductsSortedByName.assign(counterValue='1')
    counterProductsSortedByName = counterProductsSortedByName.groupby(by='item_name').count()
    print(counterProductsSortedByName.loc['Veggie Salad Bowl']['counterValue'])
    #Step 10. How many times did people order more than one Canned Soda?
    # Bug fix: "more than one" means quantity > 1, not exactly 2.
    itemByQuantity = pd.DataFrame(chipo, columns=['item_name', 'quantity'])
    quantitiesCounter = 0
    for name, quantity in zip(itemByQuantity['item_name'], itemByQuantity['quantity']):
        if name == 'Canned Soda' and quantity > 1:
            quantitiesCounter += 1
    print(quantitiesCounter)
def grouping(users):
    """Grouping exercise steps 4-8 on the users dataframe.

    :param users: dataframe with columns age, gender and occupation
    :return: the unmodified users dataframe (the caller assigns the result)
    """
    #Step 4. Discover what is the mean age per occupation
    usersMeanAgeByOccupation = pd.DataFrame(users, columns=['age', 'occupation'])
    usersMeanAgeByOccupation = usersMeanAgeByOccupation.groupby(by='occupation').mean()
    print(usersMeanAgeByOccupation)
    #Step 5. Discover the Male ratio per occupation, sorted descending
    usersRatioGenderByOccupation = pd.DataFrame(users, columns=['gender', 'occupation'])
    maleCounts = (
        usersRatioGenderByOccupation.loc[usersRatioGenderByOccupation['gender'] == 'M']
        .groupby(by='occupation').count()
        .rename(columns={'gender': 'SumMale'})
    )
    femaleCounts = (
        usersRatioGenderByOccupation.loc[usersRatioGenderByOccupation['gender'] == 'F']
        .groupby(by='occupation').count()
        .rename(columns={'gender': 'SumFemale'})
    )
    ratioList = pd.DataFrame(columns=['occupation', 'ratio'])
    for i in range(len(maleCounts)):
        currentOccupation = maleCounts.index[i]
        if currentOccupation in femaleCounts.index:
            ratioValue = (
                maleCounts['SumMale'].iloc[i]
                / femaleCounts['SumFemale'][currentOccupation]
            )
        else:
            # All-male occupation: 100 used as a sentinel ratio.
            ratioValue = 100
        ratioList.loc[len(ratioList.index)] = [currentOccupation, ratioValue]
    for i in range(len(femaleCounts)):
        # Bug fix: the original tested membership against the *female* index
        # (always true), so all-female occupations were never appended.
        if femaleCounts.index[i] not in maleCounts.index:
            ratioList.loc[len(ratioList.index)] = [femaleCounts.index[i], 0]
    ratioList = ratioList.sort_values(by='ratio', ascending=False)
    print(ratioList)
    #Step 6. For each occupation, calculate the minimum and maximum ages
    usersOccupationAgeMinMax = pd.DataFrame(users, columns=['age', 'occupation'])
    occupationListMaxAge = usersOccupationAgeMinMax.groupby(by='occupation').max()
    occupationListMinAge = usersOccupationAgeMinMax.groupby(by='occupation').min()
    #Step 7. For each combination of occupation and gender, calculate the mean age
    usersOccupationGenderMeanAge = pd.DataFrame(users, columns=['occupation', 'gender', 'age'])
    usersOccupationGenderMeanAge = usersOccupationGenderMeanAge.groupby(
        by=['occupation', 'gender']).mean()
    #Step 8. For each occupation present the percentage of women and men
    # Fix: align the two count series with fill_value=0 instead of the
    # original hard-coded 'doctor' patch for male-only occupations.
    totalCounts = maleCounts['SumMale'].add(femaleCounts['SumFemale'], fill_value=0)
    malePercentage = (maleCounts['SumMale'] / totalCounts) * 100
    femalePercentage = 100 - malePercentage
    return users
def merge(data1, data2, data3):
    """Merging exercise: concatenation and subject_id joins.

    Prints data3; all other results are computed locally only. Returns None.
    """
    #Step 4. Stack the two dataframes along rows.
    stacked_rows = pd.concat([data1, data2], ignore_index=True, axis=0)
    #Step 5. Stack the two dataframes along columns.
    stacked_cols = pd.concat([data1, data2], ignore_index=True, axis=1)
    #Step 6. Print data3.
    print(data3)
    #Step 7. Right-join the stacked rows with data3 on subject_id.
    right_joined = stacked_rows.merge(data3, on='subject_id', how='right')
    #Step 8. Inner join: subject_ids present in both data1 and data2.
    inner_joined = data1.merge(data2, on='subject_id', how='inner')
    #Step 9. Outer join: every subject_id from either side.
    outer_joined = data1.merge(data2, on='subject_id', how='outer')
def iris_function(iris):
    """Iris exercise steps 4-11: rename columns, handle missing values,
    drop the class column and re-index.

    :param iris: dataframe with the four measurement columns plus class
    :return: cleaned dataframe (first 3 rows dropped, fresh 0-based index)
    """
    #Step 4. Create columns for the dataset
    iris.columns = ['SepalLength (cm)', 'SepalWidth (cm)', 'PetalLength (cm)', 'PetalWidth (cm)', 'class']
    #Step 5. Is there any missing value in the dataframe?
    for feature in iris.columns:
        missing_pct = np.round(iris[feature].isnull().sum() / len(iris) * 100, 2)
        print("{} had {} % missing values".format(feature, missing_pct))
    print('\n')
    #Step 6. Set the values of the rows 10 to 29 of 'petal_length' to NaN.
    # Bug fix: the original range(10, 29) stopped at row 28; "10 to 29" is
    # inclusive, which .loc slicing expresses directly.
    iris.loc[10:29, 'PetalLength (cm)'] = np.nan
    #Step 7. Substitute the NaN values with 1.0
    iris['PetalLength (cm)'] = iris['PetalLength (cm)'].replace(np.nan, 1.0)
    #Step 8. Now let's delete the column class
    iris = iris.drop(columns='class')
    #Step 9. Set the first 3 rows as NaN
    iris.iloc[0:3, :] = np.nan
    #Step 10. Delete the rows that have NaN
    iris = iris.dropna()
    #Step 11. Reset the index so it begins with 0 again.
    # drop=True so the old index is not kept as an extra column.
    iris = iris.reset_index(drop=True)
    return iris
def main():
    """Run all pandas exercises against their remote datasets."""
    chipo = pd.read_csv('https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv', sep='\t')
    chipo = getting_and_knowing_your_data(chipo)
    filtering_and_sorting(chipo)
    users = pd.read_csv('https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user', sep='|')
    users = grouping(users)
    data1 = pd.DataFrame({'subject_id': ['1', '2', '3', '4', '5'],
                          'first_name': ['Alex', 'Amy', 'Allen', 'Alice',
                                         'Ayoung'],
                          'last_name': ['Anderson', 'Ackerman', 'Ali',
                                        'Aoni', 'Atiches']})
    data2 = pd.DataFrame({'subject_id': ['4', '5', '6', '7', '8'],
                          'first_name': ['Billy', 'Brian', 'Bran', 'Bryce',
                                         'Betty'],
                          'last_name': ['Bonder', 'Black', 'Balwner', 'Brice',
                                        'Btisan']})
    data3 = pd.DataFrame({'subject_id': ['1', '2', '3', '4', '5', '7', '8',
                                         '9', '10', '11'],
                          'test_id': [51, 15, 15, 61, 16, 14, 15, 1, 61, 16]})
    merge(data1, data2, data3)
    # Bug fix: iris.data has no header row; without header=None the first
    # data row was silently consumed as column names and lost.
    iris = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
    iris = iris_function(iris)
if __name__ == "__main__":
main() | {
"alphanum_fraction": 0.6405439864,
"author": null,
"avg_line_length": 41.2807017544,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "9754575b4fe635780cb9a18b48948eb64925e834",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "71233cf3764b528c39438d5d45b433f094456717",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "ofir-frd/Machine-Learning-Bootcamp",
"max_forks_repo_path": "pandas/PandasExerciseBasic.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "71233cf3764b528c39438d5d45b433f094456717",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "ofir-frd/Machine-Learning-Bootcamp",
"max_issues_repo_path": "pandas/PandasExerciseBasic.py",
"max_line_length": 115,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "71233cf3764b528c39438d5d45b433f094456717",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "ofir-frd/Machine-Learning-Bootcamp",
"max_stars_repo_path": "pandas/PandasExerciseBasic.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2865,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 11765
} |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
import random
import os
import numpy as np
from pythia.models.cnn_lstm import CNNLSTM
from pythia.common.registry import registry
from pythia.common.sample import Sample, SampleList
from pythia.utils.configuration import ConfigNode, Configuration
from pythia.utils.general import get_pythia_root
class TestModelCNNLSTM(unittest.TestCase):
    """Regression test for the CNNLSTM model on CLEVR-shaped inputs."""

    def setUp(self):
        # Fix the RNG so the forward pass (and the exact expected values
        # below) is reproducible.
        torch.manual_seed(1234)
        # Register the vocab/output sizes the CLEVR config expects.
        registry.register("clevr_text_vocab_size", 80)
        registry.register("clevr_num_final_outputs", 32)
        # Load and freeze the cnn_lstm config relative to the pythia root.
        config_path = os.path.join(
            get_pythia_root(), "..", "configs", "vqa", "clevr", "cnn_lstm.yml"
        )
        config_path = os.path.abspath(config_path)
        configuration = Configuration(config_path)
        configuration.config["datasets"] = "clevr"
        configuration.freeze()
        self.config = configuration.config
        registry.register("config", self.config)

    def test_forward(self):
        # Build the model from the registered config.
        model_config = self.config.model_attributes.cnn_lstm
        cnn_lstm = CNNLSTM(model_config)
        cnn_lstm.build()
        cnn_lstm.init_losses_and_metrics()
        self.assertTrue(isinstance(cnn_lstm, torch.nn.Module))
        # One synthetic CLEVR-style sample: token ids, image and targets.
        test_sample = Sample()
        test_sample.text = torch.randint(1, 79, (10,), dtype=torch.long)
        test_sample.image = torch.randn(3, 320, 480)
        test_sample.targets = torch.randn(32)
        test_sample_list = SampleList([test_sample])
        test_sample_list.dataset_type = "train"
        test_sample_list.dataset_name = "clevr"
        output = cnn_lstm(test_sample_list)
        scores = output["scores"]
        loss = output["losses"]["train/logit_bce"]
        accuracy = output["metrics"]["train/accuracy"]
        # Golden values captured with seed 1234; they pin the exact
        # end-to-end numerical behaviour of the model.
        np.testing.assert_almost_equal(loss.item(), 23.4751, decimal=4)
        np.testing.assert_almost_equal(accuracy.item(), 0)
        self.assertEqual(scores.size(), torch.Size((1, 32)))
        expected_scores = [
            2.2298e-02, -2.4975e-01, -1.1960e-01, -5.0868e-01, -9.3013e-02,
            1.3202e-02, -1.7536e-01, -3.1180e-01, 1.5369e-01, 1.4900e-01,
            1.9006e-01, -1.9457e-01, 1.4924e-02, -1.1032e-01, 1.3777e-01,
            -3.6255e-01, -2.9327e-01, 5.6247e-04, -4.8732e-01, 4.0949e-01,
            -1.1069e-01, 2.9696e-01, 4.1903e-02, 6.7062e-02, 7.0094e-01,
            -1.9898e-01, -2.9502e-03, -3.9040e-01, 1.2218e-01, 3.7895e-02,
            2.4472e-02, 1.7213e-01
        ]
        np.testing.assert_almost_equal(scores[0].tolist(), expected_scores, decimal=5)
| {
"alphanum_fraction": 0.6508058327,
"author": null,
"avg_line_length": 38.3235294118,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "126d74ed63a92230fa452d77527a1e8fba0cc3b9",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 11,
"max_forks_repo_forks_event_max_datetime": "2021-06-24T05:39:36.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-03-07T08:10:15.000Z",
"max_forks_repo_head_hexsha": "2ad4c4cfe121e951db12efa3fe5b6f1cc0188da7",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "vatsalg29/btp",
"max_forks_repo_path": "tests/models/test_cnn_lstm.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "2ad4c4cfe121e951db12efa3fe5b6f1cc0188da7",
"max_issues_repo_issues_event_max_datetime": "2020-10-05T10:11:24.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-10-05T10:11:24.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "vatsalg29/btp",
"max_issues_repo_path": "tests/models/test_cnn_lstm.py",
"max_line_length": 86,
"max_stars_count": 35,
"max_stars_repo_head_hexsha": "2ad4c4cfe121e951db12efa3fe5b6f1cc0188da7",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "vatsalg29/btp",
"max_stars_repo_path": "tests/models/test_cnn_lstm.py",
"max_stars_repo_stars_event_max_datetime": "2021-07-30T15:12:00.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-03-06T13:05:17.000Z",
"num_tokens": 784,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2606
} |
% Ubah judul dan label berikut sesuai dengan yang diinginkan.
\section{Design and Implementation}
\label{sec:designandimplementation}
% Ubah paragraf-paragraf pada bagian ini sesuai dengan yang diinginkan.
This research describes the implementation of a deep learning approach that automatically detects Indonesian hoax news by leveraging the BERT method. The detection model is trained on a combination of the dataset from \url{https://data.mendeley.com/datasets/p3hfgr5j3m/1} and a dataset that we built ourselves for this paper using web crawling technology. Figure \ref{fig:metodologi} outlines this research in a nutshell.
\begin{figure} [h!]
\centering
\includegraphics[width=0.3\columnwidth]{gambar/Metodologi_Vertical_en.png}
\caption{This research method in a nutshell.}
\label{fig:metodologi}
\end{figure}
\subsection{Material and Tools Specification}
The dataset used in this research originates from \url{https://data.mendeley.com/datasets/p3hfgr5j3m/1}, coupled with our own dataset, which we created using web crawling technology. Combined, these datasets result in a total of 1621 records; the exact details can be seen in Table \ref{tab:dataset}. Meanwhile, Table \ref{tab:dataset_mendeley} shows the starting point of our dataset, obtained from \url{https://data.mendeley.com/datasets/p3hfgr5j3m/1} alone.
Each record contains the content of a news article along with its label, which is either "Valid" or "Hoaks". We took the valid news from accredited and verified news sources, while we took the hoax news mostly from \url{https://turnbackhoax.id}, a website that lists user-reported hoax news from many sources.
\begin{table}[h]
\caption{Total of news from \url{data.mendeley.com}}
\label{tab:dataset_mendeley}
\centering
\begin{tabular}{ | l | l | }
\hline
\textbf{Label} & \textbf{Total Data} \\ \hline
\textit{Hoaks} & 228 \\ \hline
\textit{Valid} & 372 \\ \hline
\textbf{Total} & \textbf{600} \\ \hline
\end{tabular}
\end{table}
\begin{table}[h]
\caption{Total of training dataset}
\label{tab:dataset}
\centering
\begin{tabular}{ | l | l | }
\hline
\textbf{Label} & \textbf{Total Data} \\ \hline
\textit{Hoaks} & 885 \\ \hline
\textit{Valid} & 736 \\ \hline
\textbf{Total} & \textbf{1621} \\ \hline
\end{tabular}
\end{table}
\begin{table}[h]
\caption{Dataset Sample}
\label{tab:contoh_dataset}
\centering
\begin{tabular}{ | p{.8\linewidth} | l | }
\hline
\textbf{news} & \textbf{tagging} \\ \hline
Wakil Gubernur DKI Jakarta Sandiaga Uno menargetkan pengerjaan tahap awal Stadion BMW dilakukan pada Oktober. Stadion ini diperuntukkan bagi klub Persija.... & Valid \\ \hline
"Komisi II bersama KPU dan Bawaslu masih membahas ketentuan wajib cuti bagi petahana presiden yang maju Pilpres 2019. Mekanisme pengambilan..... & Valid \\ \hline
Jaksa penuntut Ulumum (JPU) pada Komisi Pemberantasan Korupsi (KPK) mencecar Pejabat Pembuat Komitmen (PPK) reguler pada Direktorat Perlindungan Sosial Korban Bencana Sosial Kemensos Victorious Saut Hamonangan Siahaan soal... & Valid \\ \hline
“Halo Kak! Aku Winda Dari Team Giveaway BAIM WONG Anda Memenangkan Hadiah Uang 100Jt dari kami info klik: https://wa.me/+6285796306857” & Hoax \\ \hline
“Apa yang terjadi dengan hewan dalam penelitian? Teknologi ini telah dicoba pada hewan, dan pada hewan penelitian yang dilakukan, semua hewan mati , tidak langsung dari suntikan... & Hoax \\ \hline
“Kadrun istilah dr PKI alias KOMUNIS ditujukan buat islam. Kl mau jd komunis pake aja istilah kadrun buat umat islam. Auto lsg Komunis” & Hoax \\ \hline
\end{tabular}
\end{table}
\subsection{Data Acquisition}
Because the dataset that we get from \url{https://data.mendeley.com/datasets/p3hfgr5j3m/1} feel severely lacking for our purpose because it only consist of 600 data, and because there are no web crawling which outputting its result into a convenient CSV file from Indonesian news sites, we took on our hand a task to create a webcrawling program to take news content from many Indonesian news sites, those sites included but not limited to \url{liputan6.com}, \url{detik.com}, \url{tempo.com} and others. As all of those sites is rightfully accredited and verified by the government, it is used for our valid news dataset. Our hoax news site however, only has one source from \url{turnbackhoax.id}, this is mainly because said site has quite an active forum behind it in which lots of people can report their finding of hoax text, seen and checked by lots of other people, before lastly, will be uploaded to the \url{turnbackhoax.id} site. But, the biggest factor in choosing that site compare to others is mainly because \url{turnbackhoax.id} wrote the original hoaxes text in their website, this coupled with the fact that their website has some kind of structure into it has shorten our task significantly. For this research, the webcrawling process has took news from varied dates, ranging from April 2018 as the oldest to April 2021.
\begin{figure} [h!]
\centering
\includegraphics[width=0.35\linewidth]{gambar/webcrawl method_long_en.png}
\caption{Outline of the \textit{web crawl} program flow.}
\label{fig:webcrawl_method}
\end{figure}
Picture \ref{fig:webcrawl_method} is the outline flow of the webcrawling program. Starting with inputting raw HTML code into the program, changing said code into an easier-to-process objects, get the news text and do some post-cleaning on the text, lastly, create a .CSV file to store all of the obtained news text with the appropriate format.
\begin{lstlisting}[
language=HTML,
caption={HTML source code snippet from \url{detik.com}.},
label={lst:source_detik}
]
...
<div class="detail__body itp_bodycontent_wrapper">
<div class="detail__body-text itp_bodycontent">
<strong>Jakarta</strong> - Koalisi <a href="https://
detik.com/tag/jokowi" target="_blank">Jokowi</a>
sedang menyusun visi-misi jagoannya. Setelah
menerima masukan dari <a href="https://detik.com/
tag/muhammadiyah" target="_blank"> Muhammadiyah</a>,
...
Dan kita pun membuka diri untuk menerima
masukan untuk penyempurnaan," imbuhnya.<br><br><!--
s:parallaxindetail--><div class="clearfix"></div><style>
...
\end{lstlisting}
Firstly, we need to determine tag or class of the HTML code for our first filter. If we look into listing \ref{lst:source_detik} as a reference, we can see \texttt{detail\_\_body\-text} class is the one that containing our desired news text. We filtered that class by inputting the class name into the appropriate parameter.
More often than not, our filtering result will contain some garbage or unrelated text resulting in the need to refine it further by post-clean it after the filter process. Usually, those text is writer or editorial notes, ad, or related news links which we don't need at all.
Finally, the last step is outputting all of the acquired news text as a .CSV file. There is no particular reason why we chose the CSV file format over other common file formats, aside from the fact that CSV is easier to use in our training program and is an open format that can be opened and edited, if need be, by nearly any spreadsheet program.
As the general interface and improving user experience for our webcrawling software, we use a .json format file to configure what news sources that we want to get, how much is it, and when is it. All of those configuration will be processed by the program and the program will take the news in accordance with said configuration.
\subsection{Preprocessing}
\begin{figure}[h!]
\begin{center}
\includegraphics[width= .7\linewidth]{gambar/preprocess_long_en.png}
\caption{Preprocessing Method}
\label{fig: metodologi_preprocessing}
\end{center}
\end{figure}
In this particular process, the data needs to be prepared before being fed into BERT. This preprocessing consists of removing any punctuation in the text, converting all capital letters to lower case, and truncating any text that is longer than the maximum number of words BERT can process at once, which is 512 words (tokens). There are a few options for truncating the text: for example, we can keep the first 512 words and delete the rest, we can keep the last 512 words, or we can combine the start and the end portions of the text with some ratio. The last step of this preprocessing is to add a special \texttt{[CLS]} token. Figure \ref{fig: metodologi_preprocessing} explains the same process with better clarity.
Other than that, we also divide the dataset into 3 portions with the details stated below:
\begin{itemize}
\item 70\% Training, 10\% Validation, 20\% Test
\end{itemize}
\begin{enumerate}
\item Training
This set is used with BERT as an input when it is in its training phase so we can get an optimized model for our task.
\item Validation
This set is used right after BERT finished its training phase. Used to determined whether our created model has appropriate weight for our task or if our model still need to be trained again. This portion is also used to determine whether our model is overfitting or underfitting which is a bad thing.
\item Test
This set is used as an accuracy test after both the validation and the training phase is finished. The resulting accuracy of this set is the one that we consider as our result.
\end{enumerate}
To make it clearer, see Table \ref{tab:dataset_section}. Based on this table, we can see that the division of the dataset is appropriate.
\begin{table}[h]
\caption{Dataset Portioning Details}
\label{tab:dataset_section}
\centering
\begin{tabular}{ | l | l | l | l | }
\hline
\textbf{Portion} & \textbf{Hoaks} & \textbf{Valid} & \textbf{Total Data} \\ \hline
\textit{Training} & 647 & 519 & 1166 \\ \hline
\textit{Validation} & 85 & 78 & 163 \\ \hline
\textit{Testing} & 153 & 139 & 292 \\ \hline
\multicolumn{3}{|l|}{\textbf{Total}} & \textbf{1621} \\ \hline
\end{tabular}
\end{table}
\subsection{The Architecture of BERT}
BERT is one of the latest machine learning implementation at this time especially for Natural Language Processing (NLP) task. It is based on the Transformer implementation that is based on a previous research by Vaswani et al. \cite{attention_is_all_you_need}. BERT has successfully achieved a higher accuracy than ever before in understanding the context of a raw text if compared to other transformer implementation.
One of the distinct feature of BERT is in the way it is pre-trained. There are 2 steps for pretraining BERT. The first is by doing a Masked Language Model (MLM) in which BERT will be given masked text A and some words B that can be the correct word for the masked text or not as an input, and it will need to predict whether the word B is the correct word for the masked part in text A. This way, BERT will be able to "learn" the relationship between words. The next steps for pretraining BERT is to do some Next Sentence Prediction (NSP) task. The inputted text of this task is 2 sentence, sentence A and sentence B and BERT task is to predict whether these 2 sentences will form a complete paragraph or not. By doing NSP tasks, BERT should be able to get the relationship between sentences easier.
\begin{figure*}[h!]
\begin{center}
\includegraphics[width= 0.9\textwidth]{gambar/bert_arch.png}
\caption{The Architecture of BERT in this research}
\label{fig: bert_arch}
\end{center}
\end{figure*}
In this research, we decided to use a fine-tuning approach, meaning that we use a pre-trained BERT model rather than creating our own BERT model from scratch; however, we still need to connect the last layer of BERT to a classification layer. In this case, we chose Linear Regression as the classification layer. For greater detail, figure \ref{fig: bert_arch} shows the architecture of BERT that will be used throughout this research.
\subsection{Training and Validation}
\begin{figure}[h!]
\begin{center}
\includegraphics[width= 0.9\linewidth]{gambar/training.png}
\caption{Training Method}
\label{fig: metodologi_training}
\end{center}
\end{figure}
At this stage, the raw text that will be inputted into BERT has already going through its preprocessing phase and is now going into a process called Tokenizer. Tokenizer is a process to change words in a text into token according to its word embedding that is already obtained beforehand when BERT is still in its pretraining phase. Only after all of these process has done, BERT will start its training phase based on the tokenized and preprocessed data.
Not all of the output of BERT is used in this particular research; we only need the content of the \texttt{[CLS]} token, which is filled with the pooled output of all the other tokens and layers. The content of the \texttt{[CLS]} token is then fed into the Linear Regression method. This method is chosen as it is simple while still retaining quite good accuracy. Figure \ref{fig: metodologi_training} shows the training method in a nutshell.
There are also some parameters that we can adjust only at this stage, namely batch size, learning rate, and epoch. Batch size is a parameter that controls how much data is processed at once per iteration; note that there are usually several iterations per epoch. The higher the batch size, the faster the training process, but at the cost of memory usage. Because the BERT method has quite a large number of layers (718 layers, in general) and can be considered quite heavy, we set the batch size to 10.
Epoch is how much training and validation will take place before the training phase is considered as final. This parameter is one of the most important parameters to adjust as it has a direct effect on the accuracy and the loss of our model. If our loss is too high but the accuracy is too low, it is a clear indication that our model is suffering from underfitting state, meanwhile, if our loss is too low while the accuracy is too high we still need to check if our model is actually good or if it is suffering from overfitting. As our goals in this research is only to process text that is considered to be easier compared to processing image or video, we only set the epoch value to 10.
Learning rate is how much hyperparameter is allowed to change while the model is still in training process, this in turn will change the weight of the layers while in the same process based on the feedback gotten from validation phase. We decided to use the recommended value of 0.00002 \cite{koto2020indolem}
The validation process is used as a way to get the loss validation value that we can use as a comparator between the loss value that we are able to obtain from the training process and the loss validation value that we get from this process. If the loss validation value is getting higher but coupled with a loss training value that shows sign of going lower still, it is a surefire way to know that our model is suffering from overfitting. In another note, if both of our loss value in our model is quite high, then there is a high chance our model is underfitting. Both cases indicate that our model can be further optimized and requiring more training while adjusting the parameter.
\subsection{Testing}
After going through validation and training phase, lastly, we need to test our newly created model. Based on the result of this process, we should be able to conclude whether our model can be considered good enough for our use case, or still can be further adjusted by reconfiguring some of the parameters back at the training phase.
\subsection{Performance Analysis}
The last step right after testing is performance analysis of our tested model. This process is quite important to see how our model will fare in a real-world scenario after it is implemented. There are a few metrics that we use for this process, all of which are considered industry standards in machine learning. Firstly, there is the confusion matrix, which categorizes the prediction results against the actual labels in the dataset into 4 divisions: True Positive (TP), False Positive (FP), True Negative (TN), and False Negative (FN). We also use Recall, Precision, and F1-Score as our performance metrics in this research. | {
"alphanum_fraction": 0.7208559745,
"author": null,
"avg_line_length": 88.8109452736,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "4a2cd55d7f1d7e21094829673f020774a8394012",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6523992a634ac8d6fbd5397dc0c42274e00d0cc8",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "chillytaka/buku-final-eng",
"max_forks_repo_path": "konten/3-arsitektur.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6523992a634ac8d6fbd5397dc0c42274e00d0cc8",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "chillytaka/buku-final-eng",
"max_issues_repo_path": "konten/3-arsitektur.tex",
"max_line_length": 1338,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "6523992a634ac8d6fbd5397dc0c42274e00d0cc8",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "chillytaka/buku-final-eng",
"max_stars_repo_path": "konten/3-arsitektur.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4233,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 17851
} |
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from flearn.models.client import Client
from flearn.utils.tf_utils import process_grad
class BaseFedarated(object):
    """Base class for federated trainers: owns the server model and all clients."""

    def __init__(self, params, learner, dataset):
        '''initialize local models, build comp. graph'''
        # Copy every configuration entry onto the instance.
        for key, val in params.items():
            setattr(self, key, val)

        # A non-positive clients_per_round means "use every client".
        users, _, _, _ = dataset
        if self.clients_per_round < 1:
            self.clients_per_round = len(users)

        # Build the shared model and one Client wrapper per user.
        self.client_model = learner(*params['model_params'], self.inner_opt, self.seed)
        self.clients = self.setup_clients(dataset, self.client_model)
        print('{} Clients in Total'.format(len(self.clients)))

        # Server-side copy of the model parameters.
        self.latest_model = self.client_model.get_params()

    def __del__(self):
        '''clean up models'''
        self.client_model.close()

    def setup_clients(self, dataset, model=None):
        '''instantiates clients based on given train and test data directories

        Return:
            list of Clients
        '''
        users, groups, train_data, test_data = dataset
        if not groups:
            groups = [None] * len(users)
        return [
            Client(user, group, train_data[user], test_data[user], model)
            for user, group in zip(users, groups)
        ]

    def train_error_and_loss(self):
        '''compute clients' train statistics

        Return:
            train stats (ids, groups, num_samples, tot_correct, losses)
        '''
        num_samples, tot_correct, losses = [], [], []
        for client in self.clients:
            correct, loss, samples = client.train_error_and_loss()
            tot_correct.append(correct * 1.0)
            num_samples.append(samples)
            losses.append(loss * 1.0)
        ids = [client.id for client in self.clients]
        groups = [client.group for client in self.clients]
        return ids, groups, num_samples, tot_correct, losses

    def test(self):
        '''tests self.latest_model on given clients'''
        # Push the server parameters to the shared model before evaluating.
        self.client_model.set_params(self.latest_model)
        num_samples, tot_correct = [], []
        for client in self.clients:
            correct, samples = client.test()
            tot_correct.append(correct * 1.0)
            num_samples.append(samples)
        ids = [client.id for client in self.clients]
        groups = [client.group for client in self.clients]
        return ids, groups, num_samples, tot_correct

    def save(self):
        # Intentionally a no-op; subclasses may persist state here.
        pass

    def select_clients(self, round, num_clients=20):
        '''selects num_clients clients weighted by number of samples from possible_clients

        Args:
            num_clients: number of clients to select; default 20
                note that within function, num_clients is set to
                min(num_clients, len(possible_clients))
        Return:
            list of selected clients objects
        '''
        num_clients = min(num_clients, len(self.clients))
        # Seed with the round index so every run selects identical clients.
        np.random.seed(round)
        indices = np.random.choice(range(len(self.clients)), num_clients, replace=False)
        return indices, np.asarray(self.clients)[indices]

    def aggregate(self, wsolns):
        '''aggregate local solutions

        Args:
            wsolns: a list of (weight, local model params) pairs,
                where weight is the number of local samples
        Return:
            averaged model params
        '''
        total_weight = 0.0
        accum = [0] * len(wsolns[0][1])
        for weight, soln in wsolns:
            total_weight += weight
            for i, layer in enumerate(soln):
                accum[i] += weight * layer.astype(np.float64)
        return [layer_sum / total_weight for layer_sum in accum]
| {
"alphanum_fraction": 0.5793450882,
"author": null,
"avg_line_length": 33.6440677966,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "eee456027042d54e1c310f759be8ce0727c02ce4",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2022-01-05T01:50:53.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-01-05T01:50:53.000Z",
"max_forks_repo_head_hexsha": "4097eb447a99c7180388527a2d05974906b77eb1",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "unc-optimization/FedDR",
"max_forks_repo_path": "FedDR/flearn/trainers/fedbase.py",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "4097eb447a99c7180388527a2d05974906b77eb1",
"max_issues_repo_issues_event_max_datetime": "2022-03-27T13:37:18.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-03-08T12:05:22.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "unc-optimization/FedDR",
"max_issues_repo_path": "FedDR/flearn/trainers/fedbase.py",
"max_line_length": 109,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "4097eb447a99c7180388527a2d05974906b77eb1",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "unc-optimization/FedDR",
"max_stars_repo_path": "FedDR/flearn/trainers/fedbase.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-08T21:04:43.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-03-01T12:00:24.000Z",
"num_tokens": 839,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3970
} |
from typing import Union
from pathlib import Path
from glob import glob
from tqdm import tqdm
import json
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from utils.bert.utils.utils import BertTokenizer
def collate_fn(data):
    """Collate a single-sample batch: keep the document name as-is and
    convert the integer fields to long tensors.

    Expects batch_size == 1; only data[0] is consumed.
    """
    document, section, sentence, contains_ds = data[0]
    return (
        document,
        torch.tensor(section, dtype=torch.long),
        torch.tensor(sentence, dtype=torch.long),
        torch.tensor(contains_ds, dtype=torch.long),
    )
class ShowUsDataset(Dataset):
    """Balanced sentence-level dataset built from per-document JSON files.

    Sentences whose ``cleaned_labels`` list is non-empty are positives; an
    equally sized random subset of the remaining sentences is kept as
    negatives, and the combined table is shuffled.
    """

    def __init__(self, dataset_path:Union[str, Path], vocab_file:Union[str, Path], max_seq:int=512):
        """
        Args:
            dataset_path: directory containing the ``*.json`` document files.
            vocab_file: BERT vocabulary file used to build the tokenizer.
            max_seq: maximum number of token ids returned per text (default 512).
        """
        super().__init__()
        self.dataset_path = Path(dataset_path)
        self.vocab_path = Path(vocab_file)
        self.max_seq = max_seq
        self.tokenizer = BertTokenizer(str(self.vocab_path), to_lower=True)
        self.data, self.documents, self.sections, self.sentences = self.__load_data(self.dataset_path)

    def __load_data(self, dataset_path:Union[str, Path]):
        """Parse the JSON files into a balanced (doc, sec, sent, label) table
        plus id->text lookup dicts for documents, sections and sentences."""
        dataset_path = Path(dataset_path)
        files = [Path(f) for f in glob(str(dataset_path / '*.json'))]
        # each row: (doc_idx, sec_idx, sent_idx, contains_ds)
        data_with_ds = []
        data_without_ds = []
        documents = {}
        sections = {}
        sentences = {}
        doc_idx, sent_idx, sec_idx = 0, 0, 0
        for f_path in tqdm(files, desc='loading data...'):
            # document data
            documents[doc_idx] = f_path.stem
            # Use a context manager so the handle is closed deterministically;
            # the previous json.load(open(f_path)) leaked one handle per file.
            with open(f_path) as fp:
                a_data = json.load(fp)
            for section in a_data:
                # section data
                sections[sec_idx] = section['section_text']
                for sentence in section['sentences']:
                    # sentence data
                    sentences[sent_idx] = sentence['text']
                    if len(sentence['cleaned_labels']) > 0:
                        data_with_ds.append((doc_idx, sec_idx, sent_idx, 1))
                    else:
                        data_without_ds.append((doc_idx, sec_idx, sent_idx, 0))
                    sent_idx += 1
                sec_idx += 1
            doc_idx += 1
        data_with_ds = np.array(data_with_ds)
        data_without_ds = np.array(data_without_ds)
        # Balance classes by subsampling negatives to the positive count.
        # NOTE(review): replace=False raises ValueError if there are fewer
        # negatives than positives — confirm this never happens in the corpus.
        indices = np.random.choice(range(len(data_without_ds)), size=len(data_with_ds), replace=False)
        data = np.concatenate([data_with_ds, data_without_ds[indices]])
        np.random.shuffle(data)
        return data, documents, sections, sentences

    def __len__(self):
        """Number of balanced sentence samples."""
        return len(self.data)

    def __getitem__(self, index):
        """Return (document name, section token ids, sentence token ids, contains_ds)."""
        # get texts
        doc_idx, sec_idx, sent_idx, contains_ds = self.data[index]
        document = self.documents[doc_idx]
        section = self.sections[sec_idx]
        sentence = self.sentences[sent_idx]
        # Blank the target sentence out of its section context.
        section = section.replace(sentence, ' ')
        # text -> tokens
        section_tokens = self.tokenizer.tokenize(section)
        sentence_tokens = self.tokenizer.tokenize(sentence)
        # tokens -> ids, truncated to max_seq
        section_ids = self.tokenizer.tokens2ids(section_tokens)
        sentence_ids = self.tokenizer.tokens2ids(sentence_tokens)
        return document, section_ids[:self.max_seq], sentence_ids[:self.max_seq], contains_ds
| {
"alphanum_fraction": 0.6245487365,
"author": null,
"avg_line_length": 36.9333333333,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "af03545072002df2ec1ba313b72cc498c5c3e597",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "66bf4176a9f71dc9653706ff04b29038970dea04",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "akitenkrad/show-us-the-data",
"max_forks_repo_path": "datasets.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "66bf4176a9f71dc9653706ff04b29038970dea04",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "akitenkrad/show-us-the-data",
"max_issues_repo_path": "datasets.py",
"max_line_length": 102,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "66bf4176a9f71dc9653706ff04b29038970dea04",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "akitenkrad/show-us-the-data",
"max_stars_repo_path": "datasets.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 721,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3324
} |
from flask import Flask,request, url_for, redirect, render_template, jsonify
from pycaret.regression import *
import pandas as pd
import pickle
import numpy as np
# Flask application plus the pre-trained PyCaret regression pipeline.
app = Flask(__name__)
# NOTE(review): load_model resolves 'insurance_14052020' relative to the
# working directory — confirm the pickle ships with the deployment.
model = load_model('insurance_14052020')
# Feature column order the model's pipeline expects for unseen data.
cols = ['age', 'sex', 'bmi', 'children', 'smoker', 'region']
@app.route('/')
def home():
    """Render the landing page template."""
    return render_template("home.html")
@app.route('/predict',methods=['POST'])
def predict():
    """Predict an insurance bill from the submitted HTML form and re-render
    the home page with the result."""
    form_values = list(request.form.values())
    # One-row frame in the column order the pipeline expects.
    frame = pd.DataFrame([np.array(form_values)], columns=cols)
    result = predict_model(model, data=frame, round=0)
    estimate = int(result.Label[0])
    return render_template('home.html',pred='Expected Bill will be {}'.format(estimate))
@app.route('/predict_api',methods=['POST'])
def predict_api():
    """JSON API endpoint: predict from a posted JSON record and return the label."""
    payload = request.get_json(force=True)
    frame = pd.DataFrame([payload])
    result = predict_model(model, data=frame)
    return jsonify(result.Label[0])
if __name__ == '__main__':
    # Listen on all interfaces; debug disabled.
    app.run(host='0.0.0.0', debug=False)
"alphanum_fraction": 0.7019319227,
"author": null,
"avg_line_length": 31.9705882353,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6b2df42d48ced47ae8de4538d43436a05825ac8f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-08-24T12:17:11.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-08-24T12:17:11.000Z",
"max_forks_repo_head_hexsha": "f994f96cf310d77c913ee9ccc705f0a92ef87862",
"max_forks_repo_licenses": [
"RSA-MD"
],
"max_forks_repo_name": "hhaydar/ml-app-insurance",
"max_forks_repo_path": "app.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f994f96cf310d77c913ee9ccc705f0a92ef87862",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"RSA-MD"
],
"max_issues_repo_name": "hhaydar/ml-app-insurance",
"max_issues_repo_path": "app.py",
"max_line_length": 90,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f994f96cf310d77c913ee9ccc705f0a92ef87862",
"max_stars_repo_licenses": [
"RSA-MD"
],
"max_stars_repo_name": "hhaydar/ml-app-insurance",
"max_stars_repo_path": "app.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 264,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1087
} |
import random
import numpy as np
from rl.policies import Policy
class RandomPolicy(Policy):
    """Epsilon-greedy exploration policy with per-epoch dropout.

    With probability ``dropout`` an entire epoch is played greedily (no
    exploration and no epsilon decay). Otherwise a random action is taken
    with probability ``current_epsilon``, which decays per step and per
    epoch down to ``epsilon_min`` and can be reset on epoch start or when
    the game changes.
    """

    def __init__(self,
                 action_mapper,
                 epsilon=0.1,
                 epsilon_decay_step=0.0001,
                 epsilon_decay_epoch=0.,
                 epsilon_min=0.001,
                 reset_on_game_changed=True,
                 dropout=0.4,
                 reset_on_epoch_start=False):
        super().__init__(action_mapper)
        self.current_epsilon = epsilon
        self._initial_epsilon = epsilon
        self._epsilon_decay_step = epsilon_decay_step
        self._epsilon_decay_epoch = epsilon_decay_epoch
        self._epsilon_min = epsilon_min
        self._reset_on_game_changed = reset_on_game_changed
        self._reset_on_epoch_start = reset_on_epoch_start
        if reset_on_epoch_start:
            # Per-epoch decay would be immediately undone by the reset.
            assert epsilon_decay_epoch == 0, "Decay per level should be 0 if epsilon is reset each epoch."
        self._dropout = dropout
        self._is_dropout = False

    def allows_async_training(self):
        """This policy keeps per-epoch state, so no async training."""
        return False

    def epoch_start(self):
        """Roll the per-epoch dropout and update epsilon accordingly."""
        self._is_dropout = random.uniform(0., 1.) < self._dropout
        if self._is_dropout:
            # Dropout epoch: play greedily, leave epsilon untouched.
            return
        if self._reset_on_epoch_start:
            self.current_epsilon = self._initial_epsilon
        else:
            self.current_epsilon = max(
                self._epsilon_min,
                self.current_epsilon - self._epsilon_decay_epoch,
            )

    def game_changed(self):
        """Optionally restart exploration when a new game begins."""
        if not self._reset_on_game_changed:
            return
        self.current_epsilon = self._initial_epsilon

    def get_action(self, env, qvalues):
        """Return a random action with probability epsilon, else the greedy one."""
        # The roll is drawn unconditionally so the RNG state advances the
        # same way whether or not this is a dropout epoch.
        roll = random.uniform(0., 1.)
        if self._is_dropout or roll >= self.current_epsilon:
            action = super().get_action(env, qvalues)
        else:
            action = env.action_space.sample()
        if not self._is_dropout:
            self.current_epsilon = max(
                self._epsilon_min,
                self.current_epsilon - self._epsilon_decay_step,
            )
        return action
| {
"alphanum_fraction": 0.618556701,
"author": null,
"avg_line_length": 32.8307692308,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "de1d659f25a2a07037386aab08679f19ddc5949d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a8ffdafa70d735e609296b13b0aa9950f73cfb07",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "valiro21/MarioLearningCompany",
"max_forks_repo_path": "rl/policies/RandomPolicy.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a8ffdafa70d735e609296b13b0aa9950f73cfb07",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "valiro21/MarioLearningCompany",
"max_issues_repo_path": "rl/policies/RandomPolicy.py",
"max_line_length": 106,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a8ffdafa70d735e609296b13b0aa9950f73cfb07",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "valiro21/MarioLearningCompany",
"max_stars_repo_path": "rl/policies/RandomPolicy.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 434,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2134
} |
#!/usr/bin/env python
# A simple script to test the installed version of numpy by calling
# 'numpy.test()'. Key features:
# -- convenient command-line syntax
# -- sets exit status appropriately, useful for automated test environments
# It would be better to set this up as a module in the numpy namespace, so
# that it could be run as:
# python -m numpy.run_tests <args>
# But, python2.4's -m switch only works with top-level modules, not modules
# that are inside packages. So, once we drop 2.4 support, maybe...
import sys
# In case we are run from the source directory, we don't want to import numpy
# from there, we want to import the installed version:
sys.path.pop(0)
from optparse import OptionParser
parser = OptionParser("usage: %prog [options] -- [nosetests options]")
parser.add_option("-v", "--verbose",
                  action="count", dest="verbose", default=1,
                  help="increase verbosity")
parser.add_option("--doctests",
                  action="store_true", dest="doctests", default=False,
                  help="Run doctests in module")
parser.add_option("--coverage",
                  action="store_true", dest="coverage", default=False,
                  # fixed: help text was missing its closing parenthesis
                  help="report coverage of NumPy code (requires 'coverage' module)")
parser.add_option("-m", "--mode",
                  action="store", dest="mode", default="fast",
                  help="'fast', 'full', or something that could be "
                       "passed to nosetests -A [default: %default]")
(options, args) = parser.parse_args()

# numpy must be imported only after sys.path has been adjusted above, so that
# the installed version (not a source checkout) is picked up.
import numpy
result = numpy.test(options.mode,
                    verbose=options.verbose,
                    extra_argv=args,
                    doctests=options.doctests,
                    coverage=options.coverage)
# Exit status mirrors the test outcome so CI systems can consume it.
if result.wasSuccessful():
    sys.exit(0)
else:
    sys.exit(1)
| {
"alphanum_fraction": 0.63304253,
"author": null,
"avg_line_length": 39.0212765957,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "91e619e96e1e9e609ed76b6706976d86df264724",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2021-07-06T21:12:50.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-06-08T05:14:16.000Z",
"max_forks_repo_head_hexsha": "665a00aec896344cfb12599add600f27a5f519d3",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "ewmoore/numpy",
"max_forks_repo_path": "tools/test-installed-numpy.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "665a00aec896344cfb12599add600f27a5f519d3",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "ewmoore/numpy",
"max_issues_repo_path": "tools/test-installed-numpy.py",
"max_line_length": 83,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "665a00aec896344cfb12599add600f27a5f519d3",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "ewmoore/numpy",
"max_stars_repo_path": "tools/test-installed-numpy.py",
"max_stars_repo_stars_event_max_datetime": "2022-01-11T00:36:48.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-10-02T13:32:41.000Z",
"num_tokens": 406,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1834
} |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
# import numpy as np
import time
import discogan
import sys
sys.path.insert(0, '../')
import image_utils as iu
from datasets import Pix2PixDataSet as DataSets
# Output locations: directory for generated sample images and the
# checkpoint path used by the saver in main().
results = {
    'sample_output': './gen_img/',
    'model': './model/DiscoGAN-model.ckpt'
}
# Training hyper-parameters read throughout main().
paras = {
    'epoch': 200,
    'batch_size': 64,
    'logging_interval': 5
}
def main():
    """Train DiscoGAN on the vangogh2photo dataset, periodically logging losses,
    exporting generated sample images, and checkpointing the model.
    """
    start_time = time.time()  # clocking start

    # Dataset
    dataset = DataSets(height=64,
                       width=64,
                       channel=3,
                       ds_path='D:/DataSets/pix2pix/',
                       ds_name="vangogh2photo")

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as s:
        # DiscoGAN model
        model = discogan.DiscoGAN(s)

        # BUG FIX: initialize variables BEFORE attempting a restore. The
        # original code ran the initializer after saver.restore(), which
        # overwrote any weights that had just been loaded from the checkpoint.
        tf.global_variables_initializer().run()

        # load model & graph & weight
        global_step = 0
        ckpt = tf.train.get_checkpoint_state('./model/')
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            model.saver.restore(s, ckpt.model_checkpoint_path)
            # BUG FIX: the step embedded in the checkpoint filename is a
            # string; convert to int so the %04d format and
            # add_summary(global_step=...) below keep working after a restore.
            global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %s" % global_step, " successfully loaded")
        else:
            print('[-] No checkpoint file found')

        d_overpowered = False  # G loss > D loss * 2
        for epoch in range(paras['epoch']):
            for step in range(1000):
                offset_a = (step * paras['batch_size']) % (dataset.images_a.shape[0] - paras['batch_size'])
                offset_b = (step * paras['batch_size']) % (dataset.images_b.shape[0] - paras['batch_size'])
                # batch data set
                batch_a = dataset.images_a[offset_a:(offset_a + paras['batch_size']), :]
                batch_b = dataset.images_b[offset_b:(offset_b + paras['batch_size']), :]
                # update D network (skipped while the discriminator is overpowered)
                if not d_overpowered:
                    s.run(model.d_op, feed_dict={model.A: batch_a})
                # update G network
                s.run(model.g_op, feed_dict={model.B: batch_b})
                if epoch % paras['logging_interval'] == 0:
                    d_loss, g_loss, summary = s.run([
                        model.d_loss,
                        model.g_loss,
                        model.merged
                    ], feed_dict={
                        model.A: batch_a,
                        model.B: batch_b
                    })
                    # print loss
                    print("[+] Epoch %03d Step %04d => " % (epoch, global_step),
                          " D loss : {:.8f}".format(d_loss),
                          " G loss : {:.8f}".format(g_loss))
                    # update overpowered
                    d_overpowered = d_loss < g_loss / 2.
                    # training G model with sample image and noise
                    ab_samples = s.run(model.G_s2b, feed_dict={model.A: batch_a})
                    ba_samples = s.run(model.G_b2s, feed_dict={model.B: batch_b})
                    # summary saver
                    model.writer.add_summary(summary, global_step=global_step)
                    # export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_ab_dir = results['sample_output'] + 'train_A_{0}_{1}.png'.format(epoch, global_step)
                    sample_ba_dir = results['sample_output'] + 'train_B_{0}_{1}.png'.format(epoch, global_step)
                    # Generated image save
                    iu.save_images(ab_samples, size=[sample_image_height, sample_image_width],
                                   image_path=sample_ab_dir)
                    iu.save_images(ba_samples, size=[sample_image_height, sample_image_width],
                                   image_path=sample_ba_dir)
                    # model save
                    model.saver.save(s, results['model'], global_step=global_step)
        # NOTE: the explicit s.close() was removed — the `with` context
        # manager already closes the session on exit.

    end_time = time.time() - start_time
    # elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))
# Script entry point.
if __name__ == '__main__':
    main()
| {
"alphanum_fraction": 0.5371993706,
"author": null,
"avg_line_length": 34.7578125,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "83a84321ea0a0bc9570475d0ce3c63e9712bd0ca",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-08-16T01:35:21.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-08-16T01:35:21.000Z",
"max_forks_repo_head_hexsha": "6e20e9cd07d0ec413a187d496159b97d793dab0c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Psyche-mia/Awesome-GANs",
"max_forks_repo_path": "DiscoGAN/discogan_train.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6e20e9cd07d0ec413a187d496159b97d793dab0c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Psyche-mia/Awesome-GANs",
"max_issues_repo_path": "DiscoGAN/discogan_train.py",
"max_line_length": 111,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "6e20e9cd07d0ec413a187d496159b97d793dab0c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "sumersumerdjl/kozistr-Awesome-GANs",
"max_stars_repo_path": "DiscoGAN/discogan_train.py",
"max_stars_repo_stars_event_max_datetime": "2021-08-16T01:40:46.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-08-16T01:40:46.000Z",
"num_tokens": 957,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4449
} |
//=============================================================================================================
/**
* @file mne_project_to_surface.cpp
* @author Jana Kiesel <jana.kiesel@tu-ilmenau.de>;
* Matti Hamalainen <msh@nmr.mgh.harvard.edu>
* @version 1.0
* @date August, 2016
*
* @section LICENSE
*
* Copyright (C) 2016, Jana Kiesel and Matti Hamalainen. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that
* the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of conditions and the
* following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
* the following disclaimer in the documentation and/or other materials provided with the distribution.
* * Neither the name of MNE-CPP authors nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*
* @brief MNEProjectToSurface class definition.
*
*/
//*************************************************************************************************************
//=============================================================================================================
// INCLUDES
//=============================================================================================================
#include "mne_project_to_surface.h"
//*************************************************************************************************************
//=============================================================================================================
// INCLUDES
//=============================================================================================================
#include <mne/mne_bem_surface.h>
#include <mne/mne_surface.h>
//*************************************************************************************************************
//=============================================================================================================
// QT INCLUDES
//=============================================================================================================
//*************************************************************************************************************
//=============================================================================================================
// Eigen INCLUDES
//=============================================================================================================
#include <Eigen/Geometry>
//*************************************************************************************************************
//=============================================================================================================
// USED NAMESPACES
//=============================================================================================================
using namespace MNELIB;
using namespace Eigen;
//*************************************************************************************************************
//=============================================================================================================
// DEFINE GLOBAL METHODS
//=============================================================================================================
//*************************************************************************************************************
//=============================================================================================================
// DEFINE MEMBER METHODS
//=============================================================================================================
// Default constructor: all per-triangle caches are a single zeroed entry and
// no surface is loaded. mne_find_closest_on_surface() detects this "empty"
// state by checking that r1 is all-zero.
MNEProjectToSurface::MNEProjectToSurface()
: r1(MatrixX3f::Zero(1,3))
, r12(MatrixX3f::Zero(1,3))
, r13(MatrixX3f::Zero(1,3))
, nn(MatrixX3f::Zero(1,3))
, a(VectorXf::Zero(1))
, b(VectorXf::Zero(1))
, c(VectorXf::Zero(1))
, det(VectorXf::Zero(1))
{
}
//*************************************************************************************************************
// Builds the per-triangle projection caches from a BEM surface:
//   r1      - first corner of each triangle
//   r12/r13 - edge vectors from corner #1 to corners #2 and #3
//   a, b, c - dot products r12.r12, r13.r13, r12.r13 (Gram matrix entries)
//   nn      - triangle normals: taken from tri_nn when present, otherwise
//             recomputed as r12 x r13 (NOTE(review): the cross product is
//             not normalized here — confirm downstream distance users
//             account for that)
//   det     - determinant a*b - c*c of the 2x2 Gram matrix, used to solve
//             for the in-plane coordinates in nearest_triangle_point()
MNEProjectToSurface::MNEProjectToSurface(const MNEBemSurface &p_MNEBemSurf)
: r1(MatrixX3f::Zero(p_MNEBemSurf.ntri,3))
, r12(MatrixX3f::Zero(p_MNEBemSurf.ntri,3))
, r13(MatrixX3f::Zero(p_MNEBemSurf.ntri,3))
, nn(MatrixX3f::Zero(p_MNEBemSurf.ntri,3))
, a(VectorXf::Zero(p_MNEBemSurf.ntri))
, b(VectorXf::Zero(p_MNEBemSurf.ntri))
, c(VectorXf::Zero(p_MNEBemSurf.ntri))
, det(VectorXf::Zero(p_MNEBemSurf.ntri))
{
    for (int i = 0; i < p_MNEBemSurf.ntri; ++i)
    {
        r1.row(i) = p_MNEBemSurf.rr.row(p_MNEBemSurf.tris(i,0));
        r12.row(i) = p_MNEBemSurf.rr.row(p_MNEBemSurf.tris(i,1)) - r1.row(i);
        r13.row(i) = p_MNEBemSurf.rr.row(p_MNEBemSurf.tris(i,2)) - r1.row(i);
        a(i) = r12.row(i) * r12.row(i).transpose();
        b(i) = r13.row(i) * r13.row(i).transpose();
        c(i) = r12.row(i) * r13.row(i).transpose();
    }
    // Prefer the surface's precomputed normals; fall back to cross products.
    if (!(p_MNEBemSurf.tri_nn.isZero(0)))
    {
        nn = p_MNEBemSurf.tri_nn.cast<float>();
    }
    else
    {
        for (int i = 0; i < p_MNEBemSurf.ntri; ++i)
        {
            nn.row(i) = r12.row(i).transpose().cross(r13.row(i).transpose()).transpose();
        }
    }
    det = (a.array()*b.array() - c.array()*c.array()).matrix();
}
//*************************************************************************************************************
// Same cache construction as the MNEBemSurface constructor, but for a plain
// MNESurface. Normals are always recomputed as r12 x r13 here (MNESurface is
// not queried for precomputed normals; NOTE(review): cross products are not
// normalized — see the BEM constructor).
MNEProjectToSurface::MNEProjectToSurface(const MNESurface &p_MNESurf)
: r1(MatrixX3f::Zero(p_MNESurf.ntri,3))
, r12(MatrixX3f::Zero(p_MNESurf.ntri,3))
, r13(MatrixX3f::Zero(p_MNESurf.ntri,3))
, nn(MatrixX3f::Zero(p_MNESurf.ntri,3))
, a(VectorXf::Zero(p_MNESurf.ntri))
, b(VectorXf::Zero(p_MNESurf.ntri))
, c(VectorXf::Zero(p_MNESurf.ntri))
, det(VectorXf::Zero(p_MNESurf.ntri))
{
    for (int i = 0; i < p_MNESurf.ntri; ++i)
    {
        r1.row(i) = p_MNESurf.rr.row(p_MNESurf.tris(i,0));
        r12.row(i) = p_MNESurf.rr.row(p_MNESurf.tris(i,1)) - r1.row(i);
        r13.row(i) = p_MNESurf.rr.row(p_MNESurf.tris(i,2)) - r1.row(i);
        nn.row(i) = r12.row(i).transpose().cross(r13.row(i).transpose()).transpose();
        a(i) = r12.row(i) * r12.row(i).transpose();
        b(i) = r13.row(i) * r13.row(i).transpose();
        c(i) = r12.row(i) * r13.row(i).transpose();
    }
    det = (a.array()*b.array() - c.array()*c.array()).matrix();
}
//*************************************************************************************************************
bool MNEProjectToSurface::mne_find_closest_on_surface(const MatrixXf &r, const int np, MatrixXf &rTri,
VectorXi &nearest, VectorXf &dist)
{
nearest.resize(np);
dist.resize(np);
if (this->r1.isZero(0))
{
qDebug() << "No surface loaded to make the projection./n";
return false;
}
int bestTri = -1;
float bestDist = -1;
Vector3f rTriK;
for (int k = 0; k < np; ++k)
{
/*
* To do: decide_search_restriction for the use in an iterative closest point to plane algorithm
* For now it's OK to go through all triangles.
*/
if (!this->mne_project_to_surface(r.row(k).transpose(), rTriK, bestTri, bestDist))
{
qDebug() << "The projection of point number " << k << " didn't work./n";
return false;
}
rTri.row(k) = rTriK.transpose();
nearest[k] = bestTri;
dist[k] = bestDist;
}
return true;
}
//*************************************************************************************************************
// Finds, over all triangles, the surface point nearest to r.
// Outputs: rTri (projected point), bestTri (winning triangle index) and
// bestDist (signed distance to it). Returns false if any per-triangle step
// or the final coordinate transform fails, or if no triangle was found.
bool MNEProjectToSurface::mne_project_to_surface(const Vector3f &r, Vector3f &rTri, int &bestTri, float &bestDist)
{
    float p = 0, q = 0, p0 = 0, q0 = 0, dist0 = 0;
    bestDist = 0.0f;
    bestTri = -1;
    // Exhaustive scan over all triangles (see the to-do note in the caller
    // about restricting the search).
    for (int tri = 0; tri < a.size(); ++tri)
    {
        if (!this->nearest_triangle_point(r, tri, p0, q0, dist0))
        {
            // BUG FIX: debug strings used "/n" instead of "\n".
            qDebug() << "The projection on triangle " << tri << " didn't work.\n";
            return false;
        }
        // Keep the triangle with the smallest absolute (unsigned) distance.
        if ((bestTri < 0) || (std::fabs(dist0) < std::fabs(bestDist)))
        {
            bestDist = dist0;
            p = p0;
            q = q0;
            bestTri = tri;
        }
    }
    if (bestTri >= 0)
    {
        if (!this->project_to_triangle(rTri, p, q, bestTri))
        {
            qDebug() << "The coordinate transform to cartesian system didn't work.\n";
            return false;
        }
        return true;
    }
    qDebug() << "No best Triangle found.\n";
    return false;
}
//*************************************************************************************************************
// Finds the point of triangle 'tri' nearest to r, expressed in the
// triangle's edge-vector coordinates: nearest point = r1 + p*r12 + q*r13.
// 'dist' receives the (signed) out-of-plane component nn.rr.
// NOTE(review): dist is a true Euclidean distance only if nn.row(tri) is a
// unit normal; when the constructors recompute normals as r12 x r13 they are
// not normalized — confirm against the surface data actually used.
bool MNEProjectToSurface::nearest_triangle_point(const Vector3f &r, const int tri, float &p, float &q, float &dist)
{
    //Calculate some helpers
    Vector3f rr = r - this->r1.row(tri).transpose(); //Vector from triangle corner #1 to r
    float v1 = this->r12.row(tri)*rr;
    float v2 = this->r13.row(tri)*rr;
    //Calculate the orthogonal projection of the point r on the plane
    dist = this->nn.row(tri)*rr;
    // Solve the 2x2 Gram system for the in-plane coordinates (p, q).
    p = (this->b(tri)*v1 - this->c(tri)*v2)/det(tri);
    q = (this->a(tri)*v2 - this->c(tri)*v1)/det(tri);
    //If the point projects into the triangle we are done
    if (p >= 0.0 && p <= 1.0 && q >= 0.0 && q <= 1.0 && (p+q) <= 1.0)
    {
        return true;
    }
    /*
     * Tough: must investigate the sides
     * We might do something intelligent here. However, for now it is ok
     * to do it in the hard way
     */
    // The projection fell outside the triangle: clamp onto each of the three
    // sides in turn (corners included) and keep the closest candidate.
    float p0, q0, t0, dist0, best, bestp, bestq;
    /*
     * Side 1 -> 2
     */
    p0 = p + (q * this->c(tri)) / this->a(tri);
    // Place the point in the corner if it is not on the side
    if (p0 < 0.0)
    {
        p0 = 0.0;
    }
    else if (p0 > 1.0)
    {
        p0 = 1.0;
    }
    q0 = 0;
    // Distance
    dist0 = sqrt((p-p0)*(p-p0)*this->a(tri) +
                 (q-q0)*(q-q0)*this->b(tri) +
                 2*(p-p0)*(q-q0)*this->c(tri) +
                 dist*dist);
    best = dist0;
    bestp = p0;
    bestq = q0;
    /*
     * Side 2 -> 3
     */
    t0 = ((a(tri)-c(tri))*(-p) + (b(tri)-c(tri))*q)/(a(tri)+b(tri)-2*c(tri));
    // Place the point in the corner if it is not on the side
    if (t0 < 0.0)
    {
        t0 = 0.0;
    }
    else if (t0 > 1.0)
    {
        t0 = 1.0;
    }
    // The 2->3 side is parameterized as p = 1 - t, q = t.
    p0 = 1.0 - t0;
    q0 = t0;
    // Distance
    dist0 = sqrt((p-p0)*(p-p0)*this->a(tri) +
                 (q-q0)*(q-q0)*this->b(tri) +
                 2*(p-p0)*(q-q0)*this->c(tri) +
                 dist*dist);
    if (dist0 < best)
    {
        best = dist0;
        bestp = p0;
        bestq = q0;
    }
    /*
     * Side 1 -> 3
     */
    p0 = 0.0;
    q0 = q + (p * c(tri))/b(tri);
    // Place the point in the corner if it is not on the side
    if (q0 < 0.0)
    {
        q0 = 0.0;
    }
    else if (q0 > 1.0)
    {
        q0 = 1.0;
    }
    // Distance
    dist0 = sqrt((p-p0)*(p-p0)*this->a(tri) +
                 (q-q0)*(q-q0)*this->b(tri) +
                 2*(p-p0)*(q-q0)*this->c(tri) +
                 dist*dist);
    if (dist0 < best)
    {
        best = dist0;
        bestp = p0;
        bestq = q0;
    }
    // Commit the best clamped candidate.
    dist = best;
    p = bestp;
    q = bestq;
    return true;
}
//*************************************************************************************************************
// Converts the in-triangle coordinates (p, q) of triangle 'tri' back to a
// Cartesian point: corner #1 plus the weighted edge vectors.
bool MNEProjectToSurface::project_to_triangle(Vector3f &rTri, const float p, const float q, const int tri)
{
    rTri = (this->r1.row(tri) + p*this->r12.row(tri) + q*this->r13.row(tri)).transpose();
    return true;
}
| {
"alphanum_fraction": 0.4358686049,
"author": null,
"avg_line_length": 35.0732394366,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "84713ba83b656e77f04261c025ef40951c1d3770",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "36f21b3ab0c65a133027da83fa8e2a652acd1485",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "ChunmingGu/mne-cpp-master",
"max_forks_repo_path": "libraries/mne/mne_project_to_surface.cpp",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "36f21b3ab0c65a133027da83fa8e2a652acd1485",
"max_issues_repo_issues_event_max_datetime": "2018-08-23T12:40:56.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-08-23T12:40:56.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "ChunmingGu/mne-cpp-master",
"max_issues_repo_path": "libraries/mne/mne_project_to_surface.cpp",
"max_line_length": 116,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "36f21b3ab0c65a133027da83fa8e2a652acd1485",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "ChunmingGu/mne-cpp-master",
"max_stars_repo_path": "libraries/mne/mne_project_to_surface.cpp",
"max_stars_repo_stars_event_max_datetime": "2019-05-14T07:38:25.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-05-14T07:38:25.000Z",
"num_tokens": 3081,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 12451
} |
#include <boost/python/class.hpp>
#include <boost/python/overloads.hpp>
#include <boost/python/manage_new_object.hpp>
#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
#include "MultiChannel.h"
using namespace boost::python;
//
// MultiChannel class
//
// Registers the MultiChannel boost::python bindings. The whole wrapper is
// compiled out on pvAccess API versions below 481, where MultiChannel does
// not exist.
void wrapMultiChannel()
{
#if PVA_API_VERSION >= 481
class_<MultiChannel>("MultiChannel",
    "This class is used to communicate with multiple PV channels.\n\n"
    "**MultiChannel(names [, providerType=PVA])**\n\n"
    "\t:Parameter: *names* (list) - channel names\n\n"
    "\t:Parameter: *providerType* (PROVIDERTYPE) - provider type, either PVA (PV Access) or CA (Channel Access)\n\n"
    "\tThe following example allows access to PVA channels 'int01' and 'double01':\n\n"
    "\t::\n\n"
    "\t\tmChannel = MultiChannel(['int01','double01'])\n\n",
    init<const boost::python::list&>())
    .def(init<const boost::python::list&, PvProvider::ProviderType>())
    //
    // Get methods
    //
    .def("get",
        static_cast<PvObject*(MultiChannel::*)(const std::string&)>(&MultiChannel::get),
        return_value_policy<manage_new_object>(),
        args("requestDescriptor"),
        "Retrieves PV data from multiple channels.\n\n"
        ":Parameter: *requestDescriptor* (str) - PV request descriptor\n\n"
        ":Returns: PvObject with NTMultiChannel structure that contains retrieved data from all member channels as a variant union array\n\n"
        "::\n\n"
        "    pv = mChannel.get('field(value,alarm)')\n\n")
    .def("get",
        static_cast<PvObject*(MultiChannel::*)()>(&MultiChannel::get),
        return_value_policy<manage_new_object>(),
        "Retrieves PV data from multiple channels using the default request descriptor 'field(value,alarm,timeStamp)'.\n\n"
        ":Returns: PvObject with NTMultiChannel structure that contains retrieved data from all member channels as a variant union array\n\n"
        "::\n\n"
        "    pv = mChannel.get()\n\n")
    .def("getAsDoubleArray",
        static_cast<boost::python::list(MultiChannel::*)()>(&MultiChannel::getAsDoubleArray),
        "Retrieves PV data from multiple channels as array of doubles.\n\n"
        ":Returns: list of floats\n\n"
        "::\n\n"
        "    valueList = mChannel.getAsDoubleArray()\n\n")
    .def("put",
        static_cast<void(MultiChannel::*)(const boost::python::list&)>(&MultiChannel::put),
        args("pvObjectList"),
        "Assigns data to multi-channel member PVs'.\n\n"
        ":Parameter: *pvObjectList* (list) - list of PvObject instances that will be assigned to the multi-channel PVs\n\n"
        "::\n\n"
        "    mChannel = MultiChannel(['PVRstringArray', 'PVRdouble'])\n\n"
        "    pv1 = PvObject({'value' : [STRING]}, {'value' : ['ccc', 'ddd']})\n\n"
        "    pv2 = PvDouble(44.44)\n\n"
        "    mChannel.put([pv1,pv2])\n\n")
    .def("putAsDoubleArray",
        static_cast<void(MultiChannel::*)(const boost::python::list&)>(&MultiChannel::putAsDoubleArray),
        args("valueList"),
        "Assigns data to multi-channel member PVs'.\n\n"
        ":Parameter: *valueList* (list) - list of float values that will be assigned to the multi-channel PVs\n\n"
        "::\n\n"
        "    mChannel = MultiChannel(['PVRdouble01', 'PVRdouble02'])\n\n"
        "    mChannel.put([1.0,2.0])\n\n")
    .def("monitor",
        static_cast<void(MultiChannel::*)(const boost::python::object&)>(&MultiChannel::monitor),
        args("subscriber"),
        "Starts multi-channel monitor with default poll period and request descriptor 'field(value,alarm,timeStamp)'.\n\n"
        ":Parameter: *subscriber* (object) - reference to python function that will be executed when PV value changes; the function should take PvObject instance as its argument\n\n"
        "::\n\n"
        "    def echo(pvObject):\n\n"
        "        print('New PV values: %s' % pvObject)\n\n"
        "    mChannel.monitor(echo)\n\n")
    .def("monitor",
        static_cast<void(MultiChannel::*)(const boost::python::object&, double)>(&MultiChannel::monitor),
        args("subscriber", "pollPeriod"),
        "Starts multi-channel monitor with default request descriptor 'field(value,alarm,timeStamp)'.\n\n"
        ":Parameter: *subscriber* (object) - reference to python function that will be executed when PV value changes; the function should take PvObject instance as its argument\n\n"
        ":Parameter: *pollPeriod* (float) - period in seconds between two multi-channel polls\n\n"
        "::\n\n"
        "    def echo(pvObject):\n\n"
        "        print('New PV values: %s' % pvObject)\n\n"
        "    mChannel.monitor(echo, 1.0)\n\n")
    .def("monitor",
        static_cast<void(MultiChannel::*)(const boost::python::object&, double, const std::string&)>(&MultiChannel::monitor),
        args("subscriber", "pollPeriod", "requestDescriptor"),
        "Starts multi-channel monitor.\n\n"
        ":Parameter: *subscriber* (object) - reference to python function that will be executed when PV value changes; the function should take PvObject instance as its argument\n\n"
        ":Parameter: *pollPeriod* (float) - period in seconds between two multi-channel polls\n\n"
        ":Parameter: *requestDescriptor* (str) - describes what PV data should be sent to subscribed channel clients\n\n"
        "::\n\n"
        "    def echo(pvObject):\n\n"
        "        print('New PV values: %s' % pvObject)\n\n"
        "    mChannel.monitor(echo, 1.0, 'field(value,alarm,timeStamp)')\n\n")
    .def("monitorAsDoubleArray",
        static_cast<void(MultiChannel::*)(const boost::python::object&)>(&MultiChannel::monitorAsDoubleArray),
        args("subscriber"),
        "Starts multi-channel monitor for processing list of double values.\n\n"
        ":Parameter: *subscriber* (object) - reference to python function that will be executed when PV values change; the function should take list of python floats as its argument\n\n"
        "::\n\n"
        "    def echo(valueList):\n\n"
        // DOC FIX: example referenced undefined 'x' instead of 'valueList'.
        "        print('New PV values: %s' % valueList)\n\n"
        // DOC FIX: this overload takes no pollPeriod argument.
        "    mChannel.monitorAsDoubleArray(echo)\n\n")
    .def("monitorAsDoubleArray",
        static_cast<void(MultiChannel::*)(const boost::python::object&, double)>(&MultiChannel::monitorAsDoubleArray),
        args("subscriber", "pollPeriod"),
        "Starts multi-channel monitor for processing list of double values.\n\n"
        ":Parameter: *subscriber* (object) - reference to python function that will be executed when PV values change; the function should take list of python floats as its argument\n\n"
        ":Parameter: *pollPeriod* (float) - period in seconds between two multi-channel polls\n\n"
        "::\n\n"
        "    def echo(valueList):\n\n"
        // DOC FIX: example referenced undefined 'x' instead of 'valueList'.
        "        print('New PV values: %s' % valueList)\n\n"
        "    mChannel.monitorAsDoubleArray(echo, 1.0)\n\n")
    .def("stopMonitor",
        &MultiChannel::stopMonitor,
        "Stops multi-channel monitor for PV value changes.\n\n"
        "::\n\n"
        "    mChannel.stopMonitor()\n\n")
;
#endif // if PVA_API_VERSION >= 481
} // wrapMultiChannel()
| {
"alphanum_fraction": 0.6396510483,
"author": null,
"avg_line_length": 49.0137931034,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "83744930db4cba5a0809579242be1e9a67c0e606",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 26,
"max_forks_repo_forks_event_max_datetime": "2021-08-24T08:26:32.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-03-31T23:20:27.000Z",
"max_forks_repo_head_hexsha": "71c6cf56c76221b718ebd11fefcade194fcfdd28",
"max_forks_repo_licenses": [
"CNRI-Python"
],
"max_forks_repo_name": "mrkraimer/pvaPy",
"max_forks_repo_path": "src/pvaccess/pvaccess.MultiChannel.cpp",
"max_issues_count": 63,
"max_issues_repo_head_hexsha": "71c6cf56c76221b718ebd11fefcade194fcfdd28",
"max_issues_repo_issues_event_max_datetime": "2022-03-10T23:14:25.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-06-11T14:12:55.000Z",
"max_issues_repo_licenses": [
"CNRI-Python"
],
"max_issues_repo_name": "mrkraimer/pvaPy",
"max_issues_repo_path": "src/pvaccess/pvaccess.MultiChannel.cpp",
"max_line_length": 186,
"max_stars_count": 19,
"max_stars_repo_head_hexsha": "71c6cf56c76221b718ebd11fefcade194fcfdd28",
"max_stars_repo_licenses": [
"CNRI-Python"
],
"max_stars_repo_name": "mrkraimer/pvaPy",
"max_stars_repo_path": "src/pvaccess/pvaccess.MultiChannel.cpp",
"max_stars_repo_stars_event_max_datetime": "2022-02-24T10:47:28.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-02-09T08:58:19.000Z",
"num_tokens": 1812,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 7107
} |
SUBROUTINE ML5_0_HELAS_CALLS_AMPB_1(P,NHEL,H,IC)
C
C Purpose: for one helicity configuration, evaluate the Born amplitudes
C and the UV/R2 counter-term amplitudes of this process (MadLoop
C auto-generated code; the call sequence and AMPL indexing are fixed by
C the generator and must not be reordered).
C
C Arguments:
C   P(0:3,NEXTERNAL) -- external four-momenta
C   NHEL(NEXTERNAL)  -- helicity assignment of the external legs
C   H                -- helicity configuration index
C   IC(NEXTERNAL)    -- particle/anti-particle flip flags
C
C Modules
C
      USE ML5_0_POLYNOMIAL_CONSTANTS
C
      IMPLICIT NONE
C
C CONSTANTS
C
      INTEGER NEXTERNAL
      PARAMETER (NEXTERNAL=4)
      INTEGER NCOMB
      PARAMETER (NCOMB=16)
      INTEGER NBORNAMPS
      PARAMETER (NBORNAMPS=3)
      INTEGER NLOOPS, NLOOPGROUPS, NCTAMPS
      PARAMETER (NLOOPS=44, NLOOPGROUPS=28, NCTAMPS=85)
      INTEGER NLOOPAMPS
      PARAMETER (NLOOPAMPS=129)
      INTEGER NWAVEFUNCS,NLOOPWAVEFUNCS
      PARAMETER (NWAVEFUNCS=10,NLOOPWAVEFUNCS=93)
      REAL*8 ZERO
      PARAMETER (ZERO=0D0)
      REAL*16 MP__ZERO
      PARAMETER (MP__ZERO=0.0E0_16)
C These are constants related to the split orders
      INTEGER NSO, NSQUAREDSO, NAMPSO
      PARAMETER (NSO=1, NSQUAREDSO=1, NAMPSO=2)
C
C ARGUMENTS
C
      REAL*8 P(0:3,NEXTERNAL)
      INTEGER NHEL(NEXTERNAL), IC(NEXTERNAL)
      INTEGER H
C
C LOCAL VARIABLES
C
      INTEGER I,J,K
      COMPLEX*16 COEFS(MAXLWFSIZE,0:VERTEXMAXCOEFS-1,MAXLWFSIZE)
      LOGICAL DUMMYFALSE
      DATA DUMMYFALSE/.FALSE./
C
C GLOBAL VARIABLES
C
      INCLUDE 'coupl.inc'
      INCLUDE 'mp_coupl.inc'
      INTEGER HELOFFSET
      INTEGER GOODHEL(NCOMB)
      LOGICAL GOODAMP(NSQUAREDSO,NLOOPGROUPS)
      COMMON/ML5_0_FILTERS/GOODAMP,GOODHEL,HELOFFSET
      LOGICAL CHECKPHASE
      LOGICAL HELDOUBLECHECKED
      COMMON/ML5_0_INIT/CHECKPHASE, HELDOUBLECHECKED
      INTEGER SQSO_TARGET
      COMMON/ML5_0_SOCHOICE/SQSO_TARGET
      LOGICAL UVCT_REQ_SO_DONE,MP_UVCT_REQ_SO_DONE,CT_REQ_SO_DONE
     $ ,MP_CT_REQ_SO_DONE,LOOP_REQ_SO_DONE,MP_LOOP_REQ_SO_DONE
     $ ,CTCALL_REQ_SO_DONE,FILTER_SO
      COMMON/ML5_0_SO_REQS/UVCT_REQ_SO_DONE,MP_UVCT_REQ_SO_DONE
     $ ,CT_REQ_SO_DONE,MP_CT_REQ_SO_DONE,LOOP_REQ_SO_DONE
     $ ,MP_LOOP_REQ_SO_DONE,CTCALL_REQ_SO_DONE,FILTER_SO
      INTEGER I_SO
      COMMON/ML5_0_I_SO/I_SO
      INTEGER I_LIB
      COMMON/ML5_0_I_LIB/I_LIB
      COMPLEX*16 AMP(NBORNAMPS)
      COMMON/ML5_0_AMPS/AMP
      COMPLEX*16 W(20,NWAVEFUNCS)
      COMMON/ML5_0_W/W
      COMPLEX*16 WL(MAXLWFSIZE,0:LOOPMAXCOEFS-1,MAXLWFSIZE,
     $ -1:NLOOPWAVEFUNCS)
      COMPLEX*16 PL(0:3,-1:NLOOPWAVEFUNCS)
      COMMON/ML5_0_WL/WL,PL
      COMPLEX*16 AMPL(3,NCTAMPS)
      COMMON/ML5_0_AMPL/AMPL
C
C ----------
C BEGIN CODE
C ----------
C The target squared split order contribution is already reached
C if true.
      IF (FILTER_SO.AND.CT_REQ_SO_DONE) THEN
        GOTO 1001
      ENDIF
C External wavefunctions: two massless vectors and a massive fermion
C pair of mass MDL_MT (per the metadata this is the g g > t t~
C subprocess -- TODO confirm against the generating model).
      CALL VXXXXX(P(0,1),ZERO,NHEL(1),-1*IC(1),W(1,1))
      CALL VXXXXX(P(0,2),ZERO,NHEL(2),-1*IC(2),W(1,2))
      CALL OXXXXX(P(0,3),MDL_MT,NHEL(3),+1*IC(3),W(1,3))
      CALL IXXXXX(P(0,4),MDL_MT,NHEL(4),-1*IC(4),W(1,4))
      CALL VVV1P0_1(W(1,1),W(1,2),GC_4,ZERO,ZERO,W(1,5))
C Amplitude(s) for born diagram with ID 1
      CALL FFV1_0(W(1,4),W(1,3),W(1,5),GC_5,AMP(1))
      CALL FFV1_1(W(1,3),W(1,1),GC_5,MDL_MT,MDL_WT,W(1,6))
C Amplitude(s) for born diagram with ID 2
      CALL FFV1_0(W(1,4),W(1,6),W(1,2),GC_5,AMP(2))
      CALL FFV1_2(W(1,4),W(1,1),GC_5,MDL_MT,MDL_WT,W(1,7))
C Amplitude(s) for born diagram with ID 3
      CALL FFV1_0(W(1,7),W(1,3),W(1,2),GC_5,AMP(3))
      CALL FFV1P0_3(W(1,4),W(1,3),GC_5,ZERO,ZERO,W(1,8))
C Counter-term amplitude(s) for loop diagram number 4
      CALL R2_GG_1_R2_GG_2_0(W(1,5),W(1,8),R2_GGG_1,R2_GGG_2,AMPL(1,1))
C Counter-term amplitude(s) for loop diagram number 5
      CALL FFV1_0(W(1,4),W(1,3),W(1,5),R2_GQQ,AMPL(1,2))
      CALL FFV1_0(W(1,4),W(1,3),W(1,5),UV_GQQB_1EPS,AMPL(2,3))
      CALL FFV1_0(W(1,4),W(1,3),W(1,5),UV_GQQB_1EPS,AMPL(2,4))
      CALL FFV1_0(W(1,4),W(1,3),W(1,5),UV_GQQB_1EPS,AMPL(2,5))
      CALL FFV1_0(W(1,4),W(1,3),W(1,5),UV_GQQB_1EPS,AMPL(2,6))
      CALL FFV1_0(W(1,4),W(1,3),W(1,5),UV_GQQB_1EPS,AMPL(2,7))
      CALL FFV1_0(W(1,4),W(1,3),W(1,5),UV_GQQB_1EPS,AMPL(2,8))
      CALL FFV1_0(W(1,4),W(1,3),W(1,5),UV_GQQG_1EPS,AMPL(2,9))
      CALL FFV1_0(W(1,4),W(1,3),W(1,5),UV_GQQB,AMPL(1,10))
      CALL FFV1_0(W(1,4),W(1,3),W(1,5),UV_GQQT,AMPL(1,11))
      CALL FFV1_2(W(1,4),W(1,2),GC_5,MDL_MT,MDL_WT,W(1,9))
C Counter-term amplitude(s) for loop diagram number 7
      CALL R2_QQ_1_R2_QQ_2_0(W(1,9),W(1,6),R2_QQQ,R2_QQT,AMPL(1,12))
      CALL R2_QQ_2_0(W(1,9),W(1,6),UV_TMASS_1EPS,AMPL(2,13))
      CALL R2_QQ_2_0(W(1,9),W(1,6),UV_TMASS,AMPL(1,14))
C Counter-term amplitude(s) for loop diagram number 8
      CALL FFV1_0(W(1,4),W(1,6),W(1,2),R2_GQQ,AMPL(1,15))
      CALL FFV1_0(W(1,4),W(1,6),W(1,2),UV_GQQB_1EPS,AMPL(2,16))
      CALL FFV1_0(W(1,4),W(1,6),W(1,2),UV_GQQB_1EPS,AMPL(2,17))
      CALL FFV1_0(W(1,4),W(1,6),W(1,2),UV_GQQB_1EPS,AMPL(2,18))
      CALL FFV1_0(W(1,4),W(1,6),W(1,2),UV_GQQB_1EPS,AMPL(2,19))
      CALL FFV1_0(W(1,4),W(1,6),W(1,2),UV_GQQB_1EPS,AMPL(2,20))
      CALL FFV1_0(W(1,4),W(1,6),W(1,2),UV_GQQB_1EPS,AMPL(2,21))
      CALL FFV1_0(W(1,4),W(1,6),W(1,2),UV_GQQG_1EPS,AMPL(2,22))
      CALL FFV1_0(W(1,4),W(1,6),W(1,2),UV_GQQB,AMPL(1,23))
      CALL FFV1_0(W(1,4),W(1,6),W(1,2),UV_GQQT,AMPL(1,24))
      CALL FFV1_1(W(1,3),W(1,2),GC_5,MDL_MT,MDL_WT,W(1,10))
C Counter-term amplitude(s) for loop diagram number 10
      CALL R2_QQ_1_R2_QQ_2_0(W(1,7),W(1,10),R2_QQQ,R2_QQT,AMPL(1,25))
      CALL R2_QQ_2_0(W(1,7),W(1,10),UV_TMASS_1EPS,AMPL(2,26))
      CALL R2_QQ_2_0(W(1,7),W(1,10),UV_TMASS,AMPL(1,27))
C Counter-term amplitude(s) for loop diagram number 11
      CALL FFV1_0(W(1,7),W(1,3),W(1,2),R2_GQQ,AMPL(1,28))
      CALL FFV1_0(W(1,7),W(1,3),W(1,2),UV_GQQB_1EPS,AMPL(2,29))
      CALL FFV1_0(W(1,7),W(1,3),W(1,2),UV_GQQB_1EPS,AMPL(2,30))
      CALL FFV1_0(W(1,7),W(1,3),W(1,2),UV_GQQB_1EPS,AMPL(2,31))
      CALL FFV1_0(W(1,7),W(1,3),W(1,2),UV_GQQB_1EPS,AMPL(2,32))
      CALL FFV1_0(W(1,7),W(1,3),W(1,2),UV_GQQB_1EPS,AMPL(2,33))
      CALL FFV1_0(W(1,7),W(1,3),W(1,2),UV_GQQB_1EPS,AMPL(2,34))
      CALL FFV1_0(W(1,7),W(1,3),W(1,2),UV_GQQG_1EPS,AMPL(2,35))
      CALL FFV1_0(W(1,7),W(1,3),W(1,2),UV_GQQB,AMPL(1,36))
      CALL FFV1_0(W(1,7),W(1,3),W(1,2),UV_GQQT,AMPL(1,37))
C Counter-term amplitude(s) for loop diagram number 13
      CALL FFV1_0(W(1,4),W(1,10),W(1,1),R2_GQQ,AMPL(1,38))
      CALL FFV1_0(W(1,4),W(1,10),W(1,1),UV_GQQB_1EPS,AMPL(2,39))
      CALL FFV1_0(W(1,4),W(1,10),W(1,1),UV_GQQB_1EPS,AMPL(2,40))
      CALL FFV1_0(W(1,4),W(1,10),W(1,1),UV_GQQB_1EPS,AMPL(2,41))
      CALL FFV1_0(W(1,4),W(1,10),W(1,1),UV_GQQB_1EPS,AMPL(2,42))
      CALL FFV1_0(W(1,4),W(1,10),W(1,1),UV_GQQB_1EPS,AMPL(2,43))
      CALL FFV1_0(W(1,4),W(1,10),W(1,1),UV_GQQB_1EPS,AMPL(2,44))
      CALL FFV1_0(W(1,4),W(1,10),W(1,1),UV_GQQG_1EPS,AMPL(2,45))
      CALL FFV1_0(W(1,4),W(1,10),W(1,1),UV_GQQB,AMPL(1,46))
      CALL FFV1_0(W(1,4),W(1,10),W(1,1),UV_GQQT,AMPL(1,47))
C Counter-term amplitude(s) for loop diagram number 14
      CALL FFV1_0(W(1,9),W(1,3),W(1,1),R2_GQQ,AMPL(1,48))
      CALL FFV1_0(W(1,9),W(1,3),W(1,1),UV_GQQB_1EPS,AMPL(2,49))
      CALL FFV1_0(W(1,9),W(1,3),W(1,1),UV_GQQB_1EPS,AMPL(2,50))
      CALL FFV1_0(W(1,9),W(1,3),W(1,1),UV_GQQB_1EPS,AMPL(2,51))
      CALL FFV1_0(W(1,9),W(1,3),W(1,1),UV_GQQB_1EPS,AMPL(2,52))
      CALL FFV1_0(W(1,9),W(1,3),W(1,1),UV_GQQB_1EPS,AMPL(2,53))
      CALL FFV1_0(W(1,9),W(1,3),W(1,1),UV_GQQB_1EPS,AMPL(2,54))
      CALL FFV1_0(W(1,9),W(1,3),W(1,1),UV_GQQG_1EPS,AMPL(2,55))
      CALL FFV1_0(W(1,9),W(1,3),W(1,1),UV_GQQB,AMPL(1,56))
      CALL FFV1_0(W(1,9),W(1,3),W(1,1),UV_GQQT,AMPL(1,57))
C Counter-term amplitude(s) for loop diagram number 17
      CALL VVV1_0(W(1,1),W(1,2),W(1,8),R2_3GG,AMPL(1,58))
      CALL VVV1_0(W(1,1),W(1,2),W(1,8),UV_3GB_1EPS,AMPL(2,59))
      CALL VVV1_0(W(1,1),W(1,2),W(1,8),UV_3GB_1EPS,AMPL(2,60))
      CALL VVV1_0(W(1,1),W(1,2),W(1,8),UV_3GB_1EPS,AMPL(2,61))
      CALL VVV1_0(W(1,1),W(1,2),W(1,8),UV_3GB_1EPS,AMPL(2,62))
      CALL VVV1_0(W(1,1),W(1,2),W(1,8),UV_3GB_1EPS,AMPL(2,63))
      CALL VVV1_0(W(1,1),W(1,2),W(1,8),UV_3GB_1EPS,AMPL(2,64))
      CALL VVV1_0(W(1,1),W(1,2),W(1,8),UV_3GG_1EPS,AMPL(2,65))
      CALL VVV1_0(W(1,1),W(1,2),W(1,8),UV_3GB,AMPL(1,66))
      CALL VVV1_0(W(1,1),W(1,2),W(1,8),UV_3GT,AMPL(1,67))
C Counter-term amplitude(s) for loop diagram number 31
      CALL R2_GG_1_0(W(1,5),W(1,8),R2_GGQ,AMPL(1,68))
      CALL R2_GG_1_0(W(1,5),W(1,8),R2_GGQ,AMPL(1,69))
      CALL R2_GG_1_0(W(1,5),W(1,8),R2_GGQ,AMPL(1,70))
      CALL R2_GG_1_0(W(1,5),W(1,8),R2_GGQ,AMPL(1,71))
C Counter-term amplitude(s) for loop diagram number 32
      CALL VVV1_0(W(1,1),W(1,2),W(1,8),R2_3GQ,AMPL(1,72))
      CALL VVV1_0(W(1,1),W(1,2),W(1,8),R2_3GQ,AMPL(1,73))
      CALL VVV1_0(W(1,1),W(1,2),W(1,8),R2_3GQ,AMPL(1,74))
      CALL VVV1_0(W(1,1),W(1,2),W(1,8),R2_3GQ,AMPL(1,75))
C Counter-term amplitude(s) for loop diagram number 34
      CALL R2_GG_1_R2_GG_3_0(W(1,5),W(1,8),R2_GGQ,R2_GGB,AMPL(1,76))
C Counter-term amplitude(s) for loop diagram number 35
      CALL VVV1_0(W(1,1),W(1,2),W(1,8),R2_3GQ,AMPL(1,77))
C Counter-term amplitude(s) for loop diagram number 37
      CALL R2_GG_1_R2_GG_3_0(W(1,5),W(1,8),R2_GGQ,R2_GGT,AMPL(1,78))
C Counter-term amplitude(s) for loop diagram number 38
      CALL VVV1_0(W(1,1),W(1,2),W(1,8),R2_3GQ,AMPL(1,79))
C At this point, all CT amps needed for (QCD=6), i.e. of split
C order ID=1, are computed.
      IF(FILTER_SO.AND.SQSO_TARGET.EQ.1) GOTO 2000
      GOTO 1001
 2000 CONTINUE
      CT_REQ_SO_DONE=.TRUE.
 1001 CONTINUE
      END
| {
"alphanum_fraction": 0.6241134752,
"author": null,
"avg_line_length": 43.3348623853,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "cb912048c843496f63ef640defa460288af3fb7d",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2022-03-10T09:02:00.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-03-10T09:02:00.000Z",
"max_forks_repo_head_hexsha": "2e04f23353051f64e1604b23105fe3faabd32869",
"max_forks_repo_licenses": [
"NCSA"
],
"max_forks_repo_name": "valassi/mg5amc_test",
"max_forks_repo_path": "tests/input_files/IOTestsComparison/MadLoop_output_from_the_interface/TIR_output/%ggttx_IOTest%SubProcesses%P0_gg_ttx%helas_calls_ampb_1.f",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "2e04f23353051f64e1604b23105fe3faabd32869",
"max_issues_repo_issues_event_max_datetime": "2022-03-30T16:15:01.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-03-10T09:13:31.000Z",
"max_issues_repo_licenses": [
"NCSA"
],
"max_issues_repo_name": "valassi/mg5amc_test",
"max_issues_repo_path": "tests/input_files/IOTestsComparison/MadLoop_output_from_the_interface/TIR_output/%ggttx_IOTest%SubProcesses%P0_gg_ttx%helas_calls_ampb_1.f",
"max_line_length": 71,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2e04f23353051f64e1604b23105fe3faabd32869",
"max_stars_repo_licenses": [
"NCSA"
],
"max_stars_repo_name": "valassi/mg5amc_test",
"max_stars_repo_path": "tests/input_files/IOTestsComparison/MadLoop_output_from_the_interface/TIR_output/%ggttx_IOTest%SubProcesses%P0_gg_ttx%helas_calls_ampb_1.f",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4656,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 9447
} |
Require Import Bool List.
Import ListNotations.
Require Import Undecidability.PCP.PCP.
Require Import Undecidability.PCP.Util.Facts.
Import PCPListNotation.
Require Import Undecidability.PCP.Util.PCP_facts.
Require Import Undecidability.Synthetic.Definitions.
Local Hint Rewrite <- app_assoc : list.
Local Hint Resolve in_eq in_nil in_cons in_or_app : core.
Set Default Goal Selector "!".
(* * MPCP to PCP *)
Section MPCP_PCP.
(* Reduction of the Modified PCP instance (x0/y0, R) to plain PCP.
   Letters are interleaved with a fresh symbol # (hash_l / hash_r) and a
   fresh delimiter $ is used so that, in the constructed system P, any
   PCP match is forced to start with the card d and to close with the
   card e (see match_start and the correctness lemmas below). *)
  Local Definition card := card nat.
  Local Definition string := string nat.
  Local Notation "x / y" := (x, y).
  Variable R : list (card).
  Variable x0 y0 : string.
  Definition Sigma := sym (x0/y0 :: R).
  Definition R_Sigma : sym R <<= Sigma.
  Proof. unfold Sigma. cbn. do 2 apply incl_appr. apply incl_refl. Qed.
  (* $ and # are chosen fresh w.r.t. Sigma (and # also fresh w.r.t. $). *)
  Definition dollar := fresh Sigma.
  Notation "$" := dollar.
  Definition hash := fresh (dollar :: Sigma).
  Notation "#" := hash.
  Fixpoint hash_l (x : string) :=
    match x with
    | [] => []
    | a :: x => # :: a :: hash_l x
    end.
  Notation "#_L" := hash_l.
  Fixpoint hash_r (x : string) :=
    match x with
    | [] => []
    | a :: x => a :: # :: hash_r x
    end.
  Notation "#_R" := hash_r.
  (* d: the designated opening card; e: the closing card of the system P. *)
  Definition d := ($ :: (#_L x0)) / ($ :: # :: (#_R y0)).
  Definition e := [#;$] / [$].
  Definition P := [d;e] ++ map (fun '(x,y) => (#_L x, #_R y)) (filter (fun '(x,y) => is_cons x || is_cons y) (x0/y0::R)).
  Lemma P_inv c :
    c el P ->
    c = d \/ c = e \/ (exists x y, (x,y) el (x0/y0 :: R) /\ c = (#_L x, #_R y) /\ ( (x,y) <> (nil, nil))).
  Proof.
    cbn -[filter]. intros. firstorder. eapply in_map_iff in H as ((x,y) & <- & (? & ? % orb_prop) % filter_In).
    rewrite !is_cons_true_iff in H0.
    right. right. exists x, y. firstorder; destruct x, y; cbn; firstorder congruence.
  Qed.
  (* No card of P has a top string starting with an alphabet letter. *)
  Lemma P_inv_top x y a :
    a el Sigma ->
    ~(a :: x,y) el P.
  Proof.
    intros ? [ | [ | (x'' & y' & ? & ? & ?) ] ] % P_inv.
    - inv H0. edestruct fresh_spec; eauto.
    - inv H0. edestruct fresh_spec with (l := dollar :: Sigma); [ | reflexivity]. firstorder.
    - inv H1. destruct x''; cbn -[fresh] in *; [congruence | ]. inversion H4.
      edestruct fresh_spec with (l := dollar :: Sigma); [ | reflexivity ]. unfold hash in *.
      rewrite <- H3. firstorder.
  Qed.
  (* No card of P has a bottom string starting with #. *)
  Lemma P_inv_bot x y :
    ~(y, # :: x) el P.
  Proof.
    intros [ | [ | (x'' & y' & ? & ? & ?) ] ] % P_inv.
    - inv H. edestruct fresh_spec; eauto.
    - inv H. edestruct fresh_spec; eauto.
    - inv H0. destruct y'; cbn -[fresh] in *; [congruence | ]. inversion H4.
      edestruct fresh_spec with (l := dollar :: Sigma); [ | reflexivity ]. unfold hash in *.
      rewrite H2. right. eapply sym_word_R; eauto.
  Qed.
  (* Every non-empty match over P must begin with the card d. *)
  Lemma match_start d' B :
    d' :: B <<= P -> tau1 (d' :: B) = tau2 (d' :: B) -> d' = d.
  Proof.
    intros Hs Hm.
    assert (d' el P) as [ -> | [ -> | (x & y & ? & -> & ?) ] ] % P_inv by now eapply Hs; cbn -[fresh] in Hm.
    - congruence.
    - inv Hm. now edestruct fresh_spec; eauto.
    - cbn in Hm. destruct x, y; try firstorder congruence.
      + destruct (tau1_inv Hm) as (x' & y' & ? ).
        pose (cons_incl Hs).
        assert ( (n :: x') / y' el P) as [] % P_inv_top by eauto.
        eapply sym_word_R; eauto.
      + cbn -[fresh] in Hm. symmetry in Hm. destruct (tau2_inv Hm) as (x' & y' & ? ).
        pose (cons_incl Hs).
        assert ( y' / (# :: x') el P) as [] % P_inv_bot by eauto.
      + cbn -[fresh] in Hm. inversion Hm. assert (fresh (dollar :: Sigma) = hash) by reflexivity.
        edestruct fresh_spec; try eassumption. right.
        eapply sym_word_R in H. subst. eauto.
  Qed.
  Lemma hash_swap x :
    #_L x ++ [#] = # :: #_R x.
  Proof.
    induction x; cbn in *; now try rewrite IHx.
  Qed.
  Lemma hash_L_app x y :
    #_L (x ++ y) = #_L x ++ #_L y.
  Proof.
    induction x; cbn in *; now try rewrite IHx.
  Qed.
  Lemma hash_R_app x y :
    #_R (x ++ y) = #_R x ++ #_R y.
  Proof.
    induction x; cbn in *; now try rewrite IHx.
  Qed.
  Lemma hash_L_diff x y :
    #_L x <> # :: #_R y.
  Proof.
    revert y. induction x; cbn -[fresh]; intros ? ?; inv H.
    destruct y; inv H1. firstorder.
  Qed.
  Lemma hash_R_inv x y :
    #_R x = #_R y -> x = y.
  Proof.
    revert y; induction x; intros [] H; inv H; firstorder congruence.
  Qed.
  Lemma doll_hash_L x:
    x <<= Sigma -> ~ $ el #_L x.
  Proof.
    induction x; intros.
    -- firstorder.
    -- intros [ | []].
       ++ now eapply fresh_spec in H0 as [].
       ++ symmetry in H0. eapply fresh_spec in H0 as []. firstorder.
       ++ firstorder.
  Qed.
  (* Completeness direction: an MPCP match yields a PCP match over P. *)
  Lemma MPCP_PCP x y A :
    A <<= x0/y0 :: R -> x ++ tau1 A = y ++ tau2 A ->
    exists B, B <<= P /\ (#_L x) ++ tau1 B = # :: #_R y ++ tau2 B.
  Proof.
    revert x y; induction A; cbn -[fresh P] in *; intros.
    - rewrite !app_nil_r in H0. subst. exists [e]. firstorder.
      cbn -[fresh].
      enough ((#_L y ++ [#]) ++ [$] = # :: #_R y ++ [$]) by now autorewrite with list in *.
      now rewrite hash_swap.
    - destruct a as (x', y').
      assert ( (x ++ x') ++ tau1 A = (y ++ y') ++ tau2 A) by now simpl_list.
      eapply IHA in H1 as (B & ? & ?) ; [ | firstorder].
      rewrite hash_L_app, hash_R_app in H2.
      autorewrite with list in H2.
      destruct (is_cons x' || is_cons y') eqn:E.
      + exists ( (#_L x' / #_R y') :: B). split.
        * intros ? [ <- | ]; [ | eauto].
          unfold P. rewrite in_app_iff, in_map_iff. right. exists (x', y'). split; [easy|].
          eapply filter_In. eauto.
        * eassumption.
      + exists B. rewrite orb_false_iff, <- !not_true_iff_false, !is_cons_true_iff in E. destruct E.
        destruct x', y'; firstorder congruence.
  Qed.
  (* Soundness direction: a PCP match over P yields an MPCP match. *)
  Lemma PCP_MPCP x y B :
    B <<= P -> x <<= Sigma -> y <<= Sigma ->
    (#_L x) ++ tau1 B = # :: (#_R y) ++ tau2 B ->
    exists A, A <<= x0/y0::R /\ x ++ tau1 A = y ++ tau2 A.
  Proof.
    revert x y. induction B; cbn -[fresh P] in *; intros.
    - exfalso. rewrite !app_nil_r in H2. eapply hash_L_diff in H2 as [].
    - assert (a el P) as [ -> | [ -> | (x' & y' & ? & -> & ?) ] ] % P_inv by intuition; cbn -[fresh P] in *.
      + exfalso. setoid_rewrite app_comm_cons in H2 at 2.
        eapply list_prefix_inv in H2 as [[] % hash_L_diff E2].
        * now eapply doll_hash_L.
        * rewrite <- hash_swap. rewrite in_app_iff. intros [ | [ | []]]. { eapply doll_hash_L; eauto. }
          now eapply fresh_spec in H3 as [].
      + exists []. assert ((#_L x ++ [#]) ++ $ :: tau1 B = (# :: #_R y) ++ $ :: tau2 B) by now simpl_list.
        eapply list_prefix_inv in H3.
        { rewrite hash_swap in H3. inv H3.
          inv H4. eapply hash_R_inv in H6 as ->. firstorder. }
        * rewrite in_app_iff. intros [ | [ | []]]. { eapply doll_hash_L in H4; eauto. }
          now eapply fresh_spec in H4 as [].
        * rewrite <- hash_swap. rewrite in_app_iff. intros [ | [ | []]]. { eapply doll_hash_L; eauto. }
          now eapply fresh_spec in H4 as [].
      + assert ((#_L x ++ #_L x') ++ tau1 B = # :: (#_R y ++ #_R y') ++ tau2 B) by now simpl_list.
        rewrite <- hash_L_app, <- hash_R_app in H5. eapply IHB in H5 as (A & ? & ?).
        * exists (x' / y' :: A). intuition idtac; try inv H7; auto with datatypes;
            cbn; now autorewrite with list in *.
        * eapply incl_cons_inv. eassumption.
        * eapply incl_app. { eauto. }
          intros ? ?. destruct H3. { inv H3. eauto. } eapply R_Sigma, sym_word_l; eauto.
        * eapply incl_app. { eauto. }
          intros ? ?. destruct H3. { inv H3. eauto. } eapply R_Sigma, sym_word_R; eauto.
  Qed.
  Lemma MPCP_PCP_cor :
    MPCP (x0/y0, R) <-> PCP P.
  Proof.
    split.
    - intros (A & Hi & (B & HiB & H) % MPCP_PCP).
      + exists (d :: B).
        firstorder try congruence.
        cbn. f_equal. now rewrite H.
      + eassumption.
    - intros ([|d' B] & Hi & He & H); firstorder.
      pose proof H as -> % match_start; eauto.
      cbn -[fresh] in H. inv H.
      eapply PCP_MPCP in H1; cbn.
      + eassumption.
      + eapply cons_incl. eassumption.
      + apply incl_appl. apply incl_refl.
      + apply incl_appr. apply incl_appl. apply incl_refl.
  Qed.
End MPCP_PCP.
(* The reduction function maps an MPCP instance ((x, y), R) to the PCP
   instance P R x y; correctness is exactly MPCP_PCP_cor. *)
Theorem reduction :
  MPCP ⪯ PCP.
Proof.
  exists (fun '(x, y, R) => P R x y).
  intros ((x & y) & R).
  eapply MPCP_PCP_cor.
Qed.
| {
"alphanum_fraction": null,
"author": "uds-psl",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/coq/uds-psl-coq-library-undecidability/coq-library-undecidability-4547d325e8ce7a6d841fbfe5df4429ee9cb6f214/theories/PCP/Reductions/MPCP_to_PCP.v",
"reason": null,
"repo": "coq-library-undecidability",
"save_path": "github-repos/coq/uds-psl-coq-library-undecidability",
"sha": "4547d325e8ce7a6d841fbfe5df4429ee9cb6f214",
"size": null
} |
from tkinter import *
from PIL import ImageTk, Image
from tkinter import filedialog
import numpy as np
from tensorflow import keras
import tensorflow as tf
from tensorflow.python.keras.models import load_model
from tkinter import messagebox
# Network input size in pixels; must match the size the model was trained on.
longitud, altura = 200, 200
# Exported Keras model architecture and its weights (snapshot directory).
modelo = './modelo_project_modelo_20200903_211740/modelo.h5'
pesos_modelo = './modelo_project_modelo_20200903_211740/pesos.h5'
# Load the CNN once at startup so each classification only runs inference.
cnn = load_model(modelo)
cnn.load_weights(pesos_modelo)
def open_img():
    """Ask the user for an image, display a preview in the window, and show
    the CNN's mask/no-mask prediction in a message box."""
    path = openfilename()
    # 300x300 preview in the Tk window.
    img = Image.open(path)
    # Image.ANTIALIAS was an alias of LANCZOS and was removed in Pillow 10;
    # using LANCZOS directly is identical behavior and future-proof.
    img = img.resize((300, 300), Image.LANCZOS)
    img = ImageTk.PhotoImage(img)
    panel = Label(root, image=img)
    # Keep a reference on the widget so the PhotoImage is not garbage-collected.
    panel.image = img
    panel.grid(row=2)

    # Reload the file at the network's expected input size and classify it.
    img = keras.preprocessing.image.load_img(
        path, target_size=(altura, longitud)
    )
    img_array = keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)  # batch of one
    predictions = cnn.predict(img_array)
    score = tf.nn.softmax(predictions[0])
    class_names = ['CON MASCARILLA', 'SIN MASCARILLA']
    messagebox.showinfo("CNN mask", "Imagen: {} \nTipo: {} \nPorcentaje de acierto: {:.2f}."
                        .format(path, class_names[np.argmax(score)], 100 * np.max(score)))
def openfilename():
    """Open a file-selection dialog and return the chosen path ('' if the
    user cancels)."""
    # The original title contained a stray leading double quote
    # ('"Seleccione la imagen'); this fixes the user-visible text.
    filename = filedialog.askopenfilename(title='Seleccione la imagen')
    return filename
# Build the main window and start the Tk event loop.
root = Tk()
root.title("CNN Covid mask")
root.geometry("350x350")
root.resizable(width=True, height=True)
# Button(...).grid(...) returns None, so chaining would bind None to `btn`;
# keep the widget reference and the layout call separate.
btn = Button(root, text='Cargar una imagen', command=open_img)
btn.grid(row=1)
root.mainloop()
| {
"alphanum_fraction": 0.6856780735,
"author": null,
"avg_line_length": 27.6842105263,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6a22310329aa77b68564040c3994cc4b4f578736",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "52ea60cf534c75b0b1d78a54567b0fa5b90f797f",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "BryanManzano2016/CNN-mask---IA",
"max_forks_repo_path": "interfaz_proyecto.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "52ea60cf534c75b0b1d78a54567b0fa5b90f797f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "BryanManzano2016/CNN-mask---IA",
"max_issues_repo_path": "interfaz_proyecto.py",
"max_line_length": 93,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "52ea60cf534c75b0b1d78a54567b0fa5b90f797f",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "BryanManzano2016/CNN-mask---IA",
"max_stars_repo_path": "interfaz_proyecto.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 389,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1578
} |
import vnmrjpy as vj
import numpy as np
from scipy.ndimage.filters import gaussian_filter, median_filter
from vnmrjpy.core.utils import vprint
import copy
# Hide zero-divide RuntimeWarnings raised by the mask-normalized Gaussian
# smoothing in _gaussian_filter (division by a smoothed mask that is 0
# outside the brain/object).
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
"""
Generate fieldmap from a set of gradient echo images
"""
def make_fieldmap(varr, mask=None, selfmask=True, combine_receivers=True, \
                        method='triple_echo'):
    """Generate B0 map from gradient echo images.

    Args:
        varr -- input gradient echo image set with different
                echo time acquisitions at the time dimension
        mask -- ndarray of [0 or 1] with same shape of varr.data in
                spatial dims
        selfmask -- Boolean, set True to create mask based on magnitude data
        combine_receivers -- kept for interface compatibility; the receiver
                channels are always combined by lowest fit residual
        method -- 'triple_echo': see ref [1]
    Return:
        fieldmap -- vj.varray with data attribute updated to the B0 map
    Raises:
        Exception -- for any method other than 'triple_echo'
    Refs:
        [1] Windischberger et al.: Robust Field Map Generation Using a Triple-
        Echo Acquisition, JMRI, (2004)
    """
    # `mask == None` on an ndarray is an elementwise comparison and raises an
    # ambiguous-truth-value error whenever a mask is actually passed in;
    # identity comparison is the correct check here.
    if mask is None and selfmask:
        varr, mask = vj.func.mask(varr, mask_out=True)

    if method == 'triple_echo':
        gaussian_kernel = _get_gaussian_kernel(varr)
        median_kernel = _get_median_kernel(varr)
        # echo times in milliseconds
        te = [float(i)*1000 for i in varr.pd['te']]
        phasedata = np.arctan2(np.imag(varr.data),np.real(varr.data))
        # the original `phasedata.astype('float32')` discarded its result
        # (ndarray.astype returns a new array); rebind to actually downcast
        phasedata = phasedata.astype('float32')
        # evaluate all 7 phase-wrapping candidates and fit phase(TE) per voxel
        phase_set = _make_phase_set(phasedata)
        d_set, c_set, residual_set = _calc_freq_shift(phase_set,te)
        # per voxel, keep the wrapping case with the lowest fit residual
        indice_arr = _get_indice_map(residual_set)
        res_map = _get_from_set(residual_set, indice_arr)
        # pre field map without filters and receivers not combined
        fieldmap = _get_from_set(d_set, indice_arr)
        # fieldmap processing: combine channels, then smooth
        fieldmap = _combine_receivers(fieldmap, res_map)
        fieldmap = _median_filter(fieldmap, kernel=median_kernel, mask=mask)
        fieldmap = _gaussian_filter(fieldmap, kernel=gaussian_kernel,mask=mask)
        # creating final varray
        varr.data = fieldmap
        varr.description = 'B0_map'
        return varr
    else:
        raise(Exception('Not implemented fieldmap generating method'))
def _get_gaussian_kernel(varr):
mult = 0.5 # means size in mm
if type(varr.nifti_header) == type(None):
varr = varr.set_nifti_header()
affine = varr.nifti_header.get_qform()
kernel = [1/i*mult for i in [affine[0,0],affine[1,1],affine[2,2]]]
return kernel
def _get_median_kernel(varr):
slc_dim = varr.sdims.index('slice')
kernel = [2,2,2]
kernel[slc_dim] = 1
return tuple(kernel)
def _make_phase_set(phasedata):
"""Return all possible phase wrapping combinations
Args:
phasedata -- numpy ndarray of dim (x,y,z, time, rcvrs) with phase data
Return:
phasedata_list -- list of possible phase data in all possible
cases of phase wrapping
Note: This phase ordering rule is also used in _get_fieldmap function
"""
# only implemented for 3 TE points
p0 = phasedata[...,0,:]
p1 = phasedata[...,1,:]
p2 = phasedata[...,2,:]
#See ref [1] in make_fieldmap()
#case 1
case1 = phasedata
phasedata_list = [case1]
#case 2
case2 = np.stack([p0,p1+2*np.pi,p2+2*np.pi],axis=3)
phasedata_list.append(case2)
#case 3
case3 = np.stack([p0,p1-2*np.pi,p2-2*np.pi],axis=3)
phasedata_list.append(case3)
#case 4
case4 = np.stack([p0,p1,p2+2*np.pi],axis=3)
phasedata_list.append(case4)
#case 5
case5 = np.stack([p0,p1,p2-2*np.pi],axis=3)
phasedata_list.append(case5)
#case 6
case6 = np.stack([p0,p1+2*np.pi,p2],axis=3)
phasedata_list.append(case6)
#case 7
case7 = np.stack([p0,p1-2*np.pi,p2],axis=3)
phasedata_list.append(case7)
return phasedata_list
def _calc_freq_shift(phase_set, te):
    """Calculate frequency shift at each voxel for each phase-wrapping case.

    Performs a voxelwise linear regression Phase(TE) = c + d * TE, where the
    slope d is the frequency shift.

    Args:
        phase_set -- list of phase arrays (x,y,z,t,rcvr), one per wrapping case
        te -- echo times in ms
    Return:
        d_set -- list of slope maps, shape (x,y,z,1,rcvr) each
        c_set -- list of intercept maps, same shape
        residual_set -- list of fit-residual maps, same shape
    """
    d_set = []
    c_set = []
    residual_set = []
    shape = phase_set[0].shape
    te = np.array(te,dtype=float)
    for num, phase in enumerate(phase_set):
        (x,y,z,t,rcvr) = phase.shape
        # reshape to accommodate polyfit vectorization: time axis first,
        # all remaining axes flattened into columns
        phase = np.reshape(np.swapaxes(phase, 0,3),(t,-1))
        out = np.polyfit(te, phase, 1, full=True)
        d,c = out[0]
        res = out[1]
        # reshape back to original; the swapaxes must mirror the forward
        # transform exactly so voxels land back in place
        d = np.swapaxes(np.reshape(d,(1,y,z,x,rcvr)),0,3)
        c = np.swapaxes(np.reshape(c,(1,y,z,x,rcvr)),0,3)
        res = np.swapaxes(np.reshape(res,(1,y,z,x,rcvr)),0,3)
        # hack: just make the loss large where d is negative, so negative
        # slopes are never selected by the residual minimization
        res[d < 0] = 10000
        # append to list
        d_set.append(d)
        c_set.append(c)
        residual_set.append(res)
    return d_set, c_set, residual_set
def _combine_receivers(arr, res_map):
"""Pick the value with the lowest residual from each chanel at a voxel"""
min_ind = np.argmin(res_map, axis=4)
arr = np.moveaxis(arr,[4,0,1,2,3],[0,1,2,3,4])
arr = np.choose(min_ind, arr)
return np.expand_dims(arr,axis=4)
def _get_indice_map(chisqr_set):
"""Find element with lowest chisqr at each voxel """
#make chisqr array of dims [x,y,z,0,rcvr,chisqr]
chisqr_arr = np.stack(chisqr_set,axis=5)
indice_arr = np.argmin(chisqr_arr,axis=5)
return indice_arr
def _get_from_set(par_set, indice_arr):
"""Extract values from set according to index array"""
par_arr = np.stack(par_set,axis=0)
par_map = np.choose(indice_arr, par_arr)
return par_map
def _median_filter(fieldmap, kernel=(3,3,3), mask=None):
for rcvr in range(fieldmap.shape[4]):
arr = copy.copy(fieldmap[...,0,rcvr])
fieldmap[...,0,rcvr] = median_filter(arr,size=kernel)
return fieldmap
def _gaussian_filter(fieldmap, kernel=(2,2,2), mask=None):
for rcvr in range(fieldmap.shape[4]):
arr = copy.copy(fieldmap[...,0,rcvr])
if type(mask) == type(None):
fieldmap[...,0,rcvr] = gaussian_filter(arr,sigma=kernel)
elif type(mask) != type(None):
mask_rcvr = mask[...,0,rcvr]
arr = gaussian_filter(arr * mask_rcvr,sigma=kernel)
arr /= gaussian_filter(mask_rcvr,sigma=kernel)
arr[mask_rcvr == 0] = 0
fieldmap[...,0,rcvr] = arr
return fieldmap
| {
"alphanum_fraction": 0.6383758429,
"author": null,
"avg_line_length": 32.9565217391,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "25e1d2ce8c0312c5da1641ec6cbaa3b6df737651",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "48707a1000dc87e646e37c8bd686e695bd31a61e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "hlatkydavid/vnmrjpy",
"max_forks_repo_path": "vnmrjpy/func/fieldmap.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "48707a1000dc87e646e37c8bd686e695bd31a61e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "hlatkydavid/vnmrjpy",
"max_issues_repo_path": "vnmrjpy/func/fieldmap.py",
"max_line_length": 80,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "48707a1000dc87e646e37c8bd686e695bd31a61e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "hlatkydavid/vnmrjpy",
"max_stars_repo_path": "vnmrjpy/func/fieldmap.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1911,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6822
} |
Quad Phone 2 on the Campus Payphones node
Phone Number: (530) 7599256
Location: Not in the Quad proper, but on the same block — it sits in the indented area next to the Memorial Union computer lab.
Description: Its identical twin, Quad Phone 1, is to its right.
Charge: 50 cents for local.
| {
"alphanum_fraction": 0.7651006711,
"author": null,
"avg_line_length": 27.0909090909,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "022a2499771e5345bf11425403e543ac771efdcd",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "voflo/Search",
"max_forks_repo_path": "lab/davisWiki/Quad_Phone_2.f",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "voflo/Search",
"max_issues_repo_path": "lab/davisWiki/Quad_Phone_2.f",
"max_line_length": 130,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "voflo/Search",
"max_stars_repo_path": "lab/davisWiki/Quad_Phone_2.f",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 77,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 298
} |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_transforms.fractal.ipynb (unless otherwise specified).
__all__ = ['MandelBrotFractalTransform', 'JuliaFractalTransform']
# Cell
import numpy as np
from albumentations.core.transforms_interface import ImageOnlyTransform
from PIL import Image
# Cell
class MandelBrotFractalTransform(ImageOnlyTransform):
    """Albumentations transform that renders a Mandelbrot orbit-trap fractal.

    Each grid point ``c`` is iterated under ``z -> z**deg + c`` starting from
    ``z0 = 0``. When an orbit first enters the square trap region around the
    origin, that output pixel is colored with the input-image pixel addressed
    by the trapped orbit position; untrapped points stay black.

    Args:
        n_iter: maximum number of orbit iterations per grid point.
        deg: exponent of the iteration map (2 = classic Mandelbrot set).
        delta: grid spacing of the sampled complex plane.
        always_apply: albumentations flag, apply regardless of ``p``.
        p: probability of applying the transform.
    """

    def __init__(
        self,
        n_iter: int = 10,
        deg: int = 2,
        delta: float = 0.01,
        always_apply: bool = False,
        p: float = 1.0,
    ):
        super(MandelBrotFractalTransform, self).__init__(always_apply, p)
        self.n_iter = n_iter
        self.deg = deg
        self.delta = delta
        # iteration map z -> z**deg + c
        self.F = lambda z, c, deg: np.power(z, deg) + c

    def apply(self, image, **params):
        image = Image.fromarray(image)
        # viewport of the complex plane; the classic deg == 2 set lives in
        # roughly [-2, 0.6] x [-1.2, 1.2]
        if self.deg == 2:
            x_min, x_max = -2.0, 0.6
            y_min, y_max = -1.2, 1.2
        else:
            x_min, x_max = -2.5, 2
            y_min, y_max = -1.5, 1.5
        delta = self.delta
        re, im = np.mgrid[x_min:x_max:delta, y_min:y_max:delta]
        c = (re + 1j * im).T
        x = np.zeros_like(c)  # Mandelbrot starts every orbit at z0 = 0
        # square orbit trap centered on the origin
        btmleft = (-0.5, -0.5)
        topright = (0.5, 0.5)
        fractal = np.zeros((*(np.abs(c).shape), 3))
        trapped = np.zeros_like(np.abs(c))
        with np.nditer(x, flags=["multi_index"], op_flags=["readwrite"]) as it:
            for point in it:
                for _ in range(self.n_iter):  # '_': avoid shadowing builtin iter
                    point[...] = self.F(point, c[it.multi_index], self.deg)
                    # stop once the orbit escapes or this pixel was already trapped
                    if (abs(point) > 10) or trapped[it.multi_index] == 1:
                        break
                    # check if this point can be trapped
                    if (point.real > btmleft[0] and point.real < topright[0]) and (
                        point.imag > btmleft[1] and point.imag < topright[1]
                    ):
                        trapped[it.multi_index] = 1
                        # Map the trapped position ((-0.5, 0.5) on each axis)
                        # onto image coordinates. Clamp to the last valid
                        # pixel: float rounding can make (real + 0.5) reach
                        # exactly 1.0, which would index one past the image
                        # edge (the JuliaFractalTransform twin papers over
                        # the same bug with a bare try/except).
                        px = min(int((point.real + 0.5) * image.width), image.width - 1)
                        py = min(int((point.imag + 0.5) * image.height), image.height - 1)
                        fractal[it.multi_index] = np.array(image.getpixel((px, py)))
        return fractal.astype(np.uint8)
# Cell
class JuliaFractalTransform(ImageOnlyTransform):
    """Albumentations transform that renders a Julia-set orbit-trap fractal.

    Unlike the Mandelbrot variant in this module, the sampled grid supplies
    the starting points z0 and the constant c is fixed at 0.3 + 0.6j for
    every orbit. Pixels whose orbit enters the square trap region are
    colored with the corresponding pixel of the input image.
    """

    def __init__(
        self,
        n_iter: int = 10,
        deg: int = 2,
        delta: float = 0.01,
        always_apply: bool = False,
        p: float = 1.0,
    ):
        super(JuliaFractalTransform, self).__init__(always_apply, p)
        self.n_iter = n_iter
        self.deg = deg
        self.delta = delta
        # iteration map z -> z**deg + c
        self.F = lambda z, c, deg: np.power(z, deg) + c

    def apply(self, image, **params):
        image = Image.fromarray(image)
        # viewport of the complex plane sampled for starting points
        if self.deg == 2:
            x_min, x_max = -1.2, 1.2
            y_min, y_max = -1.2, 1.2
        else:
            x_min, x_max = -1.5, 1.5
            y_min, y_max = -1.5, 1.5
        delta = self.delta
        re, im = np.mgrid[x_min:x_max:delta, y_min:y_max:delta]
        grid = (re + 1j * im).T
        # the grid itself provides z0; c is the same constant everywhere
        x = grid
        c = np.zeros_like(x)
        c.fill(0.3 + 0.6j)
        # square orbit trap centered on the origin (trap_size is unused)
        btmleft = (-0.5, -0.5)
        topright = (0.5, 0.5)
        trap_size = (1.0, 1.0)
        fractal = np.zeros((*(np.abs(c).shape), 3))
        trapped = np.zeros_like(np.abs(c))
        with np.nditer(x, flags=["multi_index"], op_flags=["readwrite"]) as it:
            for point in it:
                for iter in range(self.n_iter):
                    point[...] = self.F(point, c[it.multi_index], self.deg)
                    # if this point was trapped previously then break
                    if (abs(point) > 10) or trapped[it.multi_index] == 1:
                        break
                    # check if this point can be trapped
                    if (point.real > btmleft[0] and point.real < topright[0]) and (
                        point.imag > btmleft[1] and point.imag < topright[1]
                    ):
                        trapped[it.multi_index] = 1
                        # scale the trapped position onto image coordinates;
                        # (width - 1)/(height - 1) keeps indices in range for
                        # most inputs
                        pixel = (
                            int((point.real + 0.5) * (image.width - 1)),
                            int((point.imag + 0.5) * (image.height - 1)),
                        )
                        # NOTE(review): bare except silently maps any lookup
                        # failure to a black pixel (only a message is printed);
                        # an explicit IndexError catch or clamping would be safer
                        try:
                            fractal[it.multi_index] = np.array(image.getpixel(pixel))
                        except:
                            print(
                                f"ERROR: pixel: ({int( ( point.real + 0.5 ) * image.width )}, {int( ( point.imag + 0.5 ) * image.height )}), point: ({point})"
                            )
return fractal.astype(np.uint8) | {
"alphanum_fraction": 0.4702889703,
"author": null,
"avg_line_length": 33.4285714286,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "57e461821f2fd61a69b152dd8b58b3fb354181a1",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a43182f0c42896c5a9d5e519216ebc812cf7299d",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "satyajitghana/fractal_augmentation",
"max_forks_repo_path": "fractal_augmentation/transforms/fractal.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a43182f0c42896c5a9d5e519216ebc812cf7299d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "satyajitghana/fractal_augmentation",
"max_issues_repo_path": "fractal_augmentation/transforms/fractal.py",
"max_line_length": 159,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a43182f0c42896c5a9d5e519216ebc812cf7299d",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "satyajitghana/fractal_augmentation",
"max_stars_repo_path": "fractal_augmentation/transforms/fractal.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1290,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4914
} |
\section{Decomposition 8: UC22, UC23 (application upload and statistics)}
\subsection*{Selected architectural drivers}
The functional drivers are:
\begin{itemize}
\item \emph{UC22}: Upload an application
\item \emph{UC23}: Consult application statistics
\end{itemize}
% UC22:
% 1. The primary actor indicates that he/she wants to upload an application.
% IN CLIENT
%
% 2. The system asks the primary actor if he/she wants to update an existing application.
% IN CLIENT
%
% 3. The primary actor provides his/her choice.
%
% IN CLIENT
%
% 4. If the primary actor indicated that he/she wants to update an existing application,
% the system composes an overview of applications uploaded by the primary actor
% (e.g., as a list or table), presents this, and requests the primary actor to indicate which application to update.
%
% ApplicationProviderClient -> ApplicationProviderFacade: interface Applications: List<Application> getApplicationsOfApplicationProvider(int applicationProviderID)
% ApplicationProviderFacade -> ApplicationManager: interface FrontEndAppRequests: List<Application> getApplicationsOfApplicationProvider(int applicationProviderID)
% ApplicationManager -> OtherDataDB: interface DBAppMgmt: List<Application> getApplicationsOfApplicationProvider(int applicationProviderID)
%
% 5. The primary actor chooses the application to be updated.
% IN CLIENT
%
% 6. The system asks the primary actor if this is an update that should be automatically activated for existing subscriptions.
% IN CLIENT
%
% 7. The primary actor provides his/her choice.
% IN CLIENT
%
% 8. If the primary actor has indicated that existing subscriptions should be automatically updated, the system requests the versions (or range of versions) that should be automatically updated.
% IN CLIENT
%
% 9. The primary actor provides the requested information.
% IN CLIENT
%
% 10. The system asks the primary actor to upload the application code, a description for display to customer organisations, and meta-data (such as the version number and subscription price).
% IN CLIENT
%
% 11. The primary actor uploads the requested information.
% ApplicationProviderClient -> ApplicationProviderFacade: interface Applications: void uploadApplication(int applicationProviderID, ApplicationCode code, string description, Map<string, string> metaData, List<Version> versionsToUpdate)
% ApplicationProviderFacade -> ApplicationManagementLogic: interface FrontEndAppRequests: void uploadApplication(int applicationProviderID, ApplicationCode code, string description, Map<string, string> metaData, List<Version> versionsToUpdate)
% ApplicationManagementLogic -> OtherDataDB: interface DBAppMgmt: int createNewApplication(int applicationProviderID, ApplicationCode code, string description, Map<string, string> metaData, List<Version> versionsToUpdate)
%
% 12. The system performs automated application checks.
%
% ApplicationManagementLogic -> ApplicationContainerManager: interface AppInstanceMgmt: void testApplication(int applicationID, ApplicationCode code)
%
% 13. If the checks were successful, the system sends a notification to the primary actor (Include: UC15 : Send notification).
%
% ApplicationContainerManager -> ApplicationManagementLogic: interface ApplicationTesting: void applicationTestsSuccessful(int applicationID)
% ApplicationContainerManager-> ApplicationManagementLogic: interface ApplicationTesting: void applicationTestsUnsuccessful(int applicationID)
%
% ApplicationManageMentLogic -> NotificationHandler: interface Notify: notify(int userID) app provider
% ApplicationManageMentLogic -> NotificationHandler: interface Notify: notify(int userID) sys admin
%
% 14. The system makes the application available in the application store. In case of an updated application, it replaces the previous versions of the application.
%
% ApplicationManagementLogic -> OtherDataDB: interface DBAppMgmt: void updateApplicationAvailable(int applicationID)
%
% 15. In case of an updated application that should automatically be activated for existing subscriptions (cf. step 6),
% the system updates the existing subscriptions of the application to the new version (Include: UC17 : Activate an application).
%
% ApplicationManagementLogic -> OtherDataDB: interface DBAppMgmt: List<int> getApplicationInstancesToUpdate(int applicationID)
% ApplicationManagementLogic -> OtherDataDB: interface DBAppMgmt: ApplicationCode getApplicationCode(int applicationID)
% ApplicationManagementLogic -> ApplicationContainerManager: interface AppInstanceMgmt: void updateApplicationInstances(List<int> applicationInstanceIDs, ApplicationCode code)
% ApplicationManagementLogic -> OtherDataDB: interface DBAppMgmt: void updateApplicationInstancesOfApplication(int applicationID)
%
% ApplicationManagementLogic -> ApplicationContainerManager: interface AppInstanceMgmt: void createApplicationInstance(ApplicationInstance instance, ApplicationCode code)
% UC23:
% 1. The primary actor indicates that he/she wants to see an overview of uploaded applications.
%
% ApplicationProviderClient -> ApplicationProviderFacade: interface Applications: Map<Applications, Map<string, string>> getApplicationsWithStatsForApplicationProvider(int applicationProviderID)
%
% 2. The system retrieves all applications uploaded by the primary actor, and information about the amount of subscribers for that application. It presents this overview to the primary actor.
%
% ApplicationProviderFacade -> ApplicationManager: interface FrontEndAppRequests: Map<Applications, Map<string, string>> getApplicationsWithStatsForApplicationProvider(int applicationProviderID)
% ApplicationManager -> OtherDataDB: interface DBAppMgmt: Map<Applications, Map<string, string>> getApplicationsWithStatsForApplicationProvider(int applicationProviderID)
%
% 3. The primary actor selects an application.
%
% ApplicationProviderClient -> ApplicationProviderFacade: interface Applications: Map<string, string> getApplicationDetailedStats(int applicationProviderID, int applicationID)
%
% 4. If the selected application is an approved application, the system presents detailed statistics including the amount of subscribers.
%
% ApplicationProviderFacade -> ApplicationManager: interface FrontEndAppRequests: Map<string, string> getApplicationDetailedStats(int applicationID)
% ApplicationManager -> OtherDataDB: interface DBAppMgmt: Map<string, string> getApplicationDetailedStats(int applicationID)
\subsection*{Interfaces for child modules}
This section lists new interfaces assigned to the components defined
in the section above. Detailed information about each interface and
its methods can be found under chapter \ref{ch:elements-datatypes}.
\subsubsection{ApplicationProviderFacade}
\begin{itemize}
\item Applications
\end{itemize}
\subsubsection{ApplicationManagementLogic}
\begin{itemize}
\item ApplicationTesting
\end{itemize}
\subsection*{New data types}
This section lists the new data types introduced during this decomposition.
\begin{itemize}
\item Version
\item ApplicationCode
\end{itemize}
| {
"alphanum_fraction": 0.739755663,
"author": null,
"avg_line_length": 64.9421487603,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "7991d9dcbbfc822395574d981f0492f1904eded3",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "56f14be5801eb7c4a8dcb53f552ca36ffbc13066",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "arminnh/software-architecture-assignment",
"max_forks_repo_path": "part-2b/report/decomposition-8.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "56f14be5801eb7c4a8dcb53f552ca36ffbc13066",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "arminnh/software-architecture-assignment",
"max_issues_repo_path": "part-2b/report/decomposition-8.tex",
"max_line_length": 255,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "56f14be5801eb7c4a8dcb53f552ca36ffbc13066",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "arminnh/software-architecture-assignment",
"max_stars_repo_path": "part-2b/report/decomposition-8.tex",
"max_stars_repo_stars_event_max_datetime": "2017-12-16T08:12:00.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-12-16T08:12:00.000Z",
"num_tokens": 1567,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 7858
} |
# -----------------------------------------------------------------------------
#
# -*- coding: utf-8 -*-
#
# phlox-libdc1394/dc1394/frame.py
#
# Copyright (C) 2016, by Matthias Yang Chen <matthias_cy@outlook.com>
# All rights reserved.
#
# phlox-libdc1394 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# phlox-libdc1394 is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with phlox-libdc1394. If not,
# see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
from ctypes import ARRAY, c_byte
from numpy import ndarray
from .core import *
__all__ = ['Frame']
class Frame(ndarray):
    """
    A frame returned by the camera.
    All metadata are retained as attributes of the resulting image.
    """
    # camera handle and raw libdc1394 frame pointer, kept so the buffer can
    # be returned to the ring buffer via enqueue(); reset to None afterwards
    _cam = None
    _frame = None
    def __new__(cls, camera, frame):
        """
        Convert a dc1394 frame into a Frame instance.
        :param camera: capture handle the frame was dequeued from
        :param frame: ctypes pointer to the raw dc1394 frame structure
        :return: Frame (ndarray subclass) viewing the frame's image buffer
        """
        # wrap the raw image memory in a ctypes byte array — zero-copy view
        dtype = ARRAY(c_byte, frame.contents.image_bytes)
        buf = dtype.from_address(frame.contents.image)
        width, height = frame.contents.size
        pixels = width * height
        # old-style conditional: '<' when little-endian, else '>'
        endian = frame.contents.little_endian and '<' or '>'
        # unsigned int dtype sized by bytes per pixel (e.g. '<u1', '<u2')
        type_str = '%su%i' % (endian, frame.contents.image_bytes / pixels)
        img = ndarray.__new__(cls, shape=(height, width), dtype=type_str, buffer=buf)
        # copy the per-frame metadata onto the array instance
        img.frame_id = frame.contents.id
        img.frames_behind = frame.contents.frames_behind
        img.position = frame.contents.position
        img.packet_size = frame.contents.packet_size
        # NOTE(review): libdc1394's frame struct calls this field
        # packets_per_frame — confirm that .core really declares
        # packet_per_frame, otherwise this line raises AttributeError
        img.packets_per_frame = frame.contents.packet_per_frame
        img.timestamp = frame.contents.timestamp
        img.video_mode = video_modes[frame.contents.video_mode]
        img.data_depth = frame.contents.data_depth
        img.color_coding = color_codings[frame.contents.color_coding]
        img.color_filter = frame.contents.color_filter
        img.yuv_byte_order = frame.contents.yuv_byte_order
        img.stride = frame.contents.stride
        # save camera and frame for enqueue()
        img._frame = frame
        img._cam = camera
        return img
    def __array_finalize__(self, img):
        """
        Finalize the new Image class array.
        If called with an image object, inherit the properties of that image.
        """
        if img is None:
            return
        # do not inherit _frame and _cam since we also get called on copy()
        # and should not hold references to the frame in this case
        for key in ["position", "color_coding", "color_filter",
                    "yuv_byte_order", "stride", "packet_size",
                    "packets_per_frame", "timestamp", "frames_behind",
                    "frame_id", "data_depth", "video_mode"]:
            setattr(self, key, getattr(img, key, None))
    def enqueue(self):
        """
        Returns a frame to the ring buffer once it has been used.
        This method is also called implicitly on ``del``.
        Only call this method on the original frame obtained from
        Camera.dequeue` and not on its views, new-from-templates or
        copies. Otherwise an AttributeError will be raised.
        """
        # views/copies lack _frame entirely — that is the error case
        if not hasattr(self, "_frame"):  # or self.base is not None:
            raise AttributeError("can only enqueue the original frame")
        if self._frame is not None:
            dll.dc1394_capture_enqueue(self._cam, self._frame)
            # drop the references so a second enqueue is a harmless no-op
            self._frame = None
            self._cam = None
    # alias so the frame can be used with contextlib.closing, e.g.:
    # from contextlib import closing
    # with closing(camera.dequeue()) as im:
    #     do stuff with im
    close = enqueue
    def __del__(self):
        # best-effort implicit enqueue; views/copies raise AttributeError,
        # which is expected and ignored here
        try:
            self.enqueue()
        except AttributeError:
            pass
    @property
    def corrupt(self):
        """
        Whether this frame is corrupt.
        Returns ``True`` if the given frame has been detected to be
        corrupt (missing data, corrupted data, overrun buffer, etc.) and
        ``False`` otherwise.
        .. note::
            Certain types of corruption may go undetected in which case
            ``False`` will be returned erroneously. The ability to
            detect corruption also varies between platforms.
        .. note::
            Corrupt frames still need to be enqueued with `enqueue`
            when no longer needed by the user.
        """
        return bool(dll.dc1394_capture_is_frame_corrupt(self._cam, self._frame))
    def to_rgb(self):
        """
        Convert the image to an RGB image.
        Array shape is: (image.shape[0], image.shape[1], 3)
        Uses the dc1394_convert_to_RGB8() function for the conversion.
        """
        # output buffer: 3 bytes (R, G, B) per source pixel
        res = ndarray(3 * self.size, dtype='u1')
        shape = self.shape
        # flat byte view of this frame's buffer for the C converter
        inp = ndarray(shape=len(self.data), buffer=self.data, dtype='u1')
        dll.dc1394_convert_to_RGB8(inp, res, shape[1], shape[0],
                                   self.yuv_byte_order, self.color_coding,
                                   self.data_depth)
        res.shape = shape[0], shape[1], 3
        return res
    def to_mono8(self):
        """
        Convert the image to 8 bit gray scale.
        Uses the dc1394_convert_to_MONO8() function
        """
        res = ndarray(self.size, dtype='u1')
        shape = self.shape
        # flat byte view of this frame's buffer for the C converter
        inp = ndarray(shape=len(self.data), buffer=self.data, dtype='u1')
        dll.dc1394_convert_to_MONO8(inp, res, shape[1], shape[0],
                                    self.yuv_byte_order, self.color_coding,
                                    self.data_depth)
        res.shape = shape
        return res
    def to_yuv422(self):
        """
        Convert the image to YUV422 color format.
        Uses the dc1394_convert_to_YUV422() function
        """
        res = ndarray(self.size, dtype='u1')
        shape = self.shape
        # flat byte view of this frame's buffer for the C converter
        inp = ndarray(shape=len(self.data), buffer=self.data, dtype='u1')
        dll.dc1394_convert_to_YUV422(inp, res, shape[1], shape[0],
                                     self.yuv_byte_order, self.color_coding,
                                     self.data_depth)
        # reinterpret the byte buffer as 16-bit YUV422 samples
        return ndarray(shape=shape, buffer=res.data, dtype='u2')
| {
"alphanum_fraction": 0.6030410393,
"author": null,
"avg_line_length": 38.0561797753,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "22df73cb320628fed7eb171e06fab821cbb9e8f4",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f312569ec983b5f27c75846b34debc04fe7bdf98",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "jardinier/phlox",
"max_forks_repo_path": "PhloxAR/dc1394/frame.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f312569ec983b5f27c75846b34debc04fe7bdf98",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "jardinier/phlox",
"max_issues_repo_path": "PhloxAR/dc1394/frame.py",
"max_line_length": 85,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "f312569ec983b5f27c75846b34debc04fe7bdf98",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "jardinier/phlox",
"max_stars_repo_path": "PhloxAR/dc1394/frame.py",
"max_stars_repo_stars_event_max_datetime": "2016-05-22T00:12:14.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-05-22T00:12:14.000Z",
"num_tokens": 1478,
"path": null,
"reason": "from numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6774
} |
#' @title save_data
#' @description This function takes either a dataframe or all of the data you've filtered, and rolls it up into a csv and/
#' or a shapefile for continued analysis
#' @param df This is a dataframe that you want to save in some other format. If a spatial format is selected
#' (e.g. shapefile), it must have LATITUDE and LONGITUDE fields
#' @param filename default is \code{NULL}. This will be the prefix of your filename
#' @param df.crs This is the CRS value for your dataframe. This should be the reference system that your data is known to be in.
#' The default value \code{"EPSG:4326"} is WGS84 and is appropriate for most data collected using a GPS.
#' @param db default is \code{NULL}. This identifies the dataset you are working
#' with.
#' @param req.coords default is TRUE. This filters out records without values for LATITUDE or
#' LONGITUDE. The function aborts if req.coords=TRUE and no records remain.
#' @param lat.field the default is \code{"LATITUDE"}. the name of the field holding latitude values (in decimal degrees)
#' @param lon.field the default is \code{"LONGITUDE"}. the name of the field holding longitude values (in decimal degrees)
#' @param formats This is a vector of the formats in which you would like to save the current data,
#' including "raw" for a (local) dataframe, "sp" for a (local) SpatialPointsDataFrame,
#' "shp" for a shapefile or "csv" (both written to the wd). The raw and sp objects will
#' just have the name specified by filename, while the csv and shapefiles, since they're
#' written externally also get a timestamp.
#' @param env This is the environment you want this function to work in. The
#' default value is \code{.GlobalEnv}.
#' @family general_use
#' @author Mike McMahon, \email{Mike.McMahon@@dfo-mpo.gc.ca}
#' @export
save_data <- function(db = NULL, df= NULL, filename = NULL, df.crs = "EPSG:4326",
                      req.coords=TRUE,
                      lat.field = "LATITUDE",
                      lon.field = "LONGITUDE",
                      formats = c('csv', 'shp'),
                      env=.GlobalEnv){
  # warn early: records without coordinates cannot appear in a shapefile
  if (req.coords == FALSE & 'shp' %in% formats) warning("\n","Since req.coords = FALSE, not all of the
                                                        records necessarily have positions and will not be visible in your shapefile")
  # no explicit df: roll up the currently filtered data instead
  # NOTE(review): ds_all[[.GlobalEnv$db]]$db looks up the db name from the
  # global environment rather than this function's db argument — confirm
  # whether the db parameter is intentionally unused here
  if (is.null(df)) {
    df = summarize_catches(db=ds_all[[.GlobalEnv$db]]$db, valid.coords = req.coords, env=env, drop.na.cols = F)
    if (is.null(df)){
      cat("\n","No records to save")
      return(NULL)
    }
  }
  # derive a file prefix: either the supplied filename or the deparsed call
  if (is.null(filename)) {
    name = match.call()[2]
  }else {
    name = filename
  }
  # NOTE(review): '()' is treated as a regex (an empty capture group that
  # matches the empty string), not a literal "()"; fixed=TRUE was probably
  # intended to strip the parentheses from the deparsed call — confirm
  name = gsub('()','',name)
  name = gsub('\\.','',name)
  # timestamp suffix so repeated saves do not overwrite each other
  ts = format(Sys.time(), "%Y%m%d_%H%M")
  fn = paste(name,"_",ts,sep="" )
  #id posix and date fields
  # flatten POSIXct/Date columns to plain dates (shapefile-safe types)
  df=data.frame(lapply(df, function(x) if(inherits(x, "POSIXct")|inherits(x, "Date")) as.Date(strftime(x, format="%Y-%m-%d")) else x))
  # 'raw': drop a plain data.frame into the requested environment
  if ('raw' %in% formats) assign(paste0("raw_",name), df, envir = env)
  if ('shp' %in% formats | 'sp' %in% formats){
    # QC out rows with invalid coordinates, then build the spatial object
    df.sp = Mar.utils::df_qc_spatial(df, lat.field, lon.field)
    df.sp = sp::SpatialPointsDataFrame(
      coords = df.sp[, c(lon.field, lat.field)],
      data = df.sp,
      proj4string = sp::CRS(SRS_string=df.crs)
    )
    if (nrow(df.sp@data) != nrow(df)) {
      cat("\n",paste0(nrow(df)-nrow(df.sp@data), " records were lost due to invalid coordinates"))
    }
    if ('sp' %in% formats) assign(paste0("sp_",name), df.sp, envir = env)
    if ('shp' %in% formats){
      df.sp = Mar.utils::prepare_shape_fields(df.sp)
      rgdal::writeOGR(df.sp, ".", fn, driver="ESRI Shapefile", overwrite_layer=TRUE)
      cat(paste0("\nWrote file to: ",file.path(getwd(),fn)))
    }
  }
  if ('csv' %in% formats){
    utils::write.csv(df, paste0(fn,".csv"))
  }
  # NOTE(review): 'rds' is supported here but not mentioned in the roxygen
  # @param formats documentation above
  if ('rds' %in% formats){
    saveRDS(df, paste0(fn,".rds"))
  }
  # return the (possibly summarized) data invisibly for further use
  return(invisible(df))
}
| {
"alphanum_fraction": 0.6494579246,
"author": null,
"avg_line_length": 49.6666666667,
"converted": null,
"ext": "r",
"file": null,
"hexsha": "0e2f97f4b488fcb50e53247cc74ed2f7c0f24718",
"include": null,
"lang": "R",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "19f31cdfc11033dacad547fae958d553d93cec7d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Maritimes/Mar.datawrangling",
"max_forks_repo_path": "R/save_data.r",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "19f31cdfc11033dacad547fae958d553d93cec7d",
"max_issues_repo_issues_event_max_datetime": "2017-11-08T19:39:31.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-11-08T19:39:31.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Maritimes/Mar.datawrangling",
"max_issues_repo_path": "R/save_data.r",
"max_line_length": 134,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "19f31cdfc11033dacad547fae958d553d93cec7d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Maritimes/Mar.datawrangling",
"max_stars_repo_path": "R/save_data.r",
"max_stars_repo_stars_event_max_datetime": "2020-03-11T21:01:57.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-02-07T23:42:10.000Z",
"num_tokens": 1080,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3874
} |
import sys
import numpy as np
import torch
from tqdm import trange
from atom_joggling.utils import ROOT, mean, parser
# Method specific options
# Mixup parameter of the Beta distribution
parser.add_argument("--alpha", default=0.75, type=float)
# weighting factor for the unlabeled loss term
parser.add_argument("--lambda-u", default=75, type=float)
# temperature used during sharpening of the pseudo-label distribution
parser.add_argument("--T", default=0.5, type=float)
# mixmatch used a default of 1024 but CIFAR10 is a way bigger dataset so 128 seems good
parser.add_argument(
    "--train-iterations", type=int, default=128, help="Number of batches per epoch"
)
args, _ = parser.parse_known_args()
# build a descriptive run directory if no custom out_dir was specified
if not args.resume and not args.out_dir:
    out_dir = f"{ROOT}/runs/mixup/iters={args.train_iterations}_T={args.T}"
    # NOTE(review): no separator before "alpha=" — the directory name comes out
    # as e.g. ".../iters=128_T=0.5alpha=0.75-lambdaU=75"; presumably an "_" was
    # intended. Confirm before changing: existing runs may use the fused name.
    out_dir += f"alpha={args.alpha}-lambdaU={args.lambda_u}"
    out_dir += "_robust" if args.robust else ""
    args.out_dir = out_dir
def train_with_mixup(
    labeled_loader, unlabeled_loader, model, optimizer, criterion, verbose=True
) -> tuple:
    """Train a model with mixup by randomly sampling linear combinations of
    labeled and unlabeled inputs as well as targets. Uses model-generated
    pseudo-labels to compute interpolated targets.
    Args:
        labeled_loader (torch.utils.data.DataLoader): labeled data
        unlabeled_loader (torch.utils.data.DataLoader): unlabeled data
        model (nn.Module): the instantiated model
        optimizer (torch.optim.Optimizer): optimizer
        criterion: loss function that computes labeled, unlabeled and combined losses
        verbose (bool): show the tqdm progress bar when True
    Returns:
        4-tuple of mean total loss, mean unlabeled loss, mean labeled loss
        (in the losses dict's insertion order: total, loss_u, loss_x) and
        the final unlabeled ramp weight u_ramp
    """
    losses = {"total": [], "loss_u": [], "loss_x": []}
    # manual iterators so either loader can be restarted mid-epoch below
    labeled_train_iter = iter(labeled_loader)
    unlabeled_train_iter = iter(unlabeled_loader)
    model.train()
    # file=sys.stdout (default stderr) prevents print order issues by using
    # same stream as print https://stackoverflow.com/a/45265707
    # NOTE(review): assumes args.train_iterations >= 1 — u_ramp referenced
    # after the loop is otherwise undefined
    for _ in trange(
        args.train_iterations, desc="Batches:", file=sys.stdout, disable=not verbose
    ):
        # cycle the labeled loader when exhausted
        try:
            inputs_x, targets_x, *_ = next(labeled_train_iter)
        except StopIteration:
            labeled_train_iter = iter(labeled_loader)
            inputs_x, targets_x, *_ = next(labeled_train_iter)
        # cycle the unlabeled loader when exhausted
        try:
            inputs_u, *_ = next(unlabeled_train_iter)
        except StopIteration:
            unlabeled_train_iter = iter(unlabeled_loader)
            inputs_u, *_ = next(unlabeled_train_iter)
        batch_size = targets_x.size(0)
        # Transform labels to one-hot
        targets_x = torch.nn.functional.one_hot(targets_x)
        # shared feature extractor applied to both batches
        crys_fea_x = model.material_nn(*inputs_x)
        crys_fea_u = model.material_nn(*inputs_u)
        with torch.no_grad():
            # compute guessed labels of unlabeled samples
            outputs_u = model(*inputs_u)
            # sharpen the model's output distribution (for entropy minimization)
            proba_u = outputs_u.softmax(dim=1)
            pt = proba_u ** (1 / args.T)
            targets_u = pt / pt.sum(dim=1, keepdim=True)
            targets_u = targets_u.detach()
        # mixup: convex combination of shuffled features/targets with
        # weight ell ~ Beta(alpha, alpha), biased toward the first operand
        all_inputs = torch.cat([crys_fea_x, crys_fea_u], dim=0)
        all_targets = torch.cat([targets_x, targets_u], dim=0)
        ell = np.random.beta(args.alpha, args.alpha)
        ell = max(ell, 1 - ell)
        idx = torch.randperm(all_inputs.size(0))
        input_a, input_b = all_inputs, all_inputs[idx]
        target_a, target_b = all_targets, all_targets[idx]
        mixed_input = ell * input_a + (1 - ell) * input_b
        mixed_target = ell * target_a + (1 - ell) * target_b
        # interleave labeled and unlabeled samples between batches to
        # get correct batchnorm calculation
        mixed_input = list(torch.split(mixed_input, batch_size))
        mixed_input = interleave(mixed_input, batch_size)
        logits = [model.output_nn(mixed_input[0])]
        for input in mixed_input[1:]:
            logits.append(model.output_nn(input))
        # put interleaved samples back
        logits = interleave(logits, batch_size)
        logits_x = logits[0]
        logits_u = torch.cat(logits[1:], dim=0)
        Lx, Lu, u_ramp = criterion(
            logits_x, mixed_target[:batch_size], logits_u, mixed_target[batch_size:]
        )
        loss = Lx + u_ramp * Lu
        # record loss
        losses["total"].append(loss.item())
        losses["loss_x"].append(Lx.item())
        losses["loss_u"].append(Lu.item())
        # update model weights
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # dicts are insertion ordered as of Python 3.6
    losses = (mean(x) for x in losses.values())
    return (*losses, u_ramp)
@torch.no_grad()
def validate_mixup(val_loader, model, criterion) -> tuple:
    """Evaluate ``model`` on ``val_loader``.

    Returns:
        tuple: (mean loss, mean accuracy) over all validation batches.
    """
    model.eval()

    loss_hist: list = []
    acc_hist: list = []

    for inputs, targets, *_ in val_loader:
        # forward pass (gradients disabled by the decorator)
        preds = model(*inputs)
        batch_loss = criterion(preds, targets)
        # fraction of correct top-1 predictions in this batch
        batch_acc = (preds.argmax(1) == targets).float().mean()
        loss_hist.append(batch_loss.item())
        acc_hist.append(batch_acc.item())

    return mean(loss_hist), mean(acc_hist)
def linear_rampup(current: int, rampup_length: int) -> float:
    """Linearly ramp the unlabeled-loss weight over the first epochs.

    As the pseudo-labels become more accurate over training, the unlabeled
    loss can safely be weighted more heavily.

    Args:
        current (float): current epoch/step count.
        rampup_length (int): number of epochs until the rampup completes;
            0 disables ramping entirely.

    Returns:
        float: weighting factor in [0, 1] (always 1.0 when
        ``rampup_length`` is 0).
    """
    if rampup_length == 0:
        return 1.0
    # fraction of the ramp completed, clipped into [0, 1]
    return min(1.0, max(0.0, current / rampup_length))
class SemiLoss:
    """Semi-supervised loss: labeled cross-entropy, unlabeled MSE, and a
    linearly ramped weight for the unlabeled term.

    Each call advances the ramp counter by one, so the returned weight
    grows toward ``args.lambda_u`` over ``u_ramp_length`` calls.
    """

    def __init__(self, u_ramp_length: int, ramp_start: int = 0) -> None:
        self.u_ramp_length = u_ramp_length
        self.ramp_count = ramp_start

    def __call__(self, preds_x, targets_x, preds_u, targets_u) -> tuple:
        self.ramp_count += 1
        # labeled term: cross-entropy against (possibly soft) targets
        log_probs_x = preds_x.log_softmax(dim=1)
        Lx = -(log_probs_x * targets_x).sum(dim=1).mean()
        # unlabeled term: MSE between predicted distribution and pseudo-targets
        Lu = (preds_u.softmax(dim=1) - targets_u).pow(2).mean()
        # current weight for the unlabeled loss
        weight = args.lambda_u * linear_rampup(self.ramp_count, self.u_ramp_length)
        return Lx, Lu, weight
def interleave_offsets(batch_size: int, nu: int) -> list:
    """Split ``batch_size`` into ``nu + 1`` near-equal groups and return
    the cumulative offsets delimiting them (length ``nu + 2``, starting
    at 0 and ending at ``batch_size``)."""
    base, remainder = divmod(batch_size, nu + 1)
    groups = [base] * (nu + 1)
    # spread the remainder one-by-one over the trailing groups
    for k in range(remainder):
        groups[-k - 1] += 1
    offsets = [0]
    for size in groups:
        offsets.append(offsets[-1] + size)
    assert offsets[-1] == batch_size
    return offsets
def interleave(xy: list, batch_size: int) -> list:
    """Interleave labeled and unlabeled samples between batches to
    get a correct batch-normalization calculation.

    Args:
        xy (list): inputs or targets split by batch size
        batch_size (int): batch size

    Returns:
        list: list of interleaved inputs or targets
    """
    nu = len(xy) - 1
    offsets = interleave_offsets(batch_size, nu)
    # slice every batch into nu + 1 groups along dim 0
    chunked = [[batch[offsets[i]: offsets[i + 1]] for i in range(nu + 1)]
               for batch in xy]
    # swap group i of the first batch with group i of batch i
    for i in range(1, nu + 1):
        chunked[0][i], chunked[i][i] = chunked[i][i], chunked[0][i]
    return [torch.cat(groups, dim=0) for groups in chunked]
| {
"alphanum_fraction": 0.6470119522,
"author": null,
"avg_line_length": 33.1718061674,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "37d4ebb25acea6ac26d7e6196c6ced81b9b07121",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-10-05T11:47:58.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-10-05T11:47:58.000Z",
"max_forks_repo_head_hexsha": "5d9b19f136634b5d4d7a92a5b815510902b7da54",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "janosh/atom-joggling",
"max_forks_repo_path": "atom_joggling/mixup.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5d9b19f136634b5d4d7a92a5b815510902b7da54",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "janosh/atom-joggling",
"max_issues_repo_path": "atom_joggling/mixup.py",
"max_line_length": 87,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "5d9b19f136634b5d4d7a92a5b815510902b7da54",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "janosh/atom-joggling",
"max_stars_repo_path": "atom_joggling/mixup.py",
"max_stars_repo_stars_event_max_datetime": "2021-10-05T11:47:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-10-05T11:47:56.000Z",
"num_tokens": 1879,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7530
} |
[STATEMENT]
lemma order_dense_order_ow_transfer[transfer_rule]:
assumes [transfer_rule]:
"bi_unique A" "right_total A" "bi_unique B" "right_total B"
shows
"(
rel_set A ===> (A ===> A ===> (=)) ===> (A ===> A ===> (=)) ===>
rel_set B ===> (B ===> B ===> (=)) ===> (B ===> B ===> (=)) ===>
(=)
) order_dense_order_ow order_dense_order_ow"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (rel_set A ===> (A ===> A ===> (=)) ===> (A ===> A ===> (=)) ===> rel_set B ===> (B ===> B ===> (=)) ===> (B ===> B ===> (=)) ===> (=)) order_dense_order_ow order_dense_order_ow
[PROOF STEP]
by (ow_locale_transfer locale_defs: order_dense_order_ow_def) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Types_To_Sets_Extension_Examples_TTS_Foundations_Orders_Set_Simple_Orders",
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 284,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
import scanpy as sc
import muon as mu
import numpy as np
# Viash component script: compute highly variable genes (HVG) for each
# requested modality of a MuData file and store the results in .var/.varm.
## VIASH START
# Fallback parameters for running the script standalone; at build time Viash
# replaces this block with the real component arguments.
par = {
    'input': 'resources_test/pbmc_1k_protein_v3/pbmc_1k_protein_v3_filtered_feature_bc_matrix.h5mu',
    'modality': ['rna'],
    'output': 'output.h5mu',
    'var_name_filter': 'filter_with_hvg',
    'do_subset': False,
    'flavor': 'seurat',
    'n_top_genes': 123,
    'min_mean': 0.0125,
    'max_mean': 3.0,
    'min_disp': 0.5,
    'span': 0.3,
    'n_bins': 20,
    'varm_name': 'hvg'
}
## VIASH END
# Load the MuData container and deduplicate variable names across modalities
mdata = mu.read_h5mu(par["input"])
mdata.var_names_make_unique()
for mod in par['modality']:
    print(f"Processing modality '{mod}'")
    data = mdata.mod[mod]
    #sc.pp.log1p(data)
    print(f"  Unfiltered data: {data}")
    print("  Computing hvg")
    # construct arguments
    # inplace=False so the result comes back as a DataFrame instead of being
    # written into data.var directly; subset=False keeps all features
    hvg_args = {
        'adata': data,
        'n_top_genes': par["n_top_genes"],
        'min_mean': par["min_mean"],
        'max_mean': par["max_mean"],
        'min_disp': par["min_disp"],
        'span': par["span"],
        'n_bins': par["n_bins"],
        'flavor': par["flavor"],
        'subset': False,
        'inplace': False
    }
    # only add parameter if it's passed
    if par.get("max_disp", None) is not None:
        hvg_args["max_disp"] = par["max_disp"]
    if par.get("obs_batch_key", None) is not None:
        hvg_args["batch_key"] = par["obs_batch_key"]
    # call function
    try:
        out = sc.pp.highly_variable_genes(**hvg_args)
        # re-attach the feature index so rows line up with data.var
        out.index = data.var.index
    except ValueError as err:
        # rewrap scanpy's cryptic message with a hint about the likely cause
        if str(err) == "cannot specify integer `bins` when input data contains infinity":
            err.args = ("Cannot specify integer `bins` when input data contains infinity. Perhaps input data has not been log normalized?",)
        raise err
    print("  Storing output into .var")
    if par.get("var_name_filter", None) is not None:
        data.var[par["var_name_filter"]] = out["highly_variable"]
    if par.get("varm_name", None) is not None:
        # drop mean_bin as muon/anndata doesn't support tuples
        data.varm[par["varm_name"]] = out.drop("mean_bin", axis=1)
    # optionally keep only the highly variable features in this modality
    if par["do_subset"]:
        keep_feats = np.ravel(data.var[par["var_name_filter"]])
        mdata.mod[mod] = data[:,keep_feats]
# # can we assume execution_log exists?
# if mdata.uns is None or "execution_log" not in mdata.uns:
#     mdata.uns["execution_log"] = []
# # store new entry
# new_entry = {"component": meta["functionality_name"], "params": par}
# mdata.uns["execution_log"].append(new_entry)
print("Writing h5mu to file")
mdata.write_h5mu(par["output"])
| {
"alphanum_fraction": 0.6294695481,
"author": null,
"avg_line_length": 29.5930232558,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "cfed615c93813dc24fbc5ba55b533312a1aae9d2",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "1e030961303b21b4351503cdc5f0a25d2879d094",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "openpipeline-bio/openpipeline",
"max_forks_repo_path": "src/filter/filter_with_hvg/script.py",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "1e030961303b21b4351503cdc5f0a25d2879d094",
"max_issues_repo_issues_event_max_datetime": "2022-02-02T11:05:20.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-10-20T12:58:52.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "openpipeline-bio/openpipeline",
"max_issues_repo_path": "src/filter/filter_with_hvg/script.py",
"max_line_length": 140,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "1e030961303b21b4351503cdc5f0a25d2879d094",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "openpipeline-bio/openpipeline",
"max_stars_repo_path": "src/filter/filter_with_hvg/script.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-11T12:46:43.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-10T13:16:09.000Z",
"num_tokens": 740,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2545
} |
#include <stdlib.h> /* srand, rand */
#include <iostream>
#include <fstream> // creating (.csv) files
#include <vector>
#include <cstring>
#include <string>
#include <functional>
#include <armadillo> // http://arma.sourceforge.net/docs.html
#include <thread> /* std::this_thread::sleep_for */
#include "EvolutionaryAlgorithm.hpp"
// NOT USED??
// enum events{
// BASE_RATE = 0,
// MUTA_RATE,
// PRED_RATE,
// PART_RATE,
// GENO_RATE,
// EV_A_RATE,
// };
//arma::rowvec semiFinalsAndFinals[SEMI_FINALS][TOURNMENT_RATE];
//arma::rowvec indvFinals[TOURNMENT_RATE];
// Appends the best individual's parameters to constValues.cfg,
// one "name = value" line per parameter.
void EvolutionaryAlgorithm::saveBestIndvParamsCSV(){
    std::fstream cfg;
    cfg.open("constValues.cfg", std::ios_base::app);
    cfg << parametersList[0] << " = " << population(bestFitIndex,0);
    for (size_t p = 1; p < NB_PARAMETERS; p++)
        cfg << std::endl << parametersList[p] << " = " << population(bestFitIndex,p);
    cfg.close();
}
// 1st step: initialize population
// For INITIALS: plain uniform random individuals scaled to MAX_PARAM_VALUE.
// For later tournament stages: the first TOURNMENT_RATE rows are the saved
// champions of the previous stage, half of the remaining rows are biased
// towards the champions' per-parameter [min, max] range, and the rest stays
// fully random.
void EvolutionaryAlgorithm::initializePop(int tournmentType) {
    double minVal,maxVal;
    std::cout << "INITIALIZING POPULATION\n";
    // for (int i = 0; i < POPULATION_SIZE; i++) {
    //     for (int j = 0; j < NB_PARAMETERS; j++) {
    //         population[i][j] = (double) (rand() % MAX_PARAM_VALUE); // number range = [0, MAX_PARAM_VALUE[
    //     }
    // }
    population.randu(); // initialize with values between 0 and 1
    if(tournmentType == INITIALS) population *= MAX_PARAM_VALUE; // if a normal EA is taking place, just multiply the population by a given value
    else{
        // boundary between the champion-biased rows and the unbiased rows
        int middle = TOURNMENT_RATE + (POPULATION_SIZE - TOURNMENT_RATE)/2;
        fillInitialsWithBests(tournmentType);
        for(int j = 0;j< NB_PARAMETERS;j++){ // Min and max values are defined for each col, and the current col values are remapped into these constraints.
            // this starts after the first TOURNMENT_RATE individuals; the remainder is divided in two parts, one with biased rand values and one without
            minVal = MIN_MULT * bestTournmentIndv[tournmentType].col(j).min();
            maxVal = MAX_MULT * bestTournmentIndv[tournmentType].col(j).max();
            for(int i = TOURNMENT_RATE;i<middle;i++){
                population(i,j) = minVal + (maxVal - minVal)*(population(i,j));
            }
        }
        // unbiased tail: keep the plain uniform draws, just rescaled
        for(int i = middle;i < POPULATION_SIZE;i++)
            population.row(i) *= MAX_PARAM_VALUE;
    }
    population.print("Population matrix initialized:");
    return;
}
// Maps every fitness value into [0, 1]: 0 for the best individual,
// 1 for the worst. No-op when the output buffer is missing.
void EvolutionaryAlgorithm::normalizeFitness(double * normalizedFitness) {
    if (normalizedFitness == NULL)
        return;
    const double span = worstFitness - bestFitness;
    for (int i = 0; i < POPULATION_SIZE; i++)
        normalizedFitness[i] = (fitness[i] - bestFitness) / span;
}
// 2nd step: evaluate population (calculate fitness)
// Launches one game per individual (results come back asynchronously via
// setFitness()), waits for all of them, then updates the best/worst trackers.
// Smaller fitness is treated as better throughout.
void EvolutionaryAlgorithm::evaluatePop() {
    std::cout << "EVALUATING POPULATION\n";
    nbNoImprovementGens++; // we begin considering there was no improvement in the generation
    pthread_mutex_lock(&mutex);
    remainingFitnessToCalc = POPULATION_SIZE;
    pthread_mutex_unlock(&mutex);
    // start one game per individual, passing its parameter vector
    for (int i = 0; i<POPULATION_SIZE; i++){
        m_eaGameControler.startGame(i,arma::conv_to<std::vector<double>>::from(population.row(i)));
    }
    // NOTE(review): mutex2 appears to be used as a counting semaphore — each
    // finished game presumably unlocks it once, so locking POPULATION_SIZE
    // times blocks until every fitness value has arrived. Confirm against the
    // unlocking side before refactoring.
    for (int i = 0; i< POPULATION_SIZE; i++){
        pthread_mutex_lock(&mutex2);
    }
    for (int i = 0; i < POPULATION_SIZE; i++) {
        // example of fitness calculation -> CHANGE!!!!
        // fitness[i] = abs(terminoReal - terminoEsperado)/terminoEsperado
        if (fitness[i] < bestFitness){ // searching for the best (lowest) fitness of the new generation
            bestFitness = fitness[i];
            bestFitIndex = i;
            nbNoImprovementGens = 0; // this generation shows improvement -> reset counter
        }
        else if (fitness[i] > worstFitness){
            worstFitness = fitness[i];
            worstFitIndex = i;
        }
    }
    printf("BEST FITNESS: %lf - INDEX: %d\n", bestFitness, bestFitIndex);
    printf("WORST FITNESS: %lf - INDEX: %d\n", worstFitness, worstFitIndex);
    return;
}
// Generates individual `indv` as the arithmetic mean of its two parents.
void EvolutionaryAlgorithm::crossover(int indv, arma::rowvec parent1, arma::rowvec parent2) {
    population.row(indv) = (parent1 + parent2)/2.0;
}
void EvolutionaryAlgorithm::elitism() {
std::cout << "ELITISM\n";
arma::rowvec bestIndv = population.row(bestFitIndex);
for (int i = 0; i < POPULATION_SIZE; i++) {
// crossover
crossover(i, population.row(i), bestIndv);
}
return;
}
void EvolutionaryAlgorithm::tournament() {
std::cout << "TOURNAMENT\n";
arma::mat::fixed<POPULATION_SIZE, NB_PARAMETERS> oldPopulation;
int parentIndex[2];
// copying last population (new one will be different)
// for (int i = 0; i < POPULATION_SIZE; i++) {
// for (int j = 0; j < NB_PARAMETERS; j++) {
// oldPopulation[i][j] = population[i][j];
// }
// }
oldPopulation = population;
for (int i = 0; i < POPULATION_SIZE; i++) {
if (i == bestFitIndex)
continue;
// chossing parents for new individual
for (int j = 0; j < 2; j++) {
int indexIndA = rand() % POPULATION_SIZE; // indv 1 that will "fight" to be parent
int indexIndB = rand() % POPULATION_SIZE; // indv 2 that will "fight" to be parent
parentIndex[j] = (fitness[indexIndA] < fitness[indexIndB] ? indexIndA : indexIndB);
}
// crossover
crossover(i, oldPopulation.row(parentIndex[0]), oldPopulation.row(parentIndex[1]));
}
return;
}
//TODO: swap priorities. Smaller fitness must be more relevant
// (currently a LARGER fitness gets a HIGHER selection probability, while the
// rest of the EA treats smaller fitness as better)
// Roulette-wheel selection: each individual is picked as a parent with a
// probability proportional to its share of the total fitness.
void EvolutionaryAlgorithm::roulette() {
    std::cout << "ROULETTE\n";
    arma::mat::fixed<POPULATION_SIZE, NB_PARAMETERS> oldPopulation;
    double standardizedFitness[POPULATION_SIZE];
    int parentIndex[2];
    double rNb; // FIX: was declared int, which truncated the [0,1] random draw to 0
    double probSum = 0.0, partialSum = 0.0;
    // copying last population (the new one is built from it)
    oldPopulation = population;
    // Standardize fitness (selection probabilities that add up to 1)
    for (int i = 0; i < POPULATION_SIZE; i++)
        probSum += fitness[i];
    for (int i = 0; i < POPULATION_SIZE; i++)
        standardizedFitness[i] = fitness[i]/probSum;
    // Choosing new parents for each individual
    for (int i = 0; i < POPULATION_SIZE; i++) {
        if (i == bestFitIndex) // preserves best individual
            continue;
        for (int k = 0; k < 2; k++) { // choosing 2 parents
            rNb = ((double) rand() / RAND_MAX); // rand between 0 and 1
            partialSum = 0.0;
            parentIndex[k] = POPULATION_SIZE - 1; // fallback against float rounding
            // FIX: accumulate the *standardized* fitness so the partial sum is
            // on the same [0,1] scale as rNb (the raw fitness sum was unbounded)
            for (int j = 0; j < POPULATION_SIZE; j++) {
                partialSum += standardizedFitness[j];
                if (partialSum >= rNb) {
                    parentIndex[k] = j; // new parent at index j
                    break;
                }
            }
        }
        // crossover
        crossover(i, oldPopulation.row(parentIndex[0]), oldPopulation.row(parentIndex[1]));
    }
}
// Mutates one randomly chosen parameter of an individual by a relative step
// of +/- mutationRate.
void EvolutionaryAlgorithm::mutate(int indIndex) {
    // decide whether the parameter grows or shrinks
    const int direction = (rand() % 2) ? MUTATE_POSITIVE_MULT : MUTATE_NEGATIVE_MULT;
    const int paramIdx = rand() % NB_PARAMETERS; // which parameter to disturb
    population(indIndex, paramIdx) += population(indIndex, paramIdx) * mutationRate * direction;
}
// 3rd step: apply the configured selection scheme (which performs the
// crossover), then mutate one parameter of every individual except the best.
void EvolutionaryAlgorithm::selectionAndMutation() {
    std::cout << "SELECTION\n";
    // dispatch to tournament(), elitism() or roulette()
    (this->*(selectionType[selectionMethod]))();
    // Mutation: one parameter per individual, champion excluded
    for (int indv = 0; indv < POPULATION_SIZE; indv++) {
        if (indv == bestFitIndex)
            continue; // don't mutate the best one
        mutate(indv);
    }
}
// Initialize a .csv file to save data from the EA.
// Writes the column headers:
// generation, paramsIndv1, fitnessIndv1 ..., paramsIndvN, fitnessIndvN, bestParams, bestFitness
void EvolutionaryAlgorithm::createCSV(std::string pathPos) {
    std::ofstream csvFileWriter;
    csvFileWriter.open("historyEA-" + pathPos + ".csv");
    if (!csvFileWriter.good()) {
        // FIX: the message was missing the "-" separator, so it reported a
        // file name ("historyEA<pathPos>.csv") that is never actually used
        std::cout << "[!] Error occurred while trying to create historyEA-" << pathPos << ".csv!\n";
        return;
    }
    csvFileWriter << "generation,";
    for (int i = 0; i < POPULATION_SIZE; i++)
        csvFileWriter << "paramsIndv" << i << ",fitnessIndv" << i << ",";
    csvFileWriter << "paramsBestIndv,fitnessBestIndv\n";
    return;
}
// Renders one individual's parameter vector as "[ p0 p1 ... pN ]".
std::string EvolutionaryAlgorithm::formatParamsString(int indvIndex) {
    std::string rendered("[ ");
    for (int p = 0; p < NB_PARAMETERS; p++)
        rendered += std::to_string(population(indvIndex, p)) + " ";
    rendered += "]";
    return rendered;
}
// Saves information about a generation in a .csv file
// Row layout: generation, paramsIndv1, fitnessIndv1 ..., paramsIndvN, fitnessIndvN, bestParams, bestFitness
// NOTE(review): this always appends to "historyEA.csv", but createCSV() writes
// the header to "historyEA-<pathPos>.csv" — the names never match, so data
// rows and headers end up in different files. Confirm which name is intended.
void EvolutionaryAlgorithm::saveGenerationData(int generation) {
    std::ofstream csvFileWriter;
    csvFileWriter.open("historyEA.csv", std::ios_base::app); // append instead of overwrite
    if (!csvFileWriter.good()) {
        std::cout << "[!] Error occurred while trying to open historyEA.csv!\n";
        return;
    }
    csvFileWriter << generation << ",";
    for (int i = 0; i < POPULATION_SIZE; i++) {
        csvFileWriter << formatParamsString(i) << "," << fitness[i] << ",";
    }
    // best individual is repeated at the end of the row
    csvFileWriter << formatParamsString(bestFitIndex) << "," << bestFitness << "\n";
}
// Raises the mutation rate: kick-starts it from zero with INITIAL_MUTATION,
// otherwise grows it geometrically, capped at MAX_MUTATION_RATE.
void EvolutionaryAlgorithm::increaseMutation() {
    if (mutationRate == 0.0) {
        mutationRate = INITIAL_MUTATION;
        return;
    }
    mutationRate *= MUTATION_INCREASE_RATIO;
    if (mutationRate > MAX_MUTATION_RATE)
        mutationRate = MAX_MUTATION_RATE; // mutation reached its max value
}
// Function that will kill the worst individual each "APPLY_PREDATION_INTERVAL" number of generations
void EvolutionaryAlgorithm::predationOfOne() {
arma::rowvec newInd(NB_PARAMETERS, arma::fill::randu); // creating totally new individual
newInd = newInd * MAX_PARAM_VALUE;
population.row(worstFitIndex) = newInd;
return;
}
// Advances to the selection method configured for the next batch of
// generations (a fixed sequence stored in partsSelectionMethods).
void EvolutionaryAlgorithm::partIncrease(){
    partPos++;
    selectionMethod = partsSelectionMethods[partPos];
}
// Function that will kill all individuals but the best to reset the population and generate new individuals (without biases)
void EvolutionaryAlgorithm::oneRemainingPopReset() {
std::cout << "APPLYING POPULATION RESET\n";
arma::rowvec best = population.row(bestFitIndex);
initializePop(INITIALS);
population.row(0) = best;
partIncrease();
return;
}
// Ends the current batch of generations: clearing continueBatch makes the
// loop in evoAlg() stop after the current iteration.
void EvolutionaryAlgorithm::endEABatch() {
    std::cout << "END EA BATCH";
    continueBatch = false;
}
// Generic check for whether an event (predation, population reset, mutation
// increase...) should fire: true when the stagnation counter is a multiple
// of that event's trigger period.
bool EvolutionaryAlgorithm::eventHappens(int eventType) {
    return (nbNoImprovementGens % eventTriggerModule[eventType]) == 0;
}
// Fires at most one stagnation event per generation, checking from index 5
// down to 1 (higher indices have larger cumulative trigger periods, i.e.
// rarer, heavier events — see startEventTriggerList()).
void EvolutionaryAlgorithm::checkEvents() {
    if(nbNoImprovementGens == 0)
        return; // this generation improved — nothing to trigger
    // FIX: the loop used `i++`, which walked past the end of
    // eventTriggerModule/eventTypes (undefined behavior) instead of
    // counting down 5,4,3,2,1
    for(int i = 5; i > 0; i--){
        if(eventHappens(i)) {
            (this->*(eventTypes[i-1]))(); // calls one of these functions: increaseMutation, predationOfOne, partIncrease, oneRemainingPopReset, fullPopReset
            return;
        }
    }
}
// Turns the per-event trigger periods into cumulative products so that
// events with higher indices fire progressively less often.
void EvolutionaryAlgorithm::startEventTriggerList(){
    for(int idx = 1; idx < 6; idx++)
        eventTriggerModule[idx] *= eventTriggerModule[idx - 1];
}
// Runs one batch of generations (evaluate -> select/mutate -> log -> events)
// until an event handler clears continueBatch. csvStr names this batch's
// history CSV.
void EvolutionaryAlgorithm::evoAlg(std::string csvStr){
    nbNoImprovementGens = 0;
    createCSV(csvStr);
    continueBatch = true;
    partPos = 0; // defines selection method of current batch of generations
    generationIndex = 0;
    while (continueBatch) {
        std::cout << "\n==== Generation " << generationIndex << " ====\n";
        // population.print("Current population:");
        evaluatePop();
        selectionAndMutation();
        saveGenerationData(generationIndex);
        checkEvents(); // checks if mutation should increase, predation or population reset should occur etc.
        // if fullPopReset() is called, continueEA = false
        generationIndex++;
        // NOTE(review): blocks waiting for a keypress every generation —
        // looks like a debugging aid; remove for unattended runs.
        scanf("%*c");
    }
    return;
}
// Copies the saved champions of the given tournament stage into the first
// TOURNMENT_RATE rows of the population.
void EvolutionaryAlgorithm::fillInitialsWithBests(int tournmentType){
    for(int row = 0; row < TOURNMENT_RATE; row++)
        population.row(row) = bestTournmentIndv[tournmentType].row(row);
}
// Plays one semifinal: runs TOURNMENT_RATE independent EAs, collects each
// run's champion, then runs a semifinal EA seeded with those champions and
// stores its winner as a finalist.
void EvolutionaryAlgorithm::semiFinalsTournment(int semiFinalPos){
    for(int run = 0; run < TOURNMENT_RATE; run++){
        initializePop(INITIALS);
        evoAlg("SF-" + std::to_string(semiFinalPos) + "_EA-" + std::to_string(run));
        bestTournmentIndv[SEMI_FINALS].row(run) = population.row(bestFitIndex); // saves the best individual from current EA
    }
    // semifinal EA seeded with the champions gathered above
    initializePop(SEMI_FINALS);
    evoAlg("SF-" + std::to_string(semiFinalPos));
    bestTournmentIndv[FINALS].row(semiFinalPos) = population.row(bestFitIndex); // this semifinal's finalist
}
// Top level of the tournament: plays every semifinal, then runs the final EA
// seeded with all semifinal champions and records the overall winner.
void EvolutionaryAlgorithm::finalTournment(){
    for(int sf = 0; sf < TOURNMENT_RATE; sf++)
        semiFinalsTournment(sf);
    initializePop(FINALS);
    evoAlg("Main"); // this EA will use the best individuals from each semifinal
    bestIndividual = population.row(bestFitIndex);
    std::cout << "EVOLUTIONARY ALGORITHM FINISHED!" << std::endl;
    //eaFinished = true;
}
// Seeds the RNG, builds the cumulative event-trigger table, clears the
// terminal and launches the whole tournament on a background thread
// (entry point: runScript).
EvolutionaryAlgorithm::EvolutionaryAlgorithm():eaFinished(false){
    srand(time(NULL));
    startEventTriggerList();
    system("clear"); // clear the terminal before printing generation logs
    pthread_create(&scriptThread, NULL, runScript, this);
}
// Blocks until the background EA thread finishes before the object dies.
EvolutionaryAlgorithm::~EvolutionaryAlgorithm(){
    pthread_join(scriptThread, NULL);
}
void *runScript(void *scriptFuncObject) {
EvolutionaryAlgorithm *script = (EvolutionaryAlgorithm*) scriptFuncObject;
script->finalTournment();
return NULL;
}
// Stores the fitness of individual `pos` as the accumulated relative error
// over all (expected, observed) result pairs, then decrements the counter of
// pending evaluations (see evaluatePop()). Smaller fitness is better.
void EvolutionaryAlgorithm::setFitness(int pos,std::vector<std::pair<int,int>> fitnessResults){
    fitness[pos] = 0;
    for(auto fitnessPair : fitnessResults){
        // FIX: both operands are ints, so the division truncated to an
        // integer (almost always 0) — compute the relative error in
        // floating point. fitness[] is a double array (see evaluatePop).
        // NOTE(review): divides by fitnessPair.first — confirm callers never
        // report an expected value of 0.
        fitness[pos] += std::abs(fitnessPair.first - fitnessPair.second) / (double) fitnessPair.first;
    }
    remainingFitnessToCalc -= 1;
    return;
}
"alphanum_fraction": 0.6446322379,
"author": null,
"avg_line_length": 34.0618336887,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "817f3f3e140d414af2ed2a806bf75ed146ffc175",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2f694ec2a36969af51162556b40fd513d6c1213e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Haltz01/GA-AdjustmentGameParameters",
"max_forks_repo_path": "EvolutionaryAlgorithm.cpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2f694ec2a36969af51162556b40fd513d6c1213e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Haltz01/GA-AdjustmentGameParameters",
"max_issues_repo_path": "EvolutionaryAlgorithm.cpp",
"max_line_length": 168,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2f694ec2a36969af51162556b40fd513d6c1213e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Haltz01/GA-AdjustmentGameParameters",
"max_stars_repo_path": "EvolutionaryAlgorithm.cpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4083,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 15975
} |
# NOTE(review): this file does not parse as valid Julia — `module ... <: ...`
# is not legal (only types can subtype an abstract type), `getPdf` returns an
# assignment expression, `pdf` recurses into itself unconditionally, and the
# nested `cdf` definition is missing its enclosing `end`s. The comments below
# flag each issue; the code itself is left untouched.
abstract type distribution end
module genericDistribution <: distribution  # NOTE(review): modules cannot subtype an abstract type
#default values
E(x)=0 #mean value #1st moment generating function
σ(x)=1 #variance #2nd moment generating function
function getPdf(x=smoothFunction)
    """
    generalization of (any )
    """
    # NOTE(review): `pdf(x)= smoothFunction` is a function definition used as a
    # return value, and `smoothFunction` is undefined in this scope
    return pdf(x)= smoothFunction
end
function pdf(x, range)
    # NOTE(review): unconditional self-call — infinite recursion; and this
    # function body is never closed with `end`
    return pdf(x, range)
function cdf(x, range) #TODO: approx Integral of pdf(x)
    return sum(pdf(x,range)) #sum of all points #assumes all points are reachable, and the function is continuous on all of them #no-Jumps model
end # module genericDistribution() genericDistribution()
| {
"alphanum_fraction": 0.7398373984,
"author": null,
"avg_line_length": 26.7391304348,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "a7f53b14cd62701c96c96999ba0ec9d89f90e6bb",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f920e93af5ece5c4f8751bf493047770689ecaff",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "adamwillisXanax/DeepLearner",
"max_forks_repo_path": "src/Distributions/genericDistribution.jl",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "f920e93af5ece5c4f8751bf493047770689ecaff",
"max_issues_repo_issues_event_max_datetime": "2022-02-01T05:33:04.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-01-23T12:20:47.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "adamwillisMastery/DeepLearner",
"max_issues_repo_path": "src/Distributions/genericDistribution.jl",
"max_line_length": 137,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "f920e93af5ece5c4f8751bf493047770689ecaff",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "adamwillisMastery/DeepLearner",
"max_stars_repo_path": "src/Distributions/genericDistribution.jl",
"max_stars_repo_stars_event_max_datetime": "2022-01-17T12:38:14.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-01-15T00:43:05.000Z",
"num_tokens": 148,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 615
} |
from collections import OrderedDict
import copy
import cPickle
import gzip
import os
import urllib
import random
import stat
import subprocess
import sys
import time
import numpy
import theano
from theano import tensor as T
# Root directory of the ATIS data files; override with the ATISDATA
# environment variable (defaults to the 'data' directory next to this
# module's parent directory).
PREFIX = os.getenv(
    'ATISDATA',
    os.path.join(os.path.split(os.path.abspath(os.path.dirname(__file__)))[0],
                 'data'))
# utils functions
def shuffle(lol, seed):
    '''
    lol :: list of lists as input
    seed :: seed for the shuffling

    Shuffle each list in place, applying the SAME permutation to every
    list (the RNG is re-seeded identically before each one).
    '''
    for sub in lol:
        random.seed(seed)
        random.shuffle(sub)
# start-snippet-1
def contextwin(l, win):
    '''
    win :: odd int, size of the context window
    l :: list/array of word indexes composing a sentence

    Return one window per word: the ``win`` indexes centered on that word,
    padded with -1 at the sentence borders.
    '''
    assert win % 2 == 1
    assert win >= 1
    words = list(l)
    pad = win // 2 * [-1]
    padded = pad + words + pad
    windows = [padded[i:i + win] for i in range(len(words))]
    assert len(windows) == len(words)
    return windows
# end-snippet-1
# data loading functions
def atisfold(fold):
    '''
    fold :: int in [0, 4], which cross-validation fold to read

    Load one fold of the ATIS dataset from PREFIX/atis.fold<fold>.pkl.gz
    and return (train_set, valid_set, test_set, dicts).
    '''
    assert fold in range(5)
    filename = os.path.join(PREFIX, 'atis.fold' + str(fold) + '.pkl.gz')
    # FIX: close the file handle even if unpickling fails (it was leaked)
    f = gzip.open(filename, 'rb')
    try:
        train_set, valid_set, test_set, dicts = cPickle.load(f)
    finally:
        f.close()
    return train_set, valid_set, test_set, dicts
# metrics function using conlleval.pl
def conlleval(p, g, w, filename, script_path):
    '''
    INPUT:
    p :: predictions
    g :: groundtruth
    w :: corresponding words

    OUTPUT:
    filename :: name of the file where the predictions
    are written. it will be the input of conlleval.pl script
    for computing the performance in terms of precision
    recall and f1 score

    OTHER:
    script_path :: path to the directory containing the
    conlleval.pl script
    '''
    out = ''
    for sl, sp, sw in zip(g, p, w):
        out += 'BOS O O\n'
        # FIX: the inner loop variable was named `w`, shadowing the `w`
        # parameter (harmless only because zip() had already consumed it —
        # an accident waiting to happen)
        for wl, wp, word in zip(sl, sp, sw):
            out += word + ' ' + wl + ' ' + wp + '\n'
        out += 'EOS O O\n\n'
    f = open(filename, 'w')
    f.writelines(out)
    f.close()
    return get_perf(filename, script_path)
def download(origin, destination):
    '''
    download the corresponding atis file
    from http://www-etud.iro.umontreal.ca/~mesnilgr/atis/

    origin :: URL to fetch
    destination :: local path the file is written to
    '''
    print 'Downloading data from %s' % origin
    urllib.urlretrieve(origin, destination)
def get_perf(filename, folder):
    ''' run conlleval.pl perl script to obtain
    precision/recall and F1 score

    filename :: prediction file in conll format (see conlleval())
    folder :: directory holding (or receiving) conlleval.pl
    Returns {'p': precision, 'r': recall, 'f1': f1score}.
    '''
    _conlleval = os.path.join(folder, 'conlleval.pl')
    # fetch the official scoring script on first use
    if not os.path.isfile(_conlleval):
        url = 'http://www-etud.iro.umontreal.ca/~mesnilgr/atis/conlleval.pl'
        download(url, _conlleval)
        os.chmod(_conlleval, stat.S_IRWXU)  # give the execute permissions
    # feed the prediction file to conlleval.pl on stdin
    proc = subprocess.Popen(["perl",
                            _conlleval],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    stdout, _ = proc.communicate(''.join(open(filename).readlines()))
    # NOTE(review): if the script prints no 'accuracy' line, `out` stays
    # unbound and the float() calls below raise NameError — confirm the
    # script always emits it for valid input.
    for line in stdout.split('\n'):
        if 'accuracy' in line:
            out = line.split()
            break
    # parse "precision: xx.xx%; recall: xx.xx%; FB1: xx.xx" fields
    precision = float(out[6][:-2])
    recall = float(out[8][:-2])
    f1score = float(out[10])
    return {'p': precision, 'r': recall, 'f1': f1score}
# start-snippet-2
class RNNSLU(object):
    ''' elman neural net model '''
    def __init__(self, nh, nc, ne, de, cs):
        '''
        nh :: dimension of the hidden layer
        nc :: number of classes
        ne :: number of word embeddings in the vocabulary
        de :: dimension of the word embeddings
        cs :: word window context size
        '''
        # parameters of the model
        # emb: word-embedding matrix, one row per vocabulary entry
        self.emb = theano.shared(name='embeddings',
                                 value=0.2 * numpy.random.uniform(-1.0, 1.0,
                                 (ne+1, de))
                                 # add one for padding at the end
                                 .astype(theano.config.floatX))
        # wx: input-to-hidden weights (context window of embeddings -> hidden)
        self.wx = theano.shared(name='wx',
                                value=0.2 * numpy.random.uniform(-1.0, 1.0,
                                (de * cs, nh))
                                .astype(theano.config.floatX))
        # wh: recurrent hidden-to-hidden weights
        self.wh = theano.shared(name='wh',
                                value=0.2 * numpy.random.uniform(-1.0, 1.0,
                                (nh, nh))
                                .astype(theano.config.floatX))
        # w: hidden-to-output weights
        self.w = theano.shared(name='w',
                               value=0.2 * numpy.random.uniform(-1.0, 1.0,
                               (nh, nc))
                               .astype(theano.config.floatX))
        # bh / b: hidden and output biases; h0: initial hidden state
        self.bh = theano.shared(name='bh',
                                value=numpy.zeros(nh,
                                dtype=theano.config.floatX))
        self.b = theano.shared(name='b',
                               value=numpy.zeros(nc,
                               dtype=theano.config.floatX))
        self.h0 = theano.shared(name='h0',
                                value=numpy.zeros(nh,
                                dtype=theano.config.floatX))
        # bundle
        self.params = [self.emb, self.wx, self.wh, self.w,
                       self.bh, self.b, self.h0]
        # end-snippet-2
        # as many columns as context window size
        # as many lines as words in the sentence
        # start-snippet-3
        idxs = T.imatrix()
        x = self.emb[idxs].reshape((idxs.shape[0], de*cs))
        y_sentence = T.ivector('y_sentence')  # labels
        # end-snippet-3 start-snippet-4
        # one Elman step: next hidden state and softmax over classes
        def recurrence(x_t, h_tm1):
            h_t = T.nnet.sigmoid(T.dot(x_t, self.wx)
                                 + T.dot(h_tm1, self.wh) + self.bh)
            s_t = T.nnet.softmax(T.dot(h_t, self.w) + self.b)
            return [h_t, s_t]
        # unroll the recurrence over all words of the sentence
        [h, s], _ = theano.scan(fn=recurrence,
                                sequences=x,
                                outputs_info=[self.h0, None],
                                n_steps=x.shape[0])
        p_y_given_x_sentence = s[:, 0, :]
        y_pred = T.argmax(p_y_given_x_sentence, axis=1)
        # end-snippet-4
        # cost and gradients and learning rate
        # start-snippet-5
        lr = T.scalar('lr')
        # negative log-likelihood of the gold label at each position
        sentence_nll = -T.mean(T.log(p_y_given_x_sentence)
                               [T.arange(x.shape[0]), y_sentence])
        sentence_gradients = T.grad(sentence_nll, self.params)
        # plain SGD update for every parameter
        sentence_updates = OrderedDict((p, p - lr*g)
                                       for p, g in
                                       zip(self.params, sentence_gradients))
        # end-snippet-5
        # theano functions to compile
        # start-snippet-6
        self.classify = theano.function(inputs=[idxs], outputs=y_pred)
        self.sentence_train = theano.function(inputs=[idxs, y_sentence, lr],
                                              outputs=sentence_nll,
                                              updates=sentence_updates)
        # end-snippet-6 start-snippet-7
        # keep each word embedding on the unit sphere after every update
        self.normalize = theano.function(inputs=[],
                                         updates={self.emb:
                                                  self.emb /
                                                  T.sqrt((self.emb**2)
                                                         .sum(axis=1))
                                                  .dimshuffle(0, 'x')})
        # end-snippet-7
    def train(self, x, y, window_size, learning_rate):
        # build context windows, run one SGD step, then renormalize embeddings
        cwords = contextwin(x, window_size)
        words = map(lambda x: numpy.asarray(x).astype('int32'), cwords)
        labels = y
        self.sentence_train(words, labels, learning_rate)
        self.normalize()
    def save(self, folder):
        # one .npy file per parameter, named after the shared variable
        for param in self.params:
            numpy.save(os.path.join(folder,
                       param.name + '.npy'), param.get_value())
    def load(self, folder):
        # restore the parameters written by save()
        for param in self.params:
            param.set_value(numpy.load(os.path.join(folder,
                            param.name + '.npy')))
def main(param=None):
    """Train the RNNSLU slot-filling model on ATIS with early stopping.

    param: optional dict of hyperparameters; when omitted, the defaults
    below are used.  Results (best F1, precision, recall, best epoch)
    are written back into `param`.  Python 2 code (print statements,
    xrange, iteritems).
    """
    if not param:
        param = {
            'fold': 3,
            # 5 folds 0,1,2,3,4
            'data': 'atis',
            'lr': 0.0970806646812754,
            'verbose': 1,
            'decay': True,
            # decay on the learning rate if improvement stops
            'win': 7,
            # number of words in the context window
            'nhidden': 200,
            # number of hidden units
            'seed': 345,
            'emb_dimension': 50,
            # dimension of word embedding
            'nepochs': 60,
            # 60 is recommended
            'savemodel': False}
    print param
    # output folder named after this script, created next to it
    folder_name = os.path.basename(__file__).split('.')[0]
    folder = os.path.join(os.path.dirname(__file__), folder_name)
    if not os.path.exists(folder):
        os.mkdir(folder)
    # load the dataset
    train_set, valid_set, test_set, dic = atisfold(param['fold'])
    # invert the word/label vocabularies: index -> string
    idx2label = dict((k, v) for v, k in dic['labels2idx'].iteritems())
    idx2word = dict((k, v) for v, k in dic['words2idx'].iteritems())
    train_lex, train_ne, train_y = train_set
    valid_lex, valid_ne, valid_y = valid_set
    test_lex, test_ne, test_y = test_set
    # vocabulary / label-set sizes over all three splits
    vocsize = len(set(reduce(lambda x, y: list(x) + list(y),
                             train_lex + valid_lex + test_lex)))
    nclasses = len(set(reduce(lambda x, y: list(x)+list(y),
                              train_y + test_y + valid_y)))
    nsentences = len(train_lex)
    # human-readable references for conlleval scoring
    groundtruth_valid = [map(lambda x: idx2label[x], y) for y in valid_y]
    words_valid = [map(lambda x: idx2word[x], w) for w in valid_lex]
    groundtruth_test = [map(lambda x: idx2label[x], y) for y in test_y]
    words_test = [map(lambda x: idx2word[x], w) for w in test_lex]
    # instanciate the model
    numpy.random.seed(param['seed'])
    random.seed(param['seed'])
    rnn = RNNSLU(nh=param['nhidden'],
                 nc=nclasses,
                 ne=vocsize,
                 de=param['emb_dimension'],
                 cs=param['win'])
    # train with early stopping on validation set
    best_f1 = -numpy.inf
    param['clr'] = param['lr']  # current (possibly decayed) learning rate
    for e in xrange(param['nepochs']):
        # shuffle
        shuffle([train_lex, train_ne, train_y], param['seed'])
        param['ce'] = e  # current epoch
        tic = time.time()
        for i, (x, y) in enumerate(zip(train_lex, train_y)):
            rnn.train(x, y, param['win'], param['clr'])
            print '[learning] epoch %i >> %2.2f%%' % (
                e, (i + 1) * 100. / nsentences),
            print 'completed in %.2f (sec) <<\r' % (time.time() - tic),
            sys.stdout.flush()
        # evaluation // back into the real world : idx -> words
        predictions_test = [map(lambda x: idx2label[x],
                            rnn.classify(numpy.asarray(
                                contextwin(x, param['win'])).astype('int32')))
                            for x in test_lex]
        predictions_valid = [map(lambda x: idx2label[x],
                             rnn.classify(numpy.asarray(
                                 contextwin(x, param['win'])).astype('int32')))
                             for x in valid_lex]
        # evaluation // compute the accuracy using conlleval.pl
        res_test = conlleval(predictions_test,
                             groundtruth_test,
                             words_test,
                             folder + '/current.test.txt',
                             folder)
        res_valid = conlleval(predictions_valid,
                              groundtruth_valid,
                              words_valid,
                              folder + '/current.valid.txt',
                              folder)
        if res_valid['f1'] > best_f1:
            # validation improved: snapshot the model and record metrics
            if param['savemodel']:
                rnn.save(folder)
            best_rnn = copy.deepcopy(rnn)
            best_f1 = res_valid['f1']
            if param['verbose']:
                # NOTE: under Python 2 this prints a tuple repr
                print('NEW BEST: epoch', e,
                      'valid F1', res_valid['f1'],
                      'best test F1', res_test['f1'])
            param['vf1'], param['tf1'] = res_valid['f1'], res_test['f1']
            param['vp'], param['tp'] = res_valid['p'], res_test['p']
            param['vr'], param['tr'] = res_valid['r'], res_test['r']
            param['be'] = e  # best epoch
            subprocess.call(['mv', folder + '/current.test.txt',
                            folder + '/best.test.txt'])
            subprocess.call(['mv', folder + '/current.valid.txt',
                            folder + '/best.valid.txt'])
        else:
            if param['verbose']:
                print ''
        # learning rate decay if no improvement in 10 epochs
        if param['decay'] and abs(param['be']-param['ce']) >= 10:
            param['clr'] *= 0.5
            rnn = best_rnn  # roll back to the best snapshot
        if param['clr'] < 1e-5:
            break
    print('BEST RESULT: epoch', param['be'],
          'valid F1', param['vf1'],
          'best test F1', param['tf1'],
          'with the model', folder)
if __name__ == '__main__':
    main()
| {
"alphanum_fraction": 0.5116349123,
"author": null,
"avg_line_length": 34.0487179487,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "4ab80b54b27e6bf293f12223a4a70e109b8a6252",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d2d1b0148989187c1433597f9c3ae4357178c082",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "fegonda/icon_demo",
"max_forks_repo_path": "code/external/rnnslu.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d2d1b0148989187c1433597f9c3ae4357178c082",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "fegonda/icon_demo",
"max_issues_repo_path": "code/external/rnnslu.py",
"max_line_length": 78,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d2d1b0148989187c1433597f9c3ae4357178c082",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "fegonda/icon_demo",
"max_stars_repo_path": "code/external/rnnslu.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3121,
"path": null,
"reason": "import numpy,import theano,from theano",
"repo": null,
"save_path": null,
"sha": null,
"size": 13279
} |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""export checkpoint file into air, onnx, mindir models"""
import argparse
import ast
import os
import numpy as np
from mindspore import Tensor, context, load_checkpoint, load_param_into_net, export
import mindspore.common.dtype as mstype
from mindspore import nn
from src.cell import WithLossCellD, WithLossCellG
from src.dcgan import DCGAN
from src.discriminator import Discriminator
from src.generator import Generator
from src.config import dcgan_imagenet_cfg as cfg
# Command-line interface for exporting a trained DCGAN checkpoint.
# NOTE(review): the description says 'ntsnet export' and --file_name
# defaults to "ntsnet", but this script builds/exports a DCGAN — these
# look like copy-paste leftovers from the ntsnet export script; confirm
# before relying on the default output name.
parser = argparse.ArgumentParser(description='ntsnet export')
parser.add_argument("--run_modelart", type=ast.literal_eval, default=False, help="Run on modelArt, default is false.")
parser.add_argument("--device_id", type=int, default=0, help="Device id")
parser.add_argument("--batch_size", type=int, default=128, help="batch size")
parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file name.")
parser.add_argument('--data_url', default=None, help='Directory contains CUB_200_2011 dataset.')
parser.add_argument('--train_url', default=None, help='Directory contains checkpoint file')
parser.add_argument("--file_name", type=str, default="ntsnet", help="output file name.")
parser.add_argument("--file_format", type=str, default="MINDIR", help="file format")
parser.add_argument('--device_target', type=str, default="Ascend",
                    choices=['Ascend', 'GPU', 'CPU'], help='device target (default: Ascend)')
args = parser.parse_args()
# Graph-mode execution; device_id only applies to Ascend targets.
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
if args.device_target == "Ascend":
    context.set_context(device_id=args.device_id)
if __name__ == '__main__':
    # Rebuild the full DCGAN training graph (the checkpoint was saved from
    # the train-step cells, so the same structure is needed to load it).
    netD = Discriminator()
    netG = Generator()
    criterion = nn.BCELoss(reduction='mean')
    netD_with_criterion = WithLossCellD(netD, netG, criterion)
    netG_with_criterion = WithLossCellG(netD, netG, criterion)
    optimizerD = nn.Adam(netD.trainable_params(), learning_rate=cfg.learning_rate, beta1=cfg.beta1)
    optimizerG = nn.Adam(netG.trainable_params(), learning_rate=cfg.learning_rate, beta1=cfg.beta1)
    myTrainOneStepCellForD = nn.TrainOneStepCell(netD_with_criterion, optimizerD)
    myTrainOneStepCellForG = nn.TrainOneStepCell(netG_with_criterion, optimizerG)
    net = DCGAN(myTrainOneStepCellForD, myTrainOneStepCellForG)
    # Load weights from <train_url>/<ckpt_file> into the assembled network.
    param_dict = load_checkpoint(os.path.join(args.train_url, args.ckpt_file))
    load_param_into_net(net, param_dict)
    net.set_train(False)
    # inputs = Tensor(np.random.rand(args.batch_size, 3, 448, 448), mstype.float32)
    # Dummy inputs that fix the exported graph's input shapes:
    # real images (N,3,32,32) and latent noise (N,100,1,1).
    real_data = Tensor(np.random.rand(args.batch_size, 3, 32, 32), mstype.float32)
    latent_code = Tensor(np.random.rand(args.batch_size, 100, 1, 1), mstype.float32)
    inputs = [real_data, latent_code]
    export(net, *inputs, file_name=args.file_name, file_format=args.file_format)
| {
"alphanum_fraction": 0.74734119,
"author": null,
"avg_line_length": 47.6575342466,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a5d53fbbbde0aaca332183cb7f46700e5f751e6f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "1331c7e432fb691d1cfa625ab7cc7451dcfc7ce0",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "LottieWang/mindspore",
"max_forks_repo_path": "model_zoo/research/cv/dcgan/export.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "1331c7e432fb691d1cfa625ab7cc7451dcfc7ce0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "LottieWang/mindspore",
"max_issues_repo_path": "model_zoo/research/cv/dcgan/export.py",
"max_line_length": 118,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "1331c7e432fb691d1cfa625ab7cc7451dcfc7ce0",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "LottieWang/mindspore",
"max_stars_repo_path": "model_zoo/research/cv/dcgan/export.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 837,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3479
} |
# influence functions for shapley values
def shapley_influence_function(Z, z_counts, W, v, psi, G, c_n, ics, measure):
    """
    Compute influence function for the given predictiveness measure
    @param Z the subsets of the power set with estimates
    @param z_counts per-subset sampling counts (used to expand phi_02 back
           to one column per observation)
    @param W the matrix of weights
    @param v the estimated predictiveness
    @param psi the estimated Shapley values
    @param G the constrained ls matrix
    @param c_n the constraints
    @param ics a list of all ics
    @param measure the predictiveness measure
    @return dict with 'contrib_v' (contribution from estimating V) and
            'contrib_s' (contribution from estimating the Shapley projection)
    """
    import numpy as np
    ## compute contribution from estimating V
    # phi_01 = (Z'WZ)^{-1} Z'W ics : weighted-least-squares propagation of ics
    Z_W = Z.transpose().dot(W)
    A_m = Z_W.dot(Z)
    A_m_inv = np.linalg.inv(A_m)
    phi_01 = A_m_inv.dot(Z_W).dot(ics)
    ## compute contribution from estimating Q
    # U_2 spans the null space of G (columns of Q beyond the constraint rank)
    qr_decomp = np.linalg.qr(G.transpose(), mode = 'complete')
    # NOTE(review): the column offset 3 appears to hard-code the number of
    # constraint rows in G — confirm it should not be G.shape[0] in general
    U_2 = qr_decomp[0][:, 3:(Z.shape[1])]
    V = U_2.transpose().dot(Z.transpose().dot(W).dot(Z)).dot(U_2)
    phi_02_shared_mat = (-1) * U_2.dot(np.linalg.inv(V))
    # per-subset residual (Z psi - v) projected into the null space
    phi_02_uniq_vectors = np.array([(Z[z, :].dot(psi) - v[z]) * (U_2.transpose().dot(Z[z, :])) for z in range(Z.shape[0])], dtype = np.float64).transpose()
    phi_02_uniq = phi_02_shared_mat.dot(phi_02_uniq_vectors)
    # replicate each subset's column z_counts[z] times (one per observation)
    phi_02 = np.repeat(phi_02_uniq, z_counts, axis=1)
    return {'contrib_v': phi_01, 'contrib_s': phi_02}
def shapley_se(shapley_ics, idx, gamma, na_rm = True):
    """
    Standard error for the desired Shapley value
    @param shapley_ics: all influence function estimates
    @param idx: the index of interest
    @param gamma: the constant for sampling
    @param na_rm: remove NaNs?
    @return the standard error corresponding to the shapley value at idx
    """
    import numpy as np
    # pick the variance estimator once, instead of branching twice
    variance = np.nanvar if na_rm else np.var
    contrib_v = shapley_ics['contrib_v']
    contrib_s = shapley_ics['contrib_s']
    var_v = variance(contrib_v[idx, :])
    var_s = variance(contrib_s[idx, :])
    # combine the two contributions; the sampling term is scaled by gamma
    return np.sqrt(var_v / contrib_v.shape[1] + var_s / contrib_s.shape[1] / gamma)
| {
"alphanum_fraction": 0.6676190476,
"author": null,
"avg_line_length": 37.5,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b41c723de4428c8f8fa5b297281a5294734406c4",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "681eb21e1ff1141dc9fbaa35261e24dd17296857",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bdwilliamson/npvipy",
"max_forks_repo_path": "vimpy/spvim_ic.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "681eb21e1ff1141dc9fbaa35261e24dd17296857",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bdwilliamson/npvipy",
"max_issues_repo_path": "vimpy/spvim_ic.py",
"max_line_length": 155,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "681eb21e1ff1141dc9fbaa35261e24dd17296857",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "bdwilliamson/npvipy",
"max_stars_repo_path": "vimpy/spvim_ic.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 626,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2100
} |
import numpy as np
from pengle.transformer.base import FeatureOverwriter, timer
import copy
class ComplementMissingValue(FeatureOverwriter):
    """Fill NaNs in each target column with an aggregate (default: mean)
    computed from the training split, applied to both train and test."""

    def __init__(self, columns, agg_func=np.mean):
        super().__init__(columns)
        self.agg_func = agg_func

    def apply(self, train_dataset, test_dataset):
        train_out = copy.deepcopy(train_dataset)
        test_out = copy.deepcopy(test_dataset)
        for col in self.columns:
            # aggregate is always taken from the *training* data to avoid
            # leaking test-set statistics
            fill_value = self.agg_func(train_dataset.data[col])
            for ds in (train_out, test_out):
                ds.data[col].fillna(fill_value, inplace=True)
        return train_out, test_out
class ExtractStrings(FeatureOverwriter):
    """Overwrite each target column with the substring captured by its
    paired regex (pandas ``str.extract``; non-matches become NaN)."""

    def __init__(self, columns, regexps):
        super().__init__(columns)
        self.regexps = regexps

    def apply(self, train_dataset, test_dataset):
        train_out = copy.deepcopy(train_dataset)
        test_out = copy.deepcopy(test_dataset)
        for col, pattern in zip(self.columns, self.regexps):
            for dst, src in ((train_out, train_dataset), (test_out, test_dataset)):
                dst.data[col] = src.data[col].str.extract(pattern, expand=False)
        return train_out, test_out
class ReplaceStrings(FeatureOverwriter):
    """Apply a value-replacement mapping in place on each target column
    of both the train and test datasets."""

    def __init__(self, columns, replace_rule):
        super().__init__(columns)
        self.replace_rule = replace_rule

    def apply(self, train_dataset, test_dataset):
        train_out = copy.deepcopy(train_dataset)
        test_out = copy.deepcopy(test_dataset)
        for col in self.columns:
            for ds in (train_out, test_out):
                ds.data[col].replace(self.replace_rule, inplace=True)
        return train_out, test_out
| {
"alphanum_fraction": 0.6727066818,
"author": null,
"avg_line_length": 36.0408163265,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "406874c7e5050d3199d65f5e4d48e83ed2507c8c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "cd8c60f953ee7936bf25af84503fac8092ef7b94",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "pesuchin/pengle",
"max_forks_repo_path": "pengle/transformer/preprocessor.py",
"max_issues_count": 8,
"max_issues_repo_head_hexsha": "cd8c60f953ee7936bf25af84503fac8092ef7b94",
"max_issues_repo_issues_event_max_datetime": "2020-05-30T02:19:49.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-05-16T05:44:57.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "pesuchin/pengle",
"max_issues_repo_path": "pengle/transformer/preprocessor.py",
"max_line_length": 72,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "cd8c60f953ee7936bf25af84503fac8092ef7b94",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "pesuchin/pengle",
"max_stars_repo_path": "pengle/transformer/preprocessor.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 363,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1766
} |
import chainer
import chainer.functions as F
import chainer.links as L
import onnx
import onnx.helper as oh
from onnx import numpy_helper
from onnx import TensorProto
from onnx import ModelProto
from chainer_compiler.elichika.parser import core
from chainer_compiler.elichika.parser import graphs
from chainer_compiler.elichika.parser import values
from chainer_compiler.elichika.parser import nodes
from chainer_compiler.elichika.parser import functions
from chainer_compiler.elichika.parser import functions_builtin
from chainer_compiler.elichika.parser import utils
import numpy as np
import collections
from chainer_compiler.elichika import onnx_converters as oc
def _pair(x):
if isinstance(x, collections.Iterable):
return x
return (x, x)
def _list(v) -> 'List[int]':
if isinstance(v, collections.Iterable):
return list(x for x in v)
return [v]
def get_onnx_dtype(dtype):
    """Map a numpy dtype (or anything np.zeros accepts) to the ONNX
    TensorProto dtype enum."""
    # a zero-dim probe array canonicalizes the dtype before the lookup
    probe = np.zeros((), dtype=dtype)
    return onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[probe.dtype]
class BaseConverter(object):
    """Common machinery for Chainer-function -> ONNX node converters.

    Subclasses declare ``expected_args`` (a tuple of argument specs for
    oc.NodeParse) and implement ``__call__`` to emit the ONNX node.
    """

    def __init__(self):
        self.expected_args = ()

    def parse_args(self, onnx_graph, node):
        """Parse the node's inputs/attributes according to expected_args."""
        assert hasattr(self, 'expected_args'), 'BaseConverter subclass must have `expected_args`'
        arg_parser = oc.NodeParse()
        for spec in self.expected_args:
            arg_parser.add_def(*spec)
        arg_parser.parse(onnx_graph, node)
        return arg_parser

    def __call__(self, onnx_graph, node):
        # subclasses must emit their node here
        raise NotImplementedError
class ConverterRelu(BaseConverter):
    """Convert F.relu to an ONNX ``Relu`` node."""

    def __init__(self):
        self.expected_args = (
            ('x', oc.ParseType.In),)

    def __call__(self, onnx_graph, node):
        args = self.parse_args(onnx_graph, node)
        onnx_graph.add_node(
            'Relu',
            [args.get('x')],
            node.outputs,
            name=str(node.lineprop))
class ConverterElu(BaseConverter):
    """Convert F.elu to an ONNX ``Elu`` node (alpha attribute)."""

    def __init__(self):
        self.expected_args = (
            ('x', oc.ParseType.In),
            ('alpha', oc.ParseType.Att))

    def __call__(self, onnx_graph, node):
        args = self.parse_args(onnx_graph, node)
        alpha_value = args.get('alpha')
        onnx_graph.add_node(
            'Elu',
            [args.get('x')],
            node.outputs,
            name=str(node.lineprop),
            alpha=alpha_value)
class ConverterLeakyRelu(BaseConverter):
    """Convert F.leaky_relu to an ONNX ``LeakyRelu`` node.

    Chainer's ``slope`` argument maps onto ONNX's ``alpha`` attribute.
    """

    def __init__(self):
        self.expected_args = (
            ('x', oc.ParseType.In),
            ('slope', oc.ParseType.Att))

    def __call__(self, onnx_graph, node):
        args = self.parse_args(onnx_graph, node)
        slope_value = args.get('slope')
        onnx_graph.add_node(
            'LeakyRelu',
            [args.get('x')],
            node.outputs,
            name=str(node.lineprop),
            alpha=slope_value)
class ConverterLogSoftmax(BaseConverter):
    """Convert F.log_softmax to an ONNX ``LogSoftmax`` node.

    ``chainer_is_onnx_semantics=False`` marks that the axis follows
    Chainer's semantics rather than ONNX's.
    """

    def __init__(self):
        self.expected_args = (
            ('x', oc.ParseType.In),
            ('axis', oc.ParseType.Att))

    def __call__(self, onnx_graph, node):
        args = self.parse_args(onnx_graph, node)
        axis_value = args.get('axis')
        onnx_graph.add_node(
            'LogSoftmax',
            [args.get('x')],
            node.outputs,
            name=str(node.lineprop),
            axis=axis_value,
            chainer_is_onnx_semantics=False)
class ConverterSelu(BaseConverter):
    """Convert F.selu to an ONNX ``Selu`` node.

    Chainer's ``scale`` argument maps onto ONNX's ``gamma`` attribute.
    """

    def __init__(self):
        self.expected_args = (
            ('x', oc.ParseType.In),
            ('alpha', oc.ParseType.Att),
            ('scale', oc.ParseType.Att))

    def __call__(self, onnx_graph, node):
        args = self.parse_args(onnx_graph, node)
        alpha_value = args.get('alpha')
        scale_value = args.get('scale')
        onnx_graph.add_node(
            'Selu',
            [args.get('x')],
            node.outputs,
            name=str(node.lineprop),
            alpha=alpha_value,
            gamma=scale_value,
        )
class ConverterSigmoid(BaseConverter):
    """Convert F.sigmoid to an ONNX ``Sigmoid`` node."""

    def __init__(self):
        self.expected_args = (
            ('x', oc.ParseType.In),)

    def __call__(self, onnx_graph, node):
        args = self.parse_args(onnx_graph, node)
        onnx_graph.add_node(
            'Sigmoid',
            [args.get('x')],
            node.outputs,
            name=str(node.lineprop))
class ConverterSoftmax(BaseConverter):
    """Convert F.softmax to an ONNX ``Softmax`` node.

    ``chainer_is_onnx_semantics=False`` marks that the axis follows
    Chainer's semantics rather than ONNX's.
    """

    def __init__(self):
        self.expected_args = (
            ('x', oc.ParseType.In),
            ('axis', oc.ParseType.Att))

    def __call__(self, onnx_graph, node):
        args = self.parse_args(onnx_graph, node)
        axis_value = args.get('axis')
        onnx_graph.add_node(
            'Softmax',
            [args.get('x')],
            node.outputs,
            name=str(node.lineprop),
            axis=axis_value,
            chainer_is_onnx_semantics=False)
| {
"alphanum_fraction": 0.6059701493,
"author": null,
"avg_line_length": 26.4971751412,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f2b898e02c1aa7075b806196125c0a2caf17aa97",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 26,
"max_forks_repo_forks_event_max_datetime": "2021-11-26T04:24:35.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-01-25T07:21:09.000Z",
"max_forks_repo_head_hexsha": "5f5ad365d14398d6ae0214fa012eb10360db8e7e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "vermashresth/chainer-compiler",
"max_forks_repo_path": "chainer_compiler/elichika/functions_chainer_activation.py",
"max_issues_count": 431,
"max_issues_repo_head_hexsha": "5f5ad365d14398d6ae0214fa012eb10360db8e7e",
"max_issues_repo_issues_event_max_datetime": "2020-06-17T05:28:55.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-01-25T10:18:44.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "vermashresth/chainer-compiler",
"max_issues_repo_path": "chainer_compiler/elichika/functions_chainer_activation.py",
"max_line_length": 86,
"max_stars_count": 116,
"max_stars_repo_head_hexsha": "5f5ad365d14398d6ae0214fa012eb10360db8e7e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "vermashresth/chainer-compiler",
"max_stars_repo_path": "chainer_compiler/elichika/functions_chainer_activation.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-08T00:11:14.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-01-25T03:54:44.000Z",
"num_tokens": 1074,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4690
} |
import argparse
import logging
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import yaml
from torch import optim
from torch.utils.data import DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from evaluation import *
# from unet import U_Net, R2U_Net, AttU_Net, R2AttU_Net, init_weights
from models import ChooseModel
from utils.dataset import BasicDataset
# Load evaluation settings from config/evaluate.yaml next to this script;
# FullLoader parses full YAML without arbitrary object construction.
conf = yaml.load(open(os.path.join(
    sys.path[0], 'config', 'evaluate.yaml')), Loader=yaml.FullLoader)
# Image and mask directories come from the DATASET section of the config.
dir_img = conf['DATASET']['IMGS_DIR']
dir_mask = conf['DATASET']['MASKS_DIR']
def count_param(model):
    """Return the total number of elements across all parameters of *model*.

    @param model: a torch.nn.Module
    @return int: sum of element counts over model.parameters()
    """
    # Tensor.numel() gives the element count directly; this replaces the
    # original's manual accumulation via param.view(-1).size()[0].
    return sum(param.numel() for param in model.parameters())
def evaluate_net(net,
                 device,
                 batch_size=16,
                 img_scale=0.5,
                 classes=2):
    """Evaluate *net* on the configured dataset and log metrics to TensorBoard.

    For binary nets (n_classes == 1) accumulates Dice, pixel/overall
    accuracy, precision, recall and F1; for multi-class nets only a
    cross-entropy total is accumulated.  Results are averaged over the
    number of samples and written via logging + SummaryWriter.
    """
    dataset = BasicDataset(dir_img, dir_mask, img_scale,
                           train=True, classes=classes)
    val_loader = DataLoader(dataset, batch_size=batch_size,
                            shuffle=False, num_workers=8, pin_memory=True)
    n_val = len(dataset)
    writer = SummaryWriter(
        comment=f'BS_{batch_size}_SCALE_{img_scale}')
    global_step = 0
    logging.info(f'''Starting training:
        Batch size: {batch_size}
        Device: {device.type}
        Images scaling: {img_scale}
    ''')
    # loss matches the head: CE for multi-class, BCE-with-logits for binary
    if net.n_classes > 1:
        criterion = nn.CrossEntropyLoss()
    else:
        criterion = nn.BCEWithLogitsLoss()
    net.eval()
    # per-dataset metric accumulators (divided by n_val at the end)
    epoch_loss = 0
    tot = 0
    PA = 0
    OA = 0
    pre = 0
    recal = 0
    f1s = 0
    params = None
    for batch in tqdm(val_loader):
        imgs = batch['image']
        true_masks = batch['mask']
        assert imgs.shape[1] == net.n_channels, \
            f'Network has been defined with {net.n_channels} input channels, ' \
            f'but loaded images have {imgs.shape[1]} channels. Please check that ' \
            'the images are loaded correctly.'
        imgs = imgs.to(device=device, dtype=torch.float32)
        # count parameters once, lazily, on the first batch
        if params is None:
            params = count_param(net)
        mask_type = torch.float32 if net.n_classes == 1 else torch.long
        true_masks = true_masks.to(device=device, dtype=mask_type)
        if net.n_classes > 1:
            # CE expects class-index targets of shape (B, W, H)
            b, c, w, h = true_masks.shape
            true_masks = true_masks.view(b, w, h)
        masks_pred = net(imgs)
        loss = criterion(masks_pred, true_masks)
        epoch_loss += loss.item()
        writer.add_scalar('Loss/train', loss.item(), global_step)
        for true_mask, pred in zip(true_masks, masks_pred):
            # hard-threshold the prediction at 0.5
            # NOTE(review): in the multi-class branch this binarized tensor is
            # then fed to cross_entropy, which normally expects raw logits —
            # confirm this is intended.
            pred = (pred > 0.5).float()
            if net.n_classes > 1:
                tot += F.cross_entropy(pred.unsqueeze(dim=0),
                                       true_mask.unsqueeze(dim=0)).item()
            else:
                PA += pixel_accuracy(pred,
                                     true_mask.squeeze(dim=1)).item()
                tot += dice_coeff(pred, true_mask.squeeze(dim=1)).item()
                OA += overall_accuracy(pred,
                                       true_mask.squeeze(dim=1)).item()
                pre += precision(pred,
                                 true_mask.squeeze(dim=1)).item()
                recal += recall(pred,
                                true_mask.squeeze(dim=1)).item()
                f1s += f1score(pred,
                               true_mask.squeeze(dim=1)).item()
    # average every accumulator over the number of validation samples
    epoch_loss /= n_val
    tot /= n_val
    PA /= n_val
    OA /= n_val
    pre /= n_val
    recal /= n_val
    f1s /= n_val
    if net.n_classes > 1:
        logging.info(f'Validation loss:{epoch_loss}')
        logging.info(
            'Validation cross entropy: {}'.format(tot))
        logging.info(f'Params in this model is: {params}')
        writer.add_scalar('Loss/test', tot, global_step)
    else:
        logging.info(f'Validation loss:{epoch_loss}')
        logging.info(
            'Validation Dice Coeff: {}'.format(tot))
        writer.add_scalar('Dice/test', tot, global_step)
        logging.info(
            'Validation Pixel Accuracy: {}'.format(PA))
        writer.add_scalar('pA/test', PA, global_step)
        logging.info(
            'Validation Overall Accuracy: {}'.format(OA))
        writer.add_scalar('oA/test', OA, global_step)
        logging.info(
            'Validation Precision: {}'.format(pre))
        writer.add_scalar('precision/test',
                          pre, global_step)
        logging.info(
            'Validation Recall: {}'.format(recal))
        writer.add_scalar('recall/test', recal, global_step)
        logging.info(
            'Validation F1-score: {}'.format(f1s))
        writer.add_scalar(
            'F1-score/test', f1s, global_step)
        logging.info(f'Params in this model is: {params}')
    writer.close()
def get_args():
    """Parse command-line arguments; defaults come from evaluate.yaml (`conf`)."""
    arg_parser = argparse.ArgumentParser(
        description='Train the UNet on images and target masks',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('-n', '--network', metavar='NETWORK', type=str,
                            default=conf['MODEL']['MODEL_NAME'], help='network type', dest='network')
    arg_parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?', default=conf['BATCH_SIZE'],
                            help='Batch size', dest='batchsize')
    arg_parser.add_argument('-f', '--load', dest='load', type=str, default=conf['MODEL']['LOAD_PATH'],
                            help='Load model from a .pth file')
    arg_parser.add_argument('-s', '--scale', dest='scale', type=float, default=conf['SCALE'],
                            help='Downscaling factor of the images')
    return arg_parser.parse_args()
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')
    args = get_args()
    # use CUDA only when both available and enabled in the config
    device = torch.device('cuda' if torch.cuda.is_available(
    ) and conf['DEVICE'].lower() == 'cuda' else 'cpu')
    logging.info(f'Using device {device}')
    # Change here to adapt to your data
    # n_channels=3 for RGB images
    # n_classes is the number of probabilities you want to get per pixel
    #   - For 1 class and background, use n_classes=1
    #   - For 2 classes, use n_classes=1
    #   - For N > 2 classes, use n_classes=N
    network = args.network.lower()
    # ChooseModel maps the network name to a model class, instantiated here
    net = ChooseModel(network)(
        n_channels=3, n_classes=conf['DATASET']['NUM_CLASSES'])
    assert net is not None, f'check your argument --network'
    # net = AttU_Net(n_channels=3,n_classes=1)
    # net = AttU_Net(n_channels=3, n_classes=1)
    logging.info(f'Network:\n'
                 f'\t{net.n_channels} input channels\n'
                 f'\t{net.n_classes} output channels (classes)\n'
                 f'\t{"Bilinear" if net.bilinear else "Dilated conv"} upscaling\n')
    if args.load:
        # restore weights from the given checkpoint before evaluating
        net.load_state_dict(
            torch.load(args.load, map_location=device)
        )
        logging.info(f'Model loaded from {args.load}')
    net.to(device=device)
    # faster convolutions, but more memory
    # cudnn.benchmark = True
    try:
        evaluate_net(net=net,
                     batch_size=args.batchsize,
                     device=device,
                     img_scale=args.scale,
                     classes=conf['DATASET']['NUM_CLASSES'])
    except KeyboardInterrupt:
        # save current weights on Ctrl-C so the run is not lost
        torch.save(net.state_dict(), 'INTERRUPTED.pth')
        logging.info('Saved interrupt')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
| {
"alphanum_fraction": 0.5822586948,
"author": null,
"avg_line_length": 36.2122641509,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "2a3790dca9b3ad7a15a57d4bee446ccfe0cda367",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7a2c21346739a79c33e7a7ccc081018821868eb7",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "QinchengZhang/PathologySegmentation",
"max_forks_repo_path": "Training/pytorch/evaluate.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7a2c21346739a79c33e7a7ccc081018821868eb7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "QinchengZhang/PathologySegmentation",
"max_issues_repo_path": "Training/pytorch/evaluate.py",
"max_line_length": 107,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "7a2c21346739a79c33e7a7ccc081018821868eb7",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "QinchengZhang/PathologySegmentation",
"max_stars_repo_path": "Training/pytorch/evaluate.py",
"max_stars_repo_stars_event_max_datetime": "2021-12-23T01:42:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-25T06:18:21.000Z",
"num_tokens": 1751,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7677
} |
"""
============================
StrongTree Fit Example
============================
An example of fitting a StrongTree decision tree using :class:`trees.StrongTree.StrongTreeClassifier`
"""
from pickle import TRUE
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from trees.StrongTree import StrongTreeClassifier
# Load the pre-encoded balance-scale dataset and split off the target column.
data = pd.read_csv("./data/balance-scale_enc.csv")
y = data.pop("target")
# Hold out a third of the rows for out-of-sample evaluation.
X_train, X_test, y_train, y_test = train_test_split(
    data, y, test_size=0.33, random_state=42
)
# Fit a depth-1 StrongTree: 60 s solver time limit, no regularization
# (_lambda=0), no Benders decomposition, accuracy objective.
stcl = StrongTreeClassifier(
    depth = 1,
    time_limit = 60,
    _lambda = 0,
    benders_oct= False,
    num_threads=None,
    obj_mode = 'acc'
)
stcl.fit(X_train, y_train, verbose=True)
stcl.print_tree()
# Report fraction of correctly classified held-out rows.
test_pred = stcl.predict(X_test)
print('The out-of-sample acc is {}'.format(np.sum(test_pred==y_test)/y_test.shape[0]))
| {
"alphanum_fraction": 0.6896551724,
"author": null,
"avg_line_length": 25.5882352941,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "4340e20e891e4239ee975c1eb55d8e739176922a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9a921e59cb2578c2a344dd2fa0b88bacb3571b5f",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "nathanaj99/decision_tree_estimators",
"max_forks_repo_path": "examples/fit_StrongTree_example.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9a921e59cb2578c2a344dd2fa0b88bacb3571b5f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "nathanaj99/decision_tree_estimators",
"max_issues_repo_path": "examples/fit_StrongTree_example.py",
"max_line_length": 101,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "9a921e59cb2578c2a344dd2fa0b88bacb3571b5f",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "nathanaj99/decision_tree_estimators",
"max_stars_repo_path": "examples/fit_StrongTree_example.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 222,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 870
} |
#include <server_lib/emergency_helper.h>
#if !defined(STACKTRACE_DISABLED)
#include <boost/version.hpp>
#include <boost/filesystem.hpp>
#include <iostream>
#include <sstream>
#include <fstream>
#if !defined(CUSTOM_STACKTRACE_IMPL) && BOOST_VERSION >= 106500
#include <boost/stacktrace.hpp>
namespace server_lib {
// Write a raw (binary) stack dump for the current call stack to
// `dump_file_path`. Failures are reported to stderr and swallowed —
// presumably because this runs from a crash/emergency path where
// throwing would be fatal (NOTE(review): confirm caller context).
void emergency_helper::save_dump(const char* dump_file_path)
{
    try
    {
        boost::stacktrace::safe_dump_to(dump_file_path);
    }
    catch (const std::exception& e)
    {
        std::cerr << "Can't save dump: " << e.what() << std::endl;
    }
}
// Read a dump previously written by save_dump(), render it as a
// human-readable stacktrace, and optionally delete the dump file.
// Returns an empty string when no dump file exists.
std::string emergency_helper::load_dump(const char* dump_file_path, bool remove)
{
    if (!boost::filesystem::exists(dump_file_path))
        return {};

    std::stringstream formatted;
    try
    {
        std::ifstream input(dump_file_path);
        formatted << boost::stacktrace::stacktrace::from_dump(input);
        // cleaning up
        input.close();
    }
    catch (const std::exception& e)
    {
        formatted << e.what();
    }
    formatted << '\n';

    if (remove)
    {
        try
        {
            boost::filesystem::remove(dump_file_path);
        }
        catch (const std::exception&)
        {
            // best-effort cleanup; ignore failures
        }
    }
    return formatted.str();
}
} // namespace server_lib
#else
#include <stdlib.h>
#include <execinfo.h>
#include <cxxabi.h>
namespace server_lib {
// Write a demangled stack backtrace of the caller to `dump_file_path`
// using the glibc backtrace()/backtrace_symbols() API.
// https://linux.die.net/man/3/backtrace_symbols:
//"The symbol names may be unavailable without the use of special linker options.
// For systems using the GNU linker, it is necessary to use the -rdynamic linker option.
// Note that names of "static" functions are not exposed, and won't be available in the backtrace."
void emergency_helper::save_dump(const char* dump_file_path)
{
    FILE* out = fopen(dump_file_path, "w");
    if (!out)
    {
        perror("Can't save dump");
        return;
    }
    const size_t max_frames = 100;
    fprintf(out, "stack trace:\n");
    // storage array for stack trace address data
    void* addrlist[max_frames + 1] = {};
    // retrieve current stack addresses
    int addrlen = backtrace(addrlist, sizeof(addrlist) / sizeof(void*));
    if (addrlen == 0)
    {
        fprintf(out, "    <empty, possibly corrupt>\n");
        fclose(out); // FIX: original leaked `out` on this early return
        return;
    }
    // resolve addresses into strings containing "filename(function+address)",
    // this array must be free()-ed
    char** symbollist = backtrace_symbols(addrlist, addrlen);
    if (!symbollist)
    {
        // FIX: backtrace_symbols returns NULL on allocation failure
        fprintf(out, "    <backtrace_symbols failed>\n");
        fclose(out);
        return;
    }
    // Buffer which will be filled with the demangled function name.
    // FIX: must be heap-allocated (not alloca), because __cxa_demangle is
    // documented to realloc() the buffer when it is too small.
    size_t funcnamesize = 256;
    char* funcname = reinterpret_cast<char*>(malloc(funcnamesize));
    // iterate over the returned symbol lines. skip the first, it is the
    // address of this function.
    for (int i = 1; i < addrlen; i++)
    {
        char *begin_name = nullptr, *begin_offset = nullptr, *end_offset = nullptr;
        // find parentheses and +address offset surrounding the mangled name:
        // ./module(function+0x15c) [0x8048a6d]
        for (char* p = symbollist[i]; *p; ++p)
        {
            if (*p == '(')
                begin_name = p;
            else if (*p == '+')
                begin_offset = p;
            else if (*p == ')' && begin_offset)
            {
                end_offset = p;
                break;
            }
        }
        if (begin_name && begin_offset && end_offset
            && begin_name < begin_offset)
        {
            *begin_name++ = '\0';
            *begin_offset++ = '\0';
            *end_offset = '\0';
            // mangled name is now in [begin_name, begin_offset) and caller
            // offset in [begin_offset, end_offset). now apply
            // __cxa_demangle():
            int status;
            char* ret = abi::__cxa_demangle(begin_name,
                                            funcname, &funcnamesize, &status);
            if (status == 0)
            {
                funcname = ret; // use possibly realloc()-ed string
                fprintf(out, "  %s : %s+%s\n",
                        symbollist[i], funcname, begin_offset);
            }
            else
            {
                // demangling failed. Output function name as a C function with
                // no arguments.
                fprintf(out, "  %s : %s()+%s\n",
                        symbollist[i], begin_name, begin_offset);
            }
        }
        else
        {
            // couldn't parse the line? print the whole line.
            fprintf(out, "  %s\n", symbollist[i]);
        }
    }
    free(funcname); // FIX: demangle buffer was never released
    fclose(out);
    free(symbollist);
}
// Read back the textual dump written by the fallback save_dump() and
// optionally delete the dump file. Returns an empty string when the
// path is null or the file does not exist.
std::string emergency_helper::load_dump(const char* dump_file_path, bool remove)
{
    if (!dump_file_path || !boost::filesystem::exists(dump_file_path))
        return {};

    std::stringstream text;
    try
    {
        std::ifstream input(dump_file_path);
        for (std::string line; std::getline(input, line);)
        {
            text << line << '\n';
        }
        // cleaning up
        input.close();
    }
    catch (const std::exception& e)
    {
        text << e.what();
    }
    text << '\n';

    if (remove)
    {
        try
        {
            boost::filesystem::remove(dump_file_path);
        }
        catch (const std::exception&)
        {
            // best-effort cleanup; ignore failures
        }
    }
    return text.str();
}
} // namespace server_lib
#endif
namespace server_lib {
// Probe whether `dump_file_path` is writable without clobbering an
// existing dump: refuse if the file already exists, otherwise create,
// close and remove a probe file. Returns true when a dump could be written.
bool emergency_helper::test_for_write(const char* dump_file_path)
{
    if (boost::filesystem::exists(dump_file_path))
        return false;

    bool writable = false;
    {
        // Scope the stream so it is closed before the remove() below.
        std::ofstream probe(dump_file_path);
        writable = static_cast<bool>(probe);
    }
    if (!writable)
        return false;

    boost::filesystem::remove(dump_file_path);
    return true;
}
} // namespace server_lib
#else //!STACKTRACE_DISABLED
// raise linker error for save_dump, load_dump
#endif
| {
"alphanum_fraction": 0.5386509109,
"author": null,
"avg_line_length": 25.3875,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "33b042f69c5293fe11643ebbed12756cfdac17dd",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-08-16T13:38:15.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-08-16T13:38:15.000Z",
"max_forks_repo_head_hexsha": "d0778f08958b6f2d37f67d577c98ebf46c42b2b9",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "romualdo-bar/barbacoa-server-lib",
"max_forks_repo_path": "src/emergency_helper.cpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d0778f08958b6f2d37f67d577c98ebf46c42b2b9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "romualdo-bar/barbacoa-server-lib",
"max_issues_repo_path": "src/emergency_helper.cpp",
"max_line_length": 103,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "d0778f08958b6f2d37f67d577c98ebf46c42b2b9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "romualdo-bar/barbacoa-server-lib",
"max_stars_repo_path": "src/emergency_helper.cpp",
"max_stars_repo_stars_event_max_datetime": "2020-10-27T19:23:17.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-27T19:23:17.000Z",
"num_tokens": 1394,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 6093
} |
"""
Algorithms that Involve Multiple DataFrames
===========================================
The pandas operations ``concat``, ``join``, and ``merge`` combine multiple
DataFrames. This module contains analogous algorithms in the parallel case.
There are two important cases:
1. We combine along a partitioned index
2. We combine along an unpartitioned index or other column
In the first case we know which partitions of each dataframe interact with
which others. This lets us be significantly more clever and efficient.
In the second case each partition from one dataset interacts with all
partitions from the other. We handle this through a shuffle operation.
Partitioned Joins
-----------------
In the first case where we join along a partitioned index we proceed in the
following stages.
1. Align the partitions of all inputs to be the same. This involves a call
to ``dd.repartition`` which will split up and concat existing partitions as
necessary. After this step all inputs have partitions that align with
each other. This step is relatively cheap.
See the function ``align_partitions``.
2. Remove unnecessary partitions based on the type of join we perform (left,
right, inner, outer). We can do this at the partition level before any
computation happens. We'll do it again on each partition when we call the
in-memory function. See the function ``require``.
3. Embarrassingly parallel calls to ``pd.concat``, ``pd.join``, or
``pd.merge``. Now that the data is aligned and unnecessary blocks have
been removed we can rely on the fast in-memory Pandas join machinery to
execute joins per-partition. We know that all intersecting records exist
   within the same partition.
Hash Joins via Shuffle
----------------------
When we join along an unpartitioned index or along an arbitrary column any
partition from one input might interact with any partition in another. In
this case we perform a hash-join by shuffling data in each input by that
column. This results in new inputs with the same partition structure cleanly
separated along that column.
We proceed with hash joins in the following stages:
1. Shuffle each input on the specified column. See the function
``dask.dataframe.shuffle.shuffle``.
2. Perform embarrassingly parallel join across shuffled inputs.
"""
import math
import pickle
import warnings
from functools import partial, wraps
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype, is_dtype_equal
from tlz import merge_sorted, unique
from dask.base import is_dask_collection, tokenize
from dask.dataframe import methods
from dask.dataframe.core import (
DataFrame,
Index,
Series,
_concat,
_Frame,
_maybe_from_pandas,
is_broadcastable,
map_partitions,
new_dd_object,
prefix_reduction,
suffix_reduction,
)
from dask.dataframe.dispatch import group_split_dispatch, hash_object_dispatch
from dask.dataframe.io import from_pandas
from dask.dataframe.shuffle import (
partitioning_index,
rearrange_by_divisions,
shuffle,
shuffle_group,
)
from dask.dataframe.utils import (
asciitable,
is_dataframe_like,
is_series_like,
make_meta,
strip_unknown_categories,
)
from dask.highlevelgraph import HighLevelGraph
from dask.layers import BroadcastJoinLayer
from dask.utils import M, apply
def align_partitions(*dfs):
    """Mutually partition and align DataFrame blocks

    This serves as precursor to multi-dataframe operations like join, concat,
    or merge.

    Parameters
    ----------
    dfs: sequence of dd.DataFrame, dd.Series and dd.base.Scalar
        Sequence of dataframes to be aligned on their index

    Returns
    -------
    dfs: sequence of dd.DataFrame, dd.Series and dd.base.Scalar
        These must have consistent divisions with each other
    divisions: tuple
        Full divisions sequence of the entire result
    result: list
        A list of lists of keys that show which data exist on which
        divisions
    """
    _is_broadcastable = partial(is_broadcastable, dfs)
    # Only frames with real partition structure participate in alignment;
    # broadcastable inputs and scalars are passed through untouched.
    dfs1 = [df for df in dfs if isinstance(df, _Frame) and not _is_broadcastable(df)]
    if len(dfs) == 0:
        raise ValueError("dfs contains no DataFrame and Series")
    if not all(df.known_divisions for df in dfs1):
        raise ValueError(
            "Not all divisions are known, can't align "
            "partitions. Please use `set_index` "
            "to set the index."
        )

    # Sorted union of every input's division boundaries.
    divisions = list(unique(merge_sorted(*[df.divisions for df in dfs1])))
    if len(divisions) == 1:  # single value for index
        divisions = (divisions[0], divisions[0])
    # Repartition each frame to the common divisions (split/concat as needed).
    dfs2 = [
        df.repartition(divisions, force=True) if isinstance(df, _Frame) else df
        for df in dfs
    ]

    result = list()
    inds = [0 for df in dfs]  # per-frame cursor into its own divisions
    for d in divisions[:-1]:
        L = list()
        for i, df in enumerate(dfs2):
            if isinstance(df, _Frame):
                j = inds[i]
                divs = df.divisions
                # Emit this frame's partition key only if it has a partition
                # starting at boundary `d`; otherwise record a gap (None).
                if j < len(divs) - 1 and divs[j] == d:
                    L.append((df._name, inds[i]))
                    inds[i] += 1
                else:
                    L.append(None)
            else:  # Scalar has no divisions
                L.append(None)
        result.append(L)
    return dfs2, tuple(divisions), result
def _maybe_align_partitions(args):
    """Align DataFrame blocks if divisions are different.

    Note that if all divisions are unknown, but have equal npartitions, then
    they will be passed through unchanged. This is different than
    `align_partitions`, which will fail if divisions aren't all known.
    """
    _is_broadcastable = partial(is_broadcastable, args)
    frames = [x for x in args if isinstance(x, _Frame) and not _is_broadcastable(x)]
    if not frames:
        return args

    reference = frames[0].divisions
    if all(frame.divisions == reference for frame in frames):
        # Already consistent; nothing to do.
        return args

    aligned = iter(align_partitions(*frames)[0])
    # Substitute each frame with its aligned counterpart, preserving order.
    return [next(aligned) if isinstance(a, _Frame) else a for a in args]
def require(divisions, parts, required=None):
    """Clear out divisions where required components are not present

    In left, right, or inner joins we exclude portions of the dataset if one
    side or the other is not present. We can achieve this at the partition
    level as well

    >>> divisions = [1, 3, 5, 7, 9]
    >>> parts = [(('a', 0), None),
    ...          (('a', 1), ('b', 0)),
    ...          (('a', 2), ('b', 1)),
    ...          (None, ('b', 2))]

    >>> divisions2, parts2 = require(divisions, parts, required=[0])
    >>> divisions2
    (1, 3, 5, 7)
    >>> parts2  # doctest: +NORMALIZE_WHITESPACE
    ((('a', 0), None),
     (('a', 1), ('b', 0)),
     (('a', 2), ('b', 1)))

    >>> divisions2, parts2 = require(divisions, parts, required=[1])
    >>> divisions2
    (3, 5, 7, 9)
    >>> parts2  # doctest: +NORMALIZE_WHITESPACE
    ((('a', 1), ('b', 0)),
     (('a', 2), ('b', 1)),
     (None, ('b', 2)))

    >>> divisions2, parts2 = require(divisions, parts, required=[0, 1])
    >>> divisions2
    (3, 5, 7)
    >>> parts2  # doctest: +NORMALIZE_WHITESPACE
    ((('a', 1), ('b', 0)),
     (('a', 2), ('b', 1)))
    """
    if not required:
        return divisions, parts

    for side in required:
        # Positions where this side actually has a partition.
        present = [pos for pos, part in enumerate(parts) if part[side] is not None]
        lo, hi = min(present), max(present)
        # Keep hi+1 partitions, which span hi+2 division boundaries.
        divisions = tuple(divisions[lo : hi + 2])
        parts = tuple(parts[lo : hi + 1])
    return divisions, parts
###############################################################
# Join / Merge
###############################################################
# Partition-pruning requirements per join type: which input sides
# (0 = left, 1 = right) must be present for a partition pair to contribute
# to the result; consumed by ``require`` above.
required = {
    "left": [0],
    "leftsemi": [0],
    "leftanti": [0],
    "right": [1],
    "inner": [0, 1],
    "outer": [],
}
# Join types usable by ``single_partition_join`` when the right (resp. left)
# side consists of a single partition.
allowed_left = ("inner", "left", "leftsemi", "leftanti")
allowed_right = ("inner", "right")
def merge_chunk(lhs, *args, empty_index_dtype=None, categorical_columns=None, **kwargs):
    """In-memory merge of one partition pair, with categorical reconciliation.

    ``categorical_columns`` lists result columns whose dtype should be
    categorical; both sides are cast to a common categorical dtype before the
    pandas merge so categories union correctly across partitions.
    """
    rhs, *args = args
    uses_left_index = kwargs.get("left_index", False)
    uses_right_index = kwargs.get("right_index", False)

    if categorical_columns is not None:
        for col in categorical_columns:
            # Locate the values backing `col` on each side: either a real
            # column, or the index when the other side joins on it.
            left_vals = None
            if col in lhs:
                left_vals = lhs[col]
            elif uses_left_index and col == kwargs.get("right_on", None):
                if is_categorical_dtype(lhs.index):
                    left_vals = lhs.index

            right_vals = None
            if col in rhs:
                right_vals = rhs[col]
            elif uses_right_index and col == kwargs.get("left_on", None):
                if is_categorical_dtype(rhs.index):
                    right_vals = rhs.index

            if left_vals is not None and right_vals is not None:
                # Union the categories of both sides into one dtype.
                dtype = methods.union_categoricals(
                    [left_vals.astype("category"), right_vals.astype("category")]
                ).dtype
            else:
                dtype = "category"

            if left_vals is not None:
                if isinstance(left_vals, pd.Index):
                    lhs.index = left_vals.astype(dtype)
                else:
                    lhs = lhs.assign(**{col: left_vals.astype(dtype)})
            if right_vals is not None:
                if isinstance(right_vals, pd.Index):
                    rhs.index = right_vals.astype(dtype)
                else:
                    rhs = rhs.assign(**{col: right_vals.astype(dtype)})

    out = lhs.merge(rhs, *args, **kwargs)

    # Workaround pandas bug where if the output result of a merge operation is
    # an empty dataframe, the output index is `int64` in all cases, regardless
    # of input dtypes.
    if len(out) == 0 and empty_index_dtype is not None:
        out.index = out.index.astype(empty_index_dtype)
    return out
def merge_indexed_dataframes(lhs, rhs, left_index=True, right_index=True, **kwargs):
    """Join two partitioned dataframes along their index"""
    how = kwargs.get("how", "left")
    kwargs["left_index"] = left_index
    kwargs["right_index"] = right_index

    # Repartition both sides to a common set of divisions, then drop the
    # partition pairs that cannot contribute to this join type.
    (lhs, rhs), divisions, parts = align_partitions(lhs, rhs)
    divisions, parts = require(divisions, parts, required[how])

    name = "join-indexed-" + tokenize(lhs, rhs, **kwargs)

    # Use nonempty metadata so merged dtypes are inferred correctly.
    meta = lhs._meta_nonempty.merge(rhs._meta_nonempty, **kwargs)
    kwargs["empty_index_dtype"] = meta.index.dtype
    kwargs["categorical_columns"] = meta.select_dtypes(include="category").columns

    # One embarrassingly-parallel merge task per aligned partition pair.
    dsk = dict()
    for i, (a, b) in enumerate(parts):
        dsk[(name, i)] = (apply, merge_chunk, [a, b], kwargs)

    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[lhs, rhs])
    return new_dd_object(graph, name, meta, divisions)
shuffle_func = shuffle # name sometimes conflicts with keyword argument
def hash_join(
    lhs,
    left_on,
    rhs,
    right_on,
    how="inner",
    npartitions=None,
    suffixes=("_x", "_y"),
    shuffle=None,
    indicator=False,
    max_branch=None,
):
    """Join two DataFrames on particular columns with hash join

    This shuffles both datasets on the joined column and then performs an
    embarrassingly parallel join partition-by-partition

    >>> hash_join(lhs, 'id', rhs, 'id', how='left', npartitions=10)  # doctest: +SKIP
    """
    if npartitions is None:
        npartitions = max(lhs.npartitions, rhs.npartitions)

    # Shuffle both sides on the join key so that matching rows land in
    # output partitions with the same index.
    lhs2 = shuffle_func(
        lhs, left_on, npartitions=npartitions, shuffle=shuffle, max_branch=max_branch
    )
    rhs2 = shuffle_func(
        rhs, right_on, npartitions=npartitions, shuffle=shuffle, max_branch=max_branch
    )

    # An Index object as join key means "join on the index" for that side.
    if isinstance(left_on, Index):
        left_on = None
        left_index = True
    else:
        left_index = False

    if isinstance(right_on, Index):
        right_on = None
        right_index = True
    else:
        right_index = False

    kwargs = dict(
        how=how,
        left_on=left_on,
        right_on=right_on,
        left_index=left_index,
        right_index=right_index,
        suffixes=suffixes,
        indicator=indicator,
    )

    # dummy result
    # Avoid using dummy data for a collection if it is empty
    _lhs_meta = lhs._meta_nonempty if len(lhs.columns) else lhs._meta
    _rhs_meta = rhs._meta_nonempty if len(rhs.columns) else rhs._meta
    meta = _lhs_meta.merge(_rhs_meta, **kwargs)

    # Encode list keys in dask task form so they are not interpreted as
    # nested task lists in the graph.
    if isinstance(left_on, list):
        left_on = (list, tuple(left_on))
    if isinstance(right_on, list):
        right_on = (list, tuple(right_on))

    kwargs["empty_index_dtype"] = meta.index.dtype
    kwargs["categorical_columns"] = meta.select_dtypes(include="category").columns

    joined = map_partitions(
        merge_chunk,
        lhs2,
        rhs2,
        meta=meta,
        enforce_metadata=False,
        transform_divisions=False,
        align_dataframes=False,
        **kwargs,
    )
    return joined
def single_partition_join(left, right, **kwargs):
    """Merge where one side has a single partition: broadcast it against
    every partition of the other side, without any shuffle."""
    # if the merge is performed on_index, divisions can be kept, otherwise the
    # new index will not necessarily correspond with the current divisions
    meta = left._meta_nonempty.merge(right._meta_nonempty, **kwargs)

    # Whether the result index comes from the left (resp. right) side's
    # index, i.e. the other side joins on that index.
    use_left = kwargs.get("right_index") or right._contains_index_name(
        kwargs.get("right_on")
    )
    use_right = kwargs.get("left_index") or left._contains_index_name(
        kwargs.get("left_on")
    )

    if len(meta) == 0:
        # Pin the empty meta's index dtype to the side driving the join.
        if use_left:
            meta.index = meta.index.astype(left.index.dtype)
        elif use_right:
            meta.index = meta.index.astype(right.index.dtype)
        else:
            meta.index = meta.index.astype("int64")
    kwargs["empty_index_dtype"] = meta.index.dtype
    kwargs["categorical_columns"] = meta.select_dtypes(include="category").columns

    # Divisions survive only when the result index matches one side's index.
    if right.npartitions == 1 and kwargs["how"] in allowed_left:
        if use_left:
            divisions = left.divisions
        elif use_right and len(right.divisions) == len(left.divisions):
            divisions = right.divisions
        else:
            divisions = [None for _ in left.divisions]
    elif left.npartitions == 1 and kwargs["how"] in allowed_right:
        if use_right:
            divisions = right.divisions
        elif use_left and len(left.divisions) == len(right.divisions):
            divisions = left.divisions
        else:
            divisions = [None for _ in right.divisions]
    else:
        raise NotImplementedError(
            "single_partition_join has no fallback for invalid calls"
        )

    joined = map_partitions(
        merge_chunk,
        left,
        right,
        meta=meta,
        enforce_metadata=False,
        transform_divisions=False,
        align_dataframes=False,
        **kwargs,
    )
    joined.divisions = tuple(divisions)
    return joined
def warn_dtype_mismatch(left, right, left_on, right_on):
    """Checks for merge column dtype mismatches and throws a warning (#4574)"""
    left_on = left_on if isinstance(left_on, list) else [left_on]
    right_on = right_on if isinstance(right_on, list) else [right_on]

    # Only check when every key is an actual column on its side.
    have_all_columns = all(col in left.columns for col in left_on) and all(
        col in right.columns for col in right_on
    )
    if not have_all_columns:
        return

    mismatches = [
        ((lo, ro), left.dtypes[lo], right.dtypes[ro])
        for lo, ro in zip(left_on, right_on)
        if not is_dtype_equal(left.dtypes[lo], right.dtypes[ro])
    ]
    if mismatches:
        table = asciitable(("Merge columns", "left dtype", "right dtype"), mismatches)
        warnings.warn(
            (
                "Merging dataframes with merge column data "
                "type mismatches: \n{}\nCast dtypes explicitly to "
                "avoid unexpected results."
            ).format(table)
        )
@wraps(pd.merge)
def merge(
    left,
    right,
    how="inner",
    on=None,
    left_on=None,
    right_on=None,
    left_index=False,
    right_index=False,
    suffixes=("_x", "_y"),
    indicator=False,
    npartitions=None,
    shuffle=None,
    max_branch=None,
    broadcast=None,
):
    # Top-level merge dispatcher: picks the cheapest strategy available —
    # indexed join, single-partition broadcast, shuffle-to-divisions,
    # broadcast join, or the general hash join.
    for o in [on, left_on, right_on]:
        if isinstance(o, _Frame):
            raise NotImplementedError(
                "Dask collections not currently allowed in merge columns"
            )
    # No keys given: default to the shared columns, or to the index when
    # the two frames share none.
    if not on and not left_on and not right_on and not left_index and not right_index:
        on = [c for c in left.columns if c in right.columns]
        if not on:
            left_index = right_index = True

    if on and not left_on and not right_on:
        left_on = right_on = on
        on = None

    supported_how = ("left", "right", "outer", "inner", "leftanti", "leftsemi")
    if how not in supported_how:
        raise ValueError(
            f"dask.dataframe.merge does not support how='{how}'. Options are: {supported_how}."
            f" Note that 'leftanti' and 'leftsemi' are only dask_cudf options."
        )

    # Fast path: both inputs are in-memory pandas objects.
    if isinstance(left, (pd.Series, pd.DataFrame)) and isinstance(
        right, (pd.Series, pd.DataFrame)
    ):
        return pd.merge(
            left,
            right,
            how=how,
            on=on,
            left_on=left_on,
            right_on=right_on,
            left_index=left_index,
            right_index=right_index,
            suffixes=suffixes,
            indicator=indicator,
        )

    # Transform pandas objects into dask.dataframe objects
    if not is_dask_collection(left):
        if right_index and left_on:  # change to join on index
            left = left.set_index(left[left_on])
            left_on = None
            left_index = True
        left = from_pandas(left, npartitions=1)  # turn into DataFrame

    if not is_dask_collection(right):
        if left_index and right_on:  # change to join on index
            right = right.set_index(right[right_on])
            right_on = None
            right_index = True
        right = from_pandas(right, npartitions=1)  # turn into DataFrame

    # Both sides are now dd.DataFrame or dd.Series objects
    merge_indexed_left = (
        left_index or left._contains_index_name(left_on)
    ) and left.known_divisions

    merge_indexed_right = (
        right_index or right._contains_index_name(right_on)
    ) and right.known_divisions

    # Both sides indexed
    if merge_indexed_left and merge_indexed_right:  # Do indexed join
        return merge_indexed_dataframes(
            left,
            right,
            how=how,
            suffixes=suffixes,
            indicator=indicator,
            left_on=left_on,
            right_on=right_on,
            left_index=left_index,
            right_index=right_index,
        )

    # Single partition on one side
    # Note that cudf supports "leftsemi" and "leftanti" joins
    elif (
        left.npartitions == 1
        and how in allowed_right
        or right.npartitions == 1
        and how in allowed_left
    ):
        return single_partition_join(
            left,
            right,
            how=how,
            right_on=right_on,
            left_on=left_on,
            left_index=left_index,
            right_index=right_index,
            suffixes=suffixes,
            indicator=indicator,
        )

    # One side is indexed, the other not
    elif (
        left_index
        and left.known_divisions
        and not right_index
        or right_index
        and right.known_divisions
        and not left_index
    ):
        left_empty = left._meta_nonempty
        right_empty = right._meta_nonempty
        meta = left_empty.merge(
            right_empty,
            how=how,
            on=on,
            left_on=left_on,
            right_on=right_on,
            left_index=left_index,
            right_index=right_index,
            suffixes=suffixes,
            indicator=indicator,
        )
        categorical_columns = meta.select_dtypes(include="category").columns
        # Shuffle the unindexed side into the indexed side's divisions,
        # then merge partition-wise.
        if merge_indexed_left and left.known_divisions:
            right = rearrange_by_divisions(
                right, right_on, left.divisions, max_branch, shuffle=shuffle
            )
            left = left.clear_divisions()
        elif merge_indexed_right and right.known_divisions:
            left = rearrange_by_divisions(
                left, left_on, right.divisions, max_branch, shuffle=shuffle
            )
            right = right.clear_divisions()
        return map_partitions(
            merge_chunk,
            left,
            right,
            meta=meta,
            how=how,
            on=on,
            left_on=left_on,
            right_on=right_on,
            left_index=left_index,
            right_index=right_index,
            suffixes=suffixes,
            indicator=indicator,
            empty_index_dtype=meta.index.dtype,
            categorical_columns=categorical_columns,
        )
    # Catch all hash join
    else:
        if left_on and right_on:
            warn_dtype_mismatch(left, right, left_on, right_on)

        # Check if we should use a broadcast_join
        # See note on `broadcast_bias` below.
        broadcast_bias = 0.5
        if isinstance(broadcast, float):
            broadcast_bias = float(broadcast)
            broadcast = None
        elif not isinstance(broadcast, bool) and broadcast is not None:
            # Let's be strict about the `broadcast` type to
            # avoid arbitrarily casting int to float or bool.
            raise ValueError(
                f"Optional `broadcast` argument must be float or bool."
                f"Type={type(broadcast)} is not supported."
            )
        bcast_side = "left" if left.npartitions < right.npartitions else "right"
        n_small = min(left.npartitions, right.npartitions)
        n_big = max(left.npartitions, right.npartitions)
        if (
            shuffle == "tasks"
            and how in ("inner", "left", "right")
            and how != bcast_side
            and broadcast is not False
        ):
            # Note on `broadcast_bias`:
            # We can expect the broadcast merge to be competitive with
            # the shuffle merge when the number of partitions in the
            # smaller collection is less than the logarithm of the number
            # of partitions in the larger collection. By default, we add
            # a small preference for the shuffle-based merge by multiplying
            # the log result by a 0.5 scaling factor. We call this factor
            # the `broadcast_bias`, because a larger number will make Dask
            # more likely to select the `broadcast_join` code path. If
            # the user specifies a floating-point value for the `broadcast`
            # kwarg, that value will be used as the `broadcast_bias`.
            if broadcast or (n_small < math.log2(n_big) * broadcast_bias):
                return broadcast_join(
                    left,
                    left.index if left_index else left_on,
                    right,
                    right.index if right_index else right_on,
                    how,
                    npartitions,
                    suffixes,
                    indicator=indicator,
                )

        return hash_join(
            left,
            left.index if left_index else left_on,
            right,
            right.index if right_index else right_on,
            how,
            npartitions,
            suffixes,
            shuffle=shuffle,
            indicator=indicator,
            max_branch=max_branch,
        )
###############################################################
# ASOF Join
###############################################################
def most_recent_tail(left, right):
    """Reduce step: keep the last row of ``right``; when ``right`` is empty,
    carry ``left`` forward unchanged."""
    return left if len(right.index) == 0 else right.tail(1)
def most_recent_tail_summary(left, right, by=None):
    """Reduce step: stack both frames and keep, per ``by`` group, only the
    most recent (last) row."""
    combined = pd.concat([left, right])
    return combined.drop_duplicates(subset=by, keep="last")
def compute_tails(ddf, by=None):
    """For each partition, returns the last row of the most recent nonempty
    partition.
    """
    empty = ddf._meta.iloc[0:0]
    if by is None:
        return prefix_reduction(most_recent_tail, ddf, empty)
    return prefix_reduction(most_recent_tail_summary, ddf, empty, by=by)
def most_recent_head(left, right):
    """Reduce step: keep the first row of ``left``; when ``left`` is empty,
    carry ``right`` forward unchanged."""
    return right if len(left.index) == 0 else left.head(1)
def most_recent_head_summary(left, right, by=None):
    """Reduce step: stack both frames and keep, per ``by`` group, only the
    earliest (first) row."""
    combined = pd.concat([left, right])
    return combined.drop_duplicates(subset=by, keep="first")
def compute_heads(ddf, by=None):
    """For each partition, returns the first row of the next nonempty
    partition.
    """
    empty = ddf._meta.iloc[0:0]
    if by is None:
        return suffix_reduction(most_recent_head, ddf, empty)
    return suffix_reduction(most_recent_head_summary, ddf, empty, by=by)
def pair_partitions(L, R):
"""Returns which partitions to pair for the merge_asof algorithm and the
bounds on which to split them up
"""
result = []
n, m = len(L) - 1, len(R) - 1
i, j = 0, -1
while j + 1 < m and R[j + 1] <= L[i]:
j += 1
J = []
while i < n:
partition = max(0, min(m - 1, j))
lower = R[j] if j >= 0 and R[j] > L[i] else None
upper = (
R[j + 1]
if j + 1 < m
and (R[j + 1] < L[i + 1] or R[j + 1] == L[i + 1] and i == n - 1)
else None
)
J.append((partition, lower, upper))
i1 = i + 1 if j + 1 == m or (i + 1 < n and R[j + 1] >= L[i + 1]) else i
j1 = j + 1 if i + 1 == n or (j + 1 < m and L[i + 1] >= R[j + 1]) else j
if i1 > i:
result.append(J)
J = []
elif i == n - 1 and R[j1] > L[n]:
result.append(J)
break
i, j = i1, j1
return result
def merge_asof_padded(left, right, prev=None, next=None, **kwargs):
    """merge_asof but potentially adding rows to the beginning/end of right"""
    pieces = []
    if prev is not None:
        pieces.append(prev)
    pieces.append(right)
    if next is not None:
        pieces.append(next)

    result = pd.merge_asof(left, pd.concat(pieces), **kwargs)
    # pd.merge_asof() resets index name (and dtype) if left is empty df
    if result.index.name != left.index.name:
        result.index.name = left.index.name
    return result
def get_unsorted_columns(frames):
    """
    Determine the unsorted column order.

    This should match the output of concat([frames], sort=False)
    """
    combined = pd.concat([frame._meta for frame in frames]).columns
    indexers = [combined.get_indexer_for(frame.columns) for frame in frames]
    # First-seen order of each column position across all frames.
    positions = pd.unique(np.concatenate(indexers))
    return combined.take(positions)
def merge_asof_indexed(left, right, **kwargs):
    """merge_asof of two dask frames already joined on their (sorted) index,
    built as an explicit task graph over paired partitions."""
    dsk = dict()
    name = "asof-join-indexed-" + tokenize(left, right, **kwargs)
    meta = pd.merge_asof(left._meta_nonempty, right._meta_nonempty, **kwargs)

    if all(map(pd.isnull, left.divisions)):
        # results in an empty df that looks like ``meta``
        return from_pandas(meta.iloc[len(meta) :], npartitions=left.npartitions)
    if all(map(pd.isnull, right.divisions)):
        # results in an df that looks like ``left`` with nulls for
        # all ``right.columns``
        return map_partitions(
            pd.merge_asof,
            left,
            right=right,
            left_index=True,
            right_index=True,
            meta=meta,
        )

    dependencies = [left, right]
    tails = heads = None
    # "backward"/"nearest" need the last row of preceding right partitions;
    # "forward"/"nearest" need the first row of following ones, so matches
    # near partition boundaries are not missed.
    if kwargs["direction"] in ["backward", "nearest"]:
        tails = compute_tails(right, by=kwargs["right_by"])
        dependencies.append(tails)
    if kwargs["direction"] in ["forward", "nearest"]:
        heads = compute_heads(right, by=kwargs["right_by"])
        dependencies.append(heads)

    for i, J in enumerate(pair_partitions(left.divisions, right.divisions)):
        frames = []
        for j, lower, upper in J:
            # Slice the left partition to the [lower, upper) range covered
            # by right partition j, then merge with padding rows attached.
            slice = (methods.boundary_slice, (left._name, i), lower, upper, False)
            tail = (tails._name, j) if tails is not None else None
            head = (heads._name, j) if heads is not None else None
            frames.append(
                (
                    apply,
                    merge_asof_padded,
                    [slice, (right._name, j), tail, head],
                    kwargs,
                )
            )
        dsk[(name, i)] = (methods.concat, frames)

    graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
    result = new_dd_object(graph, name, meta, left.divisions)
    return result
@wraps(pd.merge_asof)
def merge_asof(
    left,
    right,
    on=None,
    left_on=None,
    right_on=None,
    left_index=False,
    right_index=False,
    by=None,
    left_by=None,
    right_by=None,
    suffixes=("_x", "_y"),
    tolerance=None,
    allow_exact_matches=True,
    direction="backward",
):
    # Dask version of pd.merge_asof: normalizes key arguments, moves both
    # inputs to sorted-index form, and dispatches to merge_asof_indexed.
    if direction not in ["backward", "forward", "nearest"]:
        # FIX: original message was missing the comma between 'backward'
        # and 'forward'.
        raise ValueError(
            "Invalid merge_asof direction. Choose from 'backward',"
            " 'forward', or 'nearest'"
        )

    kwargs = {
        "on": on,
        "left_on": left_on,
        "right_on": right_on,
        "left_index": left_index,
        "right_index": right_index,
        "by": by,
        "left_by": left_by,
        "right_by": right_by,
        "suffixes": suffixes,
        "tolerance": tolerance,
        "allow_exact_matches": allow_exact_matches,
        "direction": direction,
    }

    if left is None or right is None:
        raise ValueError("Cannot merge_asof on None")

    # Fast path: both inputs are in-memory pandas frames.
    if isinstance(left, pd.DataFrame) and isinstance(right, pd.DataFrame):
        return pd.merge_asof(left, right, **kwargs)

    if on is not None:
        if left_on is not None or right_on is not None:
            raise ValueError(
                "Can only pass argument 'on' OR 'left_on' and 'right_on', not a "
                "combination of both."
            )
        left_on = right_on = on
    for o in [left_on, right_on]:
        if isinstance(o, _Frame):
            raise NotImplementedError(
                "Dask collections not currently allowed in merge columns"
            )

    if not is_dask_collection(left):
        left = from_pandas(left, npartitions=1)
    # Remember how to restore the original index after an on-column merge.
    ixname = ixcol = divs = None
    if left_on is not None:
        if right_index:
            divs = left.divisions if left.known_divisions else None
            ixname = left.index.name
            left = left.reset_index()
            ixcol = left.columns[0]
        left = left.set_index(left_on, sorted=True)

    if not is_dask_collection(right):
        right = from_pandas(right, npartitions=1)
    if right_on is not None:
        right = right.set_index(right_on, drop=(left_on == right_on), sorted=True)

    if by is not None:
        if left_by is not None or right_by is not None:
            raise ValueError(
                "Can only pass argument 'by' OR 'left_by' and 'right_by', not a combination of both."
            )
        kwargs["left_by"] = kwargs["right_by"] = by
    # FIX: these messages previously said "left_on and right_on" although
    # the checks are about left_by/right_by.
    if left_by is None and right_by is not None:
        raise ValueError("Must specify both left_by and right_by if one is specified.")
    if left_by is not None and right_by is None:
        raise ValueError("Must specify both left_by and right_by if one is specified.")

    # Both sides are now indexed on the merge key.
    del kwargs["on"], kwargs["left_on"], kwargs["right_on"], kwargs["by"]
    kwargs["left_index"] = kwargs["right_index"] = True

    if not left.known_divisions or not right.known_divisions:
        raise ValueError("merge_asof input must be sorted!")

    result = merge_asof_indexed(left, right, **kwargs)
    if left_on or right_on:
        result = result.reset_index()
        if ixcol is not None:
            # Restore the original index (and divisions, when known).
            if divs is not None:
                result = result.set_index(ixcol, sorted=True, divisions=divs)
            else:
                result = result.map_partitions(M.set_index, ixcol)
            result = result.map_partitions(M.rename_axis, ixname)

    return result
###############################################################
# Concat
###############################################################
def concat_and_check(dfs, ignore_order=False):
    """Concatenate frames along axis=1, requiring equal partition lengths.

    Raises ValueError if the partitions do not all have the same number of
    rows, since positional (axis=1) concatenation would silently misalign
    rows otherwise.
    """
    distinct_lengths = {len(frame) for frame in dfs}
    if len(distinct_lengths) != 1:
        raise ValueError("Concatenated DataFrames of different lengths")
    return methods.concat(dfs, axis=1, ignore_order=ignore_order)
def concat_unindexed_dataframes(dfs, ignore_order=False, **kwargs):
    """Concatenate collections along axis=1 by pairing partitions positionally.

    Assumes every input has the same number of partitions; each output
    partition is the column-wise concat of the i-th partition of each input
    (length-checked at runtime by ``concat_and_check``).
    """
    name = "concat-" + tokenize(*dfs)
    npartitions = dfs[0].npartitions
    dsk = {}
    for i in range(npartitions):
        partition_keys = [(frame._name, i) for frame in dfs]
        dsk[(name, i)] = (concat_and_check, partition_keys, ignore_order)
    kwargs["ignore_order"] = ignore_order
    meta = methods.concat([frame._meta for frame in dfs], axis=1, **kwargs)
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=dfs)
    return new_dd_object(graph, name, meta, dfs[0].divisions)
def concat_indexed_dataframes(dfs, axis=0, join="outer", ignore_order=False, **kwargs):
    """Concatenate indexed dataframes together along the index.

    Partitions are first aligned on divisions, missing pieces are filled with
    empty (category-stripped) frames, and each aligned group is concatenated.
    """
    # Only warn-filter the meta computation for axis=1 concatenation.
    warn_on_meta = axis != 0
    kwargs["ignore_order"] = ignore_order
    meta = methods.concat(
        [frame._meta for frame in dfs],
        axis=axis,
        join=join,
        filter_warning=warn_on_meta,
        **kwargs,
    )
    empties = [strip_unknown_categories(frame._meta) for frame in dfs]
    dfs2, divisions, parts = align_partitions(*dfs)
    name = "concat-indexed-" + tokenize(join, *dfs)
    # Substitute an empty frame wherever alignment produced no partition.
    filled_parts = []
    for part in parts:
        filled_parts.append(
            [empty if piece is None else piece for piece, empty in zip(part, empties)]
        )
    uniform = False
    filter_warning = True
    dsk = {
        (name, i): (methods.concat, part, axis, join, uniform, filter_warning, kwargs)
        for i, part in enumerate(filled_parts)
    }
    for frame in dfs2:
        dsk.update(frame.dask)
    return new_dd_object(dsk, name, meta, divisions)
def stack_partitions(dfs, divisions, join="outer", ignore_order=False, **kwargs):
    """Concatenate partitions on axis=0 by doing a simple stack.

    Each input collection's partitions are appended in order to form the
    output; ``divisions`` is supplied by the caller and passed through
    unchanged. Partitions whose schema does not match the combined ``meta``
    are concatenated with an empty frame of the right schema to coerce them.
    """
    # Use _meta_nonempty as pandas.concat will incorrectly cast float to datetime
    # for empty data frames. See https://github.com/pandas-dev/pandas/issues/32934.
    kwargs.update({"ignore_order": ignore_order})
    meta = make_meta(
        methods.concat(
            [df._meta_nonempty for df in dfs],
            join=join,
            filter_warning=False,
            **kwargs,
        )
    )
    # Empty frame with meta's schema, used to coerce mismatched partitions.
    empty = strip_unknown_categories(meta)
    name = f"concat-{tokenize(*dfs)}"
    dsk = {}
    i = 0  # running output-partition counter across all inputs
    astyped_dfs = []
    for df in dfs:
        # dtypes of all dfs need to be coherent
        # refer to https://github.com/dask/dask/issues/4685
        # and https://github.com/dask/dask/issues/5968.
        if is_dataframe_like(df):
            shared_columns = df.columns.intersection(meta.columns)
            # Columns whose dtype disagrees with meta (categoricals excluded:
            # category unions are handled by the concat machinery itself).
            needs_astype = [
                col
                for col in shared_columns
                if df[col].dtype != meta[col].dtype
                and not is_categorical_dtype(df[col].dtype)
            ]
            if needs_astype:
                # Copy to avoid mutating the caller inplace
                df = df.copy()
                df[needs_astype] = df[needs_astype].astype(meta[needs_astype].dtypes)
        if is_series_like(df) and is_series_like(meta):
            if not df.dtype == meta.dtype and not is_categorical_dtype(df.dtype):
                df = df.astype(meta.dtype)
        else:
            pass  # TODO: there are other non-covered cases here
        astyped_dfs.append(df)
        # An error will be raised if the schemas or categories don't match. In
        # this case we need to pass along the meta object to transform each
        # partition, so they're all equivalent.
        try:
            df._meta == meta
            match = True
        except (ValueError, TypeError):
            match = False
        filter_warning = True
        uniform = False
        for key in df.__dask_keys__():
            if match:
                # Schema already matches: pass the partition through untouched.
                dsk[(name, i)] = key
            else:
                # Coerce the partition by concatenating it with an empty frame
                # carrying the target schema.
                dsk[(name, i)] = (
                    apply,
                    methods.concat,
                    [[empty, key], 0, join, uniform, filter_warning],
                    kwargs,
                )
            i += 1
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=astyped_dfs)
    return new_dd_object(graph, name, meta, divisions)
def concat(
    dfs,
    axis=0,
    join="outer",
    interleave_partitions=False,
    ignore_unknown_divisions=False,
    ignore_order=False,
    **kwargs,
):
    """Concatenate DataFrames along rows.

    - When axis=0 (default), concatenate DataFrames row-wise:

      - If all divisions are known and ordered, concatenate DataFrames keeping
        divisions. When divisions are not ordered, specifying
        interleave_partitions=True allows concatenating divisions each by each.

      - If any of division is unknown, concatenate DataFrames resetting its
        division to unknown (None)

    - When axis=1, concatenate DataFrames column-wise:

      - Allowed if all divisions are known.

      - If any of division is unknown, it raises ValueError.

    Parameters
    ----------
    dfs : list
        List of dask.DataFrames to be concatenated
    axis : {0, 1, 'index', 'columns'}, default 0
        The axis to concatenate along
    join : {'inner', 'outer'}, default 'outer'
        How to handle indexes on other axis
    interleave_partitions : bool, default False
        Whether to concatenate DataFrames ignoring its order. If True, every
        divisions are concatenated each by each.
    ignore_unknown_divisions : bool, default False
        By default a warning is raised if any input has unknown divisions.
        Set to True to disable this warning.
    ignore_order : bool, default False
        Whether to ignore order when doing the union of categoricals.

    Notes
    -----
    This differs from ``pd.concat`` when concatenating Categoricals
    with different categories. Pandas currently coerces those to objects
    before concatenating. Coercing to objects is very expensive for large
    arrays, so dask preserves the Categoricals by taking the union of
    the categories.

    Examples
    --------
    If all divisions are known and ordered, divisions are kept.

    >>> import dask.dataframe as dd
    >>> a # doctest: +SKIP
    dd.DataFrame<x, divisions=(1, 3, 5)>
    >>> b # doctest: +SKIP
    dd.DataFrame<y, divisions=(6, 8, 10)>
    >>> dd.concat([a, b]) # doctest: +SKIP
    dd.DataFrame<concat-..., divisions=(1, 3, 6, 8, 10)>

    Unable to concatenate if divisions are not ordered.

    >>> a # doctest: +SKIP
    dd.DataFrame<x, divisions=(1, 3, 5)>
    >>> b # doctest: +SKIP
    dd.DataFrame<y, divisions=(2, 3, 6)>
    >>> dd.concat([a, b]) # doctest: +SKIP
    ValueError: All inputs have known divisions which cannot be concatenated
    in order. Specify interleave_partitions=True to ignore order

    Specify interleave_partitions=True to ignore the division order.

    >>> dd.concat([a, b], interleave_partitions=True) # doctest: +SKIP
    dd.DataFrame<concat-..., divisions=(1, 2, 3, 5, 6)>

    If any of division is unknown, the result division will be unknown

    >>> a # doctest: +SKIP
    dd.DataFrame<x, divisions=(None, None)>
    >>> b # doctest: +SKIP
    dd.DataFrame<y, divisions=(1, 4, 10)>
    >>> dd.concat([a, b]) # doctest: +SKIP
    dd.DataFrame<concat-..., divisions=(None, None, None, None)>

    By default concatenating with unknown divisions will raise a warning.
    Set ``ignore_unknown_divisions=True`` to disable this:

    >>> dd.concat([a, b], ignore_unknown_divisions=True)# doctest: +SKIP
    dd.DataFrame<concat-..., divisions=(None, None, None, None)>

    Different categoricals are unioned

    >>> dd.concat([
    ...     dd.from_pandas(pd.Series(['a', 'b'], dtype='category'), 1),
    ...     dd.from_pandas(pd.Series(['a', 'c'], dtype='category'), 1),
    ... ], interleave_partitions=True).dtype
    CategoricalDtype(categories=['a', 'b', 'c'], ordered=False)
    """
    if not isinstance(dfs, list):
        raise TypeError("dfs must be a list of DataFrames/Series objects")
    if len(dfs) == 0:
        raise ValueError("No objects to concatenate")
    if len(dfs) == 1:
        # Single input: nothing to concatenate; axis=1 promotes Series to frame.
        if axis == 1 and isinstance(dfs[0], Series):
            return dfs[0].to_frame()
        else:
            return dfs[0]
    if join not in ("inner", "outer"):
        raise ValueError("'join' must be 'inner' or 'outer'")
    axis = DataFrame._validate_axis(axis)
    # Inputs that are already dask collections (as opposed to pandas objects).
    dasks = [df for df in dfs if isinstance(df, _Frame)]
    dfs = _maybe_from_pandas(dfs)
    if axis == 1:
        if all(df.known_divisions for df in dasks):
            return concat_indexed_dataframes(
                dfs, axis=axis, join=join, ignore_order=ignore_order, **kwargs
            )
        elif (
            len(dasks) == len(dfs)
            and all(not df.known_divisions for df in dfs)
            and len({df.npartitions for df in dasks}) == 1
        ):
            # All inputs are dask collections with unknown divisions but a
            # matching partition count: concatenate positionally.
            if not ignore_unknown_divisions:
                warnings.warn(
                    "Concatenating dataframes with unknown divisions.\n"
                    "We're assuming that the indices of each dataframes"
                    " are \n aligned. This assumption is not generally "
                    "safe."
                )
            return concat_unindexed_dataframes(dfs, ignore_order=ignore_order, **kwargs)
        else:
            raise ValueError(
                "Unable to concatenate DataFrame with unknown "
                "division specifying axis=1"
            )
    else:
        if all(df.known_divisions for df in dasks):
            # each DataFrame's division must be greater than previous one
            if all(
                dfs[i].divisions[-1] < dfs[i + 1].divisions[0]
                for i in range(len(dfs) - 1)
            ):
                divisions = []
                for df in dfs[:-1]:
                    # remove last to concatenate with next
                    divisions += df.divisions[:-1]
                divisions += dfs[-1].divisions
                return stack_partitions(
                    dfs, divisions, join=join, ignore_order=ignore_order, **kwargs
                )
            elif interleave_partitions:
                return concat_indexed_dataframes(
                    dfs, join=join, ignore_order=ignore_order, **kwargs
                )
            else:
                # Divisions overlap and interleaving was not requested:
                # fall back to stacking with unknown divisions.
                divisions = [None] * (sum(df.npartitions for df in dfs) + 1)
                return stack_partitions(
                    dfs, divisions, join=join, ignore_order=ignore_order, **kwargs
                )
        else:
            # At least one input has unknown divisions: result is unknown too.
            divisions = [None] * (sum(df.npartitions for df in dfs) + 1)
            return stack_partitions(
                dfs, divisions, join=join, ignore_order=ignore_order, **kwargs
            )
def _contains_index_name(df, columns_or_index):
"""
Test whether ``columns_or_index`` contains a reference
to the index of ``df
This is the local (non-collection) version of
``dask.core.DataFrame._contains_index_name``.
"""
def _is_index_level_reference(x, key):
return (
x.index.name is not None
and (np.isscalar(key) or isinstance(key, tuple))
and key == x.index.name
and key not in getattr(x, "columns", ())
)
if isinstance(columns_or_index, list):
return any(_is_index_level_reference(df, n) for n in columns_or_index)
else:
return _is_index_level_reference(df, columns_or_index)
def _select_columns_or_index(df, columns_or_index):
    """
    Return a DataFrame with one column per entry of ``columns_or_index``.

    If the index level is referenced, it is included as a column named
    ``_index``. This is the local (non-collection) version of
    ``dask.core.DataFrame._select_columns_or_index``.
    """

    def _is_column_label(frame, key):
        return (np.isscalar(key) or isinstance(key, tuple)) and key in frame.columns

    # Normalize to a list of labels
    if not isinstance(columns_or_index, list):
        columns_or_index = [columns_or_index]

    labels = [key for key in columns_or_index if _is_column_label(df, key)]
    selected_df = df[labels]
    if _contains_index_name(df, columns_or_index):
        # Index name was included
        selected_df = selected_df.assign(_index=df.index)
    return selected_df
def _split_partition(df, on, nsplits):
    """
    Split-by-hash a DataFrame into `nsplits` groups.

    Hashing will be performed on the columns or index specified by `on`.
    """
    if isinstance(on, bytes):
        # `on` may arrive pickled (e.g. when shipped through the graph)
        on = pickle.loads(on)

    if isinstance(on, str) or pd.api.types.is_list_like(on):
        # If `on` names only real columns, hash/split by those columns directly.
        on = [on] if isinstance(on, str) else list(on)
        if set(on).issubset(set(df.columns)):
            ind = hash_object_dispatch(df[on], index=False) % nsplits
            return group_split_dispatch(df, ind.values, nsplits, ignore_index=False)

    # We are not joining (purely) on columns. Need to add a temporary
    # "_partitions" column to perform the split.
    if not isinstance(on, _Frame):
        on = _select_columns_or_index(df, on)
    df2 = df.assign(_partitions=partitioning_index(on, nsplits))
    return shuffle_group(
        df2,
        ["_partitions"],
        0,
        nsplits,
        nsplits,
        False,
        nsplits,
    )
def _concat_wrapper(dfs):
    """Concatenate partitions, dropping the temporary "_partitions" column."""
    result = _concat(dfs, False)
    if "_partitions" in result.columns:
        del result["_partitions"]
    return result
def _merge_chunk_wrapper(*args, **kwargs):
    """Call ``merge_chunk`` after unpickling any byte-serialized kwargs."""
    unpickled = {}
    for key, value in kwargs.items():
        unpickled[key] = pickle.loads(value) if isinstance(value, bytes) else value
    return merge_chunk(*args, **unpickled)
def broadcast_join(
    lhs,
    left_on,
    rhs,
    right_on,
    how="inner",
    npartitions=None,
    suffixes=("_x", "_y"),
    shuffle=None,
    indicator=False,
    parts_out=None,
):
    """Join two DataFrames on particular columns by broadcasting.

    This broadcasts the partitions of the smaller DataFrame to each
    partition of the larger DataFrame, joins each partition pair,
    and then concatenates the new data for each output partition.

    Parameters
    ----------
    lhs, rhs : dask collections
        Left and right inputs; the one with fewer partitions is broadcast.
    left_on, right_on
        Join keys; an ``Index`` instance means "join on the index".
    how : {'inner', 'left', 'right'}
        Join type; 'outer' is not supported by the broadcast algorithm.
    npartitions : int, optional
        If given, repartition the larger collection to this many partitions
        before merging.
    parts_out : optional
        Restricts which output partitions are materialized (passed through
        to ``BroadcastJoinLayer``).
    """
    if npartitions:
        # Repartition the larger collection before the merge
        if lhs.npartitions < rhs.npartitions:
            rhs = rhs.repartition(npartitions=npartitions)
        else:
            lhs = lhs.repartition(npartitions=npartitions)
    if how not in ("inner", "left", "right"):
        # Broadcast algorithm cannot handle an "outer" join
        raise ValueError(
            "Only 'inner', 'left' and 'right' broadcast joins are supported."
        )
    if how == "left" and lhs.npartitions < rhs.npartitions:
        # Must broadcast rhs for a "left" broadcast join
        raise ValueError("'left' broadcast join requires rhs broadcast.")
    if how == "right" and rhs.npartitions <= lhs.npartitions:
        # Must broadcast lhs for a "right" broadcast join
        raise ValueError("'right' broadcast join requires lhs broadcast.")
    # TODO: It *may* be beneficial to perform the hash
    # split for "inner" join as well (even if it is not
    # technically needed for correctness). More testing
    # is needed here.
    if how != "inner":
        # Shuffle to-be-broadcasted side by hash. This
        # means that we will need to perform a local
        # shuffle and split on each partition of the
        # "other" collection (with the same hashing
        # approach) to ensure the correct rows are
        # joined by `merge_chunk`. The local hash and
        # split of lhs is in `_split_partition`.
        if lhs.npartitions < rhs.npartitions:
            lhs2 = shuffle_func(
                lhs,
                left_on,
                shuffle="tasks",
            )
            lhs_name = lhs2._name
            lhs_dep = lhs2
            rhs_name = rhs._name
            rhs_dep = rhs
        else:
            rhs2 = shuffle_func(
                rhs,
                right_on,
                shuffle="tasks",
            )
            lhs_name = lhs._name
            lhs_dep = lhs
            rhs_name = rhs2._name
            rhs_dep = rhs2
    else:
        # "inner" join: no pre-shuffle needed; use the inputs as-is.
        lhs_name = lhs._name
        lhs_dep = lhs
        rhs_name = rhs._name
        rhs_dep = rhs
    # Translate Index join keys into left_index/right_index flags.
    if isinstance(left_on, Index):
        left_on = None
        left_index = True
    else:
        left_index = False
    if isinstance(right_on, Index):
        right_on = None
        right_index = True
    else:
        right_index = False
    merge_kwargs = dict(
        how=how,
        left_on=left_on,
        right_on=right_on,
        left_index=left_index,
        right_index=right_index,
        suffixes=suffixes,
        indicator=indicator,
    )
    # dummy result
    meta = lhs._meta_nonempty.merge(rhs._meta_nonempty, **merge_kwargs)
    merge_kwargs["empty_index_dtype"] = meta.index.dtype
    merge_kwargs["categorical_columns"] = meta.select_dtypes(include="category").columns
    # Assume the output partitions/divisions
    # should correspond to the collection that
    # is NOT broadcasted.
    if lhs.npartitions < rhs.npartitions:
        npartitions = rhs.npartitions
        divisions = rhs.divisions
        _index_names = set(rhs._meta_nonempty.index.names)
    else:
        npartitions = lhs.npartitions
        divisions = lhs.divisions
        _index_names = set(lhs._meta_nonempty.index.names)
    # Cannot preserve divisions if the index is lost
    if _index_names != set(meta.index.names):
        divisions = [None] * (npartitions + 1)
    token = tokenize(lhs, rhs, npartitions, **merge_kwargs)
    name = "bcast-join-" + token
    # Build the join as a dedicated graph layer rather than materializing
    # per-partition tasks eagerly.
    broadcast_join_layer = BroadcastJoinLayer(
        name,
        npartitions,
        lhs_name,
        lhs.npartitions,
        rhs_name,
        rhs.npartitions,
        parts_out=parts_out,
        **merge_kwargs,
    )
    graph = HighLevelGraph.from_collections(
        name,
        broadcast_join_layer,
        dependencies=[lhs_dep, rhs_dep],
    )
    return new_dd_object(graph, name, meta, divisions)
def _recursive_pairwise_outer_join(
dataframes_to_merge, on, lsuffix, rsuffix, npartitions, shuffle
):
"""
Schedule the merging of a list of dataframes in a pairwise method. This is a recursive function that results
in a much more efficient scheduling of merges than a simple loop
from:
[A] [B] [C] [D] -> [AB] [C] [D] -> [ABC] [D] -> [ABCD]
to:
[A] [B] [C] [D] -> [AB] [CD] -> [ABCD]
Note that either way, n-1 merges are still required, but using a pairwise reduction it can be completed in parallel.
:param dataframes_to_merge: A list of Dask dataframes to be merged together on their index
:return: A single Dask Dataframe, comprised of the pairwise-merges of all provided dataframes
"""
number_of_dataframes_to_merge = len(dataframes_to_merge)
merge_options = {
"on": on,
"lsuffix": lsuffix,
"rsuffix": rsuffix,
"npartitions": npartitions,
"shuffle": shuffle,
}
# Base case 1: just return the provided dataframe and merge with `left`
if number_of_dataframes_to_merge == 1:
return dataframes_to_merge[0]
# Base case 2: merge the two provided dataframe to be merged with `left`
if number_of_dataframes_to_merge == 2:
merged_ddf = dataframes_to_merge[0].join(
dataframes_to_merge[1], how="outer", **merge_options
)
return merged_ddf
# Recursive case: split the list of dfs into two ~even sizes and continue down
else:
middle_index = number_of_dataframes_to_merge // 2
merged_ddf = _recursive_pairwise_outer_join(
[
_recursive_pairwise_outer_join(
dataframes_to_merge[:middle_index], **merge_options
),
_recursive_pairwise_outer_join(
dataframes_to_merge[middle_index:], **merge_options
),
],
**merge_options,
)
return merged_ddf
| {
"alphanum_fraction": 0.6007355715,
"author": null,
"avg_line_length": 33.2413793103,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "169c1c76b670702cd985b6c2681318522a53ceb6",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "fc1cea9cdb2ea31348204aa51e4f6f7327a2af33",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "scharlottej13/dask",
"max_forks_repo_path": "dask/dataframe/multi.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "56eeb06103efbf36cc73e9405bcec42a5b92515a",
"max_issues_repo_issues_event_max_datetime": "2021-12-02T20:42:37.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-12-01T20:16:41.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "SultanOrazbayev/dask",
"max_issues_repo_path": "dask/dataframe/multi.py",
"max_line_length": 120,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "56eeb06103efbf36cc73e9405bcec42a5b92515a",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "SultanOrazbayev/dask",
"max_stars_repo_path": "dask/dataframe/multi.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 12369,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 53020
} |
In order to give a better sense of the approach to reliability analysis and
optimization presented in \sref{chaos-reliability-analysis} and
\sref{chaos-optimization}, we consider a concrete application, meaning that we
specify the uncertain parameters and discuss the accompanying computations. This
application is also utilized for the quantitative evaluation of our technique
presented in the next section, \sref{chaos-optimization-results}.
\subsection{\problemtitle}
Assume that the structure of the reliability model $R(\cdot | \vg)$ of the
system at hand is the one given in \eref{reliability-model} where each
individual reliability function $R_i(\cdot | \vg_i)$ is the one shown in
\eref{weibull-reliability} with its own parameters $\scale_i$ and $\shape_i$.
During each iteration, the temperature of processing element~$i$ exhibits \nk{i}
cycles. Each cycle generally has different characteristics and hence causes a
different amount of damage to the processing element. This aspect is accounted
for by adjusting $\scale_i$ as shown in \eref{thermal-cycling-scale}. The shape
parameter $\shape_i$ is known to be indifferent to temperature \cite{chang2006}.
For simplicity, assume that $\shape_i$ does not depend on process parameters
either, and that $\shape_i = \shape$ for $i = \range{1}{\np}$.
Under the above assumptions, \rref{weibull-homogeneity} applies, and the
lifetime $\life: \Omega \to \real$ of the system has a Weibull distribution as
follows:
\[
\life | (\scale, \shape) \sim \mathrm{Weibull}(\scale, \shape)
\]
where $\scale$ is the one given in \rref{weibull-homogeneity} combined with
\eref{thermal-cycling-scale}. Even though the reliability model has two
parameters, only one of them is uncertain to the designer, namely $\scale$.
Therefore, we treat the model as if it was parameterized only by $\scale$. The
shape parameter $\shape$ is assumed to be implicitly given.
In the case of reliability analysis under process variation without any
accompanying exploration of the design space, one can proceed to constructing a
\ac{PC} expansion of $\scale$. Having obtained this lightweight surrogate, the
reliability of the system can be studied from various perspectives. In the
current scenario, however, the quantity of interest \g is the one given in
\eref{chaos-optimization-quantity}, since it allows for evaluating the objective
function and constraints defined in \eref{chaos-optimization-objective} and
\eref{chaos-optimization-constraints}, respectively. In
\eref{chaos-optimization-quantity}, the component denoted by \life stands for
the parameterization of the reliability model; consequently, it is $\scale$ in
the illustrative application developed in this section.
Let us now turn our attention to the uncertain parameters \vu of the problem
being addressed. We focus on two crucial process parameters: the effective
channel length and gate oxide thickness. Each processing element is then
assigned two random variables corresponding to the two process parameters, which
means that $\nu = 2 \np$ in the current example; see also \sref{chaos-problem}.
\begin{remark}
The variability in a process parameter at a spatial location can be modeled as a
composition of several parts---such as inter-lot, inter-wafer, inter-die, and
intra-die variations---which is demonstrated in
\sref{chaos-transient-application}. In this section, we illustrate a different
approach. From a mathematical perspective, it is sufficient to consider only one
random variable per location with an adequate distribution and correlations with
respect to the other locations.
\end{remark}
Based on \sref{chaos-formulation}, the parameters \vu are assumed to be given as
a set of marginal distributions and a correlation matrix denoted by
$\set{F_i}_{i = 1}^\nu$ and $\correlation{\vu}$, respectively. Note that the
number of distinct marginals is only two, since \np components of \vu correspond
to the same process parameter.
Both process parameters, the effective channel length and gate oxide thickness,
correspond to Euclidean distances; they take values on bounded intervals of the
positive half of the real line. Consequently, similarly to
\sref{chaos-transient-application}, we model the two process parameters using
the four-parameter family of beta distributions shown in
\eref{beta-distribution}. Without loss of generality, the parameters are assumed
to be independent of each other, and the correlations between those elements of
\vu that correspond to the same process parameter are assumed to be given by the
correlation function shown in \eref{bayes-correlation}.
The process parameters manifest themselves in the calculations associated with
the power model shown in \eref{chaos-power-model-bulk} through static power.
Analogously to \sref{chaos-transient-application}, the modeling here is based on
\up{SPICE} simulations of a series of \up{CMOS} inverters. The inverters are
taken from the 45-nm open cell library by NanGate \cite{nangate} and configured
according to the 45-nm \up{PTM} \up{HP} model \cite{ptm}. The simulations are
performed on a fine-grained and sufficiently broad three-dimensional grid
comprising the effective channel length, gate oxide thickness, and temperature;
the results are tabulated. An interpolation algorithm is subsequently employed
whenever static power is to be evaluated at a particular point within the range
of the grid. The output of this model is scaled up to account for about 40\% of
the total power consumption \cite{liu2007}. Regarding temperature, the thermal
\up{RC} circuit utilized for dynamic steady-state analysis is constructed by
virtue of HotSpot \cite{skadron2003} as described in \sref{temperature-model}.
At this point, the two outputs of Stage~1 are now specified.
\subsection{Probability Transformation}
At Stage~2 in \fref{chaos-overview}, the uncertain parameters \vu are
transformed into a vector of independent random variables \vz via a suitable
transformation $\transform$. Specifically, we use the one given in
\eref{probability-transformation}, which also includes model order reduction.
Unlike \sref{chaos-transient-application}, in this section, we let \vz obey the
standard Gaussian distribution and, therefore, tailor $\transform$ accordingly;
see \xref{probability-transformation}.
\subsection{Surrogate Construction}
Since the auxiliary variables $\vz = (\z_i)_{i = 1}^\nz$ are Gaussian, the
polynomial basis considered at Stage~3 is to be composed of Hermite polynomials,
which is the exact scenario described in \xref{polynomial-chaos}. The variables
also tell us how to approach numerical integration needed for evaluation of the
coefficients of \ac{PC} expansions: since we are interested in integrals with
respect to the standard Gaussian measure, Gauss--Hermite quadratures
\cite{maitre2010} are worth considering. These quadratures are especially
efficient, since they belong to the class of Gaussian quadratures and thus
inherit their properties; see \xref{numerical-integration}.
Lastly, let us illustrate the Hermite basis. In the case of working with only
one standard Gaussian variable ($\nz = 1$), a second-level \ac{PC} expansion
($\lc = 2$) of a three-dimensional quantity of interest \vg is as follows:
\[
\chaos{1}{2}{\vg}
= \hat{\vg}_{(0)} \psi_{(0)}
+ \hat{\vg}_{(1)} \psi_{(1)}
+ \hat{\vg}_{(2)} \psi_{(2)}
\]
where $\set{\hat{\vg}_{\vi}} \subset \real^3$,
\begin{align*}
& \psi_{(0)}(\vz) = 1, \\
& \psi_{(1)}(\vz) = \z_1, \text{ and} \\
& \psi_{(2)}(\vz) = \z_1^2 - 1.
\end{align*}
At Stage~4, the expansion is post-processed as described in
\sref{chaos-optimization}.
| {
"alphanum_fraction": 0.7832131148,
"author": null,
"avg_line_length": 56.4814814815,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "e99f311050dff10e93558ec47c98125975d34d16",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "95a7e2ee7664b94156906322610555e36e53cfe0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "IvanUkhov/thesis",
"max_forks_repo_path": "include/uncertainty/process/development/optimization-application.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "95a7e2ee7664b94156906322610555e36e53cfe0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "IvanUkhov/thesis",
"max_issues_repo_path": "include/uncertainty/process/development/optimization-application.tex",
"max_line_length": 80,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "95a7e2ee7664b94156906322610555e36e53cfe0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "IvanUkhov/thesis",
"max_stars_repo_path": "include/uncertainty/process/development/optimization-application.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1870,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 7625
} |
import os
import pytest
import musdb
import simplejson as json
import museval
import numpy as np
# Absolute path to the stored reference evaluation scores for the sample
# track, resolved relative to this test module so pytest can run from any
# working directory.
json_path = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    'data/Music Delta - Rock.json',
)
@pytest.fixture()
def mus():
    """Fixture providing the sample MUSDB database (wav-file variant)."""
    return musdb.DB(root_dir='data/MUS-STEMS-SAMPLE', is_wav=True)
def test_evaluate_mus_dir(mus):
    """Smoke test: evaluate a directory of estimates against the dataset."""
    museval.eval_mus_dir(
        dataset=mus,  # instance of musdb
        estimates_dir='data/EST',  # path to estimate folder
        output_dir='data/EST_scores_mus_dir',  # set a folder to write eval
    )
def test_eval_dir(mus):
    """Expect eval_dir to raise ValueError for this reference/estimates setup."""
    # NOTE(review): the exact failure reason (identical dirs vs. folder
    # layout) is not visible from this file -- confirm against museval docs.
    with pytest.raises(ValueError):
        museval.eval_dir(
            reference_dir='data/EST',  # path to estimate folder
            estimates_dir='data/EST',  # set a folder to write eval json files
        )
def test_estimate_and_evaluate(mus):
    """Regression test: evaluate deterministic random estimates for the sample
    track and compare every frame metric against the stored reference JSON.
    """
    # Load the stored reference scores for this track.
    with open(json_path) as json_file:
        ref = json.loads(json_file.read())
    print(os.path.basename(json_path))
    track = mus.load_mus_tracks(
        tracknames=[os.path.splitext(os.path.basename(json_path))[0]]
    )[0]
    # Fixed seed makes the random "estimates" reproducible, so the stored
    # reference scores stay valid across runs.
    np.random.seed(0)
    random_voc = np.random.random(track.audio.shape)
    random_acc = np.random.random(track.audio.shape)
    # create a silly regression test
    estimates = {
        'vocals': random_voc,
        'accompaniment': random_acc
    }
    scores = museval.eval_mus_track(
        track, estimates
    )
    assert scores.validate() is None
    with open(
        os.path.join('.', track.name) + '.json', 'w+'
    ) as f:
        f.write(scores.json)
    scores = json.loads(scores.json)
    for target in ref['targets']:
        for metric in ['SDR', 'SIR', 'SAR', 'ISR']:
            # Bug fix: the original code rebound the name ``ref`` (the
            # reference dict being iterated) to this per-metric array, which
            # only worked by accident because the outer iterator had already
            # been created. Use a distinct name instead.
            expected = np.array([d['metrics'][metric] for d in target['frames']])
            idx = [t['name'] for t in scores['targets']].index(target['name'])
            est = np.array(
                [
                    d['metrics'][metric]
                    for d in scores['targets'][idx]['frames']
                ]
            )
            assert np.allclose(expected, est)
| {
"alphanum_fraction": 0.6033573141,
"author": null,
"avg_line_length": 25.4268292683,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b89de0cc8403e81c3eb63f62b1bba82b7784bbe9",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "cec9b1aae45400a04a426ab214185efc89c0c563",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "pseeth/sigsep-mus-eval",
"max_forks_repo_path": "tests/test_regression.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "cec9b1aae45400a04a426ab214185efc89c0c563",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "pseeth/sigsep-mus-eval",
"max_issues_repo_path": "tests/test_regression.py",
"max_line_length": 78,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "cec9b1aae45400a04a426ab214185efc89c0c563",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "pseeth/sigsep-mus-eval",
"max_stars_repo_path": "tests/test_regression.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 511,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2085
} |
// Boost.Geometry (aka GGL, Generic Geometry Library)
// Unit Test
// Copyright (c) 2015, Oracle and/or its affiliates.
// Contributed and/or modified by Menelaos Karavelas, on behalf of Oracle
// Licensed under the Boost Software License version 1.0.
// http://www.boost.org/users/license.html
#ifndef BOOST_GEOMETRY_TEST_ENVELOPE_EXPAND_ON_SPHEROID_HPP
#define BOOST_GEOMETRY_TEST_ENVELOPE_EXPAND_ON_SPHEROID_HPP
#include <cmath>
#include <cstddef>
#include <algorithm>
#include <boost/type_traits/is_same.hpp>
#include <boost/geometry/core/access.hpp>
#include <boost/geometry/core/coordinate_dimension.hpp>
#include <boost/geometry/core/cs.hpp>
#include <boost/geometry/util/condition.hpp>
#include <boost/geometry/util/math.hpp>
#include <boost/geometry/views/detail/indexed_point_view.hpp>
#include <boost/geometry/algorithms/assign.hpp>
// Return a human-readable name for a Boost.Geometry angular-unit tag.
template <typename Units>
char const* units2string()
{
    return BOOST_GEOMETRY_CONDITION((boost::is_same<Units, bg::degree>::value))
        ? "degrees"
        : "radians";
}
template <typename CoordinateSystem>
struct other_system_info
{};
template <>
struct other_system_info<bg::cs::spherical_equatorial<bg::radian> >
{
typedef bg::degree units;
typedef bg::cs::spherical_equatorial<units> type;
template <typename T>
static inline T convert(T const& value)
{
return value * bg::math::r2d<T>();
}
};
template <>
struct other_system_info<bg::cs::spherical_equatorial<bg::degree> >
{
typedef bg::radian units;
typedef bg::cs::spherical_equatorial<units> type;
template <typename T>
static inline T convert(T const& value)
{
return value * bg::math::d2r<T>();
}
};
// geographic in radians -> its degree twin; convert() goes rad->deg.
template <>
struct other_system_info<bg::cs::geographic<bg::radian> >
{
    typedef bg::degree units;
    typedef bg::cs::geographic<units> type;

    template <typename T>
    static inline T convert(T const& value)
    {
        return value * bg::math::r2d<T>();
    }
};
// geographic in degrees -> its radian twin; convert() goes deg->rad.
template <>
struct other_system_info<bg::cs::geographic<bg::degree> >
{
    typedef bg::radian units;
    typedef bg::cs::geographic<units> type;

    template <typename T>
    static inline T convert(T const& value)
    {
        return value * bg::math::d2r<T>();
    }
};
// Binary predicate: floating-point comparison with a caller-chosen relative
// tolerance, scaled by max(|a|, |b|, 1) so values near zero use an absolute
// tolerance floor.
class equals_with_tolerance
{
private:
    double m_tolerance;

    // true when a and b are exactly equal or differ by at most
    // tol * max(|a|, |b|, 1)
    template <typename T>
    static inline bool check_close(T const& a, T const& b, double tol)
    {
        if (a == b)
        {
            return true;
        }
        double const scale = (std::max)((std::max)(std::abs(a), std::abs(b)), 1.0);
        return std::abs(a - b) <= tol * scale;
    }

public:
    equals_with_tolerance(double tolerance) : m_tolerance(tolerance) {}

    template <typename T>
    inline bool operator()(T const& value1, T const& value2) const
    {
        return check_close(value1, value2, m_tolerance);
    }
};
// Compare two 2D boxes coordinate by coordinate.  Coordinate index <I, 1> is
// latitude (per the in-code comments): the min-corner latitude is compared
// with a relative tolerance when negative and the max-corner latitude when
// positive; all other coordinates are compared exactly (bg::math::equals or
// operator==, depending on BOOST_GEOMETRY_TEST_ENABLE_FAILING).
template
<
    typename Box1,
    typename Box2 = Box1,
    std::size_t DimensionCount = bg::dimension<Box1>::value
>
struct box_equals
{
    // box1/box2: boxes to compare; tol: relative tolerance for the latitude
    // comparisons described above.
    static inline bool apply(Box1 const& box1, Box2 const& box2, double tol)
    {
        equals_with_tolerance equals(tol);
#ifndef BOOST_GEOMETRY_TEST_ENABLE_FAILING
        // check latitude with tolerance when necessary
        return bg::math::equals(bg::get<0, 0>(box1), bg::get<0, 0>(box2))
            && (bg::get<0, 1>(box1) < 0
                ? equals(bg::get<0, 1>(box1), bg::get<0, 1>(box2))
                : bg::math::equals(bg::get<0, 1>(box1), bg::get<0, 1>(box2)))
            && bg::math::equals(bg::get<1, 0>(box1), bg::get<1, 0>(box2))
            && (bg::get<1, 1>(box1) > 0
                ? equals(bg::get<1, 1>(box1), bg::get<1, 1>(box2))
                : bg::math::equals(bg::get<1, 1>(box1), bg::get<1, 1>(box2)));
#else
        // check latitude with tolerance when necessary
        return bg::get<0, 0>(box1) == bg::get<0, 0>(box2)
            && (bg::get<0, 1>(box1) < 0
                ? equals(bg::get<0, 1>(box1), bg::get<0, 1>(box2))
                : bg::get<0, 1>(box1) == bg::get<0, 1>(box2))
            && bg::get<1, 0>(box1) == bg::get<1, 0>(box2)
            && (bg::get<1, 1>(box1) > 0
                ? equals(bg::get<1, 1>(box1), bg::get<1, 1>(box2))
                : bg::get<1, 1>(box1) == bg::get<1, 1>(box2));
#endif
    }
};
// 3D specialization: reuse the 2D lon/lat comparison and compare the third
// (height) coordinate with tolerance (or exactly under
// BOOST_GEOMETRY_TEST_ENABLE_FAILING).
template <typename Box1, typename Box2>
struct box_equals<Box1, Box2, 3>
{
    static inline bool apply(Box1 const& box1, Box2 const& box2, double tol)
    {
#ifndef BOOST_GEOMETRY_TEST_ENABLE_FAILING
        equals_with_tolerance equals(tol);
        return box_equals<Box1, Box2, 2>::apply(box1, box2, tol)
            && equals(bg::get<0, 2>(box1), bg::get<0, 2>(box2))
            && equals(bg::get<1, 2>(box1), bg::get<1, 2>(box2));
#else
        return box_equals<Box1, Box2, 2>::apply(box1, box2, tol)
            && bg::get<0, 2>(box1) == bg::get<0, 2>(box2)
            && bg::get<1, 2>(box1) == bg::get<1, 2>(box2);
#endif
    }
};
// Fill a 2D box from lon/lat bounds.  The two height arguments are accepted
// but ignored so that 2D and 3D boxes share a single call signature.
template <typename Box, std::size_t Dimension = bg::dimension<Box>::value>
struct initialize_box
{
    static inline void apply(Box& box,
                             double lon_min, double lat_min, double,
                             double lon_max, double lat_max, double)
    {
        bg::detail::indexed_point_view<Box, bg::min_corner> p_min(box);
        bg::detail::indexed_point_view<Box, bg::max_corner> p_max(box);
        bg::assign_values(p_min, lon_min, lat_min);
        bg::assign_values(p_max, lon_max, lat_max);
    }
};
// 3D specialization: also assign the height bounds to both corners.
template <typename Box>
struct initialize_box<Box, 3>
{
    static inline void apply(Box& box,
                             double lon_min, double lat_min, double height_min,
                             double lon_max, double lat_max, double height_max)
    {
        bg::detail::indexed_point_view<Box, bg::min_corner> p_min(box);
        bg::detail::indexed_point_view<Box, bg::max_corner> p_max(box);
        bg::assign_values(p_min, lon_min, lat_min, height_min);
        bg::assign_values(p_max, lon_max, lat_max, height_max);
    }
};
#endif // BOOST_GEOMETRY_TEST_ENVELOPE_EXPAND_ON_SPHEROID_HPP
| {
"alphanum_fraction": 0.6170454545,
"author": null,
"avg_line_length": 28.5185185185,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "14066cd472543364ca9b0055b406937a792d36fb",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2016-08-11T20:31:46.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-07-30T10:17:12.000Z",
"max_forks_repo_head_hexsha": "66ea1fd4946668192e3f0d1060f0844f324ad7b8",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "lipper/arangodb",
"max_forks_repo_path": "3rdParty/boost/1.62.0/libs/geometry/test/algorithms/envelope_expand/test_envelope_expand_on_spheroid.hpp",
"max_issues_count": 49,
"max_issues_repo_head_hexsha": "66ea1fd4946668192e3f0d1060f0844f324ad7b8",
"max_issues_repo_issues_event_max_datetime": "2019-05-05T04:59:26.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-02-29T17:59:52.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "lipper/arangodb",
"max_issues_repo_path": "3rdParty/boost/1.62.0/libs/geometry/test/algorithms/envelope_expand/test_envelope_expand_on_spheroid.hpp",
"max_line_length": 81,
"max_stars_count": 18,
"max_stars_repo_head_hexsha": "6a4f462fa209010cd064f99e63d85ce1d432c500",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "sita1999/arangodb",
"max_stars_repo_path": "3rdParty/boost/1.62.0/libs/geometry/test/algorithms/envelope_expand/test_envelope_expand_on_spheroid.hpp",
"max_stars_repo_stars_event_max_datetime": "2021-12-31T11:06:25.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-03-04T15:44:24.000Z",
"num_tokens": 1775,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 6160
} |
#Djnago stuff
from .models import *
from django.shortcuts import render,redirect
from django.http import HttpResponse,JsonResponse
from django.core.serializers import serialize
import time
#helper functions
from .utils import traverse,convertGraph
#run binaries
import subprocess
from subprocess import PIPE, run
#networkx converters
from networkx.readwrite import json_graph
from networkx.readwrite import parse_gml,from_graph6_bytes,to_graph6_bytes
from networkx.exception import NetworkXException
import networkx as nx
#json module
import json
#path building
from pathlib import Path
import os
def graphPage(request):
    """Render the interactive graph page.

    POST: graph data comes from a file upload or the 'text' form field; the
    'radio' field selects the 2D or 3D renderer.
    GET: graph data comes from a 'g6' (graph6) query parameter.
    """
    if request.method == 'POST':
        #get data from the form which is on home page
        if ("file" in request.FILES):
            try:
                data = request.FILES['file'].read().decode("utf-8")
            except:
                # unreadable / non-UTF-8 upload
                return render(request,"graph_visualization/error.html")
        else:
            data=request.POST.get("text")
        #some networkx converters dont handle strings(gexf,edge list,) so we need to create a temp file from
        #which we can read
        with open('temp.txt', 'w') as f:
            f.write(data)
        renderingOption=request.POST.get("radio")
        context=dict()
        graphData=convertGraph(data)
        context['data']=graphData
        context["commands"]= Command.objects.all()
        #render 2d or 3d page
        if graphData == "error":
            return render(request,"graph_visualization/error.html",context)
        elif renderingOption == "2D":
            return render(request,"graph_visualization/graph.html",context)
        else:
            return render(request,"graph_visualization/graph3d.html",context)
    elif request.method== "GET" :
        context=dict()
        # NOTE(review): g6 may be absent, making g6Str None — confirm
        # convertGraph handles None input
        g6Str=request.GET.get("g6")
        graphData=convertGraph(g6Str)
        context['data']=graphData
        context["commands"]= Command.objects.all()
        return render(request,"graph_visualization/graph.html",context)
def graphPage3D(request):
    """Render the empty 3D graph page."""
    return render(request, "graph_visualization/graph3d.html", {})
def home(request):
    """Render the landing page with the navbar 'home' flag set."""
    return render(request, 'graph_visualization/home.html', {"home": 1})
def About(request):
    """Render the about page with the navbar 'home' flag set."""
    return render(request, 'graph_visualization/about.html', {"home": 1})
def Manual(request):
    """Render the manual page with the navbar 'home' flag set."""
    return render(request, 'graph_visualization/manual.html', {"home": 1})
def JsonEditor(request):
    """Render the standalone JSON editor page."""
    return render(request, 'graph_visualization/jsonEditor.html')
def LoadNewContent(request):
    """AJAX endpoint: replace the current graph with an uploaded file's content.

    Expects a POST carrying a 'file' upload; returns the converted graph data
    as JSON.  Renders the error page when the upload is missing or not valid
    UTF-8.
    """
    if request.method == 'POST':
        # Bug fix: the original fell through with `data` unbound (NameError)
        # when no file was posted; treat a missing upload as an error instead.
        if "file" not in request.FILES:
            return render(request, "graph_visualization/error.html")
        try:
            data = request.FILES['file'].read().decode("utf-8")
        except UnicodeDecodeError:
            return render(request, "graph_visualization/error.html")
        # Mirror the upload into a temp file because some networkx readers
        # used by convertGraph only accept file paths.
        with open('temp.txt', 'w') as f:
            f.write(data)
        graphData = convertGraph(data)
        return JsonResponse({"data": graphData})
def exportGML(request):
    """Serialize the posted node/link JSON into GML text and return it as JSON."""
    if request.method == 'POST':
        payload = json.loads(request.body)
        chunks = ["graph [\n"]
        for item in payload["nodes"]:
            chunks.append("node [\n")
            chunks.append(traverse(item))
            chunks.append("]\n")
        for item in payload["links"]:
            chunks.append("edge [\n")
            chunks.append(traverse(item))
            chunks.append("]\n")
        chunks.append("]")  # terminate the GML document
        return JsonResponse({"data": "".join(chunks)})
def exportG6(request):
    """Convert the posted graph JSON to graph6 format via an intermediate GML string."""
    if request.method == 'POST':
        payload = json.loads(request.body)
        chunks = ["graph [\n"]
        for item in payload["nodes"]:
            chunks.append("node [\n" + traverse(item) + "]\n")
        for item in payload["links"]:
            chunks.append("edge [\n" + traverse(item) + "]\n")
        chunks.append("]")  # terminate the GML document
        G = parse_gml("".join(chunks), label="id", destringizer=None)
        # strip the networkx ">>graph6<<" header and surrounding whitespace
        g6_text = to_graph6_bytes(G).decode().replace(">>graph6<<", "").strip()
        return JsonResponse({"data": g6_text})
def exportGraphML(request):
    """Convert the posted node/link JSON to GraphML text.

    The graph is first serialized to GML and parsed back with networkx, then
    nested dict-valued node attributes (e.g. a 'graphics' block) are flattened
    to top-level attributes, because GraphML cannot hold dict values.
    """
    if request.method == 'POST':
        json_data = json.loads(request.body)
        nodes = json_data["nodes"]
        links = json_data["links"]
        response="graph [\n"
        for node in nodes:
            response+="node [\n"
            response+=traverse(node)
            response+="]\n"
        for link in links:
            response+="edge [\n"
            response+=traverse(link)
            response+="]\n"
        #end of graph
        response+="]"
        G=parse_gml(response,label="id",destringizer=None)
        attrs = set()
        #get attributes in nested structures
        for node in G.nodes:
            for attrib in G.nodes[node]:
                if type(G.nodes[node][attrib]) == dict:
                    for key in G.nodes[node][attrib].keys():
                        attrs.add(key)
                    #G.nodes[node][attrib]=""
        print(attrs)
        # Pre-seed every node with a default for each nested key; this also
        # ensures the flattening loop below only updates existing dict keys
        # (no size change while iterating).
        for attr in attrs:
            # NOTE(review): GML keys are normally str, so the int branch looks
            # unreachable, and `var` would be unbound for other key types —
            # confirm against traverse()'s output.
            if isinstance(attr, int):
                var =int()
            elif isinstance(attr,str):
                var = str()
            nx.set_node_attributes(G, var, attr)
        for node in G.nodes:
            for attrib in G.nodes[node]:
                if type(G.nodes[node][attrib]) == dict:
                    #graphics dict
                    print(G.nodes[node][attrib])
                    d=G.nodes[node][attrib]
                    # hoist nested entries to top-level node attributes
                    for key, value in d.items():
                        print(G.nodes[node][attrib])
                        G.nodes[node][key]=value
                    G.nodes[node][attrib]=""
        print(G)
        # dict-valued edge attributes are simply blanked out
        for edge in G.edges:
            for attrib in G.edges[edge]:
                if type(G.edges[edge][attrib]) == dict:
                    G.edges[edge][attrib]=""
        response=""
        linefeed = chr(10)
        s = linefeed.join(nx.generate_graphml(G))  # NOTE(review): unused
        for line in nx.generate_graphml(G):
            response+=line+"\n"
        return JsonResponse({"data":response})
def exportGexf(request):
    """Convert the posted graph JSON to GEXF text via an intermediate GML form."""
    if request.method == 'POST':
        payload = json.loads(request.body)
        chunks = ["graph [\n"]
        for item in payload["nodes"]:
            chunks.append("node [\n" + traverse(item) + "]\n")
        for item in payload["links"]:
            chunks.append("edge [\n" + traverse(item) + "]\n")
        chunks.append("]")  # terminate the GML document
        G = parse_gml("".join(chunks), label="id", destringizer=None)
        gexf_text = "".join(line + "\n" for line in nx.generate_gexf(G))
        return JsonResponse({"data": gexf_text})
def exportEdgelist(request):
    """Convert the posted graph JSON to a plain edge list (one edge per line)."""
    if request.method == 'POST':
        payload = json.loads(request.body)
        chunks = ["graph [\n"]
        for item in payload["nodes"]:
            chunks.append("node [\n" + traverse(item) + "]\n")
        for item in payload["links"]:
            chunks.append("edge [\n" + traverse(item) + "]\n")
        chunks.append("]")  # terminate the GML document
        G = parse_gml("".join(chunks), label="id", destringizer=None)
        edge_text = "\n".join(nx.generate_edgelist(G, data=False)).strip("\n")
        return JsonResponse({"data": edge_text})
def exportAdjacencyList(request):
    """Convert the posted graph JSON to adjacency-list text."""
    if request.method == 'POST':
        payload = json.loads(request.body)
        chunks = ["graph [\n"]
        for item in payload["nodes"]:
            chunks.append("node [\n" + traverse(item) + "]\n")
        for item in payload["links"]:
            chunks.append("edge [\n" + traverse(item) + "]\n")
        chunks.append("]")  # terminate the GML document
        G = parse_gml("".join(chunks), label="id", destringizer=None)
        adj_text = "".join(line + "\n" for line in nx.generate_adjlist(G))
        return JsonResponse({"data": adj_text})
def exportSparse6(request):
    """Convert the posted graph JSON to sparse6 format."""
    if request.method == 'POST':
        payload = json.loads(request.body)
        chunks = ["graph [\n"]
        for item in payload["nodes"]:
            chunks.append("node [\n" + traverse(item) + "]\n")
        for item in payload["links"]:
            chunks.append("edge [\n" + traverse(item) + "]\n")
        chunks.append("]")  # terminate the GML document
        G = parse_gml("".join(chunks), label="id", destringizer=None)
        return JsonResponse({"data": nx.to_sparse6_bytes(G).decode()})
def exportDot(request):
    """Convert the posted graph JSON to Graphviz DOT text."""
    if request.method == 'POST':
        payload = json.loads(request.body)
        chunks = ["graph [\n"]
        for item in payload["nodes"]:
            chunks.append("node [\n" + traverse(item) + "]\n")
        for item in payload["links"]:
            chunks.append("edge [\n" + traverse(item) + "]\n")
        chunks.append("]")  # terminate the GML document
        G = parse_gml("".join(chunks), label="id", destringizer=None)
        dot_text = nx.nx_pydot.to_pydot(G).to_string()
        return JsonResponse({"data": dot_text})
#binary file view
def modifyAPI(request):
    """Run a registered graph binary on the posted graph and return the result.

    The POST body carries the graph (nodes/links), the binary's display name
    (cmd) and a {flag: value} parameter mapping.  The graph is written to
    testfile.gml, the binary is executed, and its testfile-out.graphml output
    is converted back for the client.
    """
    if request.method == 'POST':
        json_data = json.loads(request.body)
        nodes = json_data["nodes"]
        links = json_data["links"]
        cmd = json_data["cmd"]
        parameters = json_data["parameters"]
        # rebuild a GML document from the client-side JSON graph
        response = "graph [\n"
        for node in nodes:
            response += "node [\n"
            response += traverse(node)
            response += "]\n"
        for link in links:
            response += "edge [\n"
            response += traverse(link)
            response += "]\n"
        # end of graph
        response += "]"
        # look up the actual terminal command for the requested binary
        binary = Command.objects.get(name=cmd)
        cmd = binary.terminal_command
        # append "-flag value" pairs for every non-empty parameter
        for key, value in parameters.items():
            if key != "":
                cmd += " -" + key
                if value != "":
                    cmd += " " + value
        # time how long the external binary takes
        start_time = time.time()
        with open('testfile.gml', 'w') as f:
            f.write(response)
        # SECURITY: shell=True with client-supplied parameter strings allows
        # shell injection; this should be an argument list with shell=False.
        popen = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
        output, error = popen.communicate()
        print(error)
        print("--- %s seconds ---" % (time.time() - start_time))
        with open("testfile-out.graphml", 'r') as f:
            gml = f.read()
        # Bug fix: str.replace returns a new string; the original discarded
        # the result, leaving the OGDF Creator header in place.
        gml = gml.replace('Creator "ogdf::GraphIO::writeGML"', "")
        output = convertGraph(gml)
        return HttpResponse(output)
def getCommandParameter(request):
    """Return the stored parameter definitions for a named binary as JSON."""
    if request.method == 'POST':
        name = json.loads(request.body)
        # look up the binary and its parameter rows in the DB
        binary = Command.objects.get(name=name)
        parameters = CommandParameter.objects.filter(command=binary.id)
        return JsonResponse(list(parameters.values()), safe=False)
def getSampleGraph(request):
    """Return the text of a bundled sample graph selected by name."""
    if request.method == 'POST':
        name = json.loads(request.body)
        # SECURITY fix: the name is client-controlled; strip any directory
        # components so the path cannot escape sample_graphs/ (traversal).
        safe_name = os.path.basename(str(name))
        with open("sample_graphs/" + safe_name + ".txt", 'r') as f:
            graphString = f.read()
        return JsonResponse({"data": graphString})
| {
"alphanum_fraction": 0.551655365,
"author": null,
"avg_line_length": 28.6187214612,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "25cf1d93e52afa109a0fa9687cebe1680ff7bd2e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-07-18T12:49:17.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-07-18T12:49:17.000Z",
"max_forks_repo_head_hexsha": "8eab2fa79251dd78ded46f440269600f87de115a",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "Skazu14/grapp",
"max_forks_repo_path": "main/graph_visualization/views.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8eab2fa79251dd78ded46f440269600f87de115a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "Skazu14/grapp",
"max_issues_repo_path": "main/graph_visualization/views.py",
"max_line_length": 108,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8eab2fa79251dd78ded46f440269600f87de115a",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "Skazu14/grapp",
"max_stars_repo_path": "main/graph_visualization/views.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2669,
"path": null,
"reason": "import networkx,from networkx",
"repo": null,
"save_path": null,
"sha": null,
"size": 12535
} |
from datetime import datetime, timezone
import numpy as np
import pytest
import astropy.units as u
from stixcore.time import SCETime, SCETimeDelta, SCETimeRange
from stixcore.time.datetime import MAX_COARSE, MAX_FINE
def test_time_init():
    """Equivalent SCETime constructions compare equal; invalid values raise."""
    t1 = SCETime(0, 0)
    t2 = SCETime.from_float(0*u.s)
    t3 = SCETime(t1)  # copy construction
    assert t1 == t2
    assert t2 == t3
    assert t1 == t3
    with pytest.raises(ValueError, match=r'Coarse time must be in range.*'):
        SCETime(-1, 0)
    with pytest.raises(ValueError, match=r'Fine time must be in range.*'):
        SCETime(0, -1)
    with pytest.raises(ValueError):
        _ = SCETime(2 ** 44-1, 0)  # coarse beyond the representable range
    with pytest.raises(ValueError):
        SCETime(0, 2**16+1)  # fine beyond 16 bits
    with pytest.raises(ValueError):
        SCETime(0.0, 0)  # non-integer coarse value
def test_time_to_datetime():
    """SCET epoch (0, 0) maps to 2000-01-01T00:00 UTC."""
    dt = SCETime(coarse=0, fine=0)
    assert dt.to_datetime() == datetime(2000, 1, 1, 0, tzinfo=timezone.utc)
def test_time_as_float():
    """One coarse tick converts to exactly one second."""
    dt = SCETime(coarse=1, fine=0)
    assert dt.as_float() == 1.0 * u.s
def test_time_from_float():
    """from_float(1 s) round-trips to coarse=1, fine=0."""
    dt = SCETime.from_float(1 * u.s)
    assert dt == SCETime(coarse=1, fine=0)
def test_time_to_str():
    """String serialization round-trips through from_string."""
    dt = SCETime(coarse=123, fine=45)
    assert dt == SCETime.from_string(str(dt))
def test_time_add():
    """Adding to an SCETime: bad types/units raise; left and right add agree."""
    t1 = SCETime(123, 456)
    with pytest.raises(TypeError, match=r'Only Quantities and SCETimeDelta.*'):
        _ = t1 + SCETime(0, 1)  # time + time is undefined
    with pytest.raises(ValueError, match=r'.*are not convertible'):
        _ = t1 + (1*u.m)  # non-time unit
    # test right add
    t2 = t1 + (1 + 1/MAX_FINE) * u.s
    # test left add
    t3 = (1 + 1/MAX_FINE) * u.s + t1
    assert t2 == t3
    assert t2.coarse == 124
    assert t2.fine == 457
def test_time_sub():
    """Subtraction across SCETime / SCETimeDelta / Quantity combinations."""
    t1 = SCETime(123, 456)
    with pytest.raises(TypeError, match=r'Only quantities, SCETime and SCETimeDelt.*'):
        _ = t1 - 1
    with pytest.raises(ValueError, match=r'.*are not convertible'):
        # NOTE(review): uses + although this is the sub test — confirm intended
        _ = t1 + (1*u.m)
    # test sub
    t2 = t1 - (1 + 1/MAX_FINE) * u.s
    assert t2.coarse == 122
    assert t2.fine == 455
    # test rsub
    with pytest.raises(TypeError, match=r'unsupported operand.*'):
        t2 = (1 + 1/MAX_FINE) * u.s - t1
    # Test subtract to times
    dt = t1 - t2
    assert isinstance(dt, SCETimeDelta)
    assert dt.coarse == 1
    assert dt.fine == 1
    dt = t2 - t1
    assert isinstance(dt, SCETimeDelta)
    assert dt.coarse == -1
    assert dt.fine == -1
    # Test subtract deltatime
    t3 = t2 - dt
    assert isinstance(t3, SCETime)
    assert t3.coarse == 123
    assert t3.fine == 456
    # Can't subtract time from a delta time
    with pytest.raises(TypeError, match=r'Unsupported operation for '
                                        r'types SCETimeDelta and SCETime'):
        _ = dt - t1
def test_time_eq():
    """A float-constructed time equals its (coarse, fine) form; others differ."""
    t1 = SCETime(123, 456)
    t2 = SCETime.from_float((123 + 456/MAX_FINE)*u.s)
    t3 = SCETime(765, 432)
    assert t1 == t2
    assert t1 != t3
def test_time_broadcast():
    """SCETime arithmetic and construction broadcast over array inputs."""
    t = SCETime(0, 0)
    # adding an array quantity broadcasts a scalar time to an array of times
    t1 = t + np.arange(100) * u.s
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int produces the same default integer dtype.
    t2 = SCETime(np.arange(100, dtype=int), 0)
    t3 = SCETime(0, np.arange(100, dtype=int))
    assert t1.shape == (100,)
    assert t2.shape == (100,)
    assert t3.shape == (100,)
def test_time_lt():
    """Ordering comparisons between two distinct times."""
    dt = SCETime(coarse=123, fine=45)
    dt2 = SCETime(coarse=124, fine=45)
    assert dt < dt2
    assert dt <= dt2
    assert dt2 > dt
    assert dt2 >= dt
    assert dt2 is not dt  # equality, not identity
    assert dt2 == SCETime.from_string(str(dt2))
def test_time_minmax():
    """min/max sentinel times, and overflowing past max_time raises."""
    assert SCETime.min_time() == SCETime(coarse=0, fine=0)
    assert SCETime.max_time() == SCETime(coarse=MAX_COARSE, fine=MAX_FINE)
    # TODO enable after https://github.com/i4Ds/STIXCore/issues/102
    # assert SCETime.min_time() - SCETime(coarse=0, fine=1) == SCETime.min_time()
    with pytest.raises(ValueError, match=r'Coarse time must be in range.*'):
        m = SCETime.max_time()
        dt = SCETimeDelta(0, 1)
        nm = m + dt  # one fine tick past the maximum overflows coarse
        print(nm)
def test_timedelta_init():
    """Equivalent SCETimeDelta constructions compare equal; bad values raise."""
    dt1 = SCETimeDelta(0, 0)
    dt2 = SCETimeDelta.from_float(0*u.s)
    dt3 = SCETimeDelta(dt1)  # copy construction
    assert dt1 == dt2
    assert dt2 == dt3
    assert dt1 == dt3
    with pytest.raises(ValueError):
        _ = SCETimeDelta(2 ** 32 + 1, 0)
    # Bug fix: the two checks below asserted on SCETime (copy-paste from
    # test_time_init); this test is about SCETimeDelta, so validate the
    # delta type's own range/type checking instead.
    with pytest.raises(ValueError):
        SCETimeDelta(0, 2**16+1)
    with pytest.raises(ValueError):
        SCETimeDelta(0.0, 0)
def test_timedelta_as_float():
    """Negative deltas convert to negative seconds."""
    dt = SCETimeDelta(coarse=-1, fine=0)
    assert dt.as_float() == -1.0 * u.s
def test_timedelta_from_float():
    """from_float(-1 s) round-trips to coarse=-1, fine=0."""
    dt = SCETimeDelta.from_float(-1 * u.s)
    assert dt == SCETimeDelta(coarse=-1, fine=0)
def test_timedelta_add():
    """Adding SCETimeDelta to SCETime, other deltas and quantities."""
    t1 = SCETime(1, 1)
    dt1 = SCETimeDelta(100, 1)
    dt2 = SCETimeDelta(200, 2)
    # test time plus timedelta
    t1_dt1 = dt1 + t1
    dt1_t1 = t1 + dt1
    assert t1_dt1 == dt1_t1  # addition commutes
    assert t1_dt1.coarse == 101
    assert t1_dt1.fine == 2
    # NOTE(review): f-string below has no placeholders; a raw string was
    # probably intended
    with pytest.raises(ValueError, match=f'.*are not convertible'):
        _ = dt1 + (1*u.m)
    # test timedelta plus timedelta/quantity
    dt1_dt2 = dt1 + dt2
    dt1_float = dt1 + (200+2/MAX_FINE)*u.s
    dt2_dt1 = dt2 + dt1
    float_dt2 = (100 + 1/MAX_FINE) * u.s + dt2
    assert dt1_dt2 == dt2_dt1
    assert dt1_float == dt1_dt2
    assert float_dt2 == dt1_dt2
    assert dt1_dt2.coarse == 300
    assert dt1_dt2.fine == 3
def test_deltatime_sub():
    """Subtraction between deltas, quantities and times."""
    t1 = SCETime(100, 2)
    dt1 = SCETimeDelta(100, 1)
    dt2 = SCETimeDelta(200, 2)
    with pytest.raises(TypeError, match=r'Unsupported operation for types SCETimeDelta and int'):
        _ = dt1 - 1
    with pytest.raises(TypeError, match=r'Quantity could not be converted to SCETimeDelta'):
        _ = dt1 - (1*u.m)
    # test sub deltatimes and quantities
    dt1_dt2 = dt1 - dt2
    dt1_float = dt1 - (200 + 2 / MAX_FINE) * u.s
    assert dt1_dt2 == dt1_float
    assert dt1_dt2.coarse == -100
    assert dt1_dt2.fine == -1
    dt2_dt1 = dt2 - dt1
    float_dt1 = (200 + 2/MAX_FINE) * u.s - dt1
    assert dt2_dt1 == float_dt1
    assert dt2_dt1.coarse == 100
    assert dt2_dt1.fine == 1
    # test sub times
    with pytest.raises(TypeError, match=f'Unsupported operation for types.*'):
        dt1 - t1  # delta minus absolute time is undefined
    t2 = t1 - dt1
    assert t2.coarse == 0
    assert t2.fine == 1
    with pytest.raises(ValueError, match=r'Coarse time must be in range.*'):
        t1 - dt2  # would produce a negative coarse time
def test_timedelta_eq():
    """Delta equality against other deltas and plain quantities."""
    dt1 = SCETimeDelta(123, 456)
    dt2 = SCETimeDelta((123 + 456/MAX_FINE)*u.s)
    dt3 = SCETimeDelta(-1, -1)
    assert dt1 == dt2
    assert dt1 != dt3
    assert dt1 == (123 + 456/MAX_FINE)*u.s
def test_timerange():
    """Containment and expansion of an SCETimeRange."""
    tr = SCETimeRange(start=SCETime(coarse=100, fine=0), end=SCETime(coarse=200, fine=0))
    tp_in = SCETime(coarse=150, fine=0)
    tr_in = SCETimeRange(start=SCETime(coarse=150, fine=0), end=SCETime(coarse=160, fine=0))
    tr_out = SCETimeRange(start=SCETime(coarse=150, fine=0), end=SCETime(coarse=250, fine=0))
    tp_out = SCETime(coarse=250, fine=0)
    assert tp_in in tr
    assert tp_out not in tr
    assert tr_in in tr
    assert tr_out not in tr
    # expanding makes the previously-outside point/range fit
    tr.expand(tp_out)
    tr.expand(tr_out)
    assert tp_out in tr
    assert tr_out in tr
| {
"alphanum_fraction": 0.6251581167,
"author": null,
"avg_line_length": 26.8490566038,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "13c9ded99596299bd7e7d929dcea175a60724c3d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2022-01-21T07:52:51.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-11-09T15:05:18.000Z",
"max_forks_repo_head_hexsha": "16822bbb37046f8e6c03be51909cfc91e9822cf7",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "nicHoch/STIXCore",
"max_forks_repo_path": "stixcore/time/tests/test_datetime.py",
"max_issues_count": 192,
"max_issues_repo_head_hexsha": "16822bbb37046f8e6c03be51909cfc91e9822cf7",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T15:17:13.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-11-03T22:40:19.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "nicHoch/STIXCore",
"max_issues_repo_path": "stixcore/time/tests/test_datetime.py",
"max_line_length": 97,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "16822bbb37046f8e6c03be51909cfc91e9822cf7",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "nicHoch/STIXCore",
"max_stars_repo_path": "stixcore/time/tests/test_datetime.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T13:42:43.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-03-31T13:42:43.000Z",
"num_tokens": 2389,
"path": null,
"reason": "import numpy,import astropy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7115
} |
import os
import sys
import time
import pickle
import random
import tarfile
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy import ndimage
from urllib.request import urlretrieve
from sklearn.linear_model import LogisticRegression
from utils import *
from dataset_utils import *
# NOTE(review): `logging` is not imported directly here — presumably it comes
# from the star-import of utils; confirm.
logger = logging.getLogger(os.path.basename(__file__))
url = 'https://commondatastorage.googleapis.com/books1000/'  # notMNIST download mirror
last_percent_reported = None  # module-level state for download_progress_hook
data_root = '.' # Change me to store data elsewhere
np.random.seed(133)  # fixed seed so shuffling/merging is reproducible
pixel_depth = 255.0 # Number of levels per pixel.
# split sizes used by generate_datasets
train_size = 200000
valid_size = 10000
test_size = 10000
def show_image_float(img, cmap=None):
    """Display a float image normalized to the [-0.5, 0.5] value range."""
    plt.imshow(img, vmin=-0.5, vmax=0.5, cmap='gray')
    plt.show()
def show_image_filesystem(filename):
    """Load an image file from disk and display it in grayscale."""
    loaded = mpimg.imread(filename)
    plt.imshow(loaded, cmap='gray')
    plt.show()
def download_progress_hook(count, blockSize, totalSize):
    """urlretrieve reporthook: print the percentage at 5% steps, a dot
    otherwise; duplicate percentages are suppressed via module state."""
    global last_percent_reported
    percent = int(count * blockSize * 100 / totalSize)
    if last_percent_reported == percent:
        return  # nothing new to report
    if percent % 5 == 0:
        sys.stdout.write('%s%%' % percent)
    else:
        sys.stdout.write('.')
    sys.stdout.flush()
    last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
    """Download a file if not present, and make sure it's the right size.

    Args:
        filename: remote file name, joined with the module-level mirror url.
        expected_bytes: required on-disk size; a mismatch raises Exception.
        force: re-download even if the destination already exists.

    Returns:
        The destination path of the verified file.
    """
    dest_filename = os.path.join(data_root, filename)
    if force or not os.path.exists(dest_filename):
        print('Attempting to download:', filename)
        filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)
        print('\nDownload Complete!')
    statinfo = os.stat(dest_filename)
    if statinfo.st_size == expected_bytes:
        print('Found and verified', dest_filename)
    else:
        raise Exception('Failed to verify ' + dest_filename + '. Can you get to it with a browser?')
    return dest_filename
def maybe_extract(filename, force=False):
    """Extract a .tar.gz archive into data_root (unless already extracted)
    and return the sorted per-class folder paths.

    Raises Exception when the number of extracted class folders differs from
    NUM_CLASSES (presumably provided by the dataset_utils star-import —
    confirm).
    """
    root = os.path.splitext(os.path.splitext(filename)[0])[0]  # remove .tar.gz
    if os.path.isdir(root) and not force:
        # You may override by setting force=True.
        print('%s already present - Skipping extraction of %s.' % (root, filename))
    else:
        print('Extracting data for %s. This may take a while. Please wait.' % root)
        tar = tarfile.open(filename)
        sys.stdout.flush()
        tar.extractall(data_root)
        tar.close()
    data_folders = [
        os.path.join(root, d)
        for d in sorted(os.listdir(root))
        if os.path.isdir(os.path.join(root, d))
    ]
    if len(data_folders) != NUM_CLASSES:
        raise Exception('Expected %d folders, one per class. Found %d instead.' % (NUM_CLASSES, len(data_folders)))
    print(data_folders)
    return data_folders
def load_letter(folder, min_num_images):
    """Load the data for a single letter label.

    Returns a (num_images, IMAGE_RES, IMAGE_RES) float32 tensor with pixels
    normalized to roughly [-0.5, 0.5]; unreadable images are skipped.
    Raises Exception when fewer than min_num_images load successfully.
    """
    image_files = os.listdir(folder)
    # one slot per file; skipped images are trimmed off afterwards
    dataset = np.ndarray(shape=(len(image_files), IMAGE_RES, IMAGE_RES), dtype=np.float32)
    print(folder)
    num_images = 0
    for image in image_files:
        image_file = os.path.join(folder, image)
        try:
            # NOTE(review): scipy.ndimage.imread was deprecated and removed in
            # SciPy 1.2 — this only runs on older SciPy; confirm the pin.
            image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth
            if image_data.shape != (IMAGE_RES, IMAGE_RES):
                raise Exception('Unexpected image shape: %s' % str(image_data.shape))
            dataset[num_images, :, :] = image_data
            num_images += 1
        except IOError as e:
            print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
    dataset = dataset[0:num_images, :, :]  # drop unused trailing slots
    if num_images < min_num_images:
        raise Exception('Many fewer images than expected: %d < %d' % (num_images, min_num_images))
    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
    """Pickle each class folder to '<folder>.pickle' (skipping files that
    already exist unless force is True) and return the pickle file names."""
    dataset_names = []
    for folder in data_folders:
        set_filename = folder + '.pickle'
        dataset_names.append(set_filename)
        if os.path.exists(set_filename) and not force:
            # You may override by setting force=True.
            print('%s already present - Skipping pickling.' % set_filename)
            continue
        print('Pickling %s.' % set_filename)
        dataset = load_letter(folder, min_num_images_per_class)
        try:
            with open(set_filename, 'wb') as f:
                pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
        except Exception as e:
            # best-effort: report and keep pickling the remaining classes
            print('Unable to save data to', set_filename, ':', e)
    return dataset_names
def make_arrays(nb_rows, img_size):
    """Allocate an uninitialized (nb_rows, img_size, img_size) float32 dataset
    and a matching int32 label vector; return (None, None) for falsy nb_rows."""
    if not nb_rows:
        return None, None
    dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
    labels = np.ndarray(nb_rows, dtype=np.int32)
    return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
    """Merge per-class pickles into balanced train/validation arrays.

    Each class contributes train_size // num_classes training samples and
    valid_size // num_classes validation samples (validation may be empty).

    Returns:
        (valid_dataset, valid_labels, train_dataset, train_labels); the
        validation pair is None when valid_size is 0.
    """
    NUM_CLASSES = len(pickle_files)  # NOTE(review): shadows any imported NUM_CLASSES
    valid_dataset, valid_labels = make_arrays(valid_size, IMAGE_RES)
    train_dataset, train_labels = make_arrays(train_size, IMAGE_RES)
    vsize_per_class = valid_size // NUM_CLASSES
    tsize_per_class = train_size // NUM_CLASSES
    # rolling write windows into the output arrays
    start_v, start_t = 0, 0
    end_v, end_t = vsize_per_class, tsize_per_class
    end_l = vsize_per_class + tsize_per_class
    for label, pickle_file in enumerate(pickle_files):
        try:
            with open(pickle_file, 'rb') as f:
                letter_set = pickle.load(f)
                # let's shuffle the letters to have random validation and training set
                np.random.shuffle(letter_set)
                if valid_dataset is not None:
                    valid_letter = letter_set[:vsize_per_class, :, :]
                    valid_dataset[start_v:end_v, :, :] = valid_letter
                    valid_labels[start_v:end_v] = label
                    start_v += vsize_per_class
                    end_v += vsize_per_class
                # training samples come after the validation slice
                train_letter = letter_set[vsize_per_class:end_l, :, :]
                train_dataset[start_t:end_t, :, :] = train_letter
                train_labels[start_t:end_t] = label
                start_t += tsize_per_class
                end_t += tsize_per_class
        except Exception as e:
            print('Unable to process data from', pickle_file, ':', e)
            raise
    return valid_dataset, valid_labels, train_dataset, train_labels
def randomize(dataset, labels):
    """Apply one shared random permutation to dataset and labels, keeping
    sample/label pairs aligned."""
    order = np.random.permutation(labels.shape[0])
    return dataset[order, :, :], labels[order]
def verify_dataset(dataset, labels):
    """Show one randomly chosen sample with its letter label (manual check)."""
    sample_idx = random.randrange(0, labels.shape[0])
    letter = chr(ord('a') + labels[sample_idx])
    print(sample_idx, labels[sample_idx], letter)
    plt.imshow(dataset[sample_idx], cmap='gray')
    plt.show()
def generate_datasets(pickle_file):
    """Download, extract, pickle and merge the notMNIST data, then save the
    combined train/valid/test split to pickle_file (if not already present)."""
    train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
    test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
    print(train_filename, test_filename)
    train_folders = maybe_extract(train_filename)
    test_folders = maybe_extract(test_filename)
    print(train_folders, test_folders)
    train_datasets = maybe_pickle(train_folders, 45000)
    test_datasets = maybe_pickle(test_folders, 1800)
    print(train_datasets, test_datasets)
    valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
        train_datasets, train_size, valid_size
    )
    _, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
    print('Training:', train_dataset.shape, train_labels.shape)
    print('Validation:', valid_dataset.shape, valid_labels.shape)
    print('Testing:', test_dataset.shape, test_labels.shape)
    # shuffle every split so the per-class blocks are interleaved
    train_dataset, train_labels = randomize(train_dataset, train_labels)
    test_dataset, test_labels = randomize(test_dataset, test_labels)
    valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
    if not os.path.isfile(pickle_file):
        try:
            with open(pickle_file, 'wb') as fobj:
                save = {
                    'train_dataset': train_dataset,
                    'train_labels': train_labels,
                    'valid_dataset': valid_dataset,
                    'valid_labels': valid_labels,
                    'test_dataset': test_dataset,
                    'test_labels': test_labels,
                }
                pickle.dump(save, fobj, pickle.HIGHEST_PROTOCOL)
        except Exception as e:
            print('Unable to save data to', pickle_file, ':', e)
            raise
def find_duplicates(dataset1, labels1, dataset2, labels2, threshold=EPS):
    """Find indices of items in *dataset2* that duplicate an item in *dataset1*.

    dataset1 images are bucketed into ``num_groups`` groups sorted by mean
    pixel value; each dataset2 item is compared (by MSE) only against the
    group its own mean falls into, which keeps the search cheap.

    Parameters:
        dataset1, labels1: reference images (n, h, w) and their labels
        dataset2, labels2: images to screen for duplicates and their labels
        threshold: MSE below which two images count as duplicates
    Returns:
        List of dataset2 indices considered duplicates of a dataset1 item.

    NOTE(review): len(dataset1) must be divisible by num_groups or the
    np.reshape below raises — confirm this holds for all callers.
    """
    means = np.mean(dataset1, axis=(1,2))
    indices = means.argsort()
    num_groups = 2000
    indices = np.reshape(indices, (num_groups, indices.shape[0] // num_groups))
    duplicate_indices = []
    for dataset2_idx, item in enumerate(dataset2):
        # Pick the last group whose smallest mean does not exceed this item's mean.
        belongs_to_group = 0
        item_mean = item.mean()
        for group_idx in range(num_groups):
            if item_mean >= means[indices[group_idx][0]]:
                belongs_to_group = group_idx
            else:
                break
        # Compare against every reference image in the chosen group.
        for dataset_idx in indices[belongs_to_group]:
            mse = ((item - dataset1[dataset_idx]) ** 2).mean()
            if mse < threshold:
                if labels1[dataset_idx] != labels2[dataset2_idx]:
                    print(
                        'Found duplicate! Labels are different',
                        labels1[dataset_idx],
                        labels2[dataset2_idx],
                    )
                duplicate_indices.append(dataset2_idx)
                # show_image_float(item)
                # show_image_float(dataset1[dataset_idx])
                break
        processed = dataset2_idx + 1
        if processed % 100 == 0:
            # Fixed message: first number counts processed items, not duplicates.
            print('Processed', processed, 'items, duplicates so far:', len(duplicate_indices))
    # Fixed typo: 'duplicantes' -> 'duplicates'.
    print('Found total of', len(duplicate_indices), 'duplicates!')
    return duplicate_indices
def _dedupe_against_train(train_dataset, train_labels, dataset, labels):
    """Return (dataset, labels) with items duplicated in the training set removed."""
    duplicate_indices = find_duplicates(
        train_dataset, train_labels, dataset, labels, threshold=0.005,
    )
    return (np.delete(dataset, duplicate_indices, axis=0),
            np.delete(labels, duplicate_indices, axis=0))

def sanitize_datasets(datasets):
    """Remove test/validation items that duplicate training images and save the result.

    Writes a 'notMNIST_sanitized.pickle' file under data_root containing the
    original training split plus the deduplicated validation/test splits.
    Raises whatever exception the pickle write fails with (after logging it).
    """
    train_dataset, train_labels = extract_dataset(datasets, 'train')
    # NOTE(review): 'valid' is extracted as the test split and 'test' as the
    # validation split; this mirrors main() — confirm the swap is intended.
    test_dataset, test_labels = extract_dataset(datasets, 'valid')
    valid_dataset, valid_labels = extract_dataset(datasets, 'test')
    # The two dedupe+delete passes were duplicated inline; factored into one helper.
    test_dataset_sanitized, test_labels_sanitized = _dedupe_against_train(
        train_dataset, train_labels, test_dataset, test_labels,
    )
    valid_dataset_sanitized, valid_labels_sanitized = _dedupe_against_train(
        train_dataset, train_labels, valid_dataset, valid_labels,
    )
    pickle_file_sanitized = os.path.join(data_root, 'notMNIST_sanitized.pickle')
    try:
        with open(pickle_file_sanitized, 'wb') as fobj:
            save = {
                'train_dataset': train_dataset,
                'train_labels': train_labels,
                'valid_dataset': valid_dataset_sanitized,
                'valid_labels': valid_labels_sanitized,
                'test_dataset': test_dataset_sanitized,
                'test_labels': test_labels_sanitized,
            }
            pickle.dump(save, fobj, pickle.HIGHEST_PROTOCOL)
    except Exception as e:
        print('Unable to save data to', pickle_file_sanitized, ':', e)
        raise
def train_logistic_classifier(x, y, x_test, y_test, num_train_samples=None, *, interactive=False):
    """Train a multinomial logistic-regression classifier on flattened images.

    Parameters:
        x, y: training images (n, res, res) and labels
        x_test, y_test: test images and labels
        num_train_samples: optional cap on the number of training samples used
        interactive: when True, print per-image predictions for the first ten
            test images (this was a hard-coded dead local flag; the default
            preserves the old behavior)
    Returns:
        The fitted LogisticRegression instance.
    """
    tstart = time.time()
    if num_train_samples is None:
        num_train_samples = x.shape[0]
    x = x[:num_train_samples]
    y = y[:num_train_samples]
    # Flatten images to (n_samples, res*res) feature vectors.
    resolution = x.shape[1]
    x = x.reshape(num_train_samples, resolution * resolution)
    x_test = x_test.reshape(x_test.shape[0], resolution * resolution)
    classifier = LogisticRegression(
        C=100./num_train_samples,  # scale regularization with sample count
        multi_class='multinomial',
        penalty='l2',
        solver='saga',
        max_iter=200,
    )
    classifier.fit(x, y)
    sparsity = np.mean(classifier.coef_ == 0) * 100
    score_train = classifier.score(x, y)
    score_test = classifier.score(x_test, y_test)
    print('Sparsity: %.2f%%' % sparsity)
    print('Train score: %.4f' % score_train)
    print('Test score: %.4f' % score_test)
    if interactive:
        # Show predictions and probabilities for a few test images.
        for i in range(10):
            image = x_test[i]
            prediction = classifier.predict(image.reshape((1, resolution * resolution)))[0]
            prediction_proba = classifier.predict_proba(image.reshape((1, resolution * resolution)))
            print('Prediction:', prediction, chr(ord('a') + prediction))
            print('Prediction proba:', prediction_proba)
            show_image_float(image.reshape((resolution, resolution)))
    # Visualize the learned weight vector for each of the ten classes.
    coef = classifier.coef_.copy()
    plt.figure(figsize=(10, 5))
    scale = np.abs(coef).max()
    for i in range(10):
        plot = plt.subplot(2, 5, i + 1)
        plot.imshow(
            coef[i].reshape(resolution, resolution),
            interpolation='nearest',
            cmap=plt.cm.RdBu,
            vmin=-scale,
            vmax=scale,
        )
        plot.set_xticks(())
        plot.set_yticks(())
        plot.set_xlabel('Class %i' % i)
    plt.suptitle('Classification vector for...')
    run_time = time.time() - tstart
    print('Example run in %.3f s' % run_time)
    plt.show()
    return classifier
def main():
    """Script entry point: build/load the datasets and train a small classifier."""
    init_logger(os.path.basename(__file__))
    pickle_file = os.path.join(data_root, PICKLE_FILE)
    if not os.path.isfile(pickle_file):
        generate_datasets(pickle_file)
    datasets = load_datasets(pickle_file)
    # NOTE(review): 'valid' is loaded as the test split and 'test' as the
    # validation split; this mirrors sanitize_datasets() — confirm intent.
    train_dataset, train_labels = extract_dataset(datasets, 'train')
    test_dataset, test_labels = extract_dataset(datasets, 'valid')
    valid_dataset, valid_labels = extract_dataset(datasets, 'test')
    logger.info('%r %r', train_dataset.shape, train_labels.shape)
    logger.info('%r %r', test_dataset.shape, test_labels.shape)
    logger.info('%r %r', valid_dataset.shape, valid_labels.shape)
    def train(samples=None):
        # Thin wrapper so different training-set sizes can be tried easily.
        return train_logistic_classifier(
            train_dataset,
            train_labels,
            test_dataset,
            test_labels,
            num_train_samples=samples,
        )
    classifier = train(300)
    # Derive the flattened feature size from the data (reshape -1) instead of
    # the hard-coded 28*28 so non-28x28 inputs also work.
    score_valid = classifier.score(valid_dataset.reshape(valid_dataset.shape[0], -1), valid_labels)
    print('Valid score: %.4f' % score_valid)
    return 0
if __name__ == '__main__':
sys.exit(main()) | {
"alphanum_fraction": 0.6471323336,
"author": null,
"avg_line_length": 37.3562653563,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1d19da61f9bf446a9638a18102d8c2594b1fa562",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "14714ee4151b798cde0a31a94ac65e08b87d0f65",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "alex-petrenko/udacity-deep-learning",
"max_forks_repo_path": "assignment_01_notmnist.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "14714ee4151b798cde0a31a94ac65e08b87d0f65",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "alex-petrenko/udacity-deep-learning",
"max_issues_repo_path": "assignment_01_notmnist.py",
"max_line_length": 115,
"max_stars_count": 7,
"max_stars_repo_head_hexsha": "14714ee4151b798cde0a31a94ac65e08b87d0f65",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "alex-petrenko/udacity-deep-learning",
"max_stars_repo_path": "assignment_01_notmnist.py",
"max_stars_repo_stars_event_max_datetime": "2020-02-13T19:31:15.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-10-02T01:30:44.000Z",
"num_tokens": 3406,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 15204
} |
# Matrix-based 3D viewer demo: builds a wireframe cube and a plane and shows
# them in an interactive viewer window with one toggle button per object.
import sys, os
sys.path.append(os.getcwd())  # make the sibling modules importable when run directly
import entities, viewer
import numpy as np
import importlib
# Reload so edits to the local modules are picked up in a live session.
importlib.reload(entities)
importlib.reload(viewer)
# Cube: 8 corner nodes at +/-75 on each axis; node index n = 4*ix + 2*iy + iz.
cube = entities.Entity(node_color=(255, 255, 255), name="cube")
cube_nodes = [(x, y, z) for x in (-75, 75) for y in (-75, 75) for z in (-75, 75)]
cube.addNodes(np.array(cube_nodes))
cube.addEdges([(n, n + 4) for n in range(0, 4)])  # edges along x
cube.addEdges([(n, n + 1) for n in range(0, 8, 2)])  # edges along z
cube.addEdges([(n, n + 2) for n in (0, 1, 4, 5)])  # edges along y
# Plane: 4 nodes in the z=0 plane, connected all-to-all.
# NOTE(review): the edge list includes self-edges such as (0, 0) — presumably
# ignored by the renderer, but confirm they are intentional.
plain = entities.Entity(node_color=(255, 255, 255), name="plane")
plain_nodes = [(x, y, z) for x in (0, 150) for y in (0, 150) for z in (0,)]
plain_edges = [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3), (3, 0), (3, 1), (3, 2), (3, 3)]
plain.addNodes(np.array(plain_nodes))
plain.addEdges(plain_edges)
# 500x500 viewer window holding both objects.
yes = viewer.Viewer(500, 500)
yes.addObjects([cube, plain])
objects = ["context", "cube", "plane"]
#buttons
buttons = []
font_size = 20
# One button per object name, stacked vertically down the left edge.
for i in range(len(objects)):
    buttons.append(viewer.Button(id=i, background_color=(147, 255, 0), text_color=(255, 0, 247), top_left = (0, i*(font_size+2)), text=objects[i], font_size = font_size))
yes.addButtons(buttons)
yes.run()  # blocks in the viewer's event loop
| {
"alphanum_fraction": 0.6286885246,
"author": null,
"avg_line_length": 32.1052631579,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "bb7adaaee71678d1d180363d9d219fc26fedfb79",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-08-19T17:25:22.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-08-19T17:25:22.000Z",
"max_forks_repo_head_hexsha": "4cd94e0cf53ee06c9c31e9272572ca9656697c30",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "DarkShadow4/python",
"max_forks_repo_path": "Python 3/PyGame/Matrix_based_3D/test.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "4cd94e0cf53ee06c9c31e9272572ca9656697c30",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "DarkShadow4/python",
"max_issues_repo_path": "Python 3/PyGame/Matrix_based_3D/test.py",
"max_line_length": 170,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "4cd94e0cf53ee06c9c31e9272572ca9656697c30",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "DarkShadow4/python",
"max_stars_repo_path": "Python 3/PyGame/Matrix_based_3D/test.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 453,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1220
} |
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import clear_output
import os
import contextlib
np.seterr(all='raise')
class NoSignChange(Exception):
    """Raised when findZeros has the same sign at both ends of the search interval."""

    def __init__(self, message='No sign change in specified bounds'):
        # Forward the message so str(exc) shows it.
        super().__init__(message)
class BoundError(Exception):
    """Raised when the refined upper/lower bounds on v0 disagree too much."""

    def __init__(self, message='Bounds on v0 are divergent'):
        # Forward the message so str(exc) shows it.
        super().__init__(message)
class solver():
def __init__(self, gamma=1, a=10):
    """
    Parameters:
        gamma: numerical constant, from equation of state
        a: numerical constant, equal to GM/cs^2 r0
    """
    self.gamma = gamma
    self.a = a
    # Initial w value; np.log(1) == 0.0.
    self.W0 = np.log(1)
def ndf1(self, psi, coords):
'''Dimensionless f1 function, takes t, (x, y, z) and returns 1-exp[2y-z]'''
try:
#return 1-np.exp(2*coords[1])
return 1-np.exp(2*coords[1]-coords[2])
except OverflowError or UnderflowError:
print("Error in ndf1 at ", psi, ", ", coords)
return -1
def ndf2(self, psi, coords):
'''Dimensionless f2 function, takes t, (x, y, z) and returns (GM/r0*cs^2)exp[-x-z]-2'''
try:
#return np.exp(-coords[0])*a-2
return self.a*np.exp(-coords[0]-coords[2])-2
except OverflowError or UnderflowError:
print("Error in ndf2 at ", psi, ", ", coords)
return -1
def dx(self, t, state):
    '''Infinitesimal x step'''
    # x' = f1(t, state); see ndf1.
    return self.ndf1(t, state)
def du(self, t, state):
    '''Infinitesimal u step'''
    # u' = f2(t, state); see ndf2.
    return self.ndf2(t, state)
def dw(self, t, state):
    '''Infinitesimal w step'''
    # w' = -(gamma-1)*(2*f1 + f2); zero when gamma == 1 (the default).
    return -(self.gamma-1)*(2*self.ndf1(t, state)+self.ndf2(t, state))
def adx(self, t, state):
    '''Absolute value of dx'''
    # AV variant used by generateFunc/findZeros when AV=True.
    return abs(self.ndf1(t, state))
def adu(self, t, state):
    '''Absolute value of du'''
    # AV variant used by generateFunc/findZeros when AV=True.
    return abs(self.ndf2(t, state))
def adw(self, t, state):
    '''Absolute value of dw'''
    # NOTE(review): despite the docstring this is NOT abs(dw) — it is the dw
    # formula with |f1| and |f2| substituted, i.e. -(gamma-1)*(2*|f1|+|f2|),
    # which is non-positive for gamma > 1. Confirm this is intentional.
    return -(self.gamma-1)*(2*abs(self.ndf1(t, state))+abs(self.ndf2(t, state)))
def coupledRKStep(self, funs, state, dt):
    """RK4 step for array of coupled functions that evolve a state, for step size dt
    Returns array (t, new state)
    funs and state[1] should have the same length
    Parameters:
        funs: array of functions [dx, du, dw] to evolve a state
        state: numpy array [t, [x, u, w]]
        dt: generalized time step size
    Returns:
        numpy array [t, [x, u, w]] incremented by one RK4 step
    """
    assert isinstance(funs, list), "Expected array input"
    for i in funs:
        assert callable(i), "Expected functions in list"
    assert isinstance(state, (np.ndarray, list)), "Expected array input"
    assert np.shape(state) == (2, ), "Expected 1D array with 2 elements"
    assert isinstance(state[1], (np.ndarray, list)), "Expected array in second element"
    assert np.shape(state[1]) == (len(funs), ), "State and function arrays are different lengths"
    assert isinstance(dt, (float, int, np.float64)), "Expected float input"
    # karr[i][j] holds stage k_{j+1} for function i (rows: functions, cols: stages).
    karr = np.array([np.zeros(4) for i in range(len(funs))])
    for j in range(4):
        for i in range(len(funs)):
            if j == 0:
                karr[i][j] = dt*funs[i](state[0], state[1])
            else:
                # NOTE(review): every later stage is evaluated at t+dt/2 with
                # HALF the previous stage. Textbook RK4 evaluates the 4th
                # stage at t+dt with the full k3 — confirm this deviation is
                # intentional before reusing this integrator.
                karr[i][j] = dt*funs[i](state[0]+dt/2, state[1]+np.array([karr[k][j-1]/2 for k in range(len(karr))]))
    for i in range(len(funs)):
        # RK4 weighting: the two middle stages count double in the average.
        karr[i][1] = 2*karr[i][1]
        karr[i][2] = 2*karr[i][2]
    step = np.array([state[1][i]+np.sum(karr[i])/6 for i in range(len(karr))])
    return np.array([state[0]+dt, step])
def percentChange(self, curr, step):
"""Takes a current state and a new state (vectors) and calculates the percent change
Parameters:
curr: numpy array (x, u)
step: numpy array (x', u')
Returns:
Percent change from curr to step"""
assert isinstance(curr, (np.ndarray, list)), "Expected array input"
assert isinstance(step, (np.ndarray, list)), "Expected array input"
assert len(curr) == len(step), "Array lengths are not the same"
return 100*abs(np.linalg.norm((step-curr)/np.linalg.norm(curr)))
def adaptRK(self, currState, pc, t, dt, funs):
    """Iteratively adapts dt to ensure the change in (x, u) is between .1 and 1 percent
    This new dt can be either larger or smaller, independent of the initial percent change
    Takes a state (x, u, w), a percent change, t, dt, and the set of functions (dx, du, dw) being used to evolve the system
    Parameters:
        currState: numpy array (x, u, w)
        pc: percent change between currState and the next RK step with t, dt
        t: generalized time of currState
        dt: generalized time step size
        funs: array of functions [dx, du, dw] to evolve a state
    Returns:
        dt, adjusted so that the percent change is between .1 and 1 percent

    NOTE(review): if pc is already within [.1, 1] neither branch runs and the
    method implicitly returns None; callers only invoke it when pc is outside
    that band — confirm before reusing elsewhere.
    """
    assert isinstance(funs, (np.ndarray, list)), "Expected array input"
    for i in funs:
        assert callable(i), "Expected functions in list"
    assert isinstance(currState, (np.ndarray, list)), "Expected array input"
    assert np.shape(currState) == (len(funs), ), "Expected 1D array input"
    assert isinstance(dt, (float, int, np.float64)), "Expected float input"
    assert isinstance(t, (float, int, np.float64)), "Expected float input, got {0}".format(type(t))
    assert isinstance(pc, (float, int, np.float64)), "Expected float input"
    ddt = 1.5  # multiplicative factor applied to dt each iteration
    i = 0
    itermax = 10000
    if pc > 1:
        # Step too large: shrink (or grow) dt until the change drops below 1%.
        pc2 = 1e10 #initialize dummy percent changes, used to track movement in % change while finding new dt
        prevpc2 = 1e10
        while pc2 > 1 and i < itermax:
            dt = dt*ddt
            step2 = self.coupledRKStep(funs, [t, currState], dt) #calculate hypothetical next step using new dt
            pc2 = self.percentChange(currState, step2[1])
            if pc2 > prevpc2: #if we're moving in the wrong direction, invert our change in dt
                ddt = 1/ddt
            prevpc2 = pc2
            i = i+1
        if i == itermax: print("Max iteration count exceeded in adaptation")
        return dt #once we've found a working dt, take a step using it
    elif pc < .1:
        # Step too small: adjust dt until the change rises above 0.1%.
        pc2 = 1e-10 #initialize dummy percent changes, used to track movement in % change while finding new dt
        prevpc2 = 1e-10
        while pc2 < .1 and i < itermax:
            dt = dt*ddt
            step2 = self.coupledRKStep(funs, [t, currState], dt) #calculate hypothetical next step using new dt
            pc2 = self.percentChange(currState, step2[1])
            if pc2 < prevpc2: #if we're moving in the wrong direction, invert our change in dt
                ddt = 1/ddt
            prevpc2 = pc2
            i = i+1
        if i == itermax: print("Max iteration count exceeded in adaptation")
        return dt #once we've found a working dt, take a step using it
def generateFunc(self, state0, itermax=10000, AV=True, xrange=10, urange=5):
    """Generates a trace of wind behavior with initial conditions x0 and u0 (dimensionless) using the RK4 method with adaptation in dt
    Takes x0, u0, max iteration count and returns a 2D array tracking t, x, and u
    Parameters:
        state0: initial state of system (x0, u0, w0)
        itermax: maximum iteration count for loop
        AV: use the absolute value of f1 and f2 (boolean)
        xrange: maximum x value to display on plot (displays (1, xrange))
        urange: maximum u value to display on plot (displays (0, urange))
    Returns:
        numpy array, [0] contains t values, [1] contains x values, [2] contains u values, [3] contains w values for the wind curve"""
    assert isinstance(state0, (list, np.ndarray)), "Expected array input"
    assert len(state0) == 3, "Expected state array of length 3"
    assert isinstance(itermax, int), "Expected integer input"
    assert itermax > 0, "Expected positive itermax"
    assert isinstance(AV, bool), "Expected boolean input"
    assert isinstance(xrange, (float, int, np.float64)), "Expected numerical input"
    assert xrange > 1, "Expected xrange > 1"
    assert isinstance(urange, (float, int, np.float64)), "Expected numerical input"
    assert urange > 0, "Expected positive urange"
    # Solution arrays, seeded with the initial state; grown by np.append below.
    xsol = np.array([state0[0]])
    usol = np.array([state0[1]])
    wsol = np.array([state0[2]])
    tarray = np.array([0])
    t = 0
    dt = .01
    i = 0
    currState = np.array([xsol[-1], usol[-1], wsol[-1]])
    # Pick the absolute-value derivative variants when AV is requested.
    if AV:
        funs = [self.adx, self.adu, self.adw]
    else:
        funs = [self.dx, self.du, self.dw]
    #Main loop for adaptive RK solver
    #Exit conditions are based on values for exp(x)
    #Using zero points for f1 and f2 only works if you change ndf1 and ndf2 to return absolute values, and then you don't see the full curve
    #Setting a max iteration count doesn't always work well - the solution curves may be cut
    while np.exp(currState[0]) > 1e-6 and np.exp(currState[0]) < xrange and i < itermax:
        #Load the current position of the system to determine if adaptation is necessary
        currState = np.array([xsol[-1], usol[-1], wsol[-1]])
        #Calculate the next integration step using the RK function defined above
        step = self.coupledRKStep(funs, [t, currState], dt)
        #Calculate percent change from current state to predicted next state
        pc = self.percentChange(currState, step[1])
        #If the percent change is too large or too small, change dt to compensate
        if pc > 1 or pc < .1:
            dt = self.adaptRK(currState, pc, t, dt, funs)
            step = self.coupledRKStep(funs, [t, currState], dt)
        xsol = np.append(xsol, step[1][0]) #update solution curves with next step
        usol = np.append(usol, step[1][1])
        wsol = np.append(wsol, step[1][2])
        t = t+dt
        i = i+1
        tarray = np.append(tarray, t)
    return np.array((tarray, xsol, usol, wsol))
def makePlot(self, u0, AV=True, xrange=10, urange=5):
    """Generates a plot of one wind curve
    Takes u0 and generates a curve
    No return, prints a plot of the wind curve
    Parameters:
        u0: initial value of u
        AV: use absolute value of f1 and f2 functions (boolean)
        xrange: maximum x value to display on plot (displays (1, xrange))
        urange: maximum u value to display on plot (displays (0, urange))
    Displays a plot of one wind curve"""
    assert isinstance(u0, (float, int, np.float64)), "Expected float input"
    assert u0 > 0, "Expected positive input"
    assert isinstance(AV, bool), "Expected boolean input"
    assert isinstance(xrange, (float, int, np.float64)), "Expected numerical input"
    assert xrange > 1, "Expected xrange > 1"
    assert isinstance(urange, (float, int, np.float64)), "Expected numerical input"
    assert urange > 0, "Expected positive urange"
    plt.figure(1)
    plt.xlim(1, xrange)
    plt.ylim(0, urange)
    # Integrate from x0=0, u0=log(v0/cs), w0=W0; plot in physical (exp) units.
    func = self.generateFunc(np.array([0, np.log(u0), self.W0]), 10000, AV, xrange, urange)
    plt.scatter(np.exp(func[1]), np.exp(func[2]-func[3]/2), s=.5);
    plt.title("Velocity vs Radius (Dimensionless), v0 = %1.9f cs"%u0)
    plt.xlabel("r/r0")
    plt.ylabel("v/cs")
def makePlots(self, vmin, vmax, dv, AV=True, xrange=10, urange=5, showAll=False):
    """Generates a plot of wind curves
    Takes vmin, vmax, dv and generates a curve for u0 (x0 = 0), then increments v0 by dv and generates a new curve, repeating until v0 = umax
    Expects vmin, vmax, and dv scaled by 1/cs
    No return, prints a plot of the different wind curves
    Parameters:
        vmin: starting v0 value
        vmax: maximum v0 value (exclusive)
        dv: increment of v0
        AV: Use absolute value of f1 and f2 functions (boolean)
        xrange: maximum x value to display on plot (displays (1, xrange))
        urange: maximum u value to display on plot (displays (0, urange))
        showAll: boolean, True will plot additional graphs of velocity and temperature
    Displays a plot of several different wind curves"""
    assert isinstance(vmin, (float, int, np.float64)), "Expected float input"
    assert vmin > 0, "Expected positive input"
    assert isinstance(vmax, (float, int, np.float64)), "Expected float input"
    assert vmax > 0, "Expected positive input"
    assert isinstance(dv, (float, int, np.float64)), "Expected float input"
    assert dv > 0, "Expected positive input"
    assert vmax > vmin, "Maximum less than minimum"
    assert isinstance(AV, bool), "Expected boolean input"
    assert isinstance(xrange, (float, int, np.float64)), "Expected numerical input"
    assert xrange > 1, "Expected xrange > 1"
    assert isinstance(urange, (float, int, np.float64)), "Expected numerical input"
    assert urange > 0, "Expected positive urange"
    plt.figure(1)
    plt.xlim(1, xrange)
    plt.ylim(0, urange)
    # Integrate one curve per starting velocity and collect them in `data`.
    for i in np.arange(vmin, vmax, dv):
        func = self.generateFunc(np.array([0, np.log(i), self.W0]), 10000, AV, xrange, urange)
        if i == vmin:
            data = [func]
        else:
            data.append(func)
    plt.figure(1)
    plt.title("Velocity vs Radius (Dimensionless)")
    for i in range(len(data)):
        plt.scatter(np.exp(data[i][1]), np.exp(data[i][2]-data[i][3]/2), s=.5, label='v0/cs = %g'%np.exp(data[i][2][0]));
    plt.xlabel("r/r0")
    plt.ylabel("v/cs")
    plt.legend(loc="upper left", bbox_to_anchor=(1, 1))
    if showAll:
        # Extra diagnostic figures: temperature vs radius and vs velocity.
        plt.figure(2)
        plt.title("Temperature vs Radius (Dimensionless)")
        for i in range(len(data)):
            plt.scatter(np.exp(data[i][1]), np.exp(data[i][3]), s = .5, label = 'v0/cs = %g'%np.exp(data[i][2][0]));
        plt.xlabel("r/r0")
        plt.ylabel("v/cs")
        plt.legend(loc = "upper left", bbox_to_anchor = (1, 1))
        plt.figure(3)
        plt.title("Temperature vs Velocity (Dimensionless)")
        for i in range(len(data)):
            plt.scatter(np.exp(data[i][2]), np.exp(data[i][3]), s = .5, label = 'v0/cs = %g'%np.exp(data[i][2][0]));
        plt.xlabel("r/r0")
        plt.ylabel("v/cs")
        plt.legend(loc = "upper left", bbox_to_anchor = (1, 1))
def findZeros(self, v0):
    """Finds the t values where f1 and f2 reach zero, and returns the difference
    Takes starting velocity v0, expected to be scaled by 1/cs
    Returns tu-tx
    Parameters:
        v0: initial value of v/cs

    NOTE(review): the loop exits as soon as EITHER sign change is found, so
    one of tu/tx is typically still 0 on return — the sign of tu-tx then
    encodes which function flipped first. Confirm this is the intended
    contract before relying on the magnitude.
    """
    assert isinstance(v0, (float, int, np.float64)), "Expected numerical input"
    assert v0 > 0, "Expected positive input"
    u0 = np.log(v0)
    xsol = np.array([0])
    usol = np.array([u0])
    wsol = np.array([self.W0])
    t = 0
    dt = .01
    currState = np.array([xsol[-1], usol[-1], wsol[-1]])
    xfound = False
    ufound = False
    tu = 0
    tx = 0
    #Main loop, uses RK solver and iterates until f1 or f2 changes sign, and returns the t value where that takes place
    while not ufound and not xfound:
        #Load the current position of the system to determine if adaptation is necessary
        currState = np.array([xsol[-1], usol[-1], wsol[-1]])
        #Calculate the next integration step using the RK function defined above
        step = self.coupledRKStep([self.adx, self.adu, self.adw], np.array([t, currState]), dt)
        #Calculate percent change from current state to predicted next state
        pc = self.percentChange(currState, step[1])
        #If the percent change is too large or too small, change dt to compensate
        if pc > 1 or pc < .1:
            dt = self.adaptRK(currState, pc, t, dt, [self.adx, self.adu, self.adw])
            step = self.coupledRKStep([self.adx, self.adu, self.adw], np.array([t, currState]), dt)
        #if ndf2 changes sign, we have found its turnover point and can exit the loop
        if np.sign(self.ndf2(t, step[1])) != np.sign(self.ndf2(t, currState)):
            xfound = True
            tx = t
        #if ndf1 changes sign, we have found its turnover point and can exit the loop
        if np.sign(self.ndf1(t, step[1])) != np.sign(self.ndf1(t, currState)):
            ufound = True
            tu = t
        #update solution curves with next step
        xsol = np.append(xsol, step[1][0])
        usol = np.append(usol, step[1][1])
        wsol = np.append(wsol, step[1][2])
        t = t+dt
    #return difference between zeros in ndf1 and ndf2
    return (tu-tx)
def findVboundary(self, guess, increment=1e-4, maxprecision=1e-10, itermax=10000):
    """Locates the boundary value of v0 at which f1 and f2 pass through zero at the same time using a bisection method
    Parameters:
        guess: starting value of v0
        increment: initial step size for incrementing v0
        maxprecision: sets limit on how small dv can be before exiting the loop
        itermax: maximum iteration count
    Returns:
        Boundary value for v0
    Raises:
        Exception("MaxIterExceeded") when itermax iterations pass without converging"""
    assert isinstance(guess, (float, int, np.float64)), "Expected numerical input"
    assert guess > 0, "Expected positive input"
    assert isinstance(increment, (float, int, np.float64)), "Expected numerical input"
    assert isinstance(maxprecision, (float, int, np.float64)), "Expected numerical input"
    assert maxprecision > 0, "Expected positive input"
    assert isinstance(itermax, int), "Expected integer input"
    assert itermax > 0, "Expected positive input"
    v0 = guess
    dv = increment
    i = 0
    #Main loop, while dv is larger than the max precision we want to continue refining our search
    while abs(dv) > maxprecision and i < itermax:
        #When we find a sign change in the zero for a given v0, that implies that the solution has changed character between a breeze and a non-physical one
        #i.e. whichever of f1 and f2 changed sign first, that has now reversed
        #when that happens, we shrink dv until we avoid the sign change, in order to approximate where exactly that occurs in v0
        while (v0+dv) <= 0:
            # Keep the trial velocity positive.
            dv = dv/2
        # NOTE(review): return value discarded — looks like a leftover debug
        # call; findZeros does not mutate self, so this only burns time.
        self.findZeros(v0+dv)
        while np.sign(self.findZeros(v0+dv)) != np.sign(self.findZeros(v0)):
            dv = dv/2
            while (v0+dv) <= 0:
                dv = dv/2
        v0 = v0+dv
        i = i+1
    if i >= itermax:
        print("Max iteration count exceeded")
        raise Exception("MaxIterExceeded")
    return v0+dv
def findV0(self, lowerguess, upperguess, dv, maxprecision=1e-10, itermax=10000, xrange=10, urange=5, show=True, showPlot=False):
"""Finds boundary values for v0 and estimates an exact value for it
Parameters:
lowerguess: estimate of lower bound on v0/cs
upperguess: estimate of upper bound on v0/cs
dv: initial step size for incrementing v0/cs
maxprecision: sets limit on how small dv can be before exiting the loop
itermax: maximum iteration count
xrange: maximum x value to display on plot (displays (1, xrange))
urange: maximum u value to display on plot (displays (0, urange))
show: show plot of v0 once it is found (boolean)
Returns:
Average of upper and lower bounds on v0
Prints bounds on v0, estimated value, and estimated error
Displays plot of the wind curves for the boundary values of v0 if show = True"""
assert isinstance(lowerguess, (float, int, np.float64)), "Expected numerical input"
assert lowerguess > 0, "Expected positive input, got %"%lowerguess
assert isinstance(upperguess, (float, int, np.float64)), "Expected numerical input"
assert upperguess > 0, "Expected positive input, got %f"%upperguess
assert isinstance(dv, (float, int, np.float64)), "Expected numerical input"
assert dv > 0, "Expected positive input, got %"%dv
assert isinstance(maxprecision, (float, int, np.float64)), "Expected numerical input"
assert maxprecision > 0, "Expected positive input"
assert isinstance(itermax, int), "Expected integer input"
assert itermax > 0, "Expected positive input"
assert isinstance(xrange, (float, int, np.float64)), "Expected numerical input"
assert xrange > 1, "Expected xrange > 1"
assert isinstance(urange, (float, int, np.float64)), "Expected numerical input"
assert urange > 0, "Expected positive urange"
if np.sign(self.findZeros(upperguess)) == np.sign(self.findZeros(lowerguess)):
print("No sign change in specified range")
raise NoSignChange
dv=(upperguess-lowerguess)/2
upper = self.findVboundary(upperguess, -dv, maxprecision, itermax)
lower = self.findVboundary(lowerguess, dv, maxprecision, itermax)
if show:
print("Lower bound on v0: ", lower)
print("Upper bound on v0: ", upper)
if abs(lower-upper) > 1e-2:
print("divergent bounds, exiting")
raise BoundError
print("Estimated v0: ", (lower+upper)/2)
print("Estimated error: ", abs((upper-lower)/2))
if showPlot:
self.makePlot(lower, False, xrange, urange)
self.makePlot(upper, False, xrange, urange)
return (lower+upper)/2
def gammaSearch(self, a=10, g0=None, dg=.0025, glim=5/3, lower=1e-10, upper=.9, itermax=100):
    """Searches through gamma values for a given a and returns a table of gamma values and associated critical velocities
    Parameters:
        a: GM/cs^2 r0; constant value
        g0: starting gamma value, defaults to the gamma the class was initialized with
        dg: gamma increment (positive or negative)
        glim: gamma value at which the search will stop
        lower: estimate of lower bound on v0/cs for first gamma value
        upper: estimate of upper bound on v0/cs for first gamma value
        itermax: maximum iterations for searching a given gamma value for v0
    Returns:
        List of ordered pairs [gamma, v0]
    Prints scatter plot of v0 vs gamma
    """
    assert isinstance(a, (float, int, np.float64)), "Expected numerical input"
    assert a > 0, "Expected positive input"
    assert g0 is None or isinstance(g0, (float, int, np.float64)), "Expected numerical input"
    assert g0 is None or g0 > 0, "Expected positive input"
    assert isinstance(dg, (float, int, np.float64)), "Expected numerical input"
    assert isinstance(glim, (float, int, np.float64)), "Expected numerical input"
    assert isinstance(lower, (float, int, np.float64)), "Expected numerical input"
    assert lower > 0, "Expected positive input"
    assert isinstance(upper, (float, int, np.float64)), "Expected numerical input"
    assert upper > lower, "Nonsensical bounds"
    assert upper < 1, "Upper bound should be less than 1"
    gdata = np.array([])
    if a != None:
        self.a = a
    if g0 != None:
        self.gamma = float(g0)
    else:
        g0 = float(self.gamma)
    # i counts consecutive failed attempts for the CURRENT gamma; it resets
    # to 0 after each success and after each bound decrement retry.
    while (i < itermax) and ((self.gamma <= glim and np.sign(dg) == 1) or (self.gamma >= glim and np.sign(dg) == -1)):
        if i == 0: print("Searching gamma = ", self.gamma)
        if self.gamma == g0:
            # First gamma value: gdata becomes a flat [gamma, v0] pair.
            try:
                # Silence findV0's prints while searching.
                with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
                    gdata = [self.gamma, self.findV0(lower, upper, (upper-lower)/2, show = False)]
                self.gamma = self.gamma+dg
                i = 0
                clear_output()
            except NoSignChange:
                # No root bracketed: slide the search window downward and retry.
                if i == 0:
                    print("No sign change, decrementing bounds")
                upper = float(lower)
                lower = lower/2
                i = i+1
        else:
            # Subsequent gamma values: stack [gamma, v0] rows onto gdata.
            try:
                with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
                    gdata = np.vstack((gdata, [self.gamma, self.findV0(lower, upper, (upper-lower)/2, show=False)]))
                self.gamma = self.gamma+dg
                i = 0
                clear_output()
            except NoSignChange:
                if i == 0:
                    print("No sign change, decrementing bounds")
                upper = float(lower)
                lower = lower/2
                i = i+1
    if i >= itermax:
        print("Max iteration count exceeded at gamma = ", self.gamma)
        print("No sign change above v0/cs = ", lower)
    if len(gdata) > 0:
        plt.figure(1)
        plt.title("Critical velocity vs. Gamma")
        plt.xlabel("Gamma")
        plt.ylabel("v0/cs")
        plt.scatter(gdata[:, 0], gdata[:, 1])
    else:
        print("No data collected")
    return gdata
return gdata | {
"alphanum_fraction": 0.5831949268,
"author": null,
"avg_line_length": 45.5189003436,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "2cbe963f07942145345946724a3b259b4be4d736",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0088a0568841cda00ee8303b797d05be9feab844",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "colbrydi/neutrino-winds",
"max_forks_repo_path": "neutrino-winds/Adiabatic_wind_solver.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0088a0568841cda00ee8303b797d05be9feab844",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "colbrydi/neutrino-winds",
"max_issues_repo_path": "neutrino-winds/Adiabatic_wind_solver.py",
"max_line_length": 160,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0088a0568841cda00ee8303b797d05be9feab844",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "colbrydi/neutrino-winds",
"max_stars_repo_path": "neutrino-winds/Adiabatic_wind_solver.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 6644,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 26492
} |
import abc
import os
import inspect
import requests
import pandas as pd
import numpy as np
import xarray as xr
from .adapters import Adapter
from .bounds import Bounds
from .constants import (RCMED_QUERY_URL, ESGF_NODES,
DEFAULT_INTAKE_CAT, DEFAULT_INTAKE_ESM_CAT)
from .ensemble import Ensemble
from .registry import registry, register
from .utils import inherit_docs, decode_month_units, get_dropped_varnames
class DataSource(object):
    """Loads datasets into ocw workspace.
    A data source describes where the input data comes from (eg, from your
    local filesystem or a remote server) and how to obtain it (via load()).
    All loaded datasets are represented in OCW as instances of
    xarray.DataArray.
    """
    # NOTE(review): Python-2 style ABC declaration; under Python 3 this
    # attribute has no effect, so subclasses are not actually forced to
    # implement load() — confirm whether abc.ABC was intended.
    __metaclass__ = abc.ABCMeta
    def __init__(self, adapter=None):
        """Data source constructor.
        A data source describes where the input data comes from (eg, from your
        local filesystem or a remote server) and how to obtain it (via load()).
        All loaded datasets are represented in OCW as instances of
        xarray.DataArray. An optional parameter, adapter, allows for additional
        post-processing of the loaded DataArray object. The default adapter,
        an instance of bcdp.adapters.Adapter, simply changes the coordinate
        dimension names and attributes to comply with CF-format. For more
        complex post-processing use-cases, we recommend subclassing Adapter and
        passing an instance of such to this constructor.
        Parameters
        ----------
        adapter : bcdp.Adapter
            Default adapter to use for post-processing.
        """
        # 'basic' is presumably the registry key of the default Adapter;
        # Ensemble resolves it downstream — TODO confirm against bcdp.adapters.
        self.adapter = adapter if adapter else 'basic'
        # Per-instance cache of opened datasets keyed by path; populated by
        # subclasses (see LocalFileSource.load).
        self._cache = {}
    def __call__(self, *args, **kwargs):
        """Main interface for loading datasets.
        This combines the action of loading the dataset(s) from source into
        an `xarray.DataArray` object (via load()) and then building the
        evaluation Ensemble.
        Parameters
        ----------
        adapter : str, optional
            Name of adapter class to use, which must be a valid key in the
            adapters registry.
        dims : dict, optional
            Mapping of dimension labels (x, y, z and/or t) to their respective
            names in the file(s). This should rarely ever be needed to be set,
            since these can be easily inferred most of the time.
        labels : dict, optional
            Mapping of labels
        **kwargs : dict
            Keyword arguments to pass to load().
        Returns
        -------
        bcdp.Ensemble
            List of datasets to be evaluated.
        """
        # 'dims' and 'labels' are consumed here; everything else is forwarded
        # verbatim to the subclass load() implementation.
        dims = kwargs.pop('dims', {})
        labels = kwargs.pop('labels', {})
        datasets = self.load(*args, **kwargs)
        return Ensemble(datasets, adapter=self.adapter, dims=dims, **labels)
    @abc.abstractmethod
    def load(self, *args, **kwargs):
        """Loads datasets from given parameters."""
        pass
    def _prep_datasets(self, variable, dset_dict):
        # Convert a {name: xarray.Dataset} mapping into a list of DataArrays,
        # one per dataset, selecting `variable` from each.
        datasets = []
        for name, ds in dset_dict.items():
            variables = dict(ds.data_vars)
            if len(variables) == 1:
                # If no project or variable name information, infer it
                # from file_metadata. This will only work if the file has
                # one non-coordinate variable.
                # NOTE(review): this rebinds `variable`, so a single-variable
                # dataset earlier in the loop leaks its name into later
                # multi-variable datasets — confirm this is intentional.
                variable = list(variables.keys())[0]
            elif not variable:
                raise ValueError('Variable name must be specified for files'
                                 ' with more than one non-coord variable.')
            # Check if variable name is defined in metadata.
            # squeeze(drop=True) removes any length-1 dimensions (e.g. a
            # singleton level) along with their coordinates.
            da = ds[variable].squeeze(drop=True)
            da.attrs['variable_name'] = da.name
            da.name = name
            datasets.append(da)
        return datasets
@register('source.local')
class LocalFileSource(DataSource):
    """Local Filesystem data source."""
    def load(self, paths='./*.nc', variable=None, names=None, convert_times=False,
             use_dt64=False, dims=None, project=None, load_all=False, **kwargs):
        """Loads datasets from given parameters.
        Parameters
        ----------
        paths : str or list of str, optional
            Regex or (list of) full path(s) to the netcdf file(s) to open.
        variable : str, optional
            Variable Name. If input files have only one non-coordinate variable,
            that variable's name is used by default.
        names : list of str, optional
            List of dataset names. By default these are inferred directly from
            the input `paths` attribute.
        convert_times : bool, optional
            If True, files are assumed to be split by time and values
            are automatically converted to datetime-like objects.
            Does nothing if `project` is not set.
        use_dt64 : bool, optional
            If convert_times is True, use numpy.datetime64 to parse times
            instead of pandas.Timestamp. Only use this if the latter causes
            errors, as it is less flexible with datetimes that aren't in
            ISO 8601 format.
        project : str, optional
            A project name that encapsulates all of the datasets to be loaded.
            This must be a valid key in bcdp.extractors.metadata_extractors,
            which includes `CMIP5`, `CORDEX`, and `obs4MIPS`. When set, this
            replaces the default behavior for defining the variable and dataset
            names. For this reason, this parameter should only be set if you are
            sure that all of the input filenames correctly conform to the
            conventions required by the given project.
        load_all : bool, optional
            If True, datasets spanned by multiple files are loaded into
            memory rather than lazily (Default False). Ignored if chunks argument
            is passed to open_mfdataset.
        **kwargs
            Keyword Arguments to `xarray.open_(mf)dataset()`
        Returns
        -------
        datasets : list
            xarray DataArray objects.
        """
        if not isinstance(paths, list):
            paths = [paths]
        # Determine dataset names. The extractor is also needed later for
        # per-file metadata queries, so build it whenever `project` is set;
        # previously it was only built when `names` was empty, which raised
        # NameError when both `names` and `project` were supplied.
        if project:
            # Use project specific conventions
            extractor_cls = registry['metadata'][project]
            extractor = extractor_cls(*paths)
            if not names:
                names = sorted(extractor.names)
                paths = sorted(extractor.files)
        elif not names:
            # Infer dataset names from filenames: strip the longest common
            # prefix and suffix, then rebuild glob patterns around the names.
            pre = os.path.commonprefix(paths)
            post = os.path.commonprefix([p[::-1] for p in paths])[::-1]
            names = [p.replace(pre, '').replace(post, '') for p in paths]
            root_dir = os.path.dirname(pre) + os.path.sep
            paths = [root_dir + '*{}*'.format(name) for name in names]
        if project:
            # Pop once, before the loop: popping inside the loop only affected
            # the first iteration, so later datasets silently fell back to the
            # default 'time' even when the caller requested another dimension.
            concat_dim = kwargs.pop('concat_dim', 'time')
        # Generic dataset loader
        def open_dataset(path, **kwargs):
            dropped = get_dropped_varnames(variable) if variable else None
            # Serve from the cache when possible. The previous
            # `self._cache.get(path, xr.open_dataset(...))` form evaluated the
            # open call eagerly, re-opening the file on every cache hit.
            if path in self._cache:
                return self._cache[path]
            if os.path.isfile(path):
                ds = xr.open_dataset(path, drop_variables=dropped, **kwargs)
            else:
                chunks = kwargs.pop('chunks', None)
                ds = xr.open_mfdataset(path, drop_variables=dropped, **kwargs)
                ds = ds.chunk(chunks)
                if load_all and chunks is None:
                    ds = ds.load()
            self._cache[path] = ds
            return ds
        # Open each dataset
        dset_dict = {}
        for name, path in zip(names, paths):
            try:
                ds = open_dataset(path, **kwargs)
            except ValueError:
                # Custom datetime decoding required for monthly time units
                kwargs.update(decode_times=False)
                ds = decode_month_units(open_dataset(path, **kwargs))
            if project:
                # Get variable name from filename if project given.
                meta = extractor.query(filename=path)[0]
                if not variable:
                    variable = meta['variable']
                if concat_dim not in ds.coords:
                    # Files split along concat_dim carry its values only in
                    # the metadata; attach them as a coordinate here.
                    dim_vals = meta[concat_dim]
                    if convert_times:
                        if use_dt64:
                            dim_vals = [np.datetime64(t) for t in dim_vals]
                        else:
                            dim_vals = [pd.Timestamp(t) for t in dim_vals]
                    ds = ds.assign_coords({concat_dim: dim_vals})
            dset_dict[name] = ds
        return self._prep_datasets(variable, dset_dict)
@register('source.bucket')
class BucketSource(DataSource):
    """Load zarr data from cloud storage buckets."""
    def load(self, *paths, variable=None, bucket_type=None,
             zarr_kwargs=None, **kwargs):
        """Loads datasets from given parameters.
        Parameters
        ----------
        *paths : str
            Paths to zarr datastore in bucket.
        variable : str, optional
            Variable Name. If input files have only one non-coordinate variable,
            that variable's name is used by default.
        bucket_type : str, optional
            Type of cloud bucket (gc or s3). Can be ignored if the bucket type
            is in the url (eg gc://, s3://)
        zarr_kwargs : dict, optional
            Additional keyword arguments to pass to xarray.open_zarr.
            consolidated=True is set to default.
        **kwargs : dict, optional
            Keyword arguments to pass to bucket API (gcsfs or s3fs)
        Returns
        -------
        datasets : list
            xarray DataArray objects.
        """
        dset_dict = {}
        zarr_kwargs = zarr_kwargs if zarr_kwargs else dict(consolidated=True)
        for path in paths:
            # URL scheme (if present) overrides the bucket_type argument.
            if '://' in path:
                bucket_type = path.split('://')[0]
            if bucket_type not in ['s3', 'gc']:
                raise ValueError('Please specify supported bucket type (s3, gc)')
            if bucket_type == 's3':
                import s3fs
                fs = s3fs.S3FileSystem(**kwargs)
                mapper = s3fs.S3Map
            elif bucket_type == 'gc':
                import gcsfs
                # BUG FIX: previously this assigned the GCSFileSystem class
                # itself instead of an instance, so `fs.ls(...)` below failed
                # for gc buckets. Instantiate it like the s3 branch does.
                fs = gcsfs.GCSFileSystem(**kwargs)
                mapper = gcsfs.GCSMap
            if path.endswith('*'):
                # Trailing '/*' is a wildcard: list every store under the
                # directory (strip the two wildcard characters).
                sub_paths = fs.ls(path[:-2])
            else:
                sub_paths = [path]
            for spath in sub_paths:
                # Dataset name = last path component of the zarr store.
                name = spath.split('/')[-1]
                store = mapper(spath, fs)
                dset_dict[name] = xr.open_zarr(store, **zarr_kwargs)
        return self._prep_datasets(variable, dset_dict)
@register('source.intake')
class IntakeSource(DataSource):
    """Load remote data via the intake library."""
    def load(self, variable=None, names=None, depth=5,
             catfile=DEFAULT_INTAKE_CAT, auth_token=None):
        """Loads datasets from given parameters.
        Parameters
        ----------
        variable : str, optional
            Variable Name. If input files have only one non-coordinate variable,
            that variable's name is used by default.
        names : list of str, optional
            List of dataset names. Either fully-qualified catalogue names
            (eg 'ocean.sea_surface_height') or their final component are
            accepted.
        depth : int, optional
            Depth of catalog search (default: 5)
        catfile : str, optional
            Path to catalogue metadata file, can be a remote URL. The pangeo
            Intake master catalogue is used by default.
        auth_token : str, optional
            Path to credentials key file to use for accessing cloud storage
            buckets.
        Returns
        -------
        datasets : list
            xarray DataArray objects.
        """
        import intake
        if auth_token:
            os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = auth_token
        cat = intake.Catalog(catfile)
        meta = cat.walk(depth=depth)
        # Select every catalogue entry that yields an xarray container.
        sel = [name for name, ent in meta.items() if ent.container == 'xarray']
        names = sel if not names else names
        entries = [cat[name] for name in sel]
        shortnames = [name.split('.')[-1] for name in sel]
        # BUG FIX: match on either the full dotted name or its short form.
        # Previously only short names were compared against `names`, so the
        # default (`names = sel`, full names) silently matched nothing for
        # nested catalogues.
        wanted = set(names)
        dset_dict = {short: ent.to_dask()
                     for full, short, ent in zip(sel, shortnames, entries)
                     if full in wanted or short in wanted}
        return self._prep_datasets(variable, dset_dict)
@register('source.intake-esm')
class IntakeESMSource(DataSource):
    """Load remote data via the intake-esm library."""
    def load(self, query, catfile=DEFAULT_INTAKE_ESM_CAT, **kwargs):
        """Load the datasets matching an intake-esm catalogue search.
        Parameters
        ----------
        query : dict
            Key, value pairs used to search the catalogue.
        catfile : str, optional
            Path to catalogue metadata file, can be a remote URL. The pangeo
            intake-esm CMIP6 catalogue is used by default.
        **kwargs : dict, optional
            Keyword Arguments for `intake_esm.core.esm_datastore.to_dataset_dict()`
        Returns
        -------
        datasets : list
            xarray DataArray objects.
        """
        import intake
        # Open the datastore, narrow it down with the query, then realize
        # the matching entries as a {name: dataset} mapping.
        datastore = intake.open_esm_datastore(catfile)
        subset = datastore.search(**query)
        dset_dict = subset.to_dataset_dict(**kwargs)
        return self._prep_datasets(query.get('variable_id'), dset_dict)
@register('source.rcmed')
class RCMEDSource(DataSource):
    """JPL Regional Climate Model Evaluation Database (RCMED) Data Source"""
    def load(self, dataset_id, parameter_id, domain, chunks=None):
        """Loads datasets from given parameters.
        Parameters
        ----------
        dataset_id : int
            Dataset ID in RCMED.
        parameter_id : int
            Variable (Parameter) ID in RCMED.
        domain : bcdp.bounds.Bounds
            Bounds defining the spatial and temporal domain to search
            (and subset) the requested data.
        chunks : dict, optional
            If set, load data into a dask array with given chunk sizes.
        Returns
        -------
        list of xarray.DataArray
            xarray DataArray object with loaded data.
        """
        # Read RCMED metadata table from RCMES website
        metadata = self.get_metadata()
        idx = metadata.parameter_id == parameter_id
        if not idx.any():
            # BUG FIX: the second line was missing the f-prefix, so the
            # placeholder was printed literally.
            raise ValueError(f'Invalid Parameter ID: {parameter_id}. '
                             f'Should be one of: {metadata.parameter_id}')
        # Format remaining parameters for request query.
        # BUG FIX: this previously referenced the undefined name
        # `variable_id` (the parameter is `parameter_id`), raising NameError.
        params = dict(datasetId=dataset_id, parameterId=parameter_id,
                      **domain.to_dict())
        # Make request to RCMED server
        r = requests.get(RCMED_QUERY_URL, params=params)
        r.raise_for_status()
        # Response is CRLF-delimited text: 12 header lines, then one CSV row
        # per observation, with a trailing empty line.
        result = r.text.split('\r\n')
        meta = result[:12]
        data = result[12:-1]
        # Create DataArray object from output
        variable_name = meta[2].split('Parameter:')[1].split('(')[0].strip()
        name = metadata.database[idx].values[0]
        df = pd.DataFrame(data=data)
        df = df[0].str.split(',', expand=True)
        numeric_dims = ['lat', 'lon', 'lev', variable_name]
        df.columns = ['lat', 'lon', 'lev', 'time', variable_name]
        df[numeric_dims] = df[numeric_dims].astype(float)
        df['time'] = pd.to_datetime(df['time'])
        df = df.set_index(['time', 'lev', 'lat', 'lon'])
        da = df[variable_name].to_xarray().squeeze()
        if chunks:
            da = da.chunk(chunks)
        da.attrs['units'] = metadata['units'][idx].values[0]
        # Mask out the dataset's declared missing-data sentinel.
        da = da.where(da != metadata['missingdataflag'][idx].values[0])
        da.name = name
        return [da]
    def get_metadata(self):
        """Loads RCMED metadata.
        Returns
        -------
        meta : pandas.DataFrame
            RCMED metadata table.
        """
        info = requests.get(RCMED_QUERY_URL, params=dict(param_info='yes')).json()
        meta = pd.DataFrame(data=info['data'], columns=info['fields_name'])
        return meta
@register('source.esgf')
class ESGFSource(DataSource):
    """Earth System Grid (ESGF) data source.

    NOTE: not yet implemented — load() always raises NotImplementedError.
    """
    _origin = 'esgf'
    def load(self, variable=None, project=None, node='JPL', **kwargs):
        """Builds an xarray.DataArray object from given parameters.
        Parameters
        ----------
        variable : str, optional
            Variable Name.
        project : str, optional
            A project name that encapsulates all of the datasets to be loaded.
            This must be a valid key in the bcdp object registry, which includes
            `CMIP5`, `CORDEX`, and `obs4MIPS`. When set, this replaces the
            default behavior for defining the variable and dataset names. For
            this reason, this parameter should only be set if you are sure that
            all of the input filenames correctly conform to the conventions
            required by the given project.
        node : str, optional
            ESGF node to search from, default is JPL.
        **kwargs : dict, optional
            ESGF search parameters.
        Returns
        -------
        datasets : dict of xarray.DataArray
            xarray DataArray objects with origin information and CF-compliant
            coordinate variables, keyed by dataset names.
        """
        # Validates `node` (raises KeyError for unknown nodes) before failing;
        # the hostname is otherwise unused until the implementation lands.
        hostname = ESGF_NODES[node]
        raise NotImplementedError('')
# Module-level convenience loaders: pre-built instances so callers can use
# e.g. ``load_local(paths=...)`` without constructing a source themselves.
load_local = LocalFileSource()
load_bucket = BucketSource()
load_intake = IntakeSource()
load_intake_esm = IntakeESMSource()
load_rcmed = RCMEDSource()
| {
"alphanum_fraction": 0.5699197068,
"author": null,
"avg_line_length": 40.8744493392,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "317bd89efcfdb25fcfe273e43c8ee7ba9bfec5c9",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2020-04-04T09:33:00.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-02-05T23:28:32.000Z",
"max_forks_repo_head_hexsha": "63e8b93251599cbb591567459d7b598472c6afc8",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "bcdp/bcdp",
"max_forks_repo_path": "bcdp/sources.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "63e8b93251599cbb591567459d7b598472c6afc8",
"max_issues_repo_issues_event_max_datetime": "2020-04-16T22:17:45.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-04-16T22:17:45.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "bcdp/bcdp",
"max_issues_repo_path": "bcdp/sources.py",
"max_line_length": 84,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "63e8b93251599cbb591567459d7b598472c6afc8",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "bcdp/bcdp",
"max_stars_repo_path": "bcdp/sources.py",
"max_stars_repo_stars_event_max_datetime": "2021-09-16T14:58:00.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-02-17T10:24:32.000Z",
"num_tokens": 3835,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 18557
} |
[STATEMENT]
lemma numsubst0_numbound0:
assumes "numbound0 t"
shows "numbound0 (numsubst0 t a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. numbound0 (numsubst0 t a)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
numbound0 t
goal (1 subgoal):
1. numbound0 (numsubst0 t a)
[PROOF STEP]
proof (induct a)
[PROOF STATE]
proof (state)
goal (7 subgoals):
1. \<And>x. numbound0 t \<Longrightarrow> numbound0 (numsubst0 t (C x))
2. \<And>x. numbound0 t \<Longrightarrow> numbound0 (numsubst0 t (Bound x))
3. \<And>x1a x2a a. \<lbrakk>numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a); numbound0 t\<rbrakk> \<Longrightarrow> numbound0 (numsubst0 t (CN x1a x2a a))
4. \<And>a. \<lbrakk>numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a); numbound0 t\<rbrakk> \<Longrightarrow> numbound0 (numsubst0 t (Neg a))
5. \<And>a1 a2. \<lbrakk>numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a1); numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a2); numbound0 t\<rbrakk> \<Longrightarrow> numbound0 (numsubst0 t (Add a1 a2))
6. \<And>a1 a2. \<lbrakk>numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a1); numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a2); numbound0 t\<rbrakk> \<Longrightarrow> numbound0 (numsubst0 t (Sub a1 a2))
7. \<And>x1a a. \<lbrakk>numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a); numbound0 t\<rbrakk> \<Longrightarrow> numbound0 (numsubst0 t (Mul x1a a))
[PROOF STEP]
case (CN n)
[PROOF STATE]
proof (state)
this:
numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a_)
numbound0 t
goal (7 subgoals):
1. \<And>x. numbound0 t \<Longrightarrow> numbound0 (numsubst0 t (C x))
2. \<And>x. numbound0 t \<Longrightarrow> numbound0 (numsubst0 t (Bound x))
3. \<And>x1a x2a a. \<lbrakk>numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a); numbound0 t\<rbrakk> \<Longrightarrow> numbound0 (numsubst0 t (CN x1a x2a a))
4. \<And>a. \<lbrakk>numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a); numbound0 t\<rbrakk> \<Longrightarrow> numbound0 (numsubst0 t (Neg a))
5. \<And>a1 a2. \<lbrakk>numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a1); numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a2); numbound0 t\<rbrakk> \<Longrightarrow> numbound0 (numsubst0 t (Add a1 a2))
6. \<And>a1 a2. \<lbrakk>numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a1); numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a2); numbound0 t\<rbrakk> \<Longrightarrow> numbound0 (numsubst0 t (Sub a1 a2))
7. \<And>x1a a. \<lbrakk>numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a); numbound0 t\<rbrakk> \<Longrightarrow> numbound0 (numsubst0 t (Mul x1a a))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a_)
numbound0 t
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a_)
numbound0 t
goal (1 subgoal):
1. numbound0 (numsubst0 t (CN n x2a_ a_))
[PROOF STEP]
by (cases n) simp_all
[PROOF STATE]
proof (state)
this:
numbound0 (numsubst0 t (CN n x2a_ a_))
goal (6 subgoals):
1. \<And>x. numbound0 t \<Longrightarrow> numbound0 (numsubst0 t (C x))
2. \<And>x. numbound0 t \<Longrightarrow> numbound0 (numsubst0 t (Bound x))
3. \<And>a. \<lbrakk>numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a); numbound0 t\<rbrakk> \<Longrightarrow> numbound0 (numsubst0 t (Neg a))
4. \<And>a1 a2. \<lbrakk>numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a1); numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a2); numbound0 t\<rbrakk> \<Longrightarrow> numbound0 (numsubst0 t (Add a1 a2))
5. \<And>a1 a2. \<lbrakk>numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a1); numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a2); numbound0 t\<rbrakk> \<Longrightarrow> numbound0 (numsubst0 t (Sub a1 a2))
6. \<And>x1a a. \<lbrakk>numbound0 t \<Longrightarrow> numbound0 (numsubst0 t a); numbound0 t\<rbrakk> \<Longrightarrow> numbound0 (numsubst0 t (Mul x1a a))
[PROOF STEP]
qed simp_all | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": 7,
"llama_tokens": 1565,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
# Quotient the vertex set of `W` (one vertex per row) by geometric proximity:
# vertices closer than the tolerance are merged into a single class.
# Returns `V`, a 3×k matrix whose columns are the class centroids, and
# `vclasses`, the vector of row-index classes into `W`.
function vertCongruenceAA(W)
    # err: merge tolerance; i: next class id; todelete: vertices already
    # absorbed into an earlier class; vclasses: accumulated classes.
    err, i, todelete, vclasses = 10^-6, 1, [], []
    verts = convert(Lar.Points, W')
    kdtree = NearestNeighbors.KDTree(verts)
    newverts = zeros(Int, size(verts,2))
    for vi in 1:size(verts,2)
        if !(vi in todelete)
            # All vertices within `err` of vertex `vi` form one class.
            nearvs = NearestNeighbors.inrange(kdtree, verts[:,vi], err)
            push!(vclasses,nearvs)
            newverts[nearvs] .= i
            # Mark the other class members so they never seed a new class.
            nearvs = setdiff(nearvs, vi)
            todelete = union(todelete, nearvs)
            i += 1
        end
    end
    # Representative vertex of each class = centroid of its members.
    V = zeros(3,length(vclasses))
    for (k,class) in enumerate(vclasses)
        V[:,k] = sum(W[class,:],dims=1)/length(class)
    end
    return V, vclasses
end
# Propagate vertex/cell congruence one dimension up: given the boundary
# operator `Delta` (as a sparse characteristic matrix) and the equivalence
# classes `inclasses` of the lower-dimensional cells, rewrite each cell in
# terms of class representatives and merge cells that become identical.
# Returns the merged cells `FEnew` and their index classes `outclasses`.
function cellCongruenceAA(Delta,inclasses)
    cellarray = Lar.cop2lar(Delta)
    # new_e maps every old lower-cell index to its class representative.
    new_e = Array{Int64,1}(undef,size(Delta,2))
    for (k,class) in enumerate(inclasses)
        for e in class
            new_e[e] = k
        end
    end
    # Rewrite each cell using representative indices; congruent cells now
    # have identical index lists.
    cells = [map(e->new_e[e], face) for face in cellarray]
    outclasses = DefaultOrderedDict{Array{Int64,1},Array{Int64,1}}([])
    for (k,face) in enumerate(cells)
        if outclasses[face] == []
            outclasses[face] = [k]
        else
            append!(outclasses[face],[k])
        end
    end
    # NOTE(review): keys and values are sorted independently here, so
    # FEnew[k] and outclasses[k] stay aligned only if both sorts agree —
    # confirm this invariant holds for DefaultOrderedDict.
    FEnew = sort(collect(keys(outclasses)))
    outclasses = sort(collect(values(outclasses)))
    return FEnew,outclasses
end
# Full congruence pipeline: merge near-coincident vertices, then propagate
# the merge through the edge (T[1]) and face (T[2]) boundary operators.
# Returns the merged geometry V and topology EV (edges), FE (faces).
function chainCongruenceAA(W, T)
    V, vclasses = vertCongruenceAA(W)
    EV, eclasses = cellCongruenceAA(T[1],vclasses)
    FE, fclasses = cellCongruenceAA(T[2],eclasses)
    # Euler characteristic check (debug only):
    #@show size(V,2) - size(EV,1) + size(FE,1)
    return V,EV,FE
end
| {
"alphanum_fraction": 0.6793248945,
"author": null,
"avg_line_length": 26.3333333333,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "c37df422ab2d06863d2f38341123e15a0daffa3a",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T11:20:41.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-03-31T11:20:41.000Z",
"max_forks_repo_head_hexsha": "3b8d254681d56fab1fbdf305a36dd4e3234c7119",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "cvdlab/LocalCongruence.jl",
"max_forks_repo_path": "src/cea-AA.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3b8d254681d56fab1fbdf305a36dd4e3234c7119",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "cvdlab/LocalCongruence.jl",
"max_issues_repo_path": "src/cea-AA.jl",
"max_line_length": 68,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3b8d254681d56fab1fbdf305a36dd4e3234c7119",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "cvdlab/LocalCongruence.jl",
"max_stars_repo_path": "src/cea-AA.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 484,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1422
} |
# Argument names reserved by the @model macro machinery; user code that
# references them triggers a warning in `generate_mainbody!` when warn=true.
const INTERNALNAMES = (:__model__, :__context__, :__varinfo__)
"""
isassumption(expr[, vn])
Return an expression that can be evaluated to check if `expr` is an assumption in the
model.
Let `expr` be `:(x[1])`. It is an assumption in the following cases:
1. `x` is not among the input data to the model,
2. `x` is among the input data to the model but with a value `missing`, or
3. `x` is among the input data to the model with a value other than missing,
but `x[1] === missing`.
When `expr` is not an expression or symbol (i.e., a literal), this expands to `false`.
If `vn` is specified, it will be assumed to refer to a expression which
evaluates to a `VarName`, and this will be used in the subsequent checks.
If `vn` is not specified, `AbstractPPL.drop_escape(varname(expr))` will be
used in its place.
"""
function isassumption(expr::Union{Expr,Symbol}, vn=AbstractPPL.drop_escape(varname(expr)))
return quote
if $(DynamicPPL.contextual_isassumption)(__context__, $vn)
# Considered an assumption by `__context__` which means either:
# 1. We hit the default implementation, e.g. using `DefaultContext`,
# which in turn means that we haven't considered if it's one of
# the model arguments, hence we need to check this.
# 2. We are working with a `ConditionContext` _and_ it's NOT in the model arguments,
# i.e. we're trying to condition one of the latent variables.
# In this case, the below will return `true` since the first branch
# will be hit.
# 3. We are working with a `ConditionContext` _and_ it's in the model arguments,
# i.e. we're trying to override the value. This is currently NOT supported.
# TODO: Support by adding context to model, and use `model.args`
# as the default conditioning. Then we no longer need to check `inargnames`
# since it will all be handled by `contextual_isassumption`.
if !($(DynamicPPL.inargnames)($vn, __model__)) ||
$(DynamicPPL.inmissings)($vn, __model__)
true
else
$(maybe_view(expr)) === missing
end
else
false
end
end
end
# failsafe: a literal is never an assumption
isassumption(expr, vn) = :(false)
isassumption(expr) = :(false)
"""
contextual_isassumption(context, vn)
Return `true` if `vn` is considered an assumption by `context`.
The default implementation for `AbstractContext` always returns `true`.
"""
contextual_isassumption(::IsLeaf, context, vn) = true
function contextual_isassumption(::IsParent, context, vn)
return contextual_isassumption(childcontext(context), vn)
end
function contextual_isassumption(context::AbstractContext, vn)
return contextual_isassumption(NodeTrait(context), context, vn)
end
function contextual_isassumption(context::ConditionContext, vn)
if hasvalue(context, vn)
val = getvalue(context, vn)
# TODO: Do we even need the `>: Missing`, i.e. does it even help the compiler?
if eltype(val) >: Missing && val === missing
return true
else
return false
end
end
# We might have nested contexts, e.g. `ContextionContext{.., <:PrefixContext{..., <:ConditionContext}}`
# so we defer to `childcontext` if we haven't concluded that anything yet.
return contextual_isassumption(childcontext(context), vn)
end
function contextual_isassumption(context::PrefixContext, vn)
return contextual_isassumption(childcontext(context), prefix(context, vn))
end
# Wrap indexing expressions in `@views` so RHS slices do not copy; anything
# that is not an `Expr` (e.g. a `Symbol` or literal) passes through untouched.
function maybe_view(x::Expr)
    return :(@views($x))
end
maybe_view(x) = x
"""
isliteral(expr)
Return `true` if `expr` is a literal, e.g. `1.0` or `[1.0, ]`, and `false` otherwise.
"""
isliteral(e) = false
isliteral(::Number) = true
isliteral(e::Expr) = !isempty(e.args) && all(isliteral, e.args)
"""
check_tilde_rhs(x)
Check if the right-hand side `x` of a `~` is a `Distribution` or an array of
`Distributions`, then return `x`.
"""
function check_tilde_rhs(@nospecialize(x))
return throw(
ArgumentError(
"the right-hand side of a `~` must be a `Distribution` or an array of `Distribution`s",
),
)
end
check_tilde_rhs(x::Distribution) = x
check_tilde_rhs(x::AbstractArray{<:Distribution}) = x
"""
unwrap_right_vn(right, vn)
Return the unwrapped distribution on the right-hand side and variable name on the left-hand
side of a `~` expression such as `x ~ Normal()`.
This is used mainly to unwrap `NamedDist` distributions.
"""
unwrap_right_vn(right, vn) = right, vn
unwrap_right_vn(right::NamedDist, vn) = unwrap_right_vn(right.dist, right.name)
"""
unwrap_right_left_vns(right, left, vns)
Return the unwrapped distributions on the right-hand side and values and variable names on the
left-hand side of a `.~` expression such as `x .~ Normal()`.
This is used mainly to unwrap `NamedDist` distributions and adjust the indices of the
variables.
# Example
```jldoctest; setup=:(using Distributions, LinearAlgebra)
julia> _, _, vns = DynamicPPL.unwrap_right_left_vns(MvNormal(ones(2), I), randn(2, 2), @varname(x)); vns[end]
x[:,2]
julia> _, _, vns = DynamicPPL.unwrap_right_left_vns(Normal(), randn(1, 2), @varname(x)); vns[end]
x[1,2]
julia> _, _, vns = DynamicPPL.unwrap_right_left_vns(Normal(), randn(1, 2), @varname(x[:])); vns[end]
x[:][1,2]
julia> _, _, vns = DynamicPPL.unwrap_right_left_vns(Normal(), randn(3), @varname(x[1])); vns[end]
x[1][3]
```
"""
unwrap_right_left_vns(right, left, vns) = right, left, vns
function unwrap_right_left_vns(right::NamedDist, left, vns)
return unwrap_right_left_vns(right.dist, left, right.name)
end
function unwrap_right_left_vns(
right::MultivariateDistribution, left::AbstractMatrix, vn::VarName
)
# This an expression such as `x .~ MvNormal()` which we interpret as
# x[:, i] ~ MvNormal()
# for `i = size(left, 2)`. Hence the symbol should be `x[:, i]`,
# and we therefore add the `Colon()` below.
vns = map(axes(left, 2)) do i
return vn ∘ Setfield.IndexLens((Colon(), i))
end
return unwrap_right_left_vns(right, left, vns)
end
function unwrap_right_left_vns(
right::Union{Distribution,AbstractArray{<:Distribution}},
left::AbstractArray,
vn::VarName,
)
vns = map(CartesianIndices(left)) do i
return vn ∘ Setfield.IndexLens(Tuple(i))
end
return unwrap_right_left_vns(right, left, vns)
end
#################
# Main Compiler #
#################
"""
    @model(expr[, warn = false])
Macro to specify a probabilistic model.
If `warn` is `true`, a warning is displayed if internal variable names are used in the model
definition.
# Examples
Model definition:
```julia
@model function model(x, y = 42)
    ...
end
```
To generate a `Model`, call `model(xvalue)` or `model(xvalue, yvalue)`.
"""
macro model(expr, warn=false)
    # include `LineNumberNode` with information about the call site in the
    # generated function for easier debugging and interpretation of error messages
    return esc(model(__module__, __source__, expr, warn))
end
# Pure function behind @model: splits the definition apart, rewrites its
# body (tilde statements, macro expansion, warnings), and reassembles the
# final model-generating function.
function model(mod, linenumbernode, expr, warn)
    modelinfo = build_model_info(expr)
    # Generate main body
    modelinfo[:body] = generate_mainbody(mod, modelinfo[:modeldef][:body], warn)
    return build_output(modelinfo, linenumbernode)
end
"""
build_model_info(input_expr)
Builds the `model_info` dictionary from the model's expression.
"""
function build_model_info(input_expr)
# Break up the model definition and extract its name, arguments, and function body
modeldef = MacroTools.splitdef(input_expr)
# Check that the function has a name
# https://github.com/TuringLang/DynamicPPL.jl/issues/260
haskey(modeldef, :name) ||
throw(ArgumentError("anonymous functions without name are not supported"))
# Print a warning if function body of the model is empty
warn_empty(modeldef[:body])
## Construct model_info dictionary
# Shortcut if the model does not have any arguments
if !haskey(modeldef, :args) && !haskey(modeldef, :kwargs)
modelinfo = Dict(
:allargs_exprs => [],
:allargs_syms => [],
:allargs_namedtuple => NamedTuple(),
:defaults_namedtuple => NamedTuple(),
:modeldef => modeldef,
)
return modelinfo
end
# Ensure that all arguments have a name, i.e., are of the form `name` or `name::T`
addargnames!(modeldef[:args])
# Extract the positional and keyword arguments from the model definition.
allargs = vcat(modeldef[:args], modeldef[:kwargs])
# Split the argument expressions and the default values.
allargs_exprs_defaults = map(allargs) do arg
MacroTools.@match arg begin
(x_ = val_) => (x, val)
x_ => (x, NO_DEFAULT)
end
end
# Extract the expressions of the arguments, without default values.
allargs_exprs = first.(allargs_exprs_defaults)
# Extract the names of the arguments.
allargs_syms = map(allargs_exprs) do arg
MacroTools.@match arg begin
(name_::_) => name
x_ => x
end
end
# Build named tuple expression of the argument symbols and variables of the same name.
allargs_namedtuple = to_namedtuple_expr(allargs_syms)
# Extract default values of the positional and keyword arguments.
default_syms = []
default_vals = []
for (sym, (expr, val)) in zip(allargs_syms, allargs_exprs_defaults)
if val !== NO_DEFAULT
push!(default_syms, sym)
push!(default_vals, val)
end
end
# Build named tuple expression of the argument symbols with default values.
defaults_namedtuple = to_namedtuple_expr(default_syms, default_vals)
modelinfo = Dict(
:allargs_exprs => allargs_exprs,
:allargs_syms => allargs_syms,
:allargs_namedtuple => allargs_namedtuple,
:defaults_namedtuple => defaults_namedtuple,
:modeldef => modeldef,
)
return modelinfo
end
"""
generate_mainbody(mod, expr, warn)
Generate the body of the main evaluation function from expression `expr` and arguments
`args`.
If `warn` is true, a warning is displayed if internal variables are used in the model
definition.
"""
generate_mainbody(mod, expr, warn) = generate_mainbody!(mod, Symbol[], expr, warn)
generate_mainbody!(mod, found, x, warn) = x
function generate_mainbody!(mod, found, sym::Symbol, warn)
    # Warn at most once per symbol when the model body refers to one of the
    # reserved internal variable names.
    if warn && sym ∉ found && sym in INTERNALNAMES
        @warn "you are using the internal variable `$sym`"
        push!(found, sym)
    end
    return sym
end
function generate_mainbody!(mod, found, expr::Expr, warn)
    # Do not touch interpolated expressions.
    expr.head === :$ && return expr.args[1]

    # We don't want escaped expressions because we unfortunately
    # escape the entire body afterwards.
    # BUGFIX: this previously called `generate_mainbody` (without `!`), which only
    # accepts three arguments `(mod, expr, warn)`; passing `found` as well raised a
    # `MethodError` whenever an `Expr(:escape, ...)` node was encountered.
    Meta.isexpr(expr, :escape) && return generate_mainbody!(mod, found, expr.args[1], warn)

    # If it's a macro, expand it once and recurse into the expansion.
    if Meta.isexpr(expr, :macrocall)
        return generate_mainbody!(mod, found, macroexpand(mod, expr; recursive=true), warn)
    end

    # Modify dotted tilde operators.
    args_dottilde = getargs_dottilde(expr)
    if args_dottilde !== nothing
        L, R = args_dottilde
        return Base.remove_linenums!(
            generate_dot_tilde(
                generate_mainbody!(mod, found, L, warn),
                generate_mainbody!(mod, found, R, warn),
            ),
        )
    end

    # Modify tilde operators.
    args_tilde = getargs_tilde(expr)
    if args_tilde !== nothing
        L, R = args_tilde
        return Base.remove_linenums!(
            generate_tilde(
                generate_mainbody!(mod, found, L, warn),
                generate_mainbody!(mod, found, R, warn),
            ),
        )
    end

    # Otherwise recurse into all sub-expressions.
    return Expr(expr.head, map(x -> generate_mainbody!(mod, found, x, warn), expr.args)...)
end
function generate_tilde_literal(left, right)
    # A literal LHS (e.g. `1.0 ~ Normal()`) is always an observation.
    @gensym retval
    ex = quote
        $retval, __varinfo__ = $(DynamicPPL.tilde_observe!!)(
            __context__, $(DynamicPPL.check_tilde_rhs)($right), $left, __varinfo__
        )
        $retval
    end
    return ex
end
"""
generate_tilde(left, right)
Generate an `observe` expression for data variables and `assume` expression for parameter
variables.
"""
function generate_tilde(left, right)
isliteral(left) && return generate_tilde_literal(left, right)
# Otherwise it is determined by the model or its value,
# if the LHS represents an observation
@gensym vn isassumption value
# HACK: Usage of `drop_escape` is unfortunate. It's a consequence of the fact
# that in DynamicPPL we the entire function body. Instead we should be
# more selective with our escape. Until that's the case, we remove them all.
return quote
$vn = $(AbstractPPL.drop_escape(varname(left)))
$isassumption = $(DynamicPPL.isassumption(left, vn))
if $isassumption
$(generate_tilde_assume(left, right, vn))
else
# If `vn` is not in `argnames`, we need to make sure that the variable is defined.
if !$(DynamicPPL.inargnames)($vn, __model__)
$left = $(DynamicPPL.getvalue_nested)(__context__, $vn)
end
$value, __varinfo__ = $(DynamicPPL.tilde_observe!!)(
__context__,
$(DynamicPPL.check_tilde_rhs)($right),
$(maybe_view(left)),
$vn,
__varinfo__,
)
$value
end
end
end
function generate_tilde_assume(left, right, vn)
    # HACK: Because the Setfield.jl macro does not support assignment
    # with multiple arguments on the LHS, we need to capture the return-values
    # and then update the LHS variables one by one.
    @gensym value
    expr = :($left = $value)
    if left isa Expr
        # Non-symbol LHS (e.g. `x[i]`): rewrite the assignment via Setfield/BangBang
        # so that it prefers in-place mutation where possible.
        expr = AbstractPPL.drop_escape(
            Setfield.setmacro(BangBang.prefermutation, expr; overwrite=true)
        )
    end
    return quote
        $value, __varinfo__ = $(DynamicPPL.tilde_assume!!)(
            __context__,
            $(DynamicPPL.unwrap_right_vn)($(DynamicPPL.check_tilde_rhs)($right), $vn)...,
            __varinfo__,
        )
        # Assign the sampled value back to the user-visible variable, then
        # evaluate to the value itself.
        $expr
        $value
    end
end
"""
generate_dot_tilde(left, right)
Generate the expression that replaces `left .~ right` in the model body.
"""
function generate_dot_tilde(left, right)
isliteral(left) && return generate_tilde_literal(left, right)
# Otherwise it is determined by the model or its value,
# if the LHS represents an observation
@gensym vn isassumption value
return quote
$vn = $(AbstractPPL.drop_escape(varname(left)))
$isassumption = $(DynamicPPL.isassumption(left, vn))
if $isassumption
$(generate_dot_tilde_assume(left, right, vn))
else
# If `vn` is not in `argnames`, we need to make sure that the variable is defined.
if !$(DynamicPPL.inargnames)($vn, __model__)
$left .= $(DynamicPPL.getvalue_nested)(__context__, $vn)
end
$value, __varinfo__ = $(DynamicPPL.dot_tilde_observe!!)(
__context__,
$(DynamicPPL.check_tilde_rhs)($right),
$(maybe_view(left)),
$vn,
__varinfo__,
)
$value
end
end
end
function generate_dot_tilde_assume(left, right, vn)
    # We don't need to use `Setfield.@set` here since
    # `.=` is always going to be inplace + needs `left` to
    # be something that supports `.=`.
    @gensym value
    return quote
        $value, __varinfo__ = $(DynamicPPL.dot_tilde_assume!!)(
            __context__,
            $(DynamicPPL.unwrap_right_left_vns)(
                $(DynamicPPL.check_tilde_rhs)($right), $(maybe_view(left)), $vn
            )...,
            __varinfo__,
        )
        # Broadcast-assign the sampled values into the user variable, then
        # evaluate to the sampled values.
        $left .= $value
        $value
    end
end
# Note that we cannot use `MacroTools.isdef` because
# of https://github.com/FluxML/MacroTools.jl/issues/154.
"""
    isfuncdef(expr)

Return `true` if `expr` is any form of function definition, and `false` otherwise.
"""
function isfuncdef(e::Expr)
    # Three syntactic forms count as a function definition:
    #   * classic `function f(...) ... end`,
    #   * anonymous functions/lambdas (incl. `do` blocks), i.e. `:->`,
    #   * short-form definitions `f(args...) = ...`.
    return (
        Meta.isexpr(e, :function) ||
        Meta.isexpr(e, :->) ||
        (Meta.isexpr(e, :(=)) && Meta.isexpr(e.args[1], :call))
    )
end
"""
replace_returns(expr)
Return `Expr` with all `return ...` statements replaced with
`return ..., DynamicPPL.return_values(__varinfo__)`.
Note that this method will _not_ replace `return` statements within function
definitions. This is checked using [`isfuncdef`](@ref).
"""
replace_returns(e) = e
function replace_returns(e::Expr)
if isfuncdef(e)
return e
end
if Meta.isexpr(e, :return)
# We capture the original return-value in `retval` and return
# a `Tuple{typeof(retval),typeof(__varinfo__)}`.
# If we don't capture the return-value separately, cases such as
# `return x = 1` will result in `(x = 1, __varinfo__)` which will
# mistakenly attempt to construct a `NamedTuple` (which fails on Julia 1.3
# and is not our intent).
@gensym retval
return quote
$retval = $(e.args...)
return $retval, __varinfo__
end
end
return Expr(e.head, map(replace_returns, e.args)...)
end
# If it's just a symbol, e.g. `f(x) = 1`, then we make it `f(x) = return 1`.
make_returns_explicit!(body) = Expr(:return, body)
function make_returns_explicit!(body::Expr)
    # Wrap the final statement in an explicit `return` unless it already is one.
    tail_expr = body.args[end]
    if !Meta.isexpr(tail_expr, :return)
        body.args[end] = Expr(:return, tail_expr)
    end
    return body
end
# Types of values for which `matchingvalue` may need a sampler-specific replacement.
const FloatOrArrayType = Type{<:Union{AbstractFloat,AbstractArray}}

# `hasmissing(T)`: does `T` (possibly nested inside array element types) admit `missing`?
hasmissing(::Type) = false
hasmissing(::Type{>:Missing}) = true
hasmissing(::Type{<:AbstractArray{TA}}) where {TA} = hasmissing(TA)
hasmissing(::Type{Union{}}) = false # issue #368
"""
build_output(modelinfo, linenumbernode)
Builds the output expression.
"""
function build_output(modelinfo, linenumbernode)
## Build the anonymous evaluator from the user-provided model definition.
evaluatordef = deepcopy(modelinfo[:modeldef])
# Add the internal arguments to the user-specified arguments (positional + keywords).
evaluatordef[:args] = vcat(
[
:(__model__::$(DynamicPPL.Model)),
:(__varinfo__::$(DynamicPPL.AbstractVarInfo)),
:(__context__::$(DynamicPPL.AbstractContext)),
],
modelinfo[:allargs_exprs],
)
# Delete the keyword arguments.
evaluatordef[:kwargs] = []
# Replace the user-provided function body with the version created by DynamicPPL.
# We use `MacroTools.@q begin ... end` instead of regular `quote ... end` to ensure
# that no new `LineNumberNode`s are added apart from the reference `linenumbernode`
# to the call site.
# NOTE: We need to replace statements of the form `return ...` with
# `return (..., __varinfo__)` to ensure that the second
# element in the returned value is always the most up-to-date `__varinfo__`.
# See the docstrings of `replace_returns` for more info.
evaluatordef[:body] = MacroTools.@q begin
$(linenumbernode)
$(replace_returns(make_returns_explicit!(modelinfo[:body])))
end
## Build the model function.
# Extract the named tuple expression of all arguments and the default values.
allargs_namedtuple = modelinfo[:allargs_namedtuple]
defaults_namedtuple = modelinfo[:defaults_namedtuple]
# Obtain or generate the name of the model to support functors:
# https://github.com/TuringLang/DynamicPPL.jl/issues/367
modeldef = modelinfo[:modeldef]
if MacroTools.@capture(modeldef[:name], ::T_)
name = gensym(:f)
modeldef[:name] = Expr(:(::), name, T)
elseif MacroTools.@capture(modeldef[:name], (name_::_ | name_))
else
throw(ArgumentError("unsupported format of model function"))
end
# Update the function body of the user-specified model.
# We use `MacroTools.@q begin ... end` instead of regular `quote ... end` to ensure
# that no new `LineNumberNode`s are added apart from the reference `linenumbernode`
# to the call site
modeldef[:body] = MacroTools.@q begin
$(linenumbernode)
return $(DynamicPPL.Model)($name, $allargs_namedtuple, $defaults_namedtuple)
end
return MacroTools.@q begin
$(MacroTools.combinedef(evaluatordef))
$(Base).@__doc__ $(MacroTools.combinedef(modeldef))
end
end
function warn_empty(body)
    # A body consisting solely of `LineNumberNode`s contains no user statements.
    has_statement = any(l -> !isa(l, LineNumberNode), body.args)
    if !has_statement
        @warn("Model definition seems empty, still continue.")
    end
    return nothing
end
"""
matchingvalue(sampler, vi, value)
matchingvalue(context::AbstractContext, vi, value)
Convert the `value` to the correct type for the `sampler` or `context` and the `vi` object.
For a `context` that is _not_ a `SamplingContext`, we fall back to
`matchingvalue(SampleFromPrior(), vi, value)`.
"""
function matchingvalue(sampler, vi, value)
T = typeof(value)
if hasmissing(T)
_value = convert(get_matching_type(sampler, vi, T), value)
if _value === value
return deepcopy(_value)
else
return _value
end
else
return value
end
end
# When the *value itself* is a float/array type, return its sampler-specific version.
function matchingvalue(sampler::AbstractSampler, vi, value::FloatOrArrayType)
    return get_matching_type(sampler, vi, value)
end
# Contexts dispatch through `NodeTrait`: leaf contexts fall back to
# `SampleFromPrior()`, parent contexts recurse into their child context.
function matchingvalue(context::AbstractContext, vi, value)
    return matchingvalue(NodeTrait(matchingvalue, context), context, vi, value)
end
function matchingvalue(::IsLeaf, context::AbstractContext, vi, value)
    return matchingvalue(SampleFromPrior(), vi, value)
end
function matchingvalue(::IsParent, context::AbstractContext, vi, value)
    return matchingvalue(childcontext(context), vi, value)
end
# A `SamplingContext` carries its own sampler; delegate to it.
function matchingvalue(context::SamplingContext, vi, value)
    return matchingvalue(context.sampler, vi, value)
end
"""
get_matching_type(spl::AbstractSampler, vi, ::Type{T}) where {T}
Get the specialized version of type `T` for sampler `spl`.
For example, if `T === Float64` and `spl::Hamiltonian`, the matching type is
`eltype(vi[spl])`.
"""
get_matching_type(spl::AbstractSampler, vi, ::Type{T}) where {T} = T
function get_matching_type(spl::AbstractSampler, vi, ::Type{<:Union{Missing,AbstractFloat}})
return Union{Missing,floatof(eltype(vi, spl))}
end
function get_matching_type(spl::AbstractSampler, vi, ::Type{<:AbstractFloat})
return floatof(eltype(vi, spl))
end
function get_matching_type(spl::AbstractSampler, vi, ::Type{<:Array{T,N}}) where {T,N}
return Array{get_matching_type(spl, vi, T),N}
end
function get_matching_type(spl::AbstractSampler, vi, ::Type{<:Array{T}}) where {T}
return Array{get_matching_type(spl, vi, T)}
end
function floatof(::Type{T}) where {T<:Real}
    # Dividing `one(T)` by itself yields the float type `T` promotes to under `/`.
    return typeof(one(T) / one(T))
end
# Fallback if type inference failed to produce a `Real` subtype.
floatof(::Type) = Real
| {
"alphanum_fraction": 0.6598417869,
"author": null,
"avg_line_length": 34.1111111111,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "4038d5d14fa21e2ffffb7b218e417419814bf00a",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d222316a7a2fd5afe6ec74a7ec2a50c6f08c1d00",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "TuringLang/MicroPPL",
"max_forks_repo_path": "src/compiler.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d222316a7a2fd5afe6ec74a7ec2a50c6f08c1d00",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "TuringLang/MicroPPL",
"max_issues_repo_path": "src/compiler.jl",
"max_line_length": 109,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d222316a7a2fd5afe6ec74a7ec2a50c6f08c1d00",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "TuringLang/MicroPPL",
"max_stars_repo_path": "src/compiler.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 6020,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 23639
} |
import os, sys
lib_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(lib_path)
import tensorflow as tf
from ddpg_actor import DDPG_Actor
from ddpg_critic import DDPG_Critic
class Model(object):
    """DDPG actor-critic pair built in a TF1 graph/session.

    Constructs a `DDPG_Actor` and a `DDPG_Critic` inside the supplied (or a
    fresh) `tf.Session` and exposes the combined DDPG update step and action
    prediction.

    Note: the `optimizer` parameter is accepted but never used here —
    presumably the actor/critic construct their own optimizers internally;
    confirm before removing it.
    """

    def __init__(self,
            state_dim,
            action_dim,
            optimizer=None,
            actor_learning_rate=1e-4,
            critic_learning_rate=1e-3,
            tau = 0.001,
            sess=None):
        # state_dim / action_dim: sizes of the observation and action vectors.
        # tau: soft-update coefficient forwarded to both networks.
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.actor_learning_rate = actor_learning_rate
        self.critic_learning_rate = critic_learning_rate
        self.tau = tau
        #tf.reset_default_graph()
        self.sess = sess or tf.Session()
        # Create and initialize the global step before building the networks.
        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        global_step_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="global_step")
        self.sess.run(tf.variables_initializer(global_step_vars))
        # Build actor and critic each under their own name scope.
        self.actor_scope = "actor_net"
        with tf.name_scope(self.actor_scope):
            self.actor = DDPG_Actor(self.state_dim,
                    self.action_dim,
                    learning_rate=self.actor_learning_rate,
                    tau=self.tau,
                    scope=self.actor_scope,
                    sess=self.sess)
        self.critic_scope = "critic_net"
        with tf.name_scope(self.critic_scope):
            self.critic = DDPG_Critic(self.state_dim,
                    self.action_dim,
                    learning_rate=self.critic_learning_rate,
                    tau=self.tau,
                    scope=self.critic_scope,
                    sess=self.sess)

    def update(self, state_batch, action_batch, y_batch, sess=None):
        """Run one DDPG training step.

        Updates the critic toward targets `y_batch`, updates the actor along
        the critic's action gradients, then soft-updates both target networks.
        """
        sess = sess or self.sess
        self.critic.update_source_critic_net(state_batch, action_batch, y_batch, sess)
        # Actor update uses the gradient of Q w.r.t. the actor's own actions.
        action_batch_for_grad = self.actor.predict_action_source_net(state_batch, sess)
        action_grad_batch = self.critic.get_action_grads(state_batch, action_batch_for_grad, sess)
        self.actor.update_source_actor_net(state_batch, action_grad_batch, sess)
        # Soft-update the target networks last.
        self.critic.update_target_critic_net(sess)
        self.actor.update_target_actor_net(sess)

    def predict_action(self, observation, sess=None):
        """Return the source (non-target) actor's action for `observation`."""
        sess = sess or self.sess
        return self.actor.predict_action_source_net(observation, sess)
if __name__ == '__main__':
    # Smoke test: build a Model with random hyperparameters and run one
    # prediction, one target-Q forward pass, and one update step.
    import numpy as np
    state_dim = 40
    action_dim = 3
    actor_learning_rate = np.random.rand(1)
    print("actor_learning_rate: ", actor_learning_rate)
    critic_learning_rate = np.random.rand(1)
    print("critic_learning_rate: ", critic_learning_rate)
    tau = np.random.rand(1)
    print("tau: ", tau)
    sess = tf.Session()
    # NOTE(review): `tau` is passed as a 1-element array while the learning
    # rates are unwrapped with `[0]` — confirm this asymmetry is intended.
    model = Model(state_dim,
            action_dim,
            tau=tau,
            actor_learning_rate=actor_learning_rate[0],
            critic_learning_rate=critic_learning_rate[0],
            sess=sess)
    random_state = np.random.normal(size=state_dim)
    print("random_state", random_state)
    random_action = np.random.random(size=action_dim)
    print("random_action", random_action)
    # check prediction
    pred_action = model.predict_action(random_state)
    print("predict_action", pred_action)
    # check forward
    target_q = model.critic.predict_q_target_net([random_state], [random_action], sess)
    print("predict target q", target_q)
    # `+ 1` is an arbitrary perturbation so the critic has a non-trivial target.
    y = target_q[0] + 1
    model.update([random_state], [random_action], [y])
| {
"alphanum_fraction": 0.6378258458,
"author": null,
"avg_line_length": 37.175257732,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d46e6e9beb440dccb73d0170bdc39dee3e2403fc",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 30,
"max_forks_repo_forks_event_max_datetime": "2021-09-15T06:33:55.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-09-21T00:49:02.000Z",
"max_forks_repo_head_hexsha": "7176d7fa5cbc20d3d31e9c01f6c1424bd3501ecc",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "code-cultivater/TensorAgent",
"max_forks_repo_path": "model/ddpg_model.py",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "7176d7fa5cbc20d3d31e9c01f6c1424bd3501ecc",
"max_issues_repo_issues_event_max_datetime": "2020-03-12T16:23:45.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-10-14T03:22:55.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "code-cultivater/TensorAgent",
"max_issues_repo_path": "model/ddpg_model.py",
"max_line_length": 98,
"max_stars_count": 55,
"max_stars_repo_head_hexsha": "7176d7fa5cbc20d3d31e9c01f6c1424bd3501ecc",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "lightaime/TensorAgent",
"max_stars_repo_path": "model/ddpg_model.py",
"max_stars_repo_stars_event_max_datetime": "2021-12-06T14:38:28.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-08-03T08:04:54.000Z",
"num_tokens": 765,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3606
} |
[STATEMENT]
lemma f_Exec_Stream_Acc_Output_drop: "
f_Exec_Comp_Stream_Acc_Output k output_fun trans_fun xs c \<up> n =
f_Exec_Comp_Stream_Acc_Output k output_fun trans_fun (xs \<up> n) (
f_Exec_Comp trans_fun (xs \<down> n \<odot>\<^sub>f k) c)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f_Exec_Comp_Stream_Acc_Output k output_fun trans_fun xs c \<up> n = f_Exec_Comp_Stream_Acc_Output k output_fun trans_fun (xs \<up> n) (f_Exec_Comp trans_fun (xs \<down> n \<odot> k) c)
[PROOF STEP]
by (simp add: f_Exec_Comp_Stream_Acc_Output_def f_shrink_def f_Exec_Stream_expand_aggregate_map_drop) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "AutoFocus-Stream_AF_Stream_Exec",
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 245,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
import sys
import networkx as nx
import matplotlib
import matplotlib.pyplot as plt
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
# Undirected graph of orbits; populated from stdin below.
graph = nx.Graph()
def dtoCOM(a):
    """Return the Dijkstra shortest-path length from node *a* to "COM"."""
    # `graph` is the module-level graph; a `global` declaration is not needed
    # for read-only access.
    return nx.dijkstra_path_length(graph, a, "COM")
# Build the orbit graph from stdin lines of the form "A)B".
for orbit in sys.stdin:
    A, B = [i.strip() for i in orbit.split(")")]
    graph.add_edge(A,B)
# Pair each node on the YOU->SAN shortest path with its neighbor by zipping the
# path against a shifted copy; the `0` sentinels create dummy end pairs that
# the [1:-1] slice removes.
redges = zip(nx.dijkstra_path(graph, "YOU", "SAN")+[0], [0]+nx.dijkstra_path(graph, "YOU", "SAN"))
redges = list(redges)[1:-1]
# Custom display labels for the three special nodes.
labls = {
    "YOU": "YO",
    "SAN": "Santa",
    "COM": "COM"
}
pos = nx.spectral_layout(graph)
plt.subplot(1, 1, 1)
# Draw all nodes first, then re-draw the YOU->SAN path nodes in red on top.
nx.draw_networkx_nodes(graph, pos, alpha=0.5, with_labels = True, node_size=10)
nx.draw_networkx_nodes(graph,pos, nodelist=nx.dijkstra_path(graph, "YOU", "SAN") , node_size=10, node_color='r' , alpha=0.8, with_labels = True)
nx.draw_networkx_labels(graph, pos, labels=labls)
# Blue for all edges, red for the YOU->SAN path edges.
nx.draw_networkx_edges(graph, pos, edge_color='b')
nx.draw_networkx_edges(graph, pos, edgelist=redges, edge_color='r')
plt.show()
| {
"alphanum_fraction": 0.7054187192,
"author": null,
"avg_line_length": 29,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "167ef5d44bcb082d0a7c1ca96f8beb1326ad4fc2",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8c1e1a0766b067fbe282dd482bc258275c5a3364",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "DVRodri8/advent-of-code-2019",
"max_forks_repo_path": "6/draw.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8c1e1a0766b067fbe282dd482bc258275c5a3364",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "DVRodri8/advent-of-code-2019",
"max_issues_repo_path": "6/draw.py",
"max_line_length": 144,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8c1e1a0766b067fbe282dd482bc258275c5a3364",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "DVRodri8/advent-of-code-2019",
"max_stars_repo_path": "6/draw.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 309,
"path": null,
"reason": "import networkx,from networkx",
"repo": null,
"save_path": null,
"sha": null,
"size": 1015
} |
import math
from typing import Iterable, Union
import numpy as np
from shor.errors import CircuitError
from shor.layers import _Layer
from shor.utils.collections import flatten
# A qubit specifier: a single index or an (arbitrarily nested) iterable of indices.
QbitOrIterable = Union[int, Iterable]
class _Gate(_Layer):
    """Abstract base quantum gate class

    # Properties
        input_length = valid length of input qubits
        qubits = indices of qubits, to be used as input to gate.
    """

    @property
    def symbol(self):
        # Default symbol is the lowercased class name; most subclasses shadow
        # this property with a plain class attribute (e.g. "CX").
        return self.__class__.__name__.lower()

    def __init__(self, *qbits: QbitOrIterable, **kwargs):
        """Store flattened qubit indices and validate them against `dimension`.

        Raises:
            CircuitError: if the number of qubits is not a multiple of the
                gate's dimension.
        """
        super().__init__(**kwargs)
        self.qbits = flatten(qbits) if qbits else [0]
        self.dimension = kwargs.get("dimension", 1)
        assert all(map(lambda q: type(q) == int, self.qbits)), str(self.qbits)
        try:
            assert len(self.qbits) % self.dimension == 0
        except AssertionError:
            raise CircuitError(
                f"The input qbits length {len(self.qbits)} is not divisible by the '{self.symbol}' "
                f"gate's dimension {self.dimension}"
            )

    @property
    def qubits(self):
        # Public alias for the internal `qbits` list.
        return self.qbits

    def to_gates(self):
        # A gate applied to more qubits than its dimension expands to one gate
        # instance per `dimension`-sized slice of qubits.
        if len(self.qbits) > self.dimension:
            return [
                self.__class__(self.qbits[i : i + self.dimension]) for i in range(0, len(self.qbits), self.dimension)
            ]
        return [self]

    @property
    def num_states(self):
        # Size of the state space the gate's matrix acts on: 2**dimension.
        return np.power(2, self.dimension)

    def to_matrix(self) -> np.ndarray:
        # Default matrix is the identity.
        # BUGFIX: `num_states` is a property, so the previous
        # `np.eye(self.num_states())` called an `int` and raised TypeError.
        return np.eye(self.num_states)

    @property
    def matrix(self):
        return self.to_matrix()

    def invert(self):
        # Default: the gate is its own inverse; subclasses may override.
        return self

    @property
    def I(self):
        return self.invert()

    def __invert__(self):
        return self.invert()
class CNOT(_Gate):
    """Two-qubit controlled-NOT gate (matrix exchanges |10> and |11>)."""

    symbol = "CX"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 2
        super().__init__(*(qubits or (0, 1)), **kwargs)

    @staticmethod
    def to_matrix() -> np.ndarray:
        return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
class CY(_Gate):
    """Two-qubit controlled-Y gate."""

    symbol = "CY"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 2
        super().__init__(*(qubits or (0, 1)), **kwargs)

    @staticmethod
    def to_matrix() -> np.ndarray:
        return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, -1 * 1j], [0, 0, 1j, 0]])
class CSWAP(_Gate):
    """Three-qubit controlled-SWAP (Fredkin) gate."""

    symbol = "CSWAP"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 3
        super().__init__(*(qubits or (0, 1, 2)), **kwargs)

    @staticmethod
    def to_matrix() -> np.ndarray:
        # Identity on 8 states with columns 5 and 6 (|101> and |110>) exchanged.
        matrix = np.eye(8)
        matrix[:, [5, 6]] = matrix[:, [6, 5]]
        return matrix
class Hadamard(_Gate):
    """Single-qubit Hadamard gate."""

    symbol = "H"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 1
        super().__init__(*(qubits or (0,)), **kwargs)

    def to_matrix(self) -> np.ndarray:
        # 1/sqrt(2) * [[1, 1], [1, -1]] (num_states == 2 for dimension 1).
        scale = np.divide(1, np.sqrt(self.num_states))
        return np.multiply(scale, np.array([[1, 1], [1, -1]]))
class PauliX(_Gate):
    """Single-qubit Pauli-X (bit-flip) gate."""

    symbol = "X"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 1
        super().__init__(*(qubits or (0,)), **kwargs)

    @staticmethod
    def to_matrix() -> np.ndarray:
        return np.array([[0, 1], [1, 0]])
class PauliY(_Gate):
    """Single-qubit Pauli-Y gate."""

    symbol = "Y"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 1
        super().__init__(*(qubits or (0,)), **kwargs)

    @staticmethod
    def to_matrix() -> np.ndarray:
        return np.array([[0, -1j], [1j, 0]])
class PauliZ(_Gate):
    """Single-qubit Pauli-Z (phase-flip) gate."""

    symbol = "Z"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 1
        super().__init__(*(qubits or (0,)), **kwargs)

    @staticmethod
    def to_matrix() -> np.ndarray:
        return np.array([[1, 0], [0, -1]])
class QFT(_Gate):
    """Quantum Fourier transform over all supplied qubits."""

    def __init__(self, *qubits, **kwargs):
        qubits = qubits or (0, 1)
        # Dimension equals the number of qubit arguments supplied.
        super().__init__(*qubits, dimension=len(qubits), **kwargs)

    # def to_gates(self):
    #     # TODO: translate this gate to base gates / CNOTs
    #     pass

    def get_nth_unity_root(self, k):
        """Return exp(2*pi*i*k / num_states)."""
        return np.exp((2j * np.pi * k) / self.num_states)

    def to_matrix(self) -> np.ndarray:
        size = self.num_states
        mat = np.array(np.ones((size, size)), dtype="complex")
        # The matrix is symmetric, so only the upper triangle is computed and
        # mirrored; row/column 0 stays all-ones.
        for row in range(1, size):
            for col in range(row, size):
                root = self.get_nth_unity_root(row * col)
                mat[row, col] = root
                mat[col, row] = root
        return np.around(np.multiply(1 / np.sqrt(self.num_states), mat), decimals=15)
class SWAP(_Gate):
    """Two-qubit SWAP gate (exchanges |01> and |10>)."""

    symbol = "SWAP"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 2
        super().__init__(*(qubits or (0, 1)), **kwargs)

    @staticmethod
    def to_matrix() -> np.ndarray:
        return np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
class Cx(_Gate):
    """CNOT variant whose matrix exchanges |01> and |11> (unlike `CNOT`,
    which exchanges |10> and |11>).

    NOTE: the module-level alias `CX = cx = CNOT` at the bottom of this file
    shadows the `CX` name; this class is only reachable as `Cx`.
    """

    symbol = "CX"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 2
        super().__init__(*(qubits or (0, 1)), **kwargs)

    @staticmethod
    def to_matrix() -> np.ndarray:
        return np.array([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]])
class CCNOT(_Gate):
    """Three-qubit Toffoli gate (matrix exchanges |110> and |111>)."""

    symbol = "CCX"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 3
        super().__init__(*(qubits or (0, 1, 2)), **kwargs)

    @staticmethod
    def to_matrix() -> np.ndarray:
        return np.array(
            [
                [1, 0, 0, 0, 0, 0, 0, 0],
                [0, 1, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 0, 0, 0, 0, 0],
                [0, 0, 0, 1, 0, 0, 0, 0],
                [0, 0, 0, 0, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 1],
                [0, 0, 0, 0, 0, 0, 1, 0],
            ]
        )
class CRZ(_Gate):
    """Two-qubit controlled Z-rotation by `angle` (default 0)."""

    symbol = "CRZ"

    def __init__(self, *qubits, angle=0, **kwargs):
        kwargs["dimension"] = 2
        self.angle = angle
        super().__init__(*(qubits or (0, 1)), **kwargs)

    def to_matrix(self) -> np.ndarray:
        lower = np.exp(-1j * self.angle / 2)
        upper = np.exp(1j * self.angle / 2)
        return np.array(
            [
                [1, 0, 0, 0],
                [0, 1, 0, 0],
                [0, 0, lower, 0],
                [0, 0, 0, upper],
            ]
        )
class CH(_Gate):
    """Two-qubit controlled-Hadamard gate (Hadamard block on the last two basis states)."""

    symbol = "CH"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 2
        super().__init__(*(qubits or (0, 1)), **kwargs)

    @staticmethod
    def to_matrix() -> np.ndarray:
        return np.array(
            [
                [1, 0, 0, 0],
                [0, 1, 0, 0],
                [0, 0, 1 / np.sqrt(2), 1 / np.sqrt(2)],
                [0, 0, 1 / np.sqrt(2), -1 / np.sqrt(2)],
            ]
        )
class S(_Gate):
    """Single-qubit S gate: diag(1, i)."""

    symbol = "S"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 1
        super().__init__(*(qubits or (0,)), **kwargs)

    @staticmethod
    def to_matrix() -> np.ndarray:
        return np.array([[1, 0], [0, 1j]])
class Sdg(_Gate):
    """Single-qubit S-dagger gate: diag(1, -i)."""

    symbol = "Sdg"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 1
        super().__init__(*(qubits or (0,)), **kwargs)

    @staticmethod
    def to_matrix() -> np.ndarray:
        return np.array([[1, 0], [0, -1j]])
class T(_Gate):
    """Single-qubit T gate: diag(1, exp(i*pi/4))."""

    symbol = "T"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 1
        super().__init__(*(qubits or (0,)), **kwargs)

    @staticmethod
    def to_matrix() -> np.ndarray:
        return np.array([[1, 0], [0, np.exp(1j * np.pi / 4)]])
class Tdg(_Gate):
    """Single-qubit T-dagger gate: diag(1, exp(-i*pi/4))."""

    symbol = "Tdg"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 1
        super().__init__(*(qubits or (0,)), **kwargs)

    @staticmethod
    def to_matrix() -> np.ndarray:
        return np.array([[1, 0], [0, np.exp(-1j * np.pi / 4)]])
class ID(_Gate):
    """Single-qubit identity gate."""

    symbol = "I"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 1
        super().__init__(*(qubits or (0,)), **kwargs)

    @staticmethod
    def to_matrix() -> np.ndarray:
        return np.array([[1, 0], [0, 1]])
class U1(_Gate):
    """Single-qubit phase gate: diag(1, exp(i*angle))."""

    symbol = "U1"

    def __init__(self, *qubits, angle=0, **kwargs):
        kwargs["dimension"] = 1
        self.angle = angle
        super().__init__(*(qubits or (0,)), **kwargs)

    def to_matrix(self) -> np.ndarray:
        return np.array([[1, 0], [0, np.exp(1j * self.angle)]])
class Cz(_Gate):
    """Two-qubit controlled-Z gate: diag(1, 1, 1, -1)."""

    symbol = "CZ"

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 2
        super().__init__(*(qubits or (0, 1)), **kwargs)

    @staticmethod
    def to_matrix() -> np.ndarray:
        return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]])
class Rx(_Gate):
    """Single-qubit rotation about X by `angle` (default pi/2)."""

    symbol = "RX"

    def __init__(self, *qubits, angle=math.pi / 2, **kwargs):
        kwargs["dimension"] = 1
        self.angle = angle
        super().__init__(*(qubits or (0,)), **kwargs)

    def to_matrix(self) -> np.ndarray:
        cos_half = math.cos(self.angle / 2)
        off_diag = -math.sin(self.angle / 2) * 1j
        return np.array([[cos_half, off_diag], [off_diag, cos_half]])
class Ry(_Gate):
    """Single-qubit rotation about Y by `angle` (default pi/2)."""

    symbol = "RY"

    def __init__(self, *qubits, angle=math.pi / 2, **kwargs):
        kwargs["dimension"] = 1
        self.angle = angle
        super().__init__(*(qubits or (0,)), **kwargs)

    def to_matrix(self) -> np.ndarray:
        cos_half = math.cos(self.angle / 2)
        sin_half = math.sin(self.angle / 2)
        return np.array([[cos_half, -sin_half], [sin_half, cos_half]])
class Rz(_Gate):
    """Single-qubit rotation about Z by `angle` (required keyword)."""

    symbol = "RZ"

    def __init__(self, *qubits, angle, **kwargs):
        self.angle = angle
        kwargs["dimension"] = 1
        super().__init__(*(qubits or (0,)), **kwargs)

    def to_matrix(self) -> np.ndarray:
        phase = (1 / 2) * 1j * self.angle
        return np.array([[np.exp(-phase), 0], [0, np.exp(phase)]], dtype="complex")
class U3(_Gate):
    """General single-qubit rotation parameterized by theta, phi, lam."""

    symbol = "U3"

    def __init__(self, *qubits, theta=0, phi=0, lam=0, **kwargs):
        kwargs["dimension"] = 1
        self.theta = theta
        self.phi = phi
        self.lam = lam
        super().__init__(*(qubits or (0,)), **kwargs)

    def to_matrix(self) -> np.ndarray:
        cos_half = math.cos(self.theta / 2)
        sin_half = math.sin(self.theta / 2)
        return np.array(
            [
                [cos_half, -np.exp(1j * self.lam) * sin_half],
                [
                    np.exp(1j * self.phi) * sin_half,
                    np.exp(1j * (self.phi + self.lam)) * cos_half,
                ],
            ]
        )
class U2(U3):
    """U3 with theta fixed at pi/2."""

    symbol = "U2"

    def __init__(self, *qubits, phi=0, lam=0, **kwargs):
        super().__init__(*qubits, theta=np.pi / 2, phi=phi, lam=lam, **kwargs)
        # NOTE: shadows the class-level `symbol` with an instance attribute.
        self.symbol = "u2"
class Init_x(_Gate):
    """Initialization gate whose matrix equals the Hadamard matrix."""

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 1
        self.H = Hadamard(0)
        super().__init__(*qubits, **kwargs)

    def to_matrix(self) -> np.ndarray:
        return self.H.to_matrix()
class Init_y(_Gate):
    """Initialization gate whose matrix is S @ H."""

    def __init__(self, *qubits, **kwargs):
        kwargs["dimension"] = 1
        self.H = Hadamard(0)
        self.S = S()
        super().__init__(*qubits, **kwargs)

    def to_matrix(self) -> np.ndarray:
        return self.S.to_matrix().dot(self.H.to_matrix())
class Cr(_Gate):
    """Two-qubit controlled phase gate: adds phase exp(i*angle) to |11>.

    `angle` is a required keyword argument.
    """

    symbol = "CU1"

    def __init__(self, *qubits, angle, **kwargs):
        self.angle = angle
        kwargs["dimension"] = 2
        # BUGFIX: the default was `[0]`, one qubit for a dimension-2 gate, so
        # constructing `Cr(angle=...)` without qubits always raised CircuitError
        # from the divisibility check in `_Gate.__init__`. Default now matches
        # the other two-qubit gates.
        if not qubits:
            qubits = [0, 1]
        super().__init__(*qubits, **kwargs)

    def to_matrix(self) -> np.ndarray:
        return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, np.exp(1j * self.angle)]], dtype="complex")
class CRk(_Gate):
    """Two-qubit controlled phase gate with phase 2*pi / 2**k on |11>.

    `k` is a required keyword argument.
    """

    def __init__(self, *qubits, k, **kwargs):
        self.k = k
        kwargs["dimension"] = 2
        # BUGFIX: the default was `[0]`, one qubit for a dimension-2 gate, so
        # constructing `CRk(k=...)` without qubits always raised CircuitError
        # from the divisibility check in `_Gate.__init__`. Default now matches
        # the other two-qubit gates.
        if not qubits:
            qubits = [0, 1]
        super().__init__(*qubits, **kwargs)

    def to_matrix(self) -> np.ndarray:
        return np.array(
            [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, np.exp(2 * 1j * np.pi / 2 ** self.k)]], dtype="complex"
        )
# Aliases
# Short/lowercase synonyms so circuits can be written as e.g. H(0) or cx(0, 1).
H = h = Hadamard
X = x = PauliX
Y = y = PauliY
Z = z = PauliZ
swap = SWAP
Fredkin = cswap = CSWAP
CX = cx = CNOT
| {
"alphanum_fraction": 0.5080947384,
"author": null,
"avg_line_length": 23.9964028777,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "696bfe59953f2dc657d64439260348f8fed03c23",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6dda779779bc425ef7a726fc427620924255050e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "vanmagnan/shor",
"max_forks_repo_path": "shor/gates.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6dda779779bc425ef7a726fc427620924255050e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "vanmagnan/shor",
"max_issues_repo_path": "shor/gates.py",
"max_line_length": 120,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "6dda779779bc425ef7a726fc427620924255050e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "vanmagnan/shor",
"max_stars_repo_path": "shor/gates.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4102,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 13342
} |
import argparse
import logging
import json
from pprint import pprint
import numpy as np
import os
from astropy.coordinates import EarthLocation, SkyCoord
from astropy.time import Time
from astropy import units as u
from astropy.coordinates import AltAz
from datetime import datetime
import ctbend.ctbendbase.CTBend as CTBend
from ctbend.ctbendtrainer.CTBendGeometry import XYZVector, e_phi, e_theta
from ctbend.ctbendbase.PointingData import CCDCoordinate, DriveCoordinate,\
PointingData, PointingDataset
logger = logging.getLogger()
logging.basicConfig(level=logging.INFO)
class MCSetup(object):
    """Monte-Carlo configuration for simulated star-tracking runs.

    Wraps the MC config dictionary and exposes derived quantities:
    observing location, optics, tracking cadence and the bending model
    applied during simulated data-taking.
    """

    def __init__(self, config_dict):
        self.config_dict = config_dict

    def random_star(self):
        """Return a random star coordinate.

        NOTE(review): declination is drawn only from [0, 90) degrees, i.e.
        northern-hemisphere stars only — confirm this is intended.
        """
        ra_h = np.random.uniform(0, 24)
        ra_m = np.random.uniform(0, 60)
        ra_s = np.random.uniform(0, 60)
        star_ra = str(int(ra_h)) + "h"
        star_ra += str(int(ra_m)) + "m"
        star_ra += str(int(ra_s)) + "s"
        dec_d = np.random.uniform(0, 90)
        dec_m = np.random.uniform(0, 60)
        dec_s = np.random.uniform(0, 60)
        star_dec = str(int(dec_d)) + "d"
        star_dec += str(int(dec_m)) + "m"
        star_dec += str(int(dec_s)) + "s"
        return SkyCoord(ra=star_ra, dec=star_dec)

    @property
    def location(self):
        """Observation location as an astropy EarthLocation."""
        lat = u.Quantity(self.config_dict["location_lat"])
        lon = u.Quantity(self.config_dict["location_lon"])
        height = u.Quantity(self.config_dict["location_height"])
        return EarthLocation(lat=lat,
                             lon=lon,
                             height=height)

    @property
    def telescope_focal_length(self):
        """Focal length of the telescope as quantity with unit."""
        return u.Quantity(self.config_dict["telescope_focal_length"])

    @property
    def ccd_focal_length(self):
        """CCD focal length as quantity with unit."""
        return u.Quantity(self.config_dict["ccd_focal_length"])

    @property
    def ccd_pixel_size(self):
        """Size of a CCD pixel (assumed to be a square) as quantity with unit."""
        return u.Quantity(self.config_dict["ccd_pixel_size"])

    @property
    def delta_t(self):
        """Time between two CCD images as quantity with unit."""
        return u.Quantity(self.config_dict["delta_t"])

    @property
    def n_tracking(self):
        """Number of tracking points."""
        return int(self.config_dict["n_tracking"])

    @property
    def start_timestamp(self):
        """Timestamp at start of tracking."""
        return self.config_dict["start_timestamp"]

    def tracking_timestamps(self):
        """Yield one epoch timestamp per tracking point, delta_t apart in seconds."""
        delta_t = u.Quantity(self.config_dict["delta_t"])
        start_timestamp = self.config_dict["start_timestamp"]
        for i in range(int(self.config_dict["n_tracking"])):
            yield start_timestamp + i * delta_t.to(u.s).value

    @property
    def pixel_scale(self):
        """CCD pixel scale in radians (pixel size / CCD focal length)."""
        scale = self.ccd_pixel_size.to(u.m).value
        scale /= self.ccd_focal_length.to(u.m).value
        return scale * u.rad

    @property
    def bending(self):
        """The bending model applied during simulated data-taking."""
        parameters = self.config_dict["bending"]["parameters"]
        # (dropped a redundant `parameters0 = parameters` alias)
        return getattr(CTBend,
                       self.config_dict["bending"]["model"])(parameters)

    def measured_x1x2(self, true_x1, true_x2):
        """Smear a true CCD star position with Gaussian noise plus a fixed offset.

        Args:
            true_x1, true_x2: true star position in CCD coordinates.

        Returns:
            (x1, x2): truth + N(0, sigma^2) noise + configured offsets.
        """
        sigma = self.config_dict["sigma"]["size"]
        sigma2 = np.power(sigma, 2)
        (dx1, dx2) = np.random.multivariate_normal([0, 0], cov=[[sigma2, 0],
                                                                [0, sigma2]])
        offset_x1 = float(self.config_dict["sigma"]["offset_x1"])
        offset_x2 = float(self.config_dict["sigma"]["offset_x2"])
        return (true_x1 + dx1 + offset_x1, true_x2 + dx2 + offset_x2)
# Entry point: simulate one tracking run and write a PointingDataset to disk.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--config",
                        type=str,
                        help="MC config json file",
                        dest="CONFIG",
                        required=True)
    parser.add_argument("--outfile",
                        type=str,
                        help="MC tracking data outfile",
                        dest="OUTFILE",
                        required=True)
    options = parser.parse_args()
    # Refuse to overwrite an existing MC output file.
    if os.path.isfile(options.OUTFILE):
        raise RuntimeError(options.OUTFILE + " already exists")
    with open(options.CONFIG) as fin:
        config = json.load(fin)
        pprint(config)
    setup = MCSetup(config)
    info = "Pixelscale: " + str(setup.pixel_scale.to(u.arcsec).value)
    info += " arcsec"
    logger.info(info)
    # The dataset carries a zero-offset pointing model (no correction applied).
    pointing_model_for_data_taking = CTBend.ConstantOffsetModel(
        parameters={"azimuth_offset_deg": 0.,
                    "elevation_offset_deg": 0.})
    pointing_dataset = PointingDataset(
        pixelscale=setup.pixel_scale.to(u.arcsec).value,
        pointing_model=pointing_model_for_data_taking.serialize())
    for timestamp in setup.tracking_timestamps():
        # Defined inside the loop so the closure captures this iteration's timestamp.
        def random_star_altaz(minimal_altitude_deg: float = 5):
            """Returns random altaz coordinates for a test star.
            Args:
                minimal_altitude_deg: Minimal altitude of the star in degrees.
            """
            def get_random_star():
                time = datetime.fromtimestamp(timestamp)
                observing_time = Time(time)
                aa = AltAz(location=setup.location,
                           obstime=observing_time)
                return setup.random_star().transform_to(aa)
            # Redraw until the star is above the minimal altitude.
            star_altaz = get_random_star()
            while star_altaz.alt.to(u.deg).value < minimal_altitude_deg:
                star_altaz = get_random_star()
            return star_altaz
        star_altaz = random_star_altaz()
        bending = setup.bending
        az_star_deg = star_altaz.az.to(u.deg).value
        el_star_deg = star_altaz.alt.to(u.deg).value
        delta_az_deg = bending.delta_azimuth(az=az_star_deg,
                                             el=el_star_deg)
        delta_el_deg = bending.delta_elevation(az=az_star_deg,
                                               el=el_star_deg)
        # Drive coordinates: star position minus the bending-model deltas.
        telescope_az = (az_star_deg - delta_az_deg) * u.deg
        telescope_el = (el_star_deg - delta_el_deg) * u.deg
        telescope = XYZVector(az=telescope_az.to(u.deg).value,
                              el=telescope_el.to(u.deg).value)
        def get_e_phi():
            # Azimuthal focal-plane basis vector, including bending derivatives.
            delta_az_derivative_phi = bending.delta_azimuth_derivative_phi(
                az=telescope_az.to(u.deg).value,
                el=telescope_el.to(u.deg).value)
            delta_el_derivative_phi = bending.delta_elevation_derivative_phi(
                az=telescope_az.to(u.deg).value,
                el=telescope_el.to(u.deg).value)
            _e_phi = e_phi(az_tel=telescope.az,
                           el_tel=telescope.alt,
                           delta_az_derivative_phi=delta_az_derivative_phi,
                           delta_el_derivative_phi=delta_el_derivative_phi)
            return _e_phi
        def get_e_theta():
            # Polar focal-plane basis vector, including bending derivatives.
            delta_az_derivative_theta = bending.delta_azimuth_derivative_theta(
                az=telescope_az.to(u.deg).value,
                el=telescope_el.to(u.deg).value)
            delta_el_derivative_theta = \
                bending.delta_elevation_derivative_theta(
                    az=telescope_az.to(u.deg).value,
                    el=telescope_el.to(u.deg).value)
            _e_theta = e_theta(
                az_tel=telescope.az,
                el_tel=telescope.alt,
                delta_az_derivative_theta=delta_az_derivative_theta,
                delta_el_derivative_theta=delta_el_derivative_theta)
            return _e_theta
        def telescope_ccd_position():
            """The pointing direction of the telescope in CCD coordinates
            is the center of the LED pattern. By definition, this is the
            zero-vector in CCD coordinates in this MC.
            """
            x1_tel = 0
            x2_tel = 0
            return CCDCoordinate(x1_tel, x2_tel)
        def star_ccd_position():
            # Project the star onto the CCD plane and add measurement noise.
            star_vector = XYZVector(star_altaz.az.to(u.deg).value,
                                    star_altaz.alt.to(u.deg).value)
            focal_length_mm = setup.ccd_focal_length.to(u.mm).value
            image_star_length = focal_length_mm / (telescope * star_vector)
            # NOTE(review): looks like a mirror-image construction of the star
            # vector about the telescope axis — confirm against CTBendGeometry.
            image = telescope * (star_vector * telescope) * 2. - star_vector
            image_star = image * image_star_length
            fp_u = image_star * get_e_phi()
            fp_v = image_star * get_e_theta()
            x1_star = fp_u / setup.ccd_pixel_size.to(u.mm).value
            x2_star = fp_v / setup.ccd_pixel_size.to(u.mm).value
            measured_x1_star, measured_x2_star = setup.measured_x1x2(x1_star,
                                                                     x2_star)
            return CCDCoordinate(measured_x1_star, measured_x2_star)
        drive_position = DriveCoordinate(telescope.az, telescope.alt)
        pointing_data = PointingData(telescope=telescope_ccd_position(),
                                     star=star_ccd_position(),
                                     drive_position=drive_position)
        pointing_dataset.append(pointing_data)
        logger.debug("Pointing data:")
        logger.debug(pointing_data)
    pointing_dataset.save(options.OUTFILE)
| {
"alphanum_fraction": 0.5727593646,
"author": null,
"avg_line_length": 34.2214765101,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6dabd5f28654c086b3d4d43656607b582aa8a14a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "826c147a9b1830e66ff63fa356e7fd48b7615c96",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "residualsilence/ctbend",
"max_forks_repo_path": "ctbendtrainer/test/trackingmc.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "826c147a9b1830e66ff63fa356e7fd48b7615c96",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "residualsilence/ctbend",
"max_issues_repo_path": "ctbendtrainer/test/trackingmc.py",
"max_line_length": 79,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "826c147a9b1830e66ff63fa356e7fd48b7615c96",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "residualsilence/ctbend",
"max_stars_repo_path": "ctbendtrainer/test/trackingmc.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2135,
"path": null,
"reason": "import numpy,from astropy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10198
} |
[STATEMENT]
lemma dj_cp:
fixes pi1::"'x prm"
and pi2::"'y prm"
and x ::"'a"
assumes cp: "cp TYPE ('a) TYPE('x) TYPE('y)"
and dj: "disjoint TYPE('y) TYPE('x)"
shows "pi1\<bullet>(pi2\<bullet>x) = (pi2)\<bullet>(pi1\<bullet>x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pi1 \<bullet> pi2 \<bullet> x = pi2 \<bullet> pi1 \<bullet> x
[PROOF STEP]
by (simp add: cp1[OF cp] dj_perm_perm_forget[OF dj]) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 201,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
"""
To run tests:
pytest test_game.py
"""
import numpy as np
import curling.constants
import log_handler
from curling import board as board_utils
from curling import constants as c
from curling import game
from curling import simulation
from curling import utils
log_handler.flush_on_error()
def test_simulation_setupBoard_0():
    """A freshly constructed simulation contains no stones."""
    sim = simulation.Simulation()
    assert len(list(sim.getStones())) == 0
def test_simulation_setupBoard_1():
    """After adding one stone per team, both stones are reported."""
    curl = game.CurlingGame()
    sim = curl.sim
    sim.setupBoard(curl.getInitBoard())
    sim.addStone(c.P1_COLOR, 0, utils.TEE_LINE)
    sim.addStone(c.P2_COLOR, 0, utils.TEE_LINE)
    assert len(list(sim.getStones())) == 2
def test_simulation_getNextStoneId():
    """Ids advance per team as stones are added, alternating red/blue."""
    curl = game.CurlingGame()
    r = utils.STONE_RADIUS
    assert simulation.getNextStoneId(curl.sim.getBoard()) == 0  # for red
    curl.sim.addStone(c.P1_COLOR, 0, utils.HOG_LINE)
    assert simulation.getNextStoneId(curl.sim.getBoard()) == 0  # for blue
    curl.sim.addStone(c.P2_COLOR, 2 * r, utils.HOG_LINE + 2 * r)
    assert simulation.getNextStoneId(curl.sim.getBoard()) == 1  # for red
    curl.sim.addStone(c.P1_COLOR, 4 * r, utils.HOG_LINE + 4 * r)
    assert simulation.getNextStoneId(curl.sim.getBoard()) == 1  # for blue
def test_simulation_getNextStoneId_with_removed():
    """Four invalidated shooters shift the next id for both teams by two."""
    curl = game.CurlingGame()
    for _ in range(4):
        curl.sim.addShooterAsInvalid()
    r = utils.STONE_RADIUS
    assert simulation.getNextStoneId(curl.sim.getBoard()) == 2  # for red
    curl.sim.addStone(c.P1_COLOR, 0, utils.HOG_LINE)
    assert simulation.getNextStoneId(curl.sim.getBoard()) == 2  # for blue
    curl.sim.addStone(c.P2_COLOR, 2 * r, utils.HOG_LINE + 2 * r)
    assert simulation.getNextStoneId(curl.sim.getBoard()) == 3  # for red
    curl.sim.addStone(c.P1_COLOR, 4 * r, utils.HOG_LINE + 4 * r)
    assert simulation.getNextStoneId(curl.sim.getBoard()) == 3  # for blue
def test_simulation_getBoard_is_symmetric():
    """Create a board then convert it to simulation and back to board."""
    curl = game.CurlingGame()
    expected = curl.sim.getBoard()
    curl.sim.addStone(c.P1_COLOR, 0, utils.TEE_LINE)
    # Re-loading `expected` must discard the stone added above.
    curl.sim.setupBoard(expected)
    actual = curl.sim.getBoard()
    np.testing.assert_array_equal(
        list(board_utils.get_xy_team1(actual)),
        list(board_utils.get_xy_team1(expected)))
def SKIP_test_simulation_getBoard_button():
    """Round-trip a single stone placed exactly on the button.

    Skipped: the SKIP_ prefix prevents pytest collection.
    """
    sim = game.CurlingGame().sim
    board = sim.getBoard()
    board[-1][0] = c.EMPTY
    button_x, button_y = curling.constants.BUTTON_POSITION
    board_x, board_y = utils.realToBoard(button_x, button_y)
    board[board_x][board_y] = c.P1
    sim.setupBoard(board)
    # Materialize with list() like the other tests in this file —
    # len()/indexing on the raw getStones() result fails if it is lazy.
    stones = list(sim.getStones())
    assert len(stones) == 1
    stone_x, stone_y = stones[0].body.position
    assert (button_x, button_y) == (stone_x, stone_y)
    recalculated_board = sim.getBoard()
    expected = np.argwhere(board == c.P1)
    actual = np.argwhere(recalculated_board == c.P1)
    np.testing.assert_array_equal(expected, actual)
def SKIP_test_simulation_getBoard_right_edge():
    """Round-trip a stone placed 10 inches left of the button.

    Skipped: the SKIP_ prefix prevents pytest collection.
    """
    sim = game.CurlingGame().sim
    board = sim.getBoard()
    board[-1][0] = c.EMPTY
    button_x, button_y = curling.constants.BUTTON_POSITION
    button_x -= utils.dist(inches=10)  # NOTE - shifting to the left
    board_x, board_y = utils.realToBoard(button_x, button_y)
    board[board_x][board_y] = c.P1
    sim.setupBoard(board)
    # Materialize with list() like the other tests in this file —
    # len()/indexing on the raw getStones() result fails if it is lazy.
    stones = list(sim.getStones())
    assert len(stones) == 1
    stone_x, stone_y = stones[0].body.position
    assert (button_x, button_y) == (stone_x, stone_y)
    recalculated_board = sim.getBoard()
    expected = np.argwhere(board == c.P1)
    actual = np.argwhere(recalculated_board == c.P1)
    np.testing.assert_array_equal(expected, actual)
def test_invalid_shooter():
    """Invalidating the shooter on a fresh game must not raise."""
    game.CurlingGame().sim.addShooterAsInvalid()
def test_simulation_getBoard_has_extra_data():
    """setupBoard/getBoard round-trips the extra distance/score data."""
    curl = game.CurlingGame()
    expected = curl.sim.getBoard()
    board_utils.configure_hammer_2_scenario(expected)
    curl.sim.setupBoard(expected)
    actual = curl.sim.getBoard()
    # Not called during setup() because useless; apply before comparing.
    board_utils.update_distance_and_score(expected)
    np.testing.assert_array_equal(actual, expected)
| {
"alphanum_fraction": 0.700264667,
"author": null,
"avg_line_length": 26.514619883,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "06fcbe0272354b4f01046abddb88d8329b17ee08",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-08-23T02:17:06.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-08-23T02:17:06.000Z",
"max_forks_repo_head_hexsha": "1b2641ef823a8dedd49159ecd5b69b77c7165731",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mikhail/alpha-zero-general",
"max_forks_repo_path": "curling/test_simulation.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "1b2641ef823a8dedd49159ecd5b69b77c7165731",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mikhail/alpha-zero-general",
"max_issues_repo_path": "curling/test_simulation.py",
"max_line_length": 73,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "1b2641ef823a8dedd49159ecd5b69b77c7165731",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mikhail/alpha-zero-general",
"max_stars_repo_path": "curling/test_simulation.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1200,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4534
} |
# Imports here
import matplotlib.pyplot as plt
import numpy as np
import time
import json
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from collections import OrderedDict
import PIL
from PIL import Image
import argparse
def get_training_input_args():
    """Parse training CLI arguments (dataset path, architecture, hyperparameters)."""
    model_list = ['vgg16','vgg19','vgg13','densenet','alexnet', 'densenet201', 'resnet18']
    parser = argparse.ArgumentParser(description='Train and save a model checkpoint')
    parser.add_argument('data_dir', type=str, help='path to dataset folder')
    parser.add_argument('--save_dir', type=str, default='checkpoint.pth',
                        help='Directory where the checkpoint will be saved')
    parser.add_argument('--arch', type=str, default='vgg13', choices=model_list,
                        help='Model Architecture eg vgg,densenet, alexnet, resnet18')
    parser.add_argument('--learning_rate', type=float, default=0.01,
                        help='Learning Rate')
    parser.add_argument('--hidden_units', type=int, nargs='+', default=[1000],
                        help='List of number of nodes in hidden layers')
    parser.add_argument('--epochs', type=int, default=20,
                        help='Number of epochs')
    parser.add_argument('--gpu', action='store_true', help='Switch to gpu ')
    parser.add_argument('--dropout', type=float, default=0.5,
                        help='Dropout for training')
    return parser.parse_args()
def get_prediction_input_args():
    """Parse prediction CLI arguments (image, checkpoint, top-k, labels, gpu)."""
    parser = argparse.ArgumentParser(description='Predict the category of a flower')
    parser.add_argument('image_path', type=str,
                        help='path to flower whose class is to be predicted')
    parser.add_argument('checkpoint', type=str,
                        help='Directory where the checkpoint was saved')
    parser.add_argument('--top_k', type=int, default=3,
                        help='number of most likely classes')
    parser.add_argument('--category_names', default='cat_to_name.json',
                        help='Enter JSON file')
    parser.add_argument('--gpu', action='store_true', help='Switch to gpu ')
    return parser.parse_args()
def check_command_line_args_prediction(in_arg):
    """Print the parsed prediction arguments (exact output format is relied on)."""
    print("______________________________________")
    print("____ Arguments used for prediction ___")
    print("______________________________________")
    print("\n image_path =", in_arg.image_path,
          "\n checkpoint =", in_arg.checkpoint, "\n top_k =", in_arg.top_k, "\n category_names =", in_arg.category_names,
          "\n gpu =", in_arg.gpu)
def check_command_line_args(in_arg):
    """Print the parsed training arguments (exact output format is relied on)."""
    print("______________________________________")
    print("____ Arguments used for training   ___")
    print("______________________________________")
    print("\n data_dir =", in_arg.data_dir,
          "\n arch =", in_arg.arch, "\n save_dir =", in_arg.save_dir, "\n dropout =", in_arg.dropout,
          "\n learning_rate =", in_arg.learning_rate, "\n hidden_units =", in_arg.hidden_units, "\n epochs =", in_arg.epochs,
          "\n gpu =", in_arg.gpu)
# Loading the pretrained Network
in_features = 0  # NOTE(review): module-level default; load_model returns its own value


def load_model(model_name):
    """Load a pretrained torchvision model and report its classifier input width.

    Args:
        model_name: one of the architectures accepted by get_training_input_args.

    Returns:
        (model, in_features) tuple.

    Raises:
        ValueError: if the architecture is not recognized.
    """
    if 'vgg' in model_name:
        model = models.__dict__[model_name](pretrained=True)
        in_features = model.classifier[0].in_features
    elif model_name == 'alexnet':
        model = models.alexnet(pretrained = True)
        in_features = 9216
    elif 'densenet' in model_name:
        model = models.__dict__[model_name](pretrained=True)
        in_features = model.fc.in_features if False else model.classifier.in_features
    elif 'resnet' in model_name:
        # resnet18 is in the accepted --arch choices but previously had no
        # branch here, causing an UnboundLocalError on return.
        model = models.__dict__[model_name](pretrained=True)
        in_features = model.fc.in_features
    else:
        raise ValueError("Unsupported architecture: " + model_name)
    return model, in_features
# Building the Network
class FlowerNetwork(nn.Module):
    """Feed-forward classifier: hidden layers with ReLU + dropout, log-softmax output."""

    def __init__(self, input_size, output_size, hidden_layers, drop_p=0.5):
        super().__init__()
        sizes = [input_size] + list(hidden_layers)
        # One Linear per consecutive pair of layer widths.
        self.hidden_layers = nn.ModuleList(
            nn.Linear(n_in, n_out) for n_in, n_out in zip(sizes[:-1], sizes[1:]))
        self.output = nn.Linear(sizes[-1], output_size)
        self.dropout = nn.Dropout(p=drop_p)

    def forward(self, x):
        for layer in self.hidden_layers:
            x = self.dropout(F.relu(layer(x)))
        return F.log_softmax(self.output(x), dim=1)
def validation(model, device, dataloaders_valid, criterion):
    """Run one pass over the validation loader.

    Returns:
        (test_loss, accuracy): loss summed over batches and accuracy
        summed as a per-batch mean (callers divide by the loader length).
    """
    model.to(device)
    test_loss = 0
    accuracy = 0
    for images, labels in dataloaders_valid:
        images, labels = images.to(device), labels.to(device)
        output = model.forward(images)
        test_loss += criterion(output, labels).item()
        # The model emits log-probabilities; the argmax is the predicted class.
        predictions = output.max(dim=1)[1]
        accuracy += (labels.data == predictions).type(torch.FloatTensor).mean()
    return test_loss, accuracy
def trainer(device, model, dataloaders, print_every,criterion,optimizer,epochs, dataloaders_valid):
    """Train `model` in place, validating every `print_every` optimizer steps.

    Args:
        device: device the model and batches are moved to.
        model: network to train (modified in place).
        dataloaders: iterable of (images, labels) training batches.
        print_every: reporting/validation interval in steps.
        criterion: loss function.
        optimizer: optimizer stepping the model parameters.
        epochs: number of passes over the training data.
        dataloaders_valid: validation batches passed to validation().
    """
    steps = 0
    running_loss = 0
    model.to(device)
    for e in range(epochs):
        model.train()
        for images, labels in dataloaders:
            steps += 1
            images = images.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            # forward step happens here
            output = model.forward(images)
            loss = criterion(output, labels)
            #backpropagation step
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if steps % print_every == 0:
                # eval mode + no_grad: disable dropout/autograd for validation
                model.eval()
                with torch.no_grad():
                    test_loss, accuracy = validation(model, device, dataloaders_valid, criterion)
                print("Epoch: {}/{}.. ".format(e+1, epochs),
                      "Training Loss: {:.3f}.. ".format(running_loss/print_every),
                      "Validation Loss: {:.3f}.. ".format(test_loss/len(dataloaders_valid)),
                      " Accuracy: {:.3f}".format(accuracy/len(dataloaders_valid)))
                running_loss = 0
                # back to train mode so dropout is active again
                model.train()
    print("__Training successfully completed__")
# TODO: Do validation on the test set
def test_network(model, device,dataloaders_test):
model.to(device)
model.eval()
accurately_classified_count = 0
total = 0
for images, labels in dataloaders_test:
images = images.to(device)
labels = labels.to(device)
output = model(images)
_, prediction = torch.max(output.data,1)
total += labels.size(0)
accurately_classified_count += torch.sum(prediction == labels.data).item()
testing_accuracy = 100 * (accurately_classified_count/total)
return testing_accuracy
# TODO: Save the checkpoint
def save_checkpoint(model,model_name, filename, image_datasets, optimizer):
    """Serialize classifier, weights, optimizer state and class map to `filename`."""
    # Record the dataset's class->index mapping on the model itself.
    model.class_to_idx = image_datasets.class_to_idx
    checkpoint = {
        'model_name': model_name,
        'classifier': model.classifier,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'class_to_idx': model.class_to_idx,
    }
    torch.save(checkpoint, filename )
    print("Checkpoint successfully saved to: ", filename)
def load_checkpoint(filename,device):
    """Rebuild a model from a checkpoint created by save_checkpoint.

    Args:
        filename: path to the saved checkpoint.
        device: target device; also used to remap stored tensors.

    Returns:
        The reconstructed model in eval mode with frozen feature weights.
    """
    # map_location lets a checkpoint saved on GPU load on a CPU-only host.
    checkpoint = torch.load(filename, map_location=device)
    model_name = checkpoint['model_name']
    model = models.__getattribute__(model_name)(pretrained = True)
    model.to(device)
    # Freeze the pretrained feature extractor.
    for param in model.parameters():
        param.requires_grad = False
    model.classifier = checkpoint['classifier']
    model.class_to_idx = checkpoint['class_to_idx']
    model.load_state_dict(checkpoint['state_dict'])
    # (dropped the unused `optimizer = checkpoint['optimizer']` local;
    # the optimizer state remains available in the checkpoint dict)
    model.eval()
    return (model)
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns an Numpy array
    '''
    img = Image.open(image)
    # Resize so the shorter side is 256 px, preserving aspect ratio.
    if img.width > img.height:
        ratio = img.width/img.height
        img = img.resize(size=(int(round(ratio * 256, 0)), 256))
    else:
        ratio = img.height/img.width
        img = img.resize(size=(256, int(round(ratio * 256, 0))))
    # Center-crop to 224x224.
    left = (img.width - 224)/2
    top = (img.height - 224)/2
    img = img.crop(box=(left, top, left + 224, top + 224))
    # Scale to [0, 1] and normalize with the ImageNet channel statistics.
    np_image = np.array(img)/255
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    normalized = (np_image - mean)/std
    # Reorder HWC -> CHW for PyTorch.
    return normalized.transpose((2, 0, 1))
def imshow(image, ax=None, title=None):
    """Imshow for Tensor."""
    if ax is None:
        fig, ax = plt.subplots()
    # PyTorch arrays are CHW; matplotlib expects HWC.
    image = image.transpose((1, 2, 0))
    # Undo the ImageNet normalization.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    restored = std * image + mean
    # Clip to [0, 1] so the de-normalized image does not render as noise.
    ax.imshow(np.clip(restored, 0, 1))
    return ax
def predict(image_path, model, device, category_names, topk=5):
    ''' Predict the class (or classes) of an image using a trained deep learning model.
    '''
    # TODO: Implement the code to predict the class from an image file
    # category_names maps class labels to human-readable flower names.
    with open(category_names, 'r') as f:
        cat_to_name = json.load(f)
    image = process_image(image_path)
    # Add a batch dimension and move to the inference device.
    image = torch.from_numpy(image).type(torch.FloatTensor)
    image = image.unsqueeze(0).float()
    image = image.to(device)
    model.eval()
    with torch.no_grad():
        output = model.forward(image)
    # Model outputs log-probabilities; exponentiate to get probabilities.
    ps = torch.exp(output)
    probs, indices = torch.topk(ps, topk)
    Probs = np.array(probs.data[0])
    Indices = np.array(indices.data[0])
    # invert class_to_idx
    idx_to_class = {idx:Class for Class,idx in model.class_to_idx.items()}
    classes = [idx_to_class[i] for i in Indices]
    labels = [cat_to_name[Class] for Class in classes]
    print("\n ___Most likely flower class with associated Probability___ ")
    print(labels[0], " : ", Probs[0])
    print(":\n ___Top K classes along with associated probabilities ___: \n")
    for i, val in enumerate(Probs):
        print( labels[i], " : ", Probs[i])
    return Probs,labels
def duration(start_time, end_time):
    """Print elapsed wall time between two epoch timestamps as h:m:s."""
    tot_time = end_time - start_time
    hours = int(tot_time/3600)
    minutes = int((tot_time%3600)/60)
    seconds = int((tot_time%3600)%60)
    print("\n** Total Elapsed Runtime:", str(hours) + ":" + str(minutes) + ":" + str(seconds))
| {
"alphanum_fraction": 0.6274171584,
"author": null,
"avg_line_length": 33.7883435583,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7c912d11c4873d7dc4f4c126e7652382860f22d6",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7289adc5f1e811bf09144bd31dbb7c010a27dbc5",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "AineKiraboMbabazi/AIPND_Image_Classifier",
"max_forks_repo_path": "helper_functions.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7289adc5f1e811bf09144bd31dbb7c010a27dbc5",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "AineKiraboMbabazi/AIPND_Image_Classifier",
"max_issues_repo_path": "helper_functions.py",
"max_line_length": 149,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "7289adc5f1e811bf09144bd31dbb7c010a27dbc5",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "AineKiraboMbabazi/AIPND_Image_Classifier",
"max_stars_repo_path": "helper_functions.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2591,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 11015
} |
import os
from os.path import basename
os.environ['CUDA_VISIBLE_DEVICES'] = ''
from keras.models import load_model
import matplotlib.pyplot as plt
from skimage import io
import numpy as np
from skimage.color import gray2rgb, label2rgb
from skimage.io import imsave
from datetime import datetime
from functions import your_loss
import glob
from scipy.misc import imread
#from skimage.io import imread
import glob
import re
import time
import random
from datetime import datetime
labels = 4  # number of per-pixel output classes
channels = 1  # greyscale input
size = 56  # network input tile edge (inner_size + 2 * the overlap of 10 used below)
inner_size = 36  # stride between tile cores
ysize = 36  # network output tile edge
batch_size = 254  # files processed per prediction batch
def output_to_colors(result, x):
    """Build an RGBA overlay marking pixels whose argmax class is 2 in blue.

    Args:
        result: (rows, cols, n_classes) per-pixel class scores.
        x: greyscale image, used only for its spatial shape.

    Returns:
        (rows, cols, 4) float RGBA array: opaque blue where class 2 wins,
        transparent black elsewhere.
    """
    # `rows`, `cols` and `output` were previously undefined here (NameError);
    # derive them from the inputs as the commented-out lines intended, and
    # assign a full RGBA 4-vector (the old 3-vector could not broadcast).
    rows, cols = x.shape[0], x.shape[1]
    zeros = np.zeros((rows, cols, 4))
    output = result.argmax(axis=-1)
    zeros[output == 2] = [0, 0, 1, 1]
    return zeros
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
def get_tiles(img, inner_size, overlap):
    """Split a 2-D image into overlapping square tiles.

    The image is reflect-padded by `overlap` on every side; each tile covers
    an inner_size x inner_size core plus `overlap` context pixels per edge,
    so tiles are (inner_size + 2*overlap) square while cores do not overlap.

    Args:
        img: 2-D numpy array.
        inner_size: stride / core edge length in pixels.
        overlap: context pixels added around each core.

    Returns:
        List of tiles in row-major core order.
    """
    img_padded = np.pad(img, ((overlap, overlap), (overlap, overlap)), mode='reflect')
    xs = []
    # range (not the Python-2-only xrange) keeps this portable to Python 3;
    # iteration behavior is identical.
    for i in range(0, img.shape[0], inner_size):
        for j in range(0, img.shape[1], inner_size):
            img_overlapped = img_padded[i:i + inner_size + overlap + overlap,
                                        j:j + inner_size + overlap + overlap]
            xs.append(img_overlapped)
    return xs
def natural_sort(l):
    """Sort strings so embedded numbers compare numerically (e.g. a2 < a10)."""
    def natural_key(s):
        return [int(part) if part.isdigit() else part.lower()
                for part in re.split('([0-9]+)', s)]
    return sorted(l, key=natural_key)
# Pick the newest model (highest number in its filename) and run tiled prediction.
models = sorted(natural_sort(glob.glob('models/*')), key=lambda name: int(re.search(r'\d+', name).group()), reverse=True)[0:1]
print(models)
tick = datetime.now()
for model_n in models:
    model = load_model(model_n, custom_objects={'your_loss': your_loss})
    print("Loaded :%s", model_n)
    files_all = glob.glob('temp_cropped/*')
    #files_all = glob.glob('cleaned/raw/*.png')
    #files_all = glob.glob('images/single_frames/*.png')
    #files_all = glob.glob('images/all_grey/15_jpgs/*.jpg')
    #files_all = random.sample(files_all)
    #files_all = glob.glob('cleaned/penta/*')
    #files = files+files+files# + files[-4:-1]
    #print(files)
    # Process input files in batches to bound memory use.
    file_chunks = chunks(files_all, batch_size)
    for idx, files in enumerate(file_chunks):
        file_names = [basename(path) for path in files]
        #print(file_names)
        # Greyscale load, reflect-pad by 8 px, scale to [0, 1].
        imgs = np.array([np.pad(imread(fl, mode='L'), (8,8), mode='reflect').astype(float)/255 for fl in files])
        #import pdb; pdb.set_trace()
        tiles = np.array([get_tiles(img, 36, 10) for img in imgs])
        #print(file_chunks)
        #print("Processing: %s"%(fl))
        #print("Imgs shape: %s", tiles.shape)
        #Create input tensor
        xs = tiles.reshape(imgs.shape[0]*len(tiles[0]),size,size,channels)
        #print(np.unique(xs[0]))
        start_time = time.time()
        # Predict output
        ys = model.predict(xs)
        print("---- %s seconds for size: %d ----"%(time.time()-start_time, xs.shape[0]))
        ys = ys.reshape(imgs.shape[0],len(tiles[0]), ysize, ysize, labels)
        # Stitch it together
        for ix,y in enumerate(ys):
            #imgcount = 0
            count= 0
            img = imgs[ix]
            tile_output = np.zeros((img.shape[0],img.shape[1],4))
            zeros = np.zeros((img.shape[0],img.shape[1],4))
            # Paste each predicted tile back at its core location.
            for i in xrange(0, img.shape[0], inner_size):
                for j in xrange(0, img.shape[1], inner_size):
                    zeros[i:i+inner_size,j:j+inner_size] = y[count]
                    count += 1
            # One-hot the per-pixel winner.
            # NOTE(review): tile_output is all zeros here, so argmax is always
            # channel 0 — this looks like it should read `zeros` instead; confirm.
            for i in range(img.shape[0]):
                for j in range(img.shape[1]):
                    output = tile_output[i,j]
                    zeros[i,j,np.argmax(output)] = 1
            #count += 1
            #import pdb; pdb.set_trace()
            # Force full opacity in the alpha channel.
            zeros[:,:,3]=1
            #color = output_to_colors(zeros, img)
            #colors = [output_to_colors(y, imgs[i]) for i,y in enumerate(ys)]
            #colors = [label2rgb(y.argmax(axis=-1), image=imgs[i], colors=[(1,0,0), (0,1,0), (0,0,1), (0,0,0)], alpha=0.9, bg_label=3) for i,y in enumerate(ys)]
            #[plt.imsave('plots/%s_%s'%(model_n, file_names[i]), zeros) for i,zeros in enumerate(colors)]
            #print(file_names)
            plt.imsave('plots/results/%s.png'%(file_names[ix]), zeros)
print "total processing done in: "+str((datetime.now()-tick).total_seconds())
| {
"alphanum_fraction": 0.597693786,
"author": null,
"avg_line_length": 34.947761194,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "26c732c18116a026621aceeb43de31b4fecddbe6",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "44e7318ce6a974bd9fe975219dd3f45727bc9522",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "crackmech/fly-walk",
"max_forks_repo_path": "output_multiple_crop.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "44e7318ce6a974bd9fe975219dd3f45727bc9522",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "crackmech/fly-walk",
"max_issues_repo_path": "output_multiple_crop.py",
"max_line_length": 164,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "44e7318ce6a974bd9fe975219dd3f45727bc9522",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "crackmech/fly-walk",
"max_stars_repo_path": "output_multiple_crop.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1239,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4683
} |
C
C $Id: pakrsp.f,v 1.5 2008-07-27 00:17:10 haley Exp $
C
C Copyright (C) 2000
C University Corporation for Atmospheric Research
C All Rights Reserved
C
C The use of this Software is governed by a License Agreement.
C
REAL FUNCTION PAKRSP (ANG)
C
C Function to convert DMS packed angle into radians.
C
IMPLICIT REAL (A-Z)
DATA SECRAD /0.4848136811095359E-5/
C
C Convert angle to seconds of arc.
C
SEC = PAKSSP (ANG)
C
C Convert angle to radians.
C
PAKRSP = SEC * SECRAD
C
RETURN
END
| {
"alphanum_fraction": 0.648312611,
"author": null,
"avg_line_length": 20.8518518519,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "a9ec52742119d6515683a05c9da27279a7c9935e",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 58,
"max_forks_repo_forks_event_max_datetime": "2022-03-15T09:13:00.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-12-14T00:15:22.000Z",
"max_forks_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "tenomoto/ncl",
"max_forks_repo_path": "ncarg2d/src/libncarg/ezmapc/pakrsp.f",
"max_issues_count": 156,
"max_issues_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633",
"max_issues_repo_issues_event_max_datetime": "2022-03-30T07:02:21.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-09-22T09:56:48.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "tenomoto/ncl",
"max_issues_repo_path": "ncarg2d/src/libncarg/ezmapc/pakrsp.f",
"max_line_length": 62,
"max_stars_count": 210,
"max_stars_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "tenomoto/ncl",
"max_stars_repo_path": "ncarg2d/src/libncarg/ezmapc/pakrsp.f",
"max_stars_repo_stars_event_max_datetime": "2022-03-24T19:15:32.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-11-24T09:05:08.000Z",
"num_tokens": 178,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 563
} |
rng = StableRNG(614)
# Convert a binary categorical vector into a vector of +1/-1 Int8 values
# (testing helper only): class code 1 maps to -1, class code 2 to +1.
function pm1(y)
    codes = Int8.(MLJBase.int(y))
    return Int8(2) .* codes .- Int8(3)
end
# symbols of the wrapped loss measures exercised by the testsets below
const MARGIN_LOSSES = MLJBase.MARGIN_LOSSES
const DISTANCE_LOSSES = MLJBase.DISTANCE_LOSSES
# `WeightedMean` with `normalize=false` sums the weighted values rather than
# averaging them, i.e. it behaves as a weighted sum; see
# https://github.com/JuliaML/LossFunctions.jl/issues/149
function WeightedSum(w)
    return LossFunctions.AggMode.WeightedMean(w, normalize=false)
end
@testset "naked" begin
    # `naked` strips the module path and any type parameters, leaving the
    # bare type name as a Symbol -- check both parameterized and raw forms.
    for T in [MLJBase.LossFunctions.PeriodicLoss{Float64},
              MLJBase.LossFunctions.PeriodicLoss]
        @test MLJBase.naked(T) == :PeriodicLoss
    end
end
@testset "LossFunctions.jl - binary" begin
    # hand-built 4-observation example with known per-observation losses,
    # unweighted and weighted
    y = categorical(["yes", "yes", "no", "yes"])
    yes, no = y[1], y[3]
    dyes = MLJBase.UnivariateFinite([yes, no], [0.6, 0.4])
    dno = MLJBase.UnivariateFinite([yes, no], [0.3, 0.7])
    yhat = [dno, dno, dyes, dyes]
    w = [1, 2, 3, 4]
    @test MLJBase.ZeroOneLoss()(yhat, y) ≈ [1, 1, 1, 0]
    @test MLJBase.zero_one_loss(yhat,y, w) ≈ [1, 2, 3, 0]
    # random data: every margin-loss wrapper must agree with the raw
    # LossFunctions.jl computation on the ±1 / scaled representation
    N = 10
    y = categorical(rand(rng, ["yes", "no"], N), ordered=true)
    levels!(y, ["no", "yes"])
    no, yes = MLJBase.classes(y[1])
    @test pm1([yes, no]) in [[+1, -1], [-1, +1]]
    ym = pm1(y) # observations for raw LossFunctions measure
    p_vec = rand(N)
    yhat = MLJBase.UnivariateFinite([no, yes], p_vec, augment=true)
    yhatm = MLJBase._scale.(p_vec) # predictions for raw LossFunctions measure
    w = rand(rng, N)
    for M_ex in MARGIN_LOSSES
        # instantiate the measure from its exported symbol
        m = eval(:(MLJBase.$M_ex()))
        @test m(yhat, y) ≈ LossFunctions.value(getfield(m, :loss), ym, yhatm)
        # weighted mean of the wrapper == weighted raw sum / N
        @test MLJBase.Mean()(m(yhat, y, w)) ≈
            LossFunctions.value(getfield(m, :loss),
                                ym,
                                yhatm,
                                WeightedSum(w))/N
    end
end
@testset "LossFunctions.jl - continuous" begin
    # losses for continuous targets:
    N = 10
    y = randn(rng, N)
    yhat = randn(rng, N)
    X = nothing
    w = rand(rng, N)
    for M_ex in DISTANCE_LOSSES
        m = eval(:(MLJBase.$M_ex()))
        # the snake_case alias must resolve to an equal measure instance
        m_ex = MLJBase.snakecase(M_ex)
        @test m == eval(:(MLJBase.$m_ex))
        # wrapper must agree with raw LossFunctions.jl, unweighted ...
        @test m(yhat, y) ≈
            LossFunctions.value(getfield(m, :loss), y, yhat)
        # ... and weighted (mean of per-observation values == raw sum / N)
        @test mean(m(yhat ,y, w)) ≈
            LossFunctions.value(getfield(m, :loss), y, yhat,
                                WeightedSum(w))/N
    end
end
| {
"alphanum_fraction": 0.5810260586,
"author": null,
"avg_line_length": 33.1891891892,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "30fe057e61ad0278776f921042bbc71acaa066f0",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2e9444d64c6e1f63c0b2be409e1aadb7692672b7",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Rahulub3r/MLJBase.jl",
"max_forks_repo_path": "test/measures/loss_functions_interface.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2e9444d64c6e1f63c0b2be409e1aadb7692672b7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Rahulub3r/MLJBase.jl",
"max_issues_repo_path": "test/measures/loss_functions_interface.jl",
"max_line_length": 78,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2e9444d64c6e1f63c0b2be409e1aadb7692672b7",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Rahulub3r/MLJBase.jl",
"max_stars_repo_path": "test/measures/loss_functions_interface.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 828,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2456
} |
import numpy as np
import pandas as pd
data = pd.read_csv('input.txt', sep="\n", header=None)
# Part 1
def calculate_fuel_required(mass):
    """Return the fuel needed to launch a module of *mass*.

    Rule (AoC 2019 day 1): divide the mass by 3, round down, subtract 2.
    Uses integer floor division instead of ``int(mass / 3)``: ``int()``
    truncates toward zero (wrong direction for negatives) and routes the
    value through float; ``//`` is exact and always rounds down.
    ``int(mass)`` first collapses the one-element row Series that
    ``DataFrame.apply(axis=1)`` passes in, so a plain scalar is returned
    either way.
    """
    return int(mass) // 3 - 2
# Part 1: per-module fuel, summed over every row of the input
fuel = data.apply(calculate_fuel_required, axis=1)
fuel_required_sum = fuel.values.sum()
print("Fuel required = {0}".format(fuel_required_sum))
# Part 2
def calculate_fuel_recursive(mass):
    """Return the total fuel for *mass*, counting the fuel needed to carry
    the fuel itself (AoC 2019 day 1, part 2).

    Repeatedly feeds each fuel amount back through
    ``calculate_fuel_required`` and accumulates, stopping once a step
    needs zero (or negative) fuel.
    """
    total = 0
    step = calculate_fuel_required(mass)
    while step > 0:
        total += step
        step = calculate_fuel_required(step)
    return total
# sum the recursive (fuel-carries-fuel) requirement over all modules
recursive_fuel = data.apply(calculate_fuel_recursive, axis=1)
recursive_fuel_sum = recursive_fuel.values.sum()
print("Fuel required (recursive) = {0}".format(recursive_fuel_sum))
"alphanum_fraction": 0.7489878543,
"author": null,
"avg_line_length": 27.4444444444,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "9ec4e921aeefccda9c37f646f2c75a516876e76d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "28bd937d08dafbd232e4818d11cb37767a91d9c6",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "timothyr/advent_of_code_2019",
"max_forks_repo_path": "day1/solution.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "28bd937d08dafbd232e4818d11cb37767a91d9c6",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "timothyr/advent_of_code_2019",
"max_issues_repo_path": "day1/solution.py",
"max_line_length": 70,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "28bd937d08dafbd232e4818d11cb37767a91d9c6",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "timothyr/advent_of_code_2019",
"max_stars_repo_path": "day1/solution.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 195,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 741
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.