# -*- coding: utf-8 -*-
"""
Generate angle list and plot numbers of sensors for each angle on a cubemap
Allows the overlap to be tuned manually for the best trade-off between coverage and number of measurements
@author: Brice Dubost
Copyright 2020 Brice Dubost
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import matplotlib
#Adjust your backend here
#matplotlib.use('qt5agg')
import matplotlib.pyplot as plt
import numpy as np
import transf_mat
MLX_SENSORS_XPIX = 32
MLX_SENSORS_YPIX = 24
#================= CUBEMAP DATA =====================
cub_npix = 256
p_cam_data = transf_mat.gen_cubemap_data(cub_npix)
#================= WHO TO OPTIMIZE FOR ===============
#current option "LIDAR", "calibration", "dynamixel1", "dynamixel2"
optimize_for = "dynamixel1"
#===================================== Let's analyze lidar ===================
#=============== This part is still WIP ======================================
if optimize_for == "LIDAR":
L_SENSORS_XPIX = 100
L_SENSORS_YPIX = 100
sen_deg = 3
p_sen_data = {}
i_sen = 0
nsen = len(p_sen_data.keys())
for i_row in range(int(90/sen_deg)):
beta = i_row * sen_deg
corr = np.cos(beta*np.pi/180)
dalpha = sen_deg/corr
for i_col in range(int(180/dalpha)+1):
if i_row %2:
alpha = 180 - dalpha*i_col
else:
alpha = dalpha*i_col
p_sen_data[i_sen + nsen] = {
"alpha" : alpha,
"beta_i" : beta,
}
i_sen += 1
for i_row in range(int(90/sen_deg)):
beta = 90+(i_row+1) * sen_deg
corr = np.abs(np.cos(beta*np.pi/180))
dalpha = sen_deg/corr
for i_col in range(int(180/dalpha)+1):
if i_row %2:
alpha = 180 - dalpha*i_col
else:
alpha = dalpha*i_col
p_sen_data[i_sen + nsen] = {
"alpha" : alpha,
"beta_i" : beta,
}
i_sen += 1
for sen in p_sen_data:
p_sen_data[sen]["HFOV"] = sen_deg
p_sen_data[sen]["VFOV"] = sen_deg
p_sen_data[sen]["HNPIX"] = L_SENSORS_XPIX
p_sen_data[sen]["VNPIX"] = L_SENSORS_YPIX
p_sen_data_final = {"sen" : p_sen_data,
"arranged":p_sen_data.keys()}
#=============DYNAMIXEL 360 in alpha (bottom servo, rotation about vertical axis) 180 Beta =======================
if optimize_for == "dynamixel1":
p_sen_data = {}
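# Each entry of list_xx_bi is [number of alpha positions, beta_i (deg), alpha offset (deg)],
# unpacked as xx, betai, off in the loop below.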
list_xx_bi=[[9,-32,20],[9,-4,0],[9,23,20],[7,51,0],[3,76,0]]
for xx,betai,off in list_xx_bi:
nsen = len(p_sen_data.keys())
for i_sen in range(xx):
p_sen_data[i_sen + nsen] = { "alpha" : (181+i_sen*(360/xx))%360+off, "beta_i" : betai }
#Potentially we can look more down if we want
for sen in p_sen_data:
p_sen_data[sen]["HFOV"] = 45.64884636
p_sen_data[sen]["VFOV"] = 33.6037171
p_sen_data[sen]["gamma_i"] = 7.36132776
p_sen_data[sen]["distortion"] = -2.87108226
p_sen_data[sen]["HNPIX"] = MLX_SENSORS_XPIX
p_sen_data[sen]["VNPIX"] = MLX_SENSORS_YPIX
p_sen_data_final = {"sen" : p_sen_data,
"arranged": [0,1,2,3,4,5,6,7,8,17,16,15,14,13,12,11,10,9,18,19,20,21,22,23,24,25,26,33,32,31,30,29,28,27,34,35,36]}
#=============DYNAMIXEL 360 in alpha doubled for overlapping images (bottom servo, rotation about vertical axis) 180 Beta =======================
if optimize_for == "dynamixel2":
p_sen_data = {}
list_xx_bi=[[9,-32,20],[9,-4,0],[9,23,20],[7,51,0],[3,80,0],
[9,-32-9.25,32.5],[9,-4-10.25,22.5],[9,23-10.25,27.5],[8,51-10.25,18.5],[5,76-9.25,28.5]]
for xx,betai,off in list_xx_bi:
nsen = len(p_sen_data.keys())
for i_sen in range(xx):
p_sen_data[i_sen + nsen] = { "alpha" : (181+i_sen*(360/xx))%360+off, "beta_i" : betai }
#Potentially we can look more down if we want
for sen in p_sen_data:
p_sen_data[sen]["HFOV"] = 45.64884636
p_sen_data[sen]["VFOV"] = 33.6037171
p_sen_data[sen]["gamma_i"] = 7.36132776
p_sen_data[sen]["distortion"] = -2.87108226
p_sen_data[sen]["HNPIX"] = MLX_SENSORS_XPIX
p_sen_data[sen]["VNPIX"] = MLX_SENSORS_YPIX
p_sen_data_final = {"sen" : p_sen_data,
"arranged": [0,1,2,3,4,5,6,7,8,17,16,15,14,13,12,11,10,9,18,19,20,21,22,23,24,25,26,33,32,31,30,29,28,27,34,35,36,\
37,38,39,40,41,42,43,44,45, 54,53,52,51,50,49,48,47,46, 55,56,57,58,59,60,61,62,63, 71,70,69,68,67,66,65,64, 72,73,74,75]}
#============= DYNAMIXEL calibration =======================
if optimize_for == "calibration":
p_sen_data = {}
hcenter = -44
vcenter = -1.5
dh = 40
dv = 25
#step = 3 #crude calibration
step = 1 #slow calibration
i_sen = 0
for ah in np.arange(hcenter-dh/2.,hcenter + dh/2., step):
for av in np.arange(vcenter-dv/2.,vcenter + dv/2., step):
p_sen_data[i_sen] = {
"alpha" : ah,
"beta_i" : av,
}
i_sen +=1
for sen in p_sen_data:
p_sen_data[sen]["HFOV"] = 55
p_sen_data[sen]["VFOV"] = 35
p_sen_data[sen]["HNPIX"] = MLX_SENSORS_XPIX
p_sen_data[sen]["VNPIX"] = MLX_SENSORS_YPIX
p_sen_data[sen]["Hoff"] = 45 #Following the adjustment
p_sen_data[sen]["Voff"] = 0
p_sen_data_final = {"sen" : p_sen_data,
"arranged": list(range(len(p_sen_data.keys())))}
#===================================================================================
import math
if __name__ == "__main__":
print("Hi let's try to optimize sensor orientation ")
p_data = p_sen_data_final
p_sen_data = p_data["sen"]
transf_mat.compute_all_matrices(p_cam_data,p_sen_data,plot = True)
print("Here is your angle list, you have %d positions " % len(p_sen_data.keys()))
for i_sen in p_sen_data.keys():
print("Sensor %3d\t Alpha %5.1f\t Beta_i %5.1f"%(i_sen,p_sen_data[i_sen]["alpha"],p_sen_data[i_sen]["beta_i"]))
if "arranged" in p_data.keys():
i_sen_l = p_data["arranged"]
print("\n\n\nHere is your arranged list by hand !!! , you have %d positions " % len(i_sen_l))
pa = 0
pb = 0
for i_sen in i_sen_l:
print("Sensor %3d\t Alpha %5.1f\t Beta_i %5.1f \t DAlpha %6.1f\t DBeta_i %5.1f"
%(i_sen,p_sen_data[i_sen]["alpha"],p_sen_data[i_sen]["beta_i"],
math.fmod(p_sen_data[i_sen]["alpha"]-pa,360),math.fmod(p_sen_data[i_sen]["beta_i"]-pb,360)))
pa = p_sen_data[i_sen]["alpha"]
pb = p_sen_data[i_sen]["beta_i"]
print("# ==== Angles for angles.txt =========")
dstr = ""
#Expect a pair of Y angle, Xi angle per line. eg "-20 12.34"
for i_sen in i_sen_l:
dstr += "%6.2f " %(p_sen_data[i_sen]["alpha"])
dstr += " %6.2f" %(p_sen_data[i_sen]["beta_i"])
dstr += "\n"
print(dstr)
|
{"hexsha": "0baa63aca8a56f668d4a9e4b2642f016c784f973", "size": 8056, "ext": "py", "lang": "Python", "max_stars_repo_path": "robot_optimize_angles.py", "max_stars_repo_name": "brice-digilus/Infrared_Analysis", "max_stars_repo_head_hexsha": "614a14b832f130f9cca9ea3659c08e40fb3d1b1c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "robot_optimize_angles.py", "max_issues_repo_name": "brice-digilus/Infrared_Analysis", "max_issues_repo_head_hexsha": "614a14b832f130f9cca9ea3659c08e40fb3d1b1c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "robot_optimize_angles.py", "max_forks_repo_name": "brice-digilus/Infrared_Analysis", "max_forks_repo_head_hexsha": "614a14b832f130f9cca9ea3659c08e40fb3d1b1c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3463035019, "max_line_length": 157, "alphanum_fraction": 0.5269364449, "include": true, "reason": "import numpy", "num_tokens": 2481}
|
import numpy as np
from numpy import ma
from scipy.optimize import bisect
# This code was modified slightly from superplot; some functionality was deprecated, so this fills in the blanks
# URL to original code is below:
# https://github.com/michaelhb/superplot/blob/master/superplot/statslib/two_dim.py
def posterior_pdf(paramx, paramy, posterior, nbins=50, bin_limits=None):
r"""
Weighted histogram of data for two-dimensional posterior pdf.
.. warning::
Outliers sometimes mess up bins. So you might want to \
specify the bin limits.
.. warning::
Posterior pdf is normalized such that it sums to one.
:param paramx: Data column of parameter x
:type paramx: numpy.ndarray
:param paramy: Data column of parameter y
:type paramy: numpy.ndarray
:param posterior: Data column of posterior weight
:type posterior: numpy.ndarray
:param nbins: Number of bins for histogram
:type nbins: integer
:param bin_limits: Bin limits for histogram
:type bin_limits: list [[xmin,xmax],[ymin,ymax]]
:returns: x and y bin centers, posterior pdf
:rtype: tuple (bin_centers_x: numpy.ndarray, bin_centers_y: \
numpy.ndarray, pdf: numpy.ndarray)
:Example:
>>> nbins = 100
>>> x, y, pdf = posterior_pdf(data[2], data[3], data[0], nbins=nbins)
>>> assert len(pdf) == nbins
>>> assert len(x) == nbins
>>> assert len(y) == nbins
"""
# 2D histogram the data - pdf is a matrix
pdf, bin_edges_x, bin_edges_y = np.histogram2d(
paramx,
paramy,
nbins,
range=bin_limits,
weights=posterior)
# Normalize the pdf so that it sums to one.
pdf = pdf / pdf.sum()
# Find centers of bins
bin_centers_x = 0.5 * (bin_edges_x[:-1] + bin_edges_x[1:])
bin_centers_y = 0.5 * (bin_edges_y[:-1] + bin_edges_y[1:])
return bin_centers_x, bin_centers_y, pdf
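# shift() maps raw np.digitize() bin numbers onto valid 0-based bin indices:
# out-of-range points (bin number `low` below the range, `high` above it) are
# clamped into the first/last in-range bin, then everything is shifted by -1
# so the result can index a numpy array directly.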
def shift(rawBinNum, nbins, axList=None):
if axList is not None:
assert len(axList) == 2
low = axList[0]
high = axList[1]
else:
low = 0
high = nbins+1
# Remove low
rawBinNum_sansLow = np.where(rawBinNum==low, low+1, rawBinNum)
# Remove high
rawBinNum_sansLowAndHigh = np.where(rawBinNum_sansLow==high, high-1, rawBinNum_sansLow)
# Shift to python array notation
binNum = rawBinNum_sansLowAndHigh - 1
return binNum
def profile_like(paramx, paramy, chi_sq, nbins, bin_limits=None):
"""
Maximizes the likelihood in each bin to obtain the profile likelihood and
profile chi-squared.
:param paramx: Data column of parameter x
:type paramx: numpy.ndarray
:param paramy: Data column of parameter y
:type paramy: numpy.ndarray
:param chi_sq: Data column of chi-squared
:type chi_sq: numpy.ndarray
:param nbins: Number of bins for histogram
:type nbins: integer
:param bin_limits: Bin limits for histogram
:type bin_limits: list [[xmin,xmax],[ymin,ymax]]
:returns: x and y bin centers, profile chi-squared, profile likelihood
:rtype: tuple (\
bin_center_x: numpy.ndarray, \
bin_center_y: numpy.ndarray, \
prof_chi_sq: numpy.ndarray, \
prof_like: numpy.ndarray)
:Example:
>>> nbins = 100
>>> x, y, chi_sq, like = profile_like(data[2], data[3], data[0], nbins=nbins)
>>> assert len(chi_sq) == nbins
>>> assert len(like) == nbins
>>> assert len(x) == nbins
>>> assert len(y) == nbins
"""
# Bin the data to find the bin edges; the counts themselves are discarded
_, bin_edges_x, bin_edges_y = np.histogram2d(
paramx,
paramy,
nbins,
range=bin_limits,
weights=None)
# Find centers of bins
bin_center_x = 0.5 * (bin_edges_x[:-1] + bin_edges_x[1:])
bin_center_y = 0.5 * (bin_edges_y[:-1] + bin_edges_y[1:])
# Find bin number for each point in the chain
bin_numbers_x = np.digitize(paramx, bin_edges_x)
bin_numbers_y = np.digitize(paramy, bin_edges_y)
# Shift bin numbers to account for outliers
bin_numbers_x = shift(bin_numbers_x, nbins)
bin_numbers_y = shift(bin_numbers_y, nbins)
# Initialize the profiled chi-squared to something massive
prof_chi_sq = np.full((nbins, nbins), float("inf"))
# Minimize the chi-squared in each bin by looping over all the entries in
# the chain.
for index in range(chi_sq.size):
bin_numbers = (bin_numbers_x[index], bin_numbers_y[index])
if bin_numbers[0] is not None and bin_numbers[1] is not None and chi_sq[index] < prof_chi_sq[bin_numbers]:
prof_chi_sq[bin_numbers] = chi_sq[index]
# Subtract minimum chi-squared (i.e. minimum profile chi-squared is zero,
# and maximum profile likelihood is one).
prof_chi_sq = prof_chi_sq - prof_chi_sq.min()
# Exponentiate to obtain profile likelihood
prof_like = np.exp(- 0.5 * prof_chi_sq)
return bin_center_x, bin_center_y, prof_chi_sq, prof_like
def getFuncOnGrid(dfDict_new, xkey, ykey, zkey, axisRange, gMesh, frequentist=True):
#-- Set up preliminary parameters --#
paramx = dfDict_new[xkey]
paramy = dfDict_new[ykey]
xmin, xmax, ymin, ymax = axisRange
bin_limits = [[xmin, xmax], [ymin, ymax]]
nbins = int(gMesh.imag)
#-- Calculate function on grid depending on type --#
if(zkey == 'posterior'):
posterior = dfDict_new[zkey]
X, Y, Z = posterior_pdf(paramx, paramy, posterior, nbins=nbins, bin_limits=bin_limits)
elif(zkey == 'likelihood'):
likelihood = dfDict_new[zkey]
if(frequentist):
chi_sq = -2.*np.log(likelihood)
else:
chi_sq = -2.*np.log((likelihood/likelihood.max()))
X, Y, _, Z = profile_like(paramx, paramy, chi_sq, nbins, bin_limits=bin_limits)
#-- Create dictionary of values --#
gridDict = {}
gridDict["X"] = X
gridDict["Y"] = Y
gridDict["Z"] = Z
return gridDict
def smoothData(Z, sigma, order, normType='max'):
from scipy.ndimage import gaussian_filter
Z_smooth = gaussian_filter(Z, sigma=sigma, order=order) # sigma=1, order=0 is a good choice
if normType == 'max':
Z_smooth = Z_smooth/Z_smooth.max()
elif normType == 'sum':
Z_smooth = Z_smooth/Z_smooth.sum()
return Z_smooth
def calcCriticalDensity(pdf, alpha):
# Normalize posterior pdf so that it sums to one, if it doesn't already
pdf = pdf / pdf.sum()
# Minimize difference between amount of probability contained above a
# particular density and that desired
prob_desired = 1. - alpha
def prob_contained(density):
return ma.masked_where(pdf < density, pdf).sum()
def delta_prob(density):
return prob_contained(density) - prob_desired
# Critical density cannot be greater than maximum posterior pdf and must
# be greater than 0. The function delta_probability is monotonic on that
# interval. Find critical density by bisection.
critical_density = bisect(delta_prob, 0., pdf.max())
return critical_density
def findCriticalDensityVals(pdf):
"""
Finds 1 and 2 sigma critical density values
alpha = 0.32 => 68% probability contained => 1 sigma curve
alpha = 0.05 => 95% probability contained => 2 sigma curve
"""
#-- Define alpha levels --#
levels = [2, 1]
alphaList = [0.05, 0.32]
#! Develop fancier alpha given level function later
#-- Calculate critical density --#
critDensityList = []
for aa in alphaList:
critDensityList.append(calcCriticalDensity(pdf, aa))
return critDensityList
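# --- Minimal usage sketch (not part of the original module) ------------------
# The dictionary keys, axis range and synthetic data below are hypothetical;
# they only illustrate how getFuncOnGrid, smoothData and
# findCriticalDensityVals are meant to chain together.
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    np.random.seed(0)
    n = 10000
    dfDict = {"x": np.random.normal(size=n),
              "y": np.random.normal(size=n),
              "posterior": np.ones(n)}            # equal posterior weights
    axisRange = (-3., 3., -3., 3.)

    # 100j -> 100 bins per axis (getFuncOnGrid reads nbins from gMesh.imag)
    grid = getFuncOnGrid(dfDict, "x", "y", "posterior", axisRange, gMesh=100j)
    Z = smoothData(grid["Z"], sigma=1, order=0, normType="sum")
    levels = findCriticalDensityVals(Z)           # [2-sigma, 1-sigma] densities

    # Z is indexed [x, y]; transpose so rows correspond to y for contouring
    plt.contour(grid["X"], grid["Y"], Z.T, levels=sorted(levels))
    plt.savefig("credible_regions_example.png")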
|
{"hexsha": "a2b26366ee20f007e34302188848b0f8ef062cbd", "size": 8019, "ext": "py", "lang": "Python", "max_stars_repo_path": "Plotting/plottingUtils/statUtils.py", "max_stars_repo_name": "jnhoward/SU2LDM_public", "max_stars_repo_head_hexsha": "67db9142cbb67946e273ac940d13906d0a39bf58", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Plotting/plottingUtils/statUtils.py", "max_issues_repo_name": "jnhoward/SU2LDM_public", "max_issues_repo_head_hexsha": "67db9142cbb67946e273ac940d13906d0a39bf58", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Plotting/plottingUtils/statUtils.py", "max_forks_repo_name": "jnhoward/SU2LDM_public", "max_forks_repo_head_hexsha": "67db9142cbb67946e273ac940d13906d0a39bf58", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3259911894, "max_line_length": 114, "alphanum_fraction": 0.6302531488, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 2076}
|
[STATEMENT]
lemma LIMSEQ_le_const2: "X \<longlonglongrightarrow> x \<Longrightarrow> \<exists>N. \<forall>n\<ge>N. X n \<le> a \<Longrightarrow> x \<le> a"
for a x :: "'a::linorder_topology"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>X \<longlonglongrightarrow> x; \<exists>N. \<forall>n\<ge>N. X n \<le> a\<rbrakk> \<Longrightarrow> x \<le> a
[PROOF STEP]
by (rule LIMSEQ_le[of X x "\<lambda>n. a"]) auto
|
{"llama_tokens": 169, "file": null, "length": 1}
|
import numpy as np
import pandas as pd
from munch import Munch
from plaster.run.prep import prep_fixtures
from plaster.run.prep.prep_worker import triangle_dytmat, dyt_to_seq
from plaster.run.priors import PriorsMLEFixtures, MLEPrior
from plaster.run.sim_v2 import sim_v2_worker
from plaster.run.sim_v2.sim_v2_params import SimV2Params
from plaster.tools.utils import utils
from plaster.tools.schema import check
from plaster.run.sim_v2 import dyt_helpers
from plaster.tools.c_common.c_common_tools import DytPepType, DytType, RadType, RowKType
from zest import zest
from plaster.tools.zlog.zlog import spy
def zest_dytmat_sim():
def it_handles_normal_dytmat_sim_no_error():
n_peps = 3
n_channels = 2
n_cycles = 5
dyes_labels = Munch(
dyes=[
Munch(dye_name="dye0", channel_name="ch0"),
Munch(dye_name="dye1", channel_name="ch1"),
],
labels=[
Munch(aa="A", dye_name="dye0", label_name="label0", ptm_only=False),
Munch(aa="B", dye_name="dye1", label_name="label1", ptm_only=False),
],
)
n_samples = 10
sim_v2_params = SimV2Params(
n_pres=1,
n_mocks=0,
n_edmans=4,
n_samples=n_samples,
**dyes_labels,
priors_desc={
"gain_mu": dict(class_name="MLEPrior", params=dict(value=5000.0)),
"gain_sigma": dict(class_name="MLEPrior", params=dict(value=50.0)),
"bg_sigma": dict(class_name="MLEPrior", params=dict(value=50.0)),
"row_k_sigma": dict(class_name="MLEPrior", params=dict(value=0.0)),
"p_edman_failure": dict(class_name="MLEPrior", params=dict(value=0.0)),
"p_detach": dict(class_name="MLEPrior", params=dict(value=0.0)),
"p_bleach": dict(class_name="MLEPrior", params=dict(value=0.0)),
"p_non_fluorescent": dict(
class_name="MLEPrior", params=dict(value=0.0)
),
},
)
# pepseqs: DF(pep_i, aa, pep_off_in_pro)
pepseqs_df = pd.DataFrame(
dict(
pep_i=[0, 1, 1, 1, 2, 2, 2],
aa=[".", "A", "B", ".", "B", ".", "."],
pep_offset_in_pro=[0, 0, 1, 2, 3, 4, 5],
)
)
# pcbs are an encoding of flus. See def pcbs()
pcbs = sim_v2_params.pcbs(pepseqs_df)
train_dytmat, train_dytpeps, train_pep_recalls = sim_v2_worker._dytmat_sim(
sim_v2_params, pcbs, n_samples=n_samples, progress=None,
)
n_dyts = train_dytmat.shape[0]
check.array_t(train_dytmat, shape=(n_dyts, n_channels * n_cycles))
# dytpeps are in (dyt_i, pep_i, count) order
# Every peptide should appear; the highest pep index is n_peps - 1
assert np.max(train_dytpeps[:, 1]) == n_peps - 1
# Assert that every peptide (except 0) got 10 samples
for pep_i in range(1, n_peps):
mask = train_dytpeps[:, 1] == pep_i
assert train_dytpeps[mask, 2].sum() == n_samples
# Assert only those peptides are present
# 1: to skip the reserved row
assert np.min(train_dytpeps[1:, 1]) == 1
assert np.max(train_dytpeps[1:, 1]) == n_peps - 1
def it_handles_bleaching_no_error():
n_cycles = 5
dyes_labels = Munch(
dyes=[Munch(dye_name="dye0", channel_name="ch0"),],
labels=[
Munch(aa="X", dye_name="dye0", label_name="label0", ptm_only=False),
],
)
n_samples = 10
sim_v2_params = SimV2Params(
n_pres=1,
n_mocks=0,
n_edmans=4,
n_samples=n_samples,
**dyes_labels,
priors_desc={
"gain_mu": dict(class_name="MLEPrior", params=dict(value=5000.0)),
"gain_sigma": dict(class_name="MLEPrior", params=dict(value=50.0)),
"bg_sigma": dict(class_name="MLEPrior", params=dict(value=50.0)),
"row_k_sigma": dict(class_name="MLEPrior", params=dict(value=0.0)),
"p_edman_failure": dict(class_name="MLEPrior", params=dict(value=0.0)),
"p_detach": dict(class_name="MLEPrior", params=dict(value=0.0)),
"p_bleach": dict(class_name="MLEPrior", params=dict(value=0.0)),
"p_non_fluorescent": dict(
class_name="MLEPrior", params=dict(value=0.0)
),
},
)
dytmat = triangle_dytmat(5, 1, include_multi_drop=False, include_nul_row=True)
pep_i = []
aa = []
pep_offset_in_pro = []
for i, dyt in enumerate(dytmat):
pep_i += [i] * n_cycles
aa += list(dyt_to_seq(dyt))
pep_offset_in_pro += [0] * n_cycles
pepseqs_df = pd.DataFrame(
dict(pep_i=pep_i, aa=aa, pep_offset_in_pro=pep_offset_in_pro,)
)
# pcbs are an encoding of flus. See def pcbs()
pcbs = sim_v2_params.pcbs(pepseqs_df)
n_samples = 10
train_dytmat, train_dytpeps, train_pep_recalls = sim_v2_worker._dytmat_sim(
sim_v2_params, pcbs, n_samples=n_samples, progress=None,
)
n_dyts = train_dytmat.shape[0]
check.array_t(train_dytmat, shape=(n_dyts, n_cycles))
# dytpeps are in (dyt_i, pep_i, count) order
# Every peptide should appear (non-zero maximum pep index)
assert np.max(train_dytpeps[:, 1])
# Assert that every peptide (except 0) got 10 samples
for pep_i in range(1, 6):
mask = train_dytpeps[:, 1] == pep_i
assert train_dytpeps[mask, 2].sum() == n_samples
# Assert only those peptides are present
# 1: to skip reserved row
assert np.min(train_dytpeps[1:, 1]) == 1
assert np.max(train_dytpeps[1:, 1]) == 5
# Mapping of dyt and pep should both be identity
assert np.all(train_dytpeps[1:, 0] == np.arange(1, n_dyts))
assert np.all(train_dytpeps[1:, 1] == np.arange(1, n_dyts))
assert np.all(train_dytpeps[2:, 2] == n_samples)
assert train_dytmat.tolist() == [
[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
]
zest()
def zest_sample_pep_dytmat():
def it_samples():
dytpep_group = np.array([[1, 1, 5], [2, 1, 5], [3, 1, 0],], dtype=int,)
n_samples_per_pep = 20
sampled_dt_iz = sim_v2_worker._sample_pep_dytmat(
dytpep_group, n_samples_per_pep
)
assert sampled_dt_iz.shape == (n_samples_per_pep,)
assert not np.any(sampled_dt_iz == 0)
assert np.any(sampled_dt_iz == 1)
assert np.any(sampled_dt_iz == 2)
assert not np.any(sampled_dt_iz == 3)
def it_handles_no_samples():
dytpep_group = np.zeros((0, 3), dtype=int)
n_samples_per_pep = 10
sampled_dt_iz = sim_v2_worker._sample_pep_dytmat(
dytpep_group, n_samples_per_pep
)
assert sampled_dt_iz.shape == (0,)
def it_handles_no_counts():
dytpep_group = np.zeros((1, 3), dtype=int)
n_samples_per_pep = 10
sampled_dt_iz = sim_v2_worker._sample_pep_dytmat(
dytpep_group, n_samples_per_pep
)
assert sampled_dt_iz.shape == (0,)
def it_does_not_resample_if_identical_counts():
# fmt: off
dytpep_group = np.array([
[1, 1, 5],
[2, 1, 5],
[3, 1, 0],
], dtype=int)
# fmt: on
n_samples_per_pep = 10
sampled_dt_iz = sim_v2_worker._sample_pep_dytmat(
dytpep_group, n_samples_per_pep
)
assert sampled_dt_iz.tolist() == [1, 1, 1, 1, 1, 2, 2, 2, 2, 2]
zest()
def zest_radmat_sim():
dyes_labels = Munch(
dyes=[
Munch(dye_name="dye0", channel_name="ch0"),
Munch(dye_name="dye1", channel_name="ch1"),
],
labels=[
Munch(aa="A", dye_name="dye0", label_name="label0", ptm_only=False),
Munch(aa="B", dye_name="dye1", label_name="label1", ptm_only=False),
],
)
params_with_noise = SimV2Params(
**dyes_labels,
priors_desc={
"gain_mu": dict(class_name="MLEPrior", params=dict(value=7500.0)),
"gain_sigma": dict(class_name="MLEPrior", params=dict(value=0.16)),
"bg_sigma": dict(class_name="MLEPrior", params=dict(value=200.0)),
},
)
# ch_params_with_noise = params_with_noise.by_channel()
params_no_noise = SimV2Params(
**dyes_labels,
priors_desc={
"gain_mu": dict(class_name="MLEPrior", params=dict(value=1.0)),
"gain_sigma": dict(class_name="MLEPrior", params=dict(value=0.0)),
"bg_sigma": dict(class_name="MLEPrior", params=dict(value=0.0)),
"row_k_sigma": dict(class_name="MLEPrior", params=dict(value=0.0)),
},
)
# ch_params_no_noise = params_no_noise.by_channel()
# fmt: off
dytmat = np.array([
[[0, 0, 0], [0, 0, 0]],
[[1, 1, 0], [1, 0, 0]],
[[2, 2, 1], [2, 1, 0]],
], dtype=DytType)
dytpeps = np.array([
[0, 0, 0],
[1, 1, 10],
[1, 2, 5],
[2, 2, 5],
], dtype=DytPepType)
# fmt: on
n_samples_per_pep = 10
n_channels = 2
n_cycles = 3
n_peps = dyt_helpers.n_peps(dytpeps)
n_samples_total = (n_peps - 1) * n_samples_per_pep # -1 to exclude the nul record
out_radmat, out_row_ks, out_dyt_iz, out_pep_iz = None, None, None, None
def _call(params):
nonlocal out_radmat, out_row_ks, out_dyt_iz, out_pep_iz
out_radmat = np.zeros(
shape=(n_samples_total, n_channels, n_cycles), dtype=RadType,
)
out_row_ks = np.zeros(shape=(n_samples_total,), dtype=RowKType)
out_dyt_iz = np.zeros(shape=(n_samples_total,), dtype=DytPepType)
out_pep_iz = np.zeros(shape=(n_samples_total,), dtype=DytPepType)
sim_v2_worker._radmat_sim(
dytmat,
dytpeps,
params.priors,
n_samples_per_pep,
use_lognormal_model=True,
out_radmat=out_radmat,
out_row_ks=out_row_ks,
out_dyt_iz=out_dyt_iz,
out_pep_iz=out_pep_iz,
)
def it_removes_all_zero_rows():
_call(params_with_noise)
n_expected_rows = 20
# 20 because we sample 10 per pep but only peps 1 and 2 have non-zero dyts
assert out_pep_iz.shape[0] == n_expected_rows
# fmt: off
assert out_pep_iz.tolist() == [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
]
# fmt: on
assert not np.any(np.all(out_radmat == 0.0, axis=(1, 2)))
def it_returns_reasonable_radiometry():
_call(params_with_noise)
# Only 2 of the peptide have dyetracks
assert out_radmat.shape == (n_samples_per_pep * 2, n_channels, n_cycles)
# fmt: off
assert out_pep_iz.tolist() == [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
]
# fmt: on
# I'm not sure of a good test here
# assert np.all(radiometry[radiometry > 0.0] > 1000.0)
def it_returns_correct_radiometry_with_no_noise():
# By using no noise, we can just compare that radiometry gave back the dytmat
# but with each peptide repeated
_call(params_no_noise)
assert np.all(out_row_ks == 1.0)
assert np.all(out_radmat[0:5] == dytmat[1, :].astype(RadType),)
assert np.all(
(
(out_radmat[5:10] == dytmat[1, :].astype(RadType))
| (out_radmat[5:10] == dytmat[2, :].astype(RadType))
),
)
# fmt: off
assert out_dyt_iz[0:5].tolist() == [1, 1, 1, 1, 1]
# fmt: on
assert np.all((out_dyt_iz[5:10] == 1) | (out_dyt_iz[5:10] == 2))
# fmt: off
assert out_pep_iz.tolist() == [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
]
# fmt: on
zest()
def zest_sim_v2_worker():
prep_result = prep_fixtures.result_simple_fixture()
def _sim(priors=None, _prep_result=None, sim_kwargs=None):
if _prep_result is None:
_prep_result = prep_result
priors = PriorsMLEFixtures.fixture_no_errors(**(priors or {}))
if sim_kwargs is None:
sim_kwargs = {}
sim_kwargs["use_lognormal_model"] = True
sim_v2_params = SimV2Params.from_aa_list_fixture(
["A", "B"], priors=priors, n_edmans=4, **(sim_kwargs or {})
)
return sim_v2_worker.sim_v2(sim_v2_params, _prep_result), sim_v2_params
@zest.retry(2)
def it_returns_train_dytmat():
# Because it has no errors, only the perfect dytmats are produced
sim_v2_result, sim_v2_params = _sim()
assert sim_v2_result.train_dytmat.shape == (4, 5 * 2) # 5 cycles, 2 channels
assert utils.np_array_same(
sim_v2_result.train_dytmat,
np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 2, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 2, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 1, 1, 0, 0, 0],
],
dtype=np.uint8,
),
)
def it_returns_train_dytmat_with_a_zero_row():
sim_v2_result, sim_v2_params = _sim()
assert np.all(sim_v2_result.train_dytmat[0, :] == 0)
@zest.retry(2)
def it_returns_train_dytmat_for_cleaved_cterm_labels():
prep_cterm = prep_fixtures.result_cterm_label_fixture()
# dyemat when allow_edman_cterm is True
sim_v2_result, sim_v2_params = _sim(
_prep_result=prep_cterm, sim_kwargs=Munch(allow_edman_cterm=True)
)
assert sim_v2_result.train_dytmat.shape == (6, 5 * 2) # 5 cycles, 2 channels
assert utils.np_array_same(
sim_v2_result.train_dytmat,
np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
],
dtype=np.uint8,
),
)
@zest.retry(2)
def it_returns_train_dytmat_for_uncleaved_cterm_labels():
prep_cterm = prep_fixtures.result_cterm_label_fixture()
# dyemat when allow_edman_cterm is False (default)
sim_v2_result, sim_v2_params = _sim(
_prep_result=prep_cterm, sim_kwargs=Munch(allow_edman_cterm=False)
)
assert sim_v2_result.train_dytmat.shape == (3, 5 * 2) # 5 cycles, 2 channels
assert utils.np_array_same(
sim_v2_result.train_dytmat,
np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
],
dtype=np.uint8,
),
)
def it_returns_train_dytpeps():
sim_v2_result, sim_v2_params = _sim()
# The order of dyts is not guaranteed, so remove them
assert utils.np_array_same(
sim_v2_result.train_dytpeps[:, (0, 2)],
np.array([[0, 0], [3, 5000], [2, 5000], [1, 5000],], dtype=DytPepType),
)
def it_handles_non_fluorescent():
sim_v2_result, sim_v2_params = _sim(priors=dict(p_non_fluorescent=0.5))
# Check that every dytpep other than the nul-row
# should have n_reads (col=2) a lot less than 5000.
assert np.all(sim_v2_result.train_dytpeps[1:, 2] < 2000)
def it_returns_no_all_dark_samples():
sim_v2_result, sim_v2_params = _sim(priors=dict(p_non_fluorescent=0.99))
assert not np.any(sim_v2_result.train_dytpeps[1:, 0] == 0)
def it_returns_recalls():
sim_v2_result, sim_v2_params = _sim(priors=dict(p_non_fluorescent=0.50))
assert sim_v2_result.train_pep_recalls.shape[0] == 4 # 4 peps
assert (
sim_v2_result.train_pep_recalls[0] == 0.0
) # The nul record should have no recall
assert np.all(
sim_v2_result.train_pep_recalls[1:] < 0.85
) # The exact number is hard to say, but it should be < 1
def it_emergency_escapes():
sim_v2_result, sim_v2_params = _sim(priors=dict(p_non_fluorescent=0.99))
# When nothing is fluorescent, everything should have zero recall
assert np.all(sim_v2_result.train_pep_recalls == 0.0)
def it_handles_empty_dytpeps():
sim_v2_result, sim_v2_params = _sim(priors=dict(p_non_fluorescent=1.0))
assert np.all(sim_v2_result.train_pep_recalls == 0.0)
def decoys():
prep_with_decoy = prep_fixtures.result_simple_fixture(has_decoy=True)
sim_v2_result, sim_v2_params = _sim(priors=dict(), _prep_result=prep_with_decoy)
# def it_maintains_decoys_for_train():
# assert sim_v2_result.train_dytmat.shape == (4, 10)
#
# def it_removes_decoys_for_test():
# # 1000 because the nul-dye track should be removed
# assert sim_v2_result.test_radmat.shape == (1000, 2, 5)
zest()
def it_skips_row_noise():
sim_v2_result, sim_v2_params = _sim(priors=dict(row_k_sigma=0.0))
assert np.all(sim_v2_result.test_true_row_ks == 1.0)
def it_adds_row_noise():
sim_v2_result, sim_v2_params = _sim(priors=dict(row_k_sigma=0.5))
assert np.any(sim_v2_result.test_true_row_ks != 1.0)
@zest.skip(reason="Not implemented")
def it_raises_if_train_and_test_identical():
raise NotImplementedError
zest()
def zest_sim_v2_photobleaching():
def it_sims_photobleach():
sim_v2_result = sim_v2_worker.sim_v2_photobleaching(
n_cycles=5, n_count=2, n_samples=7,
gain_mu=5000.0, gain_sigma=50.0, bg_sigma=50.0, row_k_sigma=0.1
)
assert sim_v2_result.train_radmat.shape[0] >= 7 * (sim_v2_result.train_dytmat.shape[0]-1)
zest()
|
{"hexsha": "7c92f6f2420585a3c10ad1731b0110b8e74b7f7d", "size": 18488, "ext": "py", "lang": "Python", "max_stars_repo_path": "plaster/run/sim_v2/zests/zest_sim_v2_worker.py", "max_stars_repo_name": "erisyon/plaster", "max_stars_repo_head_hexsha": "20af32aed2365c6351fe3c26293308960099152b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plaster/run/sim_v2/zests/zest_sim_v2_worker.py", "max_issues_repo_name": "erisyon/plaster", "max_issues_repo_head_hexsha": "20af32aed2365c6351fe3c26293308960099152b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2020-06-22T19:27:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-30T20:02:31.000Z", "max_forks_repo_path": "plaster/run/sim_v2/zests/zest_sim_v2_worker.py", "max_forks_repo_name": "erisyon/plaster", "max_forks_repo_head_hexsha": "20af32aed2365c6351fe3c26293308960099152b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-06-16T17:38:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-06T09:37:22.000Z", "avg_line_length": 34.9489603025, "max_line_length": 97, "alphanum_fraction": 0.5698831675, "include": true, "reason": "import numpy", "num_tokens": 5701}
|
"""Copyright © 2020-present, Swisscom (Schweiz) AG.
All rights reserved."""
from subprocess import call
import numpy as np
from codi.codi_utils import create_speech_data, create_unlabelled_speech_data, save_ids
from codi.speech_trainer import SpeechTrainer
def train_codi(labelling='naive', threshold=None):
"""
Process of training: computing features, creating data, training.
Params: labelling : 'naive' or 'levenshtein'
threshold : for the levenshtein method.
"""
codi_trainer = SpeechTrainer(yaml_model_path='codi/mlp_codi.yaml',
yaml_train_path='codi/speech_trainer.yaml')
X, y = create_speech_data(codi_trainer.train_params['features'], method=labelling, thresh=threshold)
y = y[~np.isnan(X).any(axis=1)]
X = X[~np.isnan(X).any(axis=1)]
codi_trainer.create_labelled_dataset(X, y)
codi_trainer.train()
print('***CoDi model trained.***')
def infer_from_codi():
"""
Process of inferring: computing features, creating data, inferring, filtering points.
"""
codi_trainer = SpeechTrainer(yaml_model_path='codi/mlp_codi.yaml',
yaml_train_path='codi/speech_trainer.yaml')
X = create_unlabelled_speech_data(codi_trainer.train_params['features'])
X = X[~np.isnan(X).any(axis=1)]
codi_trainer.create_unlabelled_dataset(X)
save_ids(codi_trainer.test())
print('***Prediction done.***')
def experiment():
"""
Computes the experiment based on the value in train_codi.yaml
exp : '1' is the fixed-sized variable threshold experiment.
exp : '2' is the variable-sized variable threshold experiment.
"""
ids_trust = np.load('codi/outputs_init/ids_trust.npy', allow_pickle=True)
ids_no_trust = np.load('codi/outputs_init/ids_no_trust.npy', allow_pickle=True)
for i in range(0, len(ids_trust)):
np.save('codi/outputs/ids_trust.npy', ids_trust[i])
np.save('codi/outputs/ids_no_trust.npy', ids_no_trust[i])
call('cd s5 && ./run.sh --stage 23 --i {}'.format(i+1), shell=True)
def iterative_process(N):
"""
Iterative retraining (N iterations)
"""
for i in range(0, N):
call('cd speech_inference && ./run.sh --stage 23 --i {}'.format(i+1), shell=True)
infer_from_codi()
def main():
"""
Whole Process pipeline encompassing the other modules.
"""
# If GMM has not already been trained on labelled set, put --stage 0,
# otherwise, put --stage 20
call('cd speech_inference && ./run.sh --stage 0', shell=True)
print('***Inference model trained***')
train_codi(labelling='levenshtein', threshold=None)
infer_from_codi()
# experiment()
iterative_process(N=10)
if __name__ == '__main__':
main()
|
{"hexsha": "5cb5b377224582a6e8dcf445da8eb77b5e8f8c9a", "size": 2784, "ext": "py", "lang": "Python", "max_stars_repo_path": "main_speech.py", "max_stars_repo_name": "swisscom/ai-research-data-valuation-repository", "max_stars_repo_head_hexsha": "bcb45b7d8b84674f12e0a3671260290d98257c9f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main_speech.py", "max_issues_repo_name": "swisscom/ai-research-data-valuation-repository", "max_issues_repo_head_hexsha": "bcb45b7d8b84674f12e0a3671260290d98257c9f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main_speech.py", "max_forks_repo_name": "swisscom/ai-research-data-valuation-repository", "max_forks_repo_head_hexsha": "bcb45b7d8b84674f12e0a3671260290d98257c9f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3720930233, "max_line_length": 104, "alphanum_fraction": 0.6695402299, "include": true, "reason": "import numpy", "num_tokens": 730}
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy
import scipy
import os
import pylab
import networkx as nx
pylab.ion()
########################
# Determines which pairs are highly cross-correlated
# and which are highly phase locked
# (high PLV, Phase Locking Value)
########################
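# Classification used below (thresholds taken from the loops that follow):
#   complete synchronization : cross-correlation >= 0.9 at zero lag
#   lag synchronization      : cross-correlation >= 0.9 at a non-zero lag
#   phase locked             : PLV >= 0.9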
path=os.getenv('P_Dir')
initial_nodes=1
C = numpy.genfromtxt('%s/Cross_correlation_connected_edges.dat' %(path),dtype=(int,int,float,float))
D = numpy.genfromtxt('%s/PLV_connected_edges.dat' %(path),dtype=(int,int,float))
#C = numpy.genfromtxt('%s/Correlation_Sorted_By_Pairs.dat' %(path),dtype=(tuple,tuple,float,float))
#C = numpy.loadtxt('%s/Correlation_Sorted_By_Pairs.dat' %(path), unpack=True)
B = numpy.genfromtxt('./Input/Edge_distribution_BARABASI_1.dat',dtype=(tuple,tuple),delimiter=',')
B=[(int(a[0].strip('(')),(int(a[1].strip(')')))) for i,a in enumerate(B)]
Complete_Sync_Pairs=[]
PLV_Sync_Pairs=[]
Lag_Sync_Pairs=[]
Not_Sync_Pairs=[]
for j in range(len(C)):
if(C[j][3]==0.0):
if(C[j][2] >=0.9):
Complete_Sync_Pairs.append((C[j][0],C[j][1]))
else:
Not_Sync_Pairs.append((C[j][0],C[j][1]))
elif(C[j][3]!=0.0):
if(C[j][2] >=0.9):
Lag_Sync_Pairs.append((C[j][0],C[j][1]))
else:
Not_Sync_Pairs.append((C[j][0],C[j][1]))
print Not_Sync_Pairs
for j in range(len(D)):
if(D[j][2]>=0.9):
if((D[j][0],D[j][1])!=(C[j][0],C[j][1])):
PLV_Sync_Pairs.append((D[j][0],D[j][1]))
# Draw the graph with highlighted edges
G=nx.barabasi_albert_graph(50,initial_nodes,seed=1)
H=G.to_directed()
for u,v,d in sorted(H.edges(data=True)): #u,v -> pairs, d=weight
d['weight']=1./H.in_degree(v)
#position = nx.spring_layout(H)
position = {0: ([ 0.42171447, 0.36624474]), 1: ([ 0.27835402, 0.57645869]), 2: ([ 0.70713832, 0.43504537]), 3: ([ 0.79295514, 0.63469889]), 4: ([ 0.87275703, 0.31531177]), 5: ([ 0.19338322, 0.25321228]), 6: ([ 0.62960323, 0.60557565]), 7: ([ 0.80488831, 0.79570882]), 8: ([ 0.22708454, 0.12506547]), 9: ([ 0.86004523, 0.6818332 ]), 10: ([ 0.23780114, 0.83137355]), 11: ([ 0.50635835, 0.31935575]), 12: ([ 0.19578812, 0.50293847]), 13: ([ 0.85244911, 0.45402593]), 14: ([ 0.2100549 , 0.95737484]), 15: ([ 0.42108751, 0.16371832]), 16: ([ 0.79616455, 0.92104627]), 17: ([ 0.32502199, 0.40051425]), 18: ([ 0.81118556, 0.23715869]), 19: ([ 1.04908159, 0.20114414]), 20: ([ 0.9603631 , 0.34510215]), 21: ([ 0.20749589, 0.61441376]), 22: ([ 0.1067045 , 0.66612503]), 23: ([ 0.10661692, 0.40643112]), 24: ([ 0.02109619, 0.71804176]), 25: ([ 0.29630843, 0.75620989]), 26: ([ 0.77264734, 0.32278909]), 27: ([ 0.59637322, 0.30288791]), 28: ([ 0.40646795, 0.68953242]), 29: ([ 0.71585117, 0.70037731]), 30: ([ 0.94090026, 0.46592304]), 31: ([ 0.37341987, 0.03891329]), 32: ([ 0.29356884, 0.12460151]), 33: ([ 0.05802269, 0.32214673]), 34: ([ 0.00977545, 0.22167312]), 35: ([ 0.79315063, 1.05 ]), 36: ([ 0.53349251, 0.20963861]), 37: ([ 0.44142946, 0.50166274]), 38: ([ 0.3032838, -0.1 ]), 39: ([ 0.36181199, 0.59372331]), 40: ([ 0.70089743, 0.31080511]), 41: ([ 0.07144408, 0.15225059]), 42: ([ 0. , 0.09694259]), 43: ([ 0.100334, 0.5529214 ]), 44: ([ 0.80433902, 0.53163181]), 45: ([ 0.44638934, 0.008488029]), 46: ([ 0.2051538, 0.38861911]), 47: ([ 0.0016441 , 0.38750248]), 48: ([ 0.5716443 , 0.09833529]), 49: ([ 0.20562024, 0.74734859])}
nx.set_node_attributes(H,'pos',position)
nx.draw_networkx_nodes(H,pos=position, node_color='b')
a=nx.draw_networkx_edges(H,position,edgelist=Complete_Sync_Pairs, width=2.5,edge_color='r', label="Complete synchronized")
b=nx.draw_networkx_edges(H,position,edgelist=Lag_Sync_Pairs,width=2.5,edge_color='g',label="Lag synchronized")
c=nx.draw_networkx_edges(H,position,edgelist=PLV_Sync_Pairs,width=2.5,edge_color='m',label="Phase locked")
nx.draw_networkx_edges(H,position,edgelist=Not_Sync_Pairs,width=1.0)
#nx.draw_networkx_labels(H,position)
ax1=plt.gca()
ax1.legend([a,b,c],['Complete synchronization','Lag synchronization','Phase locked'], frameon=False,loc='lower right')
ax1.set_frame_on(False)
ax1.set_xticks([])
ax1.set_yticks([])
ax1.set_xticklabels([])
pylab.savefig('%s/Network.eps' %path)
pylab.close()
|
{"hexsha": "83157194aafba52d0d0584cdb04f78042ebcb5c0", "size": 4243, "ext": "py", "lang": "Python", "max_stars_repo_path": "TEST_1/Analysis/Classification_Synchronization_Types_Network.py", "max_stars_repo_name": "dmalagarriga/PLoS_2015_segregation", "max_stars_repo_head_hexsha": "949afedf96945c11ee84b1a6c9842e5257fb5be8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-28T08:49:49.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-28T08:49:49.000Z", "max_issues_repo_path": "TEST_2b/Analysis/Classification_Synchronization_Types_Network.py", "max_issues_repo_name": "dmalagarriga/PLoS_2015_segregation", "max_issues_repo_head_hexsha": "949afedf96945c11ee84b1a6c9842e5257fb5be8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TEST_2b/Analysis/Classification_Synchronization_Types_Network.py", "max_forks_repo_name": "dmalagarriga/PLoS_2015_segregation", "max_forks_repo_head_hexsha": "949afedf96945c11ee84b1a6c9842e5257fb5be8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.1204819277, "max_line_length": 1752, "alphanum_fraction": 0.6394060806, "include": true, "reason": "import numpy,import scipy,import networkx", "num_tokens": 1788}
|
Christianity is a relatively popular religion in town. Besides churches, there are a number of local businesses and services that either cater specifically to Christians or that operate under a Christian philosophy.
Retail
Davis Christian Bookroom
Integrity Windows & Doors
Christian Education
Davis Community Church Nursery School
Little Ones Christian Preschool
Grace Valley Christian Academy
St. James Catholic School
Departed
The Open Bible
|
{"hexsha": "1df3e611faa09db06875d1679f029045ed86ae3b", "size": 469, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Christian_Businesses_and_Services.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Christian_Businesses_and_Services.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Christian_Businesses_and_Services.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2666666667, "max_line_length": 216, "alphanum_fraction": 0.8038379531, "num_tokens": 89}
|
From Categories Require Import Essentials.Notations.
From Categories Require Import Essentials.Types.
From Categories Require Import Essentials.Facts_Tactics.
From Categories Require Import Category.Main.
From Categories Require Import Functor.Functor Functor.Functor_Ops
Functor.Representable.Hom_Func.
From Categories Require Import Functor.Functor_Extender.
From Categories Require Import NatTrans.NatTrans NatTrans.Operations
NatTrans.Func_Cat NatTrans.NatIso.
From Categories Require Import Ext_Cons.Prod_Cat.Prod_Cat
Ext_Cons.Prod_Cat.Operations Ext_Cons.Prod_Cat.Nat_Facts.
From Categories Require Import Adjunction.Adjunction.
From Categories Require Import KanExt.Local KanExt.LocalFacts.Uniqueness.
From Categories Require Import Basic_Cons.Terminal.
Local Open Scope functor_scope.
(** This module contains the conversion from local Kan extensions defined as cones
to local Kan extensions defined through the hom functor. *)
Section Local_Right_KanExt_to_Hom_Local_Right_KanExt.
Context {C C' : Category} {p : C --> C'}
{D : Category} {F : C --> D}
(lrke : Local_Right_KanExt p F).
(** The left to right side of Hom_Local_Right_KanExt isomorphism. *)
Program Definition Local_Right_KanExt_to_Hom_Local_Right_KanExt_Iso_LR :
(((@Fix_Bi_Func_2 _ (Func_Cat C D) _ F (Hom_Func (Func_Cat C D)))
∘ (Left_Functor_Extender p D)^op)
--> (@Fix_Bi_Func_2 _ (Func_Cat C' D)
_ lrke (Hom_Func (Func_Cat C' D))))%nattrans :=
{|
Trans := fun c h => LRKE_morph_ex lrke {|cone_apex := c; cone_edge := h|}
|}.
Next Obligation.
Proof.
extensionality x.
repeat rewrite NatTrans_id_unit_left.
match goal with
[|- cone_morph (LRKE_morph_ex lrke ?A) = ?X] =>
match X with
((cone_morph ?C) ∘ ?B)%nattrans =>
change X with
(cone_morph
(LoKan_Cone_Morph_compose
_
_
(Build_LoKan_Cone_Morph
p F A {|cone_apex := c; cone_edge := x|} h eq_refl) C
)
)
end
end.
apply LRKE_morph_unique.
Qed.
Next Obligation.
Proof.
symmetry.
apply Local_Right_KanExt_to_Hom_Local_Right_KanExt_Iso_LR_obligation_1.
Qed.
(** The right to left side of Hom_Local_Right_KanExt isomorphism. *)
Program Definition Local_Right_KanExt_to_Hom_Local_Right_KanExt_Iso_RL :
((@Fix_Bi_Func_2 _ (Func_Cat C' D) _ lrke (Hom_Func (Func_Cat C' D)))
--> ((@Fix_Bi_Func_2 _ (Func_Cat C D) _ F (Hom_Func (Func_Cat C D)))
∘ (Left_Functor_Extender p D)^op
))%nattrans
:=
{|
Trans := fun c h => (lrke ∘ (h ∘_h (NatTrans_id p)))%nattrans
|}.
Next Obligation.
Proof.
extensionality x.
repeat rewrite NatTrans_id_unit_left.
rewrite NatTrans_compose_assoc.
rewrite NatTrans_comp_hor_comp.
rewrite NatTrans_id_unit_right.
trivial.
Qed.
Next Obligation.
Proof.
symmetry.
apply Local_Right_KanExt_to_Hom_Local_Right_KanExt_Iso_RL_obligation_1.
Qed.
(** Conversion from Local_Right_KanExt Hom_Local_Right_KanExt isomorphism. *)
Program Definition Local_Right_KanExt_to_Hom_Local_Right_KanExt :
Hom_Local_Right_KanExt p F :=
{|
HLRKE := (cone_apex (LRKE lrke));
HLRKE_Iso :=
{|
iso_morphism := Local_Right_KanExt_to_Hom_Local_Right_KanExt_Iso_LR;
inverse_morphism :=
Local_Right_KanExt_to_Hom_Local_Right_KanExt_Iso_RL
|}
|}.
Next Obligation.
Proof.
apply NatTrans_eq_simplify.
extensionality h; extensionality x.
symmetry.
apply (cone_morph_com
(LRKE_morph_ex lrke {| cone_apex := h; cone_edge := x |})).
Qed.
Next Obligation.
Proof.
apply NatTrans_eq_simplify.
extensionality h; extensionality x.
cbn in *.
match goal with
[|- cone_morph (LRKE_morph_ex lrke ?A) = ?X] =>
change X with (cone_morph (Build_LoKan_Cone_Morph p F A lrke x eq_refl));
apply (LRKE_morph_unique lrke A)
end.
Qed.
End Local_Right_KanExt_to_Hom_Local_Right_KanExt.
|
{"author": "amintimany", "repo": "Categories", "sha": "1839108875df0107fa4f6061c654003decda2d49", "save_path": "github-repos/coq/amintimany-Categories", "path": "github-repos/coq/amintimany-Categories/Categories-1839108875df0107fa4f6061c654003decda2d49/KanExt/LocalFacts/ConesToHom.v"}
|
#!/usr/bin/python
import argparse
import random
import os
import subprocess
import math
import sys
import time
import copy
from numpy.random import choice as choices
from WES_simulator import *
from snp_rate import *
def main():
parser = argparse.ArgumentParser(description='Simulator for WES or WGS data', \
formatter_class=argparse.RawTextHelpFormatter)
group1 = parser.add_argument_group('Mandatory inputs')
group1.add_argument('-G', type=str, dest='genome_file', required=True, \
help='Reference genome FASTA file')
group1.add_argument('-T', type=str, dest='target_region_file', required=True, \
help='Target region file')
group2 = parser.add_argument_group('Arguments for simulating rearranged genomes')
group2.add_argument('-rN', dest='replaceNs', choices=['none','gap','all'], default="none", \
help='Replace sequences of "N"s by ATGC randomly? [none]')
group2.add_argument('-eN', dest='exclude_Ns', choices=['none','gap','all'], default="none", \
help='Exclude sequences of "N"s for CNV simulation? [none]')
group2.add_argument('-n_gap', dest='num_N_for_gaps', type=int, default = 50, \
help='Number of consecutive "N"s to be considered a gap region [50]')
group2.add_argument('-e_cnv', dest='target_cnv_list', type=str, default=None, \
help='A user-defined list of CNVs overlapping with target regions')
group2.add_argument('-e_chr', dest='target_cnv_chr', type=int, default = None, \
help='Number of CNVs overlapping with target regions to be generated on each chromosome')
group2.add_argument('-e_tol', dest='target_cnv_tol', type=int, default = None, \
help='Total number of CNVs overlapping with target regions to be generated across the genome (estimate)')
group2.add_argument('-e_cl', dest='target_cnv_len_file', type=str, default=None, \
help='User supplied file of CNV length for CNVs overlapping with target regions')
group2.add_argument('-o_cnv', dest='out_cnv_list', type=str, default=None, \
help='A user-defined list of CNVs outside of target regions')
group2.add_argument('-o_chr', dest='out_cnv_chr', type=int, default = None, \
help='Number of CNVs outside of target regions to be generated on each chromosome')
group2.add_argument('-o_tol', dest='out_cnv_tol', type=int, default = None, \
help='Total number of CNVs outside of target regions to be generated across the genome (estimate)')
group2.add_argument('-o_cl', dest='out_cnv_len_file', type=str, default=None, \
help='User supplied file of CNV length for CNVs outside of target regions')
group2.add_argument('-ol', dest='overlap_bps', type=int, default = 100, \
help='For each CNV overlapping with target regions, number of minimum overlapping bps [100]')
group2.add_argument('-min_len', dest='cnv_min_length', type=int, default=1000, \
help='Minimum CNV length [1000]')
group2.add_argument('-max_len', dest='cnv_max_length', type=int, default=100000, \
help='Maximum CNV length [100000]')
group2.add_argument('-min_cn', dest='min_copy_number', type=int, default=2, \
help='Minimum copy number for insertions [2]')
group2.add_argument('-max_cn', dest='max_copy_number', type=int, default=10, \
help='Maximum copy number for insertions [10]')
group2.add_argument('-p', dest='proportion_ins', type=float, default=0.5, \
help='Proportion of insertions [0.5]')
group2.add_argument('-f', dest='min_flanking_len', type=int, default=50, \
help='Minimum length between each CNV [50]')
group2.add_argument('-ms', dest='method_s', choices=['random','uniform','gauss'], default="random", \
help='Distribution of CNVs [random]')
group2.add_argument('-ml', dest='method_l', choices=['random','uniform','gauss','beta','user'], default="random", \
help='Distribution of CNV length [random]')
group2.add_argument('-as', dest='as1', type=float, default=None, \
help='Mu for Gaussian CNV distribution [0]')
group2.add_argument('-bs', dest='bs', type=float, default=None, \
help='Sigma for Gaussian CNV distribution [1]')
group2.add_argument('-al', dest='al', type=float, default=None, \
help='Mu (Gaussian distribution) or alpha (Beta distribution) for CNV length distribution [0 for Gaussian distribution, and 0.5 for Beta distribution]')
group2.add_argument('-bl', dest='bl', type=float, default=None, \
help='Sigma (Gaussian distribution) or beta (Beta distribution) for CNV length distribution [1 for Gaussian distribution, and 2.3 for Beta distribution]')
group2.add_argument('-s_r', dest='s_rate', type=float, default=0, \
help='Rate of SNPs in target regions [0]')
group2.add_argument('-s_s', dest='s_slack', type=int, default=0, \
help='Slack region up and down stream of target regions to simulate SNPs [0]')
group2.add_argument('-i_r', dest='i_rate', type=float, default=0, \
help='Rate of indels in target regions [0]')
group2.add_argument('-i_mlen', dest='i_max_len', type=int, default=50, \
help='The Maximum length of indels in target regions [50]. (If a deletion is equal or larger than the length of the target region it is in, the length of the deletion will be changed to (length of the target region it is in) - 1.)')
group3 = parser.add_argument_group('Arguments for simulating short reads (fastq)')
group3.add_argument('-nr', dest='nreads', type=int, default=10000, \
help='Number of reads / read pairs on target regions to be generated for each genome [10000]')
group3.add_argument('-fs', dest='frag_size', type=int, default=200, \
help='Mean fragment size to be generated [200]')
group3.add_argument('-s', dest='stdev', type=int, default=20, \
help='Standard deviation of fragment sizes [20]')
group3.add_argument('-l', dest='read_length', type=int, default=100, \
help='Read length of each short read [100]')
group3.add_argument('-tf', dest='target_region_flank', type=int, default=0, \
help='Length of flanking region up- and downstream of target regions to be sequenced (this step takes place after -clr). [0]')
group3.add_argument('-pr', dest='paired_end', action='store_true', \
help='Select if paired-end sequencing')
group3.add_argument('-q', dest='quality_score_offset', type=int, default=33, \
help='Quality score offset for short reads simulation [33]')
group3.add_argument('-clr', dest='connect_len_between_regions', type=int, default=None, \
help='Maximum length between target regions to connect the target regions.')
group3.add_argument('-m', dest='model', type=str, \
default=os.path.join(os.path.dirname(os.path.realpath(__file__)) + "/Illumina_HiSeq_2500_p.gzip"), \
help='GemSIM error model file (.gzip, need absolute path) [Illumina_HiSeq_2500_p]')
group4 = parser.add_argument_group('Arguments for general settings')
group4.add_argument('-o', dest='output_dir', type=str, default="simulation_output", \
help='Output directory [simulator_output]')
group4.add_argument('-rn', dest='rearranged_output_name', type=str, default="test", \
help='Prefix of the rearranged outputs (do not include directory name) [test]')
group4.add_argument('-n', dest='num_samples', type=int, default=1, \
help='Number of test samples to be generated [1]')
group4.add_argument('-sc', dest='sim_control', action='store_true', \
help='Simulation for control genome')
group4.add_argument('-ssr', dest='sim_short_reads', action='store_true', \
help='Simulate short reads (fastq) files')
group4.add_argument('-sb', dest='sim_bam', action='store_true', \
help='Simulate BAM files')
group4.add_argument('-picard', dest='path_to_picard', type=str, default=None, \
help='Absolute path to picard')
group4.add_argument('-GATK', dest='path_to_GATK', type=str, default=None, \
help='Absolute path to GATK')
args = parser.parse_args()
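# Example invocation (a sketch; file names are hypothetical and only one of
# the mutually exclusive CNV options, -e_chr, is shown):
#   python <this_script>.py -G genome.fa -T targets.bed -e_chr 2 -ssr -pr -o sim_out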
if not os.path.exists(args.genome_file):
log_print('Error: The reference genome file does not exist!')
exit(1)
if not os.path.exists(args.target_region_file):
log_print('Error: The target region file does not exist!')
exit(1)
param = {}
param['type'] = 'e'
param['genome_file'] = os.path.join(os.getcwd(), args.genome_file)
if args.target_region_file:
param['target_region_file'] = os.path.join(os.getcwd(), args.target_region_file)
param['replaceN'] = args.replaceNs
param['cnv_min_len'] = args.cnv_min_length
param['cnv_max_len'] = args.cnv_max_length
param['min_cn'] = args.min_copy_number
param['max_cn'] = args.max_copy_number
param['p_ins'] = args.proportion_ins
param['e_cnv_list'] = args.target_cnv_list
param['o_cnv_list'] = args.out_cnv_list
param['out_dir'] = os.path.join(os.getcwd(), args.output_dir)
param['e_cnv_chr'] = args.target_cnv_chr
param['e_cnv_tol'] = args.target_cnv_tol
param['o_cnv_chr'] = args.out_cnv_chr
param['o_cnv_tol'] = args.out_cnv_tol
param['overlap_bp'] = args.overlap_bps
param['tmp_dir'] = os.path.join(param['out_dir'], "tmp")
#param['rearranged_out'] = args.rearranged_output_name
param['nreads'] = args.nreads
param['frag_size'] = args.frag_size
param['stdev'] = args.stdev
param['read_length'] = args.read_length
param['paired_end'] = args.paired_end
param['qual'] = args.quality_score_offset
param['model'] = args.model
#param['sim_control'] = args.sim_control
param['sim_short_reads'] = args.sim_short_reads
param['sim_bam'] = args.sim_bam
param['path_to_picard'] = args.path_to_picard
param['path_to_GATK'] = args.path_to_GATK
param['method_s'] = args.method_s
param['method_l'] = args.method_l
param['e_cnv_len_file'] = args.target_cnv_len_file
param['o_cnv_len_file'] = args.out_cnv_len_file
param['opt'] = args.exclude_Ns
param['gapn'] = args.num_N_for_gaps
param['flank'] = args.min_flanking_len
param['fl'] = args.target_region_flank
param['inter'] = args.connect_len_between_regions
param['s_rate'] = args.s_rate
param['i_rate'] = args.i_rate
param['i_mlen'] = args.i_max_len
param['as'] = args.as1
param['bs'] = args.bs
param['al'] = args.al
param['bl'] = args.bl
param['snp_slack'] = args.s_slack
t = args.num_samples
if t < 1:
log_print("Error: The number of test samples (-n) must be at least 1!")
exit(1)
if (param['p_ins'] < 0) or (param['p_ins'] > 1):
log_print("Error: Insertion rate must be between 0 and 1.")
exit(1)
if (param['s_rate'] < 0) or (param['s_rate'] > 1):
log_print("Error: SNP rate must be between 0 and 1.")
exit(1)
if (param['i_rate'] < 0) or (param['i_rate'] > 1):
log_print("Error: indel rate must be between 0 and 1.")
exit(1)
if (param['i_mlen'] < 0):
log_print("Error: the maximium length of indels must > 0.")
exit(1)
if (param['snp_slack'] < 0):
log_print("Error: the slack region for making SNPS must > 0.")
exit(1)
if param['method_s'] == 'gauss':
if not param['as']:
param['as'] = 0
if not param['bs']:
param['bs'] = 1
if param['method_l'] == 'gauss':
if not param['al']:
param['al'] = 0
if not param['bl']:
param['bl'] = 1
if (param['method_l'] == 'beta'):
if not param['al']:
param['al'] = 2
if not param['bl']:
param['bl'] = 2
if (param['al']) and (param['al'] <= 0):
log_print("Error: alpha must > 0 for beta distribution.")
exit(1)
if (param['bl']) and (param['bl'] <= 0):
log_print("Error: beta must > 0 for beta distribution.")
exit(1)
if (param['method_s'] != 'gauss'):
if param['as'] or param['bs']:
log_print("Warning: parameters mu and sigma for CNV distribution are not used! (Only used in gauss distribution.)")
if (param['method_l'] != 'gauss') and (param['method_l'] != 'beta'):
if param['al'] or param['bl']:
log_print("Warning: parameters mu and sigma / alpha and beta for CNV length distribution are not used! (Only used in gauss and beta distribution.)")
if param['sim_bam']:
if (not param['path_to_picard']) or (not param['path_to_GATK']):
log_print('Error: Must provide path to picard (-picard) and path to GATK (-GATK)!')
exit(1)
if param['sim_short_reads'] and not param['paired_end']:
log_print("Warning: Chose single-end sequencing. Mean fragment size (-fs) and standard deviation of fragment size (-s) will be ignored.")
if param['type'] == 'e':
e_ct = 0
if param['e_cnv_list']:
e_ct += 1
if param['e_cnv_chr']:
e_ct += 1
if param['e_cnv_tol']:
e_ct += 1
if param['e_cnv_len_file']:
e_ct += 1
if e_ct != 1:
log_print('Error: One and only one of -e_cnv, -e_chr, -e_tol and -e_cl must be present!')
exit(1)
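# Illustrative case (hypothetical invocation): passing both -e_cnv and -e_chr makes e_ct == 2,
# so the run aborts here; exactly one way of specifying CNVs overlapping target regions is allowed.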
o_ct = 0
if param['o_cnv_list']:
o_ct += 1
if param['o_cnv_chr']:
o_ct += 1
if param['o_cnv_tol']:
o_ct += 1
if param['o_cnv_len_file']:
o_ct += 1
if not (o_ct == 0 or o_ct ==1):
log_print('Error: Only one of -o_cnv, -o_chr, -o_tol and -o_cl can be present!')
exit(1)
if param['e_cnv_list']:
log_print('Warning: A list of CNVs overlapping with target regions is provided. -em, -f, -ms, -ml, -ol, -min_cn, -max_cn, -min_len and -max_len will be ignored for CNVs on this list!')
if param['o_cnv_list']:
log_print('Warning: A list of CNVs outside of target regions is provided. -em, -f, -ms, -ml, -ol, -min_cn, -max_cn, -min_len and -max_len will be ignored for CNVs on this list!')
if param['method_l'] == 'user':
log_print('Warning: -min_len and -max_len will be ignored since "-ml user" is chosen!')
if not param['e_cnv_len_file']:
log_print('Error: "-ml user" must be used with -e_cl!')
exit(1)
if o_ct == 1 and not param['o_cnv_len_file']:
log_print('Error: If CNVs outside of target regions are to be generated, "-ml user" must be used with -o_cl!')
exit(1)
else:
if param['e_cnv_len_file']:
log_print('Error: Only "-ml user" could be used with -e_cl!')
exit(1)
if o_ct == 1 and param['o_cnv_len_file']:
log_print('Error: Only "-ml user" could be used with -o_cl!')
exit(1)
if param['sim_bam']:
if not param['sim_short_reads']:
log_print('Error: Must simulate short reads (-ssr) to simulate bam files!')
exit(1)
if os.path.exists(param['tmp_dir']):
subprocess.call(['rm', '-rf', param['tmp_dir']], stderr=None)
#shutil.rmtree(param['tmp_dir'])
os.makedirs(param['tmp_dir'])
else:
os.makedirs(param['tmp_dir'])
print(' ==================== SECNVs ==================== ')
sys.stdout.flush()
print(' SECNVs (2019) ')
sys.stdout.flush()
print(' Version 2.7.1 (Oct 2019) ')
sys.stdout.flush()
print(' Bug report: Yue Xing <yue.july.xing@gmail.com> ')
sys.stdout.flush()
print(' ------------------------------------------------------ ')
sys.stdout.flush()
log_print('Reading genome file...')
iin_seqs, iin_chrs, iin_ran_m = read_fasta(param['genome_file'], \
param['replaceN'], param['gapn'], param['out_dir'], \
param['opt'])
if param['type'] == 'e':
log_print('Reading target region file...')
iin_st, iin_ed = read_target(param['target_region_file'], iin_chrs)
if t == 1:
param['rearranged_out'] = args.rearranged_output_name
else:
log_print('Processing the 1st sample and control (if required)...')
param['rearranged_out'] = args.rearranged_output_name + "1"
simulate_WES(param, iin_seqs, iin_chrs, iin_st, iin_ed, args.sim_control, 0, iin_ran_m)
if t > 1:
for i in range(1,t):
mess = 'Processing sample ' + str(i+1) + '...'
log_print(mess)
param['rearranged_out'] = args.rearranged_output_name + str(i+1)
simulate_WES(param, iin_seqs, iin_chrs, iin_st, iin_ed, None, 1, iin_ran_m)
#shutil.rmtree(param['tmp_dir'])
subprocess.call(['rm', '-rf', param['tmp_dir']], stderr=None)
log_print('Done')
if __name__ == '__main__':
main()
|
{"hexsha": "5d4223c91f7bf46447cea8a9767abe84d0c7fee5", "size": 15517, "ext": "py", "lang": "Python", "max_stars_repo_path": "SECNVs.py", "max_stars_repo_name": "YJulyXing/SECNVs-SimulateCNVs-2.0-", "max_stars_repo_head_hexsha": "e1a4a7fe6ca4370e9fe3d7b92ecdf3ec5c55cbd4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-07-23T18:05:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T15:10:52.000Z", "max_issues_repo_path": "SECNVs.py", "max_issues_repo_name": "YJulyXing/SECNVs-SimulateCNVs-2.0-", "max_issues_repo_head_hexsha": "e1a4a7fe6ca4370e9fe3d7b92ecdf3ec5c55cbd4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-08-17T22:31:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-04T14:21:54.000Z", "max_forks_repo_path": "SECNVs.py", "max_forks_repo_name": "YJulyXing/SECNVs-SimulateCNVs-2.0-", "max_forks_repo_head_hexsha": "e1a4a7fe6ca4370e9fe3d7b92ecdf3ec5c55cbd4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-08-18T03:17:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-13T12:18:21.000Z", "avg_line_length": 44.4613180516, "max_line_length": 234, "alphanum_fraction": 0.6916929819, "include": true, "reason": "from numpy", "num_tokens": 4389}
|
module Dh_allDs
! contains functions that compute the dh/dsigma matrices of the material model
! and one main (model-independent) function that calls all dh/dsigma functions of the model
! and returns the function values as a matrix
use constants
use material_info
use derived_types
implicit none
contains
function DhDs(sigma, alpha)
! computes the dh/dsigma matrix
! in case of multi surface plasticity the dh/dsigma matrices of all yield surfaces
! of the model must be computed in this subroutine
implicit none
! --------------------------------------------------------------------------
! passed variables
real(kind=dbl), dimension(6), intent(in) :: sigma ! stress vector
real(kind=dbl), dimension(:), intent(in) :: alpha ! internal variables vector
! return variable
real(kind=dbl), dimension(size(alpha),6,n_yf) :: DhDs ! results array of rank 3
! internal variable
! --------------------------------------------------------------------------
! --------------------------------------
! computation of all dh/dsigma matrices (modified leon, ...)
! returning dh/dsigma matrices return array
! --------------------------------------
DhDs(:,:,1) = Dh_1Ds(sigma, alpha)
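! For a model with more than one yield surface, each additional dh/dsigma
! matrix would be assigned to the next slice of the result array in the same
! way; a hypothetical second surface (not part of this module) would read:
! DhDs(:,:,2) = Dh_2Ds(sigma, alpha)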
end function DhDs
function Dh_1Ds(sigma, alpha)
implicit none
! --------------------------------------------------------------------------
! passed variables
real(kind=dbl), dimension(6), intent(in) :: sigma ! stress vector
real(kind=dbl), dimension(:), intent(in) :: alpha ! internal variables vector
! return variable
real(kind=dbl), dimension(size(alpha,1),6) :: Dh_1Ds
! internal variables
! --------------------------------------------------------------------------
! ...
end function Dh_1Ds
end module Dh_allDs
|
{"hexsha": "493282eeec93523004b4a8c46ac828128d97f30c", "size": 1800, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "return_mapping/model_components_all/Dh_allDs.f90", "max_stars_repo_name": "yuyong1990/TsaiWu-Fortran", "max_stars_repo_head_hexsha": "a111ca1717adfbbaf3e9e34f4189a441e16441b8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2017-11-19T15:12:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T15:34:59.000Z", "max_issues_repo_path": "return_mapping/model_components_all/Dh_allDs.f90", "max_issues_repo_name": "OVGULIU/TsaiWu-Fortran", "max_issues_repo_head_hexsha": "a111ca1717adfbbaf3e9e34f4189a441e16441b8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "return_mapping/model_components_all/Dh_allDs.f90", "max_forks_repo_name": "OVGULIU/TsaiWu-Fortran", "max_forks_repo_head_hexsha": "a111ca1717adfbbaf3e9e34f4189a441e16441b8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2017-02-11T12:56:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T11:29:18.000Z", "avg_line_length": 27.2727272727, "max_line_length": 91, "alphanum_fraction": 0.5511111111, "num_tokens": 403}
|
"""Dataloader for language generation"""
from collections import Counter
from itertools import chain
import numpy as np
from .._utils.unordered_hash import UnorderedSha256
from .._utils.file_utils import get_resource_file_path
from .dataloader import BasicLanguageGeneration
from ..metric import MetricChain, PerplexityMetric, LanguageGenerationRecorder, \
HashValueRecorder
# pylint: disable=W0223
class LanguageGeneration(BasicLanguageGeneration):
r"""Base class for language modelling datasets. This is an abstract class.
Arguments:{ARGUMENTS}
Attributes:{ATTRIBUTES}
"""
ARGUMENTS = BasicLanguageGeneration.ARGUMENTS
ATTRIBUTES = BasicLanguageGeneration.ATTRIBUTES
def get_batch(self, key, index, needhash=False):
'''Get a batch of specified `index`.
Arguments:
key (str): must be contained in `key_name`
index (list): a list of specified index
needhash (bool): whether to return a hashvalue
representing this batch of data. Default: False.
Returns:
(dict): A dict at least contains:
* sent_length(list): A 1-d list, the length of sentence in each batch.
Size: `[batch_size]`
* sent(:class:`numpy.array`): A 2-d padding array containing id of words.
Only provide valid words. `unk_id` will be used if a word is not valid.
Size: `[batch_size, max(sent_length)]`
* sent_allvocabs(:class:`numpy.array`): A 2-d padding array containing id of words.
Provide both valid and invalid words.
Size: `[batch_size, max(sent_length)]`
* hashvalue(bytes): (If `needhash` is True.) A bytes representing hash value of the data.
Examples:
>>> # vocab_list = ["<pad>", "<unk>", "<go>", "<eos>", "how", "are", "you",
>>> # "hello", "i", "am", "fine"]
>>> dataloader.get_batch('train', [0, 1])
{
"sent": [
[2, 4, 5, 6, 3], # first sentence: <go> how are you <eos>
[2, 7, 3, 0, 0], # second sentence: <go> hello <eos> <pad> <pad>
],
"sent_length": [5, 3], # length of sentences
}
Todo:
* add invalid_vocab examples
* mark which array is np.array
'''
if key not in self.key_name:
raise ValueError("No set named %s." % key)
res = {}
batch_size = len(index)
res["sent_length"] = np.array( \
list(map(lambda i: len(self.data[key]['sent'][i]), index)))
res_sent = res["sent"] = np.zeros( \
(batch_size, np.max(res["sent_length"])), dtype=int)
for i, j in enumerate(index):
sentence = self.data[key]['sent'][j]
res["sent"][i, :len(sentence)] = sentence
if needhash:
unordered_hash = UnorderedSha256()
for j in index:
unordered_hash.update_data(repr((self.data[key]['sent'][j], self.valid_vocab_len)).encode())
res["hashvalue"] = unordered_hash.digest()
# hashvalue must be unique for representing the whole batch
res["sent_allvocabs"] = res_sent.copy()
res_sent[res_sent >= self.valid_vocab_len] = self.unk_id
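# Minimal sketch of the line above (hypothetical ids): with valid_vocab_len = 11
# and unk_id = 1, a row [2, 4, 13, 3] stays [2, 4, 13, 3] in "sent_allvocabs"
# but becomes [2, 4, 1, 3] in "sent", since 13 is an invalid-vocab id.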
return res
def get_teacher_forcing_metric(self, gen_log_prob_key="gen_log_prob"):
'''Get metric for teacher-forcing mode.
It contains:
* :class:`.metric.PerplexityMetric`
Arguments:
gen_log_prob_key (str): default: `gen_log_prob`. Refer to :class:`.metric.PerplexityMetric`
'''
metric = MetricChain()
metric.add_metric(HashValueRecorder(hash_key="teacher_forcing_hashvalue"))
metric.add_metric(PerplexityMetric(self, \
reference_allvocabs_key='sent_allvocabs', \
reference_len_key='sent_length', \
gen_log_prob_key=gen_log_prob_key))
return metric
def get_inference_metric(self, gen_key="gen"):
'''Get metric for inference.
It contains:
* :class:`.metric.LanguageGenerationRecorder`
Arguments:
gen_key (str): default: "gen". Refer to :class:`.metric.LanguageGenerationRecorder`
'''
metric = MetricChain()
metric.add_metric(HashValueRecorder(hash_key="inference_hashvalue"))
metric.add_metric(LanguageGenerationRecorder(self, \
gen_key=gen_key))
return metric
class MSCOCO(LanguageGeneration):
'''A dataloder for preprocessed MSCOCO dataset.
Arguments:
file_id (str): a str indicates the source of MSCOCO dataset.
file_type (str): a str indicates the type of MSCOCO dataset. Default: "MSCOCO"
min_vocab_times (int): A cut-off threshold for valid tokens. All tokens appearing
no less than `min_vocab_times` times in the **training set** will be marked as valid words.
Default: 10.
max_sen_length (int): All sentences longer than `max_sen_length` will be shortened
to the first `max_sen_length` tokens. Default: 50.
invalid_vocab_times (int): A cut-off threshold for invalid tokens. All tokens appearing
no less than `invalid_vocab_times` times in the **whole dataset** (except valid words) will be
marked as invalid words. Otherwise, they are unknown words in both the training and
testing stages. Default: 0 (no unknown words).
Refer to :class:`.LanguageGeneration` for attributes and methods.
References:
[1] http://images.cocodataset.org/annotations/annotations_trainval2017.zip
[2] Lin T Y, Maire M, Belongie S, et al. Microsoft COCO: Common Objects in Context. ECCV 2014.
'''
def __init__(self, file_id, file_type="MSCOCO", min_vocab_times=10, \
max_sen_length=50, invalid_vocab_times=0):
self._file_id = file_id
self._file_path = get_resource_file_path(file_id, file_type)
self._file_type = file_type
self._min_vocab_times = min_vocab_times
self._max_sen_length = max_sen_length
self._invalid_vocab_times = invalid_vocab_times
super(MSCOCO, self).__init__()
def _load_data(self):
r'''Loading dataset, invoked by `LanguageGeneration.__init__`
'''
origin_data = {}
for key in self.key_name:
f_file = open("%s/mscoco_%s.txt" % (self._file_path, key))
origin_data[key] = {}
origin_data[key]['sent'] = list( \
map(lambda line: line.split(), f_file.readlines()))
raw_vocab_list = list(chain(*(origin_data['train']['sent'])))
# Important: Sort the words preventing the index changes between
# different runs
vocab = sorted(Counter(raw_vocab_list).most_common(), \
key=lambda pair: (-pair[1], pair[0]))
left_vocab = list( \
filter( \
lambda x: x[1] >= self._min_vocab_times, \
vocab))
vocab_list = self.ext_vocab + list(map(lambda x: x[0], left_vocab))
valid_vocab_len = len(vocab_list)
valid_vocab_set = set(vocab_list)
for key in self.key_name:
if key == 'train':
continue
raw_vocab_list.extend(list(chain(*(origin_data[key]['sent']))))
vocab = sorted(Counter(raw_vocab_list).most_common(), \
key=lambda pair: (-pair[1], pair[0]))
left_vocab = list( \
filter( \
lambda x: x[1] >= self._invalid_vocab_times and x[0] not in valid_vocab_set, \
vocab))
vocab_list.extend(list(map(lambda x: x[0], left_vocab)))
print("valid vocab list length = %d" % valid_vocab_len)
print("vocab list length = %d" % len(vocab_list))
word2id = {w: i for i, w in enumerate(vocab_list)}
def line2id(line):
return ([self.go_id] + \
list(map(lambda word: word2id[word] if word in word2id else self.unk_id, line)) \
+ [self.eos_id])[:self._max_sen_length]
data = {}
data_size = {}
for key in self.key_name:
data[key] = {}
data[key]['sent'] = list(map(line2id, origin_data[key]['sent']))
data_size[key] = len(data[key]['sent'])
vocab = list(chain(*(origin_data[key]['sent'])))
vocab_num = len(vocab)
oov_num = len( \
list( \
filter( \
lambda word: word not in word2id, \
vocab)))
invalid_num = len( \
list( \
filter( \
lambda word: word not in valid_vocab_set, \
vocab))) - oov_num
length = list( \
map(len, origin_data[key]['sent']))
cut_num = np.sum( \
np.maximum( \
np.array(length) - \
self._max_sen_length + \
1, \
0))
print( \
"%s set. invalid rate: %f, unknown rate: %f, max length before cut: %d, cut word rate: %f" % \
(key, invalid_num / vocab_num, oov_num / vocab_num, max(length), cut_num / vocab_num))
return vocab_list, valid_vocab_len, data, data_size
|
{"hexsha": "b9c3d38a94d15c33de7b5812e5cf1089508886fd", "size": 7988, "ext": "py", "lang": "Python", "max_stars_repo_path": "contk/dataloader/language_generation.py", "max_stars_repo_name": "GentleSmile/contk", "max_stars_repo_head_hexsha": "14c86b16e064c3034f64f6c48a267a0a31f0c463", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "contk/dataloader/language_generation.py", "max_issues_repo_name": "GentleSmile/contk", "max_issues_repo_head_hexsha": "14c86b16e064c3034f64f6c48a267a0a31f0c463", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "contk/dataloader/language_generation.py", "max_forks_repo_name": "GentleSmile/contk", "max_forks_repo_head_hexsha": "14c86b16e064c3034f64f6c48a267a0a31f0c463", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5800865801, "max_line_length": 98, "alphanum_fraction": 0.6864046069, "include": true, "reason": "import numpy", "num_tokens": 2251}
|
import math
from typing import Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch import Tensor
from data import GenericTranslationDataset, BATCH_SIZE
class EncoderDecoderTransformer(nn.Module):
def __init__(
self,
d_model: int,
nhead: int,
num_encoder_layers: int,
num_decoder_layers: int,
src_vocab_size: int,
tgt_vocab_size: int,
src_key_padding: int,
tgt_key_padding: int,
device: torch.device,
dropout: float = 0.1
):
r"""Transformer with an encoder and decoder for seq2seq models.
Args:
d_model: the number of features in the encoder/decoder inputs
nhead: the number of heads in the multiheadattention models
num_encoder_layers: number of TransformerEncoderLayers
num_decoder_layers: number of TransformerDecoderLayers
src_vocab_size: number of inputs for source embeddings
tgt_vocab_size: number of inputs for target embeddings
src_key_padding: the key (value) of padding token ('<pad>') in src
tgt_key_padding: the key (value) of padding token ('<pad>') in tgt
dropout: dropout throughout model for training
"""
super(EncoderDecoderTransformer, self).__init__()
assert (d_model % nhead == 0), "d_model must be a multiple of nhead"
self.src_embedding = nn.Embedding(src_vocab_size, d_model)
self.src_positional_encoding = PositionalEncoding(d_model, dropout)
self.encoder = nn.TransformerEncoder(
nn.TransformerEncoderLayer(
d_model=d_model, nhead=nhead, dropout=dropout
), num_layers=num_encoder_layers
)
self.tgt_embedding = nn.Embedding(tgt_vocab_size, d_model)
self.tgt_positional_encoding = PositionalEncoding(d_model, dropout)
self.decoder = nn.TransformerDecoder(
nn.TransformerDecoderLayer(
d_model=d_model, nhead=nhead, dropout=dropout
), num_layers=num_decoder_layers
)
self.projection = nn.Linear(d_model, tgt_vocab_size)
self.d_model = d_model
self.src_key_padding = src_key_padding
self.tgt_key_padding = tgt_key_padding
self.device = device
def future_token_square_mask(self, sz: int) -> Tensor:
r"""Generate a square mask for the sequence. The masked positions are
filled with True. Unmasked positions are filled with False. This outputs
a BoolTensor which, according to PyTorch docs, will mask tokens
with non-zero (True) values.
Masking future tokens is only applicable to the decoder.
https://www.reddit.com/r/MachineLearning/comments/bjgpt2
/d_confused_about_using_masking_in_transformer/
torch.triu(..., diagonal=1) is required to avoid masking the
current token.
"""
mask = torch.triu(torch.ones(sz, sz), diagonal=1).bool()
return mask.to(self.device)
def forward(self, src: Tensor, tgt: Tensor) -> Tensor:
r"""Forward propagate data.
Shapes:
src: (S, N, E)
tgt: (T, N, E)
tgt_mask: (T, T)
src_padding_mask: (N, S)
tgt_padding_mask: (N, T)
"""
assert (src.shape[1] == tgt.shape[1])
tgt_seq_len, N = tgt.shape
src_padding_mask = (src == self.src_key_padding).transpose(0, 1)
tgt_padding_mask = (tgt == self.tgt_key_padding).transpose(0, 1)
tgt_future_token_mask = self.future_token_square_mask(tgt_seq_len)
src_embeds = self.src_embedding(src) * math.sqrt(self.d_model)
src_embeds = self.src_positional_encoding(src_embeds)
enc_src = self.encoder(
src_embeds, src_key_padding_mask=src_padding_mask
)
tgt_embeds = self.tgt_embedding(tgt) * math.sqrt(self.d_model)
tgt_embeds = self.tgt_positional_encoding(tgt_embeds)
out = self.decoder(
tgt_embeds, enc_src, tgt_mask=tgt_future_token_mask,
tgt_key_padding_mask=tgt_padding_mask
)
out = self.projection(out)
return out
class PositionalEncoding(nn.Module):
def __init__(
self,
d_model: int,
dropout: float = 0.1,
max_len: int = 5000
):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, d_model, 2).float() * -math.log(10000.0) / d_model
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x: Tensor):
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
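# A short sketch of what PositionalEncoding stores (standard sinusoidal scheme;
# the closed form below is a restatement, not code from this repo):
#   pe[pos, 2i]   = sin(pos / 10000**(2i / d_model))
#   pe[pos, 2i+1] = cos(pos / 10000**(2i / d_model))
# since div_term = exp(-(2i) * log(10000) / d_model) = 10000**(-2i / d_model).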
def get_local_data() -> Tuple[Tensor, Tensor]:
# 0 = <sod>
# 1 = <eod>
# 2 = <pad>
# 4 = <unk>
src_key_padding = 2
tgt_key_padding = 2
# "<sod> This is in english right this moment now <eod> <pad>"
src = torch.tensor([
[0, 9, 8, 7, 6, 5, 11, 3, 10, 1, src_key_padding],
[0, 5, 7, 4, 12, 6, 13, 7, 9, 8, 1]
]).transpose(0, 1)
# "<sod> Esto está en inglés ahora mismo <eod> <pad>"
tgt = torch.tensor([
[0, 3, 11, 5, 6, 7, 8, 1, tgt_key_padding],
[0, 9, 8, 7, 3, 1, tgt_key_padding, tgt_key_padding, tgt_key_padding]
]).transpose(0, 1)
return src, tgt
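# Shape note (sketch): after .transpose(0, 1) both tensors are
# (sequence_length, batch_size), i.e. src is (11, 2) and tgt is (9, 2),
# matching the (S, N) / (T, N) convention expected by forward().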
def main():
# (src, tgt) = get_local_data()
train_iter = GenericTranslationDataset()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
learning_rate = 1e-4
epochs = 50
batch_size = BATCH_SIZE
src_key_padding = train_iter.src_pad_idx
tgt_key_padding = train_iter.tgt_pad_idx
src_vocab_size = train_iter.src_vocab_len
tgt_vocab_size = train_iter.tgt_vocab_len
model = EncoderDecoderTransformer(
d_model=512, nhead=8, num_encoder_layers=6, num_decoder_layers=6,
src_vocab_size=src_vocab_size, tgt_vocab_size=tgt_vocab_size,
src_key_padding=src_key_padding, tgt_key_padding=tgt_key_padding,
device=device, dropout=0.1
)
criterion = nn.CrossEntropyLoss(ignore_index=tgt_key_padding)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
for epoch in range(epochs):
print(f'\r[Epoch {epoch + 1} / {epochs}]', end='', flush=True)
for (src, tgt) in train_iter:
# We must right shift the tgt inputs into the decoder, as stated
# by the original paper Attention Is All You Need
out = model(src, tgt[:-1])
# Reshape according to use of nn.CrossEntropyLoss
# input shape: (N, C)
# tgt shape: (N)
# collapsing will interweave the training samples but still
# generate the correct loss
out = out.reshape(-1, tgt_vocab_size)
# We don't want to predict the <sod> token, so we left shift the
# target to ensure it doesn't become a linear mapping
tgt_loss = tgt[1:].reshape(-1)
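# Shape sketch (hypothetical sizes): with tgt of shape (T, N), the decoder
# receives tgt[:-1] of shape (T-1, N), the logits collapse to
# ((T-1)*N, tgt_vocab_size), and tgt[1:] collapses to ((T-1)*N,), which is
# the (input, target) pair nn.CrossEntropyLoss expects.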
loss = criterion(out, tgt_loss)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
optimizer.step()
if epoch == epochs - 1:
softmax = nn.LogSoftmax(dim=1)
preds = softmax(out).argmax(dim=1).reshape(-1, batch_size)
print(f'\n\nTargets:\n{tgt}\n{tgt.shape}')
print(f'\nPredictions:\n{preds}\n{preds.shape}')
print(f'\nLoss: {loss}')
source = src[:, -1]
translated = preds[:, -1]
en_itos = np.vectorize(lambda x: train_iter.english.itos[x])
sp_itos = np.vectorize(lambda x: train_iter.spanish.itos[x])
source = ' '.join(en_itos(source))
translated = ' '.join(sp_itos(translated.numpy()))
print(f'\nEnglish: {source}\nSpanish: {translated}')
if __name__ == '__main__':
main()
|
{"hexsha": "ab7f6887d17a792025f739f1f1ac9b4bd8477fb8", "size": 8354, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/transformer.py", "max_stars_repo_name": "RyanElliott10/PyTorch-Transformer", "max_stars_repo_head_hexsha": "4ac9842712a9c0fa9a2396684ca92ef0d048fd05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-28T23:13:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-28T23:13:57.000Z", "max_issues_repo_path": "src/transformer.py", "max_issues_repo_name": "RyanElliott10/PyTorch-Transformer", "max_issues_repo_head_hexsha": "4ac9842712a9c0fa9a2396684ca92ef0d048fd05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/transformer.py", "max_forks_repo_name": "RyanElliott10/PyTorch-Transformer", "max_forks_repo_head_hexsha": "4ac9842712a9c0fa9a2396684ca92ef0d048fd05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2489451477, "max_line_length": 78, "alphanum_fraction": 0.6102465885, "include": true, "reason": "import numpy", "num_tokens": 2070}
|
from molsysmt._private_tools.exceptions import *
import numpy as np
from molsysmt.elements import entities
types = ["water", "ion", "cosolute", "protein", "peptide", "rna", "dna", "lipid", "small molecule"]
def _aux(item):
from molsysmt import get
from numpy import empty, full
entities = {}
n_entities = 0
n_atoms = get(item, target='system', n_atoms=True)
n_peptides = 0
n_proteins = 0
index_array = full(n_atoms, None, dtype=object)
name_array = full(n_atoms, None, dtype=object)
type_array = full(n_atoms, None, dtype=object)
molecule_index, molecule_type = get(item, target='molecule', molecule_index=True, molecule_type=True)
atom_indices_in_molecule = get(item, target='molecule', atom_index=True)
for m_index, m_type, m_atoms in zip(molecule_index, molecule_type, atom_indices_in_molecule):
if m_index is not None:
if m_type == 'water':
name = 'water'
type = 'water'
try:
index = entities[name]
except:
entities[name]=n_entities
index=n_entities
n_entities+=1
elif m_type == 'ion':
group_name = get(item, target='atom', indices=m_atoms, group_name=True)[0]
name = group_name
type = 'ion'
try:
index = entities[name]
except:
entities[name]=n_entities
index=n_entities
n_entities+=1
elif m_type == 'peptide':
name = 'Peptide_'+str(n_peptides)
type = 'peptide'
n_peptides+=1
try:
index = entities[name]
except:
entities[name]=n_entities
index=n_entities
n_entities+=1
elif m_type == 'protein':
name = 'Protein_'+str(n_proteins)
type = 'protein'
n_proteins+=1
try:
index = entities[name]
except:
entities[name]=n_entities
index=n_entities
n_entities+=1
elif m_type == 'lipid':
group_name = get(item, target='atom', indices=m_atoms[0], group_name=True)[0]
name = group_name
type = 'lipid'
try:
index = entities[name]
except:
entities[name]=n_entities
index=n_entities
n_entities+=1
elif m_type == 'small molecule':
group_name = get(item, target='atom', indices=m_atoms[0], group_name=True)[0]
name = group_name
type = 'small molecule'
try:
index = entities[name]
except:
entities[name]=n_entities
index=n_entities
n_entities+=1
else:
name = 'unknown'
type = 'unknown'
try:
index = entities[name]
except:
entities[name]=n_entities
index=n_entities
n_entities+=1
index_array[m_atoms]=index
type_array[m_atoms]=type
name_array[m_atoms]=name
del(molecule_index, molecule_type, atom_indices_in_molecule)
return index_array, name_array, type_array
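# Sketch of the return values (hypothetical system): for a box holding one
# protein followed by water molecules, every protein atom would get
# index 0 / name 'Protein_0' / type 'protein' and every water atom
# index 1 / name 'water' / type 'water'; all three arrays have one entry per atom.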
def entity_index_from_atom(item, indices='all'):
output, _, _ = _aux(item)
if indices != 'all':
output = output[indices]
return output
def entity_id_from_entity(item, indices='all'):
if indices == 'all':
from molsysmt.multitool import get
n_entities = get(item, target='system', n_entities=True)
output = np.full(n_entities, None, dtype=object)
else:
output = np.full(indices.shape[0], None, dtype=object)
return output
def entity_name_from_entity(item, indices='all'):
entity_index_from_atom, entity_name_from_atom, _ = _aux(item)
output=[]
if indices == 'all':
indices = np.unique(entity_index_from_atom)
for ii in indices:
atom_index = np.where(entity_index_from_atom==ii)[0][0]
output.append(entity_name_from_atom[atom_index])
output = np.array(output, dtype=object)
return output
def entity_type_from_entity(item, indices='all'):
entity_index_from_atom, _, entity_type_from_atom = _aux(item)
if indices == 'all':
indices = np.unique(entity_index_from_atom)
output=[]
for ii in indices:
atom_index = np.where(entity_index_from_atom==ii)[0][0]
output.append(entity_type_from_atom[atom_index])
output = np.array(output, dtype=object)
return output
def n_entities_from_system(item, indices='all'):
from molsysmt import get
entity_index_from_atom = get(item, target='atom', indices='all', entity_index=True)
if entity_index_from_atom[0] is None:
n_entities = 0
else:
output = np.unique(entity_index_from_atom)
n_entities = output.shape[0]
return n_entities
def type_from_MMTFDecoder_entity (mmtf_entity):
output = None
if mmtf_entity['type']=='water':
return 'water'
elif mmtf_entity['type']=='polymer':
return _get_type_from_sequence(mmtf_entity['sequence'])
elif mmtf_entity['type']=='non-polymer':
try:
entity_name = entities.mmtf_translator[mmtf_entity['description']]
output = entities.catalog[entity_name]
except:
raise NotImplementedError("The mmtf entity type {} is not implemented.".format(mmtf_entity))
return output
def _get_type_from_sequence(sequence):
from .molecule import _get_type_from_sequence as molecule_type_from_sequence
molecule_type = molecule_type_from_sequence(sequence)
if molecule_type == 'protein':
return 'protein'
elif molecule_type == 'dna':
return 'dna'
elif molecule_type == 'rna':
return 'rna'
elif molecule_type == 'peptide':
return 'peptide'
else:
return None
def _shortpath_to_build_entities(molecule_index_from_atom, molecule_type_from_atom, group_name_from_atom):
n_atoms = molecule_index_from_atom.shape[0]
not_None = np.where(molecule_index_from_atom!=None)
molecule_indices = np.unique(molecule_index_from_atom[not_None])
index_array = np.full(n_atoms, None, dtype=object)
id_array = np.full(n_atoms, None, dtype=object)
name_array = np.full(n_atoms, None, dtype=object)
type_array = np.full(n_atoms, None, dtype=object)
entities = {}
n_entities = 0
n_peptides = 0
n_proteins = 0
for molecule_index in molecule_indices:
mask = (molecule_index_from_atom==molecule_index)
molecule_type = molecule_type_from_atom[mask][0]
if molecule_type == 'water':
entity_name = 'water'
entity_type = 'water'
try:
entity_index = entities[entity_name]
except:
entities[entity_name]=n_entities
entity_index=n_entities
n_entities+=1
elif molecule_type == 'ion':
entity_name = group_name_from_atom[mask][0]
entity_type = 'ion'
try:
entity_index = entities[entity_name]
except:
entities[entity_name]=n_entities
entity_index=n_entities
n_entities+=1
elif molecule_type == 'peptide':
entity_name = 'Peptide_'+str(n_peptides)
entity_type = 'peptide'
n_peptides+=1
try:
entity_index = entities[entity_name]
except:
entities[entity_name]=n_entities
entity_index=n_entities
n_entities+=1
elif molecule_type == 'protein':
entity_name = 'Protein_'+str(n_proteins)
entity_type = 'protein'
n_proteins+=1
try:
entity_index = entities[entity_name]
except:
entities[entity_name]=n_entities
entity_index=n_entities
n_entities+=1
elif molecule_type == 'lipid':
entity_name = group_name_from_atom[mask][0]
entity_type = 'lipid'
try:
entity_index = entities[entity_name]
except:
entities[entity_name]=n_entities
entity_index=n_entities
n_entities+=1
elif molecule_type == 'small molecule':
entity_name = group_name_from_atom[mask][0]
entity_type = 'small molecule'
try:
entity_index = entities[entity_name]
except:
entities[entity_name]=n_entities
entity_index=n_entities
n_entities+=1
else:
entity_name = 'unknown'
entity_type = 'unknown'
try:
entity_index = entities[entity_name]
except:
entities[entity_name]=n_entities
entity_index=n_entities
n_entities+=1
index_array[mask]=entity_index
name_array[mask]=entity_name
type_array[mask]=entity_type
return index_array, id_array, name_array, type_array
|
{"hexsha": "871288dea14fbf1468e93863e963abf1ebecfc8a", "size": 9609, "ext": "py", "lang": "Python", "max_stars_repo_path": "molsysmt/elements/entity.py", "max_stars_repo_name": "uibcdf/MolSysMT", "max_stars_repo_head_hexsha": "9866a6fb090df9fff36af113a45164da4b674c09", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-06-02T03:55:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T04:43:52.000Z", "max_issues_repo_path": "molsysmt/elements/entity.py", "max_issues_repo_name": "uibcdf/MolSysMT", "max_issues_repo_head_hexsha": "9866a6fb090df9fff36af113a45164da4b674c09", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2020-06-24T00:55:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-16T22:09:19.000Z", "max_forks_repo_path": "molsysmt/elements/entity.py", "max_forks_repo_name": "uibcdf/MolSysMT", "max_forks_repo_head_hexsha": "9866a6fb090df9fff36af113a45164da4b674c09", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-17T18:55:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-17T18:55:25.000Z", "avg_line_length": 31.5049180328, "max_line_length": 106, "alphanum_fraction": 0.5700905401, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2047}
|
import numpy as np
from matplotlib import pyplot as plt
import advec_diff
plt.yscale("log")
runner = advec_diff.AdvecDiffRunner()
def case_sdc():
runner.variant = "sdc"
runner.coarse_factor = 1
runner.run()
t, r, rr, e, re = runner.results()
i = np.arange(0, len(r))
plt.plot(i, r, "^-", label="SDC")
runner.remove_files()
def case_mlsdc(coarse_factor):
runner.variant = "mlsdc"
runner.coarse_factor = coarse_factor
runner.run()
t, r, rr, e, re = runner.results()
i = np.arange(0, len(r))
plt.plot(i, r, "v-", label="MLSDC, coarse_factor={}".format(coarse_factor))
runner.remove_files()
def case_mlsdc2(coarse_factor):
runner.variant = "mlsdc"
runner.coarse_factor = coarse_factor
runner.run()
t, r, rr, e, re = runner.results()
i = np.arange(0, len(r))
plt.plot(i, r, "^-", label="MLSDC, coarse_factor={}".format(coarse_factor))
runner.remove_files()
case_sdc()
case_mlsdc(1)
case_mlsdc2(2)
plt.title(
(
"AdvecDiff, dt={}, t_end={}, max_iter={}, dof={},\n"
"nu={}, vel={}, abs_res_tol={}, {} Nodes\n"
).format(
runner.dt, runner.t_end, runner.num_iters, runner.num_dofs,
runner.nu, runner.vel, runner.abs_res_tol, runner.num_nodes
)
)
plt.xlabel("iteration")
plt.ylabel("absolute residuals")
plt.legend()
plt.savefig("plot/advec_diff/coarse_factor.pdf")
|
{"hexsha": "e90a66cad3d6944a78a3370ea749ebd49de583b5", "size": 1396, "ext": "py", "lang": "Python", "max_stars_repo_path": "advec_diff/coarse_factor.py", "max_stars_repo_name": "f-koehler/pfasst-analysis", "max_stars_repo_head_hexsha": "5a55fc6d4f5c7fd7ceec6c6c6354ad8231d361f3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-09-16T12:13:16.000Z", "max_stars_repo_stars_event_max_datetime": "2015-09-16T12:13:16.000Z", "max_issues_repo_path": "advec_diff/coarse_factor.py", "max_issues_repo_name": "f-koehler/pfasst-analysis", "max_issues_repo_head_hexsha": "5a55fc6d4f5c7fd7ceec6c6c6354ad8231d361f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2015-08-18T07:33:38.000Z", "max_issues_repo_issues_event_max_datetime": "2015-08-26T10:16:17.000Z", "max_forks_repo_path": "advec_diff/coarse_factor.py", "max_forks_repo_name": "f-koehler/pfasst-analysis", "max_forks_repo_head_hexsha": "5a55fc6d4f5c7fd7ceec6c6c6354ad8231d361f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.6610169492, "max_line_length": 79, "alphanum_fraction": 0.6396848138, "include": true, "reason": "import numpy", "num_tokens": 399}
|
\documentclass{standalone}
\begin{document}
\chapter*{Conclusions}\addcontentsline{toc}{chapter}{Conclusions}
\markboth{Conclusions}{Conclusions}
In this thesis work, I have developed, implemented and tested an automated pipeline for the identification of Ground Glass Opacities and Consolidation in chest CT scans of patients affected by COVID-19.
As a preliminary step, I have performed lung segmentation using a pre-trained U-Net. This step is followed by bronchial removal, to reduce the number of false positives. To perform the GGO and CS segmentation, I have applied colour quantization, which identifies the different areas inside the lung by grouping them by colour similarity. Since the lesions involve many neighbouring voxels, I have used the multi-channel properties of digital images to also encode neighbourhood information; this was done by assigning to each channel a different function of the image (median blurring, gamma correction, neighbourhood standard deviation and CLAHE). To achieve the segmentation, I have found the characteristic colour of each tissue by performing a k-means clustering in the colour space. This set of colours can be used to segment several scans by assigning each voxel to the nearest colour.
The pipeline was tested on $3$ datasets by comparing the segmentation with manual annotations, using specificity, sensitivity and a blind evaluation made by experts.
The pipeline has been shown to achieve segmentations consistent with the annotations in a small amount of time (less than $3\,min$) and does not require interaction with trained personnel. The segmentation presents a high specificity with a small rate of false positives. Even if the lesion areas are underestimated, the expert evaluation has shown that better segmentations were achieved by the pipeline in $31\%$ of the slices and by the annotation in $33\%$. In the remaining $35\%$, equal performance was observed. In any case, the segmentation and annotation appear to be consistent.
Further development is possible, such as the embedding of spatial information (not considered in this work), finer training and improvements in sensitivity.
In the end, after these preliminary tests, colour quantization has been shown to be a suitable approach for this kind of problem.
\end{document}
|
{"hexsha": "5289b6277907cfcba1182c55ec98a7fa2251425e", "size": 2308, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "tex/conclusions.tex", "max_stars_repo_name": "RiccardoBiondi/SCDthesis", "max_stars_repo_head_hexsha": "2506df1995e5ba239b28d2ca0b908ba55f81761b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tex/conclusions.tex", "max_issues_repo_name": "RiccardoBiondi/SCDthesis", "max_issues_repo_head_hexsha": "2506df1995e5ba239b28d2ca0b908ba55f81761b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tex/conclusions.tex", "max_forks_repo_name": "RiccardoBiondi/SCDthesis", "max_forks_repo_head_hexsha": "2506df1995e5ba239b28d2ca0b908ba55f81761b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 128.2222222222, "max_line_length": 887, "alphanum_fraction": 0.8058925477, "num_tokens": 465}
|
module TestProject
using StaticArrays
function dot(x)
v = SVector(x...)
return v'v
end
end # module
|
{"hexsha": "e9b246345cc58a3e4375a4bae893646c14b2595e", "size": 111, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/TestProject/src/TestProject.jl", "max_stars_repo_name": "jondeuce/MATDaemon.jl", "max_stars_repo_head_hexsha": "6a1de76acd835991e24f07067dd8c43c59e5e380", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-29T15:03:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-29T15:03:12.000Z", "max_issues_repo_path": "test/TestProject/src/TestProject.jl", "max_issues_repo_name": "jondeuce/MATDaemon.jl", "max_issues_repo_head_hexsha": "6a1de76acd835991e24f07067dd8c43c59e5e380", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-18T20:14:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-17T22:45:05.000Z", "max_forks_repo_path": "test/TestProject/src/TestProject.jl", "max_forks_repo_name": "jondeuce/MATDaemon.jl", "max_forks_repo_head_hexsha": "6a1de76acd835991e24f07067dd8c43c59e5e380", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 10.0909090909, "max_line_length": 21, "alphanum_fraction": 0.6756756757, "num_tokens": 31}
|
using QMTK
using Compat.Test
@testset "Utils" begin
using QMTK.Consts.Pauli
@test kronprod(sigmax, sigmax, sigmai) == kron(kron(sigmax, sigmax), sigmai)
@test sigmax ⊗ sigmay ⊗ sigmaz == kron(kron(sigmax, sigmay), sigmaz)
h = @kron sigmax[1] ⊗ sigmaz[3] + sigmax[2] ⊗ sigmay[4]
ans = kronprod(σ₁, σ₀, σ₃, σ₀) + kronprod(σ₀, σ₁, σ₀, σ₂)
@test h == ans
h = @kron sigmax[1] * (sigmaz[2] + sigmay[4])
ans = kronprod(σ₁, σ₃, σ₀, σ₀) + kronprod(σ₁, σ₀, σ₀, σ₂)
@test h == ans
h = @kron sigmax[5] * (sigmax[1] * (sigmaz[2] + sigmay[4]))
ans = kronprod(σ₁, σ₃, σ₀, σ₀, σ₁) + kronprod(σ₁, σ₀, σ₀, σ₂, σ₁)
@test h == ans
end
|
{"hexsha": "626dde69536bfc9840e62ad0e82421daf7c5f9e8", "size": 622, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/Utils.jl", "max_stars_repo_name": "Roger-luo/QMTK.jl", "max_stars_repo_head_hexsha": "90987261588fc8a4aefa73df2b1fb5d0c5a3f9d5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2018-03-09T17:37:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-18T01:27:09.000Z", "max_issues_repo_path": "test/Utils.jl", "max_issues_repo_name": "Roger-luo/QMTK.jl", "max_issues_repo_head_hexsha": "90987261588fc8a4aefa73df2b1fb5d0c5a3f9d5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 30, "max_issues_repo_issues_event_min_datetime": "2018-03-09T17:09:23.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-08T14:13:47.000Z", "max_forks_repo_path": "test/Utils.jl", "max_forks_repo_name": "Roger-luo/QMTK.jl", "max_forks_repo_head_hexsha": "90987261588fc8a4aefa73df2b1fb5d0c5a3f9d5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9166666667, "max_line_length": 76, "alphanum_fraction": 0.6318327974, "num_tokens": 289}
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
import os
import torch
import numpy as np
from tqdm import tqdm_notebook
import imageio
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from skimage import img_as_ubyte
import pdb
import glob
import natsort
from torch.autograd import Variable
import trimesh
import copy
import re
# io utils
from pytorch3d.io import load_obj, save_obj, save_ply, load_ply
# datastructures
from pytorch3d.structures import Meshes
# 3D transformations functions
from pytorch3d.transforms import Rotate, Translate
# rendering components
from pytorch3d.renderer import (
OpenGLPerspectiveCameras, look_at_view_transform, look_at_rotation,
RasterizationSettings, MeshRenderer, MeshRasterizer, BlendParams,
SoftSilhouetteShader, HardPhongShader, PointLights, HardFlatShader, DirectionalLights, cameras
)
import json
import csv
import open3d as o3d
device = torch.device("cuda:0")
torch.cuda.set_device(device)
# helper function for computing roation matrix in 3D
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = axis / torch.sqrt(torch.dot(axis, axis))
a = torch.cos(theta / 2.0)
b, c, d = -axis * torch.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot_mat = torch.empty(3,3)
rot_mat[0,0] = aa + bb - cc - dd
rot_mat[0,1] = 2 * (bc + ad)
rot_mat[0,2] = 2 * (bd - ac)
rot_mat[1,0] = 2 * (bc - ad)
rot_mat[1,1] = aa + cc - bb - dd
rot_mat[1,2] = 2 * (cd + ab)
rot_mat[2,0] = 2 * (bd + ac)
rot_mat[2,1] = 2 * (cd - ab)
rot_mat[2,2] = aa + dd - bb - cc
return rot_mat
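# Sanity-check sketch (hypothetical values, not from the original source):
# a 90-degree counterclockwise turn about the z axis maps x onto y, i.e.
#   R = rotation_matrix(torch.tensor([0., 0., 1.]), torch.tensor(math.pi / 2))
#   R @ torch.tensor([1., 0., 0.])  ->  approximately [0., 1., 0.]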
# helper function for loading and merging meshes
def merge_meshes(obj_path):
verts_list = torch.empty(0,3)
faces_list = torch.empty(0,3).long()
num_vtx = [0]
# merge meshes, load in ascending order
meshes = natsort.natsorted(glob.glob(obj_path+'/final/*_rescaled_sapien.obj'))
for part_mesh in meshes:
print('loading %s' %part_mesh)
mesh = o3d.io.read_triangle_mesh(part_mesh)
verts = torch.from_numpy(np.asarray(mesh.vertices)).float()
faces = torch.from_numpy(np.asarray(mesh.triangles)).long()
faces = faces + verts_list.shape[0]
verts_list = torch.cat([verts_list, verts])
faces_list = torch.cat([faces_list, faces])
num_vtx.append(verts_list.shape[0])
verts_list = verts_list.to(device)
faces_list = faces_list.to(device)
return verts_list, faces_list, num_vtx
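# Bookkeeping sketch (hypothetical sizes): with two parts of 100 and 50 vertices,
# num_vtx == [0, 100, 150], so part i owns verts_list[num_vtx[i]:num_vtx[i+1]];
# this is how the per-part slices (start/end) are taken further below.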
cad_folder = 'test' # cad data folder (after mesh fusion)
cad_classes = [f.name for f in os.scandir(cad_folder)]
for cad_category in cad_classes:
folder_path = os.path.join(cad_folder, cad_category)
object_paths = [f.path for f in os.scandir(folder_path)]
for obj_path in object_paths:
print('processing %s' % obj_path)
# load merged mesh and number of vtx for each part
verts_list, faces_list, num_vtx = merge_meshes(obj_path)
# load motion json file
with open(os.path.join(obj_path, 'motion.json')) as json_file:
motion = json.load(json_file)
# create gif writer
filename_output = os.path.join(obj_path, 'motion.gif')
writer = imageio.get_writer(filename_output, mode='I', duration=0.3)
vis = o3d.visualization.Visualizer()
vis.create_window(height=500, width=500)
distance = 2.4 # distance from camera to the object
elevation = 25 # angle of elevation in degrees
azimuth = 20 # azimuth angle in degrees
# at least render one frame
if len(motion) == 0:
motion['placeholder'] = {}
# rotate or translate individual part
for idx, key in enumerate(motion.keys()):
jointData = motion[key]
# rotation part
if jointData and jointData['type'] == 'revolute':
start = num_vtx[idx]
end = num_vtx[idx+1]
rot_orig = torch.FloatTensor(jointData['axis']['origin']).to(device)
rot_axis = torch.FloatTensor(jointData['axis']['direction']).to(device)
aa = math.pi*jointData['limit']['a'] / 180.0
bb = math.pi*jointData['limit']['b'] / 180.0
print(aa)
print(bb)
rot_angles = np.linspace(aa, bb, num=5)
rot_angles_rev = np.linspace(bb, aa, num=5)
angles = np.concatenate((rot_angles, rot_angles_rev),0)
for angle in angles:
verts = verts_list.clone()
faces = faces_list.clone()
# world coordinate to local coordinate (rotation origin)
verts[start:end, 0] -= rot_orig[0]
verts[start:end, 1] -= rot_orig[1]
verts[start:end, 2] -= rot_orig[2]
# rotate around the joint axis in local coordinates
init_value = torch.tensor([angle])
theta = Variable(init_value.cuda())
rot_mat = rotation_matrix(rot_axis, theta).float() # 3x3
verts[start:end,:] = torch.t(torch.mm(rot_mat.to(device),
torch.t(verts[start:end,:])))
# local coordinate to world coordinate
verts[start:end, 0] += rot_orig[0]
verts[start:end, 1] += rot_orig[1]
verts[start:end, 2] += rot_orig[2]
R, T = look_at_view_transform(distance, elevation, azimuth, device=device)
T = Translate(T, device=T.device)
R = Rotate(R, device=R.device)
MM = R.compose(T)
opt_mesh = o3d.geometry.TriangleMesh()
# transform
tmp = MM.transform_points(verts).detach().cpu().numpy()
tmp[:,0] *= -1
tmp[:,2] *= -1
# visualize
opt_mesh.vertices = o3d.utility.Vector3dVector(tmp)
opt_mesh.triangles = o3d.utility.Vector3iVector(faces_list.cpu().numpy())
opt_mesh.compute_vertex_normals()
vis.clear_geometries()
vis.add_geometry(opt_mesh)
vis.poll_events()
img = np.asarray(vis.capture_screen_float_buffer(True))
image = img_as_ubyte(img)
writer.append_data(image)
# translation part
elif jointData and jointData['type'] == 'prismatic':
start = num_vtx[idx]
end = num_vtx[idx+1]
trans_orig = torch.FloatTensor(jointData['axis']['origin']).to(device)
trans_axis = torch.FloatTensor(jointData['axis']['direction']).to(device)
aa = jointData['limit']['a']
bb = jointData['limit']['b']
trans_len = np.linspace(aa, bb, num=5)
trans_len_rev = np.linspace(bb, aa, num=5)
trans_lens = np.concatenate((trans_len, trans_len_rev),0)
for tran_len in trans_lens:
verts = verts_list.clone()
faces = faces_list.clone()
# world coordinate to local coordinate (rotation origin)
verts[start:end, 0] -= trans_orig[0]
verts[start:end, 1] -= trans_orig[1]
verts[start:end, 2] -= trans_orig[2]
# add value in translation direction
verts[start:end, 0] += (trans_axis[0] * tran_len)
verts[start:end, 1] += (trans_axis[1] * tran_len)
verts[start:end, 2] += (trans_axis[2] * tran_len)
# local coordinate to world coordinate
verts[start:end, 0] += trans_orig[0]
verts[start:end, 1] += trans_orig[1]
verts[start:end, 2] += trans_orig[2]
R, T = look_at_view_transform(distance, elevation, azimuth, device=device)
T = Translate(T, device=T.device)
R = Rotate(R, device=R.device)
MM = R.compose(T)
opt_mesh = o3d.geometry.TriangleMesh()
# transform
tmp = MM.transform_points(verts).detach().cpu().numpy()
tmp[:,0] *= -1
tmp[:,2] *= -1
# visualize
opt_mesh.vertices = o3d.utility.Vector3dVector(tmp)
opt_mesh.triangles = o3d.utility.Vector3iVector(faces_list.cpu().numpy())
opt_mesh.compute_vertex_normals()
vis.clear_geometries()
vis.add_geometry(opt_mesh)
vis.poll_events()
img = np.asarray(vis.capture_screen_float_buffer(True))
image = img_as_ubyte(img)
writer.append_data(image)
# no motion
else:
assert not jointData
# world --> view coordinate
R, T = look_at_view_transform(distance, elevation, azimuth, device=device)
T = Translate(T, device=T.device)
R = Rotate(R, device=R.device)
MM = R.compose(T)
opt_mesh = o3d.geometry.TriangleMesh()
# transform
tmp = MM.transform_points(verts_list).detach().cpu().numpy()
tmp[:,0] *= -1
tmp[:,2] *= -1
# visualize
opt_mesh.vertices = o3d.utility.Vector3dVector(tmp)
opt_mesh.triangles = o3d.utility.Vector3iVector(faces_list.cpu().numpy())
opt_mesh.compute_vertex_normals()
vis.clear_geometries()
vis.add_geometry(opt_mesh)
vis.poll_events()
img = np.asarray(vis.capture_screen_float_buffer(True))
image = img_as_ubyte(img)
writer.append_data(image)
vis.destroy_window()
writer.close()
|
{"hexsha": "d0ed0a91b61572a0c16f1ccf22d91e901abef6a6", "size": 10443, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocess/visualize_data.py", "max_stars_repo_name": "ChicyChen/my_d3d", "max_stars_repo_head_hexsha": "efe3eb1f8c270ca371f628b1d6eface3042ac9a7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 66, "max_stars_repo_stars_event_min_datetime": "2021-08-19T17:20:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T11:54:27.000Z", "max_issues_repo_path": "preprocess/visualize_data.py", "max_issues_repo_name": "ChicyChen/my_d3d", "max_issues_repo_head_hexsha": "efe3eb1f8c270ca371f628b1d6eface3042ac9a7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-09-12T03:35:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T06:52:39.000Z", "max_forks_repo_path": "preprocess/visualize_data.py", "max_forks_repo_name": "ChicyChen/my_d3d", "max_forks_repo_head_hexsha": "efe3eb1f8c270ca371f628b1d6eface3042ac9a7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-08-22T08:44:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-18T10:17:59.000Z", "avg_line_length": 36.0103448276, "max_line_length": 98, "alphanum_fraction": 0.5568323279, "include": true, "reason": "import numpy", "num_tokens": 2386}
|
import csv
import json
from glob import glob
from pprint import pprint
import pandas
from numpy import mean
files = glob('*.json')
results = {}
for file in files:
name = file.split(".")[0].split("_")
name = name[1] + " " + name[2]
data = json.load(open(file))
accuracy = mean([max(run["acc"]) for run in data])
uw_accuracy = mean([max(run["un_acc"]) for run in data])
f1 = mean([max(run["f1"]) for run in data])
results[name] = {
"accuracy": accuracy,
"uw_accuracy": uw_accuracy,
"f1": f1,
}
data = pandas.DataFrame().from_dict(results, orient='index')
with open('results.csv', 'w') as f:
data.to_csv(f, sep=',', encoding='utf-8')
pprint(results)
|
{"hexsha": "5dc9588f1fcd00666770295f6b042f9b947245d9", "size": 711, "ext": "py", "lang": "Python", "max_stars_repo_path": "nldrp/dnn/models/aggregate_scores.py", "max_stars_repo_name": "etzinis/nldrp", "max_stars_repo_head_hexsha": "3b6e24aa86a6d43bfd6f753b346739c00c282de3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2018-10-27T13:16:36.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-07T17:34:11.000Z", "max_issues_repo_path": "nldrp/dnn/models/aggregate_scores.py", "max_issues_repo_name": "etzinis/nldrp", "max_issues_repo_head_hexsha": "3b6e24aa86a6d43bfd6f753b346739c00c282de3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nldrp/dnn/models/aggregate_scores.py", "max_forks_repo_name": "etzinis/nldrp", "max_forks_repo_head_hexsha": "3b6e24aa86a6d43bfd6f753b346739c00c282de3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-10-29T16:20:09.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-12T16:06:52.000Z", "avg_line_length": 24.5172413793, "max_line_length": 60, "alphanum_fraction": 0.6132208158, "include": true, "reason": "from numpy", "num_tokens": 197}
|
import tensorflow as tf
import numpy as np
import logging
import hypertune
import argparse
import shutil
import os
def parse_tfrecord(example_data):
parsed = tf.io.parse_single_example(example_data, {
'size': tf.io.VarLenFeature(tf.int64),
'ref': tf.io.VarLenFeature(tf.float32),
'time': tf.io.FixedLenFeature([], tf.string),
'valid_time': tf.io.FixedLenFeature([], tf.string)
})
parsed['size'] = tf.sparse.to_dense(parsed['size'])
parsed['ref'] = tf.reshape(tf.sparse.to_dense(parsed['ref']), (1059, 1799))/60. # 0 to 1
return parsed
def read_dataset(pattern):
filenames = tf.io.gfile.glob(pattern)
ds = tf.data.TFRecordDataset(filenames, compression_type=None, buffer_size=None, num_parallel_reads=None)
return ds.prefetch(tf.data.experimental.AUTOTUNE).map(parse_tfrecord)
def create_model(nlayers=4, poolsize=4, numfilters=5, num_dense=0):
input_img = tf.keras.Input(shape=(1059, 1799, 1), name='refc_input')
x = tf.keras.layers.Cropping2D(cropping=((17, 18),(4, 3)), name='cropped')(input_img)
last_pool_layer = None
for layerno in range(nlayers):
x = tf.keras.layers.Conv2D(2**(layerno + numfilters), poolsize, activation='relu', padding='same', name='encoder_conv_{}'.format(layerno))(x)
last_pool_layer = tf.keras.layers.MaxPooling2D(poolsize, padding='same', name='encoder_pool_{}'.format(layerno))
x = last_pool_layer(x)
output_shape = last_pool_layer.output_shape[1:]
if num_dense == 0:
# flatten to create the embedding
x = tf.keras.layers.Flatten(name='refc_embedding')(x)
embed_size = output_shape[0] * output_shape[1] * output_shape[2]
if embed_size > 1024:
print("Embedding size={} is too large".format(embed_size))
return None, embed_size
else:
# flatten, send through dense layer to create the embedding
x = tf.keras.layers.Flatten(name='encoder_flatten')(x)
x = tf.keras.layers.Dense(num_dense, name='refc_embedding')(x)
x = tf.keras.layers.Dense(output_shape[0] * output_shape[1] * output_shape[2], name='decoder_dense')(x)
embed_size = num_dense
x = tf.keras.layers.Reshape(output_shape, name='decoder_reshape')(x)
for layerno in range(nlayers):
x = tf.keras.layers.Conv2D(2**(nlayers-layerno-1 + numfilters), poolsize, activation='relu', padding='same', name='decoder_conv_{}'.format(layerno))(x)
x = tf.keras.layers.UpSampling2D(poolsize, name='decoder_upsamp_{}'.format(layerno))(x)
before_padding_layer = tf.keras.layers.Conv2D(1, 3, activation='relu', padding='same', name='before_padding')
x = before_padding_layer(x)
htdiff = 1059 - before_padding_layer.output_shape[1]
wddiff = 1799 - before_padding_layer.output_shape[2]
if htdiff < 0 or wddiff < 0:
print("Invalid architecture: htdiff={} wddiff={}".format(htdiff, wddiff))
return None, 9999
decoded = tf.keras.layers.ZeroPadding2D(padding=((htdiff//2,htdiff - htdiff//2),
(wddiff//2,wddiff - wddiff//2)), name='refc_reconstructed')(x)
autoencoder = tf.keras.Model(input_img, decoded, name='autoencoder')
autoencoder.compile(optimizer='adam', loss=tf.keras.losses.LogCosh()) #loss='mse')
if autoencoder.count_params() > 1000*1000: # 1 million
print("Autoencoder too large: {} params".format(autoencoder.count_params()))
return None, autoencoder.count_params()
return autoencoder, embed_size
class HptCallback(tf.keras.callbacks.Callback):
def __init__(self):
self.hpt = hypertune.HyperTune()
def on_epoch_end(self, epoch, logs):
self.hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='final_loss',
metric_value=logs['loss'], #history.history['loss'][-1],
global_step=epoch
)
def run_job(opts):
def input_and_label(rec):
return rec['ref'], rec['ref']
ds = read_dataset(opts['input']).map(input_and_label).batch(opts['batch_size']).repeat()
checkpoint = tf.keras.callbacks.ModelCheckpoint(os.path.join(opts['job_dir'], 'checkpoints'))
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
autoencoder, error = create_model(opts['num_layers'], opts['pool_size'], opts['num_filters'], opts['num_dense'])
if autoencoder:
print(autoencoder.summary())
history = autoencoder.fit(ds, steps_per_epoch=opts['num_steps']//opts['num_checkpoints'],
epochs=opts['num_checkpoints'], shuffle=True, callbacks=[checkpoint, HptCallback()])
autoencoder.save(os.path.join(opts['job_dir'], 'savedmodel'))
else:
HptCallback().on_epoch_end(1, {'loss': error})
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Train an autoencoder')
parser.add_argument(
'--project',
default='',
help='Specify GCP project to bill to run on cloud')
parser.add_argument(
'--job-dir', required=True, help='output dir. could be local or on GCS')
parser.add_argument(
'--input', required=True, help='input pattern. eg: gs://ai-analytics-solutions-kfpdemo/wxsearch/data/tfrecord-*')
parser.add_argument(
'--batch_size', default=2, help='batch size for training', type=int)
parser.add_argument(
'--num_steps', default=12, help='total number of steps for training', type=int)
parser.add_argument(
        '--num_checkpoints', default=3, help='number of checkpoints; training is split into this many epochs', type=int)
parser.add_argument(
'--num_layers', default=4, help='number of conv layers in model', type=int)
parser.add_argument(
'--pool_size', default=4, help='size of upscaling/downscaling kernel', type=int)
parser.add_argument(
        '--num_filters', default=4, help='base-2 exponent of the number of filters in the first conv layer', type=int)
parser.add_argument(
'--num_dense', default=50, help='size of embedding if you want a dense layer. Specify 0 to use conv layers only', type=int)
    # parse command-line args
logging.basicConfig(level=getattr(logging, 'INFO', None))
tf.debugging.set_log_device_placement(True)
options = parser.parse_args().__dict__
outdir = options['job_dir']
if not options['project']:
print('Removing local output directory {} ... hang on'.format(outdir))
shutil.rmtree(outdir, ignore_errors=True)
os.makedirs(outdir)
else:
print('Removing GCS output directory {} ... hang on'.format(outdir))
try:
subprocess.check_call('gsutil -m rm -r {}'.format(outdir).split())
except: # pylint: disable=bare-except
pass
run_job(options)
|
{"hexsha": "da8b97f0026747c44386e0560814c1c0bd681c13", "size": 6880, "ext": "py", "lang": "Python", "max_stars_repo_path": "02_data_representation/weather_search/wxsearch/train_autoencoder.py", "max_stars_repo_name": "fanchi/ml-design-patterns", "max_stars_repo_head_hexsha": "6f686601d2385a11a517f8394324062ec6094e14", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1149, "max_stars_repo_stars_event_min_datetime": "2020-04-09T21:20:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T02:41:53.000Z", "max_issues_repo_path": "02_data_representation/weather_search/wxsearch/train_autoencoder.py", "max_issues_repo_name": "dfinke/ml-design-patterns", "max_issues_repo_head_hexsha": "6f686601d2385a11a517f8394324062ec6094e14", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2020-06-14T15:17:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-17T10:13:08.000Z", "max_forks_repo_path": "02_data_representation/weather_search/wxsearch/train_autoencoder.py", "max_forks_repo_name": "dfinke/ml-design-patterns", "max_forks_repo_head_hexsha": "6f686601d2385a11a517f8394324062ec6094e14", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 296, "max_forks_repo_forks_event_min_datetime": "2020-04-28T06:26:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T06:52:33.000Z", "avg_line_length": 45.8666666667, "max_line_length": 159, "alphanum_fraction": 0.6598837209, "include": true, "reason": "import numpy", "num_tokens": 1691}
|
import numpy as np
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
conf = SparkConf().setAppName("HW3").setMaster("local[2]")
sc = SparkContext(conf=conf)
# Map the data to a tuple of (hour, (project code, page name), page views)
# We keep the project code and page name together as a tuple key
def parse(i):
def result(r):
s = r.split()
        # The minus 5 maps hour 6 to index 1; index 0 of the feature vector
        # is reserved for the bias term
return (i-5, (s[0], s[1]), int(s[2]))
return result
def to_vector(r):
# Create a numpy array of 18 elements
n = np.zeros(18)
    # Set the array at index (hour - 5) to the number of page views,
    # unless it's the target value (hour 23), which we store separately
target = 0
if r[0] != 18:
n[r[0]] = r[2]
else:
target = r[2]
    # Our new tuple is ((project code, page name), (18-element array with
    # arr[hour-5] set to page views, target value))
# The feature vector and target value are wrapped in a tuple so they can be
# added with one call to np.add
return (r[1], (n.reshape(18,1), target))
def set_bias(r):
# r[1] is our inner tuple, r[1][0] is the feature vector, r[1][0][0] is the
# first term of the feature vector, which is the bias and should be 1
r[1][0][0] = 1
return r
def split_code_name(r):
s = r[0]
return (s[0], s[1], r[1][0], r[1][1])
# This one is for the server
#base = "/wikistats/{0}.txt"
# This one is for local testing
base = "~/HW3Data/{0}.txt"
rdds = []
for i in range(6,24):
f = base.format(i)
rdd = sc.textFile(f)
    # We use our function-returning function so each RDD captures its own hour,
    # avoiding late-binding problems with Spark's lazy evaluation
rdd = rdd.map(parse(i))
rdds.append(rdd)
# Combine all of our rdds
rdd = sc.union(rdds)
# We use our vector function from above
rdd = rdd.map(to_vector)
# We add all of the hours together, which is effectively adding a bunch of
# zeros and one page view count per column
rdd = rdd.reduceByKey(np.add)
# Set the bias term to 1
rdd = rdd.map(set_bias)
# Split the project code and project name out of the tuple we used earlier
rdd = rdd.map(split_code_name)
# Final format is (project code, project name, feature vector, target value)
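# Illustration (an assumed toy record, not from the dataset): a line "en Main_Page 42"
# in the hour-6 file parses to (1, ('en', 'Main_Page'), 42); after to_vector, reduceByKey,
# set_bias and split_code_name the record becomes ('en', 'Main_Page', v, target), where v
# is the 18x1 feature vector with v[0] = 1 (bias) and v[1] = 42 (hour 6), and target holds
# the hour-23 page views.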
|
{"hexsha": "e0d97f8f79b2ce209adda70eb053e48fe98fdb84", "size": 2276, "ext": "py", "lang": "Python", "max_stars_repo_path": "Spark/2a.py", "max_stars_repo_name": "bcspragu/Machine-Learning-Projects", "max_stars_repo_head_hexsha": "b6832cbb9bb27d7e8253300f97a3ab84b1a555dc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Spark/2a.py", "max_issues_repo_name": "bcspragu/Machine-Learning-Projects", "max_issues_repo_head_hexsha": "b6832cbb9bb27d7e8253300f97a3ab84b1a555dc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Spark/2a.py", "max_forks_repo_name": "bcspragu/Machine-Learning-Projects", "max_forks_repo_head_hexsha": "b6832cbb9bb27d7e8253300f97a3ab84b1a555dc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-09-26T13:13:03.000Z", "max_forks_repo_forks_event_max_datetime": "2018-09-26T13:13:03.000Z", "avg_line_length": 29.9473684211, "max_line_length": 79, "alphanum_fraction": 0.6577328647, "include": true, "reason": "import numpy", "num_tokens": 672}
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Inktank Storage, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/utsname.h>
#include <fstream>
#include <boost/algorithm/string.hpp>
#include "include/compat.h"
#include "include/util.h"
#include "common/debug.h"
#include "common/errno.h"
#include "common/version.h"
#ifdef HAVE_SYS_VFS_H
#include <sys/vfs.h>
#endif
#if defined(__APPLE__) || defined(__FreeBSD__)
#include <sys/param.h>
#include <sys/mount.h>
#if defined(__APPLE__)
#include <sys/types.h>
#include <sys/sysctl.h>
#endif
#endif
#include <string>
#include <stdio.h>
int get_fs_stats(ceph_data_stats_t &stats, const char *path)
{
if (!path)
return -EINVAL;
struct statfs stbuf;
int err = ::statfs(path, &stbuf);
if (err < 0) {
return -errno;
}
stats.byte_total = stbuf.f_blocks * stbuf.f_bsize;
stats.byte_used = (stbuf.f_blocks - stbuf.f_bfree) * stbuf.f_bsize;
stats.byte_avail = stbuf.f_bavail * stbuf.f_bsize;
stats.avail_percent = (((float)stats.byte_avail/stats.byte_total)*100);
return 0;
}
static char* value_sanitize(char *value)
{
while (isspace(*value) || *value == '"')
value++;
char* end = value + strlen(value) - 1;
while (end > value && (isspace(*end) || *end == '"'))
end--;
*(end + 1) = '\0';
return value;
}
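// value_set() stores the sanitized value under *key* when buf starts with *prefix*, e.g.
// (an illustrative /etc/os-release line, not from any particular distro) parsing
// buf = "PRETTY_NAME=\"Ubuntu 20.04 LTS\"\n" with prefix "PRETTY_NAME=" and key
// "distro_description" yields (*pm)["distro_description"] = "Ubuntu 20.04 LTS".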
static bool value_set(char *buf, const char *prefix,
map<string, string> *pm, const char *key)
{
if (strncmp(buf, prefix, strlen(prefix))) {
return false;
}
(*pm)[key] = value_sanitize(buf + strlen(prefix));
return true;
}
static void file_values_parse(const map<string, string>& kvm, FILE *fp, map<string, string> *m, CephContext *cct) {
char buf[512];
while (fgets(buf, sizeof(buf) - 1, fp) != NULL) {
for (auto& kv : kvm) {
if (value_set(buf, kv.second.c_str(), m, kv.first.c_str()))
continue;
}
}
}
static bool os_release_parse(map<string, string> *m, CephContext *cct)
{
#if defined(__linux__)
static const map<string, string> kvm = {
{ "distro", "ID=" },
{ "distro_description", "PRETTY_NAME=" },
{ "distro_version", "VERSION_ID=" }
};
FILE *fp = fopen("/etc/os-release", "r");
if (!fp) {
int ret = -errno;
lderr(cct) << "os_release_parse - failed to open /etc/os-release: " << cpp_strerror(ret) << dendl;
return false;
}
file_values_parse(kvm, fp, m, cct);
fclose(fp);
#elif defined(__FreeBSD__)
struct utsname u;
int r = uname(&u);
if (!r) {
m->insert(std::make_pair("distro", u.sysname));
m->insert(std::make_pair("distro_description", u.version));
m->insert(std::make_pair("distro_version", u.release));
}
#endif
return true;
}
static void distro_detect(map<string, string> *m, CephContext *cct)
{
if (!os_release_parse(m, cct)) {
lderr(cct) << "distro_detect - /etc/os-release is required" << dendl;
}
for (const char* rk: {"distro", "distro_description"}) {
if (m->find(rk) == m->end())
lderr(cct) << "distro_detect - can't detect " << rk << dendl;
}
}
int get_cgroup_memory_limit(uint64_t *limit)
{
// /sys/fs/cgroup/memory/memory.limit_in_bytes
// the magic value 9223372036854771712 or 0x7ffffffffffff000
// appears to mean no limit.
FILE *f = fopen(PROCPREFIX "/sys/fs/cgroup/memory/memory.limit_in_bytes", "r");
if (!f) {
return -errno;
}
char buf[100];
int ret = 0;
long long value;
char *line = fgets(buf, sizeof(buf), f);
if (!line) {
ret = -EINVAL;
goto out;
}
  if (sscanf(line, "%lld", &value) != 1) {
    ret = -EINVAL;
    goto out;
  }
if (value == 0x7ffffffffffff000) {
*limit = 0; // no limit
} else {
*limit = value;
}
out:
fclose(f);
return ret;
}
void collect_sys_info(map<string, string> *m, CephContext *cct)
{
// version
(*m)["ceph_version"] = pretty_version_to_str();
(*m)["ceph_version_short"] = ceph_version_to_str();
(*m)["ceph_release"] = ceph_release_to_str();
// kernel info
struct utsname u;
int r = uname(&u);
if (r >= 0) {
(*m)["os"] = u.sysname;
(*m)["kernel_version"] = u.release;
(*m)["kernel_description"] = u.version;
(*m)["hostname"] = u.nodename;
(*m)["arch"] = u.machine;
}
// but wait, am i in a container?
bool in_container = false;
if (const char *pod_name = getenv("POD_NAME")) {
(*m)["pod_name"] = pod_name;
in_container = true;
}
if (const char *container_name = getenv("CONTAINER_NAME")) {
(*m)["container_name"] = container_name;
in_container = true;
}
if (const char *container_image = getenv("CONTAINER_IMAGE")) {
(*m)["container_image"] = container_image;
in_container = true;
}
if (in_container) {
if (const char *node_name = getenv("NODE_NAME")) {
(*m)["container_hostname"] = u.nodename;
(*m)["hostname"] = node_name;
}
if (const char *ns = getenv("POD_NAMESPACE")) {
(*m)["pod_namespace"] = ns;
}
}
#ifdef __APPLE__
// memory
{
uint64_t size;
size_t len = sizeof(size);
r = sysctlbyname("hw.memsize", &size, &len, NULL, 0);
if (r == 0) {
(*m)["mem_total_kb"] = std::to_string(size);
}
}
{
xsw_usage vmusage;
size_t len = sizeof(vmusage);
r = sysctlbyname("vm.swapusage", &vmusage, &len, NULL, 0);
if (r == 0) {
(*m)["mem_swap_kb"] = std::to_string(vmusage.xsu_total);
}
}
// processor
{
char buf[100];
size_t len = sizeof(buf);
r = sysctlbyname("machdep.cpu.brand_string", buf, &len, NULL, 0);
if (r == 0) {
buf[len - 1] = '\0';
(*m)["cpu"] = buf;
}
}
#else
// memory
if (std::ifstream f{PROCPREFIX "/proc/meminfo"}; !f.fail()) {
for (std::string line; std::getline(f, line); ) {
std::vector<string> parts;
boost::split(parts, line, boost::is_any_of(":\t "), boost::token_compress_on);
if (parts.size() != 3) {
continue;
}
if (parts[0] == "MemTotal") {
(*m)["mem_total_kb"] = parts[1];
} else if (parts[0] == "SwapTotal") {
(*m)["mem_swap_kb"] = parts[1];
}
}
}
uint64_t cgroup_limit;
if (get_cgroup_memory_limit(&cgroup_limit) == 0 &&
cgroup_limit > 0) {
(*m)["mem_cgroup_limit"] = std::to_string(cgroup_limit);
}
// processor
if (std::ifstream f{PROCPREFIX "/proc/cpuinfo"}; !f.fail()) {
for (std::string line; std::getline(f, line); ) {
std::vector<string> parts;
boost::split(parts, line, boost::is_any_of(":"));
if (parts.size() != 2) {
continue;
}
boost::trim(parts[0]);
boost::trim(parts[1]);
if (parts[0] == "model name") {
(*m)["cpu"] = parts[1];
break;
}
}
}
#endif
// distro info
distro_detect(m, cct);
}
void dump_services(Formatter* f, const map<string, list<int> >& services, const char* type)
{
ceph_assert(f);
f->open_object_section(type);
for (map<string, list<int> >::const_iterator host = services.begin();
host != services.end(); ++host) {
f->open_array_section(host->first.c_str());
const list<int>& hosted = host->second;
for (list<int>::const_iterator s = hosted.begin();
s != hosted.end(); ++s) {
f->dump_int(type, *s);
}
f->close_section();
}
f->close_section();
}
void dump_services(Formatter* f, const map<string, list<string> >& services, const char* type)
{
ceph_assert(f);
f->open_object_section(type);
for (const auto& host : services) {
f->open_array_section(host.first.c_str());
const auto& hosted = host.second;
for (const auto& s : hosted) {
f->dump_string(type, s);
}
f->close_section();
}
f->close_section();
}
// If non-printable characters found then convert bufferlist to
// base64 encoded string indicating whether it did.
string cleanbin(bufferlist &bl, bool &base64, bool show)
{
bufferlist::iterator it;
for (it = bl.begin(); it != bl.end(); ++it) {
if (iscntrl(*it))
break;
}
if (it == bl.end()) {
base64 = false;
string result(bl.c_str(), bl.length());
return result;
}
bufferlist b64;
bl.encode_base64(b64);
string encoded(b64.c_str(), b64.length());
if (show)
encoded = "Base64:" + encoded;
base64 = true;
return encoded;
}
// If non-printable characters found then convert to "Base64:" followed by
// base64 encoding
string cleanbin(string &str)
{
bool base64;
bufferlist bl;
bl.append(str);
string result = cleanbin(bl, base64, true);
return result;
}
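// bytes2str() pretty-prints a byte count with a binary-prefix unit, e.g. (illustrative
// values) bytes2str(1536) returns "1kB" and bytes2str(10ULL << 30) returns "10GB".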
std::string bytes2str(uint64_t count) {
static char s[][2] = {"\0", "k", "M", "G", "T", "P", "E", "\0"};
int i = 0;
while (count >= 1024 && *s[i+1]) {
count >>= 10;
i++;
}
char str[128];
snprintf(str, sizeof str, "%" PRIu64 "%sB", count, s[i]);
return std::string(str);
}
|
{"hexsha": "f816ff41db0bd9a3f023e12fc055825880bae9af", "size": 8975, "ext": "cc", "lang": "C++", "max_stars_repo_path": "common/util.cc", "max_stars_repo_name": "liucxer/ceph-msg", "max_stars_repo_head_hexsha": "2e5c18c0c72253b283bfd3d0576033c0b515ce55", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "common/util.cc", "max_issues_repo_name": "liucxer/ceph-msg", "max_issues_repo_head_hexsha": "2e5c18c0c72253b283bfd3d0576033c0b515ce55", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "common/util.cc", "max_forks_repo_name": "liucxer/ceph-msg", "max_forks_repo_head_hexsha": "2e5c18c0c72253b283bfd3d0576033c0b515ce55", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5890410959, "max_line_length": 115, "alphanum_fraction": 0.6091364903, "num_tokens": 2698}
|
import numpy as np
import networkx as nx
from tqdm import tqdm
from numpy.linalg import inv
from sklearn.decomposition import TruncatedSVD
class BANE(object):
"""
Binarized Attributed Network Embedding Class (ICDM 2018).
"""
r"""An implementation of `"BANE" <https://arxiv.org/abs/1403.6652>`_
from the ICDM '18 paper "Binarized Attributed Network Embedding Class".
Args:
dimensions (int): Number of random walks. Default is 10.
svd_iterations (int): Length of random walks. Default is 80.
seed (int): Dimensionality of embedding. Default is 128.
alpha (float): Number of cores. Default is 4.
approximation_rounds (int):
binarization_rounds (int):
"""
def __init__(self, dimensions=32, svd_iterations=70, seed=42, alpha=0.3,
approximation_rounds=100, binarization_rounds=20):
self.dimensions = dimensions
self.svd_iterations = svd_iterations
self.seed = seed
self.alpha = alpha
self.approximation_rounds = approximation_rounds
self.binarization_rounds = binarization_rounds
def _create_target_matrix(self, graph):
"""
Creating a normalized sparse adjacency matrix target.
"""
weighted_graph = nx.Graph()
for (u, v) in graph.edges():
weighted_graph.add_edge(u, v, weight=1.0/graph.degree(u))
weighted_graph.add_edge(v, u, weight=1.0/graph.degree(v))
P = nx.adjacency_matrix(weighted_graph,
nodelist=range(graph.number_of_nodes()))
return P
def fit(self, graph, X):
"""
Creating a BANE embedding.
1. Running SVD.
        2. Running power iterations and CCD.
"""
self.P = self._create_target_matrix(graph)
self.X = X
print("\nFitting BANE model.\nBase SVD fitting started.")
self._fit_base_SVD_model()
print("SVD completed.\nFitting binary model.\n")
self._binary_optimize()
def _fit_base_SVD_model(self):
"""
Reducing the dimensionality with SVD in the 1st step.
"""
self.P = self.P.dot(self.X)
self.model = TruncatedSVD(n_components=self.dimensions,
n_iter=self.svd_iterations,
random_state=self.seed)
self.model.fit(self.P)
self.P = self.model.fit_transform(self.P)
def _update_G(self):
"""
Updating the kernel matrix.
"""
self.G = np.dot(self.B.transpose(), self.B)
self.G = self.G + self.alpha*np.eye(self.dimensions)
self.G = inv(self.G)
self.G = self.G.dot(self.B.transpose()).dot(self.P)
def _update_Q(self):
"""
Updating the rescaled target matrix.
"""
self.Q = self.G.dot(self.P.transpose()).transpose()
def _update_B(self):
"""
Updating the embedding matrix.
"""
for _ in tqdm(range(self.approximation_rounds), desc="Inner approximation:"):
for d in range(self.dimensions):
sel = [x for x in range(self.dimensions) if x != d]
self.B[:, d] = self.Q[:, d]-self.B[:, sel].dot(self.G[sel, :]).dot(self.G[:, d]).transpose()
self.B[:, d] = np.sign(self.B[:, d])
def _binary_optimize(self):
"""
Starting 2nd optimization phase with power iterations and CCD.
"""
self.B = np.sign(np.random.normal(size=(self.P.shape[0], self.dimensions)))
for _ in tqdm(range(self.binarization_rounds), desc="Iteration", leave=True):
self._update_G()
self._update_Q()
self._update_B()
def get_embedding(self):
r"""Getting the node embedding.
Return types:
* **embedding** *(Numpy array)* - The embedding of nodes.
"""
embedding = self.B
return embedding
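# Minimal usage sketch (the toy graph and feature matrix below are assumed for
# illustration and are not part of the original module):
if __name__ == "__main__":
    demo_graph = nx.newman_watts_strogatz_graph(100, 10, 0.2)
    demo_features = np.random.uniform(0, 1, size=(100, 200))
    demo_model = BANE(dimensions=16)
    demo_model.fit(demo_graph, demo_features)
    demo_embedding = demo_model.get_embedding()
    print(demo_embedding.shape)  # (100, 16); entries are sign-binarized (+/-1)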
|
{"hexsha": "1e8e8aa92ef0e474d6cd5214d63750cd1bf77e4d", "size": 3955, "ext": "py", "lang": "Python", "max_stars_repo_path": "karateclub/node_embedding/attributed/bane.py", "max_stars_repo_name": "Laeyoung/ainized-karateclub", "max_stars_repo_head_hexsha": "26d8e10d9cb15a7ae6bf43db6ec338a6ae4f9aa0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "karateclub/node_embedding/attributed/bane.py", "max_issues_repo_name": "Laeyoung/ainized-karateclub", "max_issues_repo_head_hexsha": "26d8e10d9cb15a7ae6bf43db6ec338a6ae4f9aa0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "karateclub/node_embedding/attributed/bane.py", "max_forks_repo_name": "Laeyoung/ainized-karateclub", "max_forks_repo_head_hexsha": "26d8e10d9cb15a7ae6bf43db6ec338a6ae4f9aa0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-08T07:38:37.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-08T07:38:37.000Z", "avg_line_length": 35.0, "max_line_length": 108, "alphanum_fraction": 0.590391909, "include": true, "reason": "import numpy,from numpy,import networkx", "num_tokens": 912}
|
import pickle
import numpy as np
import matplotlib.pyplot as plt
cumulative_rewards = pickle.load(open('cum_rewards_history-12.pkl', 'rb'))
epsilons = pickle.load(open('epsilon_history-12.pkl', 'rb'))
# Set general font size
plt.rcParams['font.size'] = '24'
ax = plt.subplot(211)
plt.title("Cumulative Rewards over Episodes", fontsize=24)
plt.plot(np.arange(len(cumulative_rewards)) + 1, cumulative_rewards)
# Set tick font size
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(16)
ax = plt.subplot(212)
plt.title("Epsilons over Episodes", fontsize=24)
plt.plot(np.arange(len(epsilons)) + 1, epsilons)
# Set tick font size
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(16)
plt.show()
|
{"hexsha": "704f55e74a79f652ec4729e05812129ec5c77041", "size": 755, "ext": "py", "lang": "Python", "max_stars_repo_path": "simulation/dqn-simulation/final/visualizer.py", "max_stars_repo_name": "pgabriela/dqn-jitsi-autoscaler", "max_stars_repo_head_hexsha": "b39eb335e584095ef66a9941dbe0b2ea21a02d4a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "simulation/dqn-simulation/final/visualizer.py", "max_issues_repo_name": "pgabriela/dqn-jitsi-autoscaler", "max_issues_repo_head_hexsha": "b39eb335e584095ef66a9941dbe0b2ea21a02d4a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "simulation/dqn-simulation/final/visualizer.py", "max_forks_repo_name": "pgabriela/dqn-jitsi-autoscaler", "max_forks_repo_head_hexsha": "b39eb335e584095ef66a9941dbe0b2ea21a02d4a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.962962963, "max_line_length": 74, "alphanum_fraction": 0.7456953642, "include": true, "reason": "import numpy", "num_tokens": 204}
|
#================================
# RESEARCH GROUP PROJECT [RGP]
#================================
# This file is part of the COMP3096 Research Group Project.
# System
import logging
# Gym Imports
import gym
from gym.spaces import Box, Discrete, Tuple
# PySC2 Imports
from pysc2.lib.actions import FUNCTIONS, FunctionCall
from pysc2.lib.features import SCREEN_FEATURES
# Numpy
import numpy as np
# Typing
from typing import List
from sc2g.env.unit_tracking import UnitTrackingEnv
# Setup
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# ==========================================================
# Only applies to movement-based mini-games with
# two friendly player units (eg. CollectMineralShards)
# ==========================================================
class MultiMovementDirectedEnv(UnitTrackingEnv):
def __init__(self, sc2_env, **kwargs):
super().__init__(sc2_env, **kwargs)
# Number of marines and adjacency (hardcoded)
self.number_of_marines = 2
self.number_adjacency = 8
# Specify observation and action space
screen_shape_observation = self.screen_shape + (1,)
self.observation_space = Box(low=0, high=SCREEN_FEATURES.player_relative.scale, shape=screen_shape_observation)
self.resolution = self.screen_shape[0] * self.screen_shape[1] # (width x height)
self.action_space = Discrete(self.resolution)
self.unravel_shape = (self.screen_shape[0], self.screen_shape[1])
def get_sc2_action(self, gym_action) -> List[FunctionCall]:
if len(self.state["player_units_stable"]) == 0:
return [FUNCTIONS.no_op()]
# Get coords by unravelling action. DQN only supports returning an integer as action.
# How unravel works:
# Ref: https://www.quora.com/What-is-a-simple-intuitive-example-for-the-unravel_index-in-Python
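        # e.g. (an illustration assuming an 84x84 screen): np.unravel_index(100, (84, 84)) == (1, 16)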
coords = np.unravel_index(gym_action, self.unravel_shape)
# Get gym action for each marine
gym_action_1, gym_action_2 = (coords[0] % self.number_adjacency, coords[1] % self.number_adjacency)
# Get current coordinates for each marine
marine_1_stable = self.state["player_units_stable"][0]
marine_2_stable = self.state["player_units_stable"][1]
# Get tags for each marine
marine_1_tag = marine_1_stable.tag.item()
marine_2_tag = marine_2_stable.tag.item()
# Get target coordinates for each marine
marine_1_curr_xy = next((unit.x, unit.y) for unit in self.state["player_units"] if unit.tag.item() == marine_1_tag)
marine_2_curr_xy = next((unit.x, unit.y) for unit in self.state["player_units"] if unit.tag.item() == marine_2_tag)
def get_target_xy(num, curr_coords):
# 0: Up
# 1: Down
# 2: Left
# 3: Right
# 4: Up + Left
# 5: Up + Right
# 6: Down + Left
# 7: Down + Right
target_xy = list(curr_coords)
# Determine target position
if num in (0, 4, 5):
# Up
target_xy[1] = max(0, curr_coords[1]-1)
if num in (1, 6, 7):
# Down
target_xy[1] = min(self.screen_shape[1]-1, curr_coords[1]+1)
if num in (2, 4, 6):
# Left
target_xy[0] = max(0, curr_coords[0]-1)
if num in (3, 5, 7):
# Right
target_xy[0] = min(self.screen_shape[0]-1, curr_coords[0]+1)
return tuple(target_xy)
marine_1_target_xy = get_target_xy(gym_action_1, marine_1_curr_xy)
marine_2_target_xy = get_target_xy(gym_action_2, marine_2_curr_xy)
# Assign action functions
actions = [FUNCTIONS.move_unit(marine_1_tag, "now", marine_1_target_xy), FUNCTIONS.move_unit(marine_2_tag, "now", marine_2_target_xy)]
return actions
|
{"hexsha": "72a1ba23557b588ec99e22b25ee684eded2010a2", "size": 3929, "ext": "py", "lang": "Python", "max_stars_repo_path": "sc2g/sc2g/env/movement/multi_movement_directed.py", "max_stars_repo_name": "kiriphorito/COMP3096---MARL", "max_stars_repo_head_hexsha": "5e05413b0980d60f4a3f2a17123178c93bb0b763", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-07-07T09:02:27.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-07T09:02:27.000Z", "max_issues_repo_path": "sc2g/sc2g/env/movement/multi_movement_directed.py", "max_issues_repo_name": "kiriphorito/COMP3096---MARL", "max_issues_repo_head_hexsha": "5e05413b0980d60f4a3f2a17123178c93bb0b763", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sc2g/sc2g/env/movement/multi_movement_directed.py", "max_forks_repo_name": "kiriphorito/COMP3096---MARL", "max_forks_repo_head_hexsha": "5e05413b0980d60f4a3f2a17123178c93bb0b763", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3796296296, "max_line_length": 142, "alphanum_fraction": 0.6116060066, "include": true, "reason": "import numpy", "num_tokens": 985}
|
# helpers to calc features / cols importance
import matplotlib.pyplot as plt
import numpy as np
def collapse_values(importance, features):
"""collapse cols w/ values (A_A, A_B, A_C for example into just A w/ sum(A_weights))"""
assert len(importance) == len(features)
assert abs(sum(importance) - 1) < 1e-10
cols = set([f.split('_')[0] for f in features])
importance2, features2 = [], []
for c in cols:
pairs = zip(features, importance)
w = sum([w for (f, w) in pairs if f.split('_')[0] == c])
features2.append(c)
importance2.append(w)
indices = np.argsort(importance2)[::-1] # sort by descending order
importance2 = [importance2[ix] for ix in indices] # should be in descending order
features2 = [features2[ix] for ix in indices]
assert abs(sum(importance2) - 1) < 1e-10
return importance2, features2
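# Illustration (an assumed toy input, not from this module): for one-hot style columns
# features = ['color_red', 'color_blue', 'size'] with importance = [0.1, 0.3, 0.6],
# collapse_values(importance, features) returns ([0.6, 0.4], ['size', 'color']):
# the two 'color_*' weights are summed and the result is sorted by descending weight.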
def calc_importance(features, feature_importance_weights, show_prc, collapse_vals=True):
"""show only top part of most important features"""
importance = feature_importance_weights
indices = np.argsort(importance)[::-1] # sort by descending order
importance_sorted = importance[indices] # should be in descending order
features2 = [features[ix] for ix in indices]
if collapse_vals:
importance_sorted, features2 = collapse_values(importance_sorted, features2)
def map_feature(col):
prefix = col
suffix = ''
max_len = 40
if len(prefix) + len(suffix) > max_len:
max_len -= len(suffix) - 3
prefix = prefix[:max_len] + '..'
return ''.join([prefix, suffix])
features2 = list(map(map_feature, features2))
if show_prc > 0:
part = round(len(feature_importance_weights) * show_prc / 100)
importance_part = importance_sorted[:part]
features_part = features2[:part]
plt.figure(1).canvas.set_window_title('Feature selection')
plt.subplots_adjust(left=0.5)
plt.title(f'Feature Importance (top {show_prc}%)' if show_prc < 100 else 'Feature Importance')
plt.barh(range(len(importance_part)), importance_part[::-1], color='b', align='center')
plt.yticks(range(len(importance_part)), features_part[::-1])
plt.xlabel('Relative Importance')
plt.show()
return features2, importance_sorted
###
|
{"hexsha": "04ea84827feff513f1aacb388e05b3a23712457b", "size": 2361, "ext": "py", "lang": "Python", "max_stars_repo_path": "util_cols_importance.py", "max_stars_repo_name": "pbogomolov1967/ml_features_importance", "max_stars_repo_head_hexsha": "b3b60de2810bf6660a583bec24635593afb12507", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "util_cols_importance.py", "max_issues_repo_name": "pbogomolov1967/ml_features_importance", "max_issues_repo_head_hexsha": "b3b60de2810bf6660a583bec24635593afb12507", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "util_cols_importance.py", "max_forks_repo_name": "pbogomolov1967/ml_features_importance", "max_forks_repo_head_hexsha": "b3b60de2810bf6660a583bec24635593afb12507", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7727272727, "max_line_length": 102, "alphanum_fraction": 0.6573485811, "include": true, "reason": "import numpy", "num_tokens": 581}
|
[STATEMENT]
lemma set_zip_tr[simp]: "(s, s') \<in> set (zip ss (tr_ss_f T ss)) \<longrightarrow> s' = tr_s_f T s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (s, s') \<in> set (zip ss (tr_ss_f T ss)) \<longrightarrow> s' = tr_s_f T s
[PROOF STEP]
by (induct ss, auto)
|
{"llama_tokens": 124, "file": "LightweightJava_Lightweight_Java_Equivalence", "length": 1}
|
import numpy as np
from rl687.environments.gridworld import Gridworld
import matplotlib.pyplot as plt
import time
def problemA():
"""
Have the agent uniformly randomly select actions. Run 10,000 episodes.
Report the mean, standard deviation, maximum, and minimum of the observed
discounted returns.
"""
# setting random seed for reproducibility
print ("Problem A")
env = Gridworld()
discounted_returns = []
for episode in range(10000):
# print (episode)
discounted_return = 0.0
while not env.isEnd:
state = env.state
action = np.random.choice([0,1,2,3])
# print (state, action)
actual_action, new_state, reward = env.step(action)
# print (actual_action, new_state, reward)
discounted_return += reward
# print (t)
env.reset()
# print(time_step)
discounted_returns.append(discounted_return)
print ("Mean ", np.mean(discounted_returns))
print ("Std Dev ", np.std(discounted_returns))
print ("Max ", np.max(discounted_returns))
print ("Min ", np.min(discounted_returns))
return discounted_returns
"""
Results:
Mean -9.123800743342608
Std Dev 7.197419765658342
Max 4.304672100000001
Min -46.72965105421902
"""
def problemB():
"""
    Run the optimal policy that you found for 10,000 episodes. Report the
mean, standard deviation, maximum, and minimum of the observed
discounted returns
"""
print ("Problem B")
optimal_policy_actions = [1,1,1,1,2, 0,1,1,1,2, 0,2,-1,2,2, 0,3,-1,1,2, 0,3,1,1,-1]
env = Gridworld()
discounted_returns = []
for t in range(10000):
# print (t)
discounted_return = 0.0
while not env.isEnd:
state = env.state
action = optimal_policy_actions[state]
# print (state, action)
actual_action, new_state, reward = env.step(action)
# print (actual_action, new_state, reward)
discounted_return += reward
discounted_returns.append(discounted_return)
env.reset()
print ("Mean ", np.mean(discounted_returns))
print ("Std Dev ", np.std(discounted_returns))
print ("Max ", np.max(discounted_returns))
print ("Min ", np.min(discounted_returns))
return discounted_returns
# plt.hist(sorted(discounted_returns), density = True, cumulative=True, label='CDF',
# histtype='step', alpha=0.8, color='k')
# plt.show()
"""
Results:
Mean 2.670185828345252
Std Dev 3.2895320487842836
Max 4.782969000000001
Min -25.05248903619
"""
def problemE():
"""
    Estimate Pr(S_19 = 22 | S_8 = 19) by running 1,000,000 episodes with a uniformly
    random policy started from state 19 (valid by the Markov property) and checking
    whether the state reached 11 steps later is 22.
"""
# setting random seed for reproducibility
print ("Problem E")
start_time = time.time()
env = Gridworld(startState = 19)
num_episodes = 1000000
count_s19_22_given_s8_19 = 0
for episode in range(num_episodes):
# print (episode)
time_step = 0
while (not env.isEnd) and time_step<12:
state = env.state
if time_step == 11 and state == 22:
count_s19_22_given_s8_19 += 1
action = np.random.choice([0,1,2,3])
env.step(action)
time_step += 1
# print (t)
env.reset()
print(count_s19_22_given_s8_19)
Pr_s19_22_given_s8_19 = (count_s19_22_given_s8_19*1.0)/num_episodes
end_time = time.time()
print ("Estimate of Pr(S_8=19 | S_19 = 22) = ", Pr_s19_22_given_s8_19)
print ("Execution time = ", end_time - start_time)
"""
    Estimate of Pr(S_19 = 22 | S_8 = 19) = 0.01873
"""
def quantile(discounted_returns):
num_returns=len(discounted_returns)
discounted_returns.sort()
cdf = []
print("CDF")
for r in sorted(set(discounted_returns)):
cdf.append (len([i for i in discounted_returns if i<= r])/num_returns)
return cdf, sorted(set(discounted_returns))
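# Illustration (an assumed toy input, not from the assignment): for discounted returns
# [1, 2, 2, 3], quantile() gives cdf = [0.25, 0.75, 1.0] over the sorted distinct values
# [1, 2, 3], i.e. 25% of episodes returned at most 1 and 75% returned at most 2.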
def main():
np.random.seed(123)
returns_A = problemA()
x,y = quantile(returns_A)
plt.title("Quantile Function for Random Policy")
plt.xlabel("Sample Fraction (CDF)")
plt.ylabel("Quantile (Discounted Return)")
plt.plot(x,y)
plt.show()
returns_B = problemB()
x,y = quantile(returns_B)
plt.plot(x,y)
plt.title("Quantile Function for Optimal Policy")
plt.xlabel("Sample Fraction (CDF)")
plt.ylabel("Quantiles (Discounted Return)")
plt.show()
quantile(returns_B)
# problemE()
main()
|
{"hexsha": "4964ec04144fb812fb44dff61db3fa95c8b4b426", "size": 4757, "ext": "py", "lang": "Python", "max_stars_repo_path": "homeworks/homework1.py", "max_stars_repo_name": "anshuman1811/cs687-reinforcementlearning", "max_stars_repo_head_hexsha": "cf30cc0ab2b0e515cd4b643fc55c60cc5f38a481", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-09-18T04:09:03.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-18T04:09:03.000Z", "max_issues_repo_path": "homeworks/homework1.py", "max_issues_repo_name": "anshuman1811/cs687-reinforcementlearning", "max_issues_repo_head_hexsha": "cf30cc0ab2b0e515cd4b643fc55c60cc5f38a481", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "homeworks/homework1.py", "max_forks_repo_name": "anshuman1811/cs687-reinforcementlearning", "max_forks_repo_head_hexsha": "cf30cc0ab2b0e515cd4b643fc55c60cc5f38a481", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1840490798, "max_line_length": 91, "alphanum_fraction": 0.6182467942, "include": true, "reason": "import numpy", "num_tokens": 1256}
|
"""A module containing the core class to specify a Factor Graph."""
from __future__ import annotations
import collections
import copy
import functools
import inspect
import typing
from dataclasses import asdict, dataclass
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
FrozenSet,
Hashable,
List,
Mapping,
Optional,
OrderedDict,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
)
import jax
import jax.numpy as jnp
import numpy as np
from jax.scipy.special import logsumexp
from pgmax.bp import infer
from pgmax.factors import FAC_TO_VAR_UPDATES
from pgmax.fg import groups, nodes
from pgmax.groups.enumeration import EnumerationFactorGroup
from pgmax.utils import cached_property
@dataclass
class FactorGraph:
"""Class for representing a factor graph.
Factors in a graph are clustered in factor groups, which are grouped according to their factor types.
Args:
variables: A single VariableGroup or a container containing variable groups.
If not a single VariableGroup, supported containers include mapping and sequence.
For a mapping, the keys of the mapping are used to index the variable groups.
For a sequence, the indices of the sequence are used to index the variable groups.
Note that if not a single VariableGroup, a CompositeVariableGroup will be created from
this input, and the individual VariableGroups will need to be accessed by indexing.
"""
variables: Union[
Mapping[Any, groups.VariableGroup],
Sequence[groups.VariableGroup],
groups.VariableGroup,
]
def __post_init__(self):
if isinstance(self.variables, groups.VariableGroup):
self._variable_group = self.variables
else:
self._variable_group = groups.CompositeVariableGroup(self.variables)
vars_num_states_cumsum = np.insert(
np.array(
[variable.num_states for variable in self._variable_group.variables],
dtype=int,
).cumsum(),
0,
0,
)
# Useful objects to build the FactorGraph
self._factor_types_to_groups: OrderedDict[
Type, List[groups.FactorGroup]
] = collections.OrderedDict(
[(factor_type, []) for factor_type in FAC_TO_VAR_UPDATES]
)
self._factor_types_to_variable_names_for_factors: OrderedDict[
Type, Set[FrozenSet]
] = collections.OrderedDict(
[(factor_type, set()) for factor_type in FAC_TO_VAR_UPDATES]
)
# See FactorGraphState docstrings for documentation on the following fields
self._num_var_states = vars_num_states_cumsum[-1]
self._vars_to_starts = MappingProxyType(
{
variable: vars_num_states_cumsum[vv]
for vv, variable in enumerate(self._variable_group.variables)
}
)
self._named_factor_groups: Dict[Hashable, groups.FactorGroup] = {}
def __hash__(self) -> int:
all_factor_groups = tuple(
[
factor_group
for factor_groups_per_type in self._factor_types_to_groups.values()
for factor_group in factor_groups_per_type
]
)
return hash(all_factor_groups)
def add_factor(
self,
variable_names: List,
factor_configs: np.ndarray,
log_potentials: Optional[np.ndarray] = None,
name: Optional[str] = None,
) -> None:
"""Function to add a single factor to the FactorGraph.
Args:
variable_names: A list containing the connected variable names.
Variable names are tuples of the type (variable_group_name, variable_name_within_variable_group)
factor_configs: Array of shape (num_val_configs, num_variables)
An array containing explicit enumeration of all valid configurations.
If the connected variables have n1, n2, ... states, 1 <= num_val_configs <= n1 * n2 * ...
factor_configs[config_idx, variable_idx] represents the state of variable_names[variable_idx]
in the configuration factor_configs[config_idx].
log_potentials: Optional array of shape (num_val_configs,).
If specified, log_potentials[config_idx] contains the log of the potential value for
the valid configuration factor_configs[config_idx].
If None, it is assumed the log potential is uniform 0 and such an array is automatically
initialized.
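        Example:
            A minimal sketch (the fg object and the variable names below are assumed
            for illustration and are not defined in this module) adding a pairwise
            factor that constrains two binary variables to take equal values::
                fg.add_factor(
                    variable_names=[("pixels", 0), ("pixels", 1)],
                    factor_configs=np.array([[0, 0], [1, 1]]),
                )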
"""
factor_group = EnumerationFactorGroup(
self._variable_group,
variable_names_for_factors=[variable_names],
factor_configs=factor_configs,
log_potentials=log_potentials,
)
self._register_factor_group(factor_group, name)
def add_factor_by_type(
self, variable_names: List, factor_type: type, *args, **kwargs
) -> None:
"""Function to add a single factor to the FactorGraph.
Args:
variable_names: A list containing the connected variable names.
Variable names are tuples of the type (variable_group_name, variable_name_within_variable_group)
factor_type: Type of factor to be added
args: Args to be passed to the factor_type.
kwargs: kwargs to be passed to the factor_type, and an optional "name" argument
for specifying the name of a named factor group.
Example:
To add an ORFactor to a FactorGraph fg, run::
fg.add_factor_by_type(
variable_names=variables_names_for_OR_factor,
factor_type=logical.ORFactor
)
"""
if factor_type not in FAC_TO_VAR_UPDATES:
raise ValueError(
f"Type {factor_type} is not one of the supported factor types {FAC_TO_VAR_UPDATES.keys()}"
)
name = kwargs.pop("name", None)
variables = tuple(self._variable_group[variable_names])
factor = factor_type(variables, *args, **kwargs)
factor_group = groups.SingleFactorGroup(
variable_group=self._variable_group,
variable_names_for_factors=[variable_names],
factor=factor,
)
self._register_factor_group(factor_group, name)
def add_factor_group(self, factory: Callable, *args, **kwargs) -> None:
"""Add a factor group to the factor graph
Args:
factory: Factory function that takes args and kwargs as input and outputs a factor group.
args: Args to be passed to the factory function.
kwargs: kwargs to be passed to the factory function, and an optional "name" argument
for specifying the name of a named factor group.
"""
name = kwargs.pop("name", None)
factor_group = factory(self._variable_group, *args, **kwargs)
self._register_factor_group(factor_group, name)
def _register_factor_group(
self, factor_group: groups.FactorGroup, name: Optional[str] = None
) -> None:
"""Register a factor group to the factor graph, by updating the factor graph state.
Args:
factor_group: The factor group to be registered to the factor graph.
name: Optional name of the factor group.
Raises:
ValueError: If the factor group with the same name or a factor involving the same variables
already exists in the factor graph.
"""
if name in self._named_factor_groups:
raise ValueError(
f"A factor group with the name {name} already exists. Please choose a different name!"
)
factor_type = factor_group.factor_type
for var_names_for_factor in factor_group.variable_names_for_factors:
var_names = frozenset(var_names_for_factor)
if (
var_names
in self._factor_types_to_variable_names_for_factors[factor_type]
):
raise ValueError(
f"A Factor of type {factor_type} involving variables {var_names} already exists. Please merge the corresponding factors."
)
self._factor_types_to_variable_names_for_factors[factor_type].add(var_names)
self._factor_types_to_groups[factor_type].append(factor_group)
if name is not None:
self._named_factor_groups[name] = factor_group
@functools.lru_cache(None)
def compute_offsets(self) -> None:
"""Compute factor messages offsets for the factor types and factor groups
        in the flattened array of messages.
Also compute log potentials offsets for factor groups.
See FactorGraphState for documentation on the following fields
        If offsets have already been compiled, do nothing.
"""
# Message offsets for ftov messages
self._factor_type_to_msgs_range = collections.OrderedDict()
self._factor_group_to_msgs_starts = collections.OrderedDict()
factor_num_states_cumsum = 0
# Log potentials offsets
self._factor_type_to_potentials_range = collections.OrderedDict()
self._factor_group_to_potentials_starts = collections.OrderedDict()
factor_num_configs_cumsum = 0
for factor_type, factors_groups_by_type in self._factor_types_to_groups.items():
factor_type_num_states_start = factor_num_states_cumsum
factor_type_num_configs_start = factor_num_configs_cumsum
for factor_group in factors_groups_by_type:
self._factor_group_to_msgs_starts[
factor_group
] = factor_num_states_cumsum
self._factor_group_to_potentials_starts[
factor_group
] = factor_num_configs_cumsum
factor_num_states_cumsum += sum(factor_group.factor_edges_num_states)
factor_num_configs_cumsum += (
factor_group.factor_group_log_potentials.shape[0]
)
self._factor_type_to_msgs_range[factor_type] = (
factor_type_num_states_start,
factor_num_states_cumsum,
)
self._factor_type_to_potentials_range[factor_type] = (
factor_type_num_configs_start,
factor_num_configs_cumsum,
)
self._total_factor_num_states = factor_num_states_cumsum
self._total_factor_num_configs = factor_num_configs_cumsum
@cached_property
def wiring(self) -> OrderedDict[Type, nodes.Wiring]:
"""Function to compile wiring for belief propagation.
        If wiring has already been compiled, do nothing.
Returns:
            A dictionary mapping each factor type to its wiring.
"""
wiring = collections.OrderedDict(
[
(
factor_type,
[
factor_group.compile_wiring(self._vars_to_starts)
for factor_group in self._factor_types_to_groups[factor_type]
],
)
for factor_type in self._factor_types_to_groups
]
)
wiring = collections.OrderedDict(
[
(factor_type, factor_type.concatenate_wirings(wiring[factor_type]))
for factor_type in wiring
]
)
return wiring
@cached_property
def log_potentials(self) -> OrderedDict[Type, np.ndarray]:
"""Function to compile potential array for belief propagation.
If potential array has already been compiled, do nothing.
Returns:
            A dictionary mapping each factor type to the array of the log of the potential
function for each valid configuration
"""
log_potentials = collections.OrderedDict()
for factor_type, factors_groups_by_type in self._factor_types_to_groups.items():
if len(factors_groups_by_type) == 0:
log_potentials[factor_type] = np.empty((0,))
else:
log_potentials[factor_type] = np.concatenate(
[
factor_group.factor_group_log_potentials
for factor_group in factors_groups_by_type
]
)
return log_potentials
@cached_property
def factors(self) -> OrderedDict[Type, Tuple[nodes.Factor, ...]]:
"""Mapping factor type to individual factors in the factor graph.
This function is only called on demand when the user requires it."""
print(
"Factors have not been added to the factor graph yet, this may take a while..."
)
factors: OrderedDict[Type, Tuple[nodes.Factor, ...]] = collections.OrderedDict(
[
(
factor_type,
tuple(
[
factor
for factor_group in self.factor_groups[factor_type]
for factor in factor_group.factors
]
),
)
for factor_type in self.factor_groups
]
)
return factors
@property
def factor_groups(self) -> OrderedDict[Type, List[groups.FactorGroup]]:
"""Tuple of factor groups in the factor graph"""
return self._factor_types_to_groups
@cached_property
def fg_state(self) -> FactorGraphState:
"""Current factor graph state given the added factors."""
# Preliminary computations
self.compute_offsets()
log_potentials = np.concatenate(
[self.log_potentials[factor_type] for factor_type in self.log_potentials]
)
return FactorGraphState(
variable_group=self._variable_group,
vars_to_starts=self._vars_to_starts,
num_var_states=self._num_var_states,
total_factor_num_states=self._total_factor_num_states,
named_factor_groups=copy.copy(self._named_factor_groups),
factor_type_to_msgs_range=copy.copy(self._factor_type_to_msgs_range),
factor_type_to_potentials_range=copy.copy(
self._factor_type_to_potentials_range
),
factor_group_to_potentials_starts=copy.copy(
self._factor_group_to_potentials_starts
),
log_potentials=log_potentials,
wiring=self.wiring,
)
@property
def bp_state(self) -> BPState:
"""Relevant information for doing belief propagation."""
# Preliminary computations
self.compute_offsets()
return BPState(
log_potentials=LogPotentials(fg_state=self.fg_state),
ftov_msgs=FToVMessages(fg_state=self.fg_state),
evidence=Evidence(fg_state=self.fg_state),
)
@dataclass(frozen=True, eq=False)
class FactorGraphState:
"""FactorGraphState.
Args:
variable_group: A variable group containing all the variables in the FactorGraph.
vars_to_starts: Maps variables to their starting indices in the flat evidence array.
flat_evidence[vars_to_starts[variable]: vars_to_starts[variable] + variable.num_var_states]
contains evidence to the variable.
num_var_states: Total number of variable states.
total_factor_num_states: Size of the flat ftov messages array.
named_factor_groups: Maps the names of named factor groups to the corresponding factor groups.
factor_type_to_msgs_range: Maps factors types to their start and end indices in the flat ftov messages.
factor_type_to_potentials_range: Maps factor types to their start and end indices in the flat log potentials.
factor_group_to_potentials_starts: Maps factor groups to their starting indices in the flat log potentials.
log_potentials: Flat log potentials array concatenated for each factor type.
wiring: Wiring derived for each factor type.
"""
variable_group: groups.VariableGroup
vars_to_starts: Mapping[nodes.Variable, int]
num_var_states: int
total_factor_num_states: int
named_factor_groups: Mapping[Hashable, groups.FactorGroup]
factor_type_to_msgs_range: OrderedDict[type, Tuple[int, int]]
factor_type_to_potentials_range: OrderedDict[type, Tuple[int, int]]
factor_group_to_potentials_starts: OrderedDict[groups.FactorGroup, int]
log_potentials: OrderedDict[type, None | np.ndarray]
wiring: OrderedDict[type, nodes.Wiring]
def __post_init__(self):
for field in self.__dataclass_fields__:
if isinstance(getattr(self, field), np.ndarray):
getattr(self, field).flags.writeable = False
if isinstance(getattr(self, field), Mapping):
object.__setattr__(self, field, MappingProxyType(getattr(self, field)))
@dataclass(frozen=True, eq=False)
class BPState:
"""Container class for belief propagation states, including log potentials,
ftov messages and evidence (unary log potentials).
Args:
log_potentials: log potentials of the model
ftov_msgs: factor to variable messages
evidence: evidence (unary log potentials) for variables.
Raises:
ValueError: If log_potentials, ftov_msgs or evidence are not derived from the same
FactorGraphState.
"""
log_potentials: LogPotentials
ftov_msgs: FToVMessages
evidence: Evidence
def __post_init__(self):
if (self.log_potentials.fg_state != self.ftov_msgs.fg_state) or (
self.ftov_msgs.fg_state != self.evidence.fg_state
):
raise ValueError(
"log_potentials, ftov_msgs and evidence should be derived from the same fg_state."
)
@property
def fg_state(self) -> FactorGraphState:
return self.log_potentials.fg_state
@functools.partial(jax.jit, static_argnames="fg_state")
def update_log_potentials(
log_potentials: jnp.ndarray,
updates: Dict[Any, jnp.ndarray],
fg_state: FactorGraphState,
) -> jnp.ndarray:
"""Function to update log_potentials.
Args:
log_potentials: A flat jnp array containing log_potentials.
updates: A dictionary containing updates for log_potentials
fg_state: Factor graph state
Returns:
A flat jnp array containing updated log_potentials.
Raises: ValueError if
(1) Provided log_potentials shape does not match the expected log_potentials shape.
(2) Provided name is not valid for log_potentials updates.
"""
for name in updates:
data = updates[name]
if name in fg_state.named_factor_groups:
factor_group = fg_state.named_factor_groups[name]
flat_data = factor_group.flatten(data)
if flat_data.shape != factor_group.factor_group_log_potentials.shape:
raise ValueError(
f"Expected log potentials shape {factor_group.factor_group_log_potentials.shape} "
f"for factor group {name}. Got incompatible data shape {data.shape}."
)
start = fg_state.factor_group_to_potentials_starts[factor_group]
log_potentials = log_potentials.at[start : start + flat_data.shape[0]].set(
flat_data
)
else:
raise ValueError(f"Invalid name {name} for log potentials updates.")
return log_potentials
@dataclass(frozen=True, eq=False)
class LogPotentials:
"""Class for storing and manipulating log potentials.
Args:
fg_state: Factor graph state
value: Optionally specify an initial value
Raises:
ValueError: If provided value shape does not match the expected log_potentials shape.
"""
fg_state: FactorGraphState
value: Optional[np.ndarray] = None
def __post_init__(self):
if self.value is None:
object.__setattr__(self, "value", self.fg_state.log_potentials)
else:
if not self.value.shape == self.fg_state.log_potentials.shape:
raise ValueError(
f"Expected log potentials shape {self.fg_state.log_potentials.shape}. "
f"Got {self.value.shape}."
)
object.__setattr__(self, "value", self.value)
def __getitem__(self, name: Any) -> np.ndarray:
"""Function to query log potentials for a named factor group or a factor.
Args:
name: Name of a named factor group, or a frozenset containing the set
of connected variables for the queried factor.
Returns:
The queried log potentials.
"""
value = cast(np.ndarray, self.value)
if not isinstance(name, Hashable):
name = frozenset(name)
if name in self.fg_state.named_factor_groups:
factor_group = self.fg_state.named_factor_groups[name]
start = self.fg_state.factor_group_to_potentials_starts[factor_group]
log_potentials = value[
start : start + factor_group.factor_group_log_potentials.shape[0]
]
else:
raise ValueError(f"Invalid name {name} for log potentials updates.")
return log_potentials
def __setitem__(
self,
name: Any,
data: Union[np.ndarray, jnp.ndarray],
):
"""Set the log potentials for a named factor group or a factor.
Args:
name: Name of a named factor group, or a frozenset containing the set
of connected variables for the queried factor.
data: Array containing the log potentials for the named factor group
or the factor.
"""
if not isinstance(name, Hashable):
name = frozenset(name)
object.__setattr__(
self,
"value",
np.asarray(
update_log_potentials(
jax.device_put(self.value),
{name: jax.device_put(data)},
self.fg_state,
)
),
)
@functools.partial(jax.jit, static_argnames="fg_state")
def update_ftov_msgs(
ftov_msgs: jnp.ndarray, updates: Dict[Any, jnp.ndarray], fg_state: FactorGraphState
) -> jnp.ndarray:
"""Function to update ftov_msgs.
Args:
ftov_msgs: A flat jnp array containing ftov_msgs.
updates: A dictionary containing updates for ftov_msgs
fg_state: Factor graph state
Returns:
A flat jnp array containing updated ftov_msgs.
Raises: ValueError if:
(1) provided ftov_msgs shape does not match the expected ftov_msgs shape.
(2) provided name is not valid for ftov_msgs updates.
"""
for names in updates:
data = updates[names]
if names in fg_state.variable_group.names:
variable = fg_state.variable_group[names]
if data.shape != (variable.num_states,):
raise ValueError(
f"Given belief shape {data.shape} does not match expected "
f"shape {(variable.num_states,)} for variable {names}."
)
var_states_for_edges = np.concatenate(
[
wiring_by_type.var_states_for_edges
for wiring_by_type in fg_state.wiring.values()
]
)
starts = np.nonzero(
var_states_for_edges == fg_state.vars_to_starts[variable]
)[0]
for start in starts:
ftov_msgs = ftov_msgs.at[start : start + variable.num_states].set(
data / starts.shape[0]
)
else:
raise ValueError(
"Invalid names for setting messages. "
"Supported names include a tuple of length 2 with factor "
"and variable names for directly setting factor to variable "
"messages, or a valid variable name for spreading expected "
"beliefs at a variable"
)
return ftov_msgs
@dataclass(frozen=True, eq=False)
class FToVMessages:
"""Class for storing and manipulating factor to variable messages.
Args:
fg_state: Factor graph state
value: Optionally specify initial value for ftov messages
Raises: ValueError if provided value does not match expected ftov messages shape.
"""
fg_state: FactorGraphState
value: Optional[np.ndarray] = None
def __post_init__(self):
if self.value is None:
object.__setattr__(
self, "value", np.zeros(self.fg_state.total_factor_num_states)
)
else:
if not self.value.shape == (self.fg_state.total_factor_num_states,):
raise ValueError(
f"Expected messages shape {(self.fg_state.total_factor_num_states,)}. "
f"Got {self.value.shape}."
)
object.__setattr__(self, "value", self.value)
@typing.overload
def __setitem__(
self,
names: Tuple[Any, Any],
data: Union[np.ndarray, jnp.ndarray],
) -> None:
"""Setting messages from a factor to a variable
Args:
names: A tuple of length 2
names[0] is the name of the factor
names[1] is the name of the variable
data: An array containing messages from factor names[0]
to variable names[1]
"""
@typing.overload
def __setitem__(
self,
names: Any,
data: Union[np.ndarray, jnp.ndarray],
) -> None:
"""Spreading beliefs at a variable to all connected factors
Args:
names: The name of the variable
data: An array containing the beliefs to be spread uniformly
across all factor to variable messages involving this
variable.
"""
def __setitem__(self, names, data) -> None:
if (
isinstance(names, tuple)
and len(names) == 2
and names[1] in self.fg_state.variable_group.names
):
names = (frozenset(names[0]), names[1])
object.__setattr__(
self,
"value",
np.asarray(
update_ftov_msgs(
jax.device_put(self.value),
{names: jax.device_put(data)},
self.fg_state,
)
),
)
@functools.partial(jax.jit, static_argnames="fg_state")
def update_evidence(
evidence: jnp.ndarray, updates: Dict[Any, jnp.ndarray], fg_state: FactorGraphState
) -> jnp.ndarray:
"""Function to update evidence.
Args:
evidence: A flat jnp array containing evidence.
updates: A dictionary containing updates for evidence
fg_state: Factor graph state
Returns:
A flat jnp array containing updated evidence.
"""
for name in updates:
data = updates[name]
if name in fg_state.variable_group.container_names:
if name is None:
variable_group = fg_state.variable_group
else:
assert isinstance(
fg_state.variable_group, groups.CompositeVariableGroup
)
variable_group = fg_state.variable_group.variable_group_container[name]
start_index = fg_state.vars_to_starts[variable_group.variables[0]]
flat_data = variable_group.flatten(data)
evidence = evidence.at[start_index : start_index + flat_data.shape[0]].set(
flat_data
)
else:
var = fg_state.variable_group[name]
start_index = fg_state.vars_to_starts[var]
evidence = evidence.at[start_index : start_index + var.num_states].set(data)
return evidence
@dataclass(frozen=True, eq=False)
class Evidence:
"""Class for storing and manipulating evidence
Args:
fg_state: Factor graph state
value: Optionally specify initial value for evidence
Raises: ValueError if provided value does not match expected evidence shape.
"""
fg_state: FactorGraphState
value: Optional[np.ndarray] = None
def __post_init__(self):
if self.value is None:
object.__setattr__(self, "value", np.zeros(self.fg_state.num_var_states))
else:
if self.value.shape != (self.fg_state.num_var_states,):
raise ValueError(
f"Expected evidence shape {(self.fg_state.num_var_states,)}. "
f"Got {self.value.shape}."
)
object.__setattr__(self, "value", self.value)
def __getitem__(self, name: Any) -> np.ndarray:
"""Function to query evidence for a variable
Args:
name: name for the variable
Returns:
evidence for the queried variable
"""
value = cast(np.ndarray, self.value)
variable = self.fg_state.variable_group[name]
start = self.fg_state.vars_to_starts[variable]
evidence = value[start : start + variable.num_states]
return evidence
def __setitem__(
self,
name: Any,
data: np.ndarray,
) -> None:
"""Function to update the evidence for variables
Args:
name: The name of a variable group or a single variable.
If name is the name of a variable group, updates are derived by using the variable group to
flatten the data.
If name is the name of a variable, data should be an array of shape (num_states,)
If name is None, updates are derived by using self.fg_state.variable_group to flatten the data.
data: Array containing the evidence updates.
"""
object.__setattr__(
self,
"value",
np.asarray(
update_evidence(
jax.device_put(self.value),
{name: jax.device_put(data)},
self.fg_state,
),
),
)
@jax.tree_util.register_pytree_node_class
@dataclass(frozen=True, eq=False)
class BPArrays:
"""Container for the relevant flat arrays used in belief propagation.
Args:
log_potentials: Flat log potentials array.
ftov_msgs: Flat factor to variable messages array.
evidence: Flat evidence array.
"""
log_potentials: Union[np.ndarray, jnp.ndarray]
ftov_msgs: Union[np.ndarray, jnp.ndarray]
evidence: Union[np.ndarray, jnp.ndarray]
def __post_init__(self):
for field in self.__dataclass_fields__:
if isinstance(getattr(self, field), np.ndarray):
getattr(self, field).flags.writeable = False
def tree_flatten(self):
return jax.tree_util.tree_flatten(asdict(self))
@classmethod
def tree_unflatten(cls, aux_data, children):
return cls(**aux_data.unflatten(children))
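# BPArrays is registered as a JAX pytree (decorator above) so that instances can
# be passed directly into and out of jit-compiled functions such as run_bp and
# get_beliefs below; tree_flatten/tree_unflatten convert between the frozen
# dataclass and its underlying flat arrays.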
@dataclass(frozen=True, eq=False)
class BeliefPropagation:
"""Belief propagation functions.
Arguments:
init: Function to create log_potentials, ftov_msgs and evidence.
Args:
log_potentials_updates: Optional dictionary containing log_potentials updates.
ftov_msgs_updates: Optional dictionary containing ftov_msgs updates.
evidence_updates: Optional dictionary containing evidence updates.
Returns:
A BPArrays with the log_potentials, ftov_msgs and evidence.
update: Function to update log_potentials, ftov_msgs and evidence.
Args:
bp_arrays: Optional arrays of log_potentials, ftov_msgs, evidence.
log_potentials_updates: Optional dictionary containing log_potentials updates.
ftov_msgs_updates: Optional dictionary containing ftov_msgs updates.
evidence_updates: Optional dictionary containing evidence updates.
Returns:
A BPArrays with the updated log_potentials, ftov_msgs and evidence.
run_bp: Function to run belief propagation for num_iters with a given damping factor.
Args:
bp_arrays: Initial arrays of log_potentials, ftov_msgs, evidence.
num_iters: Number of belief propagation iterations.
damping: The damping factor to use for message updates between one timestep and the next.
Returns:
A BPArrays containing the updated ftov_msgs.
to_bp_state: Function to reconstruct the BPState from a BPArrays.
Args:
bp_arrays: A BPArrays containing log_potentials, ftov_msgs, evidence.
Returns:
The reconstructed BPState
get_beliefs: Function to calculate beliefs from a BPArrays.
Args:
bp_arrays: A BPArrays containing log_potentials, ftov_msgs, evidence.
Returns:
beliefs: An array or a PyTree container containing the beliefs for the variables.
"""
init: Callable
update: Callable
run_bp: Callable
to_bp_state: Callable
get_beliefs: Callable
def BP(bp_state: BPState, temperature: float = 0.0) -> BeliefPropagation:
"""Function for generating belief propagation functions.
Args:
bp_state: Belief propagation state.
temperature: Temperature for loopy belief propagation.
1.0 corresponds to sum-product, 0.0 corresponds to max-product.
Returns:
Belief propagation functions.
"""
wiring = bp_state.fg_state.wiring
edges_num_states = np.concatenate(
[wiring[factor_type].edges_num_states for factor_type in FAC_TO_VAR_UPDATES]
)
max_msg_size = int(np.max(edges_num_states))
var_states_for_edges = np.concatenate(
[wiring[factor_type].var_states_for_edges for factor_type in FAC_TO_VAR_UPDATES]
)
# Inference arguments per factor type
inference_arguments: Dict[type, Mapping] = {}
for factor_type in FAC_TO_VAR_UPDATES:
this_inference_arguments = inspect.getfullargspec(
FAC_TO_VAR_UPDATES[factor_type]
).args
this_inference_arguments.remove("vtof_msgs")
this_inference_arguments.remove("log_potentials")
this_inference_arguments.remove("temperature")
this_inference_arguments = {
key: getattr(wiring[factor_type], key) for key in this_inference_arguments
}
inference_arguments[factor_type] = this_inference_arguments
factor_type_to_msgs_range = bp_state.fg_state.factor_type_to_msgs_range
factor_type_to_potentials_range = bp_state.fg_state.factor_type_to_potentials_range
def update(
bp_arrays: Optional[BPArrays] = None,
log_potentials_updates: Optional[Dict[Any, jnp.ndarray]] = None,
ftov_msgs_updates: Optional[Dict[Any, jnp.ndarray]] = None,
evidence_updates: Optional[Dict[Any, jnp.ndarray]] = None,
) -> BPArrays:
"""Function to update belief propagation log_potentials, ftov_msgs, evidence.
Args:
bp_arrays: Optional arrays of log_potentials, ftov_msgs, evidence.
log_potentials_updates: Optional dictionary containing log_potentials updates.
ftov_msgs_updates: Optional dictionary containing ftov_msgs updates.
evidence_updates: Optional dictionary containing evidence updates.
Returns:
A BPArrays with the updated log_potentials, ftov_msgs and evidence.
"""
if bp_arrays is not None:
log_potentials = bp_arrays.log_potentials
evidence = bp_arrays.evidence
ftov_msgs = bp_arrays.ftov_msgs
else:
log_potentials = jax.device_put(bp_state.log_potentials.value)
ftov_msgs = bp_state.ftov_msgs.value
evidence = bp_state.evidence.value
if log_potentials_updates is not None:
log_potentials = update_log_potentials(
log_potentials, log_potentials_updates, bp_state.fg_state
)
if ftov_msgs_updates is not None:
ftov_msgs = update_ftov_msgs(
ftov_msgs, ftov_msgs_updates, bp_state.fg_state
)
if evidence_updates is not None:
evidence = update_evidence(evidence, evidence_updates, bp_state.fg_state)
return BPArrays(
log_potentials=log_potentials, ftov_msgs=ftov_msgs, evidence=evidence
)
def run_bp(
bp_arrays: BPArrays,
num_iters: int,
damping: float = 0.5,
) -> BPArrays:
"""Function to run belief propagation for num_iters with a damping_factor.
Args:
bp_arrays: Initial arrays of log_potentials, ftov_msgs, evidence.
num_iters: Number of belief propagation iterations.
damping: The damping factor to use for message updates between one timestep and the next.
Returns:
A BPArrays containing the updated ftov_msgs.
"""
log_potentials = bp_arrays.log_potentials
evidence = bp_arrays.evidence
ftov_msgs = bp_arrays.ftov_msgs
# Normalize the messages to ensure the maximum value is 0.
ftov_msgs = infer.normalize_and_clip_msgs(
ftov_msgs, edges_num_states, max_msg_size
)
@jax.checkpoint
def update(msgs: jnp.ndarray, _) -> Tuple[jnp.ndarray, None]:
# Compute new variable to factor messages by message passing
vtof_msgs = infer.pass_var_to_fac_messages(
msgs,
evidence,
var_states_for_edges,
)
ftov_msgs = jnp.zeros_like(vtof_msgs)
for factor_type in FAC_TO_VAR_UPDATES:
msgs_start, msgs_end = factor_type_to_msgs_range[factor_type]
potentials_start, potentials_end = factor_type_to_potentials_range[
factor_type
]
ftov_msgs_type = FAC_TO_VAR_UPDATES[factor_type](
vtof_msgs=vtof_msgs[msgs_start:msgs_end],
log_potentials=log_potentials[potentials_start:potentials_end],
temperature=temperature,
**inference_arguments[factor_type],
)
ftov_msgs = ftov_msgs.at[msgs_start:msgs_end].set(ftov_msgs_type)
# Use the results of message passing to perform damping and
# update the factor to variable messages
delta_msgs = ftov_msgs - msgs
msgs = msgs + (1 - damping) * delta_msgs
# Normalize and clip these damped, updated messages before
# returning them.
msgs = infer.normalize_and_clip_msgs(msgs, edges_num_states, max_msg_size)
return msgs, None
ftov_msgs, _ = jax.lax.scan(update, ftov_msgs, None, num_iters)
return BPArrays(
log_potentials=log_potentials, ftov_msgs=ftov_msgs, evidence=evidence
)
def to_bp_state(bp_arrays: BPArrays) -> BPState:
"""Function to reconstruct the BPState from a BPArrays
Args:
bp_arrays: A BPArrays containing log_potentials, ftov_msgs, evidence.
Returns:
The reconstructed BPState
"""
return BPState(
log_potentials=LogPotentials(
fg_state=bp_state.fg_state, value=bp_arrays.log_potentials
),
ftov_msgs=FToVMessages(
fg_state=bp_state.fg_state,
value=bp_arrays.ftov_msgs,
),
evidence=Evidence(fg_state=bp_state.fg_state, value=bp_arrays.evidence),
)
@jax.jit
def get_beliefs(bp_arrays: BPArrays) -> Any:
"""Function to calculate beliefs from a BPArrays
Args:
bp_arrays: A BPArrays containing log_potentials, ftov_msgs, evidence.
Returns:
beliefs: An array or a PyTree container containing the beliefs for the variables.
"""
beliefs = bp_state.fg_state.variable_group.unflatten(
jax.device_put(bp_arrays.evidence)
.at[jax.device_put(var_states_for_edges)]
.add(bp_arrays.ftov_msgs)
)
return beliefs
bp = BeliefPropagation(
init=functools.partial(update, None),
update=update,
run_bp=run_bp,
to_bp_state=to_bp_state,
get_beliefs=get_beliefs,
)
return bp
@jax.jit
def decode_map_states(beliefs: Any) -> Any:
"""Function to decode MAP states given the calculated beliefs.
Args:
beliefs: An array or a PyTree container containing beliefs for different variables.
Returns:
An array or a PyTree container containing the MAP states for different variables.
"""
map_states = jax.tree_util.tree_map(
lambda x: jnp.argmax(x, axis=-1),
beliefs,
)
return map_states
@jax.jit
def get_marginals(beliefs: Any) -> Any:
"""Function to get marginal probabilities given the calculated beliefs.
Args:
beliefs: An array or a PyTree container containing beliefs for different variables.
Returns:
An array or a PyTree container containing the marginal probabilities for different variables.
"""
marginals = jax.tree_util.tree_map(
lambda x: jnp.exp(x - logsumexp(x, axis=-1, keepdims=True)),
beliefs,
)
return marginals
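# ------------------------------------------------------------------------------
# Illustrative usage sketch (commented out; not part of the module itself).
# `my_bp_state` stands for a BPState built elsewhere and `my_evidence` for
# evidence data covering the full variable group; both names are hypothetical.
#
#   bp = BP(my_bp_state, temperature=0.0)             # 0.0 -> max-product
#   bp_arrays = bp.init(evidence_updates={None: my_evidence})
#   bp_arrays = bp.run_bp(bp_arrays, num_iters=100, damping=0.5)
#   beliefs = bp.get_beliefs(bp_arrays)
#   map_states = decode_map_states(beliefs)            # argmax over each variable's states
#   marginals = get_marginals(beliefs)                 # normalized exp of the beliefs
# ------------------------------------------------------------------------------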
|
{"hexsha": "c0b4f0960ae7b8b4173a8fb52f73303a0c3e3b30", "size": 41946, "ext": "py", "lang": "Python", "max_stars_repo_path": "pgmax/fg/graph.py", "max_stars_repo_name": "StannisZhou/PGMax", "max_stars_repo_head_hexsha": "58fbe9516342eb79eee7a12c99ba84bb91d97520", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pgmax/fg/graph.py", "max_issues_repo_name": "StannisZhou/PGMax", "max_issues_repo_head_hexsha": "58fbe9516342eb79eee7a12c99ba84bb91d97520", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pgmax/fg/graph.py", "max_forks_repo_name": "StannisZhou/PGMax", "max_forks_repo_head_hexsha": "58fbe9516342eb79eee7a12c99ba84bb91d97520", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2191659272, "max_line_length": 141, "alphanum_fraction": 0.6315977686, "include": true, "reason": "import numpy,import jax,from jax", "num_tokens": 8495}
|
import streamlit as st
# Import libraries | Standard
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
import os
import datetime
import warnings
warnings.filterwarnings("ignore") # ignoring annoying warnings
from time import time
from rich.progress import track
# Import libraries | Visualization
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
# Import libraries | Sk-learn
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_squared_log_error
from sklearn.metrics.scorer import make_scorer
from sklearn.linear_model import LinearRegression, Lasso, ElasticNet
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.model_selection import KFold, cross_val_score, GridSearchCV
import xgboost as xgb
from lightgbm import LGBMRegressor
# udf function
from util_func import distribution
@st.cache(persist=True,allow_output_mutation=True)
def read_data(file):
st.write('Reading data (cache hit)~~~')
features = pd.read_csv('../raw_data/'+ file[0])
train = pd.read_csv('../raw_data/'+ file[1])
stores = pd.read_csv('../raw_data/'+ file[2])
test = pd.read_csv('../raw_data/'+ file[3])
return features,train,stores,test
filename = ["features.csv","train.csv","stores.csv","test.csv"]
stores = read_data(filename)[2]
features = read_data(filename)[0]
train = read_data(filename)[1]
test = read_data(filename)[3]
holidays = ['2010-02-12', '2011-02-11', '2012-02-10', '2013-02-08', #Super Bowl
'2010-09-10', '2011-09-09', '2012-09-07', '2013-09-06', #Labor Day
'2010-11-26', '2011-11-25', '2012-11-23', '2013-11-29', #Thanksgiving
'2010-12-31', '2011-12-30', '2012-12-28', '2013-12-27'] #Christmas
# st.title("XXX store sales forecasting")
activity = ['Project Overview','Dataset Exploration']
choice = st.sidebar.selectbox("Menu",activity)
if choice == "Project Overview":
project_intro = """
<div style="background-color:tomato;">
<p style="color:white;font-size:25px;">项目简介</p>
</div>
"""
st.markdown(project_intro,unsafe_allow_html=True)
st.write('''
### Competition description
One challenge of modeling retail data is the need to make decisions based on limited history.
If Christmas comes only once a year, so does the chance to see how strategic decisions affect the bottom line.
In this recruiting competition, job seekers are provided with **historical sales data for 45 Walmart stores located in different regions**.
Each store contains many departments, and participants must **predict the sales for each department in each store**.
To add to the challenge, selected **holiday markdown events** are included in the dataset.
These markdowns are known to affect sales, but it is challenging to predict which departments are affected and to what extent.
### Competition evaluation
This competition is evaluated with the weighted mean absolute error (WMAE):
* n is the number of rows
* yi is the true weekly sales
* wi is the weight: wi = 5 if the week is a holiday week, and 1 otherwise
Submission file: **the Id column is formed by joining Store, Dept and Date with underscores (e.g. Store_Dept_2012-11-02)**
For each row in the test set (**a store + department + date triplet**), you should predict that department's weekly sales.
### Data description
You are given historical sales data for 45 Walmart stores located in different regions.
Each store contains many departments, and your task is to predict the department-wide sales for each store.
In addition, Walmart runs several promotional markdown events throughout the year.
These markdowns precede prominent holidays, the four largest of which are the **Super Bowl, Labor Day, Thanksgiving and Christmas**.
The **weeks including these holidays are weighted five times higher in the evaluation than non-holiday weeks**.
Part of the challenge of this competition is **modeling the effect of markdowns on these holiday weeks in the absence of complete/ideal historical data**.
* stores.csv:
This file contains anonymized information about the 45 stores, indicating the type and size of each store.
* train.csv:
This is the historical sales data, covering 2010-02-05 to 2012-11-01. In this file you will find the following fields:
> Store - the store number
> Dept - the department number
> Date - the week
> Weekly_Sales - sales for the given department in the given store (the target value)
> IsHoliday - whether the week is a special holiday week
* test.csv:
This file is **identical to train.csv, except that the weekly sales are withheld**. You must **predict the sales for each store, department and date triplet in this file**.
* features.csv:
This file contains additional data related to store, department and regional activity for the given dates. It contains the following fields:
> Store - the store number
> Date - the week
> Temperature - average temperature in the region
> Fuel_Price - cost of fuel in the region
> MarkDown1-5 - anonymized data related to promotional markdowns Walmart is running. MarkDown data is only available after November 2011 and is not always available for all stores. Any missing value is marked with NA.
> CPI - the consumer price index
> Unemployment - the unemployment rate
> IsHoliday - whether the week is a special holiday week
For convenience, the four holidays fall within the following weeks in the dataset (not all holidays are in the data):
> Super Bowl: 12-Feb-10, 11-Feb-11, 10-Feb-12, 8-Feb-13
> Labor Day: 10-Sep-10, 9-Sep-11, 7-Sep-12, 6-Sep-13
> Thanksgiving: 26-Nov-10, 25-Nov-11, 23-Nov-12, 29-Nov-13
> Christmas: 31-Dec-10, 30-Dec-11, 28-Dec-12, 27-Dec-13
''')
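# --- Illustrative sketch (not part of the original app) -----------------------
# The WMAE metric described above, written out as a small helper. The function
# name and its array-like arguments (y_true, y_pred, is_holiday) are
# hypothetical; the weights are 5 for holiday weeks and 1 otherwise, as stated
# in the competition description.
def wmae(y_true, y_pred, is_holiday):
    weights = np.where(np.asarray(is_holiday), 5, 1)
    errors = np.abs(np.asarray(y_true) - np.asarray(y_pred))
    return np.sum(weights * errors) / np.sum(weights)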
if choice == "数据集探索":
# st.subheader("数据集探索")
# st.info("zhaoyadong@sfmail.sf-express.com")
data_explore = """
<div style="background-color:tomato;">
<p style="color:white;font-size:25px;">数据集探索</p>
</div>
"""
st.markdown(data_explore,unsafe_allow_html=True)
st.write('You selected the following data files: `{0}, {1}, {2}, {3}`'.format(filename[0],filename[1],filename[2],filename[3]))
# feat_sto = read_data(filename)[0].merge(read_data(filename)[2], how='inner', on='Store')
# if st.checkbox("展示features和stores合并后的数据集feat_sto"):
# st.dataframe(feat_sto.head())
# if st.checkbox("展示feat_sto数据类型"):
# st.dataframe(
# pd.DataFrame(feat_sto.dtypes, columns=['Type'])
# )
st.write('''
### 1 The stores dataset
''')
if st.checkbox("stores数据集"):
st.dataframe(stores.head())
st.dataframe(
pd.DataFrame(stores.dtypes, columns=['Type'])
)
if st.checkbox("stores数据集缺失值情况"):
total = stores.isnull().sum().sort_values(ascending=False)
percent = (stores.isnull().sum()/stores.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
st.dataframe(missing_data.head(20))
if st.checkbox("stores每种类型的商店面积平均大小"):
st.dataframe(stores['Size'].groupby(stores['Type']).mean())
if st.checkbox("Plot[Matplotlib]"):
fig, ax = plt.subplots(1, 2, figsize = (15,6))
ax[0].bar(stores['Type'].unique(), stores['Size'].groupby(stores['Type']).count())
ax[0].set_ylabel('# of Stores')
ax[0].set_xlabel('Store Type')
ax[0].yaxis.grid(True, linewidth=0.3)
ax[1].scatter(stores['Type'], stores['Size'])
ax[1].scatter(stores['Type'].unique(), stores['Size'].groupby(stores['Type']).mean()) #Store Type Average Store Size Vs
ax[1].set_ylabel('Store Size (Total / Average)')
ax[1].set_xlabel('Store Type')
ax[1].yaxis.grid(True, linewidth=0.3)
st.pyplot()
if st.checkbox("商店size小于40000非C类商店"):
st.dataframe(
stores[(stores['Size'] < 40000) & (~stores['Type'].isin(['C']))]
)
if st.checkbox("商店size分布图[seaborn]"):
sns.distplot(stores['Size'])
st.pyplot()
st.markdown('''
#### Conclusions from exploring the stores dataset
* Column TYPE is a candidate for one-hot encoding.
* Most stores are of TYPE='A'. Only a few stores are of TYPE='C'.
* The TYPE column seems to be linked to store size. The average store size is ~175k for TYPE 'A', ~100k for TYPE 'B' and ~40k for TYPE 'C'.
* Four stores [3, 5, 33 & 36] whose size is < 40k seem to have been incorrectly tagged as Types A & B.
''')
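# --- Illustrative sketch (not part of the original app) -----------------------
# One-hot encoding of the store TYPE column suggested in the conclusions above.
# The checkbox label and the stores_encoded name exist only for this example.
if st.checkbox("One-hot encode store Type (sketch)"):
    stores_encoded = pd.get_dummies(stores, columns=['Type'], prefix='Type')
    st.dataframe(stores_encoded.head())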
st.write('''
### 2 The features dataset
''')
if st.checkbox("features数据集"):
st.dataframe(features.head())
st.dataframe(
pd.DataFrame(features.dtypes, columns=['Type'])
)
st.dataframe(features.describe())
if st.checkbox("features数据集缺失值情况"):
total = features.isnull().sum().sort_values(ascending=False)
percent = (features.isnull().sum()/features.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
st.dataframe(missing_data.head(20))
if st.checkbox("所有特征列的空值分布"):
features_missing = features.isna().sum()/len(features) * 100
st.dataframe(features_missing)
plt.figure(figsize=(12,10))
plt.yticks(np.arange(len(features_missing)),features_missing.index,rotation='horizontal')
plt.xlabel('fraction of rows with missing data')
plt.barh(np.arange(len(features_missing)), features_missing)
st.pyplot()
if st.checkbox("Plot Year Vs # of Records/Unemployment/CPI"):
fig, ax = plt.subplots(2, 2, figsize = (12,9))
# Plot 1: Year Vs # of Records
ax[0,0].barh(features['Date'].str.slice(start=0, stop=4).unique(),
features['Date'].str.slice(start=0, stop=4).value_counts())
ax[0,0].set_xlabel('# of Records')
ax[0,0].set_ylabel('Year')
ax[0,0].yaxis.grid(True, linewidth=0.3)
# Plot 2: Month Vs # of Records with Missing Values - Unemployment
ax[1,0].barh(features['Date'].str.slice(start=0, stop=7)[features['Unemployment'].isna()].unique(),
features['Date'].str.slice(start=0, stop=7)[features['Unemployment'].isna()].value_counts())
ax[1,0].set_xlabel('# of Records with Missing Values - Unemployment')
ax[1,0].set_ylabel('Month')
ax[1,0].yaxis.grid(True, linewidth=0.3)
# Plot 3: Month Vs # of Records with Missing Values - CPI
ax[1,1].barh(features['Date'].str.slice(start=0, stop=7)[features['CPI'].isna()].unique(),
features['Date'].str.slice(start=0, stop=7)[features['CPI'].isna()].value_counts())
ax[1,1].set_xlabel('# of Records with Missing Values - CPI')
ax[1,1].set_ylabel('Month')
ax[1,1].yaxis.grid(True, linewidth=0.3)
st.pyplot()
if st.checkbox("features假期特征"):
st.write("有效假期")
st.dataframe(features['IsHoliday'][features['Date'].isin(holidays)].value_counts())
st.dataframe(features['Date'][features['IsHoliday'].isin([1])][~features['Date'].isin(holidays)].value_counts())
if st.checkbox("按照store和date分组"):
store_date_groupby = features[['CPI','Unemployment']].groupby([features['Store'], features['Date'].str.slice(start=0, stop=7)]).mean()
st.table(store_date_groupby.head(84))
if st.checkbox("按照date分组"):
date_count = features.groupby(features['Date'].str.slice(start=0, stop=7))['MarkDown1','MarkDown2','MarkDown3','MarkDown4','MarkDown5'].count()
st.table(date_count)
if st.checkbox("features数据集的数值型特征分布"):
distribution(features, ['CPI','Unemployment']);st.pyplot()
distribution(features, ['Temperature','Fuel_Price']);st.pyplot()
distribution(features, ['MarkDown1','MarkDown2']);st.pyplot()
distribution(features, ['MarkDown3','MarkDown4']);st.pyplot()
distribution(features, ['MarkDown5']);st.pyplot()
st.markdown('''
#### Conclusions from exploring the features dataset
* Data requires pre-processing
* Column(s) ISHOLIDAY has been validated
* Column(s) UNEMPLOYMENT & CPI have missing values for May, Jun & Jul 2013.
As the values of these columns don't change significantly month on month,
the value from Apr 2013 would be propagated over for each store.
* Column(s) MARKDOWN* have missing values for 2010 (the entire year) and 2011 (until Nov).
Additionally, there are missing values for other dates as well.
* CPI and UNEMPLOYMENT values are a bit skewed. The MARKDOWN* columns are skewed.
''')
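# --- Illustrative sketch (not part of the original app) -----------------------
# One possible treatment of the gaps noted above: forward-fill CPI and
# Unemployment per store (so the Apr-2013 value carries into May-Jul 2013) and
# treat missing MarkDown1-5 values as "no markdown" (0). The checkbox label and
# the features_filled name exist only for this example.
if st.checkbox("Fill missing CPI / Unemployment / MarkDown values (sketch)"):
    features_filled = features.sort_values(['Store', 'Date']).copy()
    features_filled[['CPI', 'Unemployment']] = (
        features_filled.groupby('Store')[['CPI', 'Unemployment']].ffill()
    )
    markdown_cols = ['MarkDown1', 'MarkDown2', 'MarkDown3', 'MarkDown4', 'MarkDown5']
    features_filled[markdown_cols] = features_filled[markdown_cols].fillna(0)
    st.dataframe(features_filled.isnull().sum())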
st.write('''
### 3 The train dataset
''')
if st.checkbox("train数据集"):
st.dataframe(train.head())
st.dataframe(
pd.DataFrame(train.dtypes, columns=['Type'])
)
st.dataframe(train.describe())
if st.checkbox("探索date年份范围以及对于年份的数据量"):
st.dataframe(train['Date'].str.slice(start=0, stop=4).value_counts()) # slice(start, stop) extracts a substring; here the year
if st.checkbox("train假期特征"):
st.write("有效假期")
st.dataframe(train['IsHoliday'][train['Date'].isin(holidays)].value_counts())
st.dataframe(train['Date'][train['IsHoliday'].isin([1])][~train['Date'].isin(holidays)].value_counts())
if st.checkbox("train数据集的数值型特征分布"):
distribution(train, ['Weekly_Sales']);st.pyplot()
if st.checkbox("商店销售额为负的(相对于目标值)的商店数"):
st.write(train['Store'][train['Weekly_Sales'] < 0].count())
train_outliers = pd.merge(train, stores, how='left', on=['Store'])
if st.checkbox("每种商店类型的平均周销"):
st.dataframe(train_outliers.groupby(['Type'])['Weekly_Sales'].mean())
train_outliers = train_outliers[train_outliers['Store'].isin([3,5,33,36])]
if st.checkbox("可能是误分类的商店类型的平均周销"):
st.dataframe(train_outliers.groupby(['Store','Type'])['Weekly_Sales'].mean())
if st.checkbox("商店类型的平均周销可视化"):
fig, ax = plt.subplots(1, 2, figsize = (15,6))
ax[0].bar(train_outliers['Type'].unique(), train_outliers.groupby(['Type'])['Weekly_Sales'].mean())
ax[0].set_ylabel('Average Weekly Sales')
ax[0].set_xlabel('Store Type')
ax[0].yaxis.grid(True, linewidth=0.3)
ax[1].bar([3,5,33,36], train_outliers.groupby(['Store','Type'])['Weekly_Sales'].mean())
ax[1].set_ylabel('Average Weekly Sales')
ax[1].set_xlabel('Store ID')
ax[1].yaxis.grid(True, linewidth=0.3)
st.pyplot()
# Free up memory
train_outliers = None
st.markdown('''
#### Conclusions from exploring the train dataset
* Column DATE is non-numeric and is a candidate for pre-processing.
* 1285 records have Weekly Sales < 0.
* Data spans the years 2010, 2011 and 2012.
* As suspected above, four stores [3, 5, 33 & 36] seem to have been incorrectly classified as Type A & B.
Average Weekly Sales for these stores is in line with the average for Type C.
Hence, they would need to be reclassified as Type C.
''')
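# --- Illustrative sketch (not part of the original app) -----------------------
# Reclassifying the four suspect stores as Type C, as proposed above. The
# checkbox label and the stores_fixed name exist only for this example.
if st.checkbox("Reclassify stores 3, 5, 33 and 36 as Type C (sketch)"):
    stores_fixed = stores.copy()
    stores_fixed.loc[stores_fixed['Store'].isin([3, 5, 33, 36]), 'Type'] = 'C'
    st.dataframe(stores_fixed[stores_fixed['Store'].isin([3, 5, 33, 36])])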
st.write('''
### 4 The test dataset
''')
if st.checkbox("test数据集"):
st.dataframe(test.head())
st.dataframe(
pd.DataFrame(test.dtypes, columns=['Type'])
)
st.dataframe(test.describe())
if st.checkbox("探索date年份和年份对于的数据量"):
st.dataframe(test['Date'].str.slice(start=0, stop=4).value_counts()) # slice(start, stop) extracts a substring; here the year
if st.checkbox("test假期特征"):
st.write("有效假期")
st.dataframe(test['IsHoliday'][test['Date'].isin(holidays)].value_counts())
st.dataframe(test['Date'][test['IsHoliday'].isin([1])][~test['Date'].isin(holidays)].value_counts())
st.markdown('''
#### Conclusions from exploring the test dataset
* Column DATE is non-numeric and is a candidate for pre-processing.
* Data spans the years 2012 and 2013.
''')
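# --- Illustrative sketch (not part of the original app) -----------------------
# The DATE column is non-numeric in train and test; a common pre-processing step
# is to derive numeric year/month/day features from it. The add_date_features
# helper and the checkbox label exist only for this example.
def add_date_features(df):
    out = df.copy()
    dates = pd.to_datetime(out['Date'])
    out['Year'] = dates.dt.year
    out['Month'] = dates.dt.month
    out['Day'] = dates.dt.day
    return out
if st.checkbox("Derived date features for the test set (sketch)"):
    st.dataframe(add_date_features(test).head())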
|
{"hexsha": "726b3a2a6aa1363a1c1c76f05594a0960db6280c", "size": 14570, "ext": "py", "lang": "Python", "max_stars_repo_path": "case/solution-1/data_eda_with_streamlit_app.py", "max_stars_repo_name": "7125messi/streamlit-web-ml", "max_stars_repo_head_hexsha": "903d528e561d045d5f6c1dabdb0b78b28e32191c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-11T01:16:10.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-11T01:16:10.000Z", "max_issues_repo_path": "case/solution-1/data_eda_with_streamlit_app.py", "max_issues_repo_name": "7125messi/streamlit-web-ml", "max_issues_repo_head_hexsha": "903d528e561d045d5f6c1dabdb0b78b28e32191c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "case/solution-1/data_eda_with_streamlit_app.py", "max_forks_repo_name": "7125messi/streamlit-web-ml", "max_forks_repo_head_hexsha": "903d528e561d045d5f6c1dabdb0b78b28e32191c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-25T07:45:58.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-25T07:45:58.000Z", "avg_line_length": 39.0616621984, "max_line_length": 152, "alphanum_fraction": 0.6105696637, "include": true, "reason": "import numpy", "num_tokens": 4738}
|
subroutine my_sub(input_file)
implicit none
character(len=*), intent(in) :: input_file
logical :: is_file
inquire(file=input_file, exist=is_file)
if (is_file.EQV..TRUE.) then
write(*,'(A)') "Input file: '"//trim(input_file)//"'"
else
write(*,'(A)') "Input file: '"//trim(input_file)//"' (file does not exist)"
endif
end subroutine my_sub
program main
implicit none
character(len=255) :: str
integer :: argc
argc=command_argument_count()
if (argc.LE.0) then
call get_command_argument(0, str)
write(*,'(A)') "Usage: "//trim(str)//" [input_file]"
else
call get_command_argument(1, str)
call my_sub(str)
endif
end program main
|
{"hexsha": "30882ecd9b92730a62e84acc4789da4dc15217ed", "size": 731, "ext": "f03", "lang": "FORTRAN", "max_stars_repo_path": "src/template.f03", "max_stars_repo_name": "nathanielng/code-templates", "max_stars_repo_head_hexsha": "cb2aae1ec4462aaccdb313b8cd574ed57c685aa2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/template.f03", "max_issues_repo_name": "nathanielng/code-templates", "max_issues_repo_head_hexsha": "cb2aae1ec4462aaccdb313b8cd574ed57c685aa2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/template.f03", "max_forks_repo_name": "nathanielng/code-templates", "max_forks_repo_head_hexsha": "cb2aae1ec4462aaccdb313b8cd574ed57c685aa2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1071428571, "max_line_length": 83, "alphanum_fraction": 0.6128590971, "num_tokens": 203}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
fashion_mnist = keras.datasets.fashion_mnist
(trainFeatures, trainLabels), (testFeatures, testLabels) = fashion_mnist.load_data()
accuracies = []
trainFeatures = trainFeatures[:30000]
trainLabels = trainLabels[:30000]
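# k-nearest-neighbours on Fashion-MNIST using TensorFlow 1.x-style graph ops:
# for each test image, compute the Euclidean distance to every training image,
# pick the k closest ones with tf.nn.top_k on the negated distances, and predict
# the label that occurs most often among them. k is swept over the odd values
# 1, 3, ..., 25 and the accuracy for each k is collected for plotting.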
for i in range(1, 26, 2):
tf.compat.v1.reset_default_graph()
tf.compat.v1.disable_eager_execution()
placeholderTrainFeatures = tf.compat.v1.placeholder(trainFeatures.dtype,
shape=trainFeatures.shape)
placeholderTrainLabels = tf.compat.v1.placeholder(trainLabels.dtype,
shape=trainLabels.shape)
placeholderTest = tf.compat.v1.placeholder(testFeatures.dtype,
(28, 28))
x = tf.cast(placeholderTrainFeatures, 'float32')
y = tf.cast(placeholderTest, 'float32')
substracted = tf.subtract(x, y)
distance = tf.sqrt(tf.reduce_sum(tf.square(substracted), axis=(1, 2)))
_, indices = tf.nn.top_k(tf.negative(distance), k=i, sorted=False)
top_k_labels = tf.gather(placeholderTrainLabels, indices)
labels, _, counts = tf.unique_with_counts(top_k_labels)
prediction = tf.gather(labels, tf.argmax(counts))
accuracy = 0.
init = tf.compat.v1.global_variables_initializer()
with tf.compat.v1.Session() as sess:
sess.run(init)
for testFeature, testLabel in zip(testFeatures, testLabels):
predicted = sess.run(prediction,
feed_dict={placeholderTrainFeatures: trainFeatures[:],
placeholderTrainLabels : trainLabels[:],
placeholderTest: testFeature})
if predicted == testLabel:
accuracy += 1./ len(testFeatures)
accuracies.append((i, accuracy))
print("Done!")
print("Accuracy:", accuracy)
print(accuracies)
plt.plot(*zip(*accuracies))
plt.xlabel('k')
plt.ylabel('Accuracy')
plt.show()
|
{"hexsha": "c226f4067199a303cefc23907f26a5707926e2ea", "size": 2281, "ext": "py", "lang": "Python", "max_stars_repo_path": "k_nearest_neighbors.py", "max_stars_repo_name": "NikPyth/KNN", "max_stars_repo_head_hexsha": "62c04b30fb24f135193d22986587a93b31acd212", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "k_nearest_neighbors.py", "max_issues_repo_name": "NikPyth/KNN", "max_issues_repo_head_hexsha": "62c04b30fb24f135193d22986587a93b31acd212", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "k_nearest_neighbors.py", "max_forks_repo_name": "NikPyth/KNN", "max_forks_repo_head_hexsha": "62c04b30fb24f135193d22986587a93b31acd212", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0923076923, "max_line_length": 84, "alphanum_fraction": 0.6413853573, "include": true, "reason": "import numpy", "num_tokens": 479}
|
[STATEMENT]
lemma support_upd[simp]: "support z A (f(x := z)) = support z A f - {x}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. support z A (f(x := z)) = support z A f - {x}
[PROOF STEP]
unfolding support_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {xa \<in> A. (f(x := z)) xa \<noteq> z} = {x \<in> A. f x \<noteq> z} - {x}
[PROOF STEP]
by auto
|
{"llama_tokens": 168, "file": null, "length": 2}
|
import numpy as np
import support
from algorithm.algabc import GSA, Options
from problem.testfunc import TestFunction
# TODO: add the ability to choose the stopping criterion (iterations by default), e.g. the RMS deviation from the best point
class GSAOptions(Options):
_alias_map = {
'g_idx': ['gi', 'g_index'], # -
'g_zero': ['gz'], # +
'alpha': ['a'], # +
'gamma': ['g'],
'elite_probe': ['ep'], # -
'r_norm': ['rn'], # -
'r_power': ['rp'], # -
'delta': ['d'], # -
}
_required_keys = ('g_zero', 'alpha')
def __init__(self, **kwargs): # n, ni, ig, g0, alpha, gamma, ep=True, rn=2, rp=1, kn=0, delta=pow(10, -4)
kw = support.normalize_kwargs(kwargs,
alias_map=GSAOptions._alias_map,
required=GSAOptions._required_keys)
super().__init__(**kw)
self._g_idx = 1 if 'g_idx' not in kw else kw['g_idx']
self._g0 = kw['g_zero']
self._alpha = kw['alpha']
self._elite_probe = True if 'elite_probe' not in kw else kw['elite_probe']
self._rn = 2 if 'r_norm' not in kw else kw['r_norm']
self._rp = 1 if 'r_power' not in kw else kw['r_power']
self._gamma = None if 'gamma' not in kw else kw['gamma'] # FIXME: why is this needed?
self._delta = pow(10, -4) if 'delta' not in kw else kw['delta'] # stopping tolerance
def get_g_value(self, i, max_iter):
if self._g_idx == 1:
return self._g0 * np.exp(-self._alpha * i / max_iter)
elif self._g_idx == 2:
if self._gamma is not None:
return self._g0 / (self._alpha + i**self._gamma)
else:
raise ValueError('The _gamma attribute is not set.')
else:
raise ValueError('No G function with this index exists: ' + str(self._g_idx))
def update_op(self, **kwargs):
kw = support.normalize_kwargs(kwargs, alias_map=GSAOptions._alias_map)
for k, v in kw.items():
print(k, v)
if k in GSAOptions._alias_map:
self.__setattr__(k, v)
super().update_op(**kw)
def __repr__(self):
return (f'GSAOptions(n={self._number_points}, ni={self._number_iter}, '
f'gi={self._g_idx}, gz={self._g0}, a={self._alpha}, g={self._gamma}, '
f'ep={self._elite_probe}, rn={self._rn}, rp={self._rp}, d={self._delta}, kn={self._k_noise})')
@property
def g0(self):
return self._g0
@g0.setter
def g0(self, v):
self._g0 = v
@property
def alpha(self):
return self._alpha
@alpha.setter
def alpha(self, v):
self._alpha = v
@property
def g_idx(self):
return self._g_idx
@g_idx.setter
def g_idx(self, v):
self._g_idx = v
@property
def elite_probe(self):
return self._elite_probe
@elite_probe.setter
def elite_probe(self, v):
self._elite_probe = v
@property
def rn(self):
return self._rn
@rn.setter
def rn(self, v):
self._rn = v
@property
def rp(self):
return self._rp
@rp.setter
def rp(self, v):
self._rp = v
@property
def gamma(self):
return self._gamma
@gamma.setter
def gamma(self, v):
self._gamma = v
@property
def delta(self):
return self._delta
@delta.setter
def delta(self, v):
self._delta = v
class StandardGSA(GSA):
def __init__(self, op, **kwargs):
super().__init__(op=op, **kwargs)
self._name = 'Standard GSA'
self._full_name = 'Standard gravity search algorithm'
def optimization(self, tf, min_flag=1):
if self._options:
return gsa(self._options, tf, min_flag)
raise ValueError('Algorithm options are not set')
def initialization(n, dim, down, high):
if isinstance(down, (int, float)) and isinstance(high, (int, float)):
return np.random.uniform(down, high, (n, dim))
elif isinstance(down, (list, tuple)) and isinstance(high, (list, tuple)):
x = np.random.uniform(0, 1, (n, dim))
for i in range(dim):
x[:, i] = x[:, i] * (high[i] - down[i]) + down[i]
return x
else:
raise ValueError('Invalid down or high: down=' + str(down) + '; high=' + str(high))
def get_eval_func_val(x, tf, kn):
f = np.array([tf.get_value(xi) + np.random.uniform(-tf.amp * kn, tf.amp * kn) for xi in x])
return f
def space_bound(x, down, high):
dim = len(x[0])
for i in range(len(x)):
high_border = x[i, :] > high
down_border = x[i, :] < down
# ~ is the element-wise NOT operation: it flips each boolean in the mask
x[i, :] = (x[i, :] * (~(down_border + high_border))) + (
np.random.uniform(down, high, (1, dim)) * (down_border + high_border))
return x
def find_mass(fit, min_flag):
fit_max = np.max(fit)
fit_min = np.min(fit)
if fit_max == fit_min:
mass = np.ones((len(fit), ))
else:
if min_flag == 1: # minimization
best = fit_min
worst = fit_max
else: # maximization
best = fit_max
worst = fit_min
mass = (fit - best) / (worst - best)
mass = mass / np.sum(mass)
return mass
def find_acceleration(x, mass, g, r_norm, r_power, ec, iter, max_iter):
dim = len(x[0])
n = len(x)
final_per = 2
if ec:
k_best = final_per + (1 - iter / max_iter) * (100 - final_per)
k_best = round(n * k_best / 100)
else:
k_best = n
ds = np.argsort(mass)[::-1]
E = np.zeros((n, dim))
for i in range(n):
for j in range(k_best):
k = ds[j]
if k != i:
radius = np.linalg.norm(x[i, :] - x[k, :], r_norm)
E[i] += np.random.uniform(0, 1) * mass[k] * ((x[k] - x[i]) / (
np.power(radius, r_power) + np.finfo(float).eps))
return E * g
def move(x, a, last_v):
v = np.random.uniform(0, 1, (len(x), len(x[0]))) * last_v + a
new_x = x + v
return new_x, v
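# Per-iteration update implemented by gsa() below (see find_mass,
# find_acceleration and move above):
#   mass:         M_i = (fit_i - best) / (worst - best), then normalized to sum to 1
#   gravity:      G(t) = G0 * exp(-alpha * t / T)   or   G0 / (alpha + t**gamma)
#   acceleration: a_i = G * sum_j rand() * M_j * (x_j - x_i) / (||x_j - x_i||_rn**rp + eps),
#                 where the sum runs over the k_best heaviest agents when elite_probe is set
#   velocity:     v_i = rand() * v_i + a_i
#   position:     x_i = x_i + v_i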
def gsa(op, tf, min_flag):
x = initialization(op.number_points, tf.dim, tf.down, tf.high)
velocity = np.zeros((op.number_points, tf.dim))
best_chart = []
mean_chart = []
func_best = None
agent_best = None
iteration = 0
for i in range(op.number_iter):
iteration = i + 1
x = space_bound(x, tf.down, tf.high)
fit = get_eval_func_val(x, tf, op.k_noise)
if min_flag == 1:
best = np.min(fit)
best_x = np.argmin(fit)
else:
best = np.max(fit)
best_x = np.argmax(fit)
if i == 0:
func_best = best
agent_best = x[best_x, :]
if min_flag == 1:
if best < func_best:
func_best = best
agent_best = x[best_x, :]
else:
if best > func_best:
func_best = best
agent_best = x[best_x, :]
best_chart.append(func_best)
mean_chart.append(np.mean(fit))
if op.delta is not None:
ar_std = np.std(x, axis=0, ddof=1)
_std = np.power(np.sum(np.power(ar_std, 2)), 0.5)
if _std <= op.delta:
return agent_best, func_best, iteration, best_chart, mean_chart
mass = find_mass(fit, min_flag)
g = op.get_g_value(iteration, op.number_iter)
a = find_acceleration(x, mass, g, op.rn, op.rp, op.elite_probe, i, op.number_iter)
x, velocity = move(x, a, velocity)
return agent_best, func_best, iteration, best_chart, mean_chart
def main():
TEST_FUNC_2 = {
"dimension": 2,
"type": "bf",
"number_extrema": 10,
"coordinates": [
[4, 2], [-3, -2], [-5, 3], [3, -3], [3, 5],
[-2, 4], [0, -4], [5, -5], [-4, -4], [1, -1]
],
"func_values": [0, 3, 5, 6, 7, 8, 9, 10, 11, 12],
"degree_smoothness": [
[0.7, 0.9], [0.9, 0.6], [1.2, 0.3], [0.6, 1.3], [1.5, 2],
[0.5, 0.9], [2, 0.6], [1.7, 1.1], [1.1, 0.8], [0.6, 1.1]
],
"coefficients_abruptness": [
[7, 7], [4, 5], [6, 6], [5, 7], [3.5, 5],
[7, 3], [6, 5], [3, 6.3], [4.5, 5], [2, 3]
],
"constraints_high": [6, 6],
"constraints_down": [-6, -6],
"global_min": [4, 2],
"global_max": [2, -6],
"amp_noise": 15.755,
"min_value": 0.0,
"max_value": 31.51
}
tf = TestFunction(**TEST_FUNC_2)
d = {'n': [50, 60, 70, 80]}
op = GSAOptions(np=10, ni=50, g_idx=2, g_zero=100, alpha=20, gamma=2)
alg = StandardGSA(op)
p = alg.probability_estimate(tf, op, d, ep=0.2, number_runs=100, min_flag=1)
print(p)
if __name__ == '__main__':
main()
|
{"hexsha": "2da003991e941459fde1ad4e6629475d2d1b85f0", "size": 8890, "ext": "py", "lang": "Python", "max_stars_repo_path": "algorithm/gsa.py", "max_stars_repo_name": "redb0/gotpy", "max_stars_repo_head_hexsha": "b3f2e12aff429e0bff0faa079a3694378293c974", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-12-02T08:37:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T14:25:22.000Z", "max_issues_repo_path": "algorithm/gsa.py", "max_issues_repo_name": "redb0/gotpy", "max_issues_repo_head_hexsha": "b3f2e12aff429e0bff0faa079a3694378293c974", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "algorithm/gsa.py", "max_forks_repo_name": "redb0/gotpy", "max_forks_repo_head_hexsha": "b3f2e12aff429e0bff0faa079a3694378293c974", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1475409836, "max_line_length": 118, "alphanum_fraction": 0.5284589426, "include": true, "reason": "import numpy", "num_tokens": 2724}
|
# -*- coding: latin-1 -*-
from __future__ import division
import ast
import numpy as np
from PyQt4 import QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from functools import partial
import banco.bd_sensores as bd_sensores
import banco.bd_perfil as bd_perfil
'''
Classes for the window and the chart.
Since Qt4 does not ship a suitable chart widget by default, matplotlib is used,
with a plain Qt widget inheriting its properties.
'''
class MplCanvas(FigureCanvas):
def __init__(self):
self.fig = Figure()
self.fig.set_facecolor('white')
self.fig.subplots_adjust(bottom=0.15)
self.ax = self.fig.add_subplot(111)
self.ax.grid(True)
FigureCanvas.__init__(self, self.fig)
FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
class matplotlibWidget(QtGui.QWidget):
def __init__(self, parent = None):
QtGui.QWidget.__init__(self, parent)
self.canvas = MplCanvas()
self.vbl = QtGui.QVBoxLayout()
self.vbl.addWidget(self.canvas)
self.setLayout(self.vbl)
def plotar_sensores(self):
'''
Function that plots the chart in the GUI, using the Matplotlib class
that overloads the Qt label (see the matplotlibwidgetFile.py file)
'''
# Clear the axes before plotting again
self.ui.widget.canvas.ax.clear()
# Get the experiment name and check whether it is 'Sem Nome' (unnamed) or not.
if (str(self.ui.label_nomeExperimento.text()) != 'Sem Nome'):
d = bd_sensores.retorna_dados(self.caminho_banco, delta_t=1,
experimento=str(self.ui.label_nomeExperimento.text()))
else:
delta_t = self.ui.horizontalSlider_graficoPeriodo.value()
d = bd_sensores.retorna_dados(self.caminho_banco, delta_t)
try:
if np.size(d[:,0]) > 1:
eixo_tempo = (d[:,2].astype(float) - float(d[0,2]) )/60
if self.ui.checkBox_sensor2.isChecked():
self.ui.widget.canvas.ax.plot(eixo_tempo,d[:,4],label="teto1")
if self.ui.checkBox_sensor3.isChecked():
self.ui.widget.canvas.ax.plot(eixo_tempo,d[:,5],label="teto2")
if self.ui.checkBox_sensor4.isChecked():
self.ui.widget.canvas.ax.plot(eixo_tempo,d[:,6],label="lateral1")
if self.ui.checkBox_sensor5.isChecked():
self.ui.widget.canvas.ax.plot(eixo_tempo,d[:,7],label="lateral2")
if self.ui.checkBox_sensor6.isChecked():
self.ui.widget.canvas.ax.plot(eixo_tempo,d[:,8],label="lateral3")
if self.ui.checkBox_sensor1.isChecked():
self.ui.widget.canvas.ax.plot(eixo_tempo,d[:,3],label="esteira")
self.ui.widget.canvas.ax.legend(loc='lower left',
frameon=True,shadow=True, fancybox=True)
self.ui.widget.canvas.ax.set_title('Sensores Forno')
self.ui.widget.canvas.ax.set_xlabel('tempo (minutos)')
self.ui.widget.canvas.ax.set_ylabel('temperatura (Celcius)')
self.ui.widget.canvas.ax.grid(True)
self.ui.widget.canvas.draw()
except Exception as e:
print 'e- ', e
self.alerta_toolbar("Erro: Grafico Sensores")
pass
def grafico_update(self):
'''
Recursive method.
Called when the chart auto-update checkbox is enabled.
The function arms a Qt singleShot timer with the delay chosen in the GUI
spinBox. If the checkbox is still enabled, the function will call itself again
recursively until the checkbox is disabled or the connection is closed.
'''
self.alerta_toolbar("update-grafico")
if self.ui.checkBox_graficoAuto.isChecked():
plotar_sensores(self)
tempo_delay = 1000*int(self.ui.spinBox_graficoLatencia.value())
self.timer_grafico.singleShot(tempo_delay,partial(grafico_update,self))
def tempo_grafico(self):
'''
Event that fires when the slider below the chart is moved.
- Changes the time span shown in the chart (between 1 and 99 min)
relative to the current time.
- Calls the chart update function.
'''
valor = self.ui.horizontalSlider_graficoPeriodo.value() # Get the chart time interval chosen on the slider
texto = 'Delta T = ' + str(valor) + ' min' # Text shown next to the slider with the chosen value
self.ui.label_graficoTempo.setText(texto) # Update the label text in the GUI
def plota_perfil(self,tipo,posicao_atual):
'''
Plota o gráfico mostrando o perfil escolhido
'''
if tipo == 'temperatura':
nomes_drop = {0:'todos',1:'t_ar',2:'t_esteira'}
escolha = unicode(self.ui.comboBox_perfilTemperatura.currentText())
dados = bd_perfil.leitura_perfil(self.caminho_banco, escolha, 'temperatura')
indice = int(self.ui.comboBox_displayPerfilTemperatura.currentIndex())
self.ui.widget_perfilTemperatura.canvas.ax.clear()
elif tipo == 'potencia':
escolha = unicode(self.ui.comboBox_perfilPotencia.currentText())
dados = bd_perfil.leitura_perfil(self.caminho_banco, escolha, 'potencia')
indice = int(self.ui.comboBox_displayPerfilPotencia.currentIndex())
self.ui.widget_perfilPotencia.canvas.ax.clear()
x , y = [], []
if indice > 0:
v = ast.literal_eval(dados[indice + 1])
for i in v:
x.append(i[0])
y.append(i[1])
if tipo == 'temperatura':
self.ui.widget_perfilTemperatura.canvas.ax.plot(x,y,label=nomes_drop[indice])
self.ui.widget_perfilTemperatura.canvas.ax.legend(loc='lower right',
frameon=True,shadow=True, fancybox=True)
if posicao_atual:
self.ui.widget_perfilTemperatura.canvas.ax.plot((posicao_atual[0], posicao_atual[1]),
(posicao_atual[2], posicao_atual[3]), 'k--')
self.ui.widget_perfilTemperatura.canvas.ax.set_title('Perfil temperatura')
self.ui.widget_perfilTemperatura.canvas.ax.set_xlabel('Tempo (minutos)')
self.ui.widget_perfilTemperatura.canvas.ax.set_ylabel('Temperatura (K)')
self.ui.widget_perfilTemperatura.canvas.ax.set_ylim([0,300])
self.ui.widget_perfilTemperatura.canvas.ax.grid(True)
self.ui.widget_perfilTemperatura.canvas.draw()
elif tipo == 'potencia':
self.ui.widget_perfilPotencia.canvas.ax.plot(x,y,label="R" + str(indice))
self.ui.widget_perfilPotencia.canvas.ax.legend(loc='lower right',
frameon=True,shadow=True, fancybox=True)
if posicao_atual:
self.ui.widget_perfilPotencia.canvas.ax.plot((posicao_atual[0], posicao_atual[1]),
(posicao_atual[2], posicao_atual[3]), 'k--')
self.ui.widget_perfilPotencia.canvas.ax.set_title('Perfil Potencia')
self.ui.widget_perfilPotencia.canvas.ax.set_xlabel('Tempo (minutos)')
self.ui.widget_perfilPotencia.canvas.ax.set_ylabel('Potencia (%)')
self.ui.widget_perfilPotencia.canvas.ax.set_ylim([0,110])
self.ui.widget_perfilPotencia.canvas.ax.grid(True)
self.ui.widget_perfilPotencia.canvas.draw()
elif indice == 0:
for elemento in range(2,8):
x , y = [], []
if tipo == 'temperatura' and elemento < 4:
v = ast.literal_eval(dados[elemento])
for i in v:
x.append(i[0])
y.append(i[1])
self.ui.widget_perfilTemperatura.canvas.ax.set_title('Perfil temperatura')
self.ui.widget_perfilTemperatura.canvas.ax.set_xlabel('Tempo (minutos)')
self.ui.widget_perfilTemperatura.canvas.ax.set_ylabel('Temperatura (K)')
self.ui.widget_perfilTemperatura.canvas.ax.set_ylim([0,300])
self.ui.widget_perfilTemperatura.canvas.ax.grid(True)
self.ui.widget_perfilTemperatura.canvas.ax.plot(x,y,label=nomes_drop[elemento-1])
self.ui.widget_perfilTemperatura.canvas.ax.legend(loc='lower right',
frameon=True,shadow=True, fancybox=True, ncol=2)
if posicao_atual:
self.ui.widget_perfilTemperatura.canvas.ax.plot((posicao_atual[0], posicao_atual[1]),
(posicao_atual[2], posicao_atual[3]), 'k--')
elif tipo == 'potencia':
v = ast.literal_eval(dados[elemento])
for i in v:
x.append(i[0])
y.append(i[1])
self.ui.widget_perfilPotencia.canvas.ax.plot(x,y,label=str('R' + str(elemento-1)))
self.ui.widget_perfilPotencia.canvas.ax.legend(loc='lower right',
frameon=True,shadow=True, fancybox=True, ncol=2)
if posicao_atual:
self.ui.widget_perfilPotencia.canvas.ax.plot((posicao_atual[0], posicao_atual[1]),
(posicao_atual[2], posicao_atual[3]), 'k--')
self.ui.widget_perfilPotencia.canvas.ax.grid(True)
self.ui.widget_perfilPotencia.canvas.ax.set_title('Perfil Potencia')
self.ui.widget_perfilPotencia.canvas.ax.set_xlabel('Tempo (minutos)')
self.ui.widget_perfilPotencia.canvas.ax.set_ylabel('Potencia (%)')
self.ui.widget_perfilPotencia.canvas.ax.set_ylim([0,110])
if tipo == 'temperatura':
self.ui.widget_perfilTemperatura.canvas.draw()
elif tipo == 'potencia':
self.ui.widget_perfilPotencia.canvas.draw()
else:
return None
|
{"hexsha": "1f2406ab16fccfcd77b52639ecfc1ad6a39f4804", "size": 9936, "ext": "py", "lang": "Python", "max_stars_repo_path": "source/graficos.py", "max_stars_repo_name": "Atzingen/controleForno-interface", "max_stars_repo_head_hexsha": "6a8968527f8b76c7d0c7ea26f8c8aca728fe4d2d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "source/graficos.py", "max_issues_repo_name": "Atzingen/controleForno-interface", "max_issues_repo_head_hexsha": "6a8968527f8b76c7d0c7ea26f8c8aca728fe4d2d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2016-09-06T15:54:00.000Z", "max_issues_repo_issues_event_max_datetime": "2016-09-15T15:05:41.000Z", "max_forks_repo_path": "source/graficos.py", "max_forks_repo_name": "Atzingen/controleForno-interface", "max_forks_repo_head_hexsha": "6a8968527f8b76c7d0c7ea26f8c8aca728fe4d2d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.75, "max_line_length": 152, "alphanum_fraction": 0.6355676329, "include": true, "reason": "import numpy", "num_tokens": 2337}
|
import cv2
import time
import argparse
import numpy as np
def blur_face(image, face_detector):
"""
Runs the face detector, extracts face regions and blurs them
Args:
image: Input image or video frame
face_detector: Path to the face haarcascade file
Returns:
The processed image with face blurred
"""
(h, w) = image.shape[:2]
k_w = int(w / 7)
k_h = int(h / 7)
# cv2.GaussianBlur requires odd kernel dimensions, so round the computed sizes down to the nearest odd value
if k_w % 2 == 0:
k_w -= 1
if k_h % 2 == 0:
k_h -= 1
# Load the face haarcascade
face_cascade = cv2.CascadeClassifier(face_detector)
# Convert image to grayscale for detection
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect face regions in the image
detected_faces = face_cascade.detectMultiScale(image_gray, minNeighbors=5, scaleFactor=1.1,)
# Loop throught face regions and blur them using Gaussian Blur
for (x, y, w, h) in detected_faces:
blurred_face_image = image[y:y+h, x:x+w]
blurred_face_image= cv2.GaussianBlur(blurred_face_image, (k_w, k_h), 0)
image[y:y+h,x:x+w]=blurred_face_image
return image
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--image', help='path to image')
parser.add_argument('-v', '--video', help='path to video file')
parser.add_argument('-f', '--face_cascade', help='path to haarcascade for face', default='haarcascade_frontalface_default.xml')
args = vars(parser.parse_args())
# Run the algorithm on an image and save the output
if args['image']:
test_image = cv2.imread(args['image'])
face_blurred_image = blur_face(test_image, args['face_cascade'])
cv2.imshow('Face Blurred', face_blurred_image)
cv2.imwrite('output/face_blurred_output.png', face_blurred_image)
cv2.waitKey(0)
# Run the algorithm on a video
elif args['video']:
video = cv2.VideoCapture(args['video'])
while True:
ret, frame = video.read()
if ret == False:
break
face_blurred_frame = blur_face(frame, args['face_cascade'])
face_blurred_frame = cv2.resize(face_blurred_frame, (500,500), cv2.INTER_CUBIC)
cv2.imshow('Face Blurred', face_blurred_frame)
if cv2.waitKey(1) & 0xff == ord('q'):
break
video.release()
# Run the algorithm on live web-cam feed
else:
video = cv2.VideoCapture(0)
time.sleep(2)
while True:
ret, frame = video.read()
if ret == False:
break
face_blurred_frame = blur_face(frame, args['face_cascade'])
cv2.imshow('Face Blurred', face_blurred_frame)
if cv2.waitKey(4) & 0xff == ord('q'):
break
video.release()
cv2.destroyAllWindows()
|
{"hexsha": "d3f09b4c01847d61df4ada2940f948ae2b57f5f3", "size": 2533, "ext": "py", "lang": "Python", "max_stars_repo_path": "face_blur.py", "max_stars_repo_name": "GSNCodes/Blur-Face", "max_stars_repo_head_hexsha": "63134c4e63052da464331a725c685422c1c633be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "face_blur.py", "max_issues_repo_name": "GSNCodes/Blur-Face", "max_issues_repo_head_hexsha": "63134c4e63052da464331a725c685422c1c633be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "face_blur.py", "max_forks_repo_name": "GSNCodes/Blur-Face", "max_forks_repo_head_hexsha": "63134c4e63052da464331a725c685422c1c633be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.4159292035, "max_line_length": 128, "alphanum_fraction": 0.7011448875, "include": true, "reason": "import numpy", "num_tokens": 715}
|
// test/wwwc/css_syntax/parsing.cpp
#include <boost/test/unit_test.hpp>
#include <wordring/wwwc/css_syntax/parsing.hpp>
#include <wordring/wwwc/selectors/grammar.hpp>
#include <algorithm>
#include <any>
#include <iterator>
#include <string>
#include <typeindex>
#include <vector>
namespace
{
inline std::u32string print(wordring::wwwc::css::function const& in);
inline std::u32string print(wordring::wwwc::css::simple_block const& in);
inline std::u32string print(wordring::wwwc::css::at_rule const& in);
inline std::u32string print(wordring::wwwc::css::qualified_rule const& in);
inline std::u32string print(wordring::wwwc::css::declaration const& in);
// inline std::u32string print(std::vector<wordring::wwwc::css::declaration> const& in);
inline std::u32string print(wordring::wwwc::css::syntax_primitive const& c);
inline std::u32string print(wordring::wwwc::css::component_value const& c);
inline std::u32string print(std::vector<wordring::wwwc::css::syntax_primitive> const& in);
inline std::u32string print(std::vector<wordring::wwwc::css::component_value> const& in);
	// Convert a number to a string
inline std::u32string print(double i)
{
std::u32string result;
std::string s = std::to_string(static_cast<int>(i));
for (unsigned char c : s) result.push_back(c);
return result;
}
inline std::u32string print(wordring::wwwc::css::function const& in)
{
using namespace wordring::wwwc::css;
std::u32string out;
out += in.m_name;
out += U"(";
out += print(in.m_value);
out += U")";
return out;
}
inline std::u32string print(wordring::wwwc::css::simple_block const& in)
{
using namespace wordring::wwwc::css;
std::u32string out;
out += in.m_associated_token;
out += print(in.m_value);
switch (in.m_associated_token)
{
case U'{':
out += U"}";
break;
case U'[':
out += U"]";
break;
case U'(':
out += U")";
break;
default:
assert(false);
}
return out;
}
inline std::u32string print(wordring::wwwc::css::at_rule const& in)
{
using namespace wordring::wwwc::css;
std::u32string out;
out += U"@";
out += in.m_name;
out += print(in.m_prelude);
if (in.m_block)
{
out += print(*in.m_block);
}
return out;
}
inline std::u32string print(wordring::wwwc::css::qualified_rule const& in)
{
using namespace wordring::wwwc::css;
std::u32string out;
out += print(in.m_prelude);
out += print(in.m_block);
return out;
}
inline std::u32string print(std::optional<wordring::wwwc::css::qualified_rule> const& in)
{
return in.has_value() ? print(*in) : U"";
}
inline std::u32string print(wordring::wwwc::css::declaration const& in)
{
using namespace wordring::wwwc::css;
std::u32string out;
out += in.m_name;
out += U":";
for(component_value const& c : in.m_value) out += print(c);
if (in.m_important_flag) out += U"!IMPORTANT";
return out;
}
inline std::u32string print(std::optional<wordring::wwwc::css::declaration> const& in)
{
return in.has_value() ? print(*in) : U"";
}
/*
inline std::u32string print(std::vector<wordring::wwwc::css::declaration> const& in)
{
std::u32string out;
for (auto const& c : in) out += print(c);
return out;
}
*/
	// Convert a token / component value to a string
inline std::u32string print(wordring::wwwc::css::syntax_primitive const& c)
{
using namespace wordring::wwwc::css;
switch (c.type())
{
case syntax_primitive_name::IdentToken: return c.get<ident_token>().m_value;
case syntax_primitive_name::FunctionToken: return c.get<function_token>().m_value;
case syntax_primitive_name::AtKeywordToken: return c.get<at_keyword_token>().m_value;
case syntax_primitive_name::HashToken: return c.get<hash_token>().m_value;
case syntax_primitive_name::StringToken: return c.get<string_token>().m_value;
case syntax_primitive_name::BadStringToken: return U" BAD_STRING_TOKEN ";
case syntax_primitive_name::UrlToken: return c.get<url_token>().m_value;
case syntax_primitive_name::BadUrlToken: return U" BAD_URL_TOKEN ";
case syntax_primitive_name::DelimToken: return std::u32string(1, c.get<delim_token>().m_value);
case syntax_primitive_name::NumberToken: return print(c.get<number_token>().m_value);
case syntax_primitive_name::PercentageToken: return print(c.get<percentage_token>().m_value) + U"%";
case syntax_primitive_name::DimensionToken:
{
dimension_token const& d = c.get<dimension_token>();
return print(d.m_value) + d.m_unit;
}
case syntax_primitive_name::WhitespaceToken: return U" ";
case syntax_primitive_name::CdoToken: return U"<!--";
case syntax_primitive_name::CdcToken: return U"-->";
case syntax_primitive_name::ColonToken: return U":";
case syntax_primitive_name::SemicolonToken: return U";";
case syntax_primitive_name::CommaToken: return U",";
case syntax_primitive_name::OpenSquareToken: return U"[";
case syntax_primitive_name::CloseSquareToken: return U"]";
case syntax_primitive_name::OpenParenToken: return U"(";
case syntax_primitive_name::CloseParenToken: return U")";
case syntax_primitive_name::OpenCurlyToken: return U"{";
case syntax_primitive_name::CloseCurlyToken: return U"}";
case syntax_primitive_name::EofToken: return U"EOF_TOKEN";
case syntax_primitive_name::PreservedTokens: return U"PRESERVED_TOKENS";
case syntax_primitive_name::Function: return print(c.get<function>());
case syntax_primitive_name::SimpleBlock: return print(c.get<simple_block>());
case syntax_primitive_name::ComponentValue: return print(c.get<component_value>());
case syntax_primitive_name::AtRule: return print(c.get<at_rule>());
case syntax_primitive_name::QualifiedRule: return print(c.get<qualified_rule>());
case syntax_primitive_name::Declaration: return print(c.get<declaration>());
default:
break;
}
assert(false);
return U"";
}
inline std::u32string print(wordring::wwwc::css::component_value const& c)
{
using namespace wordring::wwwc::css;
switch (c.type())
{
case syntax_primitive_name::IdentToken: return c.get<ident_token>().m_value;
//case syntax_primitive_name::FunctionToken: return c.get<function_token>().m_value;
case syntax_primitive_name::AtKeywordToken: return c.get<at_keyword_token>().m_value;
case syntax_primitive_name::HashToken: return c.get<hash_token>().m_value;
case syntax_primitive_name::StringToken: return c.get<string_token>().m_value;
case syntax_primitive_name::BadStringToken: return U" BAD_STRING_TOKEN ";
case syntax_primitive_name::UrlToken: return c.get<url_token>().m_value;
case syntax_primitive_name::BadUrlToken: return U" BAD_URL_TOKEN ";
case syntax_primitive_name::DelimToken: return std::u32string(1, c.get<delim_token>().m_value);
case syntax_primitive_name::NumberToken: return print(c.get<number_token>().m_value);
case syntax_primitive_name::PercentageToken: return print(c.get<percentage_token>().m_value) + U"%";
case syntax_primitive_name::DimensionToken:
{
dimension_token const& d = c.get<dimension_token>();
return print(d.m_value) + d.m_unit;
}
case syntax_primitive_name::WhitespaceToken: return U" ";
case syntax_primitive_name::CdoToken: return U"<!--";
case syntax_primitive_name::CdcToken: return U"-->";
case syntax_primitive_name::ColonToken: return U":";
case syntax_primitive_name::SemicolonToken: return U";";
case syntax_primitive_name::CommaToken: return U",";
case syntax_primitive_name::CloseSquareToken: return U"]";
case syntax_primitive_name::CloseParenToken: return U")";
case syntax_primitive_name::CloseCurlyToken: return U"}";
case syntax_primitive_name::EofToken: return U"EOF_TOKEN";
case syntax_primitive_name::Function: return print(c.get<function>());
case syntax_primitive_name::SimpleBlock: return print(c.get<simple_block>());
default:
break;
}
assert(false);
return U"";
}
	// Convert a sequence of tokens / component values to a string
inline std::u32string print(std::vector<wordring::wwwc::css::syntax_primitive> const& in)
{
using namespace wordring::wwwc::css;
std::u32string out;
for (auto c : in) out += print(c);
return out;
}
inline std::u32string print(std::vector<wordring::wwwc::css::component_value> const& in)
{
using namespace wordring::wwwc::css;
std::u32string out;
for (auto c : in) out += print(c);
return out;
}
}
BOOST_AUTO_TEST_SUITE(css_syntax_parsing_test)
// ------------------------------------------------------------------------------------------------
// 5. Parsing
//
// https://drafts.csswg.org/css-syntax-3/#parsing
// https://triple-underscore.github.io/css-syntax-ja.html#parsing
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// 5.2. Definitions
//
// https://drafts.csswg.org/css-syntax-3/#parser-definitions
// https://triple-underscore.github.io/css-syntax-ja.html#parser-definitions
// ------------------------------------------------------------------------------------------------
// token_stream::current_input_token
BOOST_AUTO_TEST_CASE(parsing_token_stream_current_input_token_1)
{
using namespace wordring::wwwc::css;
std::u32string css = UR"*(p{color: red;})*";
std::vector<syntax_primitive> tokens;
tokenize(css.begin(), css.end(), std::back_inserter(tokens), 0);
token_stream in(std::move(tokens));
in.consume();
BOOST_CHECK(print(in.current_input_token()) == U"p");
}
// token_stream::current_input_token
// Returns eof_token when the end of input has been reached.
BOOST_AUTO_TEST_CASE(parsing_token_stream_current_input_token_2)
{
using namespace wordring::wwwc::css;
std::u32string css = UR"*()*";
std::vector<syntax_primitive> tokens;
tokenize(css.begin(), css.end(), std::back_inserter(tokens), 0);
token_stream in(std::move(tokens));
in.consume();
BOOST_CHECK(print(in.current_input_token()) == U"EOF_TOKEN");
}
// token_stream::next_input_token
BOOST_AUTO_TEST_CASE(parsing_token_stream_next_input_token_1)
{
using namespace wordring::wwwc::css;
std::u32string css = UR"*(p{color: red;})*";
std::vector<syntax_primitive> tokens;
tokenize(css.begin(), css.end(), std::back_inserter(tokens), 0);
token_stream in(std::move(tokens));
in.consume();
BOOST_CHECK(print(in.next_input_token()) == U"{");
}
// token_stream::next_input_token
// Returns eof_token when the end of input has been reached.
BOOST_AUTO_TEST_CASE(parsing_token_stream_next_input_token_2)
{
using namespace wordring::wwwc::css;
std::u32string css = UR"*(p)*";
std::vector<syntax_primitive> tokens;
tokenize(css.begin(), css.end(), std::back_inserter(tokens), 0);
token_stream in(std::move(tokens));
in.consume();
BOOST_CHECK(print(in.next_input_token()) == U"EOF_TOKEN");
}
// token_stream::consume
BOOST_AUTO_TEST_CASE(parsing_token_stream_consume_1)
{
using namespace wordring::wwwc::css;
std::u32string css = UR"*(p{color: red;})*";
std::vector<syntax_primitive> tokens;
tokenize(css.begin(), css.end(), std::back_inserter(tokens), 0);
token_stream in(std::move(tokens));
BOOST_CHECK(print(in.consume()) == U"p");
BOOST_CHECK(print(in.consume()) == U"{");
BOOST_CHECK(print(in.consume()) == U"color");
BOOST_CHECK(print(in.consume()) == U":");
BOOST_CHECK(print(in.consume()) == U" ");
BOOST_CHECK(print(in.consume()) == U"red");
BOOST_CHECK(print(in.consume()) == U";");
BOOST_CHECK(print(in.consume()) == U"}");
BOOST_CHECK(print(in.consume()) == U"EOF_TOKEN");
}
// token_stream::consume
// Returns eof_token when the end of input has been reached.
BOOST_AUTO_TEST_CASE(parsing_token_stream_consume_2)
{
using namespace wordring::wwwc::css;
std::u32string css = UR"*()*";
std::vector<syntax_primitive> tokens;
tokenize(css.begin(), css.end(), std::back_inserter(tokens), 0);
token_stream in(std::move(tokens));
BOOST_CHECK(print(in.consume()) == U"EOF_TOKEN");
}
// token_stream::reconsume
BOOST_AUTO_TEST_CASE(parsing_token_stream_reconsume_1)
{
using namespace wordring::wwwc::css;
std::u32string css = UR"*(p{color: red;})*";
std::vector<syntax_primitive> tokens;
tokenize(css.begin(), css.end(), std::back_inserter(tokens), 0);
token_stream in(std::move(tokens));
BOOST_CHECK(print(in.consume()) == U"p");
in.reconsume();
BOOST_CHECK(print(in.consume()) == U"p");
BOOST_CHECK(print(in.consume()) == U"{");
}
// ------------------------------------------------------------------------------------------------
// 5.3. Parser Entry Points
//
// https://drafts.csswg.org/css-syntax-3/#parser-entry-points
// https://triple-underscore.github.io/css-syntax-ja.html#parser-entry-points
// ------------------------------------------------------------------------------------------------
// Normalize a token sequence
BOOST_AUTO_TEST_CASE(parsing_normalize_into_token_stream_1)
{
using namespace wordring::wwwc::css;
std::u32string in = UR"*( p { color: red; } )*";
std::vector<syntax_primitive> v, v1, v2;
tokenize(in.begin(), in.end(), std::back_inserter(v), 0);
v1 = v;
v2 = normalize_into_token_stream(std::move(v));
BOOST_CHECK(print(v1) == print(v2));
}
// Normalize a code point sequence
BOOST_AUTO_TEST_CASE(parsing_normalize_into_token_stream_2)
{
using namespace wordring::wwwc::css;
std::u32string css = UR"*( p { color: red; } )*";
std::vector<syntax_primitive> v1, v2;
tokenize(css.begin(), css.end(), std::back_inserter(v1), 0);
v2 = normalize_into_token_stream(std::move(css));
BOOST_CHECK(print(v1) == print(v2));
}
// ------------------------------------------------------------------------------------------------
// 5.3.1. Parse something according to a CSS grammar
//
// https://drafts.csswg.org/css-syntax-3/#parse-grammar
// https://triple-underscore.github.io/css-syntax-ja.html#parse-grammar
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_parse_grammar_1)
{
using namespace wordring::wwwc::css;
parse_context pc;
auto m = parse_grammar<complex_selector>(U"div span", pc);
BOOST_CHECK(m);
}
// ------------------------------------------------------------------------------------------------
// 5.3.2. Parse A Comma-Separated List According To A CSS Grammar
//
// https://drafts.csswg.org/css-syntax-3/#parse-comma-list
// https://triple-underscore.github.io/css-syntax-ja.html#parse-comma-list
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_parse_comma_list_1)
{
using namespace wordring::wwwc::css;
parse_context pc;
auto v = parse_comma_list<complex_selector>(U"a, p h1", pc);
BOOST_CHECK(v.size() == 2);
}
// ------------------------------------------------------------------------------------------------
// 5.3.3. Parse a stylesheet
//
// https://drafts.csswg.org/css-syntax-3/#parse-stylesheet
// https://triple-underscore.github.io/css-syntax-ja.html#parse-stylesheet
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_parse_stylesheet_1)
{
using namespace wordring::wwwc::css;
std::string css =
"@import \"my-styles.css\";"
"p > a { color: blue; }";
std::vector<syntax_primitive> v = parse_stylesheet(css);
std::u32string s = print(v);
BOOST_CHECK(s == U"@import my-styles.cssp > a { color: blue; }");
}
// ------------------------------------------------------------------------------------------------
// 5.3.4. Parse a list of rules
//
// https://drafts.csswg.org/css-syntax-3/#parse-list-of-rules
// https://triple-underscore.github.io/css-syntax-ja.html#parse-list-of-rules
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_parse_list_of_rules_1)
{
using namespace wordring::wwwc::css;
std::u32string css =
U"@import \"my-styles.css\";"
U"p > a { color: blue; }";
std::vector<syntax_primitive> v = parse_list_of_rules(std::move(css));
std::u32string s = print(v);
BOOST_CHECK(s == U"@import my-styles.cssp > a { color: blue; }");
}
// ------------------------------------------------------------------------------------------------
// 5.3.5. Parse a rule
//
// https://drafts.csswg.org/css-syntax-3/#parse-rule
// https://triple-underscore.github.io/css-syntax-ja.html#parse-rule
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_parse_rule_1)
{
using namespace wordring::wwwc::css;
std::u32string css = U"@import \"my-styles.css\" ; ";
std::optional<syntax_primitive> c = parse_rule(std::move(css));
BOOST_CHECK(c);
std::u32string s = print(*c);
BOOST_CHECK(s == U"@import my-styles.css ");
}
// ------------------------------------------------------------------------------------------------
// 5.3.6. Parse a declaration
//
// https://drafts.csswg.org/css-syntax-3/#parse-declaration
// https://triple-underscore.github.io/css-syntax-ja.html#parse-declaration
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_parse_declaration_1)
{
using namespace wordring::wwwc::css;
std::u32string css = U"background-color: red;";
std::optional<declaration> c = parse_declaration(std::move(css));
BOOST_CHECK(c);
std::u32string s = print(*c);
BOOST_CHECK(s == U"background-color:red;");
}
// Deliberately make it fail
BOOST_AUTO_TEST_CASE(parsing_parse_declaration_2)
{
using namespace wordring::wwwc::css;
std::u32string css = U"a, p h1";
std::optional<declaration> c = parse_declaration(std::move(css));
BOOST_CHECK(!c);
}
// ------------------------------------------------------------------------------------------------
// 5.3.7. Parse a list of declarations
//
// https://drafts.csswg.org/css-syntax-3/#parse-list-of-declarations
// https://triple-underscore.github.io/css-syntax-ja.html#parse-list-of-declarations
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_parse_list_of_declarations_1)
{
using namespace wordring::wwwc::css;
std::u32string css =
U"background-color: red;"
U"color: blue !important;";
std::vector<syntax_primitive> v = parse_list_of_declarations(std::move(css));
std::u32string s = print(v);
BOOST_CHECK(s == U"background-color:redcolor:blue!IMPORTANT");
}
// ------------------------------------------------------------------------------------------------
// 5.3.8. Parse a component value
//
// https://drafts.csswg.org/css-syntax-3/#parse-component-value
// https://triple-underscore.github.io/css-syntax-ja.html#parse-component-value
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_parse_component_value_1)
{
using namespace wordring::wwwc::css;
std::u32string css = U"a";
std::optional<component_value> c = parse_component_value(std::move(css));
BOOST_CHECK(c);
std::u32string s = print(*c);
BOOST_CHECK(s == U"a");
}
// ------------------------------------------------------------------------------------------------
// 5.3.9. Parse a list of component values
//
// https://drafts.csswg.org/css-syntax-3/#parse-list-of-component-values
// https://triple-underscore.github.io/css-syntax-ja.html#parse-list-of-component-values
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_parse_list_of_component_values_1)
{
using namespace wordring::wwwc::css;
std::u32string css = U"a, p h1";
std::vector<syntax_primitive> v = parse_list_of_component_values(std::move(css));
std::u32string s = print(v);
BOOST_CHECK(s == U"a, p h1");
}
// ------------------------------------------------------------------------------------------------
// 5.3.10. Parse a comma-separated list of component values
//
// https://drafts.csswg.org/css-syntax-3/#parse-comma-separated-list-of-component-values
//
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_parse_comma_separated_list_of_component_values_1)
{
using namespace wordring::wwwc::css;
std::u32string css = U"a, p h1";
std::vector<std::vector<syntax_primitive>> v = parse_comma_separated_list_of_component_values(std::move(css));
std::u32string s;
for (auto const& x : v) s += print(x);
BOOST_CHECK(s == U"a p h1");
}
// ------------------------------------------------------------------------------------------------
// 5.4.1. Consume a list of rules
//
// https://drafts.csswg.org/css-syntax-3/#consume-list-of-rules
// https://triple-underscore.github.io/css-syntax-ja.html#consume-list-of-rules
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_consume_list_of_rules_1)
{
using namespace wordring::wwwc::css;
std::u32string css =
U"@media print {\r\n"
U" body{ font - size: 10pt }\r\n"
U"}\r\n"
U"p > a {\r\n"
U" color: blue;\r\n"
U" text-decoration: underline;\r\n"
U"}\r\n";
token_stream in(normalize_into_token_stream(std::move(css), 0));
std::vector<syntax_primitive> v = consume_list_of_rules(in, false, nullptr);
std::u32string s = print(v);
BOOST_CHECK(s == U"@media print { body{ font - size: 10pt } }p > a { color: blue; text-decoration: underline; }");
}
// ------------------------------------------------------------------------------------------------
// 5.4.2. Consume an at-rule
//
// https://drafts.csswg.org/css-syntax-3/#consume-at-rule
// https://triple-underscore.github.io/css-syntax-ja.html#consume-at-rule
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_consume_at_rule_1)
{
using namespace wordring::wwwc::css;
std::u32string css = UR"*(@import "my-styles.css";)*";
token_stream in(normalize_into_token_stream(std::move(css), 0));
//BOOST_CHECK(is_at_keyword_token(in.current_input_token()));
at_rule rule = consume_at_rule(in, nullptr);
std::u32string s = print(rule);
BOOST_CHECK(print(rule.m_prelude.back()) == U"my-styles.css");
}
BOOST_AUTO_TEST_CASE(parsing_consume_at_rule_2)
{
using namespace wordring::wwwc::css;
std::u32string css =
U"@page :left {\r\n"
U" margin-left: 4cm;\r\n"
U" margin-right: 3cm;\r\n"
U"}";
token_stream in(normalize_into_token_stream(std::move(css), 0));
//BOOST_CHECK(is_at_keyword_token(in.current_input_token()));
at_rule rule = consume_at_rule(in, nullptr);
std::u32string s = print(rule);
BOOST_CHECK(s == U"@page :left { margin-left: 4cm; margin-right: 3cm; }");
}
BOOST_AUTO_TEST_CASE(parsing_consume_at_rule_3)
{
using namespace wordring::wwwc::css;
std::u32string css =
U"@media print {\r\n"
U" body{ font - size: 10pt }\r\n"
U"}\r\n";
token_stream in(normalize_into_token_stream(std::move(css), 0));
//BOOST_CHECK(is_at_keyword_token(in.current_input_token()));
at_rule rule = consume_at_rule(in, nullptr);
std::u32string s = print(rule);
BOOST_CHECK(s == U"@media print { body{ font - size: 10pt } }");
}
// ------------------------------------------------------------------------------------------------
// 5.4.3. Consume a qualified rule
//
// https://drafts.csswg.org/css-syntax-3/#consume-qualified-rule
//
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_consume_qualified_rule_1)
{
using namespace wordring::wwwc::css;
std::u32string css =
U"p > a {\r\n"
U" color: blue;\r\n"
U" text-decoration: underline;"
U"}\r\n";
token_stream in(normalize_into_token_stream(std::move(css), 0));
std::optional<qualified_rule> rule = consume_qualified_rule(in, nullptr);
std::u32string s = print(rule);
BOOST_CHECK(s == U"p > a { color: blue; text-decoration: underline;}");
}
// ------------------------------------------------------------------------------------------------
// 5.4.4. Consume a list of declarations
//
// https://drafts.csswg.org/css-syntax-3/#consume-list-of-declarations
//
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_consume_list_of_declarations_1)
{
using namespace wordring::wwwc::css;
std::u32string css =
U" color: blue;\r\n"
U" text-decoration: underline !important;";
token_stream in(normalize_into_token_stream(std::move(css), 0));
std::vector<syntax_primitive> decl = consume_list_of_declarations(in, nullptr);
std::u32string s = print(decl);
BOOST_CHECK(s == U"color:bluetext-decoration:underline!IMPORTANT");
}
// ------------------------------------------------------------------------------------------------
// 5.4.5. Consume a declaration
//
// https://drafts.csswg.org/css-syntax-3/#consume-declaration
//
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_consume_declaration_1)
{
using namespace wordring::wwwc::css;
std::u32string css = U"color: blue;";
token_stream in(normalize_into_token_stream(std::move(css), 0));
std::optional<declaration> decl = consume_declaration(in, nullptr);
auto s = print(decl);
BOOST_CHECK(s == U"color:blue;");
}
// ------------------------------------------------------------------------------------------------
// 5.4.6. Consume a component value
//
// https://drafts.csswg.org/css-syntax-3/#consume-component-value
//
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_consume_component_value_1)
{
using namespace wordring::wwwc::css;
std::u32string css = U"[ABC]";
token_stream in(normalize_into_token_stream(std::move(css), 0));
in.consume();
simple_block block = consume_simple_block(in, nullptr);
auto s = print(block);
BOOST_CHECK(s == U"[ABC]");
}
BOOST_AUTO_TEST_CASE(parsing_consume_component_value_2)
{
using namespace wordring::wwwc::css;
std::u32string css = U"opacity(50%)";
token_stream in(normalize_into_token_stream(std::move(css), 0));
in.consume();
function fn = consume_function(in, nullptr);
auto s = print(fn);
BOOST_CHECK(s == U"opacity(50%)");
}
// ------------------------------------------------------------------------------------------------
// 5.4.7. Consume a simple block
//
// https://drafts.csswg.org/css-syntax-3/#consume-simple-block
//
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_consume_simple_block_1)
{
using namespace wordring::wwwc::css;
std::u32string css = U"[ABC]";
token_stream in(normalize_into_token_stream(std::move(css), 0));
in.consume();
simple_block block = consume_simple_block(in, nullptr);
auto s = print(block);
BOOST_CHECK(s == U"[ABC]");
}
// ------------------------------------------------------------------------------------------------
// 5.4.8. Consume a function
//
// https://drafts.csswg.org/css-syntax-3/#consume-function
//
// ------------------------------------------------------------------------------------------------
BOOST_AUTO_TEST_CASE(parsing_consume_function_1)
{
using namespace wordring::wwwc::css;
std::u32string css = U"opacity(50%)";
token_stream in(normalize_into_token_stream(std::move(css), 0));
in.consume();
function fn = consume_function(in, nullptr);
auto s = print(fn);
BOOST_CHECK(s == U"opacity(50%)");
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "1ccff2762e65369835cbef86aac440ecbdd47620", "size": 27588, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/wwwc/css_syntax/parsing.cpp", "max_stars_repo_name": "wordring/wordring", "max_stars_repo_head_hexsha": "e2c9c2ed66010537efd78694521312c5b63f0510", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2020-03-07T05:23:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-23T15:19:18.000Z", "max_issues_repo_path": "test/wwwc/css_syntax/parsing.cpp", "max_issues_repo_name": "wordring/libwordring", "max_issues_repo_head_hexsha": "b71d990ea9288e9d9fe85521c8adac5d50471fa6", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/wwwc/css_syntax/parsing.cpp", "max_forks_repo_name": "wordring/libwordring", "max_forks_repo_head_hexsha": "b71d990ea9288e9d9fe85521c8adac5d50471fa6", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6485207101, "max_line_length": 115, "alphanum_fraction": 0.6057343773, "num_tokens": 6633}
|
// Copyright 2020, Beeri 15. All rights reserved.
// Author: Roman Gershman (romange@gmail.com)
//
#include "util/uring/http_handler.h"
#include <boost/beast/core.hpp> // for flat_buffer.
#include <boost/beast/http.hpp>
#include "base/logging.h"
namespace util {
using namespace http;
using namespace std;
using namespace boost;
namespace h2 = beast::http;
namespace uring {
namespace {
void FilezHandler(const QueryArgs& args, HttpContext* send) {
StringPiece file_name;
for (const auto& k_v : args) {
if (k_v.first == "file") {
file_name = k_v.second;
}
}
if (file_name.empty()) {
http::StringResponse resp = MakeStringResponse(h2::status::unauthorized);
return send->Invoke(std::move(resp));
}
FileResponse fresp;
string fname = strings::AsString(file_name);
auto ec = LoadFileResponse(fname, &fresp);
if (ec) {
StringResponse res = MakeStringResponse(h2::status::not_found);
SetMime(kTextMime, &res);
if (ec == boost::system::errc::no_such_file_or_directory)
res.body() = "The resource '" + fname + "' was not found.";
else
res.body() = "Error '" + ec.message() + "'.";
return send->Invoke(std::move(res));
}
return send->Invoke(std::move(fresp));
}
} // namespace
HttpListenerBase::HttpListenerBase() {
favicon_ =
"https://rawcdn.githack.com/romange/gaia/master/util/http/"
"favicon-32x32.png";
resource_prefix_ = "https://cdn.jsdelivr.net/gh/romange/gaia/util/http";
}
bool HttpListenerBase::HandleRoot(const RequestType& request,
HttpContext* cntx) const {
StringPiece target = as_absl(request.target());
if (target == "/favicon.ico") {
h2::response<h2::string_body> resp =
MakeStringResponse(h2::status::moved_permanently);
resp.set(h2::field::location, favicon_);
resp.set(h2::field::server, "GAIA");
resp.keep_alive(request.keep_alive());
cntx->Invoke(std::move(resp));
return true;
}
StringPiece path, query;
tie(path, query) = ParseQuery(target);
auto args = SplitQuery(query);
if (path == "/") {
cntx->Invoke(BuildStatusPage(args, resource_prefix_));
return true;
}
if (path == "/flagz") {
h2::response<h2::string_body> resp(h2::status::ok, request.version());
cntx->Invoke(ParseFlagz(args));
return true;
}
if (path == "/filez") {
FilezHandler(args, cntx);
return true;
}
if (path == "/profilez") {
cntx->Invoke(ProfilezHandler(args));
return true;
}
return false;
}
bool HttpListenerBase::RegisterCb(StringPiece path, RequestCb cb) {
CbInfo cb_info{.cb = cb};
auto res = cb_map_.emplace(path, cb_info);
return res.second;
}
HttpHandler2::HttpHandler2(const HttpListenerBase* base) : base_(base) {
}
void HttpHandler2::HandleRequests() {
CHECK(socket_.IsOpen());
beast::flat_buffer buffer;
RequestType request;
system::error_code ec;
AsioStreamAdapter<> asa(socket_);
while (true) {
h2::read(asa, buffer, request, ec);
if (ec) {
break;
}
HttpContext cntx(asa);
VLOG(1) << "Full Url: " << request.target();
HandleOne(request, &cntx);
}
VLOG(1) << "HttpHandler2 exit";
}
void HttpHandler2::HandleOne(const RequestType& req, HttpContext* cntx) {
CHECK(base_);
if (base_->HandleRoot(req, cntx)) {
return;
}
StringPiece target = as_absl(req.target());
StringPiece path, query;
tie(path, query) = ParseQuery(target);
VLOG(2) << "Searching for " << path;
auto it = base_->cb_map_.find(path);
if (it == base_->cb_map_.end()) {
h2::response<h2::string_body> resp(h2::status::unauthorized, req.version());
return cntx->Invoke(std::move(resp));
}
auto args = SplitQuery(query);
it->second.cb(args, cntx);
}
} // namespace uring
} // namespace util
|
{"hexsha": "e8058c24a2cb2c7ce057b47e9e08d36537a53280", "size": 3796, "ext": "cc", "lang": "C++", "max_stars_repo_path": "util/uring/http_handler.cc", "max_stars_repo_name": "ekatz-quotient/gaia", "max_stars_repo_head_hexsha": "63305f443416deccf96fd8ec2fb60bcb560e232b", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 72.0, "max_stars_repo_stars_event_min_datetime": "2019-01-25T09:03:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-16T01:01:55.000Z", "max_issues_repo_path": "util/uring/http_handler.cc", "max_issues_repo_name": "ekatz-quotient/gaia", "max_issues_repo_head_hexsha": "63305f443416deccf96fd8ec2fb60bcb560e232b", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 35.0, "max_issues_repo_issues_event_min_datetime": "2019-09-20T05:02:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T17:28:58.000Z", "max_forks_repo_path": "util/uring/http_handler.cc", "max_forks_repo_name": "ekatz-quotient/gaia", "max_forks_repo_head_hexsha": "63305f443416deccf96fd8ec2fb60bcb560e232b", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 15.0, "max_forks_repo_forks_event_min_datetime": "2018-08-12T13:43:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T08:04:27.000Z", "avg_line_length": 24.9736842105, "max_line_length": 80, "alphanum_fraction": 0.6527924131, "num_tokens": 1046}
|
"""
..
Copyright (c) 2014-2017, Magni developers.
All rights reserved.
See LICENSE.rst for further information.
Module providing utilities for control of plotting using `matplotlib`.
The module has a number of public attributes which provide settings for
colormap cycles, linestyle cycles, and marker cycles that may be used in
combination with `matplotlib`.
Routine listings
----------------
setup_matplotlib(settings={}, cmap=None)
    Function that sets the Magni default `matplotlib` configuration.
colour_collections : dict
Collections of colours that may be used in e.g., a `matplotlib`
color_cycle / prop_cycle.
seq_cmaps : list
Names of `matplotlib.cm` colormaps optimized for sequential data.
div_cmaps : list
Names of `matplotlib.cm` colormaps optimized for diverging data.
linestyles : list
A subset of linestyles from `matplotlib.lines`
markers : list
A subset of markers from `matplotlib.markers`
Examples
--------
Use the default Magni matplotlib settings.
>>> import magni
>>> magni.utils.plotting.setup_matplotlib()
Get the normalised 'Blue' colour brew from the psp colour map:
>>> magni.utils.plotting.colour_collections['psp']['Blue']
((0.1255, 0.502, 0.8745),)
"""
from __future__ import division
import warnings
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from pkg_resources import parse_version as _parse_version
from magni.utils.validation import decorate_validation as _decorate_validation
from magni.utils.validation import validate_generic as _generic
from magni.utils.validation import validate_levels as _levels
from magni.utils.validation import validate_numeric as _numeric
if _parse_version(mpl.__version__) >= _parse_version('1.5.0'):
import cycler
_mpl_prop_era = True
else:
_mpl_prop_era = False
class _ColourCollection(object):
"""
A container for colour maps.
A single colour is stored as an RGB 3-tuple of integers in the interval
[0,255]. A set of related colours is termed a colour brew and is stored as
a list of colours. A set of related colour brews is termed a colour
collection and is stored as a dictionary. The dictionary key identifies
the name of the colour collection whereas the value is the list of colour
brews.
The default colour collections named "cb*" are colorblind safe, print
friendly, and photocopy-able. They have been created using the online
ColorBrewer 2.0 tool [1]_.
Parameters
----------
brews : dict
The dictionary of colour brews from which the colour collection is
created.
Notes
-----
Each colour brew is a list (or tuple) of length 3 lists (or tuples) of RGB
values.
References
----------
.. [1] M. Harrower and C. A. Brewer, "ColorBrewer.org: An Online Tool for
Selecting Colour Schemes for Maps", *The Cartographic Journal*, vol. 40,
pp. 27-37, 2003 (See also: http://colorbrewer2.org/)
"""
def __init__(self, brews):
@_decorate_validation
def validate_input():
_levels('brews', (_generic(None, 'mapping'),
_generic(None, 'explicit collection'),
_generic(None, 'explicit collection', len_=3),
_numeric(None, 'integer', range_='[0;255]')))
validate_input()
self._brews = brews
def __getitem__(self, name):
"""
Return a single colour brew.
The returned colour brew is normalised in the sense of matplotlib
normalised rgb values, i.e., colours are 3-tuples of floats in the
interval [0, 1].
Parameters
----------
name : str
Name of the colour brew to return.
Returns
-------
brew : tuple
A colour brew list.
"""
@_decorate_validation
def validate_input():
_generic('name', 'string', value_in=tuple(self._brews.keys()))
validate_input()
return tuple([tuple([round(val / 255, 4) for val in colour])
for colour in self._brews[name]])
colour_collections = {
'cb4': _ColourCollection({
'OrRd': ((254, 240, 217), (253, 204, 138), (252, 141, 89),
(215, 48, 31)),
'PuOr': ((230, 97, 1), (253, 184, 99), (178, 171, 210),
(94, 60, 153))}),
'cb3': _ColourCollection({
'BuGn': ((229, 245, 249), (153, 216, 201), (44, 162, 95)),
'BuPu': ((224, 236, 244), (158, 188, 218), (136, 86, 167)),
'GuBu': ((224, 243, 219), (168, 221, 181), (67, 162, 202)),
'OrRd': ((254, 232, 200), (253, 187, 132), (227, 74, 51)),
'PuBu': ((236, 231, 242), (166, 189, 219), (43, 140, 190)),
'PuBuGn': ((236, 226, 240), (166, 189, 219), (28, 144, 153)),
'PuRd': ((231, 225, 239), (201, 148, 199), (221, 28, 119)),
'RdPu': ((253, 224, 221), (250, 159, 181), (197, 27, 138)),
'YlGn': ((247, 252, 185), (173, 221, 142), (49, 163, 84)),
'YlGnBu': ((237, 248, 177), (127, 205, 187), (44, 127, 184)),
'YlOrBr': ((255, 247, 188), (254, 196, 79), (217, 95, 14)),
'YlOrRd': ((255, 237, 160), (254, 178, 76), (240, 59, 32)),
'Blues': ((222, 235, 247), (158, 202, 225), (49, 130, 189)),
'Greens': ((229, 245, 224), (161, 217, 155), (49, 163, 84)),
'Greys': ((240, 240, 240), (189, 189, 189), (99, 99, 99)),
'Purples': ((239, 237, 245), (188, 189, 220), (117, 107, 177)),
'Reds': ((254, 224, 210), (252, 146, 114), (222, 45, 38)),
'PuOr': ((241, 163, 64), (247, 247, 247), (153, 142, 195))}),
'psp': _ColourCollection({
'Blue': ((32, 128, 223),),
'Orange': ((223, 128, 32),),
'GreenY': ((128, 223, 32),),
'Purple': ((128, 32, 223),),
'Red': ((223, 32, 128),),
'GreenB': ((223, 32, 128),)}),
'bgg': _ColourCollection({
'Black': ((0, 0, 0),),
'Green': ((0, 191, 0),),
'Grey': ((170, 170, 170),)})}
seq_cmaps = ['YlOrRd', 'YlOrRd_r', 'YlGnBu', 'YlGnBu_r', 'PuBuGn', 'PuBuGn_r',
'YlOrBr', 'YlOrBr_r', 'BuGn', 'BuGn_r', 'GnBu', 'GnBu_r',
'PuBu', 'PuBu_r', 'PuRd', 'PuRd_r']
div_cmaps = ['PRGn', 'PRGn_r', 'PiYG', 'PiYG_r', 'RdBu', 'RdBu_r', 'PuOr',
'PuOr_r', 'RdGy', 'RdGy_r']
linestyles = ['-', '--', '-.', ':']
markers = ['o', '^', 'x', '+', 'd']
def setup_matplotlib(settings={}, cmap=None):
"""
Adjust the configuration of `matplotlib`.
Sets the default configuration of `matplotlib` to optimize for producing
high quality plots of the data produced by the functionality provided in
    Magni.
Parameters
----------
settings : dict, optional
A dictionary of custom matplotlibrc settings. See examples for details
about the structure of the dictionary.
cmap : str or tuple, optional
        Colormap to be used by matplotlib (the default is None, which implies
that the 'coolwarm' colormap is used). If a tuple is supplied it must
be a ('colormap_name', matplotlib.colors.Colormap()) tuple.
Raises
------
UserWarning
If the supplied custom settings are invalid.
Examples
--------
For example, set lines.linewidth=2 and lines.color='r'.
>>> from magni.utils.plotting import setup_matplotlib
>>> custom_settings = {'lines': {'linewidth': 2, 'color': 'r'}}
>>> setup_matplotlib(custom_settings)
"""
@_decorate_validation
def validate_input():
_levels('settings', (_generic(None, 'mapping'),
_generic(None, 'mapping')))
_generic('cmap', ('string', tuple), ignore_none=True)
if isinstance(cmap, tuple):
_generic(('cmap', 0), 'string')
_generic(('cmap', 1), mpl.colors.Colormap)
validate_input()
global _settings, _cmap
for name, setting in settings.items():
if name in _settings:
_settings[name].update(setting)
else:
_settings[name] = setting
for name, setting in _settings.items():
try:
mpl.rc(name, **setting)
except (AttributeError, KeyError):
warnings.warn('Setting {!r} ignored.'.format(name), UserWarning)
if cmap is not None:
if isinstance(cmap, tuple):
mpl.cm.register_cmap(name=cmap[0], cmap=cmap[1])
cmap = cmap[0]
plt.set_cmap(cmap)
elif _cmap is not None:
plt.set_cmap(_cmap)
_settings = {}
_cmap = None
if _mpl_prop_era:
# Matplotlib >= 1.5.0
_style_cycle = cycler.cycler('linestyle', linestyles)
_color_cycle = cycler.cycler('color', colour_collections['cb4']['PuOr'])
_prop_settings = {'axes': {'prop_cycle': _style_cycle * _color_cycle}}
else:
_prop_settings = {
'axes': {'color_cycle': colour_collections['cb4']['PuOr']}}
_settings = dict({'text': {'usetex': False},
'font': {'size': 12},
'mathtext': {'fontset': 'cm'},
'pdf': {'fonttype': 42},
'ps': {'fonttype': 42},
'legend': {'fontsize': 11},
'lines': {'linewidth': 2},
'figure': {
'figsize': (8.0, float(8.0 / ((1 + np.sqrt(5)) / 2))),
'dpi': 600},
'image': {'interpolation': 'none'}}, **_prop_settings)
_cmap = 'coolwarm'
|
{"hexsha": "c4f23e73523d15d6fb896f9510f070e11dd67e63", "size": 9480, "ext": "py", "lang": "Python", "max_stars_repo_path": "magni/utils/plotting.py", "max_stars_repo_name": "SIP-AAU/Magni", "max_stars_repo_head_hexsha": "6328dc98a273506f433af52e6bd394754a844550", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2015-02-09T10:17:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-21T09:38:04.000Z", "max_issues_repo_path": "magni/utils/plotting.py", "max_issues_repo_name": "SIP-AAU/Magni", "max_issues_repo_head_hexsha": "6328dc98a273506f433af52e6bd394754a844550", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2015-03-20T12:00:40.000Z", "max_issues_repo_issues_event_max_datetime": "2015-03-20T12:01:16.000Z", "max_forks_repo_path": "magni/utils/plotting.py", "max_forks_repo_name": "SIP-AAU/Magni", "max_forks_repo_head_hexsha": "6328dc98a273506f433af52e6bd394754a844550", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2015-04-28T03:08:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-24T13:29:24.000Z", "avg_line_length": 34.2238267148, "max_line_length": 79, "alphanum_fraction": 0.5839662447, "include": true, "reason": "import numpy", "num_tokens": 2723}
|
import numpy as np
AGGREGATE_MAP = {
'mean': np.mean,
'min': np.min,
'median': np.median,
'max': np.max,
}
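# Usage example for the lookup table above:
#   AGGREGATE_MAP['median']([3, 1, 2])  # -> 2.0 (numpy's median of the values)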
|
{"hexsha": "b2600b1f17ce40988a6f406209f5ebd2c1205d89", "size": 124, "ext": "py", "lang": "Python", "max_stars_repo_path": "summarizer/util.py", "max_stars_repo_name": "stungkit/bert-extractive-summarizer", "max_stars_repo_head_hexsha": "84f27333aef33629444589c24933b76448777d4f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "summarizer/util.py", "max_issues_repo_name": "stungkit/bert-extractive-summarizer", "max_issues_repo_head_hexsha": "84f27333aef33629444589c24933b76448777d4f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "summarizer/util.py", "max_forks_repo_name": "stungkit/bert-extractive-summarizer", "max_forks_repo_head_hexsha": "84f27333aef33629444589c24933b76448777d4f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.7777777778, "max_line_length": 24, "alphanum_fraction": 0.5483870968, "include": true, "reason": "import numpy", "num_tokens": 41}
|
@testset "Test time series data application" begin
sys = PSB.build_system(PSB.PSITestSystems, "c_sys5")
pmi_data = PMI.get_pm_data(sys)
mn_data =
PMI.apply_time_series(pmi_data, sys, last(PSY.get_forecast_initial_times(sys)), 3:5)
@test mn_data["multinetwork"]
@test length(mn_data["nw"]) == 3
pmi_mn_data = PMI.get_pm_data(
sys,
start_time = last(PSY.get_forecast_initial_times(sys)),
time_periods = 3:5,
)
@test mn_data == pmi_mn_data
tp_data = PMI.get_pm_data(
sys,
start_time = first(PSY.get_forecast_initial_times(sys)),
period = 5,
)
@test tp_data != pmi_data
PMI.apply_time_period!(pmi_data, sys, first(PSY.get_forecast_initial_times(sys)), 5)
@test tp_data == pmi_data
#TODO: add tests to verify data is being applied correctly
end
|
{"hexsha": "33e4b6ff9f2b80b6781c2d1297f27186a70ea5aa", "size": 862, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_apply_time_series.jl", "max_stars_repo_name": "NREL-SIIP/PowerModelsInterface", "max_stars_repo_head_hexsha": "b4b589db5e276d71973ba169db29437fb9b5cb14", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-10T13:44:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-10T13:44:42.000Z", "max_issues_repo_path": "test/test_apply_time_series.jl", "max_issues_repo_name": "NREL-SIIP/PowerModelsInterface", "max_issues_repo_head_hexsha": "b4b589db5e276d71973ba169db29437fb9b5cb14", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-09-14T05:04:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-16T21:15:31.000Z", "max_forks_repo_path": "test/test_apply_time_series.jl", "max_forks_repo_name": "NREL-SIIP/PowerModelsInterface", "max_forks_repo_head_hexsha": "b4b589db5e276d71973ba169db29437fb9b5cb14", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3529411765, "max_line_length": 92, "alphanum_fraction": 0.662412993, "num_tokens": 254}
|
import streamlit as st
import pandas as pd
from pyvis.network import Network
import networkx as nx
import matplotlib.pyplot as plt
import bz2
import pickle
import _pickle as cPickle
import streamlit.components.v1 as components  # used below to embed the pyvis HTML
# Load any compressed pickle file
def decompress_pickle(file):
data = bz2.BZ2File(file, 'rb')
data = cPickle.load(data)
return data
try:
del uploaded_files
except:
pass
uploaded_files = st.file_uploader("Choose files", accept_multiple_files=True)
st.write("Files uploaded")
# for uploaded_file in uploaded_files:
# concepts = decompress_pickle(uploaded_file)
# st.write("filename:", uploaded_file.name)
filenames = [file.name for file in uploaded_files]
Agg_Conceptdata = pd.DataFrame()
All_Conceptdata = pd.DataFrame()
Agg_np_to_sent = dict()
Agg_sent_to_npflat = dict()
Agg_sent_to_phen = dict()
Agg_phen_to_sent = dict()
Agg_att_to_sent = dict()
Agg_sent_to_att = dict()
Agg_ins_to_sent = dict()
Agg_sent_to_ins = dict()
Agg_set_to_sent = dict()
Agg_np_to_forms = dict()
doc_to_np = dict()
np_to_doc = dict()
Agg_df = pd.DataFrame()
Agg_np_to_roles = dict()
Agg_sent_to_clt = dict()
Agg_sents = dict()
#Agg_sents_df = pd.DataFrame()
#Agg_docs_df = pd.DataFrame()
All_df = pd.DataFrame()
for uploaded_file in uploaded_files:
concepts = decompress_pickle(uploaded_file)
filename = uploaded_file.name
#st.write("filename:", uploaded_file.name)
Conceptdata = concepts['Conceptdata']
sent_to_npflat = concepts['sent_to_npflat']
np_to_sent = concepts['np_to_sent']
np_to_forms = concepts['np_to_forms']
sent_to_phen = concepts['sent_to_phen']
phen_to_sent = concepts['phen_to_sent']
sent_to_att = concepts['sent_to_att']
    att_to_sent = concepts['att_to_sent']
ins_to_sent = concepts['ins_to_sent']
sent_to_ins = concepts['sent_to_ins']
set_to_sent = concepts['set_to_sent']
sent_to_set = concepts['sent_to_set']
np_to_roles = concepts['np_to_roles']
sent_to_clt = concepts['sent_to_clt']
sents = concepts['sents']
df = concepts['df']
Conceptdata['docname'] = filename
Agg_Conceptdata = Agg_Conceptdata.append(Conceptdata,ignore_index=True)
Agg_sent_to_clt[filename.replace(".pbz2","")] = sent_to_clt
Agg_np_to_sent[filename.replace(".pbz2","")] = np_to_sent
Agg_sents[filename.replace(".pbz2","")] = sents
Agg_sent_to_npflat[filename.replace(".pbz2","")] = sent_to_npflat
Agg_df = Agg_df.append(df,ignore_index=True)
doc_to_np[filename] = list(np_to_sent.keys())
for np in np_to_sent:
# if np in Agg_np_to_sent:
# Agg_np_to_sent[np] = Agg_np_to_sent[np] + [(filename,s) for s in np_to_sent[np]]
# else:
# Agg_np_to_sent[np] = [(filename,s) for s in np_to_sent[np]]
if np in np_to_doc:
np_to_doc[np] = np_to_doc[np] + [filename]
else:
np_to_doc[np] = [filename]
for np in np_to_forms:
if np in Agg_np_to_forms:
Agg_np_to_forms[np] = Agg_np_to_forms[np] + np_to_forms[np]
else:
Agg_np_to_forms[np] = np_to_forms[np]
for np in np_to_roles:
if np in Agg_np_to_roles:
Agg_np_to_roles[np] = Agg_np_to_roles[np] + np_to_roles[np]
else:
Agg_np_to_roles[np] = np_to_roles[np]
for np in phen_to_sent:
if np in Agg_phen_to_sent:
Agg_phen_to_sent[np] = Agg_phen_to_sent[np] + [(filename,s) for s in phen_to_sent[np]]
else:
Agg_phen_to_sent[np] = [(filename,s) for s in phen_to_sent[np]]
for np in att_to_sent:
if np in Agg_att_to_sent:
Agg_att_to_sent[np] = Agg_att_to_sent[np] + [(filename,s) for s in att_to_sent[np]]
else:
Agg_att_to_sent[np] = [(filename,s) for s in att_to_sent[np]]
for np in set_to_sent:
if np in Agg_set_to_sent:
Agg_set_to_sent[np] = Agg_set_to_sent[np] + [(filename,s) for s in set_to_sent[np]]
else:
Agg_set_to_sent[np] = [(filename,s) for s in set_to_sent[np]]
for np in ins_to_sent:
if np in Agg_ins_to_sent:
Agg_ins_to_sent[np] = Agg_ins_to_sent[np] + [(filename,s) for s in ins_to_sent[np]]
else:
Agg_ins_to_sent[np] = [(filename,s) for s in ins_to_sent[np]]
st.write("""
Showing pyvis network drawing
""")
net = Network(height='700px', width = '500px',bgcolor='#222222', font_color='white')
net.from_nx(G)
net.repulsion(node_distance=420, central_gravity=0.33,
spring_length=110, spring_strength=0.10,
damping=0.95)
# Save the graph as an HTML file, read it back, and display it in an HTML component
try:
    net.save_graph('pyvis_graph.html')
    with open('pyvis_graph.html', 'r', encoding='utf-8') as HtmlFile:
        # Load HTML file in HTML component for display on the Streamlit page
        components.html(HtmlFile.read(), height=435)
except Exception:
    st.write("Could not save or load the graph")
|
{"hexsha": "69e9b6ac19b84d4d1032896ae0625c658ace113a", "size": 4779, "ext": "py", "lang": "Python", "max_stars_repo_path": "streamlit/Autodidact/myapp_old.py", "max_stars_repo_name": "rts1988/IntelligentTutoringSystem_Experiments", "max_stars_repo_head_hexsha": "b2f797a5bfff18fb37c7a779a19a72a75db7eeef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-30T17:10:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-30T17:10:30.000Z", "max_issues_repo_path": "streamlit/myapp_old.py", "max_issues_repo_name": "rts1988/IntelligentTutoringSystem_Experiments", "max_issues_repo_head_hexsha": "b2f797a5bfff18fb37c7a779a19a72a75db7eeef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "streamlit/myapp_old.py", "max_forks_repo_name": "rts1988/IntelligentTutoringSystem_Experiments", "max_forks_repo_head_hexsha": "b2f797a5bfff18fb37c7a779a19a72a75db7eeef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-05-02T05:11:15.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-02T05:11:15.000Z", "avg_line_length": 30.6346153846, "max_line_length": 92, "alphanum_fraction": 0.7141661435, "include": true, "reason": "import networkx", "num_tokens": 1389}
|
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
# Load a text file as a string.
with open('hound.txt') as infile:
text = infile.read()
# Load an image as a NumPy array.
mask = np.array(Image.open('holmes.png'))
# Copy the built-in stop words and add extra words (copying avoids mutating the shared set).
stopwords = set(STOPWORDS)
stopwords.update(['us', 'one', 'will', 'said', 'now', 'well', 'man', 'may',
'little', 'say', 'must', 'way', 'long', 'yet', 'mean',
'put', 'seem', 'asked', 'made', 'half', 'much',
'certainly', 'might', 'came'])
# Generate word cloud.
wc = WordCloud(max_words=500,
relative_scaling=0.5,
mask=mask,
background_color='white',
stopwords=stopwords,
margin=2,
random_state=7,
contour_width=2,
contour_color='brown',
colormap='copper').generate(text)
# Turn wc object into an array.
colors = wc.to_array()
# Plot and save word cloud.
plt.figure()
plt.title("Chamberlain Hunt Academy Senior Class Presents:\n",
fontsize=15, color='brown')
plt.text(-10, 0, "The Hound of the Baskervilles",
fontsize=20, fontweight='bold', color='brown')
plt.suptitle("7:00 pm May 10-12 McComb Auditorium",
x=0.52, y=0.095, fontsize=15, color='brown')
plt.imshow(colors, interpolation="bilinear")
plt.axis('off')
plt.show()
##plt.savefig('hound_wordcloud.png')
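# Note: 'hound.txt' (the novel text) and 'holmes.png' (the mask image) are expected
# in the working directory; to save the figure, call plt.savefig(...) before plt.show().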
|
{"hexsha": "0c013e5790891e8d52ad2a6bd2d348ade5bf44be", "size": 1512, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter_3/wc_hound.py", "max_stars_repo_name": "Soccertanker/Real_World_Python", "max_stars_repo_head_hexsha": "5a0671ec11e5b5522c8ee4683bac880b92d8ac12", "max_stars_repo_licenses": ["FTL"], "max_stars_count": 88, "max_stars_repo_stars_event_min_datetime": "2020-05-28T11:03:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:27:37.000Z", "max_issues_repo_path": "Chapter_3/wc_hound.py", "max_issues_repo_name": "Soccertanker/Real_World_Python", "max_issues_repo_head_hexsha": "5a0671ec11e5b5522c8ee4683bac880b92d8ac12", "max_issues_repo_licenses": ["FTL"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2021-01-02T07:07:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-17T22:42:53.000Z", "max_forks_repo_path": "Chapter_3/wc_hound.py", "max_forks_repo_name": "Soccertanker/Real_World_Python", "max_forks_repo_head_hexsha": "5a0671ec11e5b5522c8ee4683bac880b92d8ac12", "max_forks_repo_licenses": ["FTL"], "max_forks_count": 78, "max_forks_repo_forks_event_min_datetime": "2020-05-27T20:17:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T15:20:08.000Z", "avg_line_length": 32.170212766, "max_line_length": 75, "alphanum_fraction": 0.6025132275, "include": true, "reason": "import numpy", "num_tokens": 388}
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 18 11:49:51 2017
@author: Jalen Morgan, Taylor Paskett
"""
import numpy as np
import sympy
from stablab.finite_difference_code import pde
from sympy import Matrix
from stablab.finite_difference_code import approximate
"""Used for both pdes and odes"""
def newtonSolve(initialGuess, newtonFunction, newtonJacobian, p=[], MAX_ERROR = 1e-8, TIMEOUT = 45, printIterations = True):
count = 0
#Make the initial guess for the coefficient List
inVector = initialGuess
outVector = newtonFunction(inVector, p)
#print(max(max(map(abs,outVector))))
#Loop through Newton's method.
while max(map(abs,outVector))-MAX_ERROR > 0:
#while True:
count += 1
A = newtonJacobian(inVector, p)
b = (outVector - np.dot(A,inVector))
inVector = np.linalg.solve(A,-b)
outVector = newtonFunction(inVector, p)
#Print the progress
        if printIterations: print(count, end='')
if count == TIMEOUT:
#print("should be zero:", outVector)
return (inVector)
return (inVector)
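# Hypothetical usage sketch of newtonSolve on the scalar problem f(x) = x**2 - 2 = 0
# (the extra parameter list p is simply passed through and unused here):
#   f = lambda v, p: np.array([v[0]**2 - 2.0])
#   J = lambda v, p: np.array([[2.0*v[0]]])
#   root = newtonSolve(np.array([1.0]), f, J, printIterations=False)  # ~ [1.41421356]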
def stringToSympy(inputString, myDict):
    #Replace the _x suffix with a sympy derivative with respect to x.
for index in range(len(inputString)):
inputString[index] = inputString[index].replace("_x", ".diff(x)")
symbolOde = sympy.sympify(inputString, locals=myDict)
return symbolOde
def sympyGetParameters(inputSympy, myDict):
parameterSet = set()
for index in range(len(inputSympy)):
symbolSet = inputSympy[index].free_symbols
parameterSet = parameterSet | symbolSet
parameterSet -= set(myDict.values())
return list(parameterSet)
def shouldRewriteFile(fileName, stringToCompare):
try:
fileToReplace = open(fileName,'r')
fileString = ''.join(fileToReplace.readlines())
if fileString == stringToCompare:
print("************Not rewriting************")
return False
else:
print("**************Rewriting**************")
return True
except FileNotFoundError:
print("****************Writing***************")
return True
#Sets the default parameters so the user can simply run with defaults.
def init():
p = {
"name": "",
"newtonError": 1e-5,
#Ode defaults
"odeXList": [],
"odeGuess": [],
"ode": [],
"odeJacobian": ["estimate"],
"odeUnknowns": [],
"odeChebPoints": 10,
"odeGraphPoints": 100,
"odeBC": [],
"odeJacobianBC": ["estimate"],
"L": 1,
#Pde defaults
"pde": [],
"pdeInitial": "tanh",
"pdeSubstitute": [],
"pdeUnknowns": [],
"pdeFD": crankNicholson,
"pdeXPoints": 35,
"pdeTPoints": 35,
"pdeInitialValueFiles": [],
"T": 1
}
return p
def generateFiniteDifferenceConservation(f0,f1,g,B,unknowns, **kwargs):
#Input an equation of type 'f0(u)_t + f1(u)_x + g(u) = (B(u)u_x)_x'
f0 = toList(f0)
f1 = toList(f1)
g = toList(g)
B = toDoubleList(B)
#Assure inputs are of the correct form.
if not (len(B) == len(B[0])):
raise ValueError("B must be a square matrix")
if not (len(f0) == len(f1)):
raise ValueError("f0 and f1 must be the same size.")
if not (len(f0) == len(g)):
raise ValueError("f0 and g must be the same size.")
if not (len(f0) == len(B)):
raise ValueError("f0 and B[0] must be same size")
unknowns = toList(unknowns)
pdeString = []
for i in range(len(f0)):
bterm = ''
for j in range(len(B[0])):
if not j == 0:
bterm += " + "
bterm += str(B[i][j])+'*'+str(unknowns[j])+'_xx + ' + str(unknowns[j])+'_x'+'*'+str(B[i][j])+'_x'
#bterm = 'U_xx'
#bterm = 'U_xx'
pdeString.append('('+str(f0[i])+')_t + ('+str(f1[i])+')_x + '+str(g[i])+' - ('+bterm+')')
print(pdeString)
#print('generateFiniteDifference('+str(pdeString)+','+str(unknowns)+','+str(**kwargs)+')')
return generateFiniteDifference(pdeString, unknowns, **kwargs)
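# Hedged usage sketch (exposition only; the argument values below are illustrative, not from
# the original source): for viscous Burgers' equation u_t + (u**2/2)_x = (0.1*u_x)_x the
# conservation-form builder would be called with plain strings, e.g.
#   f, createJacobian = generateFiniteDifferenceConservation('u', 'u**2/2', '0', '0.1', 'u',
#                                                            fd=crankNicholson)
# which assembles the residual string '(u)_t + (u**2/2)_x + 0 - (0.1*u_xx + u_x*0.1_x)'
# before handing it to generateFiniteDifference.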
def generateFiniteDifference(pdeString, unknowns, **kwargs):
#Convert from a list of coefficients to a list of points in X_POINTS so
#They can be used as an initial guess for the newton solve of the PDE.
myDict = {"knownEquations": [], "fd": crankNicholson}
myDict.update(kwargs)
unknowns = toList(unknowns)
equations = toList(pdeString)
#print(str(unknowns))
#print(str(equations))
#print(str(kwargs))
knownEquations = myDict["knownEquations"]
fdMethod = myDict["fd"]
#Prepare the strings and define symbol functions of x and t
pde.prepareStrings(equations, ('t', 'x'))
myDictionary = pde.defineFunctions(unknowns,knownEquations)
#Sympify the equations and plug them in to the pde.
equations = pde.sympify(equations, locals = myDictionary)
pde.substituteKnownEquations(equations, knownEquations, myDictionary)
print(equations)
pde.simplifyCalculus(equations)
#Plug in finite differences and create jacobian
stencil = pde.createStencil(unknowns)
finiteDifference(equations, myDictionary, stencil, unknowns, fdMethod)
#substituteFiniteDifference(equations, myDictionary, stencil, unknowns)
parameters = pde.getParameters(equations, stencil, myDictionary)
jacobianEquations = pde.createJacobianEquations(len(equations), stencil, 0, 1, Matrix(equations))
#Create the folder and fill it.
import os
if not os.path.exists("__generated__"):
os.makedirs("__generated__")
#Write both the runner file and the Functions file.
fileName = fdMethod.__name__+ "_functions.py"
#writeRunnerFile(fileName, unknowns, parameters)
pde.writeFunctionsFile( "__generated__/" + fileName, unknowns, equations, jacobianEquations, parameters)
import importlib
functionsFile = importlib.import_module("__generated__."+fileName.replace(".py",""))
return [functionsFile.f, functionsFile.createJacobian]
def getInitialCondition(inVector, inFunction):
output = np.zeros(len(inVector))
for i in range(len(output)):
output[i] = inFunction(inVector[i])
return output
def toDoubleList(inputList):
#if isinstance(inputList, list):
# if isInstance(inputList[0], list):
# return inputList
# else:
# return [inputList]
#else:
# return [[inputList]]
if isinstance(inputList, str) or isinstance(inputList, float) or isinstance(inputList, int):
return [[inputList]]
elif isinstance(inputList[0], str) or isinstance(inputList[0], float) or isinstance(inputList[0], int):
return [inputList]
else:
return inputList
def toTripleList(inputList):
if isinstance(inputList, str) or isinstance(inputList, float) or isinstance(inputList, int):
return [[[inputList]]]
elif isinstance(inputList[0], str) or isinstance(inputList[0], float) or isinstance(inputList[0], int):
return [[inputList]]
elif isinstance(inputList[0][0], str) or isinstance(inputList[0][0], float) or isinstance(inputList[0][0], int):
return [inputList]
else:
return inputList
def jacobianWithBoundary(jacobian, leftBound, rightBound, matrices, n, K, H, P):
output = jacobian(matrices, n, K, H, P)
leftBound = toDoubleList(leftBound(matrices, n))
rightBound = toDoubleList(rightBound(matrices, n))
output = [output]
for eq in range(len(leftBound)):
output[eq][ 0:len(leftBound[0]),0] = leftBound[eq]
output[eq][ -len(rightBound[0]):len(output[0]),-1] = rightBound[eq]
return output[0]
def functionWithBoundary(f, leftBound, rightBound, matrices, P, K, H, n):
output = f(matrices, P, K, H, n)
leftList = leftBound(matrices, n)
rightList = rightBound(matrices, n)
numPoints = len(matrices[0])
for i in range(len(matrices)):
output[i*numPoints] = leftList[i]
output[(i+1)*numPoints-1] = rightList[i]
return output
def evolve(xPoints, tPoints, lBound, rBound, t0, myFunctions, **kwargs):
myDict = {"p":[], "fd":"crankNicholson", "MAX_ERROR":.01}
myDict.update(kwargs)
f = lambda matrices, time, K, H, P: functionWithBoundary(myFunctions[0], lBound[0], rBound[0], matrices, time, K, H, P)
jac = lambda matrices, time, K, H, P: jacobianWithBoundary(myFunctions[1], lBound[1], rBound[1], matrices, time, K, H, P)
t0 = toDoubleList(t0)
numVars = len(t0)
matrixList = []
for i in range(numVars):
        currArray = np.zeros((len(tPoints),len(xPoints)))
        currArray[0] = t0[i]
        matrixList.append(currArray)
#print("Len",len(matrixList))
approximate.solveSystem(matrixList, xPoints, tPoints, myDict["p"], myDict["MAX_ERROR"], f,jac)
return matrixList
def toList(inputType):
if isinstance(inputType, list):
return inputType
else:
return [inputType]
# if isinstance(inputType, str):
# return [inputType]
# else:
# return inputType
def graph(unknown, matrixList):
approximate.plotMatrix(matrixList, unknown)
def getBoundaryFunctions(pdeString, pdeVariables):
#Write the Function
outputString = """def lBoundFunction(UIn, n):
"""
for i in range(len(pdeVariables)):
outputString += pdeVariables[i]
outputString += " = UIn[" +str(i) + """]
"""
outputString += """return """
pdeStringOutput = toList(pdeString)
for i in range(len(pdeStringOutput)):
pdeStringOutput[i] = pdeStringOutput[i].replace("(","[")
pdeStringOutput[i] = pdeStringOutput[i].replace(")","]")
#Write the Derivative.
outputString += str(pdeStringOutput).replace("'","")
outputString += """
def lBoundDerivative(UIn, n):
"""
for i in range(len(pdeVariables)):
outputString += pdeVariables[i]
outputString += " = UIn[" +str(i) + """]
"""
#print(outputString)
def finiteDifference(eq, myDictionary, stencil, unknowns, fdFunction):
n = 0
j = 1
t = myDictionary['t']
x = myDictionary['x']
h = myDictionary['H']
k = myDictionary['K']
#Loop through the equations and the unknowns.
for eqNum in range(len(eq)):
for i in range(len(unknowns)):
unknown = myDictionary[unknowns[i]]
(Uxx, Ux, Ut, U) = fdFunction(stencil[i], n, j, k, h)
eq[eqNum] = eq[eqNum].subs(unknown.diff(x).diff(x),Uxx)
eq[eqNum] = eq[eqNum].subs(unknown.diff(x),Ux)
eq[eqNum] = eq[eqNum].subs(unknown.diff(t),Ut)
eq[eqNum] = eq[eqNum].subs(unknown,U)
def crankNicholson(U, n, j, k, h):
Uxx = ((U[n+1][j+1] - 2*U[n+1][j] + U[n+1][j-1])/(h**2) +
(U[n][j+1] - 2*U[n][j] + U[n][j-1])/(h**2))/2
Ux = ((U[n+1][j+1] - U[n+1][j-1])/(2*h) +
(U[n][j+1] - U[n][j-1])/(2*h))/2
Ut = (U[n+1][j] - U[n][j])/(k)
    UOut = (U[n+1][j] + U[n][j])/2
return (Uxx, Ux, Ut, UOut)
def explicit(U, n, j, k, h):
Uxx = (U[n][j+1]-2*U[n][j]+U[n][j-1])/(h**2)
Ux = (U[n][j+1]-U[n][j-1])/(2*h)
Ut = (U[n+1][j] - U[n][j])/(k)
UOut = U[n][j]
return (Uxx, Ux, Ut, UOut)
def implicit(U, n, j, k, h):
Uxx = (U[n+1][j+1]-2*U[n+1][j]+U[n+1][j-1])/(h**2)
Ux = (U[n+1][j+1]-U[n+1][j-1])/(2*h)
Ut = (U[n+1][j] - U[n][j])/(k)
UOut = U[n+1][j]
return (Uxx, Ux, Ut, UOut)
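# --- Small numeric sanity sketch (added for exposition; the grid values are made up). ---
# Applying the explicit stencil to a tiny grid U[time][space] reproduces the usual
# forward-time, centered-space differences, here with k = 0.1 and h = 0.5:
_U_demo = np.array([[0.0, 1.0, 0.0], [0.0, 0.8, 0.0]])
_Uxx_demo, _Ux_demo, _Ut_demo, _U0_demo = explicit(_U_demo, 0, 1, 0.1, 0.5)
# _Uxx_demo == -8.0, _Ux_demo == 0.0, _Ut_demo == -2.0 (up to rounding), _U0_demo == 1.0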
|
{"hexsha": "b626a8ffbd0ec12f9200254a44e603ed3984a212", "size": 12004, "ext": "py", "lang": "Python", "max_stars_repo_path": "stablab/finite_difference.py", "max_stars_repo_name": "nonlinear-waves/stablab_python", "max_stars_repo_head_hexsha": "101724f8bcefc34e90cf70d0813919188e08cb8a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-09-27T14:20:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-04T21:04:57.000Z", "max_issues_repo_path": "stablab/finite_difference.py", "max_issues_repo_name": "nonlinear-waves/stablab_python", "max_issues_repo_head_hexsha": "101724f8bcefc34e90cf70d0813919188e08cb8a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stablab/finite_difference.py", "max_forks_repo_name": "nonlinear-waves/stablab_python", "max_forks_repo_head_hexsha": "101724f8bcefc34e90cf70d0813919188e08cb8a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-04-04T05:39:29.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-02T10:03:33.000Z", "avg_line_length": 35.9401197605, "max_line_length": 126, "alphanum_fraction": 0.5826391203, "include": true, "reason": "import numpy,import sympy,from sympy", "num_tokens": 3385}
|
# -*- coding: utf-8 -*-
from math import exp, factorial
import numpy as np
from numpy.linalg import matrix_power
from scipy.stats import poisson
from scipy.linalg import norm, null_space, solve, solve_sylvester, expm, inv
import matplotlib.pyplot as plt
import sys, warnings
from tqdm import tqdm
'''
̄W combining macron, treated as a special character by python; may not be correctly rendered by pyplot.
W̅ combining overline, treated as a letter by python; may not be correctly rendered by pyplot.
'''
def QuadraticMatrixEquationMinimalPositiveSolution(A=1, B=1, C=1, *, dim:int=None, method:str='Bernoulli', epsilon:float=1.e-10, maxiter:int=None):
"""QuadraticMatrixEquationMinimalPositiveSolution
    Calculates the minimal elementwise-positive solvent of the quadratic system `A X^2 + B X + C == O`, where `A, B, C, X, O` are all square matrices.
    The existence and uniqueness of such a solvent are demonstrated in https://dx.doi.org/10.1016/j.amc.2011.08.070
Args:
A (matrix, optional): square matrix or real number. Defaults to 1.
B (matrix, optional): square matrix or real number. Defaults to 1.
C (matrix, optional): square matrix or real number. Defaults to 1.
the provided A, B, C must be of the same order or a real number; if some of them are provided as real numbers, the dim of the system must be given explicitly.
dim (int, optional): the size of the square matrices. Defaults to None.
method (str, optional): 'Bernoulli' or 'Newton'. Defaults to 'Bernoulli'.
epsilon (float, optional): Tolerance of the norm of the residue. Defaults to 1.e-10.
maxiter (int, optional): Maximum rounds of iteration. Defaults to None.
Raises:
ValueError: Raised when the coefficients are not coherent square matrices.
NotImplementedError: When a method other than 'Bernoulli' or 'Newton' is specified.
Returns:
np.ndarray: the minimal positive solution to the quadratic system
"""
if isinstance(A, (int, float)):
if type(dim) is int: A = A*np.eye(dim)
else: raise TypeError("The quadratic coefficient is provided as a real number, but the dimensionality is not correctly specified.")
elif type(dim) is int and A.shape != (dim, dim): raise ValueError("The quadratic coefficient is not of specified shape.")
if type(dim) is not int: dim = A.shape[0]
    if isinstance(B, (int, float)): B = B*np.eye(dim)
    elif B.shape != (dim, dim): raise ValueError("The linear coefficient is not of specified shape.")
    if isinstance(C, (int, float)): C = C*np.eye(dim)
    elif C.shape != (dim, dim): raise ValueError("The constant coefficient is not of specified shape.")
try:
B = solve(A, B)
C = solve(A, C)
    except Exception:
        warnings.warn("Exceptions encountered while normalizing the system by the quadratic coefficient; proceeding with the coefficients as given.")
def BernoulliIteration(X)->np.ndarray: return solve( X+B, -C)
def NewtonIteration(X)->np.ndarray: return solve_sylvester(X+B, X, X@X-C)
if method=='Bernoulli': iter = BernoulliIteration
elif method== 'Newton': iter = NewtonIteration
else : raise NotImplementedError(f"the specified iteration scheme {method} is not implemented.")
X = np.zeros(A.shape)
if type(epsilon) is float:
if maxiter==None:
while norm(iter(X)-X)>epsilon: X=iter(X)
else:
cnt = 0
while norm(iter(X)-X)>epsilon and cnt<maxiter: X=iter(X); cnt=cnt+1
else:
cnt = 0
while cnt<maxiter: X=iter(X); cnt=cnt+1
return X
solve_qme = QuadraticMatrixEquationMinimalPositiveSolution
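# Minimal usage sketch (added for exposition; the 1x1 "matrices" below are made up): for the
# scalar equation x**2 - 3*x + 2 == 0 the minimal positive solvent is 1.0, which the default
# Bernoulli iteration recovers to within `epsilon`.
_X_demo = solve_qme(np.array([[1.0]]), np.array([[-3.0]]), np.array([[2.0]]))
# _X_demo is approximately [[1.0]]; the other solvent, 2.0, is not minimal.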
class RandomVariable:...
class PointProcess(RandomVariable):
def __init__(self, rate:float=1):
if rate<=0: raise ValueError(f"The rate should be a positive real, but {rate} is provided.")
else: self.rate = rate
class MarkovianArrivalProcess(PointProcess):
    def __init__(self) -> None: raise NotImplementedError
class ArrivalProcess(PointProcess):
def __init__(self, rate:float=1, *, dist=None):
super(ArrivalProcess, self).__init__(rate)
self.λ = self.lbd = self.arrival_rate = self.rate
if dist == None : self._dist = lambda x: (1/self.λ)*exp(-x/self.λ)
elif callable(dist): self._dist = dist
else: raise TypeError("The provided parameter is not a callable object.")
def dist(self, x:float)->float: return self._dist(x)
def moment(self, k:int )->float: return NotImplementedError("The moment computation method for this process has not been implemented.")
def as_MAP(self)->MarkovianArrivalProcess:
""" Convert the arrival process into a Markovian one."""
raise NotImplementedError("The MAP for this distribution is currently unknown.")
class ServiceProcess(PointProcess):
def __init__(self, rate:float=1):
super(ServiceProcess, self).__init__(rate)
self.μ = self.mu = self.service_rate = self.rate
def dist(self, x:float)->float: return (1/self.μ)*exp(-x/self.μ)
def moment(self, k:int )->float: return self.μ**k
class QueuingProcess(PointProcess):
def __init__(self, arrival_process:ArrivalProcess, service_process:ServiceProcess, *, service_policy:str="FIFO"):
self.arrival_process = arrival_process
self.service_process = service_process
self.λ = self.lbd = self.arrival_rate = self.arrival_process.rate
self.μ = self.mu = self.service_rate = self.service_process.rate
if self.service_rate<=0 :
raise ValueError(f"the specified service rate is not a positive real number")
else:
self.μ = self.mu = self.service_rate = self.service_rate
if self.service_rate<=self.arrival_rate:
warnings.warn(f"Service rate {self.service_rate:6f} is slower than the arrival rate {self.arrival_rate:6f}. The system is not positive-recurrent.",stacklevel=3)
self.ρ = self.rho = self.utilization_factor = self.arrival_rate/self.service_rate
self.policy = self.service_policy = service_policy
def is_positive_recurrent(self)->bool:
return True if self.service_rate > self.arrival_process.rate else False
def as_MAP(self):
queue = QueuingProcess(self.arrival_process.as_MAP(), self.service_process, service_policy=self.service_policy)
queue.M = queue.arrival_process.M
return queue
class ExponentialServiceProcess(ServiceProcess):...
class MarkovianArrivalProcess(ArrivalProcess):
def __init__(self,D0:np.ndarray,D1:np.ndarray):
"""MarkovianArrivalProcess
Args:
D0 (ndarray): matC, the transmission matrix
D1 (ndarray): matD, the emission matrix
"""
matC=np.asarray(D0)
matD=np.asarray(D1)
        if matC.ndim != 2 or matD.ndim != 2:
            raise ValueError(f"Matrices required. The matC provided is {matC.ndim}-d, but the matD is {matD.ndim}-d")
        elif matC.shape != matD.shape:
            raise ValueError(f"Shape mismatch. the matC provided has shape {matC.shape}, but the matD provided has shape {matD.shape}")
        elif matC.shape[0] != matC.shape[1]:
            raise ValueError(f"Not square matrices. the matC provided has shape {matC.shape}, but the matD provided has shape {matD.shape}")
else:
self.M = self.dim = self.number_of_states = matC.shape[0]
if not np.array_equal(matD, np.abs(matD)):
warnings.warn(f"The emission matrix is not elementwise positive. Trying to modify it to make the system legal.")
matD = np.abs(matD)
if not np.array_equal(matC-np.diag(np.diag(matC)), np.abs(matC-np.diag(np.diag(matC)))):
warnings.warn(f"The transmission matrix is not off-diagonal elementwise positive. Trying to modify it to make the system legal.")
matC = np.diag(np.diag(matC))+np.abs(matC-np.diag(np.diag(matC)))
if np.abs(rowsums:=np.sum(matC+matD,axis=1)).sum() != 0:
warnings.warn(f"The generator has rowsums {rowsums} instead of 0's. Trying to modify the diagonal of the transmission matrix to make the system legal.",stacklevel=3)
matC = matC-np.diag(rowsums)
if (pi:=null_space(matC+matD)).shape != (self.M,1): raise ValueError(f"No unique steady solution for the markov chain; the generator has null_space {pi.tolist()}")
self.D0=self.matC=matC
self.D1=self.matD=matD
self.π = self.pi = self.arrival_steady_distribution = self.steady_distribution = pi.reshape((self.M,))/pi.sum()
super(MarkovianArrivalProcess, self).__init__(rate=self.pi @ self.matD @ np.ones(self.M))
self.λ = self.lbd = self.arrival_rate
self.θ = self.theta = np.amax(np.abs(self.matC))
self._M_factorial = factorial(self.M)
@classmethod # trick: use class method to implement method overloading
def from_p(cls, p:float):
"""Use the original example used by the paper
Args:
p (float): an adjusting parameter for the emission matrix
matC=np.array([[-2 , 0],[0 ,-0.5 ]])
matD=np.array([[ 2*p,2*(1-p)],[0.5*(1-p), 0.5*p]])
"""
if p<0 or p>1:
raise ValueError(f"the provided parameter p={p} does not lie within [0,1]")
matC=np.array([[-2 , 0 ],[ 0 ,-0.5 ]])
matD=np.array([[ 2*p, 2*(1-p)],[ 0.5*(1-p), 0.5*p]])
queue = cls(matC, matD)
return queue
def dist(self, x:float)->float: return self.π.dot(self.D0).dot( expm(x*self.D0)).dot(np.ones(self.M))
def moment(self, k:int )->float: return self._M_factorial*self.π.dot(matrix_power(-inv(self.D0),k)).dot(np.ones(self.M))
MAP = MarkovianArrivalProcess
class PoissonArrivalProcess(ArrivalProcess):
def __init__(self, rate:float=1):
super(PoissonArrivalProcess, self).__init__(rate)
self.dist = poisson(rate)
def as_MAP(self)->MarkovianArrivalProcess:
return MarkovianArrivalProcess(np.array([-self.arrival_rate]),np.array([self.arrival_rate]))
class PoissonArrivalExponentialServiceFIFOQueue(QueuingProcess):
def __init__(self, arrival_rate:float=1, service_rate:float=1):
super(PoissonArrivalExponentialServiceFIFOQueue, self).__init__(PoissonArrivalProcess(arrival_rate), ExponentialServiceProcess(service_rate))
MM1Queue = PoissonArrivalExponentialServiceFIFOQueue
class PoissonArrivalExponentialServiceProcessorSharingQueue(QueuingProcess):
def __init__(self, arrival_rate:float=1, service_rate:float=1):
super(PoissonArrivalExponentialServiceProcessorSharingQueue, self).__init__(PoissonArrivalProcess(arrival_rate), ExponentialServiceProcess(service_rate), service_policy="PS")
MM1PSQueue = PoissonArrivalExponentialServiceProcessorSharingQueue
class MarvovianArrivalExponentialServiceFIFOQueue(QueuingProcess):
"""
A MAP/M/1-FIFO queue
"""
def __init__(self, MAP:MarkovianArrivalProcess, service_rate:float):
super(MarvovianArrivalExponentialServiceFIFOQueue, self).__init__(MAP, ExponentialServiceProcess(service_rate))
self.MAP = self.arrival_process
self.M = self.arrival_process.M
def as_MAP(self)->QueuingProcess:
return self
MAPM1Queue = MarvovianArrivalExponentialServiceFIFOQueue
class MarvovianArrivalExponentialServiceProcessorSharingQueue(QueuingProcess):
"""
A MAP/M/1-PS queue
"""
def _get_sojourn_time_matrix(self)->np.ndarray:
return np.transpose(solve_qme(self.mu, np.transpose(self.MAP.matC-self.mu*np.eye(self.M)), np.transpose(self.MAP.matD), dim=self.M))
def __init__(self, MAParr:MarkovianArrivalProcess, service_rate:float):
super(MarvovianArrivalExponentialServiceProcessorSharingQueue, self).__init__(MAParr, ExponentialServiceProcess(service_rate), service_policy="PS")
self.MAP = self.arrival_process
self.M = self.arrival_process.M
self.R = self.sojourn_time_matrix = self._get_sojourn_time_matrix()
        self.π0 = self.pi0 = self.empty_steady_distribution = self.MAP.pi.dot(np.eye(self.M)-self.R)
def as_MAP(self)->QueuingProcess:
return self
MAPM1PSQueue = MarvovianArrivalExponentialServiceProcessorSharingQueue
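# --- Illustrative end-to-end sketch (added for exposition; parameter values are made up). ---
# Build the paper's two-state MAP with p = 0.75, attach an exponential server of rate 2, and
# read off the utilization and the sojourn-time matrix R obtained through solve_qme.
_map_demo = MarkovianArrivalProcess.from_p(0.75)
_queue_demo = MAPM1PSQueue(_map_demo, 2.0)
# _queue_demo.rho == 1.25/2 (arrival rate over service rate); _queue_demo.R solves the QME.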
|
{"hexsha": "042b6c618e166c57fe87055bb5450f3bd936d13d", "size": 12472, "ext": "py", "lang": "Python", "max_stars_repo_path": "queue-def.py", "max_stars_repo_name": "Gravifer/queue-sdp", "max_stars_repo_head_hexsha": "0541d2460e4cfd7a75d3578378d19cb0926bfbfe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "queue-def.py", "max_issues_repo_name": "Gravifer/queue-sdp", "max_issues_repo_head_hexsha": "0541d2460e4cfd7a75d3578378d19cb0926bfbfe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-14T21:11:16.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-14T21:11:16.000Z", "max_forks_repo_path": "queue-def.py", "max_forks_repo_name": "QueSDP/queue-sdp", "max_forks_repo_head_hexsha": "0541d2460e4cfd7a75d3578378d19cb0926bfbfe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.4343891403, "max_line_length": 182, "alphanum_fraction": 0.6849743425, "include": true, "reason": "import numpy", "num_tokens": 3166}
|
# -*- coding: utf-8 -*-
from __future__ import print_function
# identifying str and unicode on Python 2, or str on Python 3
from six import string_types, text_type
import os, sys
import time
from abc import ABCMeta, abstractmethod
import re
import itertools
import glob
import csv
from text_unidecode import unidecode
from itertools import compress, chain
from io import open
import numpy as np
import pandas as pd
from sklearn.neural_network import MLPClassifier
from sklearn.svm import LinearSVC
# from sklearn.linear_model import LassoCV
from sklearn.feature_selection import SelectFromModel, RFE, RFECV
# from sklearn.gaussian_process import GaussianProcessClassifier
# from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier
from sklearn.naive_bayes import GaussianNB
# from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis, LinearDiscriminantAnalysis
from sklearn.preprocessing import PolynomialFeatures, MinMaxScaler, StandardScaler
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC, LinearSVC
from sklearn.pipeline import Pipeline
# We'll use this library to make the display pretty
from tabulate import tabulate
from datasetcreator import strip_accents, LSimilarityVars, lsimilarity_terms, score_per_term, weighted_terms
from helpers import perform_stemming, normalize_str, sorted_nicely, StaticValues
import config
punctuation_regex = re.compile(u'[‘’“”\'"!?;/⧸⁄‹›«»`ʿ,.-]')
clf_names = [
# 'SVM':
[
LinearSVC, config.MLConf.SVM_hyperparameters, config.MLConf.SVM_hyperparameters_dist
],
# 'DecisionTree':
[
DecisionTreeClassifier, config.MLConf.DecisionTree_hyperparameters,
config.MLConf.DecisionTree_hyperparameters_dist
],
# 'RandomForest':
[
RandomForestClassifier, config.MLConf.RandomForest_hyperparameters,
config.MLConf.RandomForest_hyperparameters_dist
],
# 'MLP':
[MLPClassifier, config.MLConf.MLP_hyperparameters, config.MLConf.MLP_hyperparameters_dist],
# 'MLPDummy':
[MLPClassifier, config.MLConf.MLP_hyperparameters, config.MLConf.MLP_hyperparameters_dist],
# 'ExtraTrees':
[
ExtraTreesClassifier, config.MLConf.RandomForest_hyperparameters,
config.MLConf.RandomForest_hyperparameters_dist
],
# 'XGBoost':
[XGBClassifier, config.MLConf.XGBoost_hyperparameters, config.MLConf.XGBoost_hyperparameters_dist]
]
def ascii_transliteration_and_punctuation_strip(s):
# NFKD: first applies a canonical decomposition, i.e., translates each character into its decomposed form.
# and afterwards apply the compatibility decomposition, i.e. replace all compatibility characters with their
# equivalents.
s = unidecode(strip_accents(s.lower()))
s = punctuation_regex.sub('', s)
return s
def transform(strA, strB, sorting=False, simple_sorting=True, stemming=False, canonical=False, delimiter=' ', sort_thres=config.sort_thres):
a = text_type(strA) #.lower()
b = text_type(strB)
if canonical:
a = ascii_transliteration_and_punctuation_strip(a)
b = ascii_transliteration_and_punctuation_strip(b)
if sorting:
tmp_a = a.replace(' ', '')
tmp_b = b.replace(' ', '')
sim_concatenated = StaticValues.algorithms['damerau_levenshtein'](tmp_a, tmp_b)
sim_orig = StaticValues.algorithms['damerau_levenshtein'](a, b)
if simple_sorting or (sim_concatenated < sort_thres):
a = " ".join(sorted_nicely(a.split(delimiter)))
b = " ".join(sorted_nicely(b.split(delimiter)))
elif sim_concatenated > sim_orig:
a = tmp_a
b = tmp_b
if stemming:
a = perform_stemming(a)
b = perform_stemming(b)
return a, b
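# Hedged usage sketch (exposition only; exact outputs depend on unidecode and the project's
# strip_accents helper, so none are shown):
#   a, b = transform(u'Köln, Germany', u'Germany Koeln', sorting=True, canonical=True)
# canonical=True lowercases, transliterates and strips punctuation; with the default
# simple_sorting=True the tokens of both strings are then reordered with sorted_nicely,
# otherwise sorting only happens when the concatenated-string similarity falls below sort_thres.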
def transform_str(s, stemming=False, canonical=False, delimiter=' '):
a = text_type(s)
if canonical:
a = ascii_transliteration_and_punctuation_strip(a)
if stemming:
a = perform_stemming(a)
return a
class PipelineRFE(Pipeline):
def fit(self, X, y=None, **fit_params):
super(PipelineRFE, self).fit(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'feature_importances_'):
self.feature_importances_ = self.steps[-1][-1].feature_importances_
if hasattr(self.steps[-1][-1], 'coef_'):
self.coef_ = self.steps[-1][-1].coef_
return self
class FEMLFeatures:
no_freq_terms = 200
def __init__(self):
pass
# TODO to_be_removed = "()/.,:!'" # check the list of chars
    # Returns True when every whitespace-separated token of str1 also appears in str2 (str1 is a subset of str2).
@staticmethod
def contains(str1, str2):
return all(x in str2 for x in str1.split())
@staticmethod
def is_matched(str):
"""
Finds out how balanced an expression is.
With a string containing only brackets.
>>> is_matched('[]()()(((([])))')
False
>>> is_matched('[](){{{[]}}}')
True
"""
opening = tuple('({[')
closing = tuple(')}]')
mapping = dict(zip(opening, closing))
queue = []
for letter in str:
if letter in opening:
queue.append(mapping[letter])
elif letter in closing:
if not queue or letter != queue.pop():
return False
return not queue
def hasEncoding_err(self, str):
return self.is_matched(str)
@staticmethod
def containsAbbr(str):
abbr = re.search(r"\b[A-Z]([A-Z\.]{1,}|[sr\.]{1,2})\b", str)
return '-' if abbr is None else abbr.group()
@staticmethod
def containsTermsInParenthesis(str):
tokens = re.split('[{\[(]', str)
bflag = True if len(tokens) > 1 else False
return bflag
@staticmethod
def containsDashConnected_words(str):
"""
Hyphenated words are considered to be:
* a number of word chars
* followed by any number of: a single hyphen followed by word chars
"""
is_dashed = re.search(r"\w+(?:-\w+)+", str)
return False if is_dashed is None else True
@staticmethod
def no_of_words(s1, s2):
return len(set(s1.split())), len(set(s2.split()))
@staticmethod
def containsFreqTerms(s1, s2):
# specialTerms = dict(a=[], b=[])
# specialTerms['a'] = filter(lambda x: x in a, freq_terms)
# specialTerms['b'] = filter(lambda x: x in b, freq_terms)
# for idx, x in enumerate(LSimilarityVars.freq_ngrams['tokens'] | LSimilarityVars.freq_ngrams['chars']):
# if x in str1: specialTerms['a'].append([idx, x])
# if x in str2: specialTerms['b'].append([idx, x])
s1_flag = any(x in LSimilarityVars.freq_ngrams['tokens'] for x in s1.split())
s2_flag = any(x in LSimilarityVars.freq_ngrams['tokens'] for x in s2.split())
return s1_flag, s2_flag
@staticmethod
def positionalFreqTerms(s1, s2):
s1_terms = s1.split()
s2_terms = s2.split()
s1_pos = [x in s1_terms for x in list(LSimilarityVars.freq_ngrams['tokens'])[:config.MLConf.pos_freqs]]
s2_pos = [x in s2_terms for x in list(LSimilarityVars.freq_ngrams['tokens'])[:config.MLConf.pos_freqs]]
return s1_pos, s2_pos
@staticmethod
def ngram_tokens(tokens, ngram=1):
        if ngram < 1: ngram = 1
return list(itertools.chain.from_iterable([[tokens[i:i + ngram] for i in range(len(tokens) - (ngram - 1))]]))
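    # Small illustrative check (added for exposition): character bigrams of a single token,
    #   FEMLFeatures.ngram_tokens('athens', ngram=2) -> ['at', 'th', 'he', 'en', 'ns']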
def ngrams(str, ngram=1):
pass
def _check_size(self, s):
if not len(s) == 3:
raise ValueError('expected size 3, got %d' % len(s))
def containsInPos(self, str1, str2):
fvec_str1 = []
fvec_str2 = []
sep_step = int(round(len(str1) / 3.0))
fvec_str1.extend(
[StaticValues.algorithms['damerau_levenshtein'](str1[0:sep_step], str2),
StaticValues.algorithms['damerau_levenshtein'](str1[sep_step:2*sep_step], str2),
StaticValues.algorithms['damerau_levenshtein'](str1[2*sep_step:], str2)]
)
sep_step = int(round(len(str2) / 3.0))
fvec_str2.extend(
[StaticValues.algorithms['damerau_levenshtein'](str1, str2[0:sep_step]),
StaticValues.algorithms['damerau_levenshtein'](str1, str2[sep_step:2*sep_step]),
StaticValues.algorithms['damerau_levenshtein'](str1, str2[2*sep_step:])]
)
self._check_size(fvec_str1)
self._check_size(fvec_str2)
return fvec_str1, fvec_str2
def get_freqterms(self, encoding):
input_path = (True, os.path.join(os.getcwd(), 'input/')) \
if os.path.isdir(os.path.join(os.getcwd(), 'input/')) \
else (os.path.isdir(os.path.join(os.getcwd(), '../input/')), os.path.join(os.getcwd(), '../input/'))
if input_path[0]:
for f in glob.iglob(os.path.join(input_path[1], '*gram*{}{}.csv'.format('_', encoding))):
gram_type = 'tokens' if 'token' in os.path.basename(os.path.normpath(f)) else 'chars'
with open(f) as csvfile:
print("Loading frequent terms from file {}...".format(f))
reader = csv.DictReader(csvfile, fieldnames=["term", "no"], delimiter='\t')
_ = reader.fieldnames
# go to next line after header
next(reader)
for i, row in enumerate(reader):
if i >= FEMLFeatures.no_freq_terms:
break
LSimilarityVars.freq_ngrams[gram_type].add(text_type(row['term']))
print('Frequent terms loaded.')
else:
print("Folder 'input' does not exist")
def update_weights(self, w):
if isinstance(w, tuple) and len(w) >= 3:
del LSimilarityVars.lsimilarity_weights[:]
LSimilarityVars.lsimilarity_weights.extend(w[:3])
class baseMetrics:
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, size, njobs=2, accuracyresults=False):
self.num_true_predicted_true = [0.0] * size
self.num_true_predicted_false = [0.0] * size
self.num_false_predicted_true = [0.0] * size
self.num_false_predicted_false = [0.0] * size
self.num_true = 0.0
self.num_false = 0.0
self.timer = 0.0
self.train_timer = 0.0
self.timers = [0.0] * size
self.file = None
self.accuracyresults = accuracyresults
self.feml_features = FEMLFeatures()
self.predictedState = {
'num_true_predicted_true': self.num_true_predicted_true,
'num_true_predicted_false': self.num_true_predicted_false,
'num_false_predicted_true': self.num_false_predicted_true,
'num_false_predicted_false': self.num_false_predicted_false
}
self.njobs = njobs
def __del__(self):
if self.accuracyresults and self.file is not None and not self.file.closed:
self.file.close()
def reset_vars(self):
self.num_true_predicted_true[:] = [0.0] * len(self.num_true_predicted_true)
self.num_true_predicted_false[:] = [0.0] * len(self.num_true_predicted_false)
self.num_false_predicted_true[:] = [0.0] * len(self.num_false_predicted_true)
self.num_false_predicted_false[:] = [0.0] * len(self.num_false_predicted_false)
self.timer = 0.0
self.timers[:] = [0.0] * len(self.timers)
def preprocessing(self, row):
if row['res'].upper() == "TRUE": self.num_true += 1.0
else: self.num_false += 1.0
def reset(self):
self.num_true = 0
self.num_false = 0
@abstractmethod
def evaluate(self, row, sorting=False, stemming=False, canonical=False, permuted=False, custom_thres='orig', selectable_features=None):
pass
def _compute_stats(self, idx, results=False):
exit_status, acc, pre, rec, f1, timer = 0, -1, -1, -1, -1, 0
try:
timer = (self.timers[idx]) # / float(int(self.num_true + self.num_false))) * 50000.0
acc = (self.num_true_predicted_true[idx] + self.num_false_predicted_false[idx]) / \
(self.num_true + self.num_false)
pre = (self.num_true_predicted_true[idx]) / \
(self.num_true_predicted_true[idx] + self.num_false_predicted_true[idx])
rec = (self.num_true_predicted_true[idx]) / \
(self.num_true_predicted_true[idx] + self.num_true_predicted_false[idx])
f1 = 2.0 * ((pre * rec) / (pre + rec))
except ZeroDivisionError:
exit_status = 1
# print("{0} is divided by zero\n".format(StaticValues.methods[idx][0]))
if results:
return exit_status, acc, pre, rec, f1, timer
def _print_stats(self, method, acc, pre, rec, f1, timer):
# print("Metric = Supervised Classifier :", method)
# print("Accuracy =", acc)
# print("Precision =", pre)
# print("Recall =", rec)
# print("F1 =", f1)
# print("Processing time per 50K records =", timer)
# print("")
print("| Method\t\t& Accuracy\t& Precision\t& Recall\t& F1-Score\t& Time (sec)") # (50K Pairs)")
print("||{0}\t& {1}\t& {2}\t& {3}\t& {4}\t& {5}".format(method, acc, pre, rec, f1, timer))
# print("")
sys.stdout.flush()
def print_stats(self):
for idx, m in enumerate(StaticValues.methods):
status, acc, pre, rec, f1, t = self._compute_stats(idx, True)
if status == 0: self._print_stats(m[0], acc, pre, rec, f1, t)
def get_stats(self):
res = {}
for idx, m in enumerate(StaticValues.methods):
status, acc, pre, rec, f1, _ = self._compute_stats(idx, True)
if status == 0: res[m[0]] = [acc, pre, rec, f1]
return res
def prediction(self, sim_id, pred_val, real_val, custom_thres):
result = ""
var_name = ""
thres = StaticValues.methods[sim_id - 1][1][custom_thres] if isinstance(custom_thres, string_types) else custom_thres
if real_val == 1.0:
if pred_val >= thres:
var_name = 'num_true_predicted_true'
result = "\tTRUE"
else:
var_name = 'num_true_predicted_false'
result = "\tFALSE"
else:
if pred_val >= thres:
var_name = 'num_false_predicted_true'
result = "\tTRUE"
else:
var_name = 'num_false_predicted_false'
result = "\tFALSE"
return result, var_name
def freq_terms_list(self, encoding):
self.feml_features.get_freqterms(encoding)
def _perform_feature_selection(self, X_train, y_train, X_test, method, model, no_features_to_keep=12):
fsupported = None
if method == 'rfe':
rfe = RFE(model, n_features_to_select=no_features_to_keep, step=1)
rfe.fit(X_train, y_train)
X = rfe.transform(X_train)
X_t = rfe.transform(X_test)
fsupported = rfe.support_
print("{} features selected using Recursive Feature Elimination (RFE).".format(X.shape[1]))
elif method == 'rfecv':
rfe = RFECV(model, min_features_to_select=no_features_to_keep, step=2, cv=5)
rfe.fit(X_train, y_train)
X = rfe.transform(X_train)
X_t = rfe.transform(X_test)
fsupported = rfe.support_
print("{} feature ranking withRecursive Feature Elimination (RFE) and Cross-Validation (CV).".format(X.shape[1]))
else: # default is SelectFromModel ('sfm') module
# We use the base estimator LassoCV since the L1 norm promotes sparsity of features.
# clf = LassoCV(cv=5, max_iter=2000, n_jobs=2)
sfm = SelectFromModel(model, threshold=-np.inf, max_features=no_features_to_keep, prefit=False)
sfm.fit(X_train, y_train)
X = sfm.transform(X_train)
X_t = sfm.transform(X_test)
fsupported = sfm.get_support()
print("{} features selected using SelectFromModel.".format(X.shape[1]))
return X, X_t, fsupported
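    # Hedged standalone sketch (exposition only; synthetic arrays, not project features): the
    # default SelectFromModel branch above keeps the `no_features_to_keep` best-ranked columns
    # of the fitted estimator, e.g.
    #   sfm = SelectFromModel(RandomForestClassifier(n_estimators=10, random_state=0),
    #                         threshold=-np.inf, max_features=3).fit(X_train, y_train)
    #   X_reduced = sfm.transform(X_train)   # keeps the 3 most important columns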
# Create our function which stores the feature rankings to the ranks dictionary
def ranking(self, ranks, names, order=1):
minmax = MinMaxScaler()
ranks = minmax.fit_transform(order * np.array([ranks]).T).T[0]
ranks = map(lambda x: round(x, 2), ranks)
return dict(zip(names, ranks))
class calcSotAMetrics(baseMetrics):
def __init__(self, njobs, accures):
super(calcSotAMetrics, self).__init__(len(StaticValues.methods), njobs, accures)
def _generic_evaluator(self, idx, algnm, str1, str2, is_a_match, custom_thres):
start_time = time.time()
sim_val = StaticValues.algorithms[algnm](str1, str2)
res, varnm = self.prediction(idx, sim_val, is_a_match, custom_thres)
self.timers[idx - 1] += (time.time() - start_time)
self.predictedState[varnm][idx - 1] += 1.0
return res
def evaluate(self, row, sorting=False, stemming=False, canonical=False, permuted=False, custom_thres='orig',
selectable_features=None):
tot_res = ""
flag_true_match = 1.0 if row['res'].upper() == "TRUE" else 0.0
a, b = transform(row['s1'], row['s2'], sorting=sorting, stemming=stemming, canonical=canonical)
tot_res += self._generic_evaluator(1, 'damerau_levenshtein', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(8, 'jaccard', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(2, 'jaro', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(3, 'jaro_winkler', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(4, 'jaro_winkler', a[::-1], b[::-1], flag_true_match, custom_thres)
tot_res += self._generic_evaluator(11, 'monge_elkan', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(7, 'cosine', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(9, 'strike_a_match', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(12, 'soft_jaccard', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(5, 'sorted_winkler', a, b, flag_true_match, custom_thres)
if permuted: tot_res += self._generic_evaluator(6, 'permuted_winkler', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(10, 'skipgram', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(13, 'davies', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(14, 'l_jaro_winkler', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(15, 'l_jaro_winkler', a[::-1], b[::-1], flag_true_match, custom_thres)
if sorting:
tot_res += self._generic_evaluator(16, 'lsimilarity', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(29, 'avg_lsimilarity', a, b, flag_true_match, custom_thres)
if self.accuracyresults:
if self.file is None:
file_name = 'dataset-accuracyresults-sim-metrics'
if canonical:
file_name += '_canonical'
if sorting:
file_name += '_sorted'
self.file = open(file_name + '.csv', 'w+')
if flag_true_match == 1.0:
self.file.write("TRUE{0}\t{1}\t{2}\n".format(tot_res, a.encode('utf8'), b.encode('utf8')))
else:
self.file.write("FALSE{0}\t{1}\t{2}\n".format(tot_res, a.encode('utf8'), b.encode('utf8')))
def evaluate_sorting(self, row, custom_thres, data_format, stemming=False, permuted=False):
tot_res = ""
flag_true_match = 1.0 if row['res'].upper() == "TRUE" else 0.0
        row['s1'], row['s2'] = transform(row['s1'], row['s2'], sorting=True, stemming=stemming, canonical=True, sort_thres=custom_thres)
tot_res += self._generic_evaluator(1, 'damerau_levenshtein', row['s1'], row['s2'], flag_true_match, data_format)
tot_res += self._generic_evaluator(8, 'jaccard', row['s1'], row['s2'], flag_true_match, data_format)
tot_res += self._generic_evaluator(2, 'jaro', row['s1'], row['s2'], flag_true_match, data_format)
tot_res += self._generic_evaluator(3, 'jaro_winkler', row['s1'], row['s2'], flag_true_match, data_format)
tot_res += self._generic_evaluator(4, 'jaro_winkler', row['s1'][::-1], row['s2'][::-1], flag_true_match, data_format)
tot_res += self._generic_evaluator(11, 'monge_elkan', row['s1'], row['s2'], flag_true_match, data_format)
tot_res += self._generic_evaluator(7, 'cosine', row['s1'], row['s2'], flag_true_match, data_format)
tot_res += self._generic_evaluator(9, 'strike_a_match', row['s1'], row['s2'], flag_true_match, data_format)
tot_res += self._generic_evaluator(12, 'soft_jaccard', row['s1'], row['s2'], flag_true_match, data_format)
tot_res += self._generic_evaluator(5, 'sorted_winkler', row['s1'], row['s2'], flag_true_match, data_format)
if permuted: tot_res += self._generic_evaluator(6, 'permuted_winkler', row['s1'], row['s2'], flag_true_match, data_format)
tot_res += self._generic_evaluator(10, 'skipgram', row['s1'], row['s2'], flag_true_match, data_format)
tot_res += self._generic_evaluator(13, 'davies', row['s1'], row['s2'], flag_true_match, data_format)
tot_res += self._generic_evaluator(14, 'l_jaro_winkler', row['s1'], row['s2'], flag_true_match, data_format)
tot_res += self._generic_evaluator(15, 'l_jaro_winkler', row['s1'][::-1], row['s2'][::-1], flag_true_match, data_format)
if self.accuracyresults:
if self.file is None:
file_name = 'dataset-accuracyresults-sim-metrics'
                file_name += '_canonical_sorted'
self.file = open(file_name + '.csv', 'w+')
if flag_true_match == 1.0:
self.file.write("TRUE{0}\t{1}\t{2}\n".format(tot_res, row['s1'].encode('utf8'), row['s2'].encode('utf8')))
else:
self.file.write("FALSE{0}\t{1}\t{2}\n".format(tot_res, row['s1'].encode('utf8'), row['s2'].encode('utf8')))
class calcCustomFEML(baseMetrics):
max_important_features_toshow = 50
def __init__(self, njobs, accures):
self.X1 = []
self.Y1 = []
self.X2 = []
self.Y2 = []
self.train_X = []
self.train_Y = []
self.test_X = []
self.test_Y = []
self.classifiers = [
LinearSVC(random_state=0, C=1.0),
# GaussianProcessClassifier(1.0 * RBF(1.0), n_jobs=3, warm_start=True),
DecisionTreeClassifier(random_state=0, max_depth=100, max_features='auto'),
RandomForestClassifier(
n_estimators=250, random_state=0, n_jobs=int(njobs), max_depth=50, oob_score=True, bootstrap=True
),
MLPClassifier(alpha=1, random_state=0),
# AdaBoostClassifier(DecisionTreeClassifier(max_depth=50), n_estimators=300, random_state=0),
GaussianNB(),
# QuadraticDiscriminantAnalysis(), LinearDiscriminantAnalysis(),
ExtraTreesClassifier(n_estimators=100, random_state=0, n_jobs=int(njobs), max_depth=50),
XGBClassifier(n_estimators=3000, seed=0, nthread=int(njobs)),
]
# self.scores = [[] for _ in range(len(self.classifiers))]
self.importances = dict()
self.mlalgs_to_run = StaticValues.classifiers_abbr.keys()
# To be used within GridSearch
self.inner_cv = StratifiedKFold(n_splits=config.MLConf.kfold_inner_parameter, shuffle=False,
random_state=config.seed_no)
# To be used in outer CV
self.outer_cv = StratifiedKFold(n_splits=config.MLConf.kfold_parameter, shuffle=False,
random_state=config.seed_no)
self.kfold = config.MLConf.kfold_parameter
self.n_jobs = config.MLConf.n_jobs
self.search_method = config.MLConf.hyperparams_search_method
self.n_iter = config.MLConf.max_iter
super(calcCustomFEML, self).__init__(len(self.classifiers), njobs, accures)
def evaluate(self, row, sorting=False, stemming=False, canonical=False, permuted=False, custom_thres='orig',
selectable_features=None):
# if row['res'].upper() == "TRUE":
# if len(self.Y1) < ((self.num_true + self.num_false) / 2.0): self.Y1.append(1.0)
# else: self.Y2.append(1.0)
# else:
# if len(self.Y1) < ((self.num_true + self.num_false) / 2.0): self.Y1.append(0.0)
# else: self.Y2.append(0.0)
if row['res'].upper() == "TRUE":
self.train_Y.append(1.0)
else:
self.train_Y.append(0.0)
tmp_X1, tmp_X2 = [], []
for flag in list({False, sorting}):
a, b = transform(row['s1'], row['s2'], sorting=flag, stemming=stemming, canonical=flag)
start_time = time.time()
sim1 = StaticValues.algorithms['damerau_levenshtein'](a, b)
sim8 = StaticValues.algorithms['jaccard'](a, b)
sim2 = StaticValues.algorithms['jaro'](a, b)
sim3 = StaticValues.algorithms['jaro_winkler'](a, b)
sim4 = StaticValues.algorithms['jaro_winkler'](a[::-1], b[::-1])
sim11 = StaticValues.algorithms['monge_elkan'](a, b)
sim7 = StaticValues.algorithms['cosine'](a, b)
sim9 = StaticValues.algorithms['strike_a_match'](a, b)
sim12 = StaticValues.algorithms['soft_jaccard'](a, b)
if not flag: sim5 = StaticValues.algorithms['sorted_winkler'](a, b)
if permuted: sim6 = StaticValues.algorithms['permuted_winkler'](a, b)
sim10 = StaticValues.algorithms['skipgram'](a, b)
sim13 = StaticValues.algorithms['davies'](a, b)
self.train_timer += (time.time() - start_time)
# if len(self.X1) < ((self.num_true + self.num_false) / 2.0):
# if permuted:
# if flag: tmp_X1.append([sim1, sim2, sim3, sim4, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
# else: tmp_X1.append([sim1, sim2, sim3, sim4, sim5, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
# else:
# if flag: tmp_X1.append([sim1, sim2, sim3, sim4, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
# else: tmp_X1.append([sim1, sim2, sim3, sim4, sim5, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
# else:
if permuted:
if flag: tmp_X2.append([sim1, sim2, sim3, sim4, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else: tmp_X2.append([sim1, sim2, sim3, sim4, sim5, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else:
if flag: tmp_X2.append([sim1, sim2, sim3, sim4, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else: tmp_X2.append([sim1, sim2, sim3, sim4, sim5, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
# if len(self.X1) < ((self.num_true + self.num_false) / 2.0):
# if selectable_features is not None:
# self.X1.append(list(compress(chain.from_iterable(tmp_X1), selectable_features)))
# else:
# self.X1.append(list(chain.from_iterable(tmp_X1)))
# else:
if selectable_features is not None:
self.train_X.append(list(compress(chain.from_iterable(tmp_X2), selectable_features)))
else:
self.train_X.append(np.around(list(chain.from_iterable(tmp_X2)), 4).tolist())
if self.file is None and self.accuracyresults:
file_name = 'dataset-accuracyresults-sim-metrics'
if canonical:
file_name += '_canonical'
if sorting:
file_name += '_sorted'
self.file = open(file_name + '.csv', 'w+')
def load_test_dataset(self, row, sorting=False, stemming=False, canonical=False, permuted=False, custom_thres='orig'):
if row['res'].upper() == "TRUE":
self.test_Y.append(1.0)
self.num_true += 1.0
else:
self.test_Y.append(0.0)
self.num_false += 1.0
tmp_X1, tmp_X2 = [], []
for flag in list({False, sorting}):
a, b = transform(row['s1'], row['s2'], sorting=flag, stemming=stemming, canonical=flag)
start_time = time.time()
sim1 = StaticValues.algorithms['damerau_levenshtein'](a, b)
sim8 = StaticValues.algorithms['jaccard'](a, b)
sim2 = StaticValues.algorithms['jaro'](a, b)
sim3 = StaticValues.algorithms['jaro_winkler'](a, b)
sim4 = StaticValues.algorithms['jaro_winkler'](a[::-1], b[::-1])
sim11 = StaticValues.algorithms['monge_elkan'](a, b)
sim7 = StaticValues.algorithms['cosine'](a, b)
sim9 = StaticValues.algorithms['strike_a_match'](a, b)
sim12 = StaticValues.algorithms['soft_jaccard'](a, b)
if not flag: sim5 = StaticValues.algorithms['sorted_winkler'](a, b)
if permuted: sim6 = StaticValues.algorithms['permuted_winkler'](a, b)
sim10 = StaticValues.algorithms['skipgram'](a, b)
sim13 = StaticValues.algorithms['davies'](a, b)
self.timer += (time.time() - start_time)
if permuted:
if flag:
tmp_X2.append([sim1, sim2, sim3, sim4, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else:
tmp_X2.append(
[sim1, sim2, sim3, sim4, sim5, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else:
if flag:
tmp_X2.append([sim1, sim2, sim3, sim4, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else:
tmp_X2.append([sim1, sim2, sim3, sim4, sim5, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
self.test_X.append(np.around(list(chain.from_iterable(tmp_X2)), 4).tolist())
def train_classifiers(self, ml_algs, polynomial=False, standardize=False, fs_method=None, features=None):
if polynomial:
            poly = PolynomialFeatures()
            self.train_X = poly.fit_transform(self.train_X)
            self.test_X = poly.transform(self.test_X)
# iterate over classifiers
if set(ml_algs) != {'all'}: self.mlalgs_to_run = ml_algs
# for i, (name, clf) in enumerate(zip(self.names, self.classifiers)):
hyperparams_data = list()
for name in self.mlalgs_to_run:
if name not in StaticValues.classifiers_abbr.keys():
print('{} is not a valid ML algorithm'.format(name))
continue
clf_abbr = StaticValues.classifiers_abbr[name]
model = self.classifiers[clf_abbr]
clf = None
print('Running cv for {}...'.format(name))
if self.search_method.lower() == 'grid':
clf = GridSearchCV(
clf_names[clf_abbr][0](), clf_names[clf_abbr][1],
cv=self.outer_cv, scoring='accuracy', verbose=1,
n_jobs=self.n_jobs, return_train_score=config.MLConf.train_score
)
# elif self.search_method.lower() == 'hyperband' and clf_key in ['XGBoost', 'Extra-Trees', 'Random Forest']:
# HyperbandSearchCV(
# clf_val[0](probability=True) if clf_key == 'SVM' else clf_val[0](), clf_val[2].copy().pop('n_estimators'),
# resource_param='n_estimators',
# min_iter=500 if clf_key == 'XGBoost' else 200,
# max_iter=3000 if clf_key == 'XGBoost' else 1000,
# cv=self.inner_cv, random_state=seed_no, scoring=score
# )
else: # randomized is used as default
clf = RandomizedSearchCV(
clf_names[clf_abbr][0](), clf_names[clf_abbr][2],
cv=self.outer_cv, scoring='accuracy', verbose=1,
n_jobs=self.n_jobs, n_iter=self.n_iter, return_train_score=config.MLConf.train_score
)
clf.fit(np.asarray(self.train_X), pd.Series(self.train_Y))
hyperparams_found = dict()
hyperparams_found['score'] = clf.best_score_
hyperparams_found['results'] = clf.cv_results_
# hyperparams_found['test_len'] = [len(test) for _, test in self.outer_cv.split(X_train, y_train)]
hyperparams_found['hyperparams'] = clf.best_params_
hyperparams_found['estimator'] = clf.best_estimator_
hyperparams_found['classifier'] = name
hyperparams_found['scorers'] = clf.scorer_
hyperparams_data.append(hyperparams_found)
_, best_clf = max(enumerate(hyperparams_data), key=(lambda x: x[1]['score']))
print('score: {}, hyperparams: {}'.format(best_clf['score'], best_clf['hyperparams']))
train_time = 0
predictedL = list()
tot_features = list()
print("Training {}...".format(StaticValues.classifiers[clf_abbr]))
# for X_train, y_train, X_pred, y_pred in izip(
# (np.asarray(row, float) for row in (self.X1, self.X2)),
# (np.asarray(row, float) for row in (self.Y1, self.Y2)),
# (np.asarray(row, float) for row in (self.X2, self.X1)),
# ((row for row in (self.Y2, self.Y1)))
# ):
start_time = time.time()
features_supported = [True] * len(StaticValues.featureColumns)
# if features is not None:
# features_supported = [x and y for x, y in zip(features_supported, features)]
# if fs_method is not None and {'rf', 'et', 'xgboost'}.intersection({name}):
# X_train, X_pred, features_supported = self._perform_feature_selection(
# X_train, y_train, X_pred, fs_method, model
# )
# tot_features = [x or y for x, y in izip_longest(features_supported, tot_features, fillvalue=False)]
# model.fit(X_train, y_train)
best_clf['estimator'].fit(np.asarray(self.train_X), self.train_Y)
train_time += (time.time() - start_time)
start_time = time.time()
# predictedL += list(model.predict(X_pred))
predictedL += list(best_clf['estimator'].predict(self.test_X))
self.timers[clf_abbr] += (time.time() - start_time)
# if hasattr(model, "feature_importances_"):
# if clf_abbr not in self.importances:
# self.importances[clf_abbr] = np.zeros(len(StaticValues.featureColumns), dtype=float)
#
# for idx, val in zip([i for i, x in enumerate(features_supported) if x], model.feature_importances_):
# self.importances[clf_abbr][idx] += val
# elif hasattr(model, "coef_"):
# if clf_abbr not in self.importances:
# self.importances[clf_abbr] = np.zeros(len(StaticValues.featureColumns), dtype=float)
#
# for idx, val in zip([i for i, x in enumerate(features_supported) if x],
# model.coef_.ravel()):
# self.importances[clf_abbr][idx] += val
# # print(model.score(X_pred, y_pred))
print("Best features discovered: ", end="")
print(*tot_features, sep=",")
print("Training took {0:.3f} sec ({1:.3f} min)".format(train_time, train_time / 60.0))
self.timers[clf_abbr] += self.timer
print("Matching records...")
# real = self.Y2 + self.Y1
real = self.test_Y
for pos in range(len(real)):
if real[pos] == 1.0:
if predictedL[pos] == 1.0:
self.num_true_predicted_true[clf_abbr] += 1.0
if self.accuracyresults:
self.file.write("TRUE\tTRUE\n")
else:
self.num_true_predicted_false[clf_abbr] += 1.0
if self.accuracyresults:
self.file.write("TRUE\tFALSE\n")
else:
if predictedL[pos] == 1.0:
self.num_false_predicted_true[clf_abbr] += 1.0
if self.accuracyresults:
self.file.write("FALSE\tTRUE\n")
else:
self.num_false_predicted_false[clf_abbr] += 1.0
if self.accuracyresults:
self.file.write("FALSE\tFALSE\n")
# if hasattr(clf, "decision_function"):
# Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
# else:
# Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
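    # Hedged summary sketch (exposition only; the names below are placeholders): the search in
    # train_classifiers follows the usual sklearn pattern of "tune, pick the best estimator,
    # refit on the full training set, then predict", e.g.
    #   search = RandomizedSearchCV(LinearSVC(), param_distributions, cv=outer_cv,
    #                               scoring='accuracy', n_iter=250).fit(X_train, y_train)
    #   y_pred = search.best_estimator_.fit(X_train, y_train).predict(X_test)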
def print_stats(self):
for name in self.mlalgs_to_run:
if name not in StaticValues.classifiers_abbr.keys():
continue
idx = StaticValues.classifiers_abbr[name]
status, acc, pre, rec, f1, t = self._compute_stats(idx, True)
if status == 0:
self._print_stats(StaticValues.classifiers[idx], acc, pre, rec, f1, t)
if idx not in self.importances or not isinstance(self.importances[idx], np.ndarray):
print("The classifier {} does not expose \"coef_\" or \"feature_importances_\" attributes".format(
name))
else:
importances = self.importances[idx] / 2.0
importances = np.ma.masked_equal(importances, 0.0)
if importances.mask is np.ma.nomask: importances.mask = np.zeros(importances.shape, dtype=bool)
# indices = np.argsort(importances)[::-1]
# for f in range(min(importances.shape[0], self.max_important_features_toshow)):
# print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
indices = np.argsort(importances.compressed())[::-1][
:min(importances.shape[0], self.max_important_features_toshow)]
headers = ["name", "score"]
print(tabulate(zip(
np.asarray(StaticValues.featureColumns, object)[~importances.mask][indices],
importances.compressed()[indices]
), headers, tablefmt="simple"))
# if hasattr(clf, "feature_importances_"):
# # if results:
# # result[indices[f]] = importances[indices[f]]
print("")
sys.stdout.flush()
class calcCustomFEMLExtended(baseMetrics):
max_important_features_toshow = 50
fterm_feature_size = 10
def __init__(self, njobs, accures):
self.X1 = []
self.Y1 = []
self.X2 = []
self.Y2 = []
self.train_X = []
self.train_Y = []
self.test_X = []
self.test_Y = []
self.best_clf = {}
self.classifiers = [
LinearSVC(
# random_state=0, C=1.0, max_iter=3000
**config.MLConf.clf_static_params['SVM']
),
# GaussianProcessClassifier(1.0 * RBF(1.0), n_jobs=3, warm_start=True),
DecisionTreeClassifier(
# random_state=0, max_depth=100, max_features='auto'
**config.MLConf.clf_static_params['DecisionTree']
),
RandomForestClassifier(
# default
# n_estimators=250, max_depth=50, oob_score=True, bootstrap=True
# optimized
**config.MLConf.clf_static_params['RandomForest']
),
MLPClassifier(alpha=1, random_state=0),
# AdaBoostClassifier(DecisionTreeClassifier(max_depth=50), n_estimators=300, random_state=0),
GaussianNB(),
# QuadraticDiscriminantAnalysis(), LinearDiscriminantAnalysis(),
ExtraTreesClassifier(
# n_estimators=100, random_state=0, n_jobs=int(njobs), max_depth=50
**config.MLConf.clf_static_params['ExtraTrees']
),
XGBClassifier(
# n_estimators=3000, seed=0, nthread=int(njobs)
**config.MLConf.clf_static_params['XGBoost']
),
]
self.scores = [[] for _ in range(len(self.classifiers))]
self.importances = dict()
self.mlalgs_to_run = StaticValues.classifiers_abbr.keys()
# To be used within GridSearch
self.inner_cv = StratifiedKFold(n_splits=config.MLConf.kfold_inner_parameter, shuffle=False,
random_state=config.seed_no)
# To be used in outer CV
self.outer_cv = StratifiedKFold(n_splits=config.MLConf.kfold_parameter, shuffle=False,
random_state=config.seed_no)
self.kfold = config.MLConf.kfold_parameter
self.n_jobs = config.MLConf.n_jobs
self.search_method = config.MLConf.hyperparams_search_method
self.n_iter = config.MLConf.max_iter
super(calcCustomFEMLExtended, self).__init__(len(self.classifiers), njobs, accures)
def evaluate(self, row, sorting=False, stemming=False, canonical=False, permuted=False, custom_thres='orig',
selectable_features=None):
# if row['res'].upper() == "TRUE":
# if len(self.Y1) < ((self.num_true + self.num_false) / 2.0): self.Y1.append(1.0)
# else: self.Y2.append(1.0)
# else:
# if len(self.Y1) < ((self.num_true + self.num_false) / 2.0): self.Y1.append(0.0)
# else: self.Y2.append(0.0)
if row['res'].upper() == "TRUE":
self.train_Y.append(1.0)
else:
self.train_Y.append(0.0)
tmp_X1, tmp_X2 = [], []
for flag in list({False, sorting}):
if (not flag and not config.MLConf.features_to_build['basic']) or (flag and not config.MLConf.features_to_build['sorted']): continue
a, b = transform(row['s1'], row['s2'], sorting=flag, stemming=stemming, canonical=flag)
start_time = time.time()
sim1 = StaticValues.algorithms['damerau_levenshtein'](a, b)
sim8 = StaticValues.algorithms['jaccard'](a, b)
sim2 = StaticValues.algorithms['jaro'](a, b)
sim3 = StaticValues.algorithms['jaro_winkler'](a, b)
sim4 = StaticValues.algorithms['jaro_winkler'](a[::-1], b[::-1])
sim11 = StaticValues.algorithms['monge_elkan'](a, b)
sim7 = StaticValues.algorithms['cosine'](a, b)
sim9 = StaticValues.algorithms['strike_a_match'](a, b)
sim12 = StaticValues.algorithms['soft_jaccard'](a, b)
if not flag: sim5 = StaticValues.algorithms['sorted_winkler'](a, b)
if permuted: sim6 = StaticValues.algorithms['permuted_winkler'](a, b)
sim10 = StaticValues.algorithms['skipgram'](a, b)
sim13 = StaticValues.algorithms['davies'](a, b)
if flag:
sim16 = StaticValues.algorithms['l_jaro_winkler'](a, b)
sim17 = StaticValues.algorithms['l_jaro_winkler'](a[::-1], b[::-1])
self.timer += (time.time() - start_time)
# if len(self.X1) < ((self.num_true + self.num_false) / 2.0):
# if permuted:
# if flag: tmp_X1.append([sim1, sim2, sim3, sim4, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
# else: tmp_X1.append([sim1, sim2, sim3, sim4, sim5, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
# else:
# if flag: tmp_X1.append([sim1, sim2, sim3, sim4, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
# else: tmp_X1.append([sim1, sim2, sim3, sim4, sim5, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
# if flag: tmp_X1.append([sim16, sim17, sim15])
# else:
if permuted:
if flag: tmp_X2.append([sim1, sim2, sim3, sim4, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else: tmp_X2.append([sim1, sim2, sim3, sim4, sim5, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else:
if flag: tmp_X2.append([sim1, sim2, sim3, sim4, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else: tmp_X2.append([sim1, sim2, sim3, sim4, sim5, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
if flag: tmp_X2.append([sim16, sim17])
# for flag in list({False, True}):
if config.MLConf.features_to_build['lgm'] and sorting:
start_time = time.time()
row['s1'], row['s2'] = transform(row['s1'], row['s2'], sorting=sorting, stemming=stemming,
canonical=canonical, simple_sorting=False)
lsim_baseThres = 'avg' if flag else 'simple'
# sim14 = StaticValues.algorithms['lsimilarity'](a, b)
sim15 = StaticValues.algorithms['avg_lsimilarity'](row['s1'], row['s2'])
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['davies'][lsim_baseThres][0])
feature17 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'davies', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['skipgram'][lsim_baseThres][0]
)
feature18 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'skipgram', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['soft_jaccard'][lsim_baseThres][0]
)
feature19 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'soft_jaccard', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['strike_a_match'][lsim_baseThres][0]
)
feature20 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'strike_a_match', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['cosine'][lsim_baseThres][0]
)
feature21 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'cosine', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['jaccard'][lsim_baseThres][0]
)
feature22 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'jaccard', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['monge_elkan'][lsim_baseThres][0]
)
feature23 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'monge_elkan', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['jaro_winkler'][lsim_baseThres][0]
)
feature24 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'jaro_winkler', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['jaro'][lsim_baseThres][0]
)
feature25 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'jaro', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['jaro_winkler_r'][lsim_baseThres][0]
)
feature26 = weighted_terms(
{'a': [x[::-1] for x in baseTerms['a']], 'b': [x[::-1] for x in baseTerms['b']],
'len': baseTerms['len'], 'char_len': baseTerms['char_len']},
{'a': [x[::-1] for x in mismatchTerms['a']], 'b': [x[::-1] for x in mismatchTerms['b']],
'len': mismatchTerms['len'], 'char_len': mismatchTerms['char_len']},
{'a': [x[::-1] for x in specialTerms['a']], 'b': [x[::-1] for x in specialTerms['b']],
'len': specialTerms['len'], 'char_len': specialTerms['char_len']},
'jaro_winkler', flag
)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['l_jaro_winkler'][lsim_baseThres][0]
)
feature27 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'l_jaro_winkler', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['l_jaro_winkler_r'][lsim_baseThres][0]
)
feature28 = weighted_terms(
{'a': [x[::-1] for x in baseTerms['a']], 'b': [x[::-1] for x in baseTerms['b']],
'len': baseTerms['len'], 'char_len': baseTerms['char_len']},
{'a': [x[::-1] for x in mismatchTerms['a']], 'b': [x[::-1] for x in mismatchTerms['b']],
'len': mismatchTerms['len'], 'char_len': mismatchTerms['char_len']},
{'a': [x[::-1] for x in specialTerms['a']], 'b': [x[::-1] for x in specialTerms['b']],
'len': specialTerms['len'], 'char_len': specialTerms['char_len']},
'l_jaro_winkler', flag
)
self.timer += (time.time() - start_time)
# if len(self.X1) < ((self.num_true + self.num_false) / 2.0):
# tmp_X1.append([feature17, feature18, feature19, feature20, feature21, feature22, feature23, feature24,
# feature25, feature26, feature27])
# else:
tmp_X2.append([
sim15, feature17, feature18, feature19, feature20, feature21, feature22, feature23, feature24,
feature25, feature26, feature27, feature28
])
if config.MLConf.features_to_build['individual'] and sorting:
start_time = time.time()
row['s1'], row['s2'] = transform(row['s1'], row['s2'], sorting=sorting, stemming=stemming,
canonical=canonical, simple_sorting=False)
method_nm = 'damerau_levenshtein'
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values[method_nm]['avg'][0])
feature1_1, feature1_2, feature1_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, method_nm)
# feature8_1, feature8_2, feature8_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, 'davies')
# feature9_1, feature9_2, feature9_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, 'skipgram')
# feature10_1, feature10_2, feature10_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, 'soft_jaccard')
# feature11_1, feature11_2, feature11_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, 'strike_a_match')
# feature12_1, feature12_2, feature12_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, 'cosine')
# feature13_1, feature13_2, feature13_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, 'monge_elkan')
# feature14_1, feature14_2, feature14_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, 'jaro_winkler')
# feature15_1, feature15_2, feature15_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, 'jaro')
# feature16_1, feature16_2, feature16_3 = score_per_term(
# {'a': [x[::-1] for x in baseTerms['a']], 'b': [x[::-1] for x in baseTerms['b']]},
# {'a': [x[::-1] for x in mismatchTerms['a']], 'b': [x[::-1] for x in mismatchTerms['b']]},
# {'a': [x[::-1] for x in specialTerms['a']], 'b': [x[::-1] for x in specialTerms['b']]},
# 'jaro_winkler'
# )
self.timer += (time.time() - start_time)
# if len(self.X1) < ((self.num_true + self.num_false) / 2.0):
# tmp_X1.append([
# feature1_1, feature1_2, feature1_3,
#
# # feature8_1, feature8_2, feature8_3,
# # feature9_1, feature9_2, feature9_3,
# # feature10_1, feature10_2, feature10_3,
# # feature11_1, feature11_2, feature11_3,
# # feature12_1, feature12_2, feature12_3,
# # feature13_1, feature13_2, feature13_3,
# # feature14_1, feature14_2, feature14_3,
# # feature15_1, feature15_2, feature15_3,
# # feature16_1, feature16_2, feature16_3,
# # int(feature2_1), int(feature2_2),
# # feature3_1, feature3_2,
# # int(feature4_1), int(feature4_2),
# # int(feature5_1), int(feature5_2)
# ])
# # tmp_X1.append(map(lambda x: int(x == max(feature6_1)), feature6_1))
# # tmp_X1.append(map(lambda x: int(x == max(feature6_2)), feature6_2))
# # self.X1[-1].extend(
# # feature7_1[:self.fterm_feature_size/2] + feature7_1[len(feature7_1)/2:self.fterm_feature_size/2] +
# # feature7_2[:self.fterm_feature_size/2] + feature7_2[len(feature7_2)/2:self.fterm_feature_size/2]
# # )
#
# if selectable_features is not None:
# self.X1.append(list(compress(chain.from_iterable(tmp_X1), selectable_features)))
# else:
# self.X1.append(list(chain.from_iterable(tmp_X1)))
# else:
tmp_X2.append([
feature1_1, feature1_2, feature1_3,
# feature8_1, feature8_2, feature8_3,
# feature9_1, feature9_2, feature9_3,
# feature10_1, feature10_2, feature10_3,
# feature11_1, feature11_2, feature11_3,
# feature12_1, feature12_2, feature12_3,
# feature13_1, feature13_2, feature13_3,
# feature14_1, feature14_2, feature14_3,
# feature15_1, feature15_2, feature15_3,
# feature16_1, feature16_2, feature16_3,
# int(feature2_1), int(feature2_2),
# feature3_1, feature3_2,
# int(feature4_1), int(feature4_2),
# int(feature5_1), int(feature5_2)
])
# tmp_X2.append(map(lambda x: int(x == max(feature6_1)), feature6_1))
# tmp_X2.append(map(lambda x: int(x == max(feature6_2)), feature6_2))
if config.MLConf.features_to_build['stats'] and sorting:
row['s1'], row['s2'] = transform(row['s1'], row['s2'], sorting=sorting, stemming=stemming,
canonical=canonical, simple_sorting=False)
# feature2_1 = FEMLFeatures.contains(row['s1'], row['s2'])
# feature2_2 = FEMLFeatures.contains(row['s2'], row['s1'])
feature1_1, feature1_2 = FEMLFeatures.no_of_words(row['s1'], row['s2'])
# feature4_1 = FEMLFeatures.containsDashConnected_words(row['s1'])
# feature4_2 = FEMLFeatures.containsDashConnected_words(row['s2'])
feature2_1, feature2_2 = FEMLFeatures.containsFreqTerms(row['s1'], row['s2'])
# feature5_1 = False if len(fterms_s1) == 0 else True
# feature5_2 = False if len(fterms_s2) == 0 else True
# feature6_1, feature6_2 = FEMLFeatures().containsInPos(row['s1'], row['s2'])
# feature7_1 = [0] * (len(LSimilarityVars.freq_ngrams['tokens'] | LSimilarityVars.freq_ngrams['chars']))
# feature7_2 = [0] * (len(LSimilarityVars.freq_ngrams['tokens'] | LSimilarityVars.freq_ngrams['chars']))
# for x in fterms_s1: feature7_1[x[0]] = 1
# for x in fterms_s2: feature7_2[x[0]] = 1
feature3_1, feature3_2 = FEMLFeatures.positionalFreqTerms(row['s1'], row['s2'])
tmp_X2.append([feature1_1, feature1_2, feature2_1, feature2_2] + feature3_1 + feature3_2)
if selectable_features is not None:
self.train_X.append(list(compress(chain.from_iterable(tmp_X2), selectable_features)))
else:
self.train_X.append(np.around(list(chain.from_iterable(tmp_X2)), 5).tolist())
if self.file is None and self.accuracyresults:
file_name = 'dataset-accuracyresults-sim-metrics'
if canonical:
file_name += '_canonical'
if sorting:
file_name += '_sorted'
self.file = open(file_name + '.csv', 'w+')
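# load_test_dataset(): mirrors evaluate() for the held-out split; it builds the same
# feature vector per record pair, updates the num_true/num_false counters and appends
# the result to test_X/test_Y.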
def load_test_dataset(self, row, sorting=False, stemming=False, canonical=False, permuted=False, custom_thres='orig'):
if row['res'].upper() == "TRUE":
self.test_Y.append(1.0)
self.num_true += 1.0
else:
self.test_Y.append(0.0)
self.num_false += 1.0
tmp_X1, tmp_X2 = [], []
for flag in list({False, sorting}):
if (not flag and not config.MLConf.features_to_build['basic']) or (
flag and not config.MLConf.features_to_build['sorted']): continue
a, b = transform(row['s1'], row['s2'], sorting=flag, stemming=stemming, canonical=flag)
start_time = time.time()
sim1 = StaticValues.algorithms['damerau_levenshtein'](a, b)
sim8 = StaticValues.algorithms['jaccard'](a, b)
sim2 = StaticValues.algorithms['jaro'](a, b)
sim3 = StaticValues.algorithms['jaro_winkler'](a, b)
sim4 = StaticValues.algorithms['jaro_winkler'](a[::-1], b[::-1])
sim11 = StaticValues.algorithms['monge_elkan'](a, b)
sim7 = StaticValues.algorithms['cosine'](a, b)
sim9 = StaticValues.algorithms['strike_a_match'](a, b)
sim12 = StaticValues.algorithms['soft_jaccard'](a, b)
if not flag: sim5 = StaticValues.algorithms['sorted_winkler'](a, b)
if permuted: sim6 = StaticValues.algorithms['permuted_winkler'](a, b)
sim10 = StaticValues.algorithms['skipgram'](a, b)
sim13 = StaticValues.algorithms['davies'](a, b)
if flag:
sim16 = StaticValues.algorithms['l_jaro_winkler'](a, b)
sim17 = StaticValues.algorithms['l_jaro_winkler'](a[::-1], b[::-1])
self.timer += (time.time() - start_time)
if permuted:
if flag:
tmp_X2.append([sim1, sim2, sim3, sim4, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else:
tmp_X2.append([sim1, sim2, sim3, sim4, sim5, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else:
if flag:
tmp_X2.append([sim1, sim2, sim3, sim4, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else:
tmp_X2.append([sim1, sim2, sim3, sim4, sim5, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
if flag: tmp_X2.append([sim16, sim17])
# for flag in list({False, True}):
if config.MLConf.features_to_build['lgm'] and sorting:
start_time = time.time()
row['s1'], row['s2'] = transform(row['s1'], row['s2'], sorting=sorting, stemming=stemming,
canonical=canonical, simple_sorting=False)
lsim_baseThres = 'avg' if flag else 'simple'
# sim14 = StaticValues.algorithms['lsimilarity'](a, b)
sim15 = StaticValues.algorithms['avg_lsimilarity'](row['s1'], row['s2'])
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['davies'][lsim_baseThres][0])
feature17 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'davies', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['skipgram'][lsim_baseThres][0]
)
feature18 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'skipgram', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['soft_jaccard'][lsim_baseThres][0]
)
feature19 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'soft_jaccard', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['strike_a_match'][lsim_baseThres][0]
)
feature20 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'strike_a_match', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['cosine'][lsim_baseThres][0]
)
feature21 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'cosine', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['jaccard'][lsim_baseThres][0]
)
feature22 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'jaccard', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['monge_elkan'][lsim_baseThres][0]
)
feature23 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'monge_elkan', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['jaro_winkler'][lsim_baseThres][0]
)
feature24 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'jaro_winkler', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['jaro'][lsim_baseThres][0]
)
feature25 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'jaro', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['jaro_winkler_r'][lsim_baseThres][0]
)
feature26 = weighted_terms(
{'a': [x[::-1] for x in baseTerms['a']], 'b': [x[::-1] for x in baseTerms['b']],
'len': baseTerms['len'], 'char_len': baseTerms['char_len']},
{'a': [x[::-1] for x in mismatchTerms['a']], 'b': [x[::-1] for x in mismatchTerms['b']],
'len': mismatchTerms['len'], 'char_len': mismatchTerms['char_len']},
{'a': [x[::-1] for x in specialTerms['a']], 'b': [x[::-1] for x in specialTerms['b']],
'len': specialTerms['len'], 'char_len': specialTerms['char_len']},
'jaro_winkler', flag
)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['l_jaro_winkler'][lsim_baseThres][0]
)
feature27 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'l_jaro_winkler', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['l_jaro_winkler_r'][lsim_baseThres][0]
)
feature28 = weighted_terms(
{'a': [x[::-1] for x in baseTerms['a']], 'b': [x[::-1] for x in baseTerms['b']],
'len': baseTerms['len'], 'char_len': baseTerms['char_len']},
{'a': [x[::-1] for x in mismatchTerms['a']], 'b': [x[::-1] for x in mismatchTerms['b']],
'len': mismatchTerms['len'], 'char_len': mismatchTerms['char_len']},
{'a': [x[::-1] for x in specialTerms['a']], 'b': [x[::-1] for x in specialTerms['b']],
'len': specialTerms['len'], 'char_len': specialTerms['char_len']},
'l_jaro_winkler', flag
)
self.timer += (time.time() - start_time)
tmp_X2.append([
sim15, feature17, feature18, feature19, feature20, feature21, feature22, feature23, feature24,
feature25, feature26, feature27, feature28
])
if config.MLConf.features_to_build['individual'] and sorting:
start_time = time.time()
row['s1'], row['s2'] = transform(row['s1'], row['s2'], sorting=sorting, stemming=stemming,
canonical=canonical, simple_sorting=False)
method_nm = 'damerau_levenshtein'
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values[method_nm]['avg'][0])
feature1_1, feature1_2, feature1_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, method_nm)
# feature2_1 = FEMLFeatures.contains(row['s1'], row['s2'])
# feature2_2 = FEMLFeatures.contains(row['s2'], row['s1'])
# feature3_1, feature3_2 = FEMLFeatures.no_of_words(row['s1'], row['s2'])
# feature4_1 = FEMLFeatures.containsDashConnected_words(row['s1'])
# feature4_2 = FEMLFeatures.containsDashConnected_words(row['s2'])
# fterms_s1, fterms_s2 = FEMLFeatures.containsFreqTerms(row['s1'], row['s2'])
# feature5_1 = False if len(fterms_s1) == 0 else True
# feature5_2 = False if len(fterms_s2) == 0 else True
# feature6_1, feature6_2 = FEMLFeatures().containsInPos(row['s1'], row['s2'])
# feature7_1 = [0] * (len(LSimilarityVars.freq_ngrams['tokens'] | LSimilarityVars.freq_ngrams['chars']))
# feature7_2 = [0] * (len(LSimilarityVars.freq_ngrams['tokens'] | LSimilarityVars.freq_ngrams['chars']))
# for x in fterms_s1: feature7_1[x[0]] = 1
# for x in fterms_s2: feature7_2[x[0]] = 1
# feature8_1, feature8_2, feature8_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, 'davies')
# feature9_1, feature9_2, feature9_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, 'skipgram')
# feature10_1, feature10_2, feature10_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, 'soft_jaccard')
# feature11_1, feature11_2, feature11_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, 'strike_a_match')
# feature12_1, feature12_2, feature12_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, 'cosine')
# feature13_1, feature13_2, feature13_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, 'monge_elkan')
# feature14_1, feature14_2, feature14_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, 'jaro_winkler')
# feature15_1, feature15_2, feature15_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, 'jaro')
# feature16_1, feature16_2, feature16_3 = score_per_term(
# {'a': [x[::-1] for x in baseTerms['a']], 'b': [x[::-1] for x in baseTerms['b']]},
# {'a': [x[::-1] for x in mismatchTerms['a']], 'b': [x[::-1] for x in mismatchTerms['b']]},
# {'a': [x[::-1] for x in specialTerms['a']], 'b': [x[::-1] for x in specialTerms['b']]},
# 'jaro_winkler'
# )
self.timer += (time.time() - start_time)
tmp_X2.append([
feature1_1, feature1_2, feature1_3,
# feature8_1, feature8_2, feature8_3,
# feature9_1, feature9_2, feature9_3,
# feature10_1, feature10_2, feature10_3,
# feature11_1, feature11_2, feature11_3,
# feature12_1, feature12_2, feature12_3,
# feature13_1, feature13_2, feature13_3,
# feature14_1, feature14_2, feature14_3,
# feature15_1, feature15_2, feature15_3,
# feature16_1, feature16_2, feature16_3,
# int(feature2_1), int(feature2_2),
# feature3_1, feature3_2,
# int(feature4_1), int(feature4_2),
# int(feature5_1), int(feature5_2)
])
# tmp_X2.append(map(lambda x: int(x == max(feature6_1)), feature6_1))
# tmp_X2.append(map(lambda x: int(x == max(feature6_2)), feature6_2))
if config.MLConf.features_to_build['stats'] and sorting:
row['s1'], row['s2'] = transform(row['s1'], row['s2'], sorting=sorting, stemming=stemming,
canonical=canonical, simple_sorting=False)
# feature2_1 = FEMLFeatures.contains(row['s1'], row['s2'])
# feature2_2 = FEMLFeatures.contains(row['s2'], row['s1'])
feature1_1, feature1_2 = FEMLFeatures.no_of_words(row['s1'], row['s2'])
# feature4_1 = FEMLFeatures.containsDashConnected_words(row['s1'])
# feature4_2 = FEMLFeatures.containsDashConnected_words(row['s2'])
feature2_1, feature2_2 = FEMLFeatures.containsFreqTerms(row['s1'], row['s2'])
# feature5_1 = False if len(fterms_s1) == 0 else True
# feature5_2 = False if len(fterms_s2) == 0 else True
# feature6_1, feature6_2 = FEMLFeatures().containsInPos(row['s1'], row['s2'])
# feature7_1 = [0] * (len(LSimilarityVars.freq_ngrams['tokens'] | LSimilarityVars.freq_ngrams['chars']))
# feature7_2 = [0] * (len(LSimilarityVars.freq_ngrams['tokens'] | LSimilarityVars.freq_ngrams['chars']))
# for x in fterms_s1: feature7_1[x[0]] = 1
# for x in fterms_s2: feature7_2[x[0]] = 1
feature3_1, feature3_2 = FEMLFeatures.positionalFreqTerms(row['s1'], row['s2'])
tmp_X2.append([feature1_1, feature1_2, feature2_1, feature2_2] + feature3_1 + feature3_2)
self.test_X.append(np.around(list(chain.from_iterable(tmp_X2)), 5).tolist())
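# perform_cv(): for every requested algorithm, builds a Pipeline of MinMaxScaler ->
# RFECV feature selection -> hyperparameter search (GridSearchCV or RandomizedSearchCV,
# scored on accuracy over outer_cv), fits it on the training set and keeps the
# best-scoring configuration in self.best_clf; if the winning estimator exposes
# feature_importances_ or coef_, the selected features and their scores are printed.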
def perform_cv(self, ml_algs, polynomial=False, standardize=False):
# iterate over classifiers
if set(ml_algs) != {'all'}: self.mlalgs_to_run = ml_algs
hyperparams_data = list()
# for i, (name, clf) in enumerate(zip(self.names, self.classifiers)):
for name in self.mlalgs_to_run:
if name not in StaticValues.classifiers_abbr.keys():
print('{} is not a valid ML algorithm'.format(name))
continue
clf_abbr = StaticValues.classifiers_abbr[name]
model = self.classifiers[clf_abbr]
# selector = RFE(model, n_features_to_select=config.MLConf.features_to_select, step=2)
selector = RFECV(model, min_features_to_select=config.MLConf.features_to_select, step=2, scoring='accuracy',
cv=StratifiedKFold(2, shuffle=True, random_state=config.seed_no), n_jobs=config.MLConf.n_jobs)
scaler = MinMaxScaler()
# scaler = StandardScaler()
print('Running cv for {}...'.format(name))
if self.search_method.lower() == 'grid':
cv = GridSearchCV(
clf_names[clf_abbr][0](),
clf_names[clf_abbr][1],
cv=self.outer_cv, scoring='accuracy', verbose=1, pre_dispatch='2*n_jobs',
n_jobs=self.n_jobs, return_train_score=config.MLConf.train_score
)
else: # randomized is used as default
cv = RandomizedSearchCV(
clf_names[clf_abbr][0](),
clf_names[clf_abbr][2],
cv=self.outer_cv, scoring='accuracy', verbose=1, pre_dispatch='2*n_jobs',
n_jobs=self.n_jobs, n_iter=self.n_iter, return_train_score=config.MLConf.train_score
)
# clf.fit(np.asarray(self.train_X), pd.Series(self.train_Y))
pipe_params = [('scaler', scaler), ('select', selector), ('clf', cv)]
# pipe_params = [ ('clf', cv)]
pipe_clf = Pipeline(pipe_params)
pipe_clf.fit(np.asarray(self.train_X), pd.Series(self.train_Y))
hyperparams_found = dict()
# hyperparams_found['score'] = clf.best_score_
# hyperparams_found['results'] = clf.cv_results_
# # hyperparams_found['test_len'] = [len(test) for _, test in self.outer_cv.split(X_train, y_train)]
# hyperparams_found['hyperparams'] = clf.best_params_
# hyperparams_found['estimator'] = clf.best_estimator_
# hyperparams_found['classifier'] = name
# hyperparams_found['scorers'] = clf.scorer_
hyperparams_found['score'] = pipe_clf.named_steps['clf'].best_score_
hyperparams_found['results'] = pipe_clf.named_steps['clf'].cv_results_
hyperparams_found['hyperparams'] = pipe_clf.named_steps['clf'].best_params_
hyperparams_found['estimator'] = pipe_clf.named_steps['clf'].best_estimator_
hyperparams_found['classifier'] = name
hyperparams_data.append(hyperparams_found)
_, self.best_clf = max(enumerate(hyperparams_data), key=(lambda x: x[1]['score']))
print('score: {}, hyperparams: {}'.format(self.best_clf['score'], self.best_clf['hyperparams']))
feature_importances = None  # populated only when the best estimator exposes importances or coefficients
if hasattr(pipe_clf.named_steps['clf'].best_estimator_, 'feature_importances_') or hasattr(pipe_clf.named_steps['clf'].best_estimator_, 'coef_'):
feature_importances = pipe_clf.named_steps['clf'].best_estimator_.feature_importances_ \
if hasattr(pipe_clf.named_steps['clf'].best_estimator_, 'feature_importances_') \
else pipe_clf.named_steps['clf'].best_estimator_.coef_
cols = []
if config.MLConf.features_to_build['basic']:
cols += StaticValues.basicFeatures
if config.MLConf.features_to_build['sorted']:
cols += StaticValues.sortedFeatures
if config.MLConf.features_to_build['lgm']:
cols += StaticValues.lgmFeatures
if config.MLConf.features_to_build['individual']:
cols += StaticValues.individualFeatures
if config.MLConf.features_to_build['stats']:
cols += StaticValues.extraFeatures
feature_names = np.asarray(cols) # transformed list to array
support = pipe_clf.named_steps['select'].support_
print(feature_importances)
print('features selected: {}'.format(
{k: v for k, v in zip(feature_names[support], feature_importances)}
))
print('features mask: {}'.format(support))
else: print('The best estimator exposes neither "feature_importances_" nor "coef_"; skipping feature ranking')
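# train_classifiers(): refits the best estimator found by perform_cv() on the full
# training set, predicts the test set and accumulates per-classifier timings, feature
# importances and the TRUE/FALSE confusion counters consumed later by print_stats().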
def train_classifiers(self, ml_algs, polynomial=False, standardize=False, fs_method=None, features=None):
# if polynomial:
# self.X1 = PolynomialFeatures().fit_transform(self.X1)
# self.X2 = PolynomialFeatures().fit_transform(self.X2)
# if standardize:
# # self.X1 = StandardScaler().fit_transform(self.X1)
# # self.X2 = StandardScaler().fit_transform(self.X2)
# # print(zip(*self.X1)[18][:10], '||', zip(*self.X2)[18][:10])
# self.train_X = MinMaxScaler().fit_transform(self.train_X)
# self.test_X = MinMaxScaler().fit_transform(self.test_X)
# # print(zip(*self.X1)[18][:10], '||', zip(*self.X2)[18][:10])
# iterate over classifiers
if set(ml_algs) != {'all'}: self.mlalgs_to_run = ml_algs
# for i, (name, clf) in enumerate(zip(self.names, self.classifiers)):
for name in self.mlalgs_to_run:
if name not in StaticValues.classifiers_abbr.keys():
print('{} is not a valid ML algorithm'.format(name))
continue
clf_abbr = StaticValues.classifiers_abbr[name]
# model = self.classifiers[clf_abbr]
# selector = RFE(model, n_features_to_select=config.MLConf.features_to_select, step=2)
# scaler = MinMaxScaler()
# # scaler = StandardScaler()
train_time = 0
predictedL = list()
tot_features = list()
print("Training {}...".format(StaticValues.classifiers[clf_abbr]))
# for X_train, y_train, X_pred, y_pred in izip(
# (np.asarray(row, float) for row in (self.X1, self.X2)),
# (np.asarray(row, float) for row in (self.Y1, self.Y2)),
# (np.asarray(row, float) for row in (self.X2, self.X1)),
# (row for row in (self.Y2, self.Y1))
# ):
start_time = time.time()
cols = []
if config.MLConf.features_to_build['basic']:
cols += StaticValues.basicFeatures
if config.MLConf.features_to_build['sorted']:
cols += StaticValues.sortedFeatures
if config.MLConf.features_to_build['lgm']:
cols += StaticValues.lgmFeatures
if config.MLConf.features_to_build['individual']:
cols += StaticValues.individualFeatures
if config.MLConf.features_to_build['stats']:
cols += StaticValues.extraFeatures
features_supported = [True] * len(cols)
# if features is not None:
# features_supported = [x and y for x, y in zip(features_supported, features)]
# if fs_method is not None and set([name]) & {'rf', 'et', 'xgboost'}:
# X_train, X_pred, features_supported = self._perform_feature_selection(
# X_train, y_train, X_pred, fs_method, model, 11
# )
# tot_features = [x or y for x, y in izip_longest(features_supported, tot_features, fillvalue=False)]
# model.fit(X_train, y_train)
# print('outside cv hyperparams: {}'.format(self.best_clf['estimator'].get_params()))
self.best_clf['estimator'].fit(self.train_X, self.train_Y)
# print(best_clf['estimator'].feature_importances_)
train_time += (time.time() - start_time)
start_time = time.time()
# predictedL += list(model.predict(X_pred))
# self.test_X = pipe_clf.transform(self.test_X)
predictedL += list(self.best_clf['estimator'].predict(self.test_X))
self.timers[clf_abbr] += (time.time() - start_time)
if hasattr(self.best_clf['estimator'], "feature_importances_"):
# self.importances[i] += model.feature_importances_
if clf_abbr not in self.importances:
self.importances[clf_abbr] = np.zeros(len(cols), dtype=float)
for idx, val in zip([i for i, x in enumerate(features_supported) if x], self.best_clf['estimator'].feature_importances_):
self.importances[clf_abbr][idx] += val
elif hasattr(self.best_clf['estimator'], "coef_"):
if clf_abbr in self.importances:
self.importances[clf_abbr] += self.best_clf['estimator'].coef_.ravel()
else:
self.importances[clf_abbr] = self.best_clf['estimator'].coef_.ravel()
# self.scores[i].append(model.score(np.array(pred_X), np.array(pred_Y)))
# if name in ['rf']:
# print('R^2 Training Score: {:.2f} \nOOB Score: {:.2f} \nR^2 Validation Score: {:.2f}'.format(
# model.score(X_train, y_train),
# model.oob_score_,
# model.score(X_pred, y_pred))
# )
print("Best Features discovered: ", end="")
print(*tot_features, sep=",")
print("Training took {0:.3f} sec ({1:.3f} min)".format(train_time, train_time / 60.0))
self.timers[clf_abbr] += self.timer
print("Matching records...")
# real = self.Y2 + self.Y1
real = self.test_Y
for pos in range(len(real)):
if real[pos] == 1.0:
if predictedL[pos] == 1.0:
self.num_true_predicted_true[clf_abbr] += 1.0
if self.accuracyresults:
self.file.write("TRUE\tTRUE\n")
else:
self.num_true_predicted_false[clf_abbr] += 1.0
if self.accuracyresults:
self.file.write("TRUE\tFALSE\n")
else:
if predictedL[pos] == 1.0:
self.num_false_predicted_true[clf_abbr] += 1.0
if self.accuracyresults:
self.file.write("FALSE\tTRUE\n")
else:
self.num_false_predicted_false[clf_abbr] += 1.0
if self.accuracyresults:
self.file.write("FALSE\tFALSE\n")
# if hasattr(clf, "decision_function"):
# Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
# else:
# Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
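# print_stats(): reports accuracy/precision/recall/f1 and timing per classifier and,
# when importances were collected, a table with the top-ranked features.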
def print_stats(self):
for name in self.mlalgs_to_run:
if name not in StaticValues.classifiers_abbr.keys():
continue
idx = StaticValues.classifiers_abbr[name]
status, acc, pre, rec, f1, t = self._compute_stats(idx, True)
if status == 0:
self._print_stats(StaticValues.classifiers[idx], acc, pre, rec, f1, t)
if idx not in self.importances or not isinstance(self.importances[idx], np.ndarray):
print("The classifier {} does not expose \"coef_\" or \"feature_importances_\" attributes".format(
name))
else:
cols = []
if config.MLConf.features_to_build['basic']:
cols += StaticValues.basicFeatures
if config.MLConf.features_to_build['sorted']:
cols += StaticValues.sortedFeatures
if config.MLConf.features_to_build['lgm']:
cols += StaticValues.lgmFeatures
if config.MLConf.features_to_build['individual']:
cols += StaticValues.individualFeatures
if config.MLConf.features_to_build['stats']:
cols += StaticValues.extraFeatures
importances = self.importances[idx]
importances = np.ma.masked_equal(importances, 0.0)
if importances.mask is np.ma.nomask: importances.mask = np.zeros(importances.shape, dtype=bool)
indices = np.argsort(importances.compressed())[::-1][
:min(importances.compressed().shape[0], self.max_important_features_toshow)]
headers = ["name", "score"]
print(tabulate(zip(
np.asarray(cols, object)[~importances.mask][indices],
importances.compressed()[indices]
), headers, tablefmt="simple"))
# if hasattr(clf, "feature_importances_"):
# # if results:
# # result[indices[f]] = importances[indices[f]]
print("")
sys.stdout.flush()
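# debug_stats(): dumps describe() summaries of the feature matrices to ./output/*.csv
# and reports null values; note that it reads self.X1/self.X2, which in this class are
# only filled by the commented-out legacy two-fold path, so the summaries may be empty.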
def debug_stats(self):
if not os.path.exists("output"):
os.makedirs("output")
print('')
cols = []
if config.MLConf.features_to_build['basic']:
cols += StaticValues.basicFeatures
if config.MLConf.features_to_build['sorted']:
cols += StaticValues.sortedFeatures
if config.MLConf.features_to_build['lgm']:
cols += StaticValues.lgmFeatures
if config.MLConf.features_to_build['individual']:
cols += StaticValues.individualFeatures
if config.MLConf.features_to_build['stats']:
cols += StaticValues.extraFeatures
df = pd.DataFrame(np.array(self.X1).reshape(-1, len(cols)), columns=cols)
# with pd.option_context('display.max_columns', None):
output_f = './output/X1_train_stats.csv'
df.describe().T.to_csv(output_f)
with open(output_f, 'a') as f:
print("Existence of null values in X1_train: {}".format(df.isnull().values.any()))
f.write("\nExistence of null values in X1_train: {}\n".format(df.isnull().values.any()))
print(df.mode(axis=0, dropna=False).T)
# f.write("Highest freq values per column in X1_train\n")
# df.mode(axis=0, dropna=False).T.to_csv(f, header=False)
df = pd.DataFrame(np.array(self.X2).reshape(-1, len(cols)), columns=cols)
output_f = './output/X2_train_stats.csv'
df.describe().T.to_csv(output_f)
with open(output_f, 'a') as f:
print("Existence of null values in X2_train: {}".format(df.isnull().values.any()))
f.write("\nExistence of null values in X2_train: {}\n".format(df.isnull().values.any()))
print(df.mode(axis=0, dropna=False).transpose())
# f.write("\nHighest freq values per column in X2_train\n")
# df.mode(axis=0, dropna=False).transpose().to_csv(f, header=False)
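# calcWithCustomHyperparams: same feature-construction logic as the class above, but the
# classifiers keep the static, pre-tuned hyperparameters from
# config.MLConf.clf_static_params and are fitted directly (wrapped in RFECV for feature
# selection) instead of going through a nested hyperparameter search.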
class calcWithCustomHyperparams(baseMetrics):
max_important_features_toshow = 50
fname = ''
def __init__(self, njobs, accures):
self.X1 = []
self.Y1 = []
self.X2 = []
self.Y2 = []
self.train_X = []
self.train_Y = []
self.test_X = []
self.test_Y = []
self.classifiers = [
LinearSVC(
# random_state=0, C=1.0, max_iter=3000
**config.MLConf.clf_static_params['SVM']
),
# GaussianProcessClassifier(1.0 * RBF(1.0), n_jobs=3, warm_start=True),
DecisionTreeClassifier(
# random_state=0, max_depth=100, max_features='auto'
**config.MLConf.clf_static_params['DecisionTree']
),
RandomForestClassifier(
# default
# n_estimators=250, max_depth=50, oob_score=True, bootstrap=True
# optimized
**config.MLConf.clf_static_params['RandomForest']
),
MLPClassifier(alpha=1, random_state=0),
# AdaBoostClassifier(DecisionTreeClassifier(max_depth=50), n_estimators=300, random_state=0),
GaussianNB(),
# QuadraticDiscriminantAnalysis(), LinearDiscriminantAnalysis(),
ExtraTreesClassifier(
# n_estimators=100, random_state=0, n_jobs=int(njobs), max_depth=50
**config.MLConf.clf_static_params['ExtraTrees']
),
XGBClassifier(
# n_estimators=3000, seed=0, nthread=int(njobs)
**config.MLConf.clf_static_params['XGBoost']
),
]
# self.scores = [[] for _ in range(len(self.classifiers))]
self.importances = dict()
self.mlalgs_to_run = StaticValues.classifiers_abbr.keys()
# To be used within GridSearch
# shuffle is enabled so that config.seed_no takes effect (random_state is ignored, or
# rejected by newer scikit-learn, when shuffle=False)
self.inner_cv = StratifiedKFold(n_splits=config.MLConf.kfold_inner_parameter, shuffle=True,
random_state=config.seed_no)
# To be used in outer CV
self.outer_cv = StratifiedKFold(n_splits=config.MLConf.kfold_parameter, shuffle=True,
random_state=config.seed_no)
self.kfold = config.MLConf.kfold_parameter
self.n_jobs = config.MLConf.n_jobs
self.search_method = config.MLConf.hyperparams_search_method
self.n_iter = config.MLConf.max_iter
super(calcWithCustomHyperparams, self).__init__(len(self.classifiers), njobs, accures)
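# evaluate()/load_test_dataset() below follow the same feature-building steps as in the
# previous class; the assembled vectors are rounded to 5 decimals before being stored.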
def evaluate(self, row, sorting=False, stemming=False, canonical=False, permuted=False, custom_thres='orig',
selectable_features=None):
# if row['res'].upper() == "TRUE":
# if len(self.Y1) < ((self.num_true + self.num_false) / 2.0): self.Y1.append(1.0)
# else: self.Y2.append(1.0)
# else:
# if len(self.Y1) < ((self.num_true + self.num_false) / 2.0): self.Y1.append(0.0)
# else: self.Y2.append(0.0)
if row['res'].upper() == "TRUE":
self.train_Y.append(1.0)
else:
self.train_Y.append(0.0)
tmp_X1, tmp_X2 = [], []
for flag in list({False, sorting}):
if (not flag and not config.MLConf.features_to_build['basic']) or (
flag and not config.MLConf.features_to_build['sorted']): continue
a, b = transform(row['s1'], row['s2'], sorting=flag, stemming=stemming, canonical=flag)
start_time = time.time()
sim1 = StaticValues.algorithms['damerau_levenshtein'](a, b)
sim8 = StaticValues.algorithms['jaccard'](a, b)
sim2 = StaticValues.algorithms['jaro'](a, b)
sim3 = StaticValues.algorithms['jaro_winkler'](a, b)
sim4 = StaticValues.algorithms['jaro_winkler'](a[::-1], b[::-1])
sim11 = StaticValues.algorithms['monge_elkan'](a, b)
sim7 = StaticValues.algorithms['cosine'](a, b)
sim9 = StaticValues.algorithms['strike_a_match'](a, b)
sim12 = StaticValues.algorithms['soft_jaccard'](a, b)
if not flag: sim5 = StaticValues.algorithms['sorted_winkler'](a, b)
if permuted: sim6 = StaticValues.algorithms['permuted_winkler'](a, b)
sim10 = StaticValues.algorithms['skipgram'](a, b)
sim13 = StaticValues.algorithms['davies'](a, b)
if flag:
sim16 = StaticValues.algorithms['l_jaro_winkler'](a, b)
sim17 = StaticValues.algorithms['l_jaro_winkler'](a[::-1], b[::-1])
self.timer += (time.time() - start_time)
# if len(self.X1) < ((self.num_true + self.num_false) / 2.0):
# if permuted:
# if flag: tmp_X1.append([sim1, sim2, sim3, sim4, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
# else: tmp_X1.append([sim1, sim2, sim3, sim4, sim5, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
# else:
# if flag: tmp_X1.append([sim1, sim2, sim3, sim4, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
# else: tmp_X1.append([sim1, sim2, sim3, sim4, sim5, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
# else:
if permuted:
if flag: tmp_X2.append([sim1, sim2, sim3, sim4, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else: tmp_X2.append([sim1, sim2, sim3, sim4, sim5, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else:
if flag: tmp_X2.append([sim1, sim2, sim3, sim4, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else: tmp_X2.append([sim1, sim2, sim3, sim4, sim5, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
if flag: tmp_X2.append([sim16, sim17])
# for flag in list({False, True}):
if config.MLConf.features_to_build['lgm'] and sorting:
start_time = time.time()
row['s1'], row['s2'] = transform(row['s1'], row['s2'], sorting=sorting, stemming=stemming,
canonical=canonical, simple_sorting=False)
lsim_baseThres = 'avg' if flag else 'simple'
# sim14 = StaticValues.algorithms['lsimilarity'](a, b)
sim15 = StaticValues.algorithms['avg_lsimilarity'](row['s1'], row['s2'])
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['davies'][lsim_baseThres][0])
feature17 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'davies', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['skipgram'][lsim_baseThres][0]
)
feature18 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'skipgram', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['soft_jaccard'][lsim_baseThres][0]
)
feature19 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'soft_jaccard', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['strike_a_match'][lsim_baseThres][0]
)
feature20 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'strike_a_match', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['cosine'][lsim_baseThres][0]
)
feature21 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'cosine', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['jaccard'][lsim_baseThres][0]
)
feature22 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'jaccard', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['monge_elkan'][lsim_baseThres][0]
)
feature23 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'monge_elkan', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['jaro_winkler'][lsim_baseThres][0]
)
feature24 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'jaro_winkler', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['jaro'][lsim_baseThres][0]
)
feature25 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'jaro', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['jaro_winkler_r'][lsim_baseThres][0]
)
feature26 = weighted_terms(
{'a': [x[::-1] for x in baseTerms['a']], 'b': [x[::-1] for x in baseTerms['b']],
'len': baseTerms['len'], 'char_len': baseTerms['char_len']},
{'a': [x[::-1] for x in mismatchTerms['a']], 'b': [x[::-1] for x in mismatchTerms['b']],
'len': mismatchTerms['len'], 'char_len': mismatchTerms['char_len']},
{'a': [x[::-1] for x in specialTerms['a']], 'b': [x[::-1] for x in specialTerms['b']],
'len': specialTerms['len'], 'char_len': specialTerms['char_len']},
'jaro_winkler', flag
)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['l_jaro_winkler'][lsim_baseThres][0]
)
feature27 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'l_jaro_winkler', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['l_jaro_winkler_r'][lsim_baseThres][0]
)
feature28 = weighted_terms(
{'a': [x[::-1] for x in baseTerms['a']], 'b': [x[::-1] for x in baseTerms['b']],
'len': baseTerms['len'], 'char_len': baseTerms['char_len']},
{'a': [x[::-1] for x in mismatchTerms['a']], 'b': [x[::-1] for x in mismatchTerms['b']],
'len': mismatchTerms['len'], 'char_len': mismatchTerms['char_len']},
{'a': [x[::-1] for x in specialTerms['a']], 'b': [x[::-1] for x in specialTerms['b']],
'len': specialTerms['len'], 'char_len': specialTerms['char_len']},
'l_jaro_winkler', flag
)
self.timer += (time.time() - start_time)
# if len(self.X1) < ((self.num_true + self.num_false) / 2.0):
# tmp_X1.append([feature17, feature18, feature19, feature20, feature21, feature22, feature23, feature24,
# feature25, feature26, feature27])
# else:
tmp_X2.append([
sim15, feature17, feature18, feature19, feature20, feature21, feature22, feature23, feature24,
feature25, feature26, feature27, feature28
])
if config.MLConf.features_to_build['individual'] and sorting:
row['s1'], row['s2'] = transform(row['s1'], row['s2'], sorting=sorting, stemming=stemming,
canonical=canonical, simple_sorting=False)
start_time = time.time()
method_nm = 'damerau_levenshtein'
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values[method_nm]['avg'][0])
feature1_1, feature1_2, feature1_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, method_nm)
self.timer += (time.time() - start_time)
tmp_X2.append([
feature1_1, feature1_2, feature1_3,
# feature8_1, feature8_2, feature8_3,
# feature9_1, feature9_2, feature9_3,
# feature10_1, feature10_2, feature10_3,
# feature11_1, feature11_2, feature11_3,
# feature12_1, feature12_2, feature12_3,
# feature13_1, feature13_2, feature13_3,
# feature14_1, feature14_2, feature14_3,
# feature15_1, feature15_2, feature15_3,
# feature16_1, feature16_2, feature16_3,
# int(feature2_1), int(feature2_2),
# feature3_1, feature3_2,
# int(feature4_1), int(feature4_2),
# int(feature5_1), int(feature5_2)
])
if config.MLConf.features_to_build['stats'] and sorting:
row['s1'], row['s2'] = transform(row['s1'], row['s2'], sorting=sorting, stemming=stemming,
canonical=canonical, simple_sorting=False)
# feature2_1 = FEMLFeatures.contains(row['s1'], row['s2'])
# feature2_2 = FEMLFeatures.contains(row['s2'], row['s1'])
feature1_1, feature1_2 = FEMLFeatures.no_of_words(row['s1'], row['s2'])
# feature4_1 = FEMLFeatures.containsDashConnected_words(row['s1'])
# feature4_2 = FEMLFeatures.containsDashConnected_words(row['s2'])
feature2_1, feature2_2 = FEMLFeatures.containsFreqTerms(row['s1'], row['s2'])
# feature5_1 = False if len(fterms_s1) == 0 else True
# feature5_2 = False if len(fterms_s2) == 0 else True
# feature6_1, feature6_2 = FEMLFeatures().containsInPos(row['s1'], row['s2'])
# feature7_1 = [0] * (len(LSimilarityVars.freq_ngrams['tokens'] | LSimilarityVars.freq_ngrams['chars']))
# feature7_2 = [0] * (len(LSimilarityVars.freq_ngrams['tokens'] | LSimilarityVars.freq_ngrams['chars']))
# for x in fterms_s1: feature7_1[x[0]] = 1
# for x in fterms_s2: feature7_2[x[0]] = 1
feature3_1, feature3_2 = FEMLFeatures.positionalFreqTerms(row['s1'], row['s2'])
tmp_X2.append([feature1_1, feature1_2, feature2_1, feature2_2] + feature3_1 + feature3_2)
# if len(self.X1) < ((self.num_true + self.num_false) / 2.0):
# if selectable_features is not None:
# self.X1.append(list(compress(chain.from_iterable(tmp_X1), selectable_features)))
# else:
# self.X1.append(list(chain.from_iterable(tmp_X1)))
# else:
if selectable_features is not None:
self.train_X.append(list(compress(chain.from_iterable(tmp_X2), selectable_features)))
else:
new_features = np.around(list(chain.from_iterable(tmp_X2)), 5)
self.train_X.append(new_features.tolist())
if not self.fname: self.fname = 'results-evaluation_{}'.format(custom_thres.replace('/', ''))
def load_test_dataset(self, row, sorting=False, stemming=False, canonical=False, permuted=False, custom_thres='orig'):
if row['res'].upper() == "TRUE":
self.test_Y.append(1.0)
self.num_true += 1.0
else:
self.test_Y.append(0.0)
self.num_false += 1.0
tmp_X1, tmp_X2 = [], []
for flag in list({False, sorting}):
if (not flag and not config.MLConf.features_to_build['basic']) or (
flag and not config.MLConf.features_to_build['sorted']): continue
a, b = transform(row['s1'], row['s2'], sorting=flag, stemming=stemming, canonical=flag)
start_time = time.time()
sim1 = StaticValues.algorithms['damerau_levenshtein'](a, b)
sim8 = StaticValues.algorithms['jaccard'](a, b)
sim2 = StaticValues.algorithms['jaro'](a, b)
sim3 = StaticValues.algorithms['jaro_winkler'](a, b)
sim4 = StaticValues.algorithms['jaro_winkler'](a[::-1], b[::-1])
sim11 = StaticValues.algorithms['monge_elkan'](a, b)
sim7 = StaticValues.algorithms['cosine'](a, b)
sim9 = StaticValues.algorithms['strike_a_match'](a, b)
sim12 = StaticValues.algorithms['soft_jaccard'](a, b)
if not flag: sim5 = StaticValues.algorithms['sorted_winkler'](a, b)
if permuted: sim6 = StaticValues.algorithms['permuted_winkler'](a, b)
sim10 = StaticValues.algorithms['skipgram'](a, b)
sim13 = StaticValues.algorithms['davies'](a, b)
if flag:
sim16 = StaticValues.algorithms['l_jaro_winkler'](a, b)
sim17 = StaticValues.algorithms['l_jaro_winkler'](a[::-1], b[::-1])
self.timer += (time.time() - start_time)
if permuted:
if flag:
tmp_X2.append([sim1, sim2, sim3, sim4, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else:
tmp_X2.append(
[sim1, sim2, sim3, sim4, sim5, sim6, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else:
if flag:
tmp_X2.append([sim1, sim2, sim3, sim4, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
else:
tmp_X2.append([sim1, sim2, sim3, sim4, sim5, sim7, sim8, sim9, sim10, sim11, sim12, sim13])
if flag: tmp_X2.append([sim16, sim17])
if config.MLConf.features_to_build['lgm'] and sorting:
start_time = time.time()
row['s1'], row['s2'] = transform(row['s1'], row['s2'], sorting=sorting, stemming=stemming,
canonical=canonical, simple_sorting=False)
lsim_baseThres = 'avg' if flag else 'simple'
# sim14 = StaticValues.algorithms['lsimilarity'](a, b)
sim15 = StaticValues.algorithms['avg_lsimilarity'](row['s1'], row['s2'])
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['davies'][lsim_baseThres][0])
feature17 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'davies', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['skipgram'][lsim_baseThres][0]
)
feature18 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'skipgram', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['soft_jaccard'][lsim_baseThres][0]
)
feature19 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'soft_jaccard', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['strike_a_match'][lsim_baseThres][0]
)
feature20 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'strike_a_match', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['cosine'][lsim_baseThres][0]
)
feature21 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'cosine', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['jaccard'][lsim_baseThres][0]
)
feature22 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'jaccard', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['monge_elkan'][lsim_baseThres][0]
)
feature23 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'monge_elkan', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['jaro_winkler'][lsim_baseThres][0]
)
feature24 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'jaro_winkler', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['jaro'][lsim_baseThres][0]
)
feature25 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'jaro', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['jaro_winkler_r'][lsim_baseThres][0]
)
feature26 = weighted_terms(
{'a': [x[::-1] for x in baseTerms['a']], 'b': [x[::-1] for x in baseTerms['b']],
'len': baseTerms['len'], 'char_len': baseTerms['char_len']},
{'a': [x[::-1] for x in mismatchTerms['a']], 'b': [x[::-1] for x in mismatchTerms['b']],
'len': mismatchTerms['len'], 'char_len': mismatchTerms['char_len']},
{'a': [x[::-1] for x in specialTerms['a']], 'b': [x[::-1] for x in specialTerms['b']],
'len': specialTerms['len'], 'char_len': specialTerms['char_len']},
'jaro_winkler', flag
)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['l_jaro_winkler'][lsim_baseThres][0]
)
feature27 = weighted_terms(baseTerms, mismatchTerms, specialTerms, 'l_jaro_winkler', flag)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values['l_jaro_winkler_r'][lsim_baseThres][0]
)
feature28 = weighted_terms(
{'a': [x[::-1] for x in baseTerms['a']], 'b': [x[::-1] for x in baseTerms['b']],
'len': baseTerms['len'], 'char_len': baseTerms['char_len']},
{'a': [x[::-1] for x in mismatchTerms['a']], 'b': [x[::-1] for x in mismatchTerms['b']],
'len': mismatchTerms['len'], 'char_len': mismatchTerms['char_len']},
{'a': [x[::-1] for x in specialTerms['a']], 'b': [x[::-1] for x in specialTerms['b']],
'len': specialTerms['len'], 'char_len': specialTerms['char_len']},
'l_jaro_winkler', flag
)
self.timer += (time.time() - start_time)
# if len(self.X1) < ((self.num_true + self.num_false) / 2.0):
# tmp_X1.append([feature17, feature18, feature19, feature20, feature21, feature22, feature23, feature24,
# feature25, feature26, feature27])
# else:
tmp_X2.append([
sim15, feature17, feature18, feature19, feature20, feature21, feature22, feature23, feature24,
feature25, feature26, feature27, feature28
])
if config.MLConf.features_to_build['individual'] and sorting:
row['s1'], row['s2'] = transform(row['s1'], row['s2'], sorting=sorting, stemming=stemming,
canonical=canonical, simple_sorting=False)
start_time = time.time()
method_nm = 'damerau_levenshtein'
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(
row['s1'], row['s2'], LSimilarityVars.per_metric_optimal_values[method_nm]['avg'][0])
feature1_1, feature1_2, feature1_3 = score_per_term(baseTerms, mismatchTerms, specialTerms, method_nm)
self.timer += (time.time() - start_time)
tmp_X2.append([
feature1_1, feature1_2, feature1_3,
# feature8_1, feature8_2, feature8_3,
# feature9_1, feature9_2, feature9_3,
# feature10_1, feature10_2, feature10_3,
# feature11_1, feature11_2, feature11_3,
# feature12_1, feature12_2, feature12_3,
# feature13_1, feature13_2, feature13_3,
# feature14_1, feature14_2, feature14_3,
# feature15_1, feature15_2, feature15_3,
# feature16_1, feature16_2, feature16_3,
# int(feature2_1), int(feature2_2),
# feature3_1, feature3_2,
# int(feature4_1), int(feature4_2),
# int(feature5_1), int(feature5_2)
])
if config.MLConf.features_to_build['stats'] and sorting:
row['s1'], row['s2'] = transform(row['s1'], row['s2'], sorting=sorting, stemming=stemming,
canonical=canonical, simple_sorting=False)
# feature2_1 = FEMLFeatures.contains(row['s1'], row['s2'])
# feature2_2 = FEMLFeatures.contains(row['s2'], row['s1'])
feature1_1, feature1_2 = FEMLFeatures.no_of_words(row['s1'], row['s2'])
# feature4_1 = FEMLFeatures.containsDashConnected_words(row['s1'])
# feature4_2 = FEMLFeatures.containsDashConnected_words(row['s2'])
feature2_1, feature2_2 = FEMLFeatures.containsFreqTerms(row['s1'], row['s2'])
# feature5_1 = False if len(fterms_s1) == 0 else True
# feature5_2 = False if len(fterms_s2) == 0 else True
# feature6_1, feature6_2 = FEMLFeatures().containsInPos(row['s1'], row['s2'])
# feature7_1 = [0] * (len(LSimilarityVars.freq_ngrams['tokens'] | LSimilarityVars.freq_ngrams['chars']))
# feature7_2 = [0] * (len(LSimilarityVars.freq_ngrams['tokens'] | LSimilarityVars.freq_ngrams['chars']))
# for x in fterms_s1: feature7_1[x[0]] = 1
# for x in fterms_s2: feature7_2[x[0]] = 1
feature3_1, feature3_2 = FEMLFeatures.positionalFreqTerms(row['s1'], row['s2'])
tmp_X2.append([feature1_1, feature1_2, feature2_1, feature2_2] + feature3_1 + feature3_2)
new_features = np.around(list(chain.from_iterable(tmp_X2)), 5)
self.test_X.append(new_features.tolist())
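# train_classifiers(): each statically-configured classifier is wrapped in a PipelineRFE
# (MinMaxScaler -> clf) and fitted through RFECV, which recursively drops the weakest
# features (step=2) under 5-fold stratified CV; predictions on test_X and the selected
# features' importances/coefficients are then accumulated per classifier.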
def train_classifiers(self, ml_algs, polynomial=False, standardize=False, fs_method=None, features=None):
if polynomial:
# the expansion must target the matrices actually used for fitting below; self.X1/self.X2
# are legacy containers that are never populated in this class
poly = PolynomialFeatures()
self.train_X = poly.fit_transform(self.train_X)
self.test_X = poly.transform(self.test_X)
# iterate over classifiers
if set(ml_algs) != {'all'}: self.mlalgs_to_run = ml_algs
# for i, (name, clf) in enumerate(zip(self.names, self.classifiers)):
cols = []
if config.MLConf.features_to_build['basic']:
cols += StaticValues.basicFeatures
if config.MLConf.features_to_build['sorted']:
cols += StaticValues.sortedFeatures
if config.MLConf.features_to_build['lgm']:
cols += StaticValues.lgmFeatures
if config.MLConf.features_to_build['individual']:
cols += StaticValues.individualFeatures
if config.MLConf.features_to_build['stats']:
cols += StaticValues.extraFeatures
feature_names = np.asarray(cols)
for name in self.mlalgs_to_run:
if name not in StaticValues.classifiers_abbr.keys():
print('{} is not a valid ML algorithm'.format(name))
continue
clf_abbr = StaticValues.classifiers_abbr[name]
model = self.classifiers[clf_abbr]
train_time = 0
predictedL = list()
tot_features = list()
print("Training {}...".format(StaticValues.classifiers[clf_abbr]))
# for X_train, y_train, X_pred, y_pred in izip(
# (np.asarray(row, float) for row in (self.X1, self.X2)),
# (np.asarray(row, float) for row in (self.Y1, self.Y2)),
# (np.asarray(row, float) for row in (self.X2, self.X1)),
# ((row for row in (self.Y2, self.Y1)))
# ):
start_time = time.time()
# features_supported = [True] * len(cols)
# if features is not None:
# features_supported = [x and y for x, y in zip(features_supported, features)]
# if fs_method is not None and {'rf', 'et', 'xgboost'}.intersection({name}):
# X_train, X_pred, features_supported = self._perform_feature_selection(
# X_train, y_train, X_pred, fs_method, model
# )
# tot_features = [x or y for x, y in izip_longest(features_supported, tot_features, fillvalue=False)]
# selector = RFE(model, n_features_to_select=config.MLConf.features_to_select, step=2)
scaler = MinMaxScaler()
# scaler = StandardScaler()
pipe_params = None
# TODO check why hasattr cannot find feature_importances_
# if hasattr(model, 'feature_importances_') or \
# isinstance(getattr(type(model), 'feature_importances_', None), property) or \
# hasattr(model, 'coef_') or \
# isinstance(getattr(type(model), 'coef_', None), property):
# print('feature_importances found for clf {}'.format(name))
# selector = RFE(model, n_features_to_select=config.MLConf.features_to_select, step=2)
pipe_params = PipelineRFE([('scaler', scaler), ('clf', model)])
# else:
# pipe_params = [('scaler', scaler), ('clf', model)]
pipe_clf = RFECV(
pipe_params, step=2, scoring='accuracy', cv=StratifiedKFold(5, shuffle=True, random_state=config.seed_no),
n_jobs=config.MLConf.n_jobs, min_features_to_select=min(config.MLConf.features_to_select, len(feature_names))
)
pipe_clf.fit(self.train_X, self.train_Y)
# print(pipe_clf.named_steps['clf'].support_)
# model.fit(np.asarray(self.train_X), self.train_Y)
train_time += (time.time() - start_time)
start_time = time.time()
predictedL += list(pipe_clf.predict(self.test_X))
# predictedL += list(best_clf['estimator'].predict(self.test_X))
self.timers[clf_abbr] += (time.time() - start_time)
# print(pipe_clf.named_steps['clf'].ranking_)
if hasattr(pipe_clf.estimator_, "feature_importances_"):
if clf_abbr not in self.importances:
self.importances[clf_abbr] = np.zeros(len(cols), dtype=float)
feature_importances = pipe_clf.estimator_.feature_importances_
support = pipe_clf.support_
for k, v in zip(feature_names[support], feature_importances):
self.importances[clf_abbr][cols.index(k)] += v
elif hasattr(pipe_clf.estimator_, "coef_"):
if clf_abbr not in self.importances:
self.importances[clf_abbr] = np.zeros(len(cols), dtype=float)
feature_importances = pipe_clf.estimator_.coef_.ravel()
support = pipe_clf.support_
for k, v in zip(feature_names[support], feature_importances):
self.importances[clf_abbr][cols.index(k)] += v
# # print(model.score(X_pred, y_pred))
print("Best features discovered: ", end="")
print(*tot_features, sep=",")
print("Training took {0:.3f} sec ({1:.3f} min)".format(train_time, train_time / 60.0))
self.timers[clf_abbr] += self.timer
trues_sum = sum(config.MLConf.features_to_build.values())
max_len = len(config.MLConf.features_to_build.keys())
pos = '_'.join([k for k, v in config.MLConf.features_to_build.items() if v is True]) \
if trues_sum < max_len \
else 'tot'
if self.accuracyresults: self.file = open('{}_{}_{}.csv'.format(self.fname, name, pos), 'w+')
print("Matching records...")
# real = self.Y2 + self.Y1
real = self.test_Y
for pos in range(len(real)):
if real[pos] == 1.0:
if predictedL[pos] == 1.0:
self.num_true_predicted_true[clf_abbr] += 1.0
if self.accuracyresults:
self.file.write("TRUE\tTRUE\n")
else:
self.num_true_predicted_false[clf_abbr] += 1.0
if self.accuracyresults:
self.file.write("TRUE\tFALSE\n")
else:
if predictedL[pos] == 1.0:
self.num_false_predicted_true[clf_abbr] += 1.0
if self.accuracyresults:
self.file.write("FALSE\tTRUE\n")
else:
self.num_false_predicted_false[clf_abbr] += 1.0
if self.accuracyresults:
self.file.write("FALSE\tFALSE\n")
# if hasattr(clf, "decision_function"):
# Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
# else:
# Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
if self.accuracyresults and not self.file.closed: self.file.close()
def print_stats(self):
for name in self.mlalgs_to_run:
if name not in StaticValues.classifiers_abbr.keys():
continue
idx = StaticValues.classifiers_abbr[name]
status, acc, pre, rec, f1, t = self._compute_stats(idx, True)
if status == 0:
self._print_stats(StaticValues.classifiers[idx], acc, pre, rec, f1, t)
if idx not in self.importances or not isinstance(self.importances[idx], np.ndarray):
print("The classifier {} does not expose \"coef_\" or \"feature_importances_\" attributes".format(
name))
else:
cols = []
if config.MLConf.features_to_build['basic']:
cols += StaticValues.basicFeatures
if config.MLConf.features_to_build['sorted']:
cols += StaticValues.sortedFeatures
if config.MLConf.features_to_build['lgm']:
cols += StaticValues.lgmFeatures
if config.MLConf.features_to_build['individual']:
cols += StaticValues.individualFeatures
if config.MLConf.features_to_build['stats']:
cols += StaticValues.extraFeatures
importances = self.importances[idx]
importances = np.ma.masked_equal(importances, 0.0)
if importances.mask is np.ma.nomask: importances.mask = np.zeros(importances.shape, dtype=bool)
# indices = np.argsort(importances)[::-1]
# for f in range(min(importances.shape[0], self.max_important_features_toshow)):
# print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
indices = np.argsort(importances.compressed())[::-1][
:min(importances.shape[0], self.max_important_features_toshow)]
headers = ["name", "score"]
print(tabulate(zip(
np.asarray(cols, object)[~importances.mask][indices],
importances.compressed()[indices]
), headers, tablefmt="simple"))
# if hasattr(clf, "feature_importances_"):
# # if results:
# # result[indices[f]] = importances[indices[f]]
print("")
sys.stdout.flush()
class calcDLearning(baseMetrics):
pass
class calcSotAML(baseMetrics):
pass
class calcLSimilarities(baseMetrics):
def __init__(self, njobs, accures):
super(calcLSimilarities, self).__init__(len(StaticValues.methods), njobs, accures)
def _generic_evaluator(self, idx, lgm_metric, str1, str2, is_a_match, custom_thres):
tot_res = ""
for alg_info in [[13, 'avg_lsimilarity']]:
start_time = time.time()
sim_val = StaticValues.algorithms[alg_info[1]](str1, str2, method=lgm_metric)
res, varnm = self.prediction(idx + alg_info[0], sim_val, is_a_match, custom_thres)
self.timers[idx + alg_info[0] - 1] += (time.time() - start_time)
self.predictedState[varnm][idx + alg_info[0] - 1] += 1.0
tot_res += res
return tot_res
def evaluate(self, row, sorting=False, stemming=False, canonical=False, permuted=False, custom_thres='orig',
features=None, selectable_features=None):
tot_res = ""
flag_true_match = 1.0 if row['res'].upper() == "TRUE" else 0.0
a, b = transform(row['s1'], row['s2'], sorting=sorting, stemming=stemming, canonical=canonical, simple_sorting=False)
tot_res += self._generic_evaluator(16, 'damerau_levenshtein', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(21, 'jaccard', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(17, 'jaro', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(18, 'jaro_winkler', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(19, 'jaro_winkler', a[::-1], b[::-1], flag_true_match, custom_thres)
tot_res += self._generic_evaluator(24, 'monge_elkan', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(20, 'cosine', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(22, 'strike_a_match', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(25, 'soft_jaccard', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(23, 'skipgram', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(26, 'davies', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(27, 'l_jaro_winkler', a, b, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(28, 'l_jaro_winkler', a[::-1], b[::-1], flag_true_match, custom_thres)
if self.accuracyresults:
if self.file is None:
file_name = 'dataset-accuracyresults-lgm-sim-metrics'
if canonical:
file_name += '_canonical'
if sorting:
file_name += '_sorted'
self.file = open(file_name + '.csv', 'w+')
if flag_true_match == 1.0:
self.file.write("TRUE{0}\t{1}\t{2}\n".format(tot_res, a.encode('utf8'), b.encode('utf8')))
else:
self.file.write("FALSE{0}\t{1}\t{2}\n".format(tot_res, a.encode('utf8'), b.encode('utf8')))
class testMetrics(baseMetrics):
def __init__(self, njobs, accures):
super(testMetrics, self).__init__(len(StaticValues.methods), njobs, accures)
def _generic_evaluator(self, idx, sim_metric, baseTerms, mismatchTerms, specialTerms, is_a_match, custom_thres):
start_time = time.time()
sim_val = weighted_terms(baseTerms, mismatchTerms, specialTerms, sim_metric, averaged=True, test_mode=True)
res, varnm = self.prediction(idx, sim_val, is_a_match, custom_thres)
self.timers[idx - 1] += (time.time() - start_time)
self.predictedState[varnm][idx - 1] += 1.0
return res
def evaluate(self, row, sorting=False, stemming=False, canonical=False, permuted=False, custom_thres='orig',
term_split_thres=0.55):
tot_res = ""
flag_true_match = 1.0 if row['res'].upper() == "TRUE" else 0.0
a, b = transform(row['s1'], row['s2'], sorting=sorting, stemming=stemming, canonical=canonical, simple_sorting=False)
baseTerms, mismatchTerms, specialTerms = lsimilarity_terms(a, b, term_split_thres)
rbaseTerms = {
'a': [x[::-1] for x in baseTerms['a']], 'b': [x[::-1] for x in baseTerms['b']],
'len': baseTerms['len'], 'char_len': baseTerms['char_len']
}
rmismatchTerms = {
'a': [x[::-1] for x in mismatchTerms['a']], 'b': [x[::-1] for x in mismatchTerms['b']],
'len': mismatchTerms['len'], 'char_len': mismatchTerms['char_len']
}
rspecialTerms = {
'a': [x[::-1] for x in specialTerms['a']], 'b': [x[::-1] for x in specialTerms['b']],
'len': specialTerms['len'], 'char_len': specialTerms['char_len']
}
tot_res += self._generic_evaluator(1, 'damerau_levenshtein', baseTerms, mismatchTerms, specialTerms, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(8, 'jaccard', baseTerms, mismatchTerms, specialTerms, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(2, 'jaro', baseTerms, mismatchTerms, specialTerms, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(3, 'jaro_winkler', baseTerms, mismatchTerms, specialTerms, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(4, 'jaro_winkler', rbaseTerms, rmismatchTerms, rspecialTerms, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(11, 'monge_elkan', baseTerms, mismatchTerms, specialTerms, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(7, 'cosine', baseTerms, mismatchTerms, specialTerms, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(9, 'strike_a_match', baseTerms, mismatchTerms, specialTerms, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(12, 'soft_jaccard', baseTerms, mismatchTerms, specialTerms, flag_true_match, custom_thres)
# tot_res += self._generic_evaluator(5, 'sorted_winkler', baseTerms, mismatchTerms, specialTerms, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(10, 'skipgram', baseTerms, mismatchTerms, specialTerms, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(13, 'davies', baseTerms, mismatchTerms, specialTerms, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(14, 'l_jaro_winkler', baseTerms, mismatchTerms, specialTerms, flag_true_match, custom_thres)
tot_res += self._generic_evaluator(15, 'l_jaro_winkler', rbaseTerms, rmismatchTerms, rspecialTerms, flag_true_match, custom_thres)
"""
Compute the Damerau-Levenshtein distance between two given
strings (s1 and s2)
https://www.guyrutenberg.com/2008/12/15/damerau-levenshtein-distance-in-python/
"""
# def damerau_levenshtein_distance(s1, s2):
# d = {}
# lenstr1 = len(s1)
# lenstr2 = len(s2)
# for i in xrange(-1, lenstr1 + 1):
# d[(i, -1)] = i + 1
# for j in xrange(-1, lenstr2 + 1):
# d[(-1, j)] = j + 1
#
# for i in xrange(lenstr1):
# for j in xrange(lenstr2):
# if s1[i] == s2[j]:
# cost = 0
# else:
# cost = 1
# d[(i, j)] = min(
# d[(i - 1, j)] + 1, # deletion
# d[(i, j - 1)] + 1, # insertion
# d[(i - 1, j - 1)] + cost, # substitution
# )
# if i and j and s1[i] == s2[j - 1] and s1[i - 1] == s2[j]:
# d[(i, j)] = min(d[(i, j)], d[i - 2, j - 2] + cost) # transposition
#
# return d[lenstr1 - 1, lenstr2 - 1]
|
{"hexsha": "99fbaeca5bffcdbb792214eb480fa143ad07c827", "size": 131423, "ext": "py", "lang": "Python", "max_stars_repo_path": "femlAlgorithms.py", "max_stars_repo_name": "athenarc/ToponymPairClassification", "max_stars_repo_head_hexsha": "41dd487ad5f9ba4c0f60342d1af0a1c56c3e4136", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "femlAlgorithms.py", "max_issues_repo_name": "athenarc/ToponymPairClassification", "max_issues_repo_head_hexsha": "41dd487ad5f9ba4c0f60342d1af0a1c56c3e4136", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "femlAlgorithms.py", "max_forks_repo_name": "athenarc/ToponymPairClassification", "max_forks_repo_head_hexsha": "41dd487ad5f9ba4c0f60342d1af0a1c56c3e4136", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.6745490982, "max_line_length": 157, "alphanum_fraction": 0.5890749717, "include": true, "reason": "import numpy", "num_tokens": 33566}
|
context("Immutable")
# Create smaller subset of baseball data (for speed)
bsmall <- subset(baseball, id %in% sample(unique(baseball$id), 20))[, 1:5]
bsmall$id <- factor(bsmall$id)
bsmall <- bsmall[sample(rownames(bsmall)), ]
rownames(bsmall) <- NULL
test_that("idf is immutable", {
#Since idf are constructed by scratch in both idata.frame and `[.idf]`
#I will test idf objects created both ways.
#create both before testing any, to make sure that subsetting
#doesn't change the subsetted idf
idf <- idata.frame(bsmall)
x <- idf[1:10, ]
y <- bsmall[1:10, ]
expect_error(x[1,"year"] <- 1994)
expect_error(x[["stint"]] <- rev(y[["stint"]]))
expect_error(x$team <- sort(y$team))
expect_error(names(idf) <- c("ID", "YR", "ST", "TM", "LG"))
expect_error(idf[1,"year"] <- 1994)
expect_error(idf[["stint"]] <- rev(bsmall[["stint"]]))
expect_error(idf$team <- sort(bsmall$team))
expect_error(names(idf) <- c("ID", "YR", "ST", "TM", "LG"))
})
test_that("idf subset by [i]", {
idf <- idata.frame(bsmall)
x <- idf[3]
y <- bsmall[3]
expect_equal(idf[[2]], bsmall[[2]])
expect_equal(x[[1]], y[[1]])
})
test_that("idf subset data by [i,j]", {
idf <- idata.frame(bsmall)
x <- idf[1:10, ]
y <- bsmall[1:10, ]
xx <- x[3:5, c('id', 'team')]
yy <- y[3:5, c('id', 'team')]
xxx <- idf[ , names(idf)]
yyy <- idf[ , names(y)]
expect_equal(idf[3, "year"], bsmall[[3, "year"]])
expect_equal(x[, "year"], y[, "year"])
expect_equal(xx[, "id"], yy[, "id"])
expect_equal(xxx[, "team"], yyy[, "team"])
})
test_that("idf extract by [[i]]", {
idf <- idata.frame(bsmall)
x <- idf[6:20,]
y <- bsmall[6:20,]
expect_equal(x[[4]], y[[4]])
expect_equal(idf[[3]], bsmall[[3]])
expect_equal(idf[["year"]], bsmall[["year"]])
})
test_that("idf extract $name", {
idf <- idata.frame(bsmall)
x <- idf[500:510,]
y <- bsmall[500:510,]
expect_equal(x$team, y$team)
expect_equal(idf$team, bsmall$team)
})
test_that("idf as environment", {
idf <- idata.frame(bsmall)
x <- idf[5:10,]
y <- bsmall[5:10,]
expect_equal(with(x, mean(year)), with(y, mean(year)))
expect_equal(with(idf, table(team)), with(bsmall, table(team)))
})
|
{"hexsha": "176338a30edc07c2dadb6593018157b22815a3d1", "size": 2191, "ext": "r", "lang": "R", "max_stars_repo_path": "source/gdaexperience6/plyr/tests/testthat/test-idf.r", "max_stars_repo_name": "lalaithan/developer-immersion-data", "max_stars_repo_head_hexsha": "b48d291ad5a03d56c0228d00e0b290b638d50194", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 82, "max_stars_repo_stars_event_min_datetime": "2017-05-24T22:55:14.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-31T00:56:05.000Z", "max_issues_repo_path": "packrat/lib/x86_64-pc-linux-gnu/3.4.0/plyr/tests/testthat/test-idf.r", "max_issues_repo_name": "lordbitin/ESWA-2017", "max_issues_repo_head_hexsha": "9778cf54724b6c55f68dfe77bbfc206aab769730", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-05-20T16:10:54.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-30T18:04:46.000Z", "max_forks_repo_path": "packrat/lib/x86_64-pc-linux-gnu/3.4.0/plyr/tests/testthat/test-idf.r", "max_forks_repo_name": "lordbitin/ESWA-2017", "max_forks_repo_head_hexsha": "9778cf54724b6c55f68dfe77bbfc206aab769730", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 55, "max_forks_repo_forks_event_min_datetime": "2017-05-20T12:42:19.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-26T16:38:16.000Z", "avg_line_length": 24.8977272727, "max_line_length": 74, "alphanum_fraction": 0.6029210406, "num_tokens": 761}
|
from typing import Any, Callable, Dict, List, Optional, Tuple
import configparser
import logging
import os
# from packaging.version import parse, Version
import torch
from catalyst.tools.frozen_class import FrozenClass
logger = logging.getLogger(__name__)
IS_CUDA_AVAILABLE = torch.cuda.is_available()
NUM_CUDA_DEVICES = torch.cuda.device_count()
def _is_apex_available():
try:
import apex # noqa: F401
from apex import amp # noqa: F401
return True
except ImportError:
return False
def _is_amp_available():
try:
import torch.cuda.amp as amp # noqa: F401
return True
except ModuleNotFoundError:
return False
def _is_albumentations_available():
try:
import albumentations as albu # noqa: F401
return True
except ModuleNotFoundError:
return False
def _is_xla_available():
try:
import torch_xla.core.xla_model as xm # noqa: F401
return True
except ModuleNotFoundError:
return False
def _is_fairscale_available():
try:
import fairscale # noqa: F401
return True
except ModuleNotFoundError:
return False
def _is_deepspeed_available():
try:
import deepspeed # noqa: F401
return True
except ModuleNotFoundError:
return False
def _is_onnx_available():
try:
import onnx # noqa: F401, E401
import onnxruntime # noqa: F401, E401
return True
except ImportError:
return False
def _is_pruning_available():
try:
import torch.nn.utils.prune as prune # noqa: F401
return True
except ModuleNotFoundError:
return False
def _is_quantization_available():
try:
import torch.quantization # noqa: F401
return True
except ModuleNotFoundError:
return False
def _is_optuna_available():
try:
import optuna # noqa: F401
return True
except ModuleNotFoundError:
return False
def _is_hydra_available():
try:
import hydra # noqa: F401
from omegaconf import DictConfig, OmegaConf # noqa: F401
return True
except ModuleNotFoundError:
return False
def _is_cv_available():
try:
import cv2 # noqa: F401
import imageio # noqa: F401
from skimage.color import label2rgb, rgb2gray # noqa: F401
import torchvision # noqa: F401
return True
except ModuleNotFoundError:
return False
def _is_nifti_available():
try:
import nibabel # noqa: F401
return True
except ModuleNotFoundError:
return False
def _is_ml_available():
try:
import matplotlib # noqa: F401
import pandas # noqa: F401
import scipy # noqa: F401
import sklearn # noqa: F401
return True
except ModuleNotFoundError:
return False
def _is_mlflow_available():
try:
import mlflow # noqa: F401
return True
except ImportError:
return False
def _is_wandb_available():
try:
import wandb # noqa: F401
return True
except ImportError:
return False
def _is_comet_available():
try:
import comet_ml # noqa: F401
return True
except ImportError:
return False
def _is_neptune_available():
try:
import neptune.new as neptune # noqa: F401
return True
except ModuleNotFoundError:
return False
def _get_optional_value(
is_required: Optional[bool], is_available_fn: Callable, assert_msg: str
) -> bool:
if is_required is None:
return is_available_fn()
elif is_required:
assert is_available_fn(), assert_msg
return True
else:
return False
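# A minimal sketch (not part of the original module) of the three-way semantics
# used throughout Settings below: None -> auto-detect, True -> require (assert
# availability), False -> block the package. The helper name and messages here
# are illustrative stand-ins for the _is_*_available functions above.
def _example_optional_value_semantics():
    def always_available():
        return True
    auto = _get_optional_value(None, always_available, "unused message")       # True: auto-detected
    required = _get_optional_value(True, always_available, "package missing")  # True: assertion passed
    blocked = _get_optional_value(False, always_available, "unused message")   # False: explicitly blocked
    return auto, required, blocked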
class Settings(FrozenClass):
"""Catalyst settings."""
def __init__( # noqa: D107
self,
# [subpackages]
cv_required: Optional[bool] = None,
nifti_required: Optional[bool] = None,
ml_required: Optional[bool] = None,
# [integrations]
albu_required: Optional[bool] = None,
hydra_required: Optional[bool] = None,
# nmslib_required: Optional[bool] = False,
optuna_required: Optional[bool] = None,
# [engines]
amp_required: Optional[bool] = None,
apex_required: Optional[bool] = None,
xla_required: Optional[bool] = None,
fairscale_required: Optional[bool] = None,
deepspeed_required: Optional[bool] = None,
# [dl-extras]
onnx_required: Optional[bool] = None,
pruning_required: Optional[bool] = None,
quantization_required: Optional[bool] = None,
# [logging]
# alchemy_required: Optional[bool] = None,
neptune_required: Optional[bool] = None,
mlflow_required: Optional[bool] = None,
wandb_required: Optional[bool] = None,
comet_required: Optional[bool] = None,
# [extras]
use_lz4: Optional[bool] = None,
use_pyarrow: Optional[bool] = None,
use_libjpeg_turbo: Optional[bool] = None,
):
# True – use the package
# None – use the package if available
# False - block the package
# [subpackages]
self.cv_required: bool = _get_optional_value(
cv_required,
_is_cv_available,
"catalyst[cv] is not available, to install it, run `pip install catalyst[cv]`.",
)
self.nifti_required: bool = _get_optional_value(
nifti_required,
_is_nifti_available,
"catalyst[nifti] is not available, to install it, run `pip install catalyst[nifti]`.",
)
self.ml_required: bool = _get_optional_value(
ml_required,
_is_ml_available,
"catalyst[ml] is not available, to install it, run `pip install catalyst[ml]`.",
)
# [integrations]
self.albu_required: bool = _get_optional_value(
albu_required,
_is_albumentations_available,
"catalyst[albu] is not available, to install it, " "run `pip install catalyst[albu]`.",
)
self.hydra_required: bool = _get_optional_value(
hydra_required,
_is_hydra_available,
"catalyst[hydra] is not available, to install it, run `pip install catalyst[hydra]`.",
)
# self.nmslib_required: bool = nmslib_required
self.optuna_required: bool = _get_optional_value(
optuna_required,
_is_optuna_available,
"catalyst[optuna] is not available, to install it, "
"run `pip install catalyst[optuna]`.",
)
# [engines]
self.amp_required: bool = _get_optional_value(
amp_required,
_is_amp_available,
"catalyst[amp] is not available, to install it, run `pip install catalyst[amp]`.",
)
self.apex_required: bool = _get_optional_value(
apex_required,
            _is_apex_available,
"catalyst[apex] is not available, to install it, run `pip install catalyst[apex]`.",
)
self.xla_required: bool = _get_optional_value(
xla_required,
_is_xla_available,
"catalyst[xla] is not available, to install it, run `pip install catalyst[xla]`.",
)
self.fairscale_required: bool = _get_optional_value(
fairscale_required,
_is_fairscale_available,
"catalyst[fairscale] is not available, "
"to install it, run `pip install catalyst[fairscale]`.",
)
self.deepspeed_required: bool = _get_optional_value(
deepspeed_required,
_is_deepspeed_available,
"catalyst[deepspeed] is not available, "
"to install it, run `pip install catalyst[deepspeed]`.",
)
# [dl-extras]
self.onnx_required: bool = _get_optional_value(
onnx_required,
_is_onnx_available,
"catalyst[onnx] is not available, to install it, "
"run `pip install catalyst[onnx]` or `pip install catalyst[onnx-gpu]`.",
)
self.pruning_required: bool = _get_optional_value(
pruning_required,
_is_pruning_available,
"catalyst[pruning] is not available, to install it, "
"run `pip install catalyst[pruning]`.",
)
self.quantization_required: bool = _get_optional_value(
quantization_required,
_is_quantization_available,
"catalyst[quantization] is not available, to install it, "
"run `pip install catalyst[quantization]`.",
)
# [logging]
# self.alchemy_required: bool = alchemy_required
self.neptune_required: bool = _get_optional_value(
neptune_required,
_is_neptune_available,
"neptune is not available, to install it, run `pip install neptune-client`.",
)
self.mlflow_required: bool = _get_optional_value(
mlflow_required,
_is_mlflow_available,
"catalyst[mlflow] is not available, to install it, "
"run `pip install catalyst[mlflow]`.",
)
self.wandb_required: bool = _get_optional_value(
wandb_required,
_is_wandb_available,
"wandb is not available, to install it, " "run `pip install wandb`.",
)
self.comet_required: bool = _get_optional_value(
comet_required,
_is_comet_available,
"comet is not available, to install, run 'pip install comet_ml'.",
)
# self.wandb_required: bool = wandb_required
# [extras]
self.use_lz4: bool = use_lz4 or False
self.use_pyarrow: bool = use_pyarrow or False
self.use_libjpeg_turbo: bool = use_libjpeg_turbo or False
# [global]
# stages
self.stage_train_prefix: str = "train"
self.stage_valid_prefix: str = "valid"
self.stage_infer_prefix: str = "infer"
# epoch
self.epoch_metrics_prefix: str = "_epoch_"
# loader
self.loader_train_prefix: str = "train"
self.loader_valid_prefix: str = "valid"
self.loader_infer_prefix: str = "infer"
@staticmethod
def _optional_value(value, default):
return value if value is not None else default
@staticmethod
def parse() -> "Settings":
"""Parse and return the settings.
Returns:
Settings: Dictionary of the parsed and merged Settings.
"""
        kwargs = MergedConfigParser(ConfigFileFinder("catalyst")).parse()
        return Settings(**kwargs)
def type_hint(self, key: str):
"""Returns type hint for the specified ``key``.
Args:
key: key of interest
Returns:
type hint for the specified key
"""
# return get_type_hints(self).get(key, None)
return type(getattr(self, key, None))
DEFAULT_SETTINGS = Settings()
class ConfigFileFinder:
"""Encapsulate the logic for finding and reading config files.
Adapted from:
- https://gitlab.com/pwoolvett/flake8 (MIT License)
- https://github.com/python/mypy (MIT License)
"""
def __init__(self, program_name: str) -> None:
"""Initialize object to find config files.
Args:
program_name: Name of the current program (e.g., catalyst).
"""
# user configuration file
self.program_name = program_name
self.user_config_file = self._user_config_file(program_name)
# list of filenames to find in the local/project directory
self.project_filenames = ("setup.cfg", "tox.ini", f".{program_name}")
self.local_directory = os.path.abspath(os.curdir)
@staticmethod
def _user_config_file(program_name: str) -> str:
if os.name == "nt": # if running on Windows
home_dir = os.path.expanduser("~")
config_file_basename = f".{program_name}"
else:
home_dir = os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
config_file_basename = program_name
return os.path.join(home_dir, config_file_basename)
@staticmethod
def _read_config(*files: str) -> Tuple[configparser.RawConfigParser, List[str]]:
config = configparser.RawConfigParser()
found_files: List[str] = []
for filename in files:
try:
found_files.extend(config.read(filename))
except UnicodeDecodeError:
logger.exception(
f"There was an error decoding a config file."
f" The file with a problem was {filename}."
)
except configparser.ParsingError:
logger.exception(
f"There was an error trying to parse a config file."
f" The file with a problem was {filename}."
)
return config, found_files
def generate_possible_local_files(self):
"""Find and generate all local config files.
Yields:
str: Path to config file.
"""
parent = tail = os.getcwd()
found_config_files = False
while tail and not found_config_files:
for project_filename in self.project_filenames:
filename = os.path.abspath(os.path.join(parent, project_filename))
if os.path.exists(filename):
yield filename
found_config_files = True
self.local_directory = parent
(parent, tail) = os.path.split(parent)
def local_config_files(self) -> List[str]: # noqa: D202
"""
Find all local config files which actually exist.
Returns:
List[str]: List of files that exist that are
local project config files with extra config files
appended to that list (which also exist).
"""
return list(self.generate_possible_local_files())
def local_configs(self):
"""Parse all local config files into one config object."""
config, found_files = self._read_config(*self.local_config_files())
if found_files:
logger.debug(f"Found local configuration files: {found_files}")
return config
def user_config(self):
"""Parse the user config file into a config object."""
config, found_files = self._read_config(self.user_config_file)
if found_files:
logger.debug(f"Found user configuration files: {found_files}")
return config
class MergedConfigParser:
"""Encapsulate merging different types of configuration files.
This parses out the options registered that were specified in the
configuration files, handles extra configuration files, and returns
dictionaries with the parsed values.
Adapted from:
- https://gitlab.com/pwoolvett/flake8 (MIT License)
- https://github.com/python/mypy (MIT License)
"""
#: Set of actions that should use the
#: :meth:`~configparser.RawConfigParser.getbool` method.
GETBOOL_ACTIONS = {"store_true", "store_false"}
def __init__(self, config_finder: ConfigFileFinder):
"""Initialize the MergedConfigParser instance.
Args:
config_finder: Initialized ConfigFileFinder.
"""
self.program_name = config_finder.program_name
self.config_finder = config_finder
def _normalize_value(self, option, value):
final_value = option.normalize(value, self.config_finder.local_directory)
logger.debug(
f"{value} has been normalized to {final_value}" f" for option '{option.config_name}'"
)
return final_value
def _parse_config(self, config_parser):
type2method = {
bool: config_parser.getboolean,
int: config_parser.getint,
}
config_dict: Dict[str, Any] = {}
if config_parser.has_section(self.program_name):
for option_name in config_parser.options(self.program_name):
type_ = DEFAULT_SETTINGS.type_hint(option_name)
method = type2method.get(type_, config_parser.get)
config_dict[option_name] = method(self.program_name, option_name)
return config_dict
def parse(self) -> dict:
"""Parse and return the local and user config files.
First this copies over the parsed local configuration and then
iterates over the options in the user configuration and sets them if
they were not set by the local configuration file.
Returns:
dict: Dictionary of the parsed and merged configuration options.
"""
user_config = self._parse_config(self.config_finder.user_config())
config = self._parse_config(self.config_finder.local_configs())
for option, value in user_config.items():
config.setdefault(option, value)
return config
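# A minimal sketch (not part of the original module) showing how the two classes
# above combine, mirroring Settings.parse(): the returned dict maps option names
# from the "catalyst" section of setup.cfg / tox.ini / .catalyst files (e.g.
# "cv_required") to their parsed values.
def _example_merged_config():
    finder = ConfigFileFinder("catalyst")
    return MergedConfigParser(finder).parse()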
SETTINGS = Settings.parse()
setattr(SETTINGS, "IS_CUDA_AVAILABLE", IS_CUDA_AVAILABLE) # noqa: B010
setattr(SETTINGS, "NUM_CUDA_DEVICES", NUM_CUDA_DEVICES) # noqa: B010
__all__ = [
"SETTINGS",
"Settings",
"ConfigFileFinder",
"MergedConfigParser",
]
|
{"hexsha": "078340ff999026914d1535eabf04e34cb8752000", "size": 17426, "ext": "py", "lang": "Python", "max_stars_repo_path": "catalyst/settings.py", "max_stars_repo_name": "stjordanis/catalyst-1", "max_stars_repo_head_hexsha": "93eedf0b9520bf1f83f63b13d6818df2a1e85b33", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-12-14T07:27:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-23T14:34:37.000Z", "max_issues_repo_path": "catalyst/settings.py", "max_issues_repo_name": "Ran485/catalyst", "max_issues_repo_head_hexsha": "84bc7576c981278f389279d87dda85dd66a758b6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "catalyst/settings.py", "max_forks_repo_name": "Ran485/catalyst", "max_forks_repo_head_hexsha": "84bc7576c981278f389279d87dda85dd66a758b6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.465034965, "max_line_length": 99, "alphanum_fraction": 0.6204521979, "include": true, "reason": "import scipy", "num_tokens": 3918}
|
module TwoDGridWorldUtils
using SparseArrays
using Distributions
import ..TMazeCumulantSchedules
import ..ContGridWorldParams
import ..ContGridWorld
import ..Learner
import ..check_goal
import ..range_check
import ..get_action_probs
import ..GVFHordes
import ..update
import ..Curiosity
import ..GVFSRHordes
import ..SRCreationUtils
import ..FeatureCreator
using ...StatsBase
const TMCS = TMazeCumulantSchedules
const CGWP = ContGridWorldParams
const SRCU = Curiosity.SRCreationUtils
struct GoalTermination <: GVFHordes.GVFParamFuncs.AbstractDiscount
γ::Float64
end
function Base.get(gt::GoalTermination; state_tp1, kwargs...)
any(state_tp1[3:end] .!= 0) ? 0.0 : gt.γ
end
struct GoalPolicy <: GVFHordes.GVFParamFuncs.AbstractPolicy
goal::Int
normalized::Bool
end
(π::GoalPolicy)(s) = sample(Weights([get(π, state_t=s, action_t=a) for a ∈ 1:4]))
function Base.get(π::GoalPolicy; state_t, action_t, kwargs...)
cur_y = state_t[1]
cur_x = state_t[2]
if π.goal == 1
boundry_y = π.normalized ? 0.9 : 9.0
boundry_x = π.normalized ? 0.1 : 1.0
if cur_x > boundry_x && cur_y < boundry_y
if CGWP.UP == action_t || CGWP.LEFT == action_t
0.5
else
0.0
end
elseif cur_x > boundry_x
if CGWP.LEFT == action_t
1.0
else
0.0
end
elseif cur_y < boundry_y
if CGWP.UP == action_t
1.0
else
0.0
end
else
if CGWP.UP == action_t
1.0
else
0.0
end
# @show action_t, state_t
# throw("What?")
end
elseif π.goal == 2
boundry_y = π.normalized ? 0.1 : 1.0
boundry_x = π.normalized ? 0.1 : 1.0
if cur_x > boundry_x && cur_y > boundry_y
if CGWP.DOWN == action_t || CGWP.LEFT == action_t
0.5
else
0.0
end
elseif cur_x > boundry_x
if CGWP.LEFT == action_t
1.0
else
0.0
end
elseif cur_y > boundry_y
if CGWP.DOWN == action_t
1.0
else
0.0
end
else
if CGWP.DOWN == action_t
1.0
else
0.0
end
# @show action_t, state_t
# throw("What?")
end
elseif π.goal == 3
boundry_y = π.normalized ? 0.9 : 9.0
boundry_x = π.normalized ? 0.9 : 9.0
if cur_x < boundry_x && cur_y < boundry_y
if CGWP.UP == action_t || CGWP.RIGHT == action_t
0.5
else
0.0
end
elseif cur_x < boundry_x
if CGWP.RIGHT == action_t
1.0
else
0.0
end
elseif cur_y < boundry_y
if CGWP.UP == action_t
1.0
else
0.0
end
else
if CGWP.UP == action_t
1.0
else
0.0
end
# @show action_t, state_t
# throw("What?")
end
elseif π.goal == 4
boundry_y = π.normalized ? 0.1 : 1.0
boundry_x = π.normalized ? 0.9 : 9.0
if cur_x < boundry_x && cur_y > boundry_y
if CGWP.DOWN == action_t || CGWP.RIGHT == action_t
0.5
else
0.0
end
elseif cur_x < boundry_x
if CGWP.RIGHT == action_t
1.0
else
0.0
end
elseif cur_y > boundry_y
if CGWP.DOWN == action_t
1.0
else
0.0
end
else
if CGWP.DOWN == action_t
1.0
else
0.0
end
# @show action_t, state_t
# throw("What?")
end
end
end
struct NaiveGoalPolicy <: GVFHordes.GVFParamFuncs.AbstractPolicy
goal::Int
end
(π::NaiveGoalPolicy)(s) = sample(Weights([Base.get(π, state_t=s, action_t=a) for a ∈ 1:4]))
function Base.get(π::NaiveGoalPolicy; state_t, action_t, kwargs...)
if π.goal == 1
if CGWP.UP == action_t || CGWP.LEFT == action_t
0.5
else
0.0
end
elseif π.goal == 2
if CGWP.DOWN == action_t || CGWP.LEFT == action_t
0.5
else
0.0
end
elseif π.goal == 3
if CGWP.UP == action_t || CGWP.RIGHT == action_t
0.5
else
0.0
end
elseif π.goal == 4
if CGWP.DOWN == action_t || CGWP.RIGHT == action_t
0.5
else
0.0
end
end
end
function create_demons(parsed, demon_projected_fc = nothing)
action_space = 4
demons = if parsed["demon_learner"] != "SR"
GVFHordes.Horde(
[GVFHordes.GVF(GVFHordes.GVFParamFuncs.FeatureCumulant(i+2),
GoalTermination(parsed["demon_gamma"]),
NaiveGoalPolicy(i)) for i in 1:4])
elseif parsed["demon_learner"] == "SR"
@assert demon_projected_fc != nothing
pred_horde = GVFHordes.Horde(
[GVFHordes.GVF(GVFHordes.GVFParamFuncs.FeatureCumulant(i+2),
GVFHordes.GVFParamFuncs.ConstantDiscount(0.0),
NaiveGoalPolicy(i)) for i in 1:4])
SF_policies = [NaiveGoalPolicy(i) for i in 1:4]
SF_discounts = [GoalTermination(parsed["demon_gamma"]) for i in 1:4]
num_SFs = length(SF_policies)
SF_horde = SRCU.create_SF_horde(SF_policies, SF_discounts, demon_projected_fc,1:action_space)
GVFSRHordes.SRHorde(pred_horde, SF_horde, num_SFs, demon_projected_fc)
else
throw(ArgumentError("Cannot create demons"))
end
return demons
end
function b_π(state_constructor, learner, exploration_strategy; kwargs...)
s = state_constructor(kwargs[:state_t])
preds = learner(s)
return exploration_strategy(preds)[kwargs[:action_t]]
end
function make_behaviour_gvf(learner, γ, state_constructor, expl_strat) #discount, state_constructor_func, learner, exploration_strategy)
GVF_policy = GVFHordes.GVFParamFuncs.FunctionalPolicy((;kwargs...) -> b_π(state_constructor, learner, expl_strat; kwargs...))
BehaviourGVF = GVFHordes.GVF(GVFHordes.GVFParamFuncs.RewardCumulant(),
GoalTermination(γ),
GVF_policy)
end
function check_goal(goal, state, epsilon=0.0)
cur_y = state[1]
cur_x = state[2]
if goal == 1
boundry_y = 0.9 - epsilon
boundry_x = 0.1 + epsilon
cur_y > boundry_y && cur_x < boundry_x
elseif goal == 2
boundry_y = 0.1 + epsilon
boundry_x = 0.1 + epsilon
cur_y < boundry_y && cur_x < boundry_x
elseif goal == 3
boundry_y = 0.9 - epsilon
boundry_x = 0.9 - epsilon
cur_y > boundry_y && cur_x > boundry_x
elseif goal == 4
boundry_y = 0.1 + epsilon
boundry_x = 0.9 - epsilon
cur_y < boundry_y && cur_x > boundry_x
else
false
end
end
####
# Ideal Feature Creator
####
struct IdealDemonFeatures <: FeatureCreator
end
function project_features(fc::IdealDemonFeatures, state, action, next_state)
new_state = sparsevec(convert(Array{Int,1}, [check_goal(i, next_state) for i in 1:4]))
return new_state
end
(FP::IdealDemonFeatures)(s_t,a_t,s_tp1) = project_features(FP, s_t, a_t, s_tp1)
Base.size(FP::IdealDemonFeatures) = 4
####
# Ideal State Action Feature Creator
####
struct IdealStateActionDemonFeatures <: FeatureCreator
num_actions::Int
end
function project_features(fc::IdealStateActionDemonFeatures, s_t, a_t, s_tp1)
goal_ind = findfirst([check_goal(i, s_tp1) for i in 1:4])
reward_feature = zeros(Int(fc.num_actions*4))
if !(goal_ind isa Nothing)
reward_feature[Int((goal_ind-1)*fc.num_actions + a_t)] = 1
end
return sparsevec(reward_feature)
end
(FP::IdealStateActionDemonFeatures)(s_t,a_t,s_tp1) = project_features(FP, s_t, a_t, s_tp1)
Base.size(FP::IdealStateActionDemonFeatures) = 4 * FP.num_actions
struct MarthaIdealDemonFeatures <: FeatureCreator
end
function project_features(fc::MarthaIdealDemonFeatures, state)
new_state = sparsevec(convert(Array{Int,1}, [check_goal(i, state, 0.05) for i in 1:4]))
return new_state
end
(FP::MarthaIdealDemonFeatures)(state) = project_features(FP, state)
Base.size(FP::MarthaIdealDemonFeatures) = 4
struct StateAggregation <: FeatureCreator
bins_per_dim::Int
end
function project_features(sa::StateAggregation, state)
bpd = sa.bins_per_dim
new_state = spzeros(Int, bpd*bpd)
y = state[1]
x = state[2]
idx_y = bpd
idx_x = bpd
    # Scan bin boundaries from largest to smallest so that the final assignment
    # is the smallest i with y < i//bpd (resp. x < i//bpd), i.e. the bin that
    # actually contains the coordinate (matching SmallStateAggregation below).
    for i in bpd:-1:1
        if y < i//bpd
            idx_y = i
        end
        if x < i//bpd
            idx_x = i
        end
    end
idx = (idx_x - 1) * bpd + idx_y
new_state[idx] = 1
new_state
end
(FP::StateAggregation)(state) = project_features(FP, state)
Base.size(FP::StateAggregation) = begin; bpd = FP.bins_per_dim; bpd*bpd; end
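# Minimal usage sketch (not part of the original module). It assumes a state of
# the form [y, x, ...] normalized to [0, 1], as project_features above expects:
# with 4 bins per dimension, [0.1, 0.6] falls in y-bin 1 and x-bin 3, i.e. the
# one-hot index (3 - 1) * 4 + 1 = 9 out of 16.
_example_state_aggregation() = StateAggregation(4)([0.1, 0.6])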
struct SmallStateAggregation <: FeatureCreator end
function project_features(::SmallStateAggregation, state)
new_state = spzeros(Int, 9)
y = state[1]
x = state[2]
idx_y = if y < 1//3
1
elseif y < 2//3
2
else
3
end
idx_x = if x < 1//3
1
elseif x < 2//3
2
else
3
end
idx = (idx_x - 1) * 3 + idx_y
new_state[idx] = 1
new_state
end
(fp::SmallStateAggregation)(state) = project_features(fp, state)
Base.size(::SmallStateAggregation) = 9
DrifterDistractor(parsed) = begin
c_dist = Uniform(parsed["constant_target"][1],parsed["constant_target"][2])
c1,c2 = rand(c_dist,2)
if "drifter" ∈ keys(parsed)
TMCS.DrifterDistractor(
c1,
c2,
parsed["drifter"][1],
parsed["drifter"][2],
parsed["distractor"][1],
parsed["distractor"][2])
else
TMCS.DrifterDistractor(
c1,
c2,
parsed["drifter_init"],
parsed["drifter_std"],
parsed["distractor_mean"],
parsed["distractor_std"])
end
end
DrifterDrifterDistractor(parsed) = begin
c_dist = Uniform(parsed["constant_target"][1],parsed["constant_target"][2])
c1 = rand(c_dist)
if "drifter_1" ∈ keys(parsed)
TMCS.DrifterDrifterDistractor(
c1,
parsed["drifter_1"][1],
parsed["drifter_1"][2],
parsed["drifter_2"][1],
parsed["drifter_2"][2],
parsed["distractor"][1],
parsed["distractor"][2])
else
TMCS.DrifterDrifterDistractor(
c1,
parsed["drifter_1_init"],
parsed["drifter_1_std"],
parsed["drifter_2_init"],
parsed["drifter_2_std"],
parsed["distractor_mean"],
parsed["distractor_std"])
end
end
function get_cumulant_schedule(parsed)
sched = parsed["cumulant_schedule"]
if parsed["cumulant_schedule"] == "DrifterDistractor"
DrifterDistractor(parsed)
elseif sched == "DrifterDrifterDistractor"
DrifterDrifterDistractor(parsed)
elseif parsed["cumulant_schedule"] == "Constant"
if parsed["cumulant"] isa Number
TMCS.Constant(parsed["cumulant"])
else
TMCS.Constant(parsed["cumulant"]...)
end
else
throw("$(sched) Not Implemented")
end
end
function get_true_values(env::Curiosity.ContGridWorld, eval_set)
copy_eval_est = deepcopy(eval_set)
num_gvfs = 4
goal_cumulants = TMCS.get_cumulant_eval_values(env.cumulant_schedule)
for i in 1:num_gvfs
copy_eval_est[i, :] .*= goal_cumulants[i]
end
return copy_eval_est
end
function get_true_values(env::Curiosity.ContGridWorld, eval_set, gvf_idx)
copy_eval_est = deepcopy(eval_set)
goal_cumulants = TMCS.get_cumulant_eval_values(env.cumulant_schedule)
copy_eval_est .*= goal_cumulants[gvf_idx]
return copy_eval_est
end
Base.@kwdef mutable struct RoundRobinPolicy{GT} <: Learner
cur_goal::Int = 1
num_goals::Int = 4
goal_type::GT = NaiveGoalPolicy
update = nothing
end
Curiosity.update!(rrp::RoundRobinPolicy, args...) = if args[end-2]
rrp.cur_goal += 1
if rrp.cur_goal > rrp.num_goals
rrp.cur_goal = 1
end
end
Base.get(rrp::RoundRobinPolicy; state_t, action_t, kwargs...) =
get(rrp.goal_type(rrp.cur_goal); state_t = state_t, action_t = action_t)
function get_action_probs(π::RoundRobinPolicy, features, state)
[get(π; state_t=state, action_t=a) for a ∈ 1:4]
end
end
|
{"hexsha": "b912089d2940bc1ea0ce8b49ed73d6989e6f1ecd", "size": 12956, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/utils/2d-gridworld.jl", "max_stars_repo_name": "MatthewMcLeod/curiosity", "max_stars_repo_head_hexsha": "7b452cb296c36a2e7b2f01763177c097bae8011c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-06T22:40:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-06T22:40:25.000Z", "max_issues_repo_path": "src/utils/2d-gridworld.jl", "max_issues_repo_name": "MatthewMcLeod/curiosity", "max_issues_repo_head_hexsha": "7b452cb296c36a2e7b2f01763177c097bae8011c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils/2d-gridworld.jl", "max_forks_repo_name": "MatthewMcLeod/curiosity", "max_forks_repo_head_hexsha": "7b452cb296c36a2e7b2f01763177c097bae8011c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3333333333, "max_line_length": 136, "alphanum_fraction": 0.579345477, "num_tokens": 3706}
|
class DataAveraging:
@staticmethod
def add_sensor_data_to_total(sensor_data, previous_total):
new_total = previous_total
for i in range(0,len(sensor_data)):
data_point = sensor_data[i]
if type(data_point) == int or type(data_point) == float:
                new_total[i] += sensor_data[i]  # accumulate so the total can later be averaged over the sample count
return new_total
@staticmethod
def get_average_sensor_data(total_sensor_data, index):
averaged_sensor_data = total_sensor_data
for i in range(0, len(averaged_sensor_data)):
data_point = averaged_sensor_data[i]
if type(data_point) == int or type(data_point) == float:
averaged_sensor_data[i] = averaged_sensor_data[i] / index
return averaged_sensor_data
def update_prev_points(pos, index):
"""
We are using deque objects instead of this function.
This function will be used to update the variables that can be used for calculations.
For this function to work, prev_pos must be set to 0 in main function.
Ideas: utilize if statements with the index variable to determine when to define the previous position points.
"""
"""
try:
prev_fifth = prev_fourth
prev_fourth = prev_third
prev_third = prev_second
prev_second = prev_pos
prev_pos = pos
return prev_pos, prev_second, prev_third, prev_fourth, prev_fifth
import pdb; pdb.set_trace()
except NameError:
try:
prev_fourth = prev_third
prev_third = prev_second
prev_second = prev_pos
prev_pos = pos
return prev_pos, prev_second, prev_third, prev_fourth
except NameError:
try:
#This code is always causing an error
prev_third = prev_second
prev_second = prev_pos
prev_pos = pos
return prev_pos, prev_second, prev_third
except NameError:
if prev_pos != 0:
prev_second = prev_pos
prev_pos = pos
return prev_pos, prev_second
else:
prev_pos = pos
return prev_pos
"""
if index == 0:
prev_pos = pos
return prev_pos
elif index == 1:
prev_second = prev_pos
prev_pos = pos
return prev_pos, prev_second
elif index == 2:
prev_third = prev_second
prev_second = prev_pos
prev_pos = pos
return prev_pos, prev_second, prev_third
elif index == 3:
prev_fourth = prev_third
prev_third = prev_second
prev_second = prev_pos
prev_pos = pos
return prev_pos, prev_second, prev_third, prev_fourth
elif index >= 4:
prev_fifth = prev_fourth
prev_fourth = prev_third
prev_third = prev_second
prev_second = prev_pos
prev_pos = pos
        pos_data_list = [pos, prev_pos, prev_second, prev_third, prev_fourth]
return prev_pos, prev_second, prev_third, prev_fourth, prev_fifth
from collections import deque #deque is a useful data storing tool
class BinData:
"""
This class is used for taking previous data points and making them available, in a list, for calculation.
Notes:
For use, the object must be defined in the main function.
All the imports needed are:
from modules.data_functions import DataFunctions as DataFunctions
from collections import deque
from modules.data_averaging import BinData as BinData
import numpy as np
"""
def __init__(self, bin_size = 5):
self.num = deque(maxlen=bin_size)
def add(self, new_data):
"""
This function appends new data to the list and pops out old data.
:param self: the object being used for this class
:param float new_data: the data to be inserted into the list
"""
import numpy as np
        if new_data == 0:
            new_data = np.nan  # treat a zero reading as missing data
        self.num.append(new_data)
def return_data(self):
"""
This function allows the user to store the list by returning it back to them.
:param self: the object being used for this class
:return list self.data: this returns the list created by deque
"""
return self.num
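# Minimal usage sketch (not part of the original module): keep a rolling window
# of the last 3 readings and average it, with a 0 reading treated as missing
# (NaN) by BinData.add above. The reading values are illustrative only.
def _example_bin_data_usage():
    import numpy as np
    window = BinData(bin_size=3)
    for reading in (10.0, 0, 12.0, 14.0):
        window.add(reading)
    return np.nanmean(list(window.return_data()))  # mean of the last 3 entries, ignoring NaN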
|
{"hexsha": "d1b1a658f60e42e0c81c15d0dfb3c3fd161f1c9f", "size": 4634, "ext": "py", "lang": "Python", "max_stars_repo_path": "house_code/main_programs/PSUPozyx/modules/data_averaging.py", "max_stars_repo_name": "mukobi/Pozyx-Gabe", "max_stars_repo_head_hexsha": "a8b444c2013b1df5043cd25106b72562409b5130", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-12T07:21:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-12T07:21:56.000Z", "max_issues_repo_path": "house_code/main_programs/PSUPozyx/modules/data_averaging.py", "max_issues_repo_name": "mukobi/Pozyx-Gabe", "max_issues_repo_head_hexsha": "a8b444c2013b1df5043cd25106b72562409b5130", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "house_code/main_programs/PSUPozyx/modules/data_averaging.py", "max_forks_repo_name": "mukobi/Pozyx-Gabe", "max_forks_repo_head_hexsha": "a8b444c2013b1df5043cd25106b72562409b5130", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8421052632, "max_line_length": 118, "alphanum_fraction": 0.583081571, "include": true, "reason": "import numpy", "num_tokens": 959}
|
from scipy.sparse import csr_matrix, coo_matrix, diags
from scipy.sparse import isspmatrix
import random
class WordSaladMatrixBuilder():
"""Aids in the construction of a WordSaladMatrix. The WordSaladMatrix object
has some finicky requirements and this object helps construct one in a
reasonably efficient manner.
It uses a sparse COO matrix to construct the final sparse matrix which can
then be used to find word follower probabilities."""
def __init__(self):
self.words = dict()
self.c = 0
self.row = []
self.col = []
self.data = []
def add_word(self, w):
if w not in self.words:
self.words[w] = self.c
#self.row.append(self.c)
#self.col.append(self.c)
#self.data.append(0)
self.c += 1
return self.c - 1
else:
return self.words[w]
def count_follower(self, w, f):
i = self.add_word(w)
j = self.add_word(f)
self.row.append(i)
self.col.append(j)
self.data.append(1)
def build_matrix(self):
m = coo_matrix((self.data, (self.row, self.col)), shape=(self.c, self.c))
m.sum_duplicates()
m = m.tocsr()
# Get row sums as a row matrix, and convert to float (default is int).
sums = m.sum(axis=1).astype("f")
# Get the reciprocal of each element.
for i in range(0, sums.shape[0]):
if sums[i, 0] > 0.0:
sums[i, 0] = 1.0 / sums[i, 0]
else:
sums[i, 0] = 0.0
# Create a diagonal matrix (scales rows on left-multiply) of sums
# When we multiply we will normalize each row so it becomes a
# weighted sum instead, and in our case a probability vector for a
# certain word.
sums = diags(sums.flat, 0, shape=m.shape)
return WordSaladMatrix(sums * m, self.words)
class WordSaladMatrix:
"""The WordSaladMatrix is a matrix (and a table) of "words" and their
associated "followers", encoding a Markov chain for them.
A word does not have to be an actual english word, it can be anything
hashable by Python. This is useful for tracking pairs of words for
instance, by inserting tuples instead of single strings. But it can also
be numbers, letters or anything else that can vaguely be emulated by
a Markov chain.
A follower is simply another "word" that follows the "word" in question,
the amount of time a word is followed by another is what is encoded by the
matrix.
The underlying matrix is sparse with the motivation that since a structure
is expected, a great deal of followers will have probability zero.
"""
def __init__(self, freqmatrix, wordtoindex):
if not isspmatrix(freqmatrix):
raise TypeError("freqmatrix must be a scipy sparse matrix, is type {}.".format(type(freqmatrix)))
self.matrix = freqmatrix
# Bijection word -> index
self.wordtoindex = dict(wordtoindex)
# The inverse of the bijection word -> index
self.indextoword = {i:w for w,i in self.wordtoindex.items()}
if self.matrix.shape[0] != self.matrix.shape[1]:
raise ValueError("Needs a square matrix.")
if len(self.wordtoindex) != self.matrix.shape[0]:
raise ValueError("length of wordtoindex does not match dimension of matrix.")
def __contains__(self, w):
return w in self.wordtoindex
def indexOf(self, w):
return self.wordtoindex[w]
def wordAt(self, i):
return self.indextoword[i]
def wordCount(self):
return len(self.wordtoindex)
def probability(self, w, f):
"""Returns the probability that a word w is followed by word f."""
if w not in self.wordtoindex or f not in self.wordtoindex:
raise ValueError("w or f is not in the matrix.")
i = self.wordtoindex[w]
j = self.wordtoindex[f]
return self.matrix[i, j]
def probabilities(self, w):
"""Returns the probability vector for a word. This contains as many
elements as there are words encoded in the matrix.
Each index has a bijective relation to a word."""
if w not in self.wordtoindex:
raise ValueError("w is not in the matrix.")
return self.matrix.getrow(self.wordtoindex[w])
def power(self, n):
"""Raises the probability matrix by integer n.
This can be used to find out what the probabilities are after n words.
This is usually pretty CPU-intensive, depending on the size of the
matrix.
"""
n = int(n)
self.matrix **= n
def __repr__(self):
return "<WordSaladMatrix with matrix shape {}>".format(self.matrix.shape)
def draw_follower(mat, w):
probs = mat.probabilities(w).tocoo() # Use a COO, lets us iterate better.
p = random.uniform(0.01, 1.0)
f = -1
for i,p1 in zip(probs.col, probs.data):
if p1 != 0.0:
p -= p1
if p <= 0.0:
f = i
break
if f == -1:
return None
    return mat.wordAt(f)
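# Minimal usage sketch (not part of the original module): build a tiny matrix by
# hand and draw a follower for one word. The word pairs are illustrative only.
def _example_salad_usage():
    mb = WordSaladMatrixBuilder()
    for w, f in [("the", "cat"), ("the", "dog"), ("cat", "sat")]:
        mb.count_follower(w, f)
    m = mb.build_matrix()
    # "the" is followed by "cat" or "dog" with probability 0.5 each.
    assert abs(m.probability("the", "cat") - 0.5) < 1e-9
    return draw_follower(m, "the")  # randomly "cat" or "dog"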
import string
def parseTextIntoMatrix(text, matrixbuilder, groupSize=1, sentenceStop=".!?", strip={"\r"," ", "\n", "\t"}, whitespace=string.whitespace, punctuation=string.punctuation, startGroups=None):
if groupSize < 1:
raise ValueError("groupSize must be >= 1")
if len(whitespace) < 1:
raise ValueError("whitespace list is empty.")
def tagStartGroup(x):
if startGroups is not None:
startGroups.append(x)
def split(txt):
last = ""
prev = ""
for c in txt:
if c in whitespace:
if prev in whitespace:
continue
else:
yield last
last = ""
elif c in punctuation:
yield last
yield c
last = ""
else:
last += c
prev = c
rawTokens = split(text)
def tokens(raw):
for r in raw:
if r in strip or r in whitespace:
continue
yield r
prev = None
group = []
def popgroup():
nonlocal prev, group
if groupSize == 1:
g = group[0]
group = []
return g
else:
tg = tuple(group)
group = []
return tg
for t in tokens(rawTokens):
group.append(t)
if len(group) == groupSize:
tg = popgroup()
matrixbuilder.count_follower(prev, tg)
if prev is None or any((p in sentenceStop for p in prev)): tagStartGroup(tg)
prev = tg
    if group:  # flush a trailing, non-empty partial group
matrixbuilder.count_follower(prev, tuple(group))
def joinSpecial(iterable, noprespace=".,:;)-!?'\"", nopostspace="(-'\"", humanize=True, stops="?!."):
fin = ""
prev = stops
for s in iterable:
if prev in stops:
s = s.capitalize()
if s in noprespace or prev in nopostspace:
fin += s
else:
fin += " " + s
prev = s
return fin
def test():
mb = WordSaladMatrixBuilder()
import re
ss = None
with open("test.txt", "rb") as f:
ss = f.read()
ss = ss.decode("utf-8")
ss = re.sub("[0-9]", "", ss)
startGroups = []
parseTextIntoMatrix(ss, mb, groupSize=2, startGroups=startGroups)
m = mb.build_matrix()
print(m)
for i in range(0, 1):
prev = random.choice(startGroups)
s = []
while "." not in prev:
s += prev
prev = draw_follower(m, prev)
if prev is None:
break
#m.power(2)
print(joinSpecial(map(lambda x: "" if x is None else x, s)))
#import cProfile
#cProfile.run("test()", sort="cumtime")
test()
|
{"hexsha": "5177f3b273ed335088934e0af031e3e6d9383613", "size": 8170, "ext": "py", "lang": "Python", "max_stars_repo_path": "markovchaintest.py", "max_stars_repo_name": "skurmedel/wordsalad", "max_stars_repo_head_hexsha": "5feaf29bf8b9c88624b783cd087a6589ea0ab48a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "markovchaintest.py", "max_issues_repo_name": "skurmedel/wordsalad", "max_issues_repo_head_hexsha": "5feaf29bf8b9c88624b783cd087a6589ea0ab48a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "markovchaintest.py", "max_forks_repo_name": "skurmedel/wordsalad", "max_forks_repo_head_hexsha": "5feaf29bf8b9c88624b783cd087a6589ea0ab48a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.68, "max_line_length": 188, "alphanum_fraction": 0.55875153, "include": true, "reason": "from scipy", "num_tokens": 1942}
|
# Export DynamicLinks API
export DynamicLinks,
getindex, endof, length, start, next, done, eltype,
handle, header
# Export Dynamic Link API
export DynamicLink,
DynamicLinks, handle, path
# Export RPath API
export RPath,
handle, rpaths, canonical_rpaths, find_library
"""
DynamicLinks
This type encapsulates the list of dynamic links within an object, holding a
collection of `DynamicLink` objects. The list of available API operations is
given below, with methods that subclasses must implement marked in emphasis:
### Creation
- *DynamicLinks()*
### Iteration
- *getindex()*
- *endof()*
- length()
- start()
- next()
- done()
- eltype()
### Misc.
- *handle()*
"""
abstract type DynamicLinks{H <: ObjectHandle} end
@mustimplement endof(dls::DynamicLinks)
start(dls::DynamicLinks) = 1
done(dls::DynamicLinks, idx) = idx > length(dls)
next(dls::DynamicLinks, idx) = (dls[idx], idx+1)
length(dls::DynamicLinks) = endof(dls)
eltype(::Type{D}) where {D <: DynamicLinks} = DynamicLink
@mustimplement getindex(dls::DynamicLinks, idx)
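# Minimal sketch (not part of the original module): with length() and getindex()
# implemented as documented above, any concrete DynamicLinks collection can be
# walked positionally. `dls` stands in for an instance of such a subtype.
_example_link_paths(dls::DynamicLinks) = [path(dls[idx]) for idx in 1:length(dls)]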
"""
DynamicLink
This type encapsulates the linkage of one object file to another. The list of
available API operations is given below, with methods that subclasses must
implement marked in emphasis:
### Creation:
- *DynamicLink()*
### Accessors:
- *DynamicLinks()*
- *handle()*
- *path()*
"""
abstract type DynamicLink{H <: ObjectHandle} end
@mustimplement path(dl::DynamicLink)
"""
RPath
This type encapsulates the search path used by an object file when looking for
a shared library. This class enables not only looking at the path, but
querying the path for matches for given library names. The list of available
API operations is given below, with methods that subclasses must implement
marked in emphasis:
### Creation:
- *RPath()*
### Utility
- *handle()*
### RPath operations
- *rpaths()*
- canonical_rpaths()
- find_library()
"""
abstract type RPath{H <: ObjectHandle} end
"""
RPath(oh::ObjectHandle)
Construct an `RPath` object from the given `ObjectHandle`.
"""
@mustimplement RPath(oh::ObjectHandle)
"""
handle(rpath::RPath)
Return the handle that this `RPath` object refers to.
"""
@mustimplement handle(rpath::RPath)
"""
rpaths(rpath::RPath)
Return the list of paths that will be searched for shared libraries.
"""
@mustimplement rpaths(rpath::RPath)
endof(rpath::RPath) = endof(rpaths(rpath))
start(rpath::RPath) = 1
done(rpath::RPath, idx) = idx > length(rpath)
next(rpath::RPath, idx) = (rpath[idx], idx+1)
length(rpath::RPath) = endof(rpath)
eltype(::Type{D}) where {D <: RPath} = String
getindex(rpath::RPath, idx) = rpaths(rpath)[idx]
"""
canonical_rpaths(rpath::RPath)
Return a canonicalized list of paths that will be searched.
"""
function canonical_rpaths(rpath::RPath)
origin = dirname(path(handle(rpath)))
paths = rpaths(rpath)
for idx in 1:length(paths)
# Substitute the path of the containing handle for `$ORIGIN` and
# `@loader_path`. Do the same for `@executable_path` even though
# that's technically incorrect, because we don't have a good way to
# track the web of dependencies right now.
paths[idx] = replace(paths[idx], "\$ORIGIN", origin)
paths[idx] = replace(paths[idx], "@loader_path", origin)
paths[idx] = replace(paths[idx], "@executable_path", origin)
paths[idx] = abspath(paths[idx])
end
return paths
end
"""
find_library(rpath::RPath, soname::String)
Return the full path to a library, searching the given `RPath`, and then the
default library search paths. This method takes the given `soname` and joins
it to the end of every path within the given `RPath`, returning the resultant
path if it exists, or the original `soname` if it does not.
"""
function find_library(rpath::RPath, soname::AbstractString)
for path in canonical_rpaths(rpath)
libpath = joinpath(path, soname)
if isfile(libpath)
return libpath
end
end
return soname
end
### Printing
function show(io::IO, dl::DynamicLink{H}) where {H <: ObjectHandle}
print(io, "$(format_string(H)) DynamicLink \"$(path(dl))\"")
end
show(io::IO, dls::DynamicLinks{H}) where {H <: ObjectHandle} = show_collection(io, dls, H)
show(io::IO, rp::RPath{H}) where {H <: ObjectHandle} = show_collection(io, rp, H)
|
{"hexsha": "950155fe8d24e55163a292250a6d59621e505ffc", "size": 4425, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Abstract/DynamicLink.jl", "max_stars_repo_name": "Keno/ObjectFile.jl", "max_stars_repo_head_hexsha": "98d7b327448456df024ab87243520f972c715e16", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-11-15T18:48:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-20T02:49:23.000Z", "max_issues_repo_path": "src/Abstract/DynamicLink.jl", "max_issues_repo_name": "Keno/ObjectFile.jl", "max_issues_repo_head_hexsha": "98d7b327448456df024ab87243520f972c715e16", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Abstract/DynamicLink.jl", "max_forks_repo_name": "Keno/ObjectFile.jl", "max_forks_repo_head_hexsha": "98d7b327448456df024ab87243520f972c715e16", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-20T02:49:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-20T02:49:35.000Z", "avg_line_length": 26.497005988, "max_line_length": 90, "alphanum_fraction": 0.6894915254, "num_tokens": 1175}
|
\lab{Object-Oriented Programming}{Object Oriented Programming}
\label{lab:OOP}
\objective{Teach object-oriented programming in Python.}
\section*{Introduction}
Writing readable code is an important skill for computer programmers.
Well-written code is easy to understand and modify.
An important part of readable code is how it is organized.
Object-oriented programming is one way to accomplish good code organization.
Python is a class-based language.
A class is a sort of blueprint for an object.
Calling a class constructs an object.
Once constructed, the user has access to an object's methods and attributes.
An object created by a class is called an \emph{instance} of the class.
It is also said that a class \emph{instantiates} objects.
\section*{Python Classes}
A Python class is code that determines an object's behavior.
A class is defined using the \li{class} command followed by the name of the class.
Other statements follow the \li{class} command to determine the behavior of objects instantiated by the class.
Finally, a class needs a method called a constructor that is called when the class instantiates a new object.
The constructor specifies the initial state of the object.
In Python, a class's constructor is always named \li{__init__}.
For example, the following is a class that creates backpack objects.
\begin{lstlisting}
# Backpack.py
class Backpack(object):
    """A Backpack object class. Has a color and a list of contents.

    Attributes:
        color (str): the color of the backpack.
        contents (list): the contents of the backpack.
    """

    def __init__(self, color = 'black'):
        """Constructor for a backpack object.
        Set the color and initialize the contents list.

        Inputs:
            color (str, opt): the color of the backpack. Defaults to 'black'.

        Returns:
            A backpack object with no contents.
        """
        # Assign the backpack a color.
        self.color = color
        # Create a list to store the contents of the backpack.
        self.contents = []

    def put(self, item):
        """Add 'item' to the backpack's content list."""
        self.contents.append(item)

    def take(self, item):
        """Remove 'item' from the backpack's content list."""
        self.contents.remove(item)
\end{lstlisting}
Note that the first argument of each method is called \li{self}.
This name refers to the object internally once it has been created.
The \li{self} argument is only included in the declaration of the class methods, but not when calling the methods.
In the above example, \li{self.color} refers to one of the backpack's internal attributes.
The argument \li{color} is an input for the constructor, and holds the value that is then stored in the backpack as \li{self.color}.
Classes may be imported in the same way as modules.
If the above module is saved as \li{Backpack.py}, it is possible to import the \li{Backpack} class from it.
Once imported, the \li{Backpack} class can be called to construct backpack objects.
\begin{lstlisting}
# From the file Backpack.py import the Backpack class. Then create a backpack.
>>> from Backpack import Backpack
>>> my_backpack = Backpack('green')
# Add an item to the backpack.
>>> my_backpack.put('notebook')
>>> my_backpack.put('pencils')
# Access attributes of the object.
>>> my_backpack.color
'green'
>>> my_backpack.contents
['notebook', 'pencils']
# Remove an item from the backpack.
>>> my_backpack.take('pencils')
>>> my_backpack.contents
['notebook']
\end{lstlisting}
% Problem 1: expand the Backpack class.
\begin{problem}
Expand the \li{Backpack} class.
Add attributes called \li{name} and \li{max_size}.
The \li{name} attribute is a label for the backpack, and the \li{max_size} attribute is the total capacity of the backpack.
Modify the constructor method to accept a \li{name} and a \li{max_size} in addition to \li{color}.
Set the default values of \li{name} to `backpack' and \li{max_size} to 5.
Once the new attributes are set, modify the \li{put} method to ensure that the backpack does not go over capacity.
If the user tries to put in more than \li{max_size} items, print ``Backpack Full.''
Finally, add a new method to the backpack called \li{dump} that empties the contents of the backpack.
\end{problem}
\section*{Inheritance}
In object-oriented programming, inheritance is a tool for code reuse and organization.
To create a new class similar to one that already exists, it is sometimes easier to ``inherit'' the already existing methods.
This is done by including the existing class as an argument in the class definition.
This creates a \emph{class heierarchy}.
For example, since a knapsack is a backpack (but not all backpacks are knapsacks), we can create a special \li{Knapsack} class that automatically inherits the attributes and methods of the \li{Backpack} class, and adds extra functionality.
A class that inherits from another class is called a \emph{subclass}.
The class that a subclass inherits from is called a \emph{superclass}.
We define a \li{Knapsack} subclass that inherits from the \li{Backpack} superclass using the following code:
\begin{lstlisting}
# Backpack.py
# Inherit the Backpack class in the Knapsack definition
class Knapsack(Backpack):
    """A Knapsack object class. Inherits from the Backpack class.
    A knapsack is smaller than a backpack and can be tied closed.

    Attributes:
        color (str): the color of the knapsack.
        name (str): the name of the knapsack.
        max_size (int): the maximum number of items that can fit in the
            knapsack.
        contents (list): the contents of the backpack.
        closed (bool): whether or not the knapsack is tied shut.
    """

    def __init__(self, color='brown', name='knapsack', max_size=3):
        """Constructor for a knapsack object. A knapsack only holds 3 items by
        default instead of 5. Use the Backpack constructor to initialize the
        name and max_size attributes.

        Inputs:
            color (str, opt): the color of the knapsack. Defaults to 'brown'.
            name (str, opt): the name of the knapsack. Defaults to 'knapsack'.
            max_size (int, opt): the maximum number of items that can be
                stored in the knapsack. Defaults to 3.

        Returns:
            A knapsack object with no contents.
        """
        Backpack.__init__(self, color, name, max_size)
        self.closed = True
\end{lstlisting}
A subclass may have new attributes and methods that are unavailable to the superclass.
If methods in the new class need to be changed, they are overridden, as is the case with the constructor in the \li{Knapsack} class.
New methods are included normally.
For example, the \li{put} and \li{take} methods in \li{Knapsack} are modified to check if the knapsack is shut.
\li{tie} and \li{untie} methods are added as well.
\begin{lstlisting}
# Knapsack class
    def put(self, item):
        """If the knapsack is untied, use the Backpack put() method."""
        if self.closed:
            print "Knapsack closed!"
        else:
            Backpack.put(self, item)

    def take(self, item):
        """If the knapsack is untied, use the Backpack take() method."""
        if self.closed:
            print "Knapsack closed!"
        else:
            Backpack.take(self, item)

    def untie(self):
        """Untie the knapsack."""
        self.closed = False

    def tie(self):
        """Tie the knapsack."""
        self.closed = True
\end{lstlisting}
Since \li{Knapsack} inherits from \li{Backpack}, a knapsack object is a backpack object.
All methods defined in the \li{Backpack} class are available to instances of the \li{Knapsack} class.
Thus, in this example, the \li{dump} method is available even though it is not defined explicitly in the \li{Knapsack} class.
\begin{lstlisting}
# Import the Knapsack class from Backpack.py
>>> from Backpack import Knapsack
>>> my_knapsack = Knapsack()
# The put and take method now require the knapsack to be open.
>>> my_knapsack.put('compass')
Knapsack closed!
# Open the knapsack and put in some items.
>>> my_knapsack.untie()
>>> my_knapsack.put('compass')
>>> my_knapsack.put('pocket knife')
>>> my_knapsack.contents
['compass', 'pocket knife']
# The dump method is inherited from the backpack class, and
# can be used even though it is not defined in the knapsack class.
>>> my_knapsack.dump()
>>> my_knapsack.contents
[]
\end{lstlisting}
% Problem 2: create an inheritance class.
\begin{problem}
Create a jetpack class that inherits from the backpack class.
Override the constructor so that the \li{color} attribute defaults to `silver', the \li{name} defaults to `jetpack', and the \li{max_size} defaults to 2.
Also in the constructor initialize a \li{fuel} attribute that keeps track of how much fuel is left in the jetpack.
Set the default amount of \li{fuel} to 10.
Add a \li{fly} method that accepts an amount of fuel to be burned, and decrement \li{fuel} by that amount.
If the user tries to burn more fuel than remains, print ``Not enough fuel!"
Finally, override the \li{dump} method so that both the contents and the fuel tank are emptied.
\end{problem}
\section*{Magic Methods}
Python magic methods can be used to make objects behave like built-in data types.
All magic methods begin and end with two underscores, like \li{__init__}.
Every Python object is automatically endowed with several magic methods, but they are hidden from the user because they begin with an underscore (this is a way of hiding attributes or methods from the user; for example, try hiding the \li{closed} attribute in the \li{Knapsack} class by changing it to \li{_closed}).
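For example, a sketch of this change in the \li{Knapsack} class (only the affected lines are shown; the remaining methods would be updated in the same way) is:
\begin{lstlisting}
# Knapsack class
    def __init__(self, color='brown', name='knapsack', max_size=3):
        Backpack.__init__(self, color, name, max_size)
        self._closed = True    # the leading underscore hides the attribute

    def put(self, item):
        """If the knapsack is untied, use the Backpack put() method."""
        if self._closed:
            print "Knapsack closed!"
        else:
            Backpack.put(self, item)
\end{lstlisting}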
You can see all of the available magic methods for a \li{Backpack} object in IPython with the following code:
\begin{lstlisting}
In [1]: from Backpack import Backpack
In [2]: b = Backpack()
In [3]: b.__ # Press 'tab'
b.__add__ b.__eq__ b.__lt__ b.__str__
b.__doc__ b.__init__ b.__module__
\end{lstlisting}
In all of our preceding examples, the comments enclosed by sets of three double quotes are the object's \emph{docstring}, stored as \li{__doc__}.
It is important to write good docstrings so that others can utilize your code correctly.
Now, suppose we wanted an operation for adding two backpacks together.
In the following class, adding two backpacks combines their contents.
\begin{lstlisting}
# Backpack class
    def __add__(self, other):
        """Add the contents of 'other' to the contents of 'self'.
        Note that the contents of 'other' are unchanged.

        Inputs:
            self (Backpack): the backpack on the left-hand side of the
                '+' addition operator.
            other (Backpack): The backpack on the right-hand side of the
                '+' addition operator.
        """
        self.contents = self.contents + other.contents
\end{lstlisting}
To demonstrate the addition method, create two instances of the \li{Backpack} class and add them together.
\begin{lstlisting}
>>> from Backpack import Backpack
>>> backpack1 = Backpack()
>>> backpack2 = Backpack()
# Put some items in the backpacks
>>> backpack1.put('textbook')
>>> backpack2.put('water bottle')
# Now add the backpacks like numbers
>>> backpack1 + backpack2
>>> backpack1.contents
['textbook', 'water bottle']
>>> backpack2.contents
['water bottle']
\end{lstlisting}
Subtraction, division, and multiplication may be similarly defined with the magic methods \li{__sub__}, \li{__div__}, and \li{__mul__}.
Magic methods also allow for comparisons.
The methods \li{__lt__}, \li{__le__}, \li{__gt__}, and \li{__ge__} correspond to $<$, $<=$, $>$, and $>=$, respectively.
For example, one backpack might be less than another if it has fewer items in its contents.
\begin{lstlisting}
# Backpack class
    def __lt__(self, other):
        """Compare two backpacks. If 'self' has fewer contents than 'other',
        return True. Otherwise, return False.

        Inputs:
            self (Backpack): the backpack on the left-hand side of the
                '<' comparison operator.
            other (Backpack): The backpack on the right-hand side of the
                '<' comparison operator.
        """
        if len(self.contents) < len(other.contents):
            return True
        else:
            return False
\end{lstlisting}
To test this, create two backpacks and use the $<$ operator.
\begin{lstlisting}
>>> from Backpack import Backpack
>>> backpack1 = Backpack()
>>> backpack2 = Backpack()
>>> backpack1.put('book')
>>> backpack2.put('water bottle')
>>> backpack1 < backpack2
False
>>> backpack2.put('pencils')
>>> backpack1 < backpack2
True
\end{lstlisting}
A complete list of available magic methods can be found \href{https://docs.python.org/2/reference/datamodel.html#special-method-names}{here}.
% Problem 3: __str__ and __eq__ magic methods
\begin{problem}
Endow the \li{Backpack} class with additional magic methods:
\begin{enumerate}
\item The \li{__str__} magic method is used to give the string representation of an object. When the object is used with the \li{print} command, the \li{__str__} method is called. Add the \li{__str__} method to the \li{Backpack} class so that printing a \li{Backpack} object yields:
\begin{lstlisting}
Name: <name>
Color: <color>
Size: <number of items in contents>
Max Size: <max_size>
Contents:
<item1>
<item2> ...
\end{lstlisting}
If the backpack is empty, the contents line should read:
\begin{lstlisting}
Contents: Empty
\end{lstlisting}
(Hint: Use the `\textbackslash{t}' tab character to help align output.)
\item The \li{__eq__} magic method is used to determine if two objects are equal. Add the \li{__eq__} magic method to Backpack.py so that two backpack objects are equal if and only if they have the same name, color, and contents. Note that the contents do not need to be in the same order for the contents to be the same.
\end{enumerate}
\end{problem}
% Problem 4: ComplexNumber class
\begin{problem}
Create a \li{ComplexNumber} class that supports the basic operations of complex numbers. Implement a \li{conjugate} method that returns a \li{ComplexNumber} object that is the complex conjugate, and a \li{norm} method that returns the magnitude of the complex number.
Neither method should receive any arguments.
Also implement the following magic methods:
\begin{enumerate}
\item \li{__add__(self, other)}
\item \li{__sub__(self, other)}
\item \li{__mul__(self, other)}
\item \li{__div__(self, other)} (Use the complex conjugate to implement division)
\end{enumerate}
\end{problem}
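A possible starting skeleton for this problem (shown only as a sketch; the remaining magic methods are left as part of the exercise) is:
\begin{lstlisting}
# ComplexNumber.py
class ComplexNumber(object):
    """A complex number with real and imaginary parts."""
    def __init__(self, real=0, imag=0):
        self.real = real
        self.imag = imag

    def conjugate(self):
        """Return the complex conjugate as a new ComplexNumber."""
        return ComplexNumber(self.real, -self.imag)

    def norm(self):
        """Return the magnitude of the complex number."""
        return (self.real**2 + self.imag**2)**0.5

    def __add__(self, other):
        return ComplexNumber(self.real + other.real, self.imag + other.imag)

    # __sub__, __mul__ and __div__ follow the same pattern.
\end{lstlisting}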
|
{"hexsha": "1c6c36fda53e6290a3ed336685f077f2ddcb1d23", "size": 14721, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Python/OOP/ObjectOriented.tex", "max_stars_repo_name": "jessicaleete/numerical_computing", "max_stars_repo_head_hexsha": "cc71f51f35ca74d00e617af3d1a0223e19fb9a68", "max_stars_repo_licenses": ["CC-BY-3.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python/OOP/ObjectOriented.tex", "max_issues_repo_name": "jessicaleete/numerical_computing", "max_issues_repo_head_hexsha": "cc71f51f35ca74d00e617af3d1a0223e19fb9a68", "max_issues_repo_licenses": ["CC-BY-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/OOP/ObjectOriented.tex", "max_forks_repo_name": "jessicaleete/numerical_computing", "max_forks_repo_head_hexsha": "cc71f51f35ca74d00e617af3d1a0223e19fb9a68", "max_forks_repo_licenses": ["CC-BY-3.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.5537190083, "max_line_length": 321, "alphanum_fraction": 0.7119760886, "num_tokens": 3583}
|
import faiss
import random
import os
import torch
import numpy as np
import torch.nn.functional as F
from tqdm.auto import tqdm
from torch.utils.data import DataLoader, TensorDataset, SequentialSampler
from datasets import load_from_disk
from transformers import AdamW, get_linear_schedule_with_warmup
class DenseRetrieval_with_Faiss:
def __init__(
self,
tokenizer,
q_encoder,
dataset=None,
args=None,
p_encoder=None,
num_neg=5,
is_trained=False,
wiki_path="/opt/ml/data/wiki_preprocessed_droped",
):
self.args = args
self.dataset = dataset
self.num_neg = num_neg
self.tokenizer = tokenizer
self.p_encoder = p_encoder
self.q_encoder = q_encoder
if torch.cuda.is_available():
if p_encoder is not None:
self.p_encoder.cuda()
self.q_encoder.cuda()
self.wiki_dataset = load_from_disk(wiki_path)
        self.search_corpus = list(self.wiki_dataset)  # materialize the wiki rows as a list of dicts
if not is_trained:
self.prepare_in_batch_negative(num_neg=num_neg)
def prepare_in_batch_negative(self, dataset=None, num_neg=5, tokenizer=None):
wiki_datasets = self.wiki_dataset
wiki_datasets.load_elasticsearch_index(
"text", host="localhost", port="9200", es_index_name="wikipedia_contexts"
)
if dataset is None:
dataset = self.dataset
if tokenizer is None:
tokenizer = self.tokenizer
        # 1. Build the in-batch negatives
p_with_neg = []
for c in tqdm(dataset):
p_with_neg.append(c["context"])
query = c["question"]
p_neg = []
_, retrieved_examples = wiki_datasets.get_nearest_examples(
"text", query, k=num_neg * 10
)
for index in range(num_neg * 10):
if retrieved_examples["document_id"][index] == c["document_id"]:
continue
p_neg.append(retrieved_examples["text"][index])
p_with_neg.extend(random.sample(p_neg, num_neg))
        assert len(p_with_neg) % (num_neg + 1) == 0, "Negative passages were added incorrectly."
        # 2. Build the (question, passage) dataset
q_seqs = tokenizer(
dataset["question"],
padding="max_length",
truncation=True,
return_tensors="pt",
)
p_seqs = tokenizer(
p_with_neg, padding="max_length", truncation=True, return_tensors="pt"
)
max_len = p_seqs["input_ids"].size(-1)
p_seqs["input_ids"] = p_seqs["input_ids"].view(-1, num_neg + 1, max_len)
p_seqs["attention_mask"] = p_seqs["attention_mask"].view(
-1, num_neg + 1, max_len
)
p_seqs["token_type_ids"] = p_seqs["token_type_ids"].view(
-1, num_neg + 1, max_len
)
train_dataset = TensorDataset(
p_seqs["input_ids"],
p_seqs["attention_mask"],
p_seqs["token_type_ids"],
q_seqs["input_ids"],
q_seqs["attention_mask"],
q_seqs["token_type_ids"],
)
self.train_dataloader = DataLoader(
train_dataset,
shuffle=True,
batch_size=self.args.per_device_train_batch_size,
)
def build_faiss(
self,
index_file_path="/opt/ml/data/wiki.index",
use_gpu=False,
del_p_encoder=False,
):
if os.path.isfile(index_file_path):
self.indexer = faiss.read_index(index_file_path)
if use_gpu:
self.indexer = faiss.index_cpu_to_all_gpus(self.indexer)
else:
self.__make_faiss_index(index_file_path, use_gpu, del_p_encoder)
def __make_faiss_index(
self,
index_file_path="/opt/ml/data/wiki.index",
use_gpu=False,
del_p_encoder=False,
):
eval_batch_size = 8
        # Construct dataloader
valid_p_seqs = self.tokenizer(
[example['text'] for example in self.search_corpus],
padding="max_length",
truncation=True,
return_tensors="pt",
)
valid_dataset = TensorDataset(
valid_p_seqs["input_ids"],
valid_p_seqs["attention_mask"],
valid_p_seqs["token_type_ids"],
)
valid_sampler = SequentialSampler(valid_dataset)
valid_dataloader = DataLoader(
valid_dataset, sampler=valid_sampler, batch_size=eval_batch_size
)
        # Inference using the passage encoder to get dense embeddings
p_embs = []
with torch.no_grad():
epoch_iterator = tqdm(
valid_dataloader, desc="Iteration", position=0, leave=True
)
self.p_encoder.eval()
for _, batch in enumerate(epoch_iterator):
batch = tuple(t.cuda() for t in batch)
p_inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
}
outputs = self.p_encoder(**p_inputs).to("cpu").numpy()
p_embs.extend(outputs)
        # Delete the passage encoder once it is no longer needed
if del_p_encoder:
del self.p_encoder
p_embs = np.array(p_embs)
emb_dim = p_embs.shape[-1]
        self.indexer = faiss.IndexFlatL2(emb_dim)  # exact (flat) L2 index; optionally moved to GPU below
if use_gpu:
self.indexer = faiss.index_cpu_to_all_gpus(self.indexer)
self.indexer.add(p_embs)
faiss.write_index(
faiss.index_gpu_to_cpu(self.indexer) if use_gpu else self.indexer,
index_file_path,
)
def train(self, args=None):
if args is None:
args = self.args
batch_size = args.per_device_train_batch_size
# Optimizer
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in self.p_encoder.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in self.p_encoder.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
{
"params": [
p
for n, p in self.q_encoder.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in self.q_encoder.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon
)
t_total = (
len(self.train_dataloader)
// args.gradient_accumulation_steps
* args.num_train_epochs
)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Start training!
self.p_encoder.train()
self.q_encoder.train()
global_step = 0
self.p_encoder.zero_grad()
self.q_encoder.zero_grad()
torch.cuda.empty_cache()
train_iterator = tqdm(range(int(args.num_train_epochs)), desc="Epoch")
# for _ in range(int(args.num_train_epochs)):
for _ in train_iterator:
with tqdm(self.train_dataloader, unit="batch") as tepoch:
for batch in tepoch:
                    # Positive passages are always placed first, so the target index is 0.
                    targets = torch.zeros(batch_size).long()
targets = targets.to(args.device)
p_inputs = {
"input_ids": batch[0]
.view(batch_size * (self.num_neg + 1), -1)
.to(args.device),
"attention_mask": batch[1]
.view(batch_size * (self.num_neg + 1), -1)
.to(args.device),
"token_type_ids": batch[2]
.view(batch_size * (self.num_neg + 1), -1)
.to(args.device),
}
q_inputs = {
"input_ids": batch[3].to(args.device),
"attention_mask": batch[4].to(args.device),
"token_type_ids": batch[5].to(args.device),
}
# (batch_size*(num_neg+1), emb_dim)
p_outputs = self.p_encoder(**p_inputs)
# (batch_size*, emb_dim)
q_outputs = self.q_encoder(**q_inputs)
# Calculate similarity score & loss
p_outputs = p_outputs.view(batch_size, self.num_neg + 1, -1)
p_outputs = torch.transpose(p_outputs, 1, 2)
q_outputs = q_outputs.view(batch_size, 1, -1)
sim_scores = torch.bmm(
q_outputs, p_outputs
).squeeze() # (batch_size, num_neg + 1)
sim_scores = sim_scores.view(batch_size, -1)
sim_scores = F.log_softmax(sim_scores, dim=1)
loss = F.nll_loss(sim_scores, targets)
tepoch.set_postfix(loss=f"{str(loss.item())}")
loss.backward()
optimizer.step()
scheduler.step()
self.p_encoder.zero_grad()
self.q_encoder.zero_grad()
global_step += 1
torch.cuda.empty_cache()
del p_inputs, q_inputs
print("save model & tokenizer")
self.p_encoder.save_pretrained(os.path.join(args.output_dir, 'p_encoder'))
self.q_encoder.save_pretrained(os.path.join(args.output_dir, 'q_encoder'))
self.tokenizer.save_pretrained(os.path.join(args.output_dir, 'tokenizer'))
def get_relevant_doc(self, query, k=1):
valid_q_seqs = self.tokenizer(
query, padding="max_length", truncation=True, return_tensors="pt"
).to("cuda")
with torch.no_grad():
self.q_encoder.eval()
q_emb = self.q_encoder(**valid_q_seqs).to("cpu").numpy()
del valid_q_seqs
q_emb = q_emb.astype(np.float32)
D, I = self.indexer.search(q_emb, k)
distances, index = D.tolist()[0], I.tolist()[0]
distance_list, doc_list = [], []
for d, i in zip(distances, index):
distance_list.append(d)
doc_list.append(self.search_corpus[i])
return distance_list, doc_list
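# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module).
# Assumptions: `BertEncoder` is a hypothetical encoder class whose forward
# pass returns pooled passage/question embeddings (as this retriever expects),
# the model name, dataset paths and TrainingArguments values are placeholders,
# and an Elasticsearch index named "wikipedia_contexts" is reachable on
# localhost:9200 for negative sampling.
# ---------------------------------------------------------------------------
# from transformers import AutoTokenizer, TrainingArguments
# from encoder import BertEncoder  # hypothetical module providing the encoders
#
# tokenizer = AutoTokenizer.from_pretrained("klue/bert-base")
# p_encoder = BertEncoder.from_pretrained("klue/bert-base")
# q_encoder = BertEncoder.from_pretrained("klue/bert-base")
# train_dataset = load_from_disk("/opt/ml/data/train_dataset")["train"]
# args = TrainingArguments(
#     output_dir="dense_retrieval",
#     learning_rate=2e-5,
#     per_device_train_batch_size=4,
#     num_train_epochs=2,
#     weight_decay=0.01,
# )
# retriever = DenseRetrieval_with_Faiss(
#     tokenizer=tokenizer, q_encoder=q_encoder, dataset=train_dataset,
#     args=args, p_encoder=p_encoder, num_neg=5,
# )
# retriever.train()
# retriever.build_faiss(use_gpu=False, del_p_encoder=True)
# distances, docs = retriever.get_relevant_doc("example question", k=5)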
|
{"hexsha": "dc4b231d162f705a1f11a66d69c0a6e40cc2bbc5", "size": 11191, "ext": "py", "lang": "Python", "max_stars_repo_path": "deprecated/dpr/code/dense_retrieval.py", "max_stars_repo_name": "eunaoeh/mrc-level2-nlp-01", "max_stars_repo_head_hexsha": "caa893ca7d689200b3528377901d59fa9ca452ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-25T04:30:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-25T04:30:51.000Z", "max_issues_repo_path": "deprecated/dpr/code/dense_retrieval.py", "max_issues_repo_name": "eunaoeh/mrc-level2-nlp-01", "max_issues_repo_head_hexsha": "caa893ca7d689200b3528377901d59fa9ca452ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deprecated/dpr/code/dense_retrieval.py", "max_forks_repo_name": "eunaoeh/mrc-level2-nlp-01", "max_forks_repo_head_hexsha": "caa893ca7d689200b3528377901d59fa9ca452ac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-11-21T22:53:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T09:22:25.000Z", "avg_line_length": 33.6066066066, "max_line_length": 86, "alphanum_fraction": 0.5314985256, "include": true, "reason": "import numpy", "num_tokens": 2374}
|
\documentclass[a4paper,10pt]{article}
\usepackage[utf8]{inputenc}
\usepackage[margin=1in]{geometry}
\usepackage{graphicx}
\usepackage{listings}
\usepackage{hyperref}
\begin{document}
\begin{titlepage}
\begin{center}
\vspace*{1cm}
\huge{\textbf{Music Genre Classification}}
\vspace{0.5cm}
CS725 Project
\vspace{3.5cm}
Indian Institute of Technology, Bombay\\
Department of Computer Science and Engineering
\vspace{3.5cm}
\Large{Anshul Gupta (16305R001) \\ Khursheed Ali (163059009) \\ Abhijeet Dubey (16305R006) \\ Nithin S (16305R007)}
\vfill
\vspace{0.8cm}
\includegraphics[scale=0.35]{IITB.png}
\end{center}
\end{titlepage}
\section{Project Description}
Sometimes we listen to a particular piece of music and instantly develop an affinity towards that genre, wanting to hear more music of the same type. At other times we simply want to organize our music collection by genre.
This project aims to classify music into different categories such as:
\begin{enumerate}
\item Rock
\item Hip Hop
\item Jazz
\item Metal
\item Classical
\item Pop
\item Disco
\end{enumerate}
\section{Tentative Approach}
We'll proceed using the following workflow:
\begin{itemize}
\item Pre-process the audio files.
\item Extract relevant features from the pre-processed files, for example Mel-Frequency Cepstral Coefficients (MFCCs) and the spectral centroid; a brief illustrative sketch follows this list.
\item Use ensemble learning to classify the extracted features. Classification algorithms like k-NN, Random Forest and linear-kernel SVM will be a part of our ensemble learning.
\end{itemize}
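As a brief illustration of this pipeline (assuming the \texttt{librosa} and \texttt{scikit-learn} libraries; the file path and parameter values are placeholders), feature extraction and the ensemble classifier could be sketched as follows:
\begin{lstlisting}[language=Python]
import librosa
import numpy as np
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC

def extract_features(path):
    # Load a 30-second clip and summarize MFCC and spectral-centroid statistics.
    y, sr = librosa.load(path, sr=22050, mono=True)
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
    centroid = librosa.feature.spectral_centroid(y=y, sr=sr)
    return np.hstack([mfcc.mean(axis=1), mfcc.std(axis=1),
                      centroid.mean(), centroid.std()])

# X (feature matrix) and y (genre labels) are built from the GTZAN tracks.
ensemble = VotingClassifier(
    estimators=[("knn", KNeighborsClassifier(n_neighbors=5)),
                ("rf", RandomForestClassifier(n_estimators=100)),
                ("svm", SVC(kernel="linear", probability=True))],
    voting="soft")
# ensemble.fit(X_train, y_train); predictions = ensemble.predict(X_test)
\end{lstlisting}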
\section{Papers}
\begin{itemize}
\item A Comparative Study on Content-based Music Genre Classification \cite{Li:2003:CSC:860435.860487}
\item Deep Content-based Music Recommendation \cite{Oord:2013:DCM:2999792.2999907}
\item A Benchmark Dataset for Audio Classification and Clustering \cite{HomburgEtAl_2005_ABencDataFor}
\end{itemize}
\section{Datasets}
%We will be using MSD \cite{Bertin-Mahieux2011} (Million Song Dataset). The dataset consists of almost all the information available through The Echo Nest API for one million popular tracks. This encompasses both metadata and audio analysis features. Each file is for one track which corresponds to one song, one release and one artist. All the information about these four items (track, song, release, artist) are in every file (which involves some redundancy, although the bulk of the data, relating to the audio analysis, is unique). Each audio is a 30 second sample.
We will use \textit{GTZAN Genre Collection} \cite{GTZAN}.
The dataset consists of 1000 audio tracks each 30 seconds long. It contains 10 genres, each represented by 100 tracks. The tracks are all 22050Hz Mono 16-bit audio files in .wav format.
\bibliographystyle{unsrt}
\bibliography{References}
\end{document}
|
{"hexsha": "89fa80bf5674f66cab58161bae938280a5e2419d", "size": 2800, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Project Proposal/Music Genre Classification.tex", "max_stars_repo_name": "anshulgupta0803/ipl-match-prediction", "max_stars_repo_head_hexsha": "955cf539b307c1a2b0d93f6dfc7036f379902678", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Project Proposal/Music Genre Classification.tex", "max_issues_repo_name": "anshulgupta0803/ipl-match-prediction", "max_issues_repo_head_hexsha": "955cf539b307c1a2b0d93f6dfc7036f379902678", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Project Proposal/Music Genre Classification.tex", "max_forks_repo_name": "anshulgupta0803/ipl-match-prediction", "max_forks_repo_head_hexsha": "955cf539b307c1a2b0d93f6dfc7036f379902678", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3333333333, "max_line_length": 570, "alphanum_fraction": 0.7864285714, "num_tokens": 730}
|
"""Tests for writers.record."""
import numpy as np
import tensorflow as tf
import pytest
import deepr as dpr
@pytest.mark.parametrize("shape", [[1], [2], [2, 3], [None, 3], [2, 3, 4], [None, 3, 4]])
@pytest.mark.parametrize("dtype", [tf.int64, tf.float32])
@pytest.mark.parametrize("chunk_size", [None, 2])
def test_writers_record_simple(tmpdir, shape, dtype, chunk_size):
"""Simple test for record writer."""
path = str(tmpdir.join("data.tfrecord.gz")) if not chunk_size else str(tmpdir.join("data"))
size = 1
static_shape = [s if s is not None else 2 for s in shape]
for s in static_shape:
size *= s
# Define dataset
def _gen():
for idx in range(5):
yield {"x": np.reshape(np.arange(size) * idx, static_shape)}
dataset = tf.data.Dataset.from_generator(_gen, output_types={"x": dtype}, output_shapes={"x": shape})
# Write dataset
field = dpr.Field(name="x", shape=shape, dtype=dtype)
prepro_fn = dpr.prepros.ToExample([field])
writer = dpr.writers.TFRecordWriter(path=path, chunk_size=chunk_size)
writer.write(prepro_fn(dataset))
# Read dataset
reader = dpr.readers.TFRecordReader(path=path, shuffle=False, num_parallel_reads=None, num_parallel_calls=None)
prepro_fn = dpr.prepros.FromExample([field])
idx = 0
for idx, (got, expected) in enumerate(zip(dpr.readers.from_dataset(prepro_fn(reader())), _gen())):
np.testing.assert_equal(got, expected)
assert idx == 4
|
{"hexsha": "b9f2ea2ffe8c4b176390b015e564ee938e8d13f0", "size": 1485, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/unit/writers/test_writers_record.py", "max_stars_repo_name": "drohde/deepr", "max_stars_repo_head_hexsha": "672772ea3ce9cf391f9f8efc7ae9c9d438957817", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/unit/writers/test_writers_record.py", "max_issues_repo_name": "drohde/deepr", "max_issues_repo_head_hexsha": "672772ea3ce9cf391f9f8efc7ae9c9d438957817", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/unit/writers/test_writers_record.py", "max_forks_repo_name": "drohde/deepr", "max_forks_repo_head_hexsha": "672772ea3ce9cf391f9f8efc7ae9c9d438957817", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2195121951, "max_line_length": 115, "alphanum_fraction": 0.6727272727, "include": true, "reason": "import numpy", "num_tokens": 405}
|
[STATEMENT]
lemma merge_eq: "xs\<noteq>[] \<or> ys\<noteq>[] \<Longrightarrow> merge xs ys = (
if ys=[] \<or> (xs\<noteq>[] \<and> hd xs < hd ys) then hd xs # merge (tl xs) ys
else hd ys # merge xs (tl ys)
)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. xs \<noteq> [] \<or> ys \<noteq> [] \<Longrightarrow> merge xs ys = (if ys = [] \<or> xs \<noteq> [] \<and> hd xs < hd ys then hd xs # merge (tl xs) ys else hd ys # merge xs (tl ys))
[PROOF STEP]
by (cases xs; cases ys; simp)
|
{"llama_tokens": 215, "file": "IMP2_doc_Examples", "length": 1}
|
\SecDef{minimal}{Minimal and Maximal Zero-Sum Sets}
In this section we study zero-sum sets of particular rank $n$ and prove results on their existence. We are particularly interested in the smallest of such sets, defined in the following sense.
\begin{definition}
We denote by $\minzs{n}{d}$ the minimum number $m \in \ZZplus$ for which there exists an $S \in \ZS{n}{m}{d}$. We call a zero-sum set \emph{minimal} if it is contained in $\ZS{n}{\minzs{n}{d}}{d}$. Analogously, a zero-sum set $S \in \ZS{n}{m}{d}$ is called \emph{maximal} if $\ZS{n'}{m}{d} = \varnothing$ for all $n' > n$.
\end{definition}
Note that $\minzs{n}{d}$ is only defined if $n > d$ as otherwise, the only degree-$d$ zero-sum set in $\F_2^n$ is the empty set. We first characterize the zero-sum sets of particular rank $n$ in terms of Boolean functions.
\subsection{Relations between Zero-Sum Sets and Affine Annihilators of Boolean Functions}
The first three existence results are presented in \PropRef{annihilator_construction}, \PropRef{annihilator_construction2} and \PropRef{max_annihilator_construction} and outline the link between zero-sum sets and the dimensions of degree-$1$ annihilator spaces of Boolean functions.
\begin{proposition}
\PropLabel{annihilator_construction}
There exists a degree-$d$ zero-sum set $S \in \ZS{n}{m}{d}$ if and only if there exists a Boolean function $h \in \BF{n}{n-d-1}$ with $\wt(h) = m$ and $\dim \AN_1(h) \leq 1$.
\end{proposition}
\begin{proof}
Let us assume that $S \in \ZS{n}{m}{d}$ is given in systematic form, i.e., it can be represented as in \EqRef{systematic_form}. Then, $S = \supp(h)$ for a Boolean function $h \in \BF{n}{n-d-1}$ for which $\forall i\in \{1,\dots,n\}: h(e_i) = 1$. Such a function cannot have a linear annihilator and therefore, any $a \in \AN_1(h) \setminus \{0\}$ must be of the form $a = \ell+1$ for a linear Boolean function $\ell$. It follows that $\dim \AN_1(h) \leq 1$.
Let now $h \in \BF{n}{n-d-1}$ with $\wt(h) = m$ and $\dim \AN_1(h) \leq 1$. Let $a \in \AN_1(h) \setminus \{0\}$. If $a = \ell + 1$ for a linear function $\ell$, then $h$ has no linear annihilator. If $a$ is linear, we fix a constant $c \in \F_2^n$ for which $a(c)=1$ and consider the function $h_c\colon x \mapsto h(x+c) \in \BF{n}{n-d-1}$ which is domain affine equivalent to $h$ and thus has the same weight. It is easy to verify that $a+1$ is an affine annihilator for $h_c$. Because the dimensions of the annihilator spaces are invariant under domain affine equivalence, $h_c$ has no linear annihilators. Therefore, without loss of generality, we can assume that $h$ has no linear annihilator. Let $S = \supp(h) \subseteq \F_2^n$ be the support of $h$ and consider a matrix $\matzs{S}$ the columns of which form exactly the set $S$. Since $h$ has no linear annihilator, there is no linear combination of rows of $\matzs{S}$ that is equal to zero. We conclude that $\matzs{S}$ has full rank $n$ and $S\in \ZS{n}{m}{d}$.
\end{proof}
\begin{proposition}
\PropLabel{annihilator_construction2}
Given a function $h \in \BF{n}{n-d-1}$ with $\wt(h) = m$ and $\AN_1(h) = \{0\}$, it is possible to construct a zero-sum set in $\ZS{(n+1)}{m}{d}$.
\end{proposition}
\begin{proof}
Consider the function \[h': \F_2^{n+1} \to \F_2, (x_1, \ldots, x_{n+1}) \mapsto x_{n+1} h(x_1, \ldots, x_n)\;.\] Note that $h'$ has degree at most $n-d$. Further, $h'$ has no linear annihilator. Otherwise, by setting $x_{n+1} = 1$, we would obtain that $h$ has an annihilator of algebraic degree $1$, contradicting $\AN_1(h) = \{0\}$. By \PropRef{annihilator_construction}, we can construct $S \in \ZS{(n+1)}{m}{d}$.
\end{proof}
The converse statement is true for maximal zero-sum sets.
\begin{proposition}
\PropLabel{max_annihilator_construction}
Let $n \geq 2$ and let $S \in \ZS{(n+1)}{m}{d}$ be maximal. Then, $\Ind_S$ is domain linear equivalent to a function $h \in \BF{n+1}{n-d}$ of the form
\eql{annihilator_reduce}{
h(x_1,\dots,x_{n+1}) = x_{n+1}\cdot g(x_1,\dots,x_{n}),
}
where $g \in \BF{n}{n-d-1}$ with $\wt(g) = \wt(h) = m$ and $\AN_1(g) = \{0\}$. Further, if $m < 2^{n-1}$, then $\AI(g) \geq 2$.
\end{proposition}
\begin{proof}
Let $\matzs{S}$ be a matrix whose columns correspond to the elements of $S$. Because $S$ is maximal, the vector subspace of $\F_2^m$ spanned by the rows of $\matzs{S}$ must contain the all-1 vector $\idvec{n} \coloneqq (1,1,\dots,1)$. Otherwise, one would obtain a zero-sum set in $\ZS{(n+2)}{m}{d}$ defined by the matrix
$$
\coltwo{\matzs{S}}{\idvec{n}}.
$$
Therefore, we can apply a linear permutation $A$ on the columns of $\matzs{S}$ such that $\Ind_{A(S)} = h$ where $h \in \BF{n+1}{n-d}$ is of the form as given in \EqRef{annihilator_reduce} with $g \in \BF{n}{n-d-1}$ and $\wt(g) = \wt(h)$. It is left to show that $\AN_1(g) = \{0\}$.
Clearly, $g$ cannot have a linear annihilator. We assume now that $g$ has an annihilator of degree $1$ of the form $(x_1,\dots,x_n) \mapsto 1+\bigoplus_{i=1}^{n}a_ix_i$. Then, $g(x)=0$ for all $x$ with $\bigoplus_{i=1}^{n}a_ix_i = 0$. Let $j$ be such that $a_j = 1$. For the linear permutation $Q:\field{n} \to \field{n}$, $Q(x_1, \ldots, x_{n}) = (x_1, \ldots, x_{j-1}, \bigoplus_{i=1}^{n}a_ix_i, x_{j+1}, \dots, x_{n})$, we have
\[ g(Q(x_1,\dots,x_{n})) = x_{j}\cdot g'(x_1,\dots,x_{j-1},x_{j+1},\dots,x_{n})\;\] for a function $g' \in \BF{n-1}{n-d-2}$. But this means that $h$ is linear-equivalent to a function of the form $(x_1,\dots,x_{n+1}) \mapsto x_{n+1}\cdot x_{n}\cdot g'(x_1, \ldots, x_{n-1})$, which has a linear annihilator $x_{n+1}+x_{n}$. We get a contradiction and conclude that $\AN_1(g) = \{0\}$.
If $m < 2^{n-1}$, it is easy to see that $g+1$ cannot admit an annihilator of algebraic degree $1$. Suppose that $a \in \AN_1(g+1)\setminus \{0\}$. Then, $\wt(a) = 2^{n-1}$ and $ag = a$, which is impossible.
\end{proof}
As \PropRef{max_annihilator_construction} only holds for maximal zero-sum sets, we cannot use it to establish an equivalence between minimal degree-$d$ zero-sum sets of rank $n+1$ and $n$-bit Boolean functions of degree $n-d-1$ with algebraic immunity at least 2 and minimum weight. We therefore propose the following question:
\begin{question}
\Label{q:minimal-maximal}
Let $S \in \ZS{n}{F(n,d)}{d}$ be minimal. What are necessary and sufficient conditions for $S$ to be maximal?
\end{question}
\subsection{Minimal Zero-Sum Sets: Bounds and Values for $\minzs{n}{d}$}
In order to derive values for $\minzs{n}{d}$, we basically have to study the Boolean functions that admit at most one annihilator of algebraic degree $1$ and find those of minimum weight. Indeed, from \PropRef{annihilator_construction}, we know that
$$
\minzs{n}{d} = \min\{ \wt(g) \mid g \in \BF{n}{n-d-1} \setminus \{0\} ~\text{with}~ \dim \AN_1(g) \leq 1\}.
$$
For $d=1$ and $d=2$ we can easily determine the cardinalities of minimal degree-$d$ zero-sum sets, as stated in \PropRef{min_1} and \PropRef{min_2}. The proofs also provide a construction for a minimal zero-sum set. While the proof for $d=1$ is rather trivial, the proof for $d=2$ relies on the relation between degree-$2$ zero-sum sets and semi-orthogonal matrices.
\begin{proposition}
\PropLabel{min_1}
For $n \geq 2$, $\minzs{n}{1} = n + 2 - (n \mod 2)$.
\end{proposition}
\begin{proof}
Consider a zero-sum set $S\in\ZS{n}{m}{1}$ and its matrix in systematic form. Each row must have an even weight; therefore, there must be at least one extra column besides the identity part, i.e., $m \ge n+1$. By setting the extra column to the all-one vector $\idvec{n}$ we make all rows have even weight. Furthermore, $m$ must be even, so we may also need to add the all-zero column. The proposition follows.
\end{proof}
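For illustration, for $n=3$ and $n=4$ this construction yields the minimal zero-sum sets given by
$$
\matzs{S_3} = \matb{
1 & 0 & 0 & 1 \\
0 & 1 & 0 & 1 \\
0 & 0 & 1 & 1
}, \quad
\matzs{S_4} = \matb{
1 & 0 & 0 & 0 & 1 & 0 \\
0 & 1 & 0 & 0 & 1 & 0 \\
0 & 0 & 1 & 0 & 1 & 0 \\
0 & 0 & 0 & 1 & 1 & 0
},
$$
with $|S_3| = 4 = \minzs{3}{1}$ and $|S_4| = 6 = \minzs{4}{1}$; in both matrices every row has even weight and the rank is full.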
\begin{proposition}
\PropLabel{min_2}
For $n = 4$ and for $n > 5$, we have $\minzs{n}{2} = 2n$. Further, $\minzs{3}{2} = 8$ and $\minzs{5}{2} = 12$.
\end{proposition}
\begin{proof}
Let $n \geq 3$ and $m$ be minimal such that there exists an $S \in \ZS{n}{m}{2}$. Let further $L \in \F_2^{n \times (m-n)}$ such that $S$ is in systematic form with $\matzs{S} = \rowtwo{\idmat{n}}{L}$. As $\matzs{S}$ cannot contain any repeated columns, it is $\matzs{S} = \matl{L}$ and thus, $L$ must be semi-orthogonal and $n \leq (m-n)$. It follows that $\minzs{n}{2} = m \geq 2n$.
Let now $n = 4$ or $n \geq 6$. To prove the existence of an $S \in \ZS{n}{2n}{2}$, we observe that if $L \in \F_2^{n\times n}$ is an orthogonal matrix for which each column has weight larger than $1$, $\matl{L}$ defines a degree-$2$ zero-sum set of size $2n$ and rank $n$ according to \PropRef{inner_product}. It is left to show that, for any dimension $n=4$ or $n \geq 6$, there exists an orthogonal matrix for which no column corresponds to a unit vector. We are going to distinguish four cases. Let us define the orthogonal matrices $M_4$ and $M_6$ as
$$
M_4 = \matb{
0 & 1 & 1 & 1 \\
1 & 0 & 1 & 1 \\
1 & 1 & 0 & 1 \\
1 & 1 & 1 & 0
}, \quad M_6 = \matb{
0 & 1 & 1 & 1 & 1 & 1 \\
1 & 0 & 1 & 1 & 1 & 1 \\
1 & 1 & 0 & 1 & 1 & 1 \\
1 & 1 & 1 & 0 & 1 & 1 \\
1 & 1 & 1 & 1 & 0 & 1 \\
1 & 1 & 1 & 1 & 1 & 0
}.
$$
Case 1 ($n = 0 \mod 4$): The block-diagonal matrix $\diag(M_4,\dots,M_4)$, which contains $M_4$ as its diagonal blocks, is orthogonal and each of its columns has weight $3$.
Case 2 ($n = 2 \mod 4$): Because $n > 5$, we have $n = 4k+6$ for some $k \geq 0$, and the matrix $\diag(M_6,M_4,M_4,\dots,M_4)$ is orthogonal and each column has weight at least $3$.
Case 3 ($n = 3 \mod 4$): Because $n > 5$, we have $n = 4k + 3$ for some $k \geq 1$, and the two matrices $D_1 = \diag(1,1,1,M_4,M_4,\dots,M_4)$ and $D_2 = \diag(M_4,1,1,\dots,1)$ are orthogonal. Their product is orthogonal and of the form
\newcommand{\biga}{\mbox{\normalfont\Large A}}
\newcommand{\bigd}{\mbox{\normalfont\Large D}}
\eql{diag_matrix}{
D_1D_2 =
\left[\begin{array}{@{}c|@{}c}
\begin{matrix}
0 & 1 & 1 & 1 \\
1 & 0 & 1 & 1\\
1 & 1 & 0 & 1
\end{matrix}
~&~ \begin{matrix}
0 & 0 & \dots & 0 \\
0 & 0 & \dots & 0\\
0 & 0 & \dots & 0
\end{matrix} \\
\hline
\biga &
\bigd
\end{array}\right],
}
where $D$ is the $4k \times (4k-1)$ submatrix of $\diag(M_4,\dots,M_4)$ omitting the first column. It is obvious that each column has weight at least $3$.
Case 4 ($n = 1 \mod 4$): Because $n > 5$, we have $n \geq 9$ and $n = 4k + 6 + 3$ for some $k \geq 0$. The two matrices $D_1 = \diag(1,1,1,M_6,M_4,\dots,M_4)$ and $D_2 = \diag(M_4,1,1,\dots,1)$ are orthogonal. Their product is orthogonal and of the form given in \EqRef{diag_matrix} with $D$ as the $(4k + 6) \times (4k + 6 -1)$ submatrix of $\diag(M_6,M_4,M_4,\dots,M_4)$ omitting the first column. It is obvious that each column has weight at least $3$.
For $n=3$ we use the fact that any degree-$d$ zero-sum set must contain at least $2^{d+1}$ elements. Thus, $\minzs{3}{2} \geq 8$. We obtain $\minzs{3}{2} = 8$ because $\F_2^3$ is a degree-$2$ zero-sum set.
For $n=5$, assume that there exists an orthogonal matrix $L \in \F_2^{5 \times 5}$ which does not have a unit vector as its row (or column). From point $(iii)$ of \PropRef{zero_sum} it follows that any $2\times 5$ submatrix of $L$ must contain an odd number of columns equal to each of $(0,1),(1,0),(0,0)$ and an even number of columns equal to $(1,1)$ (same applies for rows of any $5 \times 2$ submatrix of $L$). It follows that, up to a permutation of rows, $L$ has the following form:
\begin{equation}
L = \matb{
1 & 0 & 0 & 1 & 1\\
0 & 1 & 0 & 1 & 1\\
0 & 0 & . & . & . \\
1 & 1 & . & . & . \\
1 & 1 & . & . & .
}.
\end{equation}
It is easy to see that it is not possible to complete this matrix such that all $2\times 5$ and $5\times 2$ submatrices satisfy the condition.
Therefore, $\minzs{5}{2} > 10$. Moreover, it is easy to verify that
$$
\matzs{S} = \matb{
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 1 & 1 \\
0 & 1 & 0 & 0 & 0 & 0 & 1 & 1 & 1 & 0 & 1 & 1\\
0 & 0 & 1 & 0 & 0 & 1 & 0 & 1 & 1 & 0 & 1 & 1\\
0 & 0 & 0 & 1 & 0 & 1 & 1 & 0 & 1 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 1 & 0 & 0 & 1 & 0 & 1 & 0 & 1
}
$$
defines a zero-sum set in $\ZS{5}{12}{2}$, thus $\minzs{5}{2} = 12$.
\end{proof}
\PropRef{extending_construction} below presents a simple way to construct a degree-$(d+1)$ zero-sum set of rank $n+1$ from a degree-$d$ zero-sum set of rank $n$. This construction might be used to derive an upper bound on $\minzs{n}{d}$.
\begin{proposition}
\PropLabel{extending_construction}
If there exists an $S \in \ZS{n}{m}{d}$, one can construct a zero-sum set $S' \in \ZS{(n+1)}{2m}{d+1}$. In particular, for $n > d+1$, $\minzs{n}{d} \leq 2 \minzs{n-1}{d-1}$.
\end{proposition}
\begin{proof}
If $S \in \ZS{n}{m}{d}$, then the columns of the matrix
\[ \left[\begin{array}{ccc}
0~\ldots~0 &|& 1~\ldots~1 \\
\matzs{S} &|& \matzs{S}
\end{array} \right]\]
define a degree-$(d+1)$ zero-sum set $S'$ with $2m$ elements of rank $n+1$. We remark that both sets $S$ and $S'$ have essentially the same indicator function, only the domain dimension is different.
\end{proof}
Note that the upper bound on $F(n,d)$ given by this construction is not always tight. Let $S \subseteq \F_2^9$ be such that $\Ind_S(x)=x_1(x_2x_3x_4x_5 + x_6x_7x_8x_9)$. It is easy to verify that $S \in \ZS{9}{30}{3}$. It follows that $F(9,3) \le 30 \ne 2F(8,2) = 32$.
The corresponding matrix $\matzs{S}$ is given by:
\setcounter{MaxMatrixCols}{30}
\begin{equation}
\small
\setlength\arraycolsep{2pt}
\matzs{S} = \begin{bmatrix}
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 \\
0 & 0 & 0 & 0 & 1 & 1 & 1 & 1 & 0 & 0 & 0 & 0 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 \\
0 & 0 & 1 & 1 & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 \\
0 & 1 & 0 & 1 & 0 & 1 & 0 & 1 & 0 & 1 & 0 & 1 & 0 & 1 & 0 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 \\
1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 1 & 1 & 1 & 1 & 1 & 1 \\
1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 0 & 0 & 0 & 0 & 1 & 1 & 1 & 1 & 0 & 0 & 0 & 0 & 1 & 1 & 1 \\
1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 1 & 0 & 0 & 1 \\
1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 0 & 1 & 0 & 1 & 0 & 1 & 0 & 1 & 0 & 1 & 0 & 1 & 0 & 1 & 0 \\
1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1
\end{bmatrix}.
\end{equation}
\begin{proposition}
\PropLabel{diagonal_construction}
For any $d \in \ZZplus$ and $n_1,n_2 >d$, $\minzs{n_1+n_2}{d} \leq \minzs{n_1}{d}+\minzs{n_2}{d}$.
\end{proposition}
\begin{proof}
If $S_1 \in \ZS{n_1}{m_1}{d}, S_2 \in \ZS{n_2}{m_2}{d}$, then the columns of the matrix
$$
\matzs{S} = \psquare{
\begin{array}{c|c}
\matzs{S_1} &
\begin{array}{c}0~\ldots~0 \\ \vdots \\ 0~\ldots~0\end{array} \\
\hline
\begin{array}{c}0~\ldots~0 \\ \vdots \\ 0~\ldots~0\end{array} &
\matzs{S_2}
\end{array}
}
$$
(keeping only those columns that occur an odd number of times) define a degree-$d$ zero-sum set $S$ with at most $m_1+m_2$ elements of rank $n_1+n_2$. More precisely, if both $S_1$ and $S_2$ contain the zero vector, then the resulting zero-sum set has size $m_1 + m_2 - 2$, since the repeated zero column is cancelled. Otherwise, $S$ has size $m_1 + m_2$.
\end{proof}
\begin{proposition}
\PropLabel{lower_bound_n_plus_d}
Let $d \geq 2$. If there exists an $S \in \ZS{n}{m}{d}$, one can construct a zero-sum set in $\ZS{(n+d)}{m}{d-1}$. In particular, for $n > d$, $\minzs{n}{d} \geq \minzs{n+d}{d-1}$.
\end{proposition}
\begin{proof}
Let $\matzs{S} = \rowtwo{\idmat{n}}{L}$ be a matrix for $S$ in systematic form. By reordering the rows and columns of $\matzs{S}$, one can bring it into the form
\eql{proof_form}{
\psquare{
\begin{array}{c|c|c|c} 1 \dots 1 & 1 & 0 \dots 0 & 0 \dots 0 \\
A & 0 & B & \idmat{(n-1)}\end{array}
},
}
where $A \in \F_2^{(n-1) \times m_1}$ and $B \in \F_2^{(n-1) \times m_2}$ for some $m_1$, $m_2$ with $m_1+m_2+n = m$. Moreover, $m_1$ cannot be zero because the first row must have an even weight.
We see that $\left[\begin{array}{c|c} A & 0\end{array}\right]$ must define a degree-$(d-1)$ zero-sum set in $\F_2^{n-1}$, i.e., $\left[\begin{array}{c|c} A & 0\end{array}\right] = \matzs{T}$ for a $T \in \ZS{r}{(m_1+1)}{d-1}$. This is simply because the Hadamard (component-wise) product of any $d-1$ rows of $\left[\begin{array}{c|c} A & 0\end{array}\right]$ can be expressed as the Hadamard (component-wise) product of $d$ rows of $\matzs{S}$, i.e., the $d-1$ rows at the same positions as those of $\left[\begin{array}{c|c} A & 0\end{array}\right]$ and the first row $[1 1 \dots 1 0 0 \dots 0]$. We conclude that $m_1 = |T| \geq 2^d$ and thus, $r \geq d$.
Let $v_1,\dots,v_d$ be $d$ linearly independent rows of $A$ and consider the matrix
\[ \left[ \begin{array}{c|c|c|c}
1 \dots 1 & 1 & 0 \dots 0 & 0 \dots 0 \\
A & 0 & B & \idmat{(n-1)} \\
v_1 & 0 & 0 \dots 0 & 0 \dots 0\\
v_2 & 0 & 0 \dots 0 & 0 \dots 0\\
\vdots & \vdots & \vdots & \vdots \\
v_d & 0 & 0 \dots 0 & 0 \dots 0\\
\end{array} \right]\;,\]
which must define a zero-sum set in $\ZS{(n+d)}{m}{d-1}$ by the same argument as above, i.e., the Hadamard product of any $d-1$ rows can be expressed as the Hadamard product of $d$ rows of $\matzs{S}$. It is also easy to see that no linear combination of rows can be equal to zero, i.e. the constructed set has full rank $n+d$.
\end{proof}
Using the above result and \PropRef{min_2}, we can prove a lower bound on $\minzs{n}{3}$ as follows.
\begin{corollary}
\Label{cor:Fn3}
For $n \geq 4$, we have $\minzs{n}{3} \geq 2n+6$.
\end{corollary}
So far, we were able to characterize the minimal degree-$d$ zero-sum sets for $d=1$ and $d=2$ and proved some inequalities for the general case. Further, we can use the following classification theorem by Kasami, Tokura and Azumi in order to derive some more exact values of $\minzs{n}{d}$.
\begin{theorem}[\cite{kasami_tokura,kasami_tokura_azumi}]
\ThmLabel{low-weight-bf}
Let $r\geq 2$ and let $f \in \BF{n}{r}$ with $\wt(f) < 2^{n-r+1}$. Then $f$ is domain affine equivalent to either $(i)$ or $(ii)$, where
\enumroman
\begin{enumerate}
\item $f = x_1 \ldots x_{r-2}(x_{r-1} x_{r} + x_{r+1} x_{r+2} + \ldots + x_{r+2\ell-3}x_{r+2\ell-2}), n \geq r+2\ell -2$
\vspace{.5em}
\item $f = x_1 \ldots x_{r-\ell}(x_{r-\ell+1}\ldots x_{r} + x_{r+1} \ldots x_{r+\ell}), r \geq \ell, n \geq r + \ell$\;.
\end{enumerate}
\end{theorem}
A direct application leads to the following results.
\begin{proposition}[Values of $\minzs{n}{d}$ for $n \leq 2d+4$]
\PropLabel{diagonal_values}
\enumroman
$ $\newline % hack to start with newline
\begin{enumerate}
\item $\minzs{d+1}{d} = 2^{d+1}$.
\item $\minzs{d+2}{d} = 2^{d+1}$ and the minimal zero-sum sets in $\F_2^{d+2}$ correspond to the Boolean functions of algebraic degree $1$.
\item $\minzs{d+3}{d} = 3 \cdot 2^{d}$ and the minimal zero-sum sets in $\F_2^{d+3}$ correspond to the Boolean functions domain affine equivalent to $x \mapsto x_1x_2 + x_3x_4$.
\item For $d + 4 \le n \le 2d+3$, $\minzs{n}{d} = 2^{2d-n+4}(2^{n-d-2}-1) = \wt(h_{n,d})$,
where
\[
r=n-d-1, h_{n,d}\colon (x_1,\dots,x_n) \mapsto x_1(x_2x_3\dots x_{r} + x_{r+1}x_{r+2}\dots x_{2r-1})\;.
\]
\item $\minzs{2d+4}{d} = 2^{d+2} = \wt(g_d)$, where: \[
g_d\colon (x_1,\dots,x_{2d+4}) \mapsto x_1(x_2x_3\dots x_{d+3} + (x_2+1)x_{d+4}x_{d+5}\dots x_{2d+4})\;.
\]
\end{enumerate}
\end{proposition}
\begin{proof}
For $d \in \ZZplus, d<n$, let us define the set \[S_{n,d} \coloneqq \{g \in \BF{n}{d}\setminus \{0\} \text{ with } \dim \AN_1(g) \leq 1 \}\;.\]
From \PropRef{annihilator_construction} we know that $\minzs{n}{d} = \min \{\wt(g) \mid g \in S_{n,n-d-1} \}$. Therefore, we trivially obtain $\minzs{d+1}{d} = 2^{d+1}$. $S_{d+2,1}$ is the set of Boolean functions of algebraic degree $1$ and thus $\minzs{d+2}{d} = 2^{d+1}$.
To obtain the minimum weight of functions in $S_{d+3,2}$, we first note that every Boolean function of algebraic degree $2$ of the minimum weight $2^{d+1}$ must be domain affine equivalent to a monomial function, i.e., $x \mapsto x_1x_2$ (see Proposition 12 of~\cite{BMM:Carlet07}). As this monomial function admits the annihilators $x \mapsto x_1 +1$ and $x \mapsto x_2+1$, the minimum weight in $S_{d+3,2}$ must be at least $2^{d+2} - 2^{d}$ (see, e.g.,~\cite[p. 70]{BMM:Carlet07} for the possible weights of quadratic Boolean functions). This weight is obtained by the function $x \mapsto x_1x_2 + x_3x_4$, which clearly is in $S_{d+3,2}$. To see that all other functions in $S_{d+3,2}$ of minimal weight are domain affine equivalent to it, it is enough to see that all of the functions
$$
q_{n,\ell}\colon (x_1,\dots,x_n) \mapsto x_1x_2 + x_3x_4 + \dots + x_{2\ell-1}x_{2\ell}
$$
with $\ell \geq 3$ have a strictly larger weight. Indeed, by induction on $\ell$, it can be easily shown that $\wt(q_{n,\ell}) = 2^{n-1} - 2^{n-\ell-1}$.
Let now $d+4 \le n \le 2d+3$. It is easy to see that $h_{n,d} \in S_{n,n-d-1}$. Further, its weight can be computed as \[
\wt(h_{n,d}) =
2^{d+1} + 2^{d+1} - 2^{2d-n+4} =
2^{2d-n+4}(2^{n-d-2}-1)\;.
\]
It is left to show that $h_{n,d}$ is an element of minimum weight in $S_{n,n-d-1}$. Let therefore be $h'$ in $S_{n,n-d-1}$ with $\wt(h') \leq \wt(h_{n,d})$. Since $\wt(h_{n,d}) < 2^{n-(n-d-1)+1}=2^{d+2}$, the assumptions of \ThmRef{low-weight-bf} are fulfilled and $h'$ would be domain affine equivalent to one of the forms given in cases $(i)$ and $(ii)$ of \ThmRef{low-weight-bf}. If $n \geq d + 5$, Case $(i)$ corresponds to a Boolean function of the form $x \mapsto x_1x_2 g$ which admits $x \mapsto x_1+1$ and $x \mapsto x_2+1$ as degree-$1$ annihilators. For $n = d + 4$, Case $(i)$ corresponds to a function of the form
$$
x \mapsto x_1(x_2x_3 + x_4x_5 + \dots + x_{2\ell}x_{2\ell+1}) = x_1g
$$
for $g \in S_{n,2}$ and, therefore, its weight must be at least $2^{n-2}-2^{n-4} = 2^{2d-n+4}(2^{n-d-2}-1)$.
Otherwise, $h'$ must be domain affine equivalent to one of the functions given in Case $(ii)$. Since it cannot admit two annihilators of algebraic degree $1$, it must be domain affine equivalent to either
$$
x \mapsto x_1(x_2x_3\dots x_{r} + x_{r+1}x_{r+2}\dots x_{2r-1}) = h_{n,d},
$$
or
$$
g_{n,d}\colon x \mapsto x_1x_2 \dots x_{r} + x_{r+1}x_{r+2} \dots x_{2r},
$$
where $r = n-d-1$.
As
$$
\wt(g_{n,d}) = 2^{2d-n+3}(2^{n-d-1}-1) > \wt(h_{n,d})=2^{2d-n+3}(2^{n-d-1}-2),
$$
the point $(iv)$ follows.
It is easy to see that $\wt(g_d)=2^{d+2}$, i.e., $F(2d+4,d)\le 2^{d+2}$. By \PropRef{extending_construction} and point $(iv)$ of this proposition, $F(2d+4,d) \ge F(2d+5,d+1)/2 = 2^{d+2}-1$. Since $F(2d+4,d)$ has to be even, the point $(v)$ follows.
\end{proof}
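As a concrete illustration of point $(iv)$, take $d=3$ and $n=8$, so that $r = n-d-1 = 4$ and
$$
h_{8,3}(x_1,\dots,x_8) = x_1(x_2x_3x_4 + x_5x_6x_7).
$$
On the variables $x_2,\dots,x_7$ the bracket has weight $2^3+2^3-2 = 14$, the variable $x_8$ is free and $x_1$ must equal $1$, so $\wt(h_{8,3}) = 2 \cdot 14 = 28 = 2^{2d-n+4}(2^{n-d-2}-1)$, in accordance with the formula, i.e., $\minzs{8}{3} = 28$. Similarly, point $(iv)$ gives $\minzs{9}{3} = 2(2^4-1) = 30$, which is attained by the set $S \subseteq \F_2^9$ given after \PropRef{extending_construction}.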
We are now going to show that, for any fixed $d$, the sequence $\minzs{n}{d}$ is non-decreasing in $n$. For that, we need the following lemma.
\begin{lemma}
\LemLabel{growing}
For $n > 2d+3$, we have $\minzs{n}{d} \leq \frac{2^n}{n+1}$.
\end{lemma}
\begin{proof}
%If we substitute $d$ by $n-k$, the condition $n > 2d+3$ is equivalent to $n <2k-3$ where $k \geq 5$.
By repeatedly applying \PropRef{extending_construction}, we obtain
%\[\minzs{n}{d} \leq 2^{n-k-1}(k+2) = 2^n \frac{k+2}{2^{k+1}}\;.\]
\[\minzs{n}{d} \leq 2^{d-1}(n-d+2) = 2^n \frac{n-d+2}{2^{n-d+1}}\;.\]
It is left to show that $\frac{n-d+2}{2^{n-d+1}} \leq \frac{1}{n+1}$, i.e., that $(n+1)(n-d+2) \leq 2^{n-d+1}$. Indeed,
% \[(n+1)(n-d+2) < (2k-2)(k+2) = 2(k-1)(k+2) \leq 2^{k+1}\;,\]
\[(n+1)(n-d+2) < (2n-2d-2)(n-d+2) = 2(n-d-1)(n-d+2) \leq 2^{n-d+1}\;,\]
where the first inequality uses $n > 2d+3$ and the last one holds for $n - d \geq 5$. The latter is guaranteed by $n \ge 2d + 4$ and $d \ge 1$. This proves the statement.
\end{proof}
\begin{proposition}
For $n > d+1$, we have $\minzs{n}{d} \geq \minzs{n-1}{d}$.
\end{proposition}
\begin{proof}
We prove this statement by induction on $d$. If $d = 1$ or $d=2$, the statement is obviously true by \PropRef{min_1} and \PropRef{min_2}. Let therefore $d \geq 3$ and assume that the statement is true for $d-1$.
Let $S \in \ZS{n}{m}{d}$ be a minimal zero-sum set, i.e., $m = \minzs{n}{d}$, such that $\matzs{S}$ can be given as in \EqRef{proof_form} for $A \in \F_2^{(n-1) \times m_1}$ and $B \in \F_2^{(n-1) \times m_2}$ with $m_1$, $m_2$ such that $m_1+m_2+n = m$. Let $m' \coloneqq m_2+n-1$. We see that $[B | \idmat{(n-1)}]$ must define a degree-$(d-1)$-zero-sum set in $\F_2^{n-1}$, i.e., $[B | \idmat{(n-1)}] = \matzs{T}$ for a $T \in \ZS{(n-1)}{m'}{d-1}$. This is because every $(d-1) \times (m')$ submatrix of $\matzs{T}$ must occur an even number of times (from the property of $S$ being a degree-$d$ zero-sum set) and, since $\matzs{T}$ contains $\idmat{(n-1)}$, it must have rank $n-1$. We now distinguish two cases.
Case 1 ($m' \leq \frac{m}{2}$): In that case we directly obtain
\[m = \minzs{n}{d} \geq 2 \minzs{n-1}{d-1} \geq 2 \minzs{n-2}{d-1} \geq \minzs{n-1}{d}\;,\]
where the second estimation follows from the induction hypothesis and the last one follows from \PropRef{extending_construction}.
Case 2 ($m' > \frac{m}{2}$): We first remark that if $n \leq 2d+3$, the statement directly follows from \PropRef{diagonal_values}. For example, for $n \geq d + 5$,
\[ \minzs{n}{d} = 2^{d+2} - 2^{2d-n+4} \geq 2^{d+2} - 2^{2d-n+5} = \minzs{n-1}{d}\;.\]
Let us therefore assume that $n > 2d +3$. Note that, in the matrix $\matzs{S}$, we can add the first row $[11\dots 1 00\dots 0]$ to any other row and obtain an equivalent zero-sum set. This operation does not change the right part of $\matzs{S}$ containing $\idmat{(n-1)}$. In particular, it allows us to obtain a zero-sum set $S_c \in \ZS{n}{m}{d}$ represented by
\eq{
\matzs{S_c} = \psquare{
\begin{array}{c|c|c|c}
1 \dots 1 & 1 & 0 \dots 0 & 0 \dots 0 \\
A + c^{\top} & c^{\top} & B & \idmat{(n-1)}
\end{array}
}
}
for any $c \in \F_2^{n-1}$. Let us denote by $R$ the set of columns of $A$ together with the $(n-1)$-bit zero vector. Our statement to prove follows if we can guarantee the existence of a vector $\tilde{c}$ such that, for all $v \in (R+\tilde{c}^{\top})$, $\wt(v) \geq 2$. Then, we would obtain a zero-sum set in $\ZS{(n-1)}{m''}{d}$ defined by \[\left[ \begin{array}{c|c|c|c}
A + \tilde{c}^{\top} & \tilde{c}^{\top} & B & \idmat{(n-1)}\end{array} \right]\]
as there won't be any cancellation between $[A +\tilde{c}^{\top} \mid \tilde{c}^{\top}]$ and $\idmat{(n-1)}$, thus keeping the rank maximal. Indeed, such a vector must always exist. Assume that, for all $c \in \F_2^{n-1}$, there exists a $v \in (R+c^{\top})$ with weight at most $1$. This is equivalent to saying that the covering radius of the set $R \subseteq \field{n-1}$ is at most $1$. Since each vector covers exactly $n$ points of $\field{n-1}$ (itself and the $n-1$ vectors at Hamming distance $1$), a simple counting argument yields $|R| \geq \frac{2^{n-1}}{n}$. On the other hand, it is
$$
|R| = m - m' < \minzs{n}{d} - \frac{\minzs{n}{d}}{2} = \frac{1}{2} \minzs{n}{d} \leq \frac{2^{n-1}}{n+1}\;,
$$
where the last inequality follows from the previous lemma. This is a contradiction; therefore, such a vector $\tilde{c}$ always exists.
\end{proof}
|
{"hexsha": "6cf1fc677cc55b968b28d956350edbe4bbf6c99f", "size": 27122, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "thesis-source/9niLinear/4minimal.tex", "max_stars_repo_name": "hellman/thesis", "max_stars_repo_head_hexsha": "6ba1c2b241e63c07cf76108481c1b67f21a50f12", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2019-05-16T19:55:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T15:36:12.000Z", "max_issues_repo_path": "thesis-source/9niLinear/4minimal.tex", "max_issues_repo_name": "hellman/thesis", "max_issues_repo_head_hexsha": "6ba1c2b241e63c07cf76108481c1b67f21a50f12", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-09T11:26:45.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-09T11:26:45.000Z", "max_forks_repo_path": "thesis-source/9niLinear/4minimal.tex", "max_forks_repo_name": "hellman/thesis", "max_forks_repo_head_hexsha": "6ba1c2b241e63c07cf76108481c1b67f21a50f12", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-05T19:40:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-05T19:40:16.000Z", "avg_line_length": 72.7131367292, "max_line_length": 1023, "alphanum_fraction": 0.623774058, "num_tokens": 10947}
|
#ifndef BOOST_NETWORK_PROTOCOL_HTTP_MESSAGE_MODIFIERS_VERSION_HPP_20100608
#define BOOST_NETWORK_PROTOCOL_HTTP_MESSAGE_MODIFIERS_VERSION_HPP_20100608
// Copyright 2010 (c) Dean Michael Berris
// Copyright 2010 (c) Sinefunc, Inc.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <boost/mpl/if.hpp>
#include <boost/network/support/is_async.hpp>
#include <boost/network/support/is_sync.hpp>
namespace boost {
namespace network {
namespace http {
template <class Tag>
struct basic_response;
namespace impl {
template <class Tag, class T>
void version(basic_response<Tag> &response, T const &value,
mpl::false_ const & /*unused*/) {
response << boost::network::http::version(value);
}
template <class Tag, class T>
void version(basic_response<Tag> &response, T const &future,
mpl::true_ const & /*unused*/) {
response.version(future);
}
} // namespace impl
template <class Tag, class T>
void version(basic_response<Tag> &response, T const &value) {
impl::version(response, value, is_async<Tag>());
}
} // namespace http
} // namespace network
} // namespace boost
#endif // BOOST_NETWORK_PROTOCOL_HTTP_MESSAGE_MODIFIERS_VERSION_HPP_20100608
|
{"hexsha": "bb16abb0129c076f6bcf765de6ae563b9892f3ef", "size": 1302, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "boost/network/protocol/http/message/modifiers/version.hpp", "max_stars_repo_name": "yhager/cpp-netlib", "max_stars_repo_head_hexsha": "540ed7622be3f9534709036522f86bde1e84829f", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1502.0, "max_stars_repo_stars_event_min_datetime": "2015-01-01T13:41:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T14:38:26.000Z", "max_issues_repo_path": "boost/network/protocol/http/message/modifiers/version.hpp", "max_issues_repo_name": "yhager/cpp-netlib", "max_issues_repo_head_hexsha": "540ed7622be3f9534709036522f86bde1e84829f", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 419.0, "max_issues_repo_issues_event_min_datetime": "2015-01-01T18:25:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T11:23:22.000Z", "max_forks_repo_path": "boost/network/protocol/http/message/modifiers/version.hpp", "max_forks_repo_name": "yhager/cpp-netlib", "max_forks_repo_head_hexsha": "540ed7622be3f9534709036522f86bde1e84829f", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 421.0, "max_forks_repo_forks_event_min_datetime": "2015-01-01T16:04:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T22:48:07.000Z", "avg_line_length": 26.5714285714, "max_line_length": 77, "alphanum_fraction": 0.7442396313, "num_tokens": 316}
|
function gauss_quadrature(y::Array{<:Number},GLweights::AbstractArray{<:Real,1})
# Assume that y is evaluated at the zeros
# I = Σ wi*y(zi) where zi are the roots of Pl of the appropriate order
return squeeze(sum(GLweights.*y,1),1) :: Array{<:Number}
end
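# A small usage sketch (illustrative only; `gausslegendre` is assumed to come from
# FastGaussQuadrature.jl and to return Gauss-Legendre nodes and weights):
#   zi, wi = gausslegendre(16)
#   y = sin.(zi)                   # integrand evaluated at the quadrature nodes
#   I = gauss_quadrature(y, wi)    # ≈ ∫_{-1}^{1} sin(x) dx = 0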
function clenshaw_curtis_quadrature(y::Array{<:Real})::Array{<:Real}
N = size(y,1)::Int64-1 # number of divisions
g = (FFTW.r2r(y, FFTW.REDFT00, 1)/N) :: Array{<:Real}
inty = zeros(size(y)[2:end]...) :: Array{Float64}
for ind in eachindex(inty)
remaining_indices = ind2sub(size(inty),ind)
inty[ind] = g[1,remaining_indices...]
for k=1:div(N+1,2)-1
inty[ind] += 2g[2k+1,remaining_indices...]/(1-(2k)^2)
end
end
return inty
end
function clenshaw_curtis_quadrature(y::Array{<:Complex})::Array{<:Complex}
Ny = size(y,1)::Int64-1
# compute the fft of the even, complex sequence
slice = [1:size(y,dim) for dim in 2:ndims(y)]
g = (fft(vcat(y,y[Ny:-1:2,slice...]),1)/2Ny ) :: Array{<:Complex}
inty = zeros(Complex128,size(y)[2:end]...) :: Array{Complex128}
for ind in eachindex(inty)
remaining_indices = ind2sub(size(inty),ind)
inty[ind] = 2g[1,remaining_indices...]
for k=1:div(Ny+1,2)-1
inty[ind] += 2(g[2k+1,remaining_indices...]+
g[end-2k+1,remaining_indices...])/(1-(2k)^2)
end
end
return inty
end
#####################################################################################
#####################################################################################
# Integral on and in sphere
#####################################################################################
function sphericalIntegral3D(y::Array{<:Number,3},
r::Union{Real,AbstractArray{<:Real,1}}=1,
θ::AbstractArray{<:Real,1}=linspace(0,π,size(y,2)),
ϕ::AbstractArray{<:Real,1}=linspace(0,2π,size(y,3));
θint::String="clenshaw_quadrature",even::String="avg",
GLweights::Union{Void,AbstractArray{<:Real,1}} = nothing,
ϕ2π::Bool=true):: Array{<:Number,0}
if typeof(r) <: AbstractArray
z = simps(r.^2 .* y,x=r,even=even) :: Array{<:Number,2}
else
dr = r
z = dr^2 .* simps((1:size(y,1)).^2 .* y,dx=dr,even=even) :: Array{<:Number,2}
end
trapzSpherical2D(z,θ,ϕ,θint=θint,GLweights=GLweights,ϕ2π=ϕ2π) :: Array{<:Number,0}
end
function sphericalIntegral2D(y::Array{<:Number,2},
θ::AbstractArray{<:Real,1}=linspace(0,π,size(y,1)),
ϕ::AbstractArray{<:Real,1}=linspace(0,2π,size(y,2));
θint::String="clenshaw_quadrature",
GLweights::Union{Void,AbstractArray{<:Real,1}} = nothing,
ϕ2π::Bool=true):: Array{<:Number,0}
trapzSpherical2D(y,θ,ϕ,θint=θint,GLweights=GLweights,ϕ2π=ϕ2π)
end
function sphericalIntegral1D(y::Vector{<:Number},
ϕ::AbstractArray{<:Real,1}=linspace(0,2π,size(y,1));
ϕ2π::Bool=true):: Array{<:Number,0}
trapzCircle(y,ϕ,ϕ2π=ϕ2π)
end
function sphericalIntegral(y::Array{<:Number,N},args...;kwargs...) where N
@assert(N<=3,"Integrals above 3D are not defined")
f = eval(parse("sphericalIntegral"*string(N)*"D"))
    return f(y,args...;kwargs...) :: Array{<:Number,0}
end
function cylindricalIntegral1D(y::Vector{<:Number},
ϕ::AbstractArray{<:Real,1}=linspace(0,2π,size(y,1));
ϕ2π::Bool=true):: Array{<:Number,0}
trapzCircle(y,ϕ,ϕ2π=ϕ2π)
end
function cylindricalIntegral2D(y::Array{<:Number,2},
r::Union{Real,AbstractArray{<:Real,1}}=1,
θ::AbstractArray{<:Real,1}=linspace(0,2π,size(y,2));
even::String="avg",ϕ2π::Bool=true):: Array{<:Number,0}
if typeof(r) <: AbstractArray
z = simps(r.* y,x=r,even=even) :: Vector{<:Number}
else
dr = r
z = dr*simps((1:size(y,1)).* y,dx=dr,even=even) :: Vector{<:Number}
end
trapzCircle(z,θ,ϕ2π=ϕ2π) :: Array{<:Number,0}
end
function cylindricalIntegral3D(y::Array{<:Number,3},
r::Union{Real,AbstractArray{<:Real,1}}=1,
θ::AbstractArray{<:Real,1}=linspace(0,2π,size(y,2)),
z::Union{Real,AbstractArray{<:Real,1}}=1;
even::String="avg",ϕ2π::Bool=true):: Array{<:Number,0}
if typeof(r) <: AbstractArray
yθz = simps(r.* y,x=r,even=even) :: Array{<:Number,2}
else
dr = r
yθz = dr*simps((1:size(y,1)).* y,dx=dr,even=even) :: Array{<:Number,2}
end
T = promote_type(eltype(y), eltype(θ))
yz = zeros(T,size(y,3)) :: Array{<:Number,1}
for ind in eachindex(yz)
yz[ind] = trapzCircle(yθz[1:end,ind],θ,ϕ2π=ϕ2π)[1]
end
if typeof(z) <: AbstractArray
y_int = simps(z.* yz,x=z,even=even) :: Array{<:Number,0}
else
dz = z
y_int = dz*simps((1:size(yz,1)).* yz,dx=dz,even=even) :: Array{<:Number,0}
end
return y_int :: Array{<:Number,0}
end
function cylindricalIntegral(y::Array{<:Number,N},args...;kwargs...) where N
@assert(N<=3,"Integrals above 3D are not defined")
f = eval(parse("cylindricalIntegral"*string(N)*"D"))
    return f(y,args...;kwargs...) :: Array{<:Number,0}
end
|
{"hexsha": "45970cc45c30e90663af764aeba2c086ba02f0af", "size": 4672, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/periodic_integrals.jl", "max_stars_repo_name": "jishnub/NumericallyIntegrateArrays.jl", "max_stars_repo_head_hexsha": "96d11436753909b8d0c992055cb1696503b92b24", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-07T19:12:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-07T19:12:47.000Z", "max_issues_repo_path": "src/periodic_integrals.jl", "max_issues_repo_name": "jishnub/NumericallyIntegrateArrays.jl", "max_issues_repo_head_hexsha": "96d11436753909b8d0c992055cb1696503b92b24", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/periodic_integrals.jl", "max_forks_repo_name": "jishnub/NumericallyIntegrateArrays.jl", "max_forks_repo_head_hexsha": "96d11436753909b8d0c992055cb1696503b92b24", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:04:29.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:04:29.000Z", "avg_line_length": 31.5675675676, "max_line_length": 85, "alphanum_fraction": 0.6286386986, "num_tokens": 1734}
|
# -*- coding: UTF-8 -*-
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Colour transformations (hue/saturation, brightness, histogram and gamma curve)
def img_color(imgPath):
original_img = cv2.imread(imgPath)
    img = cv2.resize(original_img,None,fx=0.8,fy=0.8,
                     interpolation=cv2.INTER_AREA) # shrink the image
    Make_border_img = cv2.copyMakeBorder(img, 30, 30, 0, 0,
                                         cv2.BORDER_CONSTANT,
                                         value=(0, 0, 0)) # add a border (30-pixel black strips on the top and bottom of the original)
    img_hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV) # convert to HSV
    colorless_hsv = img_hsv.copy()
    colorless_hsv[:,:,1] = 0.5 * colorless_hsv[:,:,1] # halving the saturation makes the image less vivid, i.e. grayer
    colorless_img = cv2.cvtColor(colorless_hsv,cv2.COLOR_HSV2BGR)
    darker_hsv = img_hsv.copy()
    darker_hsv[:,:,2] = 0.5 * darker_hsv[:,:,2] # halve the value (brightness) channel
    darker_img = cv2.cvtColor(darker_hsv,cv2.COLOR_HSV2BGR)
    img_corrected = gamma_trans(img,0.5) # apply gamma correction
cv2.imshow("original_img",img) # 原图
cv2.imshow("Make_border_img",Make_border_img) # 添加黑边
cv2.imshow('colorless_jpg',colorless_img) # 图像变灰
cv2.imshow('darker_jpg',darker_img) # 图像变暗
cv2.imshow('gamma_corrected_jpg',img_corrected) # gamma校正
# cv2.waitKey(0)
# cv2.destroyAllWindows()
    hist_b = cv2.calcHist([img],[0],None,[256],[0,256]) # compute the histogram of each channel separately
    hist_g = cv2.calcHist([img],[1],None,[256],[0,256])
    hist_r = cv2.calcHist([img],[2],None,[256],[0,256])
    plt.plot(hist_b, label='B', color='blue') # plot the colour histograms of the three channels
plt.plot(hist_g, label='G', color='green')
plt.plot(hist_r, label='R', color='red')
plt.legend(loc='best')
plt.xlim([0, 256])
plt.show()
def gamma_trans(img,gamma):
gamma_table = [np.power(x/255.0,gamma)*255.0 for x in range(256)]
gamma_table = np.round(np.array(gamma_table)).astype(np.uint8)
    return cv2.LUT(img,gamma_table) # apply the mapping with OpenCV's look-up-table function
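# Note: gamma < 1 brightens mid-tones while gamma > 1 darkens them; for example,
# with gamma = 0.5 an input value of 64 maps to round((64/255) ** 0.5 * 255) = 128.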
if __name__ == "__main__":
imgPath = 'src/python-opencv/a.jpg'
# img_three(imgPath)
img_color(imgPath)
|
{"hexsha": "6f18e52a0d0215bcebedd70876bcae19b7964808", "size": 2161, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/python-opencv/001-base/test_004_change_color.py", "max_stars_repo_name": "bjlhx15/python-algorithm", "max_stars_repo_head_hexsha": "bbd162e194359a01806922d73b709fe64fcfa422", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/python-opencv/001-base/test_004_change_color.py", "max_issues_repo_name": "bjlhx15/python-algorithm", "max_issues_repo_head_hexsha": "bbd162e194359a01806922d73b709fe64fcfa422", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/python-opencv/001-base/test_004_change_color.py", "max_forks_repo_name": "bjlhx15/python-algorithm", "max_forks_repo_head_hexsha": "bbd162e194359a01806922d73b709fe64fcfa422", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9122807018, "max_line_length": 81, "alphanum_fraction": 0.5923183711, "include": true, "reason": "import numpy", "num_tokens": 749}
|
"""
Class to manage building, loading, and saving features for an action
recognition CNN over the NTURGB dataset
Features Implemented
--------------------
- 3D voxel flow
- 3D image as voxel grid
"""
import sys, os
import numpy as np
from ntu_rgb import NTU
from sysu_dataset import SYSU
from tqdm import tqdm, trange
import multiprocessing
from config import *
####################
# Files & Directories
# CACHE_DIR = "/home/mike/Documents/Activity_Recognition/nturgb+d_features_small"
# CACHE_DIR = "/hdd/Datasets/SYSU/voxel_flow_3D_54"
CACHE_DIR = "/hdd/Datasets/NTU/ntu_3D_voxel_images"
####################
# Hyper Parameters
# K -- The number of features per video
K = 5
# T -- The number of frames in each feature
T = 10
class FeatureManager:
def __init__(self):
self.dataset = NTU()
# self.dataset = SYSU()
def build_feature(self, vid_id):
"""
Build the specified feature using the dataset wrapper
"""
# Get the voxel flow from the ntu_rgb wrapper
vox_flow = self.dataset.get_voxel_flow(vid_id)
# Split up video into K equal parts of size T
frames = vox_flow.shape[0]
skip_amount = (frames - T) / (K - 1)
features = []
for feature_idx in range(K):
start = int(skip_amount * feature_idx)
end = int(start + T)
feature = np.vstack(vox_flow[start:end,1:,:,:,:]) # Stack frames
features.append(feature)
# Combine all chunks into one tensor
stacked_feature = np.stack(features)
return stacked_feature
def save_feature_sparse(self, feature, vid_id):
"""
Create the feature, save the non-zero values along with all the data
needed to fill it back in
"""
# Get nonzero values
nonzeros = np.array(np.nonzero(feature))
# Save the non-zero locations, values, and shape of original feature
np.save("{}/{:05}.npy".format(CACHE_DIR, vid_id), feature[tuple(nonzeros)])
np.save("{}/{:05}.nonzeros.npy".format(CACHE_DIR, vid_id), nonzeros)
np.save("{}/{:05}.shape.npy".format(CACHE_DIR, vid_id), feature.shape)
def load_feature(self, vid_id):
"""
Load a feature from the cached data
"""
# Load the data
feat_values = np.load("{}/{:05}.npy".format(CACHE_DIR, vid_id))
feat_nonzero = np.load("{}/{:05}.nonzeros.npy".format(CACHE_DIR, vid_id))
feat_shape = np.load("{}/{:05}.shape.npy".format(CACHE_DIR, vid_id))
# Rebuild the feature from the saved data
feature = np.zeros(feat_shape, np.float32)
feature[tuple(feat_nonzero)] = feat_values
return feature
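    # A round-trip sketch (assumes video 0 has already been cached to CACHE_DIR):
    #   fm = FeatureManager()
    #   fm.save_feature_sparse(fm.build_feature(0), 0)
    #   feat = fm.load_feature(0)   # dense tensor rebuilt from the stored nonzeros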
def load_3D_image(self, vid_id):
VOXEL_SIZE = 108
feat_nonzero = np.load("{}/{:05}.nonzeros.npy".format(CACHE_3D_IMAGES, vid_id))
feature = np.zeros([5, VOXEL_SIZE, VOXEL_SIZE, VOXEL_SIZE], np.float32)
feature[tuple(feat_nonzero)] = 1
return feature
def build_and_save_3D_image(self, vid_id):
VOXEL_SIZE = 108
op_flow_3D = np.load('{}/{:05}.npz'.format(CACHE_3D_IMAGES, vid_id))['arr_0']
num_frames = len(op_flow_3D)
all_xyz = np.vstack(op_flow_3D)
max_x, max_y, max_z = np.max(all_xyz, axis=0)[:3] + 0.00001
min_x, min_y, min_z = np.min(all_xyz, axis=0)[:3]
voxel_flow_tensor = np.zeros([num_frames, 4, VOXEL_SIZE, VOXEL_SIZE, VOXEL_SIZE])
for frame in range(num_frames):
vox_x = np.floor((op_flow_3D[frame][:,0] - min_x)/(max_x - min_x) * VOXEL_SIZE).astype(np.uint8)
vox_y = np.floor((op_flow_3D[frame][:,1] - min_y)/(max_y - min_y) * VOXEL_SIZE).astype(np.uint8)
vox_z = np.floor((op_flow_3D[frame][:,2] - min_z)/(max_z - min_z) * VOXEL_SIZE).astype(np.uint8)
voxel_flow_tensor[frame, 0, vox_x, vox_y, vox_z] = 1
skip_amount = (voxel_flow_tensor.shape[0] - T) / (K - 1)
fancy_idx = [int(skip_amount * i)+5 for i in range(K)]
feature = voxel_flow_tensor[fancy_idx,0,:,:,:]
nonzeros = np.array(np.nonzero(feature)).astype(np.uint8)
np.save("{}/{:05}.nonzeros.npy".format(CACHE_DIR, vid_id), nonzeros)
def main():
"""
Cache all features
"""
fm = FeatureManager()
    for x in tqdm(range(fm.dataset.num_vids), desc="Creating features"):
feature = fm.build_feature(x)
fm.save_feature_sparse(feature, x)
if __name__ == '__main__':
main()
|
{"hexsha": "c73366d5a1ea9b216f335a781eda6d2c320685c0", "size": 4517, "ext": "py", "lang": "Python", "max_stars_repo_path": "feature_manager.py", "max_stars_repo_name": "mpeven/ntu_rgb", "max_stars_repo_head_hexsha": "4a8b43c521500907d2f241e4b440381cf8c62350", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2017-12-21T12:06:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-13T08:15:38.000Z", "max_issues_repo_path": "feature_manager.py", "max_issues_repo_name": "3huo/ntu_rgb", "max_issues_repo_head_hexsha": "4a8b43c521500907d2f241e4b440381cf8c62350", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-07-26T02:27:32.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-13T06:56:22.000Z", "max_forks_repo_path": "feature_manager.py", "max_forks_repo_name": "mpeven/ntu_rgb", "max_forks_repo_head_hexsha": "4a8b43c521500907d2f241e4b440381cf8c62350", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-09-20T06:54:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-16T09:12:50.000Z", "avg_line_length": 29.522875817, "max_line_length": 108, "alphanum_fraction": 0.6207659951, "include": true, "reason": "import numpy", "num_tokens": 1223}
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# @author : East
# @time : 2019/7/14 18:03
# @file : 02.2d_laplace.py
# @project : fempy
# software : PyCharm
import numpy as np
import matplotlib.pyplot as plt
from fempy.mesh import Mesh2D
from fempy.fem2d import FEM2D
from fempy.femplot.plot2d import tri_mesh, tri_tripcolor, tri_contour, tri_contourf
r"""
\nabla^2 u(x, y) + \nabla u(x, y) + u(x, y) = f(x, y)
\nabla^2 u
----------
(\nabla^2 u, \phi_j)
    = -(\nabla u, \nabla \phi_j)
= -(\sum^n_{i=1} u_i \nabla \phi_i, \nabla \phi_j)
= -\sum^n_{i=1} \left(u_i (\nabla \phi_i, \nabla \phi_j) \right)
\partial u
----------
(\nabla u, \phi_j)
= (\sum^n_{i=1} u_i \partial \phi_i, \phi_j)
= \sum^n_{i=1} \left(u_i (\partial \phi_i, \phi_j) \right)
u
----------
(u, \phi_j)
= (\sum^n_{i=1} u_i \phi_i, \phi_j)
= \sum^n_{i=1} \left(u_i (\phi_i, \phi_j) \right)
matrix compute
--------------
2 dim, triangular mesh, linear basis function
.. Tips: basis grad is constant in linear basis function.
gauss_n = int: N
basis_v = ndarray, (N, 3)
-------------------------
[[basis_0(p[0]), basis_1(p[0]), basis_2(p[0])],
.............., ............., ..............,
[basis_0(p[n]), basis_1(p[n]), basis_2(p[n])],]
basis_g = ndarray, (2, 3)
-------------------------
[[basis_0_x(p[0]), basis_1_x(p[0]), basis_2_x(p[0])],
[basis_0_y(p[0]), basis_1_y(p[0]), basis_2_y(p[0])]]
basis_g = ndarray, (2, N, 3)
----------------------------
[[[basis_0_x(p[0]), basis_1_x(p[0]), basis_2_x(p[0])],
................, ..............., ................,
[basis_0_x(p[n]), basis_1_x(p[n]), basis_2_x(p[n])],],
[[basis_0_y(p[0]), basis_1_y(p[0]), basis_2_y(p[0])],
................, ..............., ................,
[basis_0_y(p[n]), basis_1_y(p[n]), basis_2_y(p[n])]]]
\nabla^2 u
----------
-\sum^n_{i=1} \left(u_i (\nabla \phi_i, \nabla \phi_j) \right)
if grad is constant
^^^^^^^^^^^^^^^^^^^
> np.dot(basis_g.T, basis_g) * np.sum(gauss_w)
[[u_0_x, u_0_y],
[u_1_x, u_1_y],
[u_2_x, u_2_y],]
[[u_0_x, u_1_x, u_2_x, ],
[u_0_y, u_1_y, u_2_y, ]]
if grad is not constant
^^^^^^^^^^^^^^^^^^^^^^^
> basis_g_x, basis_g_y = basis_g
> np.dot(basis_g_x.T * gauss_w, basis_g_x) + np.dot(basis_g_y.T * gauss_w, basis_g_y)
.. (3, N), (3, N) = (2, 3, N)
.. (3, N) .* (N, ) * (N, 3) + (3, N) .* (N, ) * (N, 3) -> (3, 3)
\partial u
--------
\sum^n_{i=1} \left(u_i (\partial \phi_i, \phi_j) \right)
if grad is constant
^^^^^^^^^^^^^^^^^^^
    > basis_g_x, basis_g_y = basis_g
    > np.outer(np.dot(basis_v.T, gauss_w), basis_g_x)
.. (N, 3).T * (N, ) -> (3, 1) * (1, 3) -> (3, 3)
if grad is not constant
^^^^^^^^^^^^^^^^^^^^^^^
> basis_g_x, basis_g_y = basis_g.transpose((0, 1, 2))
> np.dot(basis_v.T * gauss_w, basis_g_x)
u
---
\sum^n_{i=1} \left(u_i (\phi_i, \phi_j) \right)
    > np.dot(basis_v.T * gauss_w, basis_v)
.. (3, N) * (N, 3) -> (3, 3)
"""
r"""
A simple example of **2D Laplace equation**.
.. math::
\begin{cases}
        -\nabla^2 u = 2\pi^2 \sin(\pi x) \sin(\pi y) \\
u(x, 0) = u(0, y) = u(x, 1) = u(1, y) = 0
\end{cases}
and the variational formulation is
.. math::
\begin{align}
(-\nabla^2 u, v) &= (f, v) \\
(\nabla u, \nabla v) &= (f, v) \\
(\sum^n_{i=0} u_i \nabla \phi_i, \nabla \phi_j) &= (f, \phi_j),\quad j=1,\cdots,n \\
        \sum^n_{i=0} u_i (\nabla \phi_i, \nabla \phi_j) &= (f, \phi_j),\quad j=1,\cdots,n \\
\end{align}
and the *exact solution* is :math:`u = \sin(\pi x) \sin(\pi y)`.
The default is the **linear basis functions**.
"""
__test_name__ = '02.2d_laplace'
# Init condition
# --------------
def f(x, y):
return 2 * np.pi ** 2 * np.sin(np.pi * x) * np.sin(np.pi * y)
def u_true(x, y):
return np.sin(np.pi * x) * np.sin(np.pi * y)
def variation(basis_v, basis_g, gauss_p, gauss_w):
f_v = f(*gauss_p.T)
f_elem = np.dot(f_v * gauss_w, basis_v)
a_elem = np.dot(basis_g.T, basis_g) * np.sum(gauss_w)
return a_elem, f_elem
# compute numerical solution
# --------------------------
mesh = Mesh2D('rect_tri', [(0, 1, 16)])
fem = FEM2D(variation, mesh)
fem.run()
# print and save solution
# -----------------------
fem.save_data(file_name='02.2d_laplace', form='npy')
# print('> Matrix A=\n', fem.a_mat)
# print('> Matrix F=\n', fem.f_lst)
print('> Solution U=\n', fem.u_lst)
print('> L2Error =', fem.error_l2(u_true))
# process data
# ------------
z = fem.u_lst
mtri = fem.mesh.mtri
xp, yp = fem.mesh.points.T
# interpolation
r_mesh, r_u = fem.mesh.refine(z, subdiv=3)
# visualization
# -------------
cm = plt.cm.get_cmap('RdBu_r')
fig, axs = plt.subplots(figsize=(12, 10), nrows=2, ncols=2)
axs = axs.flatten()
axs[0].set_aspect('equal')
tri_mesh(axs[0], mtri, mfc='tab:green')
axs[1].set_aspect('equal')
tcf = tri_contourf(axs[1], mtri, z, levels=10, cmap=cm)
fig.colorbar(tcf, ax=axs[1])
axs[2].set_aspect('equal')
tri_tripcolor(axs[2], mtri, z, edgecolor='k', cmap=cm)
axs[3].set_aspect('equal')
tcf = tri_contour(axs[3], mtri, z, levels=10, cmap=cm)
fig.colorbar(tcf, ax=axs[3])
plt.savefig(__test_name__ + '.png', dpi=200)
# plt.show()
|
{"hexsha": "58ccf255a3fd7c4ef454db9e7e8ae67a6f364a14", "size": 5067, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/02.2d_laplace/02.2d_laplace.py", "max_stars_repo_name": "EastMagica/fempy", "max_stars_repo_head_hexsha": "5f16fe458a63ede5a49925a691924fbbbea767ec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/02.2d_laplace/02.2d_laplace.py", "max_issues_repo_name": "EastMagica/fempy", "max_issues_repo_head_hexsha": "5f16fe458a63ede5a49925a691924fbbbea767ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/02.2d_laplace/02.2d_laplace.py", "max_forks_repo_name": "EastMagica/fempy", "max_forks_repo_head_hexsha": "5f16fe458a63ede5a49925a691924fbbbea767ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.2236842105, "max_line_length": 88, "alphanum_fraction": 0.5486481153, "include": true, "reason": "import numpy", "num_tokens": 1959}
|
import sys
# insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, '/home/austin/Github/natural-selection-simulator/lib/')
from pylive.pylive import live_plotter
import numpy as np
size = 100
x_vec = np.linspace(0,1,size+1)[0:-1]
y_vec = np.random.randn(len(x_vec))
line1 = []
while True:
rand_val = np.random.randn(1)
y_vec[-1] = rand_val
line1 = live_plotter(x_vec,y_vec,line1)
y_vec = np.append(y_vec[1:],0.0)
|
{"hexsha": "b16b485f47bb21c3d1b7d40016bfd1044175d024", "size": 444, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/pylive_test.py", "max_stars_repo_name": "austinmdillow/natural-selection-simulator", "max_stars_repo_head_hexsha": "01c7d3ba310a3629a04f1a7e67c04a1b87ee4f09", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/pylive_test.py", "max_issues_repo_name": "austinmdillow/natural-selection-simulator", "max_issues_repo_head_hexsha": "01c7d3ba310a3629a04f1a7e67c04a1b87ee4f09", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-06-12T20:52:07.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-12T20:52:07.000Z", "max_forks_repo_path": "tests/pylive_test.py", "max_forks_repo_name": "austinmdillow/natural-selection-simulator", "max_forks_repo_head_hexsha": "01c7d3ba310a3629a04f1a7e67c04a1b87ee4f09", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6, "max_line_length": 74, "alphanum_fraction": 0.6959459459, "include": true, "reason": "import numpy", "num_tokens": 144}
|
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Method to merge all .csv files into one, so we can enter all data
def merge_data():
data = [files for files in os.listdir(os.path.join('..', '..', 'data','raw')) if files.endswith('.csv')]
dataFrames = []
for i in range(len(data)):
dataFrames.append(pd.read_csv(r"{0}/{1}".format(os.path.join('..', '..', 'data','raw'), data[i]), low_memory=False))
result = pd.concat(dataFrames, ignore_index=True)
return load_and_process(result)
# Method Chaining
def load_and_process(result):
# Method Chain 1 (Load data and deal with missing data)
df1 = (
result
.dropna(subset=['Junction_Control'], how='all')
.reset_index(drop=True)
)
# Method Chain 2 (Create new columns, drop others, and do processing)
df2 = (
df1.drop(['Accident_Index', 'Urban_or_Rural_Area', 'Location_Easting_OSGR', 'Location_Northing_OSGR',
'LSOA_of_Accident_Location', '1st_Road_Class',
'1st_Road_Number', 'Special_Conditions_at_Site',
'2nd_Road_Class', '2nd_Road_Number', 'Junction_Detail',
'Local_Authority_(District)', 'Local_Authority_(Highway)'], axis=1)
.reset_index(drop=True)
.rename(columns={"Weather_Conditions": "Weather_Type", "Did_Police_Officer_Attend_Scene_of_Accident": "Police_Presense"})
.replace({'Police_Presense': {'Yes': True, 'No': False}})
.replace({'Day_of_Week': {1: 'Monday', 2: 'Tuesday', 3: 'Wednesday', 4: 'Thursday', 5: 'Friday', 6: 'Saturday', 7: 'Sunday'}})
.convert_dtypes()
.sort_values("Year", ascending=True)
)
df2['Date'] = pd.to_datetime(df2['Date'], format='%d/%m/%Y')
return df2
def process_data(data):
SPLIT_FILE_NUMBER = 12 # Number of files we want to split
for i, merged_df in enumerate(np.array_split(data, SPLIT_FILE_NUMBER)): # Figuring out how we want to split data size between SPLIT_FILE_NUMBER
print('🚨 Started to create formatted .csv file [{}/{}]\n'.format(i + 1, SPLIT_FILE_NUMBER)) # Displaying/testing if everything works properly
with open('{}/out{}.csv'.format(os.path.join('..', '..', 'data','processed'), i + 1), "w") as new_file: # Creating file
new_file.write(merged_df.to_csv()) # Writing data to new file
print("👍 Finished creating formatted .csv files") # Showing result to confirm
|
{"hexsha": "8ec07d175cf607d7cd81be96dd888cb4eaa5f5f0", "size": 2400, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/scripts/project_functions.py", "max_stars_repo_name": "data301-2020-winter2/course-project-group_1007", "max_stars_repo_head_hexsha": "3918515b3f6622d732d10cfae7d0c7bc30a2b449", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-02-08T05:28:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-29T19:20:56.000Z", "max_issues_repo_path": "analysis/scripts/project_functions.py", "max_issues_repo_name": "d3li0n/course-project-group_1007", "max_issues_repo_head_hexsha": "3918515b3f6622d732d10cfae7d0c7bc30a2b449", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-24T00:24:30.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-29T23:33:15.000Z", "max_forks_repo_path": "analysis/scripts/project_functions.py", "max_forks_repo_name": "d3li0n/course-project-group_1007", "max_forks_repo_head_hexsha": "3918515b3f6622d732d10cfae7d0c7bc30a2b449", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-29T19:21:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T19:21:02.000Z", "avg_line_length": 48.0, "max_line_length": 145, "alphanum_fraction": 0.6729166667, "include": true, "reason": "import numpy", "num_tokens": 664}
|
import matplotlib.pyplot as plt
import numpy as np
def MB_speed(v, m, T):
"""
    :param v: molecular speeds
    :param m: mass of the molecule under study
    :param T: temperature of the atmosphere
    :return: probability density function
    Computes the speed distribution of an ideal gas,
    known as the Maxwell-Boltzmann distribution.
"""
    kB = 1.38e-23 # Boltzmann constant
return (m/(2*np.pi*kB*T))**1.5 * 4*np.pi * v**2 * np.exp(-m*v**2/(2*kB*T))
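# Sanity check: the distribution above peaks at the most probable speed
# v_p = sqrt(2*kB*T/m); for argon (m = 40 u) at T = 415 K this is roughly 415 m/s.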
v = np.arange(0, 8000, 1) # range of speeds
el = int(input('Mass (u): ')) # atomic mass of the element in u
mass = el * 1.66e-27 # mass of the molecule in kg
temp1 = int(input('Temperature 1 (K): ')) # temperature of atmosphere 1
temp2 = int(input('Temperature 2 (K): ')) # temperature of atmosphere 2
plt.plot(v, MB_speed(v, mass, temp1), 'b', label=f'T={temp1} K')
plt.plot(v, MB_speed(v, mass, temp2), 'r--', label=f'T={temp2} K')
plt.title('Speed distribution for the molecule') # you can replace 'molecule' with the name of the element used ;)
plt.xlabel('v(m/s)')
plt.ylabel('f(v)')
plt.legend()
plt.show()
"""
Some example values:
Element argon (Ar) - mass(u) = 40
temperatures t1=196 K, t2=415 K
"""
|
{"hexsha": "5d0d85a593e2df880376a4f35024ad2311982560", "size": 1288, "ext": "py", "lang": "Python", "max_stars_repo_path": "dist-Maxwell-Boltzmann.py", "max_stars_repo_name": "Gabriel-Scheffel/dist-vel-Maxwell-Boltsmann", "max_stars_repo_head_hexsha": "6e45e255c79c7b8aa745e7330ebf5e226a1bc1fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dist-Maxwell-Boltzmann.py", "max_issues_repo_name": "Gabriel-Scheffel/dist-vel-Maxwell-Boltsmann", "max_issues_repo_head_hexsha": "6e45e255c79c7b8aa745e7330ebf5e226a1bc1fa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dist-Maxwell-Boltzmann.py", "max_forks_repo_name": "Gabriel-Scheffel/dist-vel-Maxwell-Boltsmann", "max_forks_repo_head_hexsha": "6e45e255c79c7b8aa745e7330ebf5e226a1bc1fa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7777777778, "max_line_length": 116, "alphanum_fraction": 0.6397515528, "include": true, "reason": "import numpy", "num_tokens": 420}
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Mapping, Optional, Union
import numpy as np
from cirq import protocols, value
from cirq.ops import raw_types
class LinearCombinationOfGates(value.LinearDict[raw_types.Gate]):
"""Represents linear operator defined by a linear combination of gates.
Suppose G1, G2, ..., Gn are gates and b1, b2, ..., bn are complex
numbers. Then
LinearCombinationOfGates({G1: b1, G2: b2, ..., Gn: bn})
represents the linear operator
A = b1 G1 + b2 G2 + ... + bn * Gn
Note that A may not be unitary or even normal.
Rather than creating LinearCombinationOfGates instance explicitly, one may
use overloaded arithmetic operators. For example,
cirq.LinearCombinationOfGates({cirq.X: 2, cirq.Z: -2})
is equivalent to
2 * cirq.X - 2 * cirq.Z
"""
def __init__(self, terms: Mapping[raw_types.Gate, value.Scalar]) -> None:
"""Initializes linear combination from a collection of terms.
Args:
terms: Mapping of gates to coefficients in the linear combination
being initialized.
"""
super().__init__(terms, validator=self._is_compatible)
def num_qubits(self) -> Optional[int]:
"""Returns number of qubits in the domain if known, None if unknown."""
if not self:
return None
any_gate = next(iter(self))
return any_gate.num_qubits()
def _is_compatible(self, gate: raw_types.Gate) -> bool:
return (self.num_qubits() is None or
self.num_qubits() == gate.num_qubits())
def __add__(self,
other: Union[raw_types.Gate, 'LinearCombinationOfGates']
) -> 'LinearCombinationOfGates':
if not isinstance(other, LinearCombinationOfGates):
other = other.wrap_in_linear_combination()
return super().__add__(other)
def __iadd__(self,
other: Union[raw_types.Gate, 'LinearCombinationOfGates']
) -> 'LinearCombinationOfGates':
if not isinstance(other, LinearCombinationOfGates):
other = other.wrap_in_linear_combination()
return super().__iadd__(other)
def __sub__(self,
other: Union[raw_types.Gate, 'LinearCombinationOfGates']
) -> 'LinearCombinationOfGates':
if not isinstance(other, LinearCombinationOfGates):
other = other.wrap_in_linear_combination()
return super().__sub__(other)
def __isub__(self,
other: Union[raw_types.Gate, 'LinearCombinationOfGates']
) -> 'LinearCombinationOfGates':
if not isinstance(other, LinearCombinationOfGates):
other = other.wrap_in_linear_combination()
return super().__isub__(other)
def matrix(self) -> np.ndarray:
"""Reconstructs matrix of self using unitaries of underlying gates.
Raises:
TypeError: if any of the gates in self does not provide a unitary.
"""
num_qubits = self.num_qubits()
if num_qubits is None:
raise ValueError('Unknown number of qubits')
num_dim = 2 ** num_qubits
result = np.zeros((num_dim, num_dim), dtype=np.complex128)
for gate, coefficient in self.items():
result += protocols.unitary(gate) * coefficient
return result
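    # For example (the values follow directly from the Pauli matrices):
    # (2 * cirq.X - 2 * cirq.Z).matrix() gives [[-2, 2], [2, 2]].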
def _pauli_expansion_(self) -> value.LinearDict[str]:
result = value.LinearDict({}) # type: value.LinearDict[str]
for gate, coefficient in self.items():
result += protocols.pauli_expansion(gate) * coefficient
return result
|
{"hexsha": "ba26fd8bf682a846ba26effcd170c57fb911a27c", "size": 4203, "ext": "py", "lang": "Python", "max_stars_repo_path": "cirq/ops/linear_combinations.py", "max_stars_repo_name": "rickyHong/Quantum-cirq-repl", "max_stars_repo_head_hexsha": "fca994bb8184be96354c0c4bc64dbcad6df517f1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-04-02T09:16:28.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-25T18:35:19.000Z", "max_issues_repo_path": "cirq/ops/linear_combinations.py", "max_issues_repo_name": "babbush/Cirq", "max_issues_repo_head_hexsha": "447b2c762cc2820dd28abb3bd2bc785d36bae39a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 36, "max_issues_repo_issues_event_min_datetime": "2019-04-03T23:03:51.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-15T23:49:01.000Z", "max_forks_repo_path": "cirq/ops/linear_combinations.py", "max_forks_repo_name": "babbush/Cirq", "max_forks_repo_head_hexsha": "447b2c762cc2820dd28abb3bd2bc785d36bae39a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-04-03T22:55:05.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-24T23:24:53.000Z", "avg_line_length": 36.8684210526, "max_line_length": 79, "alphanum_fraction": 0.6542945515, "include": true, "reason": "import numpy", "num_tokens": 981}
|
"""
Structure featurizers generating a matrix for each structure.
Most matrix structure featurizers contain the ability to flatten matrices to be dataframe-friendly.
"""
import numpy as np
import scipy.constants as const
from sklearn.exceptions import NotFittedError
from pymatgen.core import Structure
from pymatgen.core.periodic_table import Element
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
import pymatgen.analysis.local_env as pmg_le
from matminer.featurizers.base import BaseFeaturizer
ANG_TO_BOHR = const.value("Angstrom star") / const.value("Bohr radius")
class CoulombMatrix(BaseFeaturizer):
"""
The Coulomb matrix, a representation of nuclear coulombic interaction.
Generate the Coulomb matrix, M, of the input structure (or molecule). The
Coulomb matrix was put forward by Rupp et al. (Phys. Rev. Lett. 108, 058301,
2012) and is defined by off-diagonal elements M_ij = Z_i*Z_j/|R_i-R_j| and
diagonal elements 0.5*Z_i^2.4, where Z_i and R_i denote the nuclear charge
and the position of atom i, respectively.
Coulomb Matrix features are flattened (for ML-readiness) by default. Use
fit before featurizing to use flattened features. To return the matrix form,
set flatten=False.
Args:
diag_elems (bool): flag indication whether (True, default) to use
the original definition of the diagonal elements; if set to False,
the diagonal elements are set to 0
flatten (bool): If True, returns a flattened vector based on eigenvalues
of the matrix form. Otherwise, returns a matrix object (single
feature), which will likely need to be processed further.
"""
def __init__(self, diag_elems=True, flatten=True):
self.diag_elems = diag_elems
self.flatten = flatten
self._max_eigs = None
def _check_fitted(self):
if self.flatten and not self._max_eigs:
raise NotFittedError("Please fit the CoulombMatrix before " "featurizing if using flatten=True.")
def fit(self, X, y=None):
"""
Fit the Coulomb Matrix to a list of structures.
Args:
X ([Structure]): A list of pymatgen structures.
y : unused (added for consistency with overridden method signature)
Returns:
self
"""
if self.flatten:
n_sites = [structure.num_sites for structure in X]
# CM makes sites x sites matrix; max eigvals for n x n matrix is n
self._max_eigs = max(n_sites)
return self
def featurize(self, s):
"""
Get Coulomb matrix of input structure.
Args:
s: input Structure (or Molecule) object.
Returns:
m: (Nsites x Nsites matrix) Coulomb matrix.
"""
self._check_fitted()
m = np.zeros((s.num_sites, s.num_sites))
atomic_numbers = []
for site in s.sites:
if isinstance(site.specie, Element):
atomic_numbers.append(site.specie.Z)
else:
atomic_numbers.append(site.specie.element.Z)
for i in range(s.num_sites):
for j in range(s.num_sites):
if i == j:
if self.diag_elems:
m[i, j] = 0.5 * atomic_numbers[i] ** 2.4
else:
m[i, j] = 0
else:
d = s.get_distance(i, j) * ANG_TO_BOHR
m[i, j] = atomic_numbers[i] * atomic_numbers[j] / d
cm = np.array(m)
if self.flatten:
eigs, _ = np.linalg.eig(cm)
zeros = np.zeros((self._max_eigs,))
zeros[: len(eigs)] = eigs
return zeros
else:
return [cm]
def feature_labels(self):
self._check_fitted()
if self.flatten:
return ["coulomb matrix eig {}".format(i) for i in range(self._max_eigs)]
else:
return ["coulomb matrix"]
def citations(self):
return [
"@article{rupp_tkatchenko_muller_vonlilienfeld_2012, title={"
"Fast and accurate modeling of molecular atomization energies"
" with machine learning}, volume={108},"
" DOI={10.1103/PhysRevLett.108.058301}, number={5},"
" pages={058301}, journal={Physical Review Letters}, author={"
'Rupp, Matthias and Tkatchenko, Alexandre and M"uller,'
" Klaus-Robert and von Lilienfeld, O. Anatole}, year={2012}}"
]
def implementors(self):
return ["Nils E. R. Zimmermann", "Alex Dunn"]
class SineCoulombMatrix(BaseFeaturizer):
"""
A variant of the Coulomb matrix developed for periodic crystals.
This function generates a variant of the Coulomb matrix developed
for periodic crystals by Faber et al. (Inter. J. Quantum Chem.
115, 16, 2015). It is identical to the Coulomb matrix, except
that the inverse distance function is replaced by the inverse of a
sin**2 function of the vector between the sites which is periodic
in the dimensions of the structure lattice. See paper for details.
Coulomb Matrix features are flattened (for ML-readiness) by default. Use
fit before featurizing to use flattened features. To return the matrix form,
set flatten=False.
Args:
diag_elems (bool): flag indication whether (True, default) to use
the original definition of the diagonal elements; if set to False,
the diagonal elements are set to 0
flatten (bool): If True, returns a flattened vector based on eigenvalues
of the matrix form. Otherwise, returns a matrix object (single
feature), which will likely need to be processed further.
"""
def __init__(self, diag_elems=True, flatten=True):
self.diag_elems = diag_elems
self.flatten = flatten
self._max_eigs = None
def _check_fitted(self):
if self.flatten and not self._max_eigs:
raise NotFittedError("Please fit the SineCoulombMatrix before " "featurizing if using flatten=True.")
def fit(self, X, y=None):
"""
Fit the Sine Coulomb Matrix to a list of structures.
Args:
X ([Structure]): A list of pymatgen structures.
y : unused (added for consistency with overridden method signature)
Returns:
self
"""
if self.flatten:
nsites = [structure.num_sites for structure in X]
self._max_eigs = max(nsites)
return self
def featurize(self, s):
"""
Args:
s (Structure or Molecule): input structure (or molecule)
Returns:
(Nsites x Nsites matrix) Sine matrix or
"""
self._check_fitted()
sites = s.sites
atomic_numbers = np.array([site.specie.Z for site in sites])
sin_mat = np.zeros((len(sites), len(sites)))
coords = np.array([site.frac_coords for site in sites])
lattice = s.lattice.matrix
for i in range(len(sin_mat)):
for j in range(len(sin_mat)):
if i == j:
if self.diag_elems:
sin_mat[i][i] = 0.5 * atomic_numbers[i] ** 2.4
elif i < j:
vec = coords[i] - coords[j]
coord_vec = np.sin(np.pi * vec) ** 2
trig_dist = np.linalg.norm((np.matrix(coord_vec) * lattice).A1) * ANG_TO_BOHR
sin_mat[i][j] = atomic_numbers[i] * atomic_numbers[j] / trig_dist
else:
sin_mat[i][j] = sin_mat[j][i]
if self.flatten:
eigs, _ = np.linalg.eig(sin_mat)
zeros = np.zeros((self._max_eigs,))
zeros[: len(eigs)] = eigs
return zeros
else:
return [sin_mat]
def feature_labels(self):
self._check_fitted()
if self.flatten:
return ["sine coulomb matrix eig {}".format(i) for i in range(self._max_eigs)]
else:
return ["sine coulomb matrix"]
def citations(self):
return [
"@article {QUA:QUA24917,"
"author = {Faber, Felix and Lindmaa, Alexander and von "
"Lilienfeld, O. Anatole and Armiento, Rickard},"
"title = {Crystal structure representations for machine "
"learning models of formation energies},"
"journal = {International Journal of Quantum Chemistry},"
"volume = {115},"
"number = {16},"
"issn = {1097-461X},"
"url = {http://dx.doi.org/10.1002/qua.24917},"
"doi = {10.1002/qua.24917},"
"pages = {1094--1101},"
"keywords = {machine learning, formation energies, "
"representations, crystal structure, periodic systems},"
"year = {2015},"
"}"
]
def implementors(self):
return ["Kyle Bystrom", "Alex Dunn"]
class OrbitalFieldMatrix(BaseFeaturizer):
"""
Representation based on the valence shell electrons of neighboring atoms.
Each atom is described by a 32-element vector (or 39-element vector, see
period tag for details) uniquely representing the valence subshell.
A 32x32 (39x39) matrix is formed by multiplying two atomic vectors.
    An OFM for an atomic environment is the sum of these matrices for each atom
    that the center atom coordinates with, multiplied by a distance function
    (in this case, 1/r times the weight of the coordinating atom in the Voronoi
    polyhedra method). The OFM of a structure or molecule is the average of the
OFMs for all the sites in the structure.
Args:
period_tag (bool): In the original OFM, an element is represented
by a vector of length 32, where each element is 1 or 0,
which represents the valence subshell of the element.
With period_tag=True, the vector size is increased
to 39, where the 7 extra elements represent the period
of the element. Note lanthanides are treated as period 6,
actinides as period 7. Default False as in the original paper.
flatten (bool): Flatten the avg OFM to a 1024-vector (if period_tag
False) or a 1521-vector (if period_tag=True).
    .. attribute:: size
Either 32 or 39, the size of the vectors used to describe elements.
Reference:
`Pham et al. _Sci Tech Adv Mat_. 2017 <http://dx.doi.org/10.1080/14686996.2017.1378060>_`
"""
def __init__(self, period_tag=False, flatten=True):
"""Initialize the featurizer
Args:
period_tag (bool): In the original OFM, an element is represented
by a vector of length 32, where each element is 1 or 0,
which represents the valence subshell of the element.
With period_tag=True, the vector size is increased
to 39, where the 7 extra elements represent the period
of the element. Note lanthanides are treated as period 6,
actinides as period 7. Default False as in the original paper.
"""
my_ohvs = {}
if period_tag:
self.size = 39
else:
self.size = 32
for Z in range(1, 95):
el = Element.from_Z(Z)
my_ohvs[Z] = self.get_ohv(el, period_tag)
my_ohvs[Z] = np.matrix(my_ohvs[Z])
self.ohvs = my_ohvs
self.flatten = flatten
def get_ohv(self, sp, period_tag):
"""
Get the "one-hot-vector" for pymatgen Element sp. This 32 or 39-length
vector represents the valence shell of the given element.
Args:
sp (Element): element whose ohv should be returned
period_tag (bool): If true, the vector contains items
corresponding to the period of the element
Returns:
my_ohv (numpy array length 39 if period_tag, else 32): ohv for sp
"""
el_struct = sp.full_electronic_structure
ohd = {j: {i + 1: 0 for i in range(2 * (2 * j + 1))} for j in range(4)}
nume = 0
shell_num = 0
max_n = el_struct[-1][0]
while shell_num < len(el_struct):
if el_struct[-1 - shell_num][0] < max_n - 2:
shell_num += 1
continue
elif el_struct[-1 - shell_num][0] < max_n - 1 and el_struct[-1 - shell_num][1] != "f":
shell_num += 1
continue
elif el_struct[-1 - shell_num][0] < max_n and (
el_struct[-1 - shell_num][1] != "d" and el_struct[-1 - shell_num][1] != "f"
):
shell_num += 1
continue
curr_shell = el_struct[-1 - shell_num]
if curr_shell[1] == "s":
l = 0
elif curr_shell[1] == "p":
l = 1
elif curr_shell[1] == "d":
l = 2
elif curr_shell[1] == "f":
l = 3
ohd[l][curr_shell[2]] = 1
nume += curr_shell[2]
shell_num += 1
my_ohv = np.zeros(self.size, np.int)
k = 0
for j in range(4):
for i in range(2 * (2 * j + 1)):
my_ohv[k] = ohd[j][i + 1]
k += 1
if period_tag:
row = sp.row
if row > 7:
row -= 2
my_ohv[row + 31] = 1
return my_ohv
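    # Illustration: for oxygen (valence 2s^2 2p^4) the loop above sets exactly two
    # entries of the returned vector to 1 -- the one marking an s-occupancy of 2 and
    # the one marking a p-occupancy of 4 (plus the period-2 tag if period_tag=True).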
def get_single_ofm(self, site, site_dict):
"""
Gets the orbital field matrix for a single chemical environment,
where site is the center atom whose environment is characterized and
site_dict is a dictionary of site : weight, where the weights are the
Voronoi Polyhedra weights of the corresponding coordinating sites.
Args:
site (Site): center atom
site_dict (dict of Site:float): chemical environment
Returns:
atom_ofm (size X size numpy matrix): ofm for site
"""
ohvs = self.ohvs
atom_ofm = np.matrix(np.zeros((self.size, self.size)))
ref_atom = ohvs[site.specie.Z]
for other_site in site_dict:
scale = other_site["weight"]
other_atom = ohvs[other_site["site"].specie.Z]
atom_ofm += other_atom.T * ref_atom * scale / site.distance(other_site["site"]) / ANG_TO_BOHR
return atom_ofm
def get_atom_ofms(self, struct, symm=False):
"""
Calls get_single_ofm for every site in struct. If symm=True,
get_single_ofm is called for symmetrically distinct sites, and
counts is constructed such that ofms[i] occurs counts[i] times
in the structure
Args:
struct (Structure): structure for find ofms for
symm (bool): whether to calculate ofm for only symmetrically
distinct sites
Returns:
ofms ([size X size matrix] X len(struct)): ofms for struct
if symm:
ofms ([size X size matrix] X number of symmetrically distinct sites):
ofms for struct
counts: number of identical sites for each ofm
"""
ofms = []
vnn = pmg_le.VoronoiNN(allow_pathological=True)
if symm:
symm_struct = SpacegroupAnalyzer(struct).get_symmetrized_structure()
indices = [lst[0] for lst in symm_struct.equivalent_indices]
counts = [len(lst) for lst in symm_struct.equivalent_indices]
else:
indices = [i for i in range(len(struct.sites))]
for index in indices:
ofms.append(self.get_single_ofm(struct.sites[index], vnn.get_nn_info(struct, index)))
if symm:
return ofms, counts
return ofms
def get_mean_ofm(self, ofms, counts):
"""
Averages a list of ofms, weights by counts
"""
ofms = [ofm * c for ofm, c in zip(ofms, counts)]
return sum(ofms) / sum(counts)
def get_structure_ofm(self, struct):
"""
Calls get_mean_ofm on the results of get_atom_ofms
to give a size X size matrix characterizing a structure
"""
ofms, counts = self.get_atom_ofms(struct, True)
return self.get_mean_ofm(ofms, counts)
def featurize(self, s):
"""
Makes a supercell for structure s (to protect sites
from coordinating with themselves), and then finds the mean
of the orbital field matrices of each site to characterize
a structure
Args:
s (Structure): structure to characterize
Returns:
mean_ofm (size X size matrix): orbital field matrix
characterizing s
"""
s *= [3, 3, 3]
ofms, counts = self.get_atom_ofms(s, True)
mean_ofm = self.get_mean_ofm(ofms, counts)
if self.flatten:
return mean_ofm.A.flatten()
else:
return [mean_ofm.A]
def feature_labels(self):
if self.flatten:
slabels = ["s^{}".format(i) for i in range(1, 3)]
plabels = ["p^{}".format(i) for i in range(1, 7)]
dlabels = ["d^{}".format(i) for i in range(1, 11)]
flabels = ["f^{}".format(i) for i in range(1, 15)]
labelset_1D = slabels + plabels + dlabels + flabels
# account for period tags
if self.size == 39:
period_labels = ["period {}".format(i) for i in range(1, 8)]
labelset_1D += period_labels
labelset_2D = []
for l1 in labelset_1D:
for l2 in labelset_1D:
labelset_2D.append("OFM: " + l1 + " - " + l2)
return labelset_2D
else:
return ["orbital field matrix"]
def citations(self):
return [
"@article{LamPham2017,"
"author = {{Lam Pham}, Tien and Kino, Hiori and Terakura, Kiyoyuki and "
"Miyake, Takashi and Tsuda, Koji and Takigawa, Ichigaku and {Chi Dam}, Hieu},"
"doi = {10.1080/14686996.2017.1378060},"
"journal = {Science and Technology of Advanced Materials},"
"month = {dec},"
"number = {1},"
"pages = {756--765},"
"publisher = {Taylor {\&} Francis},"
"title = {{Machine learning reveals orbital interaction in materials}},"
"url = {https://www.tandfonline.com/doi/full/10.1080/14686996.2017.1378060},"
"volume = {18},"
"year = {2017}"
"}"
]
def implementors(self):
return ["Kyle Bystrom", "Alex Dunn"]
|
{"hexsha": "df49ba793e6d458e21db40b9ccc898c1237e7245", "size": 18794, "ext": "py", "lang": "Python", "max_stars_repo_path": "matminer/featurizers/structure/matrix.py", "max_stars_repo_name": "ncfrey/matminer", "max_stars_repo_head_hexsha": "5a688de8f2c7eaf5109d34d58ab7875cfe980e48", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 326, "max_stars_repo_stars_event_min_datetime": "2017-01-26T00:12:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T15:07:31.000Z", "max_issues_repo_path": "matminer/featurizers/structure/matrix.py", "max_issues_repo_name": "ncfrey/matminer", "max_issues_repo_head_hexsha": "5a688de8f2c7eaf5109d34d58ab7875cfe980e48", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 578, "max_issues_repo_issues_event_min_datetime": "2017-01-02T23:57:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T13:01:31.000Z", "max_forks_repo_path": "matminer/featurizers/structure/matrix.py", "max_forks_repo_name": "ncfrey/matminer", "max_forks_repo_head_hexsha": "5a688de8f2c7eaf5109d34d58ab7875cfe980e48", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 182, "max_forks_repo_forks_event_min_datetime": "2017-01-12T18:45:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T15:03:54.000Z", "avg_line_length": 38.670781893, "max_line_length": 113, "alphanum_fraction": 0.5809832925, "include": true, "reason": "import numpy,import scipy", "num_tokens": 4575}
|
import pickle
import librosa
import numpy as np
from tensorflow import keras
class ser:
    def mfcc(self):
        # extract a single 40-coefficient MFCC feature vector, averaged over time
        file = "./Audios/Dataset/1092_Help_FEA_XX.wav"
        data, sampling_rate = librosa.load(file)
        X = []
        mfcc_feature = np.mean(librosa.feature.mfcc(y=data, sr=sampling_rate, n_mfcc=40).T, axis=0)
        X.append(mfcc_feature)
        MFCCs = np.array(X)  # shape (1, 40): one sample, as expected by model.predict()
        return MFCCs
def ser(self):
emotions = {
'0': 'angry',
'1': 'disgust',
'2': 'fear',
'3': 'happy',
'4': 'neutral',
'5': 'sad'
}
with open('model.pkl', 'rb') as f:
model = pickle.load(f)
MFCCs = self.mfcc()
p = model.predict(MFCCs, verbose=0)
yhat_classes = np.argmax(p, 1)
output = emotions.get(str(yhat_classes[0]))
if output == 'fear':
print('Fear')
obj = ser()
obj.ser()
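# --- Illustrative sketch (added to this excerpt; not part of the original script) ---
# mfcc() above returns an array of shape (1, 40): one utterance summarised by 40 time-averaged
# MFCC coefficients, which is the input shape model.predict() expects. A hypothetical
# parameterised variant of the same feature extraction for an arbitrary wav file:
def mfcc_for(path):
    # load the audio and compute its mean MFCC vector, shaped (1, 40) for a single prediction
    data, sampling_rate = librosa.load(path)
    return np.mean(librosa.feature.mfcc(y=data, sr=sampling_rate, n_mfcc=40).T, axis=0)[None, :]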
|
{"hexsha": "da46238f108dfcfb858d6698dab931a501014d4f", "size": 977, "ext": "py", "lang": "Python", "max_stars_repo_path": "Source Code/Speech Emotion Dection/ser.py", "max_stars_repo_name": "GALI-SAI-SHANKAR/Threat-Alert-AI", "max_stars_repo_head_hexsha": "f50743c23c05684d6e32ff52799dc4cc24dcd98f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Source Code/Speech Emotion Dection/ser.py", "max_issues_repo_name": "GALI-SAI-SHANKAR/Threat-Alert-AI", "max_issues_repo_head_hexsha": "f50743c23c05684d6e32ff52799dc4cc24dcd98f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-03-03T13:18:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-04T04:04:38.000Z", "max_forks_repo_path": "Source Code/Speech Emotion Dection/ser.py", "max_forks_repo_name": "GALI-SAI-SHANKAR/Threat-Alert-AI", "max_forks_repo_head_hexsha": "f50743c23c05684d6e32ff52799dc4cc24dcd98f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.425, "max_line_length": 100, "alphanum_fraction": 0.4964176049, "include": true, "reason": "import numpy", "num_tokens": 260}
|
import pickle
import glob
import numpy as np
def print_stats(data):
returns = []
path_lengths = []
print("num trajectories", len(data))
for path in data:
rewards = path["rewards"]
returns.append(np.sum(rewards))
path_lengths.append(len(rewards))
print("returns")
print("min", np.min(returns))
print("max", np.max(returns))
print("mean", np.mean(returns))
print("std", np.std(returns))
print("path lengths")
print("min", np.min(path_lengths))
print("max", np.max(path_lengths))
print("mean", np.mean(path_lengths))
print("std", np.std(path_lengths))
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/pen/demo-bc1/run5/id0/video_*.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/pen_bc1.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/pen/demo-bc5/run0/id*/video_*.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/pen_bc2.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/pen/demo-bc5/run4/id*/video_*.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/pen_bc3.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/pen/demo-bc5/run4/id*/video_*vae.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/pen_bc3_vae.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/pen/demo-bc5/run4/id*/video_*env.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/pen_bc3_env.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/pen/demo-bc5/run6/id*/video_*vae.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/pen_bc4_vae.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/door/demo-bc5/run2/id*/video_*.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/door_bc1.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/hammer/demo-bc1/run0/id*/video_*.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/hammer_bc1.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/relocate/demo-bc1/run0/id*/video_*.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/relocate_bc1.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/door/bc/bc-data1/run0/id*/video_*.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/door_bc2.npy"
input_patterns = [
"/media/ashvin/data2/s3doodad/ashvin/rfeatures/rlbench/open-drawer-vision3/td3bc-with-state3/run0/id0/video_*_vae.p",
]
output_file = "/home/ashvin/data/s3doodad/demos/icml2020/rlbench/rlbench_bc1.npy"
data = []
for pattern in input_patterns:
for file in glob.glob(pattern):
d = pickle.load(open(file, "rb"))
print(file, len(d))
for path in d: # for deleting image observations
for i in range(len(path["observations"])):
ob = path["observations"][i]
keys = list(ob.keys())
for key in keys:
if key != "state_observation":
del ob[key]
data.extend(d)
pickle.dump(data, open(output_file, "wb"))
print(output_file)
print_stats(data)
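# --- Illustrative note (added to this excerpt; not part of the original script) ---
# The inner loop above keeps only the "state_observation" key of each observation, so the
# consolidated trajectories carry low-dimensional states and no images. Note also that the
# output is written with pickle even though the filename ends in .npy, so it is read back with
# pickle.load(open(output_file, "rb")) rather than np.load.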
|
{"hexsha": "ce20ffe52de5597a4618268be84969ec69785382", "size": 3393, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/ashvin/icml2020/process_data/consolidate.py", "max_stars_repo_name": "Asap7772/railrl_evalsawyer", "max_stars_repo_head_hexsha": "baba8ce634d32a48c7dfe4dc03b123e18e96e0a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "experiments/ashvin/icml2020/process_data/consolidate.py", "max_issues_repo_name": "Asap7772/railrl_evalsawyer", "max_issues_repo_head_hexsha": "baba8ce634d32a48c7dfe4dc03b123e18e96e0a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/ashvin/icml2020/process_data/consolidate.py", "max_forks_repo_name": "Asap7772/railrl_evalsawyer", "max_forks_repo_head_hexsha": "baba8ce634d32a48c7dfe4dc03b123e18e96e0a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5940594059, "max_line_length": 121, "alphanum_fraction": 0.6755083996, "include": true, "reason": "import numpy", "num_tokens": 1089}
|
#! /usr/bin/env python
#
# File Name : generate_grid_mrf_model.py
# Created By : largelymfs
# Creation Date : [2016-01-20 14:42]
# Last Modified : [2016-01-20 14:50]
# Description : the pyscripts to generate mrf grid model
#
def output_2darray(array, fout):
    # write each row of a 2-D array as one whitespace-separated line
    for item in array:
        for value in item:
            print(value, end=' ', file=fout)
        print(file=fout)
if __name__=="__main__":
import sys
filename_output = sys.argv[2]
cnt_variable = int(sys.argv[1])
import numpy as np
#generate phi
phi = np.random.normal(0.0, 0.1, cnt_variable * cnt_variable).reshape((cnt_variable, cnt_variable))
theta_a = np.random.normal(0.0, 1.0, cnt_variable * (cnt_variable - 1)).reshape((cnt_variable - 1, cnt_variable))
theta_b = np.random.normal(0.0, 1.0, cnt_variable * (cnt_variable - 1)).reshape((cnt_variable, cnt_variable - 1))
    with open(filename_output, "w") as fout:
        print(cnt_variable, file=fout)
        print(file=fout)
        output_2darray(phi, fout)
        print(file=fout)
        output_2darray(theta_a, fout)
        print(file=fout)
        output_2darray(theta_b, fout)
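# --- Illustrative usage note (added to this excerpt; not part of the original script) ---
# Hypothetical invocation: `python generate_grid_mrf_model.py 5 grid_model.txt` writes the
# variable count on the first line, then phi (5 x 5), theta_a (4 x 5) and theta_b (5 x 4),
# each as whitespace-separated rows with a blank line between blocks.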
|
{"hexsha": "79cd0f819d43c93f8a9c2e5c308bde2f2f576e0a", "size": 1188, "ext": "py", "lang": "Python", "max_stars_repo_path": "Grid_MRF/generate_grid_mrf_model.py", "max_stars_repo_name": "YoungLew/NoiseContrastiveLearning", "max_stars_repo_head_hexsha": "2abff09651e17af4370319ca63a4c090097f914f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Grid_MRF/generate_grid_mrf_model.py", "max_issues_repo_name": "YoungLew/NoiseContrastiveLearning", "max_issues_repo_head_hexsha": "2abff09651e17af4370319ca63a4c090097f914f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Grid_MRF/generate_grid_mrf_model.py", "max_forks_repo_name": "YoungLew/NoiseContrastiveLearning", "max_forks_repo_head_hexsha": "2abff09651e17af4370319ca63a4c090097f914f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9411764706, "max_line_length": 117, "alphanum_fraction": 0.6102693603, "include": true, "reason": "import numpy", "num_tokens": 337}
|
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from akg.utils import kernel_exec as utils
from akg.ops.math.ascend import EqualCount
from akg import tvm
import math
def compute_blockdim(shape):
    size = 1  # start from 1 so the running product of dimensions below is not stuck at zero
if isinstance(shape, (list, tuple)):
for i in shape:
if isinstance(i, int):
size = size * i
elif isinstance(i, (list, tuple)):
for ii in i:
if isinstance(ii, int):
size = size * ii
elif isinstance(shape, int):
size = shape
else:
size = 2
return min(32, math.ceil(size / 8192 + 1))
def equal_count_run(shapes, dtype, kernel_name, attrs):
# shape check
if attrs is None:
attrs = {}
if attrs.get("dynamic"):
var_size = tvm.var("I0")
var_shape = []
for shape in shapes:
assert len(shape) == 1
var_shape.append([var_size])
build_shape = var_shape
else:
build_shape = shapes
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(EqualCount, build_shape, [dtype, dtype], kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
benchMark1, inputs1, output1 = gen_data(dtype, shapes)
return mod, benchMark1, inputs1 + [output1]
else:
return mod
else:
mod = utils.op_build_test(EqualCount, build_shape, [dtype, dtype], kernel_name=kernel_name, attrs=attrs)
benchMark1, inputs1, output1 = gen_data(dtype, shapes)
if attrs.get("dynamic"):
args = inputs1.copy()
args.append(output1)
for i in range(len(shape) - 1, -1, -1):
args.append(shape[i])
block_dim = compute_blockdim(shapes)
args.append(block_dim)
else:
args = inputs1 + [output1]
output1 = utils.mod_launch(mod, args, outputs=(2,), expect=benchMark1)
return inputs1, output1, benchMark1, (output1[0] == benchMark1)
def gen_data(dtype, shapes, class_num=10):
support_list = {"int32": np.int32}
inputs1 = []
for i in range(len(shapes)):
shape = shapes[i]
input = np.random.randint(low=0, high=class_num, size=shape).astype(support_list[dtype.lower()])
inputs1.append(input)
if len(inputs1) != 2:
raise RuntimeError("inputs num should be 2")
equal_result = np.equal(inputs1[0], inputs1[1])
equal_count_num = np.sum(equal_result)
output1 = np.full((1,), np.nan, dtype)
return equal_count_num, inputs1, output1
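# --- Illustrative sketch (added to this excerpt; not part of the original test) ---
# gen_data() builds the reference value with plain NumPy: the benchmark is just the number of
# positions where the two integer inputs agree. A standalone restatement of that check:
def _reference_equal_count(a, b):
    # element-wise match count; this is the quantity the EqualCount kernel is tested against
    return int(np.sum(np.equal(a, b)))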
|
{"hexsha": "9f600b2cd72d5db5ac46f4312b17f0ad0c1c69fe", "size": 3215, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/common/test_run/ascend/equal_count_run.py", "max_stars_repo_name": "tianjiashuo/akg", "max_stars_repo_head_hexsha": "a9cbf642063fb1086a93e8bc6be6feb145689817", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 286, "max_stars_repo_stars_event_min_datetime": "2020-06-23T06:40:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T01:27:49.000Z", "max_issues_repo_path": "tests/common/test_run/ascend/equal_count_run.py", "max_issues_repo_name": "tianjiashuo/akg", "max_issues_repo_head_hexsha": "a9cbf642063fb1086a93e8bc6be6feb145689817", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-07-31T03:26:59.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-27T15:00:54.000Z", "max_forks_repo_path": "tests/common/test_run/ascend/equal_count_run.py", "max_forks_repo_name": "tianjiashuo/akg", "max_forks_repo_head_hexsha": "a9cbf642063fb1086a93e8bc6be6feb145689817", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2020-07-17T01:04:14.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-27T14:05:19.000Z", "avg_line_length": 36.9540229885, "max_line_length": 122, "alphanum_fraction": 0.6242612753, "include": true, "reason": "import numpy", "num_tokens": 792}
|
[STATEMENT]
lemma is_pseudonatural_equivalence:
shows "pseudonatural_equivalence V\<^sub>C H\<^sub>C \<a>\<^sub>C \<i>\<^sub>C src\<^sub>C trg\<^sub>C V\<^sub>D H\<^sub>D \<a>\<^sub>D \<i>\<^sub>D src\<^sub>D trg\<^sub>D
F \<Phi>\<^sub>F H \<Phi>\<^sub>H map\<^sub>0 map\<^sub>1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pseudonatural_equivalence (\<cdot>\<^sub>C) (\<star>\<^sub>C) \<a>\<^sub>C \<i>\<^sub>C src\<^sub>C trg\<^sub>C (\<cdot>\<^sub>D) (\<star>\<^sub>D) \<a>\<^sub>D \<i>\<^sub>D src\<^sub>D trg\<^sub>D F \<Phi>\<^sub>F H \<Phi>\<^sub>H map\<^sub>0 map\<^sub>1
[PROOF STEP]
..
|
{"llama_tokens": 261, "file": "Bicategory_PseudonaturalTransformation", "length": 1}
|
#!/usr/bin/env python
import sys
import re
# plotting
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
net_re = re.compile(r'^net:.*/train_val_(.*)\.prototxt')
model_re1 = re.compile(r'Finetuning from models/bvlc_reference_caffenet/[^/]*/(.*)\.caffemodel')
model_re2 = re.compile(r'Finetuning from models/bvlc_reference_caffenet/(.*)\.caffemodel')
model_re3 = re.compile(r'Resuming from models/bvlc_reference_caffenet/[^/]*/(.*)\.solverstate')
weight_decay_re = re.compile(r'weight_decay: (.+)')
accuracy_re = re.compile(r'Test net output \#\d+: accuracy = (\d+\.\d+)')
iteration_re = re.compile(r'Iteration (\d+), loss')
sparsity_re = re.compile(r'^(\d+\.?\d*)\W+(\d+\.?\d*)\W+(\d+\.?\d*)\W+(\d+\.?\d*)\W+(\d+\.?\d*)\W+(\d+\.?\d*)\W+(\d+\.?\d*)\W+(\d+\.?\d*)')
#plt.xlabel('Iteration')
#plt.ylabel('Accuracy (validation) / Sparsity')
net = None
model = None
weight_decay = None
accuracy = []
iteration = []
sparsity = [[], [], [], [], [], [], [], []]
mode = 0 # 0 : looking for accuracy, 1: looking for iteration, 2: looking for sparsity
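# --- Illustrative note (added to this excerpt; not part of the original script) ---
# The parser cycles mode 0 -> 1 -> 2 over the Caffe training log; lines of roughly the
# following shape (values are hypothetical) are what each state matches:
#   mode 0: "Test net output #0: accuracy = 0.5712"
#   mode 1: "Iteration 12000, loss = 1.2345"
#   mode 2: "95.2  91.3  88.7  85.1  80.4  60.2  55.9  70.3"   (eight per-layer sparsity values)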
with open(sys.argv[1] + "/train.info") as logfile:
for line in logfile:
m = net_re.search(line)
if m:
net = m.group(1)
m = model_re1.search(line)
if m:
model = m.group(1)
else:
m = model_re2.search(line)
if m:
model = m.group(1)
else:
m = model_re3.search(line)
if m:
model = m.group(1)
m = weight_decay_re.search(line)
if m:
weight_decay = m.group(1)
if mode == 0:
m = accuracy_re.search(line)
if m:
accuracy.append(float(m.group(1)))
mode = 1
elif mode == 1:
m = iteration_re.search(line)
if m:
iteration.append(int(m.group(1)))
mode = 2
elif mode == 2:
m = sparsity_re.search(line)
if m:
for i in range(8):
sparsity[i].append(float(m.group(i + 1))/100)
mode = 0
#print len(iteration)
#print len(accuracy)
#for i in range(8):
#print len(sparsity[i])
#for i in range(len(accuracy)):
#print iteration[i], accuracy[i],
#for j in range(8):
#print sparsity[j][i],
#print
min_len = min(len(iteration), len(accuracy))
fig, ax1 = plt.subplots()
ax1.plot(iteration[:min_len], accuracy[:min_len], label='acc', marker='*')
ax1.set_ylabel('accuracy (validation)')
ax1.set_xlabel('iteration')
ax2 = ax1.twinx()
for i in range(5):
    ax2.plot(iteration[:min_len], sparsity[i][:min_len], label=('l' + str(i + 1)))
for i in range(5,8):
    ax2.plot(iteration[:min_len], sparsity[i][:min_len], label=('l' + str(i + 1)), marker='+')
ax2.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=9,mode="expand", borderaxespad=0.)
ax2.set_ylabel('sparsity')
#plt.legend(bbox_to_anchor=(0.9, 1), loc=2, borderaxespad=0.)
#ax = plt.subplot(111)
#box = ax.get_position()
#ax.set_position([box.x0, box.y0, box.width, box.height*0.7])
#ax.legend(loc='upper right', bbox_to_anchor=(1.2, 1.6))
#if len(sys.argv) > 1
#outfile = sys.argv[-1]
#else
#outfile = net + "_" + model + "_" + weight_decay + ".png"
outfile = sys.argv[1] + ".png"
plt.savefig(outfile)
|
{"hexsha": "4a93015f558880a2c2ec8fd1a30e572c00ab26ba", "size": 3170, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/bvlc_reference_caffenet/plot_accuracy_sparsity.py", "max_stars_repo_name": "IntelLabs/SkimCaffe", "max_stars_repo_head_hexsha": "27df6a8796a012da722c3e2673739350133c1779", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 255, "max_stars_repo_stars_event_min_datetime": "2016-09-14T03:34:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-03T17:09:24.000Z", "max_issues_repo_path": "models/bvlc_reference_caffenet/plot_accuracy_sparsity.py", "max_issues_repo_name": "IntelLabs/SkimCaffe", "max_issues_repo_head_hexsha": "27df6a8796a012da722c3e2673739350133c1779", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2017-05-08T11:13:52.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-06T02:53:10.000Z", "max_forks_repo_path": "models/bvlc_reference_caffenet/plot_accuracy_sparsity.py", "max_forks_repo_name": "IntelLabs/SkimCaffe", "max_forks_repo_head_hexsha": "27df6a8796a012da722c3e2673739350133c1779", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 83, "max_forks_repo_forks_event_min_datetime": "2016-09-18T19:33:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-29T07:35:32.000Z", "avg_line_length": 28.5585585586, "max_line_length": 138, "alphanum_fraction": 0.6170347003, "include": true, "reason": "import numpy", "num_tokens": 973}
|
import numpy as np
from io import BytesIO
from typing import List
def bytes2numpy(data: bytes) -> np.ndarray:
    '''
    Deserialize bytes produced by ``numpy2bytes``/``np.save`` back into a NumPy array.
    '''
nda_bytes = BytesIO(data)
nda = np.load(nda_bytes, allow_pickle=False)
return nda
def numpy2bytes(data: np.ndarray) -> bytes:
    '''
    Serialize a NumPy array to bytes with ``np.save`` (pickling disabled).
    '''
nda_bytes = BytesIO()
np.save(nda_bytes, data, allow_pickle=False)
return nda_bytes.getvalue()
def batch_bytes2numpy(data: List[bytes]) -> List[np.ndarray]:
    '''
    Apply ``bytes2numpy`` to each element of a list of byte strings.
    '''
return list(map(bytes2numpy, data))
def batch_numpy2bytes(data: List[np.ndarray]) -> List[bytes]:
    '''
    Apply ``numpy2bytes`` to each element of a list of arrays.
    '''
return list(map(numpy2bytes, data))
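# --- Illustrative round-trip (added to this excerpt; not part of the original module) ---
# numpy2bytes and bytes2numpy are inverses via np.save/np.load on an in-memory buffer;
# the guarded check below demonstrates that with a small array.
if __name__ == '__main__':
    arr = np.arange(12, dtype=np.float32).reshape(3, 4)
    restored = bytes2numpy(numpy2bytes(arr))
    assert np.array_equal(arr, restored)
    print(restored.shape, restored.dtype)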
|
{"hexsha": "9a636f72c17a81ff647429f88d4cea62389aeaf7", "size": 716, "ext": "py", "lang": "Python", "max_stars_repo_path": "rls/distribute/utils/numpy.py", "max_stars_repo_name": "yisuoyanyudmj/RLs-1", "max_stars_repo_head_hexsha": "a336b57e804507bca23cbadc3b5af1924c80d942", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-11T13:08:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T09:50:56.000Z", "max_issues_repo_path": "rls/distribute/utils/numpy.py", "max_issues_repo_name": "kiminh/RLs", "max_issues_repo_head_hexsha": "a336b57e804507bca23cbadc3b5af1924c80d942", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rls/distribute/utils/numpy.py", "max_forks_repo_name": "kiminh/RLs", "max_forks_repo_head_hexsha": "a336b57e804507bca23cbadc3b5af1924c80d942", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.3513513514, "max_line_length": 61, "alphanum_fraction": 0.6438547486, "include": true, "reason": "import numpy", "num_tokens": 184}
|
"""
This code is used for plotting annual anomalies of radiative fluxes for the model mean of CMIP5 and CMIP6 models.
"""
import matplotlib.pyplot as plt
import xarray as xr
import numpy as np
import seaborn as sns
import pandas as pd
import scipy as sc
#=== Import SEB Anomalies ====
#from seasonal_SEB_components import *
#CMIP5
ACCESS = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/ACCESS_anomaly_annual.nc')
HADGEM = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/HADGEM_anomaly_annual.nc')
CSIRO = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/CSIRO_anomaly_annual.nc')
IPSL = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/IPSL_anomaly_annual.nc')
MIROC5 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/MIROC5_anomaly_annual.nc')
NORESM = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/NORESM_anomaly_annual.nc')
#CMIP6
CESM = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/CESM_anomaly_annual.nc')
CNRM_CM6 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/CNRM_CM6_anomaly_annual.nc')
CNRM_ESM2 = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/CNRM_ESM2_anomaly_annual.nc')
MRI = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/MRI_anomaly_annual.nc')
UKMO = xr.open_dataset('/projects/NS9600K/idunnam/Thesis/src/SEB_anomalies_annual/UKMO_anomaly_annual.nc')
#=== CMIP5 component model mean ===
def model_mean(mod):
return sum(mod)/ len(mod)
CMIP5_models = [ACCESS, HADGEM, CSIRO, IPSL, MIROC5, NORESM]
TT_CMIP5 = []
LWU_CMIP5 = []
LWD_CMIP5 = []
SWD_CMIP5 = []
ALB_CMIP5 = []
SW_net_CMIP5 = []
LW_net_CMIP5 = []
Net_rad_f_CMIP5 = []
for i in range(len(CMIP5_models)):
TT_CM5 = CMIP5_models[i].TT.mean(dim=["X10_105","Y21_199"])
LWU_CM5 = CMIP5_models[i].LWU.mean(dim=["X10_105","Y21_199"]) *(-1)
LWD_CM5 = CMIP5_models[i].LWD.mean(dim=["X10_105","Y21_199"])
SWD_CM5 = CMIP5_models[i].SWD.mean(dim=["X10_105","Y21_199"])
ALB_CM5 = CMIP5_models[i].AL2.mean(dim=["X10_105","Y21_199"])
SW_net_CM5 = CMIP5_models[i].SW_net.mean(dim=["X10_105","Y21_199"])
LW_net_CM5 = CMIP5_models[i].LW_net.mean(dim=["X10_105","Y21_199"])
Net_rad_f_CM5 = CMIP5_models[i].NET_rad_f.mean(dim=["X10_105","Y21_199"])
TT_CMIP5.append(TT_CM5)
SWD_CMIP5.append(SWD_CM5)
LWU_CMIP5.append(LWU_CM5)
LWD_CMIP5.append(LWD_CM5)
ALB_CMIP5.append(ALB_CM5)
SW_net_CMIP5.append(SW_net_CM5)
LW_net_CMIP5.append(LW_net_CM5)
Net_rad_f_CMIP5.append(Net_rad_f_CM5)
TT_CMIP5 = model_mean(TT_CMIP5)
SWD_CMIP5 = model_mean(SWD_CMIP5)
LWU_CMIP5 = model_mean(LWU_CMIP5)
LWD_CMIP5 = model_mean(LWD_CMIP5)
ALB_CMIP5 = model_mean(ALB_CMIP5)
SW_net_CMIP5 = model_mean(SW_net_CMIP5)
LW_net_CMIP5 = model_mean(LW_net_CMIP5)
Net_rad_f_CMIP5 = model_mean(Net_rad_f_CMIP5)
SEB_var_CMIP5 = [LWU_CMIP5, LWD_CMIP5, SWD_CMIP5, SW_net_CMIP5, LW_net_CMIP5, Net_rad_f_CMIP5]
#=== CMIP6 component model mean ===
CMIP6_models = [CESM, CNRM_CM6, CNRM_ESM2, MRI, UKMO]
TT_CMIP6 = []
LWU_CMIP6 = []
LWD_CMIP6 = []
SWD_CMIP6 = []
ALB_CMIP6 = []
SW_net_CMIP6 = []
LW_net_CMIP6 = []
Net_rad_f_CMIP6 = []
for i in range(len(CMIP6_models)):
TT_CM6 = CMIP6_models[i].TT.mean(dim=["X10_105","Y21_199"])
LWU_CM6 = CMIP6_models[i].LWU.mean(dim=["X10_105","Y21_199"]) *(-1)
LWD_CM6 = CMIP6_models[i].LWD.mean(dim=["X10_105","Y21_199"])
SWD_CM6 = CMIP6_models[i].SWD.mean(dim=["X10_105","Y21_199"])
ALB_CM6 = CMIP6_models[i].AL2.mean(dim=["X10_105","Y21_199"])
SW_net_CM6 = CMIP6_models[i].SW_net.mean(dim=["X10_105","Y21_199"])
LW_net_CM6 = CMIP6_models[i].LW_net.mean(dim=["X10_105","Y21_199"])
Net_rad_f_CM6 = CMIP6_models[i].NET_rad_f.mean(dim=["X10_105","Y21_199"])
TT_CMIP6.append(TT_CM6)
SWD_CMIP6.append(SWD_CM6)
LWU_CMIP6.append(LWU_CM6)
LWD_CMIP6.append(LWD_CM6)
ALB_CMIP6.append(ALB_CM6)
SW_net_CMIP6.append(SW_net_CM6)
LW_net_CMIP6.append(LW_net_CM6)
Net_rad_f_CMIP6.append(Net_rad_f_CM6)
TT_CMIP6 = model_mean(TT_CMIP6)
SWD_CMIP6 = model_mean(SWD_CMIP6)
LWU_CMIP6 = model_mean(LWU_CMIP6)
LWD_CMIP6 = model_mean(LWD_CMIP6)
ALB_CMIP6 = model_mean(ALB_CMIP6)
SW_net_CMIP6 = model_mean(SW_net_CMIP6)
LW_net_CMIP6 = model_mean(LW_net_CMIP6)
Net_rad_f_CMIP6 = model_mean(Net_rad_f_CMIP6)
SEB_var_CMIP6 = [LWU_CMIP6, LWD_CMIP6, SWD_CMIP6, SW_net_CMIP6, LW_net_CMIP6,Net_rad_f_CMIP6]
SEB_var_label = ['LWU','LWD','SWD','SW$_{net}$', 'LW$_{net}$','Net energy flux']
# ==== REGRESSION =====
# CMIP5
TT_reg_CM5 = TT_CMIP5.to_dataframe()
LWU_reg_CM5 = LWU_CMIP5.to_dataframe()
LWD_reg_CM5 = LWD_CMIP5.to_dataframe()
SWD_reg_CM5 = SWD_CMIP5.to_dataframe()
ALB_reg_CM5 = ALB_CMIP5.to_dataframe()
SW_net_reg_CM5 = SW_net_CMIP5.to_dataframe()
LW_net_reg_CM5 = LW_net_CMIP5.to_dataframe()
Net_rad_f_reg_CM5 = Net_rad_f_CMIP5.to_dataframe()
#CMIP6
TT_reg_CM6 = TT_CMIP6.to_dataframe()
LWU_reg_CM6 = LWU_CMIP6.to_dataframe()
LWD_reg_CM6 = LWD_CMIP6.to_dataframe()
SWD_reg_CM6 = SWD_CMIP6.to_dataframe()
ALB_reg_CM6 = ALB_CMIP6.to_dataframe()
SW_net_reg_CM6 = SW_net_CMIP6.to_dataframe()
LW_net_reg_CM6 = LW_net_CMIP6.to_dataframe()
Net_rad_f_reg_CM6 = Net_rad_f_CMIP6.to_dataframe()
### CMIP5 ###
x_CM5 = TT_reg_CM5['TT']
y1_CM5 = LWU_reg_CM5['LWU']
y2_CM5 = LWD_reg_CM5['LWD']
y3_CM5 = SWD_reg_CM5['SWD']
y4_CM5 = ALB_reg_CM5['AL2']
y5_CM5 = SW_net_reg_CM5['SW_net']
y7_CM5 = LW_net_reg_CM5['LW_net']
y6_CM5 = Net_rad_f_reg_CM5['NET_rad_f']
coeff_CM5 = np.polyfit(x_CM5, y1_CM5,2)
poly1_CM5 = np.poly1d(coeff_CM5)
coeff2_CM5 = np.polyfit(x_CM5, y2_CM5, 2)
poly2_CM5 = np.poly1d(coeff2_CM5)
coeff3_CM5 = np.polyfit(x_CM5, y3_CM5, 2)
poly3_CM5 = np.poly1d(coeff3_CM5)
coeff4_CM5 = np.polyfit(x_CM5, y4_CM5, 2)
poly4_CM5 = np.poly1d(coeff4_CM5)
coeff5_CM5 = np.polyfit(x_CM5, y5_CM5, 2)
poly5_CM5 = np.poly1d(coeff5_CM5)
coeff7_CM5 = np.polyfit(x_CM5, y7_CM5, 2)
poly7_CM5 = np.poly1d(coeff7_CM5)
coeff6_CM5 = np.polyfit(x_CM5, y6_CM5, 2)
poly6_CM5 = np.poly1d(coeff6_CM5)
t = np.sort(TT_CMIP5)
curve_x_CM5 = np.linspace(t[0], t[-1])
curve_y1_CM5 = poly1_CM5(curve_x_CM5)
curve_y2_CM5 = poly2_CM5(curve_x_CM5)
curve_y3_CM5 = poly3_CM5(curve_x_CM5)
curve_y4_CM5 = poly4_CM5(curve_x_CM5)
curve_y5_CM5 = poly5_CM5(curve_x_CM5)
curve_y7_CM5 = poly7_CM5(curve_x_CM5)
curve_y6_CM5 = poly6_CM5(curve_x_CM5)
### CMIP6 ###
x_CM6 = TT_reg_CM6['TT']
y1_CM6 = LWU_reg_CM6['LWU']
y2_CM6 = LWD_reg_CM6['LWD']
y3_CM6 = SWD_reg_CM6['SWD']
y4_CM6 = ALB_reg_CM6['AL2']
y5_CM6 = SW_net_reg_CM6['SW_net']
y7_CM6 = LW_net_reg_CM6['LW_net']
y6_CM6 = Net_rad_f_reg_CM6['NET_rad_f']
coeff_CM6 = np.polyfit(x_CM6, y1_CM6,2)
poly1_CM6 = np.poly1d(coeff_CM6)
coeff2_CM6 = np.polyfit(x_CM6, y2_CM6, 2)
poly2_CM6 = np.poly1d(coeff2_CM6)
coeff3_CM6 = np.polyfit(x_CM6, y3_CM6, 2)
poly3_CM6 = np.poly1d(coeff3_CM6)
coeff4_CM6 = np.polyfit(x_CM6, y4_CM6, 2)
poly4_CM6 = np.poly1d(coeff4_CM6)
coeff5_CM6 = np.polyfit(x_CM6, y5_CM6, 2)
poly5_CM6 = np.poly1d(coeff5_CM6)
coeff7_CM6 = np.polyfit(x_CM6, y7_CM6, 2)
poly7_CM6 = np.poly1d(coeff7_CM6)
coeff6_CM6 = np.polyfit(x_CM6, y6_CM6, 2)
poly6_CM6 = np.poly1d(coeff6_CM6)
t = np.sort(TT_CMIP6)
curve_x_CM6 = np.linspace(t[0], t[-1])
curve_y1_CM6 = poly1_CM6(curve_x_CM6)
curve_y2_CM6 = poly2_CM6(curve_x_CM6)
curve_y3_CM6 = poly3_CM6(curve_x_CM6)
curve_y4_CM6 = poly4_CM6(curve_x_CM6)
curve_y5_CM6 = poly5_CM6(curve_x_CM6)
curve_y7_CM6 = poly7_CM6(curve_x_CM6)
curve_y6_CM6 = poly6_CM6(curve_x_CM6)
#fig.savefig('/projects/NS9600K/idunnam/src/Figures/SEB_rad_flux_anomalies_CMIP5_CMIP6_JJA.png')
#==========================================================================================
#==========================================================================================
plt.rcParams.update({
"text.usetex": True,
"font.family": 'DejaVu Sans',
"font.serif": ["Computer Modern Roman"],
"font.size": 22})
#== JOINT PLOT CM5 & CM6 ==
plt.figure(figsize= (10,10))
plt.xlabel('Near-surface Temperature anomalies [$^\circ$C]', fontsize = 20)
plt.ylabel('Annual Surface energy flux anomalies [Wm$^{-2}$]', fontsize = 20)
color_CM5 = ['darkolivegreen', 'firebrick','indigo','darkorange', 'steelblue','dimgrey']
label_CM5 = ['LWU - CMIP5','LWD - CMIP5', 'SWD - CMIP5', 'SW$_{net}$- CMIP5','LW$_{net}$- CMIP5','Net radiative flux - CMIP5' ]
for i in range(len(SEB_var_CMIP5)):
plt.scatter(TT_CMIP5, SEB_var_CMIP5[i], label= label_CM5[i], s=22, color = color_CM5[i])
plt.plot(curve_x_CM5, curve_y1_CM5, color ='darkolivegreen') ### TEST
plt.plot(curve_x_CM5, curve_y2_CM5, color ='firebrick') ### TEST
plt.plot(curve_x_CM5, curve_y3_CM5, color ='indigo') ### TEST
plt.plot(curve_x_CM5, curve_y5_CM5, color ='darkorange') ### TEST
plt.plot(curve_x_CM5, curve_y7_CM5, color ='steelblue') ### LW_net
plt.plot(curve_x_CM5, curve_y6_CM5, color = 'dimgrey')
color_CM6 = ['yellowgreen','lightcoral','mediumpurple', 'sandybrown','lightskyblue','darkgrey']
label_CM6 = ['LWU - CMIP6','LWD - CMIP6', 'SWD - CMIP6', 'SW$_{net}$- CMIP6','LW$_{net}$- CMIP6', 'Net radiative flux - CMIP6' ]
for i in range(len(SEB_var_CMIP6)):
plt.scatter(TT_CMIP6, SEB_var_CMIP6[i] ,label = label_CM6[i], s=80, marker='+',color = color_CM6[i])
plt.plot(curve_x_CM6, curve_y1_CM6, '--', color ='yellowgreen') ### TEST
plt.plot(curve_x_CM6, curve_y2_CM6, '--',color ='lightcoral') ### TEST
plt.plot(curve_x_CM6, curve_y3_CM6, '--', color ='mediumpurple') ### TEST
plt.plot(curve_x_CM6, curve_y5_CM6, '--', color ='sandybrown') ### TEST
plt.plot(curve_x_CM6, curve_y7_CM6, '--', color ='lightskyblue') ### LW_net
plt.plot(curve_x_CM6, curve_y6_CM6, '--', color = 'darkgrey')
plt.ylim(-40,40)
#-#-#- FANCY LEGEND -#-#-#
import matplotlib.lines as mlines
from matplotlib.legend_handler import HandlerBase
class AnyObjectHandler(HandlerBase):
def create_artists(self, legend, orig_handle,
x0, y0, width, height, fontsize, trans):
LWU_cm5 = plt.Line2D([x0,y0+width], [0.7*height,0.7*height],
color='darkolivegreen')
LWU_cm6 = plt.Line2D([x0,y0+width], [0.3*height,0.3*height], linestyle='--', color='yellowgreen')
return [LWU_cm5, LWU_cm6]
class AnyObjectHandler2(HandlerBase):
def create_artists(self, legend, orig_handle,
x0, y0, width, height, fontsize, trans):
LWD_cm5 = plt.Line2D([x0,y0+width], [0.7*height,0.7*height],
color='firebrick')
LWD_cm6 = plt.Line2D([x0,y0+width], [0.3*height,0.3*height], linestyle='--', color='lightcoral')
return [LWD_cm5, LWD_cm6]
class AnyObjectHandler3(HandlerBase):
def create_artists(self, legend, orig_handle,
x0, y0, width, height, fontsize, trans):
SWD_cm5 = plt.Line2D([x0,y0+width], [0.7*height,0.7*height],
color='indigo')
SWD_cm6 = plt.Line2D([x0,y0+width], [0.3*height,0.3*height], linestyle='--', color='mediumpurple')
return [SWD_cm5, SWD_cm6]
class AnyObjectHandler4(HandlerBase):
def create_artists(self, legend, orig_handle,
x0, y0, width, height, fontsize, trans):
SW_net_cm5 = plt.Line2D([x0,y0+width], [0.7*height,0.7*height],
color='darkorange')
SW_net_cm6 = plt.Line2D([x0,y0+width], [0.3*height,0.3*height], linestyle='--', color='sandybrown')
return [SW_net_cm5, SW_net_cm6]
class AnyObjectHandler5(HandlerBase):
def create_artists(self, legend, orig_handle,
x0, y0, width, height, fontsize, trans):
LW_net_cm5 = plt.Line2D([x0,y0+width], [0.7*height,0.7*height],
color='steelblue')
LW_net_cm6 = plt.Line2D([x0,y0+width], [0.3*height,0.3*height], linestyle='--', color='lightskyblue')
return [LW_net_cm5, LW_net_cm6]
class AnyObjectHandler6(HandlerBase):
def create_artists(self, legend, orig_handle,
x0, y0, width, height, fontsize, trans):
NET_rad_cm5 = plt.Line2D([x0,y0+width], [0.7*height,0.7*height],
color='dimgrey')
NET_rad_cm6 = plt.Line2D([x0,y0+width], [0.3*height,0.3*height], linestyle='--', color='darkgrey')
return [NET_rad_cm5, NET_rad_cm6]
class AnyObjectHandler7(HandlerBase):
def create_artists(self, legend, orig_handle,
x0, y0, width, height, fontsize, trans):
cm5_dott = mlines.Line2D([11],[3], color='black', marker='o', markersize=7, label='MAR CMIP5')
return [cm5_dott]
class AnyObjectHandler8(HandlerBase):
def create_artists(self, legend, orig_handle,
x0, y0, width, height, fontsize, trans):
cm6_cross = mlines.Line2D([11],[3], color='black', marker='+', markersize=9, label='MAR CMIP6')
return [cm6_cross]
object1 = HandlerBase()
object2 = HandlerBase()
object3 = HandlerBase()
object4 = HandlerBase()
object5 = HandlerBase()
object6 = HandlerBase()
object7 = HandlerBase()
object8 = HandlerBase()
plt.legend([object1,object2, object3, object4, object5, object6, object7, object8], ['LWU','LWD', 'SWD', 'SW$_{net}$','LW$_{net}$', 'Net radiative flux','MAR CMIP5','MAR CMIP6'],
handler_map={object1: AnyObjectHandler(),
object2:AnyObjectHandler2(),
object3:AnyObjectHandler3(),
object4:AnyObjectHandler4(),
object5:AnyObjectHandler5(),
object6:AnyObjectHandler6(),
object7:AnyObjectHandler7(),
object8:AnyObjectHandler8()},
fontsize=16,frameon=False,ncol=3, loc='upper left')
#-#-#-#-#-#-#-#-#-#-#-#-#-#
#Imports
import matplotlib.patches as mpatches
###sns.set_palette('colorblind')
sns.despine()
#plt.legend(ncol=2)
plt.savefig('/projects/NS9600K/idunnam/Thesis/src/Figures/SEB_components/SEB_rad_flux_anomalies_jointCM5CM6_annual.pdf',bbox_inches='tight',dpi=300)  # save before show() so the written file is not empty with some backends
plt.show()
#==========================================================================================
#== ALBEDO ==
plt.figure(figsize=(10,10))
plt.scatter(TT_CMIP5, ALB_CMIP5, label= 'ALB - CMIP5', s=22, color='saddlebrown')
plt.xlabel('Near-surface Temperature anomalies [$^\circ$C]', fontsize = 20)
plt.ylabel('Annual Albedo anomalies', fontsize = 20)
plt.plot(curve_x_CM5, curve_y4_CM5, color='saddlebrown') ### TEST
plt.scatter(TT_CMIP6, ALB_CMIP6, label='ALB - CMIP6', s=80, marker = '+', color='tan')
#plt.title('Seasonal ('+season+') Albedo anomalies \n Model Mean of CMIP5 vs. CMIP6 MAR simulations', fontsize = 16)
#plt.legend(loc='upper right')
####
from matplotlib.legend_handler import HandlerBase
class AnyObjectHandler9(HandlerBase):
def create_artists(self, legend, orig_handle,
x0, y0, width, height, fontsize, trans):
l1 = plt.Line2D([x0,y0+width], [0.3*height,0.3*height], color='saddlebrown')
l2 = plt.Line2D([x0,y0+width], [0.7*height,0.7*height],
linestyle='--', color='tan')
return [l1, l2]
class AnyObjectHandler10(HandlerBase):
def create_artists(self, legend, orig_handle,
x0, y0, width, height, fontsize, trans):
cm5_dott = mlines.Line2D([11],[3], color='black', marker='o', markersize=7, label='MAR CMIP5')
return [cm5_dott]
class AnyObjectHandler11(HandlerBase):
def create_artists(self, legend, orig_handle,
x0, y0, width, height, fontsize, trans):
cm6_cross = mlines.Line2D([11],[3], color='black', marker='+', markersize=9, label='MAR CMIP6')
return [cm6_cross]
object1 = HandlerBase()
object2 = HandlerBase()
object3 = HandlerBase()
plt.legend([object1, object2, object3], ['Albedo', 'MAR CMIP5', 'MAR CMIP6'],
handler_map={object1: AnyObjectHandler9(),
object2:AnyObjectHandler10(),
object3:AnyObjectHandler11()},
fontsize=16,frameon=False,ncol=1, loc='upper right')
####
plt.plot(curve_x_CM6, curve_y4_CM6, '--', color ='tan') ### TEST
sns.set_palette('colorblind')
sns.despine()
plt.savefig('/projects/NS9600K/idunnam/Thesis/src/Figures/SEB_components/Albedo_anomalies_JOINT_CMIP5_CMIP6_annual.pdf',bbox_inches='tight',dpi=300)  # save before show() so the written file is not empty with some backends
plt.show()
#==========================================================================================
#==========================================================================================
#Printing Specific values of SEB components for given near-surface temperature (TAS)
for TAS in range(1,6):
    print('annual')  # this script covers annual (not seasonal) anomalies, so no `season` variable is defined
print('TAS:', TAS)
print('CMIP5', 'LWU:', np.round(poly1_CM5(TAS),2),
'LWD:',np.round(poly2_CM5(TAS),2),
'LW_net:', np.round(poly7_CM5(TAS),2),
'SWD:',np.round(poly3_CM5(TAS),2),
'SW_net:',np.round(poly5_CM5(TAS),2),
'Net_rad_f:', np.round(poly6_CM5(TAS),2),
'ALB:', np.round(poly4_CM5(TAS)*100,2))
print('CMIP6', 'LWU:', np.round(poly1_CM6(TAS),2),
'LWD:',np.round(poly2_CM6(TAS),2),
'LW_net:', np.round(poly7_CM6(TAS),2),
'SWD:',np.round(poly3_CM6(TAS),2),
'SW_net:',np.round(poly5_CM6(TAS),2),
'Net_rad_f:', np.round(poly6_CM6(TAS),2),
'ALB:', np.round(poly4_CM6(TAS)*100,2))
|
{"hexsha": "18ef83c1f753fe7adab926cd75f2860562adb06f", "size": 17854, "ext": "py", "lang": "Python", "max_stars_repo_path": "plot_scripts/SEB_rad_flux_annual.py", "max_stars_repo_name": "idunnam/Thesis", "max_stars_repo_head_hexsha": "a567a25aa037c949de285158804a6ee396fc0e6c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plot_scripts/SEB_rad_flux_annual.py", "max_issues_repo_name": "idunnam/Thesis", "max_issues_repo_head_hexsha": "a567a25aa037c949de285158804a6ee396fc0e6c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-28T13:12:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-28T13:12:26.000Z", "max_forks_repo_path": "plot_scripts/SEB_rad_flux_annual.py", "max_forks_repo_name": "idunnam/Thesis", "max_forks_repo_head_hexsha": "a567a25aa037c949de285158804a6ee396fc0e6c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.485260771, "max_line_length": 178, "alphanum_fraction": 0.6584518875, "include": true, "reason": "import numpy,import scipy", "num_tokens": 5968}
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import re
import jax
from jax.experimental.compilation_cache.file_system_cache import FileSystemCache
from jax.lib import xla_client
from absl import logging
from typing import Optional
_cache = None
def initialize_cache(path, max_cache_size_bytes=32 * 2**30):
"""Creates a global cache object. Should only be called once per process.
  max_cache_size_bytes defaults to 32GiB.
"""
global _cache
  assert _cache is None, f"The cache path has already been initialized to {_cache._path}"
_cache = FileSystemCache(path, max_cache_size_bytes)
logging.warning(f"Initialized persistent compilation cache at {path}")
def get_executable(xla_computation, compile_options, backend) -> Optional[xla_client.Executable]:
"""Returns the cached executable if present, or None otherwise."""
assert _cache is not None, "initialize_cache must be called before you can call get_executable()"
cache_key = get_cache_key(xla_computation, compile_options, backend)
xla_executable_serialized = _cache.get(cache_key)
if not xla_executable_serialized:
return None
# TODO(skye): xla_computation.get_hlo_module() is the unoptimized HLO but it should
  # be optimized
xla_executable_deserialized = backend.deserialize_executable(
xla_executable_serialized,
xla_computation.get_hlo_module(),
compile_options)
return xla_executable_deserialized
def put_executable(xla_computation, compile_options, executable: xla_client.Executable,
backend):
"""Adds 'executable' to the cache, possibly evicting older entries."""
assert _cache is not None, "initialize_cache must be called before you can call put_executable()"
cache_key = get_cache_key(xla_computation, compile_options, backend)
serialized_executable = backend.serialize_executable(executable)
_cache.put(cache_key, serialized_executable)
def get_cache_key(xla_computation, compile_options, backend) -> str:
"""Creates a hashed string to use as a key to the compilation cache.
get_cache_key takes in the xla_computation and compile_options of a program and hashes
  all the components into a unique byte string. This byte string is returned as a regular
  hex string that is 64 characters long (a 256-bit SHA-256 digest).
Typical return value example:
'14ac577cdb2ef6d986078b4054cc9893a9a14a16dbb0d8f37b89167c1f1aacdf'
"""
hash_obj = hashlib.sha256()
# The HLO op_name metadata sometimes includes Python function pointers,
# which cause spurious cache misses. Scrub anything that looks like a
# function pointer. Example op_name metadata:
# op_name="jit(s)/custom_jvp_call_jaxpr
# [ jvp_jaxpr_thunk=<function _memoize.<locals>.memoized at 0x7f3fa30f0940>\n
# num_consts=0 ]"
# TODO(skye): in theory this could cause us to scrub meaningful binary proto
# data. Do something more robust.
serialized_hlo = xla_computation.as_serialized_hlo_module_proto()
scrubbed_hlo = re.sub(b" at 0x[a-f0-9]+>", b" at 0x...>", serialized_hlo)
hash_obj.update(scrubbed_hlo)
if logging.vlog_is_on(1):
logging.vlog(1, f"get_cache_key hash after serializing computation: {hash_obj.digest().hex()}")
_hash_compile_options(hash_obj, compile_options)
if logging.vlog_is_on(1):
logging.vlog(1, f"get_cache_key hash after serializing compile_options: {hash_obj.digest().hex()}")
hash_obj.update(bytes(jax.lib.version))
if logging.vlog_is_on(1):
logging.vlog(1, f"get_cache_key hash after serializing jax_lib version: {hash_obj.digest().hex()}")
_hash_platform(hash_obj, backend)
if logging.vlog_is_on(1):
logging.vlog(1, f"get_cache_key hash after serializing the backend: {hash_obj.digest().hex()}")
return hash_obj.digest().hex()
def _hash_compile_options(hash_obj, compile_options_obj):
  assert len(dir(compile_options_obj)) == 31, (f"Unexpected number of CompileOption fields: "
                                               f"{len(dir(compile_options_obj))}. This likely means that an extra "
                                               f"field was added, and this function needs to be updated.")
if compile_options_obj.argument_layouts is not None:
map(lambda shape: hash_obj.update(shape.to_serialized_proto()),
compile_options_obj.argument_layouts)
_hash_int(hash_obj, compile_options_obj.parameter_is_tupled_arguments)
_hash_executable_build_options(hash_obj, compile_options_obj.executable_build_options)
_hash_bool(hash_obj, compile_options_obj.tuple_arguments)
_hash_int(hash_obj, compile_options_obj.num_replicas)
_hash_int(hash_obj, compile_options_obj.num_partitions)
if compile_options_obj.device_assignment is not None:
hash_obj.update(compile_options_obj.device_assignment.serialize())
def _hash_executable_build_options(hash_obj, executable_obj):
assert len(dir(executable_obj)) == 30, (f"Unexpected number of executable_build_options fields: "
f"{len(dir(executable_obj))}. This likely means that an extra "
f"field was added, and this function needs to be updated.")
if executable_obj.result_layout is not None:
hash_obj.update(executable_obj.result_layout.to_serialized_proto())
_hash_int(hash_obj, executable_obj.num_replicas)
_hash_int(hash_obj, executable_obj.num_partitions)
_hash_debug_options(hash_obj, executable_obj.debug_options)
if executable_obj.device_assignment is not None:
hash_obj.update(executable_obj.device_assignment.serialize())
_hash_bool(hash_obj, executable_obj.use_spmd_partitioning)
def _hash_debug_options(hash_obj, debug_obj):
_hash_bool(hash_obj, debug_obj.xla_cpu_enable_fast_math)
_hash_bool(hash_obj, debug_obj.xla_cpu_fast_math_honor_infs)
_hash_bool(hash_obj, debug_obj.xla_cpu_fast_math_honor_nans)
_hash_bool(hash_obj, debug_obj.xla_cpu_fast_math_honor_division)
_hash_bool(hash_obj, debug_obj.xla_cpu_fast_math_honor_functions)
_hash_bool(hash_obj, debug_obj.xla_gpu_enable_fast_min_max)
_hash_int(hash_obj, debug_obj.xla_backend_optimization_level)
_hash_bool(hash_obj, debug_obj.xla_cpu_enable_xprof_traceme)
_hash_bool(hash_obj, debug_obj.xla_llvm_disable_expensive_passes)
_hash_bool(hash_obj, debug_obj.xla_test_all_input_layouts)
def _hash_platform(hash_obj, backend):
_hash_string(hash_obj, backend.platform)
_hash_string(hash_obj, backend.platform_version)
_hash_string(hash_obj, backend.runtime_type)
def _hash_int(hash_obj, int_var):
hash_obj.update(int_var.to_bytes(8, byteorder='big'))
def _hash_bool(hash_obj, bool_var):
hash_obj.update(bool_var.to_bytes(1, byteorder='big'))
def _hash_string(hash_obj, str_var):
hash_obj.update(str_var.encode('utf-8').strip())
def is_initialized():
return _cache is not None
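# --- Illustrative usage sketch (added to this excerpt; not part of the original module) ---
# Only initialize_cache() is meant to be called from user code; get_executable/put_executable
# are invoked from JAX's own compilation path. A hypothetical opt-in (the directory is an
# example path, and the hookup into jit compilation is assumed to live elsewhere in JAX):
#
#   from jax.experimental.compilation_cache import compilation_cache as cc
#   cc.initialize_cache("/tmp/jax_compilation_cache")
#   # subsequent compilations of jax.jit-ed functions may then be served from the cache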
|
{"hexsha": "9c5bff552a8ce2f0f9911588c5e77a7b2ea7d1b5", "size": 7447, "ext": "py", "lang": "Python", "max_stars_repo_path": "jax/experimental/compilation_cache/compilation_cache.py", "max_stars_repo_name": "manifest/jax", "max_stars_repo_head_hexsha": "d82341d95f418fe2a03cfe691b21813e4309eff7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-14T07:12:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-14T07:12:46.000Z", "max_issues_repo_path": "jax/experimental/compilation_cache/compilation_cache.py", "max_issues_repo_name": "manifest/jax", "max_issues_repo_head_hexsha": "d82341d95f418fe2a03cfe691b21813e4309eff7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2022-01-03T23:08:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T23:09:23.000Z", "max_forks_repo_path": "jax/experimental/compilation_cache/compilation_cache.py", "max_forks_repo_name": "manifest/jax", "max_forks_repo_head_hexsha": "d82341d95f418fe2a03cfe691b21813e4309eff7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-11T20:57:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-11T20:57:59.000Z", "avg_line_length": 48.6732026144, "max_line_length": 107, "alphanum_fraction": 0.7565462602, "include": true, "reason": "import jax,from jax", "num_tokens": 1760}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Unit tests for finite_diff library
"""
import unittest
import random
from math import pi
import numpy as np
import finitediff
class TestFiniteDiff(unittest.TestCase):
"""Unit test class for the finitediff library"""
order = 4 # Order of the derivatives
def setUp(self):
"""Initialize a differentiator on a random grid"""
# Randomly pick some x values
numvals = 40
self.x = np.sort(np.array([random.uniform(0.0, 2*pi) for i in range(numvals)]))
# Create the differentiator on these x values
self.diff = finitediff.Derivative(TestFiniteDiff.order)
self.diff.set_x(self.x)
def test_order(self):
"""Make sure we can get the order out correctly"""
self.assertEqual(self.diff.get_order(), TestFiniteDiff.order)
def test_cos(self):
"""Test a cosine function with no boundary conditions"""
ycos = np.cos(self.x)
dycos = self.diff.dydx(ycos)
truevals = -np.sin(self.x)
self.compare_arrays(dycos, truevals)
def test_sin(self):
"""Test a sine function with no boundary conditions"""
ysin = np.sin(self.x)
dysin = self.diff.dydx(ysin)
truevals = np.cos(self.x)
self.compare_arrays(dysin, truevals)
def test_matrix(self):
"""Test a matrix function with no boundary conditions"""
ysin = np.sin(self.x)
ycos = np.cos(self.x)
# pylint: disable=no-member
test = np.array([ysin, ycos]).transpose()
truevals = np.array([ycos, -ysin]).transpose()
# pylint: enable=no-member
dtest = self.diff.dydx(test)
self.assertTrue(np.all(np.abs(dtest - truevals) < 0.01))
def test_sin_odd(self):
"""Test a sine function with odd boundary conditions"""
self.diff.apply_boundary(-1)
ysin = np.sin(self.x)
dysin = self.diff.dydx(ysin)
truevals = np.cos(self.x)
self.compare_arrays(dysin, truevals)
def test_cos_even(self):
"""Test a cosine function with even boundary conditions"""
self.diff.apply_boundary(1)
ycos = np.cos(self.x)
dycos = self.diff.dydx(ycos)
truevals = -np.sin(self.x)
self.compare_arrays(dycos, truevals)
def compare_arrays(self, array1, array2):
"""Helper function to test equality of two arrays"""
for i, _ in enumerate(array1):
self.assertAlmostEqual(array1[i], array2[i], delta=0.01)
def test_conversion1(self):
"""Test converting boundary conditions"""
# pylint: disable=protected-access
oldstencil = self.diff._stencil.copy()
self.diff.set_x(self.x, 1)
newstencil = self.diff._stencil.copy()
self.diff.apply_boundary(0)
# Test converting to no boundary condition
self.assertTrue(np.all(self.diff._stencil == oldstencil))
# Test converting to even boundary condition
newdiff = finitediff.Derivative(TestFiniteDiff.order)
newdiff.set_x(self.x, 1)
self.assertTrue(np.all(newdiff._stencil == newstencil))
# Test converting to odd boundary condition
self.diff.set_x(self.x, -1)
newstencil = self.diff._stencil.copy()
newdiff = finitediff.Derivative(TestFiniteDiff.order)
newdiff.set_x(self.x, -1)
self.assertTrue(np.all(newdiff._stencil == newstencil))
def test_bad(self):
"""Make sure an error is raised appropriately"""
# pylint: disable=protected-access
# Insufficient gridpoints for order
with self.assertRaises(finitediff.DerivativeError):
self.diff.set_x(np.array([0.0, 0.5, 1.0]))
# Something has gone very wrong - stencil and gridpoints are
# out of alignment
with self.assertRaises(finitediff.DerivativeError):
test = finitediff.Derivative(TestFiniteDiff.order)
test.set_x(np.array([1.0, 2, 3, 4, 5, 6]))
test._xvals = np.array([1.0, 2.0, 3.0, 4.0])
test.apply_boundary()
# Various tests for no stencil
with self.assertRaises(finitediff.NoStencil):
test = finitediff.Derivative(TestFiniteDiff.order)
test.dydx(np.array([1, 2, 3]))
with self.assertRaises(finitediff.NoStencil):
test = finitediff.Derivative(TestFiniteDiff.order)
test.leftdydx(np.array([1, 2, 3]))
with self.assertRaises(finitediff.NoStencil):
test = finitediff.Derivative(TestFiniteDiff.order)
test.rightdydx(np.array([1, 2, 3]))
with self.assertRaises(finitediff.NoStencil):
test = finitediff.Derivative(TestFiniteDiff.order)
test.position_dydx(np.array([1, 2, 3]), 1)
with self.assertRaises(finitediff.NoStencil):
test = finitediff.Derivative(TestFiniteDiff.order)
test.get_xvals()
with self.assertRaises(finitediff.NoStencil):
test = finitediff.Derivative(TestFiniteDiff.order)
test.apply_boundary()
# xvals and yvals are out of alignment
with self.assertRaises(finitediff.DerivativeError):
test = finitediff.Derivative(TestFiniteDiff.order)
test.set_x(np.array([1.0, 2, 3, 4, 5, 6]))
test.dydx(np.array([1, 2, 3]))
with self.assertRaises(finitediff.DerivativeError):
test = finitediff.Derivative(TestFiniteDiff.order)
test.set_x(np.array([1.0, 2, 3, 4, 5, 6]))
test.leftdydx(np.array([1, 2, 3]))
with self.assertRaises(finitediff.DerivativeError):
test = finitediff.Derivative(TestFiniteDiff.order)
test.set_x(np.array([1.0, 2, 3, 4, 5, 6]))
test.rightdydx(np.array([1, 2, 3]))
with self.assertRaises(finitediff.DerivativeError):
test = finitediff.Derivative(TestFiniteDiff.order)
test.set_x(np.array([1.0, 2, 3, 4, 5, 6]))
test.position_dydx(np.array([1, 2, 3]), 3)
# Position out of bounds
with self.assertRaises(IndexError):
test = finitediff.Derivative(TestFiniteDiff.order)
test.set_x(np.array([1.0, 2, 3, 4, 5, 6]))
test.position_dydx(np.array([1, 2, 3, 4, 5, 6]), 7)
def test_copy(self):
"""Make sure things copy correctly"""
# pylint: disable=protected-access
# Make sure we get references
xref = self.diff.get_xvals(False)
self.x[0] += 1
self.assertTrue(xref[0] == self.x[0])
# Make sure we get copies!
self.diff.set_x(self.x, copy=True)
x = self.diff.get_xvals(True)
xref = self.diff.get_xvals(False)
x[0] += 1
self.assertFalse(xref[0] == x[0]) # get_xvals returned a copy
xref[0] += 1
self.assertFalse(xref[0] == self.x[0]) # set_x stored a copy
def test_positions(self):
"""Test that derivatives at all positions are correct"""
ysin = np.sin(self.x)
dysin = self.diff.dydx(ysin)
for i in range(len(ysin)):
self.assertAlmostEqual(dysin[i], self.diff.position_dydx(ysin, i), delta=1e-12)
self.assertAlmostEqual(dysin[0], self.diff.leftdydx(ysin), delta=1e-12)
self.assertAlmostEqual(dysin[-1], self.diff.rightdydx(ysin), delta=1e-12)
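# --- Illustrative usage sketch (added to this excerpt; not part of the original tests) ---
# The same calls exercised by the tests above, outside the unittest harness: build a
# Derivative of the requested order, hand it a sorted grid, then differentiate samples.
def _example_max_error(order=4, numvals=40):
    x = np.sort(np.array([random.uniform(0.0, 2 * pi) for _ in range(numvals)]))
    diff = finitediff.Derivative(order)
    diff.set_x(x)                  # optionally set_x(x, 1) / set_x(x, -1) for even/odd boundaries
    dy = diff.dydx(np.sin(x))      # numerical derivative of sin, should approximate cos
    return np.max(np.abs(dy - np.cos(x)))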
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "a65f39ef854ba9a4bb94c8cf08a568806963b135", "size": 7416, "ext": "py", "lang": "Python", "max_stars_repo_path": "finitediff/unit_tests.py", "max_stars_repo_name": "jolyonb/finitediff", "max_stars_repo_head_hexsha": "fb6d05490fcf8a7a7603e68aec165b9fb931ba3a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "finitediff/unit_tests.py", "max_issues_repo_name": "jolyonb/finitediff", "max_issues_repo_head_hexsha": "fb6d05490fcf8a7a7603e68aec165b9fb931ba3a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "finitediff/unit_tests.py", "max_forks_repo_name": "jolyonb/finitediff", "max_forks_repo_head_hexsha": "fb6d05490fcf8a7a7603e68aec165b9fb931ba3a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8367346939, "max_line_length": 91, "alphanum_fraction": 0.6187971953, "include": true, "reason": "import numpy", "num_tokens": 1903}
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# custom libs
import utils
import model_funcs as mf
class BasicBlockWOutput(nn.Module):
expansion = 1
def __init__(self, in_channels, channels, params, stride=1):
super(BasicBlockWOutput, self).__init__()
add_output = params[0]
num_classes = params[1]
input_size = params[2]
self.output_id = params[3]
self.depth = 2
layers = nn.ModuleList()
conv_layer = []
conv_layer.append(nn.Conv2d(in_channels, channels, kernel_size=3, stride=stride, padding=1, bias=False))
conv_layer.append(nn.BatchNorm2d(channels))
conv_layer.append(nn.ReLU())
conv_layer.append(nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1, bias=False))
conv_layer.append(nn.BatchNorm2d(channels))
layers.append(nn.Sequential(*conv_layer))
shortcut = nn.Sequential()
if stride != 1 or in_channels != self.expansion*channels:
shortcut = nn.Sequential(
nn.Conv2d(in_channels, self.expansion*channels, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*channels)
)
layers.append(shortcut)
layers.append(nn.ReLU())
self.layers = layers
if add_output:
self.output = utils.InternalClassifier(input_size, self.expansion*channels, num_classes)
self.no_output = False
else:
self.output = None
self.forward = self.only_forward
self.no_output = True
def forward(self, x):
fwd = self.layers[0](x) # conv layers
fwd = fwd + self.layers[1](x) # shortcut
return self.layers[2](fwd), 1, self.output(fwd) # output layers for this module
def only_output(self, x):
fwd = self.layers[0](x) # conv layers
fwd = fwd + self.layers[1](x) # shortcut
fwd = self.layers[2](fwd) # activation
out = self.output(fwd) # output layers for this module
return out
def only_forward(self, x):
fwd = self.layers[0](x) # conv layers
fwd = fwd + self.layers[1](x) # shortcut
return self.layers[2](fwd), 0, None # activation
class ResNet_SDN(nn.Module):
def __init__(self, params):
super(ResNet_SDN, self).__init__()
self.output_to_return_when_ICs_are_delayed = 'network_output' # 'network_output' or 'most_confident_output'
self.num_blocks = params['num_blocks']
self.num_classes = int(params['num_classes'])
self.augment_training = params['augment_training']
self.input_size = int(params['input_size'])
self.block_type = params['block_type']
self.add_out_nonflat = params['add_output']
self.add_output = [item for sublist in self.add_out_nonflat for item in sublist]
self.init_weights = params['init_weights']
# for vanilla training
self.train_func = mf.sdn_train
self.test_func = mf.sdn_test
        # for AT (adversarial training)
        self.advtrain_func = mf.sdn_advtrain
self.in_channels = 16
self.num_output = sum(self.add_output) + 1
self.init_depth = 1
self.end_depth = 1
self.cur_output_id = 0
if self.block_type == 'basic':
self.block = BasicBlockWOutput
init_conv = []
if self.input_size == 32: # cifar10
self.cur_input_size = self.input_size
init_conv.append(nn.Conv2d(3, self.in_channels, kernel_size=3, stride=1, padding=1, bias=False))
else: # tiny imagenet
self.cur_input_size = int(self.input_size/2)
init_conv.append(nn.Conv2d(3, self.in_channels, kernel_size=3, stride=2, padding=1, bias=False))
init_conv.append(nn.BatchNorm2d(self.in_channels))
init_conv.append(nn.ReLU())
self.init_conv = nn.Sequential(*init_conv)
self.layers = nn.ModuleList()
self.layers.extend(self._make_layer(self.in_channels, block_id=0, stride=1))
self.cur_input_size = int(self.cur_input_size/2)
self.layers.extend(self._make_layer(32, block_id=1, stride=2))
self.cur_input_size = int(self.cur_input_size/2)
self.layers.extend(self._make_layer(64, block_id=2, stride=2))
end_layers = []
end_layers.append(nn.AvgPool2d(kernel_size=8))
end_layers.append(utils.Flatten())
end_layers.append(nn.Linear(64*self.block.expansion, self.num_classes))
self.end_layers = nn.Sequential(*end_layers)
if self.init_weights:
self.initialize_weights()
def _make_layer(self, channels, block_id, stride):
num_blocks = int(self.num_blocks[block_id])
strides = [stride] + [1]*(num_blocks-1)
layers = []
for cur_block_id, stride in enumerate(strides):
add_output = self.add_out_nonflat[block_id][cur_block_id]
params = (add_output, self.num_classes, int(self.cur_input_size), self.cur_output_id)
layers.append(self.block(self.in_channels, channels, params, stride))
self.in_channels = channels * self.block.expansion
self.cur_output_id += add_output
return layers
def initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x, internal=False):
# forward pass w/o internal representations
if not internal:
outputs = []
fwd = self.init_conv(x)
for layer in self.layers:
fwd, is_output, output = layer(fwd)
if is_output:
outputs.append(output)
fwd = self.end_layers(fwd)
outputs.append(fwd)
return outputs
# forward pass w. internal representations
else:
outints = []
outputs = []
fwd = self.init_conv(x)
for layer in self.layers:
fwd, is_output, output = layer(fwd)
if is_output:
                    outints.append(fwd.detach().numpy()) # drop .numpy() here to keep tensors instead of NumPy arrays
outputs.append(output)
fwd = self.end_layers(fwd)
outputs.append(fwd)
return outputs, outints
def forward_w_acts(self, x):
acts = []
out = self.init_conv(x)
for layer in self.layers:
out = layer.only_forward(out)[0]
acts.append(out)
out = self.end_layers(out)
return acts, out
# takes a single input
def early_exit(self, x):
confidences = []
outputs = []
fwd = self.init_conv(x)
output_id = 0
for layer in self.layers:
fwd, is_output, output = layer(fwd)
if is_output:
outputs.append(output)
softmax = nn.functional.softmax(output[0], dim=0)
confidence = torch.max(softmax)
confidences.append(confidence)
if confidence >= self.confidence_threshold:
is_early = True
return output, output_id, is_early
output_id += is_output
output = self.end_layers(fwd)
outputs.append(output)
# 'network_output' or 'most_confident_output'
if self.output_to_return_when_ICs_are_delayed == 'most_confident_output':
softmax = nn.functional.softmax(output[0], dim=0)
confidence = torch.max(softmax)
confidences.append(confidence)
max_confidence_output = np.argmax(confidences)
is_early = False
return outputs[max_confidence_output], max_confidence_output, is_early
elif self.output_to_return_when_ICs_are_delayed == 'network_output':
is_early = False
return output, output_id, is_early
else:
            raise RuntimeError('Invalid value for "output_to_return_when_ICs_are_delayed": it should be "network_output" or "most_confident_output"')
def get_prediction_and_confidences(self, x):
outputs = self.forward(x.cpu())
logits, confs, max_confs, labels = [], [], [], []
for out in outputs:
softmax = nn.functional.softmax(out[0].cpu(), dim=0)
pred_label = out.max(1)[1].item()
pred_conf = torch.max(softmax).item()
logits.append(list(out[0].cpu().detach().numpy()))
confs.append(list(softmax.cpu().detach().numpy()))
max_confs.append(pred_conf)
labels.append(pred_label)
return logits, confs, max_confs, labels
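# --- Illustrative usage sketch (not part of the original DeepSloth file) ---
# A minimal example of driving the early-exit path above. The params dict below is a
# hypothetical placeholder for a CIFAR-10-sized SDN (the real dicts are built elsewhere
# in the repository), and `confidence_threshold` is assumed to be set externally, since
# __init__ does not define it. The function is never called from this module.
def _example_early_exit_sketch():
    params = {
        'num_blocks': [9, 9, 9],                   # assumed ResNet-56-style layout
        'num_classes': 10,
        'augment_training': False,
        'input_size': 32,
        'block_type': 'basic',
        'add_output': [[0, 0, 1, 0, 0, 1, 0, 0, 1],
                       [0, 0, 1, 0, 0, 1, 0, 0, 1],
                       [0, 0, 1, 0, 0, 1, 0, 0, 0]],  # hypothetical IC placement
        'init_weights': True,
    }
    model = ResNet_SDN(params)
    model.eval()
    model.confidence_threshold = 0.9               # must be assigned before early_exit()
    x = torch.randn(1, 3, 32, 32)                  # early_exit() expects a single input
    with torch.no_grad():
        output, output_id, is_early = model.early_exit(x)
    return output, output_id, is_early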
|
{"hexsha": "2401076cdf2cf6907b78559096b790608881b365", "size": 8996, "ext": "py", "lang": "Python", "max_stars_repo_path": "networks/SDNs/ResNet_SDN.py", "max_stars_repo_name": "Sanghyun-Hong/DeepSloth", "max_stars_repo_head_hexsha": "92b3d0d3ef3f974d8bce7b4b4a1828776227e3c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-12-16T04:55:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T08:28:11.000Z", "max_issues_repo_path": "networks/SDNs/ResNet_SDN.py", "max_issues_repo_name": "Sanghyun-Hong/DeepSloth", "max_issues_repo_head_hexsha": "92b3d0d3ef3f974d8bce7b4b4a1828776227e3c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "networks/SDNs/ResNet_SDN.py", "max_forks_repo_name": "Sanghyun-Hong/DeepSloth", "max_forks_repo_head_hexsha": "92b3d0d3ef3f974d8bce7b4b4a1828776227e3c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-11T06:21:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-11T06:21:04.000Z", "avg_line_length": 35.557312253, "max_line_length": 154, "alphanum_fraction": 0.6074922188, "include": true, "reason": "import numpy", "num_tokens": 2040}
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import random
import cv2
import numpy as np
from yolox.utils import adjust_box_anns
from ..data_augment import box_candidates, random_perspective
from .datasets_wrapper import Dataset
class MosaicDetection(Dataset):
"""Detection dataset wrapper that performs mixup for normal dataset.
Parameters
----------
dataset : Pytorch Dataset
Gluon dataset object.
*args : list
Additional arguments for mixup random sampler.
"""
def __init__(
self, dataset, img_size, mosaic=True, preproc=None,
degrees=10.0, translate=0.1, scale=(0.5, 1.5), mscale=(0.5, 1.5),
shear=2.0, perspective=0.0, enable_mixup=True, *args
):
super().__init__(img_size, mosaic=mosaic)
self._dataset = dataset
self.preproc = preproc
self.degrees = degrees
self.translate = translate
self.scale = scale
self.shear = shear
self.perspective = perspective
self.mixup_scale = mscale
self._mosaic = mosaic
self.enable_mixup = enable_mixup
def __len__(self):
return len(self._dataset)
@Dataset.resize_getitem
def __getitem__(self, idx):
if self._mosaic:
labels4 = []
input_dim = self._dataset.input_dim
# yc, xc = s, s # mosaic center x, y
yc = int(random.uniform(0.5 * input_dim[0], 1.5 * input_dim[0]))
xc = int(random.uniform(0.5 * input_dim[1], 1.5 * input_dim[1]))
# 3 additional image indices
indices = [idx] + [random.randint(0, len(self._dataset) - 1) for _ in range(3)]
for i, index in enumerate(indices):
img, _labels, _, _ = self._dataset.pull_item(index)
h0, w0 = img.shape[:2] # orig hw
scale = min(1. * input_dim[0] / h0, 1. * input_dim[1] / w0)
interp = cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * scale), int(h0 * scale)), interpolation=interp)
(h, w) = img.shape[:2]
if i == 0: # top left
# base image with 4 tiles
img4 = np.full(
(input_dim[0] * 2, input_dim[1] * 2, img.shape[2]), 114, dtype=np.uint8
)
# xmin, ymin, xmax, ymax (large image)
x1a, y1a, x2a, y2a = (max(xc - w, 0), max(yc - h, 0), xc, yc,)
# xmin, ymin, xmax, ymax (small image)
x1b, y1b, x2b, y2b = (w - (x2a - x1a), h - (y2a - y1a), w, h,)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, input_dim[1] * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(input_dim[0] * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, input_dim[1] * 2), min(input_dim[0] * 2, yc + h) # noqa
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
labels = _labels.copy() # [[xmin, ymin, xmax, ymax, label_ind], ... ]
                if _labels.size > 0:  # rescale xyxy labels to the resized tile and shift them into the mosaic frame
labels[:, 0] = scale * _labels[:, 0] + padw
labels[:, 1] = scale * _labels[:, 1] + padh
labels[:, 2] = scale * _labels[:, 2] + padw
labels[:, 3] = scale * _labels[:, 3] + padh
labels4.append(labels)
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 0], 0, 2 * input_dim[1], out=labels4[:, 0])
np.clip(labels4[:, 1], 0, 2 * input_dim[0], out=labels4[:, 1])
np.clip(labels4[:, 2], 0, 2 * input_dim[1], out=labels4[:, 2])
np.clip(labels4[:, 3], 0, 2 * input_dim[0], out=labels4[:, 3])
            # debug hook: uncomment to dump the assembled mosaic canvas
            #cv2.imwrite("b.png",img4.astype(np.uint8))
img4, labels4 = random_perspective(
img4,
labels4,
degrees=self.degrees,
translate=self.translate,
scale=self.scale,
shear=self.shear,
perspective=self.perspective,
border=[-input_dim[0] // 2, -input_dim[1] // 2],
) # border to remove
            # debug hook: uncomment to dump the mosaic after random_perspective
            #cv2.imwrite("c.png",img4.astype(np.uint8))
# -----------------------------------------------------------------
# CopyPaste: https://arxiv.org/abs/2012.07177
# -----------------------------------------------------------------
if self.enable_mixup and not len(labels4) == 0:
img4, labels4 = self.mixup(img4, labels4, self.input_dim)
mix_img, padded_labels = self.preproc(img4, labels4, self.input_dim)
img_info = (mix_img.shape[1], mix_img.shape[0])
return mix_img, padded_labels, img_info, int(idx)
else:
self._dataset._input_dim = self.input_dim
img, label, img_info, idx = self._dataset.pull_item(idx)
img, label = self.preproc(img, label, self.input_dim)
return img, label, img_info, int(idx)
def mixup(self, origin_img, origin_labels, input_dim):
jit_factor = random.uniform(*self.mixup_scale)
FLIP = random.uniform(0, 1) > 0.5
cp_labels = []
while len(cp_labels) == 0:
cp_index = random.randint(0, self.__len__() - 1)
cp_labels = self._dataset.load_anno(cp_index)
img, cp_labels, _, _ = self._dataset.pull_item(cp_index)
if len(img.shape) == 3:
cp_img = np.ones((input_dim[0], input_dim[1], 3)) * 114.0
else:
cp_img = np.ones(input_dim) * 114.0
cp_scale_ratio = min(input_dim[0] / img.shape[0], input_dim[1] / img.shape[1])
resized_img = cv2.resize(
img,
(int(img.shape[1] * cp_scale_ratio), int(img.shape[0] * cp_scale_ratio)),
interpolation=cv2.INTER_LINEAR,
).astype(np.float32)
cp_img[
: int(img.shape[0] * cp_scale_ratio), : int(img.shape[1] * cp_scale_ratio)
] = resized_img
cp_img = cv2.resize(
cp_img,
(int(cp_img.shape[1] * jit_factor), int(cp_img.shape[0] * jit_factor)),
)
cp_scale_ratio *= jit_factor
if FLIP:
cp_img = cp_img[:, ::-1, :]
origin_h, origin_w = cp_img.shape[:2]
target_h, target_w = origin_img.shape[:2]
padded_img = np.zeros(
(max(origin_h, target_h), max(origin_w, target_w), 3)
).astype(np.uint8)
padded_img[:origin_h, :origin_w] = cp_img
x_offset, y_offset = 0, 0
if padded_img.shape[0] > target_h:
y_offset = random.randint(0, padded_img.shape[0] - target_h - 1)
if padded_img.shape[1] > target_w:
x_offset = random.randint(0, padded_img.shape[1] - target_w - 1)
padded_cropped_img = padded_img[
y_offset: y_offset + target_h, x_offset: x_offset + target_w
]
cp_bboxes_origin_np = adjust_box_anns(
cp_labels[:, :4], cp_scale_ratio, 0, 0, origin_w, origin_h
)
if FLIP:
cp_bboxes_origin_np[:, 0::2] = (
origin_w - cp_bboxes_origin_np[:, 0::2][:, ::-1]
)
cp_bboxes_transformed_np = cp_bboxes_origin_np.copy()
cp_bboxes_transformed_np[:, 0::2] = np.clip(
cp_bboxes_transformed_np[:, 0::2] - x_offset, 0, target_w
)
cp_bboxes_transformed_np[:, 1::2] = np.clip(
cp_bboxes_transformed_np[:, 1::2] - y_offset, 0, target_h
)
keep_list = box_candidates(cp_bboxes_origin_np.T, cp_bboxes_transformed_np.T, 5)
if keep_list.sum() >= 1.0:
cls_labels = cp_labels[keep_list, 4:5]
box_labels = cp_bboxes_transformed_np[keep_list]
labels = np.hstack((box_labels, cls_labels))
origin_labels = np.vstack((origin_labels, labels))
origin_img = origin_img.astype(np.float32)
origin_img = 0.5 * origin_img + 0.5 * padded_cropped_img.astype(np.float32)
        # debug hook: uncomment to dump the blended mixup image
        #cv2.imwrite("a.png",origin_img.astype(np.uint8))
return origin_img.astype(np.uint8), origin_labels
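# --- Illustrative sketch (not part of the original YOLOX file) ---
# Restates the per-tile coordinate arithmetic from __getitem__ as a standalone helper so
# the four mosaic branches can be inspected in isolation. It mirrors the logic above and
# is never called by the wrapper itself.
def _mosaic_tile_coords(i, xc, yc, w, h, input_dim):
    """Return ((x1a, y1a, x2a, y2a), (x1b, y1b, x2b, y2b)) for tile i of the 2x2 mosaic."""
    if i == 0:    # top left
        x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc
        x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h
    elif i == 1:  # top right
        x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, input_dim[1] * 2), yc
        x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
    elif i == 2:  # bottom left
        x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(input_dim[0] * 2, yc + h)
        x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
    else:         # bottom right
        x1a, y1a, x2a, y2a = xc, yc, min(xc + w, input_dim[1] * 2), min(input_dim[0] * 2, yc + h)
        x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
    return (x1a, y1a, x2a, y2a), (x1b, y1b, x2b, y2b)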
|
{"hexsha": "0c9aa6ee46f2c4ca782f44660535fa0de1e29423", "size": 8829, "ext": "py", "lang": "Python", "max_stars_repo_path": "yolox/data/datasets/mosaicdetection.py", "max_stars_repo_name": "vghost2008/YOLOX", "max_stars_repo_head_hexsha": "37b3cba0756907679ff25a4e5cc96eaad3b6f988", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "yolox/data/datasets/mosaicdetection.py", "max_issues_repo_name": "vghost2008/YOLOX", "max_issues_repo_head_hexsha": "37b3cba0756907679ff25a4e5cc96eaad3b6f988", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-09-17T07:28:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-17T07:28:41.000Z", "max_forks_repo_path": "yolox/data/datasets/mosaicdetection.py", "max_forks_repo_name": "vghost2008/YOLOX", "max_forks_repo_head_hexsha": "37b3cba0756907679ff25a4e5cc96eaad3b6f988", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.4471153846, "max_line_length": 117, "alphanum_fraction": 0.527239778, "include": true, "reason": "import numpy", "num_tokens": 2529}
|
from sklearn.linear_model import LogisticRegression
import sklearn
from sklearn.model_selection import cross_val_score
from scipy.sparse import lil_matrix
import numpy as np
import json
from time import time
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import random
colorset = dict()
target = list()
def colorinit():
colorset[0] = '#108831'
colorset[1] = '#880c7f'
colorset[2] = '#4e8ab5'
def datainit(orlabel, topaim):
global target
target = [1, 4, 7]
print(type(target))
def format_data_for_display(emb_file, i2l_file):
i2l = dict()
with open(i2l_file, 'r') as r:
r.readline()
for line in r:
parts = line.strip().split()
n_id, l_id = int(parts[0]), int(parts[1])
i2l[n_id] = l_id
i2e = dict()
with open(emb_file, 'r') as r:
r.readline()
for line in r:
            embeds = np.array(line.strip().split(), dtype=float)  # np.fromstring(text) is deprecated
node_id = embeds[0]
if node_id in i2l:
i2e[node_id] = embeds[1:]
i2l_list = sorted(i2l.items(), key=lambda x:x[0])
X = []
Y = []
for (id, label) in i2l_list:
X.append(i2e[id])
Y.append(label)
return X,Y
def getdata(oremb, orlabel):
print(target)
emb = []
label = []
nodechoice = dict()
index = 0
for i in orlabel:
if i in target:
if i not in nodechoice:
nodechoice[i]=set()
nodechoice[i].add(index)
index=index+1
for i in target:
print(str(i)+' '+str(len(nodechoice[i])))
        temp = random.sample(list(nodechoice[i]), 500)  # sample() requires a sequence on Python 3.11+
for index in temp:
emb.append(oremb[index])
label.append(orlabel[index])
return emb, label
def plot_emb(emb, label, title):
global target
x_min, x_max = np.min(emb, 0), np.max(emb, 0)
data = (emb - x_min) / (x_max - x_min)
datasize = data.shape[0]
plt.figure()
for i in range(datasize):
plt.text(data[i, 0], data[i, 1], str('.'),
color = colorset[target.index(label[i])],
fontdict={'weight': 'bold', 'size': 15})
plt.axis('off')
#plt.title(title)
plt.show()
def run(oremb, orlabel):
colorinit()
datainit(orlabel, 3)
emb, label = getdata(oremb, orlabel)
#print(label)
tsne = TSNE(n_components=2, init='pca', random_state=0)
tstart = time()
result = tsne.fit_transform(emb)
plot_emb(result, label, 't-SNE embedding (time %.2fs)'% (time()-tstart))
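# --- Illustrative sketch (not part of the original script) ---
# A synthetic smoke test for the pipeline above, assuming labels are drawn from the
# hard-coded target set {1, 4, 7} and that at least 500 nodes exist per label, since
# getdata() samples 500 per class without replacement. It is never called automatically.
def _synthetic_smoke_test(dim=16, nodes_per_label=500):
    oremb, orlabel = [], []
    for lab in (1, 4, 7):
        for _ in range(nodes_per_label):
            oremb.append(np.random.randn(dim))
            orlabel.append(lab)
    run(oremb, orlabel)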
if __name__ == '__main__':
oremb, orlabel = format_data_for_display('../../emb/dblp/dblp_MNCI_10.emb', '../../../data/dblp/node2label.txt')
run(oremb, orlabel)
|
{"hexsha": "7659812b5b963fb7ebf0e41af5ec83822dddbd7d", "size": 2411, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/experiment/network visualization.py", "max_stars_repo_name": "MGitHubL/MNCI", "max_stars_repo_head_hexsha": "6651a59ba30dd8c588aa26580411d8d01571c296", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/experiment/network visualization.py", "max_issues_repo_name": "MGitHubL/MNCI", "max_issues_repo_head_hexsha": "6651a59ba30dd8c588aa26580411d8d01571c296", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-10T02:25:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T12:00:34.000Z", "max_forks_repo_path": "code/experiment/network visualization.py", "max_forks_repo_name": "MGitHubL/MNCI", "max_forks_repo_head_hexsha": "6651a59ba30dd8c588aa26580411d8d01571c296", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.9619047619, "max_line_length": 113, "alphanum_fraction": 0.6652841145, "include": true, "reason": "import numpy,from scipy", "num_tokens": 751}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 16 12:12:57 2020
@author: ssterl
"""
###################################
######### REVUB core code #########
###################################
# REVUB model © 2019 CIREG project
# Author: Sebastian Sterl, Vrije Universiteit Brussel
# This code accompanies the paper "Turbines of the Caribbean: Decarbonising Suriname's electricity mix through hydro-supported integration of wind power" by Sterl et al.
# All equation, section &c. numbers refer to the official REVUB manual (see corresponding GitHub page, https://github.com/VUB-HYDR/REVUB).
import numpy as np
import pandas as pd
import numbers as nb
# %% REVUB.1) Set simulation accuracy
##### TECHNICAL SIMULATION PARAMETERS #####
# [set by user] This number defines the amount of discrete steps between 0 and max(E_hydro + E_solar + E_wind)
# reflecting the accuracy of determining the achieved ELCC
N_ELCC = 10**3
# [set by user] These values are used to get a good initial guess for the order of magnitude of the ELCC.
# This is done by multiplying them with yearly average E_{hydro}.
# A suitable range within which to identify the optimal solution (eq. S21) is thus obtained automatically
# for each HPP, regardless of differences in volume, head, rated power, &c.
# The value f_init_BAL_end may have to be increased in scenarios where the ELCC becomes extremely high,
# e.g. when extremely good balancing sources other than hydro are present.
# For the scenarios in (Sterl et al.), the below ranges work for all HPPs.
f_init_BAL_start = 0
f_init_BAL_step = 0.2
f_init_BAL_end = 1
# Idem for the optional STOR scenario
f_init_STOR_start = 0
f_init_STOR_step = 0.2
f_init_STOR_end = 1
# [set by user] Number of refinement loops for equilibrium search for min(Psi) (see eq. S21)
# Every +1 increases precision by one digit. Typically, 2 or 3 iterations suffice.
N_refine_BAL = 2
N_refine_STOR = 2
# [set by user] When min(Psi) (eq. S21) is lower than this threshold, no further refinement loops
# are performed. This number can be increased to speed up the simulation.
psi_min_threshold = 0.00
# [set by user] Number of loops for iterative estimation of P_stable,BAL/STOR (see eq. S9 & explanation below eq. S19)
# Typically, 3-6 iterations suffice until convergence is achieved.
X_max_BAL = 3
X_max_STOR = 3
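# --- Illustrative sketch (not part of the REVUB code) ---
# The BAL/STOR optimisation below locates min(Psi) by scanning a coarse grid of f_demand
# values and then re-centring a 10x finer grid on the best point, N_refine times. The
# standalone helper sketched here reproduces that pattern for an arbitrary 1-D objective
# so the mechanism can be tested in isolation; REVUB itself never calls it.
def _refinement_search_sketch(objective, start, step, end, n_refine):
    best_x = start
    for _ in range(n_refine):
        grid = np.arange(start, end + step, step)
        scores = np.array([objective(x) for x in grid])
        best_x = grid[np.nanargmin(scores)]
        # re-centre a 10x finer grid on the current optimum, as in the BAL loop below
        start, end, step = best_x - step, best_x + step, step / 10
    return best_x
# Example with a hypothetical objective: _refinement_search_sketch(lambda x: (x - 0.37)**2, 0, 0.2, 1, 3)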
# %% REVUB.2) Preallocate variables for REVUB simulation
##### RESERVOIR INFLOW PARAMETERS #####
# [preallocate] Part of inflow that can be stored on annual timescales
Q_in_frac_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
Q_in_frac_store = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] Part of inflow that must be released to prevent overflowing (this term is by definition zero for large HPPs)
Q_in_RoR_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
Q_in_RoR_store = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] monthly average inflow (m^3/s)
Q_in_nat_monthly = np.zeros(shape = (months_yr,len(simulation_years),HPP_number))
# [preallocate] HPP category
HPP_category = ["" for x in range(HPP_number)]
##### HPP OPERATIONAL PARAMETERS #####
# [preallocate] Parameters tau_fill (eq. S1), phi (eq. S6), kappa (eq. S5) and f_reg (eq. S30) for each HPP
tau_fill = np.full([HPP_number], np.nan)
phi = np.full([HPP_number], np.nan)
kappa = np.full([HPP_number], np.nan)
f_reg = np.full([HPP_number], np.nan)
# [preallocate] Option for STOR scenario
STOR_break = np.zeros(shape = HPP_number)
##### RESERVOIR OUTFLOW PARAMETERS #####
# [preallocate] Various outflow data arrays (m^3/s) for CONV scenario (Note 2, 3.1 and eq. S2)
Q_CONV_stable_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
Q_CONV_spill_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
Q_CONV_out_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] Various outflow data arrays (m^3/s) for BAL scenario (Note 2, 3.2 and eq. S2)
Q_BAL_stable_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
Q_BAL_flexible_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
Q_BAL_spill_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
Q_BAL_out_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] Potential flexible outflow from eq. S17
Q_BAL_pot_turb_flexible = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] Various outflow data arrays (m^3/s) for optional STOR scenario (Note 7)
Q_STOR_stable_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
Q_STOR_flexible_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
Q_STOR_pump_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
Q_STOR_out_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
Q_STOR_spill_hourly_upper = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
Q_STOR_spill_hourly_lower = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] Potential flexible outflow from eq. S17
Q_STOR_pot_turb_flexible = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] Potential pumped flow from eq. S38
Q_STOR_pot_pump_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] monthly average outflow (m^3/s)
Q_CONV_out_monthly = np.zeros(shape = (months_yr, len(simulation_years), HPP_number))
Q_BAL_out_monthly = np.zeros(shape = (months_yr, len(simulation_years), HPP_number))
Q_STOR_out_monthly = np.zeros(shape = (months_yr, len(simulation_years), HPP_number))
# [preallocate] Outflow data (yearly mean) aggregated by year (m^3/s)
Q_BAL_out_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
Q_STOR_out_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
##### RESERVOIR VOLUME #####
# [preallocate] Reservoir volume data (in m^3)
V_CONV_hourly = np.full([int(np.max(positions)) + 1, len(simulation_years), HPP_number], np.nan)
V_BAL_hourly = np.full([int(np.max(positions)) + 1, len(simulation_years), HPP_number], np.nan)
V_STOR_hourly_upper = np.full([int(np.max(positions)) + 1, len(simulation_years), HPP_number], np.nan)
V_STOR_hourly_lower = np.full([int(np.max(positions)) + 1, len(simulation_years), HPP_number], np.nan)
# [preallocate] The same data as 1D-array (full time series from start to end, not ordered by year)
V_CONV_series_hourly = np.full([int(sum(hrs_byyear)), HPP_number], np.nan)
V_BAL_series_hourly = np.full([int(sum(hrs_byyear)), HPP_number], np.nan)
V_STOR_series_hourly_upper = np.full([int(sum(hrs_byyear)), HPP_number], np.nan)
V_STOR_series_hourly_lower = np.full([int(sum(hrs_byyear)), HPP_number], np.nan)
##### RESERVOIR LAKE AREA #####
# [preallocate] Lake surface area (in m^2)
A_CONV_hourly = np.full([int(np.max(positions)) + 1, len(simulation_years), HPP_number], np.nan)
A_BAL_hourly = np.full([int(np.max(positions)) + 1, len(simulation_years), HPP_number], np.nan)
A_STOR_hourly_upper = np.full([int(np.max(positions)) + 1, len(simulation_years), HPP_number], np.nan)
# [preallocate] The same data as 1D-array (full time series from start to end, not ordered by year)
A_CONV_series_hourly = np.full([int(sum(hrs_byyear)), HPP_number], np.nan)
A_BAL_series_hourly = np.full([int(sum(hrs_byyear)), HPP_number], np.nan)
A_STOR_series_hourly_upper = np.full([int(sum(hrs_byyear)), HPP_number], np.nan)
##### RESERVOIR WATER LEVEL / HYDRAULIC HEAD #####
# [preallocate] hydraulic head from water level to turbine (in m)
h_CONV_hourly = np.full([int(np.max(positions)) + 1, len(simulation_years), HPP_number], np.nan)
h_BAL_hourly = np.full([int(np.max(positions)) + 1, len(simulation_years), HPP_number], np.nan)
h_STOR_hourly = np.full([int(np.max(positions)) + 1, len(simulation_years), HPP_number], np.nan)
# [preallocate] The same data as 1D-array (full time series from start to end, not ordered by year)
h_CONV_series_hourly = np.full([int(sum(hrs_byyear)), HPP_number], np.nan)
h_BAL_series_hourly = np.full([int(sum(hrs_byyear)), HPP_number], np.nan)
h_STOR_series_hourly = np.full([int(sum(hrs_byyear)), HPP_number], np.nan)
# [ADDED] rule curve
rule_curve_volume_series = np.full([int(sum(hrs_byyear)), HPP_number], np.nan)
rule_curve_head_series = np.full([int(sum(hrs_byyear)), HPP_number], np.nan)
##### SOLAR AND WIND CAPACITY #####
# [preallocate] Yearly power generation by each MW of solar+wind capacity with solar-wind ratio
# given by c_solar: c_wind (needed to calculate multipliers, below)
E_SW_per_MW_BAL_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
E_SW_per_MW_STOR_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
# [preallocate] Capacity multiplier (MW; the product c_solar_relative*c_multiplier [see later]
# equals the factor c_solar in eq. S9; idem for wind)
c_multiplier_BAL = np.zeros(shape = (len(simulation_years), HPP_number))
c_multiplier_STOR = np.zeros(shape = (len(simulation_years), HPP_number))
# [preallocate] Optimal E_solar + E_wind identified when looping over a range of possible ELCC
# values to identify min(Psi) (eq. S21; in MWh/year)
E_SW_loop_BAL_opt = np.zeros(shape = HPP_number)
E_SW_loop_STOR_opt = np.zeros(shape = HPP_number)
##### POWER GENERATION PARAMETERS: HYDRO #####
# [preallocate] Hydropower generation in CONV scenario (in MW or MWh/h)
P_CONV_hydro_stable_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
P_CONV_hydro_RoR_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] Hydropower generation in BAL scenario (in MW or MWh/h)
P_BAL_hydro_stable_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
P_BAL_hydro_flexible_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
P_BAL_hydro_RoR_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] Hydropower generation/storage in optional STOR scenario (in MW or MWh/h)
P_STOR_hydro_stable_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
P_STOR_pump_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
P_STOR_hydro_flexible_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] Maximum possible power output after accounting for ramp rate restrictions
# (in MW or MWh/h, see eq. S16, S37)
P_BAL_ramp_restr_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
P_STOR_ramp_restr_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
P_STOR_ramp_restr_pump_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] Turbine utilization rate (fraction; see eq. S28)
k_turb_hourly_BAL = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
k_turb_hourly_STOR = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] Monthly average of output energy variables for BAL (GWh/month)
E_hydro_BAL_stable_bymonth = np.zeros(shape = (months_yr,len(simulation_years),HPP_number))
E_hydro_BAL_flexible_bymonth = np.zeros(shape = (months_yr,len(simulation_years),HPP_number))
E_hydro_BAL_RoR_bymonth = np.zeros(shape = (months_yr,len(simulation_years),HPP_number))
# [preallocate] Monthly average of output energy variables for STOR (GWh/month)
E_hydro_STOR_stable_bymonth = np.zeros(shape = (months_yr,len(simulation_years),HPP_number))
E_hydro_STOR_flexible_bymonth = np.zeros(shape = (months_yr,len(simulation_years),HPP_number))
E_hydro_pump_STOR_bymonth = np.zeros(shape = (months_yr,len(simulation_years),HPP_number))
# [preallocate] Hydropower generation in CONV (MWh/year)
E_hydro_CONV_stable_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
E_hydro_CONV_RoR_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
# [preallocate] Hydropower generation in BAL (MWh/year) (eq. S24, S33)
E_hydro_BAL_stable_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
E_hydro_BAL_flexible_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
E_hydro_BAL_nonRoR_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
E_hydro_BAL_RoR_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
# [preallocate] Hydropower generation in STOR (MWh/year) (eq. S24, S33)
E_hydro_STOR_stable_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
E_hydro_STOR_flexible_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
E_hydro_STOR_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
E_hydro_STOR_pump_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
# [preallocate] Binary variable [0 or 1] determining whether hydropower plant is operating (1)
# or shut off in case of extreme drought (0) (see Note 3.1 and 8)
hydro_CONV_curtailment_factor_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
hydro_BAL_curtailment_factor_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
hydro_STOR_curtailment_factor_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] Binary variable indicating hydropower curtailment in given month
hydro_BAL_curtailment_factor_monthly = np.zeros(shape = (months_yr,len(simulation_years),HPP_number))
hydro_STOR_curtailment_factor_monthly = np.zeros(shape = (months_yr,len(simulation_years),HPP_number))
# [preallocate] Yearly average capacity factor of HPP turbines (%)
CF_hydro_CONV_yearly = np.full([len(simulation_years), HPP_number], np.nan)
# [preallocate] Hourly capacity factor for BAL and STOR scenario (%)
CF_hydro_BAL_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
CF_hydro_STOR_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
##### POWER GENERATION PARAMETERS: SOLAR & WIND #####
# [preallocate] Power generation from solar and wind power (MW or MWh/h)
P_BAL_solar_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
P_BAL_wind_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
P_STOR_solar_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
P_STOR_wind_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] Monthly average of output energy variables (GWh/month)
E_solar_BAL_bymonth = np.zeros(shape = (months_yr,len(simulation_years),HPP_number))
E_wind_BAL_bymonth = np.zeros(shape = (months_yr,len(simulation_years),HPP_number))
E_solar_STOR_bymonth = np.zeros(shape = (months_yr,len(simulation_years),HPP_number))
E_wind_STOR_bymonth = np.zeros(shape = (months_yr,len(simulation_years),HPP_number))
# [preallocate] Solar and wind power generation (MWh/year) (eq. S25)
E_solar_BAL_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
E_wind_BAL_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
E_solar_STOR_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
E_wind_STOR_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
##### POWER GENERATION PARAMETERS: HYDRO-SOLAR-WIND #####
# [preallocate] Load difference (eq. S9; in MW or MWh/h)
P_BAL_difference_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
P_STOR_difference_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] P_inflexible (stable hydro + solar + wind in eq. S9; in MW or MWh/h)
P_BAL_inflexible_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
P_STOR_inflexible_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] RLDC = Residual Load Duration Curve; sorted array of L_followed -
# (P_stable + P_flexible + P_solar + P_wind (- P_pump)) (in MW or MWh/h)
L_res_BAL_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
L_res_STOR_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
##### LOAD PROFILE DATA #####
# [preallocate] Load curve L(t) from eq. S9 (MW or MWh/h)
L_BAL_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
L_STOR_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] monthly average of load curve (MW or MWh/h)
L_norm_bymonth = np.zeros(shape = (months_yr,len(simulation_years),HPP_number))
##### IDENTIFYING THE ACHIEVED ELCC UNDER OPTIMAL HSW COMBINATION #####
# [preallocate] ELCC (Effective Load Carrying Capability = optimal series L(t) in eq. S10; in MW or MWh/h)
L_followed_BAL_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
L_followed_STOR_hourly = np.full([int(np.max(positions)), len(simulation_years), HPP_number], np.nan)
# [preallocate] Range of ELCCs from which to identify the actual one post-simulation
# (MWh/year). Accuracy is given by the parameter N_ELCC (amount of discrete values).
P_followed_BAL_range = np.zeros(shape = (len(simulation_years), N_ELCC, HPP_number))
P_followed_STOR_range = np.zeros(shape = (len(simulation_years), N_ELCC, HPP_number))
# [preallocate] Index of achieved ELCC in the above range
P_followed_BAL_index = np.zeros(shape = (len(simulation_years), HPP_number))
P_followed_STOR_index = np.zeros(shape = (len(simulation_years), HPP_number))
# [preallocate] Monthly ELCC (MWh/h)
ELCC_BAL_bymonth = np.zeros(shape = (months_yr,len(simulation_years),HPP_number))
ELCC_STOR_bymonth = np.zeros(shape = (months_yr,len(simulation_years),HPP_number))
# [preallocate] Yearly ELCC (MWh/year)
ELCC_BAL_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
ELCC_STOR_yearly = np.zeros(shape = (len(simulation_years), HPP_number))
# [preallocate] Fraction of ELCC unmet by HSW operation. note: as long as the parameter
# "LOEE_allowed" is set to zero, these arrays will be zero.
# If LOEE_allowed > 0, these arrays will indicate how the allowed unmet fraction of the ELCC
# is distributed over different months.
L_unmet_BAL_frac_bymonth = np.zeros(shape = (months_yr, len(simulation_years), HPP_number))
L_unmet_STOR_frac_bymonth = np.zeros(shape = (months_yr, len(simulation_years), HPP_number))
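# --- Illustrative sketch (not part of the REVUB code) ---
# Most hourly arrays above follow the shape convention [hour-of-year, year, HPP], padded
# with NaN beyond each year's last hour. The helper below shows how one [hour, year]
# slice is collapsed into a single continuous, NaN-free hourly series, mirroring the
# transpose/ravel/isfinite pattern used after the CONV simulation; it is never called.
def _to_continuous_series_sketch(hourly_slice_2d):
    series = (np.transpose(hourly_slice_2d)).ravel()
    return series[np.isfinite(series)]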
# %% REVUB.3) Classify HPPs
# [loop] to classify all HPPs
for HPP in range(HPP_number):
# [calculate] f_reg (eq. S29, S30 - solution for f_reg of t_fill,frac = 1 in eq. S29)
f_reg[HPP] = (V_max[HPP]/(min(np.sum(days_year,0))*hrs_day*secs_hr*T_fill_thres))/np.nanmean(Q_in_nat_hourly[:,:,HPP])
# [calculate] Determine dam category based on f_reg (Note 5)
# Here "large" HPPs are designated by "A", "small" HPPs by "B".
if f_reg[HPP] < 1:
# [define] as small HPP
HPP_category[HPP] = "B"
# [calculate] flexibly usable inflow for small HPPs (eq. S30)
Q_in_frac_hourly[:,:,HPP] = f_reg[HPP]*Q_in_nat_hourly[:,:,HPP]
else:
# [define] as large HPP
HPP_category[HPP] = "A"
# [calculate] all flow can be used flexibly for large HPPs
Q_in_frac_hourly[:,:,HPP] = Q_in_nat_hourly[:,:,HPP]
# [calculate] the component Q_RoR for small HPPs (Note 5)
Q_in_RoR_hourly[:,:,HPP] = Q_in_nat_hourly[:,:,HPP] - Q_in_frac_hourly[:,:,HPP]
##### SPECIFY OUTFLOW CURVE (CONV) #####
# [calculate] tau_fill (eq. S1) for each HPP
tau_fill[HPP] = (np.nanmean(Q_in_frac_hourly[:,:,HPP] * (min(np.sum(days_year,0))*hrs_day*secs_hr)/V_max[HPP]))**(-1)
# [calculate] phi (eq. S6) for each HPP
phi[HPP] = alpha*np.sqrt(tau_fill[HPP])
# [calculate] kappa (eq. S5) for each HPP
kappa[HPP] = 1/(f_opt**phi[HPP])*((np.exp(1))**(1 - d_min) - 1)
# [initialize] store Q_in_frac_hourly and Q_in_RoR_hourly; these may change during the simulations
# but need to be reinitialized for every iteration step (e.g. every new c_solar, c_wind in eq. S9)
Q_in_frac_store[:,:,HPP] = Q_in_frac_hourly[:,:,HPP]
Q_in_RoR_store[:,:,HPP] = Q_in_RoR_hourly[:,:,HPP]
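# --- Illustrative sketch (not part of the REVUB code) ---
# The classification above reduces to a single scalar test per plant. The helper below
# restates it for scalar inputs; the argument names (v_max in m^3, mean_inflow in m^3/s,
# hours_per_year, secs_per_hr, t_fill_thres) are placeholders for the corresponding user
# inputs, and the function is purely illustrative.
def _classify_hpp_sketch(v_max, mean_inflow, hours_per_year, secs_per_hr, t_fill_thres):
    """Return (f_reg, category): 'A' = large HPP (f_reg >= 1), 'B' = small HPP (f_reg < 1)."""
    f_reg_value = (v_max / (hours_per_year * secs_per_hr * t_fill_thres)) / mean_inflow
    return f_reg_value, ("A" if f_reg_value >= 1 else "B")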
# %% REVUB.4) Core REVUB simulation
# This section carries out the actual REVUB optimization.
# [loop] carry out CONV, BAL and (optionally) STOR simulation for every HPP
for HPP in range(HPP_number):
# [display] HPP for which simulation is being performed
print("HPP", HPP + 1, "/", HPP_number, ":", HPP_name[HPP])
###############################################################
############----------- CONV simulation -----------############
###############################################################
# [initialize] ensure Q_in_nat_flex and Q_in_nat_out are written correctly at the beginning of each simulation
Q_in_frac_hourly[:,:,HPP] = Q_in_frac_store[:,:,HPP]
Q_in_RoR_hourly[:,:,HPP] = Q_in_RoR_store[:,:,HPP]
    # [CHANGED] [initialize] Calculate multiannual median flow (used here in place of the mean) for conventional operating rules (eq. S4)
Q_in_nat_av = np.nanmedian(Q_in_frac_hourly[:,:,HPP])
# [initialize] This variable is equal to unity by default, but set to zero in case of extreme droughts forcing a
# temporary curtailment on hydropower generation (Note 3.1)
hydro_CONV_curtailment_factor_hourly[:,:,HPP] = 1
# [display] CONV simulation underway
print("(i) simulating CONV")
# [loop] across all simulation years
for y in range(len(simulation_years)):
# [read] vector with hours in each year
hrs_year = range(int(hrs_byyear[y]))
# [initialize] initial values of volume (m^3), area (m^2) and hydraulic head (m) for each simulation year
if y == 0:
V_CONV_hourly[0,y,HPP] = V_max[HPP]*f_opt
h_temp = np.where(abs(calibrate_volume[:,HPP] - V_CONV_hourly[0,y,HPP]) == min(abs(calibrate_volume[:,HPP] - V_CONV_hourly[0,y,HPP])))[0][0]
A_CONV_hourly[0,y,HPP] = calibrate_area[h_temp,HPP]
h_CONV_hourly[0,y,HPP] = calibrate_head[h_temp,HPP]
else:
temp = V_CONV_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
V_CONV_hourly[0,y,HPP] = temp[-1]
temp = A_CONV_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
A_CONV_hourly[0,y,HPP] = temp[-1]
temp = h_CONV_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
h_CONV_hourly[0,y,HPP] = temp[-1]
# [loop] over all time steps in each simulation year to calculate reservoir dynamics and hydropower generation
for n in hrs_year:
# [calculate] stable outflow Q_stable in m^3/s according to conventional management (eq. S4)
if V_CONV_hourly[n,y,HPP]/V_max[HPP] < f_opt:
Q_CONV_stable_hourly[n,y,HPP] = (d_min + np.log(kappa[HPP]*(V_CONV_hourly[n,y,HPP]/V_max[HPP])**phi[HPP] + 1))*Q_in_nat_av
Q_CONV_spill_hourly[n,y,HPP] = 0
elif V_CONV_hourly[n,y,HPP]/V_max[HPP] < f_spill:
Q_CONV_stable_hourly[n,y,HPP] = (np.exp(gamma_hydro*(V_CONV_hourly[n,y,HPP]/V_max[HPP] - f_opt)**2))*Q_in_nat_av
Q_CONV_spill_hourly[n,y,HPP] = 0
else:
# [calculate] spilling component (eq. S7)
Q_CONV_stable_hourly[n,y,HPP] = (np.exp(gamma_hydro*(V_CONV_hourly[n,y,HPP]/V_max[HPP] - f_opt)**2))*Q_in_nat_av
Q_CONV_spill_hourly[n,y,HPP] = (Q_in_frac_hourly[n,y,HPP] + (precipitation_flux_hourly[n,y,HPP] - evaporation_flux_hourly[n,y,HPP])*A_CONV_hourly[n,y,HPP]/rho)*(1 + mu) - Q_CONV_stable_hourly[n,y,HPP]
# [check] spilling component cannot be negative (eq. S7)
if Q_CONV_spill_hourly[n,y,HPP] < 0:
Q_CONV_spill_hourly[n,y,HPP] = 0
# [check] stable outflow is reduced to zero in case of droughts
Q_CONV_stable_hourly[n,y,HPP] = Q_CONV_stable_hourly[n,y,HPP] * hydro_CONV_curtailment_factor_hourly[n,y,HPP]
# [calculate] total net outflow in m^3/s (eq. S2)
Q_CONV_out_hourly[n,y,HPP] = Q_CONV_stable_hourly[n,y,HPP] + Q_CONV_spill_hourly[n,y,HPP] + Q_in_RoR_hourly[n,y,HPP]
# [calculate] hydropower generation in MW (eq. S8)
Q_pot_turb_CONV = np.min([Q_CONV_stable_hourly[n,y,HPP], Q_max_turb[HPP]])
P_CONV_hydro_stable_hourly[n,y,HPP] = Q_pot_turb_CONV*eta_turb*rho*g*h_CONV_hourly[n,y,HPP]/10**6
# [calculate] hydropower generation from RoR flow component in MW (eq. S32)
P_CONV_hydro_RoR_hourly[n,y,HPP] = np.min([Q_in_RoR_hourly[n,y,HPP], np.max([0, Q_max_turb[HPP] - Q_CONV_stable_hourly[n,y,HPP]]) ])*eta_turb*rho*g*h_CONV_hourly[n,y,HPP]/10**6
# [calculate] reservoir volume in m^3 at next time step (eq. S3, S31)
V_CONV_hourly[n+1,y,HPP] = V_CONV_hourly[n,y,HPP] + (Q_in_frac_hourly[n,y,HPP] - Q_CONV_stable_hourly[n,y,HPP] - Q_CONV_spill_hourly[n,y,HPP] + (precipitation_flux_hourly[n,y,HPP] - evaporation_flux_hourly[n,y,HPP])*A_CONV_hourly[n,y,HPP]/rho)*secs_hr
# [calculate] reservoir lake area in m^2 and hydraulic head in m from bathymetric relationship
h_temp = np.where(abs(calibrate_volume[:,HPP] - V_CONV_hourly[n+1,y,HPP]) == min(abs(calibrate_volume[:,HPP] - V_CONV_hourly[n+1,y,HPP])))[0][0]
A_CONV_hourly[n+1,y,HPP] = calibrate_area[h_temp,HPP]
h_CONV_hourly[n+1,y,HPP] = calibrate_head[h_temp,HPP]
# [calculate] whether lake levels have dropped so low as to require hydropower curtailment
# [calculate] for small HPPs: use "RoR" flow component to fill up reservoir in case water levels have dropped below f_restart*V_max
# (see explanation below eq. S33)
if HPP_category[HPP] == "B":
if V_CONV_hourly[n+1,y,HPP] < f_restart*V_max[HPP]:
if n < len(hrs_year) - 1:
Q_in_frac_hourly[n+1,y,HPP] = Q_in_frac_hourly[n+1,y,HPP] + Q_in_RoR_hourly[n+1,y,HPP]
Q_in_RoR_hourly[n+1,y,HPP] = 0
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
Q_in_frac_hourly[0,y+1,HPP] = Q_in_frac_hourly[0,y+1,HPP] + Q_in_RoR_hourly[0,y+1,HPP]
Q_in_RoR_hourly[0,y+1,HPP] = 0
# [calculate] for large and small HPPs: curtail hydropower generation in case water levels have dropped below f_stop*V_max
# (see Note 3.1)
if V_CONV_hourly[n+1,y,HPP] < f_stop*V_max[HPP]:
if n < len(hrs_year) - 1:
hydro_CONV_curtailment_factor_hourly[n+1,y,HPP] = 0
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
hydro_CONV_curtailment_factor_hourly[0,y+1,HPP] = 0
# [calculate] restart hydropower generation if reservoir levels have recovered
# (see Note 3.1)
if hydro_CONV_curtailment_factor_hourly[n,y,HPP] == 0 and V_CONV_hourly[n+1,y,HPP] > f_restart*V_max[HPP]:
if n < len(hrs_year) - 1:
hydro_CONV_curtailment_factor_hourly[n+1,y,HPP] = 1
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
hydro_CONV_curtailment_factor_hourly[0,y+1,HPP] = 1
elif hydro_CONV_curtailment_factor_hourly[n,y,HPP] == 0 and V_CONV_hourly[n+1,y,HPP] <= f_restart*V_max[HPP]:
if n < len(hrs_year) - 1:
hydro_CONV_curtailment_factor_hourly[n+1,y,HPP] = 0
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
hydro_CONV_curtailment_factor_hourly[0,y+1,HPP] = 0
# [calculate] total hydropower generation in MWh/year (eq. S24)
E_hydro_CONV_stable_yearly[y,HPP] = np.sum(P_CONV_hydro_stable_hourly[hrs_year,y,HPP])
E_hydro_CONV_RoR_yearly[y,HPP] = np.sum(P_CONV_hydro_RoR_hourly[hrs_year,y,HPP])
# [arrange] complete time series of water volume, area and levels
for y in range(len(simulation_years)):
V_CONV_hourly[int(hrs_byyear[y]),y,HPP] = np.nan
A_CONV_hourly[int(hrs_byyear[y]),y,HPP] = np.nan
h_CONV_hourly[int(hrs_byyear[y]),y,HPP] = np.nan
temp_volume_upper_CONV_series = V_CONV_hourly[:,:,HPP]
temp_volume_upper_CONV_series = (np.transpose(temp_volume_upper_CONV_series)).ravel()
temp_volume_upper_CONV_series = temp_volume_upper_CONV_series[np.isfinite(temp_volume_upper_CONV_series)]
V_CONV_series_hourly[:,HPP] = temp_volume_upper_CONV_series
temp_area_CONV_series = A_CONV_hourly[:,:,HPP]
temp_area_CONV_series = (np.transpose(temp_area_CONV_series)).ravel()
temp_area_CONV_series = temp_area_CONV_series[np.isfinite(temp_area_CONV_series)]
A_CONV_series_hourly[:,HPP] = temp_area_CONV_series
temp_head_CONV_series = h_CONV_hourly[:,:,HPP]
temp_head_CONV_series = (np.transpose(temp_head_CONV_series)).ravel()
temp_head_CONV_series = temp_head_CONV_series[np.isfinite(temp_head_CONV_series)]
h_CONV_series_hourly[:,HPP] = temp_head_CONV_series
temp_rule_curve_volume_series = rule_curve_volume[:,:,HPP]
temp_rule_curve_volume_series = (np.transpose(temp_rule_curve_volume_series)).ravel()
temp_rule_curve_volume_series = temp_rule_curve_volume_series[np.isfinite(temp_rule_curve_volume_series)]
rule_curve_volume_series[:,HPP] = temp_rule_curve_volume_series
temp_rule_curve_head_series = rule_curve_head[:,:,HPP]
temp_rule_curve_head_series = (np.transpose(temp_rule_curve_head_series)).ravel()
temp_rule_curve_head_series = temp_rule_curve_head_series[np.isfinite(temp_rule_curve_head_series)]
rule_curve_head_series[:,HPP] = temp_rule_curve_head_series
# [display] once CONV simulation is complete
print("done")
###############################################################
############------------ BAL iterations -----------############
###############################################################
# [display] start of iterations to find optimal solution for BAL operation
print("(ii) finding optimal BAL solution")
# [loop] with incrementally increased C_OR values, starting at C_OR = 1 - d_min (Note 4)
for q in range(len(C_OR_range_BAL)):
# [calculate] ratio of stable (environmental) to average total outflow (see eq. S14)
Q_stable_ratio = 1 - C_OR_range_BAL[q]
# [display] refinement step in BAL simulation
print("C_OR = ", np.round(100*C_OR_range_BAL[q], decimals = 1), "%")
# [loop] across refinement steps to increase accuracy
for n_refine_BAL in range(N_refine_BAL):
# [initialize] range for current refinement step; each step increases accuracy by one digit
if n_refine_BAL == 0:
f_demand_BAL_start = f_init_BAL_start
f_demand_BAL_step = f_init_BAL_step
f_demand_BAL_end = f_init_BAL_end
elif n_refine_BAL > 0:
f_demand_BAL_start = f_demand_opt_BAL - f_demand_BAL_step
f_demand_BAL_end = f_demand_opt_BAL + f_demand_BAL_step
f_demand_BAL_step = f_demand_BAL_step/10
f_demand_BAL = np.arange(f_demand_BAL_start, f_demand_BAL_end + f_demand_BAL_step, f_demand_BAL_step)
# [preallocate] psi (eq. S21)
psi_BAL = np.full([len(f_demand_BAL)], np.nan)
# [loop] to find optimal values of E_solar and E_wind (eq. S25) by locating minimum in psi (eq. S22)
for f in range(len(f_demand_BAL)):
# [display] progress within each refinement step in BAL simulation
print("refinement step", n_refine_BAL + 1, "/", N_refine_BAL, "> scanning:", np.floor(100*(f + 1)/len(f_demand_BAL)), "%")
# [initialize] realistic value of total SW power (MWh/year) so that we can loop over realistic values of c_solar and c_wind (eq. S25)
E_SW_loop_BAL = np.mean(E_hydro_CONV_stable_yearly[:,HPP])*f_demand_BAL[f]*np.ones(shape = (len(E_hydro_CONV_stable_yearly[:,HPP])))
# [preallocate] stable hydropower generation P_stable in MW (see explanation below eq. S19)
P_BAL_hydro_stable_hourly[:,:,HPP] = np.nan
# [loop] across all simulation years to identify realistic c_solar and c_wind values
for y in range(len(simulation_years)):
# [read] vector with hours in each year
hrs_year = range(int(hrs_byyear[y]))
# [calculate] determine realistic amount of SW capacity in MW (c_solar, c_wind) corresponding to generation equal to E_SW_loop_BAL
E_SW_per_MW_BAL_yearly[y,HPP] = np.sum(c_solar_relative[HPP]*CF_solar_hourly[hrs_year,y,HPP] + c_wind_relative[HPP]*CF_wind_hourly[hrs_year,y,HPP])
c_multiplier_BAL[y,HPP] = E_SW_loop_BAL[y]/E_SW_per_MW_BAL_yearly[y,HPP]
# [loop] perform iterations to get converged estimate of P_stable (see explanation below eq. S19)
for x in range(X_max_BAL):
# [calculate] environmentally required outflow (eq. S14)
temp_Q_out_BAL = Q_in_nat_av*np.ones(shape = (len(Q_CONV_stable_hourly),len(Q_CONV_stable_hourly[0])))
temp_Q_out_BAL[np.isnan(Q_CONV_stable_hourly[:,:,HPP])] = np.nan
Q_BAL_stable_hourly[:,:,HPP] = Q_stable_ratio*temp_Q_out_BAL
# [initialize] ensure Q_in_frac_hourly and Q_in_RoR_hourly are written correctly at the beginning of each step in the loop
Q_in_frac_hourly[:,:,HPP] = Q_in_frac_store[:,:,HPP]
Q_in_RoR_hourly[:,:,HPP] = Q_in_RoR_store[:,:,HPP]
# [initialize] This variable is equal to unity by default, but set to zero in case of extreme droughts forcing a
# temporary curtailment on hydropower generation (Note 3.1)
hydro_BAL_curtailment_factor_hourly[:,:,HPP] = 1
# [loop] across all simulation years to initialize P_stable (see explanation below eq. S19)
for y in range(len(simulation_years)):
# [read] vector with hours in each year
hrs_year = range(int(hrs_byyear[y]))
# [initialize] stable hydropower generation P_stable in MW (see explanation below eq. S19)
# use estimate P_stable,BAL = (1 - C_OR)*P_stable,CONV as initial guess
if x == 0:
P_BAL_inflexible_hourly[hrs_year,y,HPP] = (np.mean(c_multiplier_BAL[:,HPP])*c_solar_relative[HPP]*CF_solar_hourly[hrs_year,y,HPP] + np.mean(c_multiplier_BAL[:,HPP])*c_wind_relative[HPP]*CF_wind_hourly[hrs_year,y,HPP] + Q_stable_ratio*np.nanmean(P_CONV_hydro_stable_hourly[:,:,HPP]))
elif x > 0:
# use estimate P_stable,BAL from previous iteration
P_BAL_inflexible_hourly[hrs_year,y,HPP] = (np.mean(c_multiplier_BAL[:,HPP])*c_solar_relative[HPP]*CF_solar_hourly[hrs_year,y,HPP] + np.mean(c_multiplier_BAL[:,HPP])*c_wind_relative[HPP]*CF_wind_hourly[hrs_year,y,HPP] + P_BAL_hydro_stable_hourly[hrs_year,y,HPP])
# [calculate] P_load according to constraints on overproduction (eq. S11)
P_load_BAL = np.nanpercentile(P_BAL_inflexible_hourly[:,:,HPP],f_size)
# [loop] across all simulation years to perform optimization
for y in range(len(simulation_years)):
# [read] vector with hours in each year
hrs_year = range(int(hrs_byyear[y]))
# [calculate] hourly load time series in MW (eq. S10)
L_BAL_hourly[hrs_year,y,HPP] = P_load_BAL*L_norm[hrs_year,y,HPP]
# [calculate] load difference P_d (eq. S9)
P_BAL_difference_hourly[hrs_year,y,HPP] = P_BAL_inflexible_hourly[hrs_year,y,HPP] - L_BAL_hourly[hrs_year,y,HPP]
# [initialize] initial values of volume (m^3), area (m^2), hydraulic head (m) and ramp restrictions (MW/hr) for each simulation year
if y == 0:
V_BAL_hourly[0,y,HPP] = V_CONV_hourly[0,y,HPP]
A_BAL_hourly[0,y,HPP] = A_CONV_hourly[0,y,HPP]
h_BAL_hourly[0,y,HPP] = h_CONV_hourly[0,y,HPP]
# [calculate] ramping constraint (eq. S16)
temp_sgn_turb = 1
P_BAL_ramp_restr_hourly[0,y,HPP] = P_r_turb[HPP]*dP_ramp_turb*mins_hr
else:
temp = V_BAL_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
V_BAL_hourly[0,y,HPP] = temp[-1]
temp = A_BAL_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
A_BAL_hourly[0,y,HPP] = temp[-1]
temp = h_BAL_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
h_BAL_hourly[0,y,HPP] = temp[-1]
# [calculate] ramping constraint (eq. S16)
temp = P_BAL_hydro_flexible_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
temp_P_difference = P_BAL_difference_hourly[:,y-1,HPP]
temp_P_difference = temp_P_difference[np.isfinite(temp_P_difference)]
# [calculate] whether ramping up (temp_sgn = 1) or down (temp_sgn = -1)
if P_BAL_difference_hourly[0,y,HPP] - temp_P_difference[-1] < 0:
temp_sgn_turb = 1
else:
temp_sgn_turb = -1
P_BAL_ramp_restr_hourly[0,y,HPP] = temp[-1] + temp_sgn_turb*P_r_turb[HPP]*dP_ramp_turb*mins_hr
if P_BAL_ramp_restr_hourly[0,y,HPP] < 0:
P_BAL_ramp_restr_hourly[0,y,HPP] = 0
# [loop] over all time steps in each simulation year to calculate reservoir dynamics and hydropower generation
for n in hrs_year:
# [check] stable outflow is reduced to zero in case of droughts
Q_BAL_stable_hourly[n,y,HPP] = Q_BAL_stable_hourly[n,y,HPP] * hydro_BAL_curtailment_factor_hourly[n,y,HPP]
# [calculate] flexible hydropower generation in MW (eq. S16 & S17)
if P_BAL_difference_hourly[n,y,HPP] < 0:
Q_BAL_pot_turb_flexible[n,y,HPP] = np.max([0, Q_max_turb[HPP] - Q_BAL_stable_hourly[n,y,HPP]]) * hydro_BAL_curtailment_factor_hourly[n,y,HPP]
# [calculate] if ramping up
if temp_sgn_turb == 1:
P_BAL_hydro_flexible_hourly[n,y,HPP] = np.min([Q_BAL_pot_turb_flexible[n,y,HPP]*eta_turb*rho*g*h_BAL_hourly[n,y,HPP]/10**6, np.min([np.abs(P_BAL_difference_hourly[n,y,HPP]), P_BAL_ramp_restr_hourly[n,y,HPP]]) ])
# [calculate] if ramping down
elif temp_sgn_turb == -1:
P_BAL_hydro_flexible_hourly[n,y,HPP] = np.min([Q_BAL_pot_turb_flexible[n,y,HPP]*eta_turb*rho*g*h_BAL_hourly[n,y,HPP]/10**6, np.max([np.abs(P_BAL_difference_hourly[n,y,HPP]), P_BAL_ramp_restr_hourly[n,y,HPP]]) ])
# [check] flexible hydropower generation is zero when P_d >= 0 (eq. S16)
if P_BAL_difference_hourly[n,y,HPP] >= 0:
P_BAL_hydro_flexible_hourly[n,y,HPP] = 0
# [calculate] stable hydropower generation in MW (eq. S15)
Q_pot_turb_BAL = np.min([Q_BAL_stable_hourly[n,y,HPP], Q_max_turb[HPP]])
P_BAL_hydro_stable_hourly[n,y,HPP] = Q_pot_turb_BAL*eta_turb*rho*g*h_BAL_hourly[n,y,HPP]/10**6
# [calculate] flexible turbined flow in m^3/s (eq. S18)
if h_BAL_hourly[n,y,HPP] > 0:
Q_BAL_flexible_hourly[n,y,HPP] = P_BAL_hydro_flexible_hourly[n,y,HPP]/(eta_turb*rho*g*h_BAL_hourly[n,y,HPP])*10**6
else:
# [check] cannot be negative
h_BAL_hourly[n,y,HPP] = 0
Q_BAL_flexible_hourly[n,y,HPP] = 0
# [calculate] hydropower generation from RoR flow component in MW (eq. S32)
P_BAL_hydro_RoR_hourly[n,y,HPP] = np.min([Q_in_RoR_hourly[n,y,HPP], np.max([0, Q_max_turb[HPP] - Q_BAL_stable_hourly[n,y,HPP] - Q_BAL_flexible_hourly[n,y,HPP]]) ])*eta_turb*rho*g*h_CONV_hourly[n,y,HPP]/10**6
# [calculate] spilling component in m^3/s (eq. S19)
if V_BAL_hourly[n,y,HPP]/V_max[HPP] < f_spill:
Q_BAL_spill_hourly[n,y,HPP] = 0
else:
Q_BAL_spill_hourly[n,y,HPP] = (Q_in_frac_hourly[n,y,HPP] + (precipitation_flux_hourly[n,y,HPP] - evaporation_flux_hourly[n,y,HPP])*A_BAL_hourly[n,y,HPP]/rho)*(1 + mu) - Q_BAL_stable_hourly[n,y,HPP] - Q_BAL_flexible_hourly[n,y,HPP]
# [check] spilling component cannot be negative (eq. S7)
if Q_BAL_spill_hourly[n,y,HPP] < 0:
Q_BAL_spill_hourly[n,y,HPP] = 0
# [calculate] total net outflow in m^3/s (eq. S2)
Q_BAL_out_hourly[n,y,HPP] = Q_BAL_stable_hourly[n,y,HPP] + Q_BAL_flexible_hourly[n,y,HPP] + Q_BAL_spill_hourly[n,y,HPP] + Q_in_RoR_hourly[n,y,HPP]
# [calculate] reservoir volume in m^3 at next time step (eq. S3, S31)
V_BAL_hourly[n+1,y,HPP] = V_BAL_hourly[n,y,HPP] + (Q_in_frac_hourly[n,y,HPP] - Q_BAL_stable_hourly[n,y,HPP] - Q_BAL_flexible_hourly[n,y,HPP] - Q_BAL_spill_hourly[n,y,HPP] + (precipitation_flux_hourly[n,y,HPP] - evaporation_flux_hourly[n,y,HPP])*A_BAL_hourly[n,y,HPP]/rho)*secs_hr
# [check] prevent unreal values when lake levels drop low
if V_BAL_hourly[n+1,y,HPP] < 0:
Q_BAL_stable_hourly[n,y,HPP] = 0
P_BAL_hydro_stable_hourly[n,y,HPP] = 0
Q_BAL_flexible_hourly[n,y,HPP] = 0
P_BAL_hydro_flexible_hourly[n,y,HPP] = 0
Q_BAL_out_hourly[n,y,HPP] = Q_BAL_stable_hourly[n,y,HPP] + Q_BAL_flexible_hourly[n,y,HPP] + Q_BAL_spill_hourly[n,y,HPP] + Q_in_RoR_hourly[n,y,HPP]
A_BAL_hourly[n,y,HPP] = 0
V_BAL_hourly[n+1,y,HPP] = V_BAL_hourly[n,y,HPP] + (Q_in_frac_hourly[n,y,HPP] - Q_BAL_stable_hourly[n,y,HPP] - Q_BAL_flexible_hourly[n,y,HPP] - Q_BAL_spill_hourly[n,y,HPP] + (precipitation_flux_hourly[n,y,HPP] - evaporation_flux_hourly[n,y,HPP])*A_BAL_hourly[n,y,HPP]/rho)*secs_hr
# [calculate] reservoir lake area in m^2 and hydraulic head in m from bathymetric relationship
h_temp = np.where(abs(calibrate_volume[:,HPP] - V_BAL_hourly[n+1,y,HPP]) == min(abs(calibrate_volume[:,HPP] - V_BAL_hourly[n+1,y,HPP])))[0][0]
A_BAL_hourly[n+1,y,HPP] = calibrate_area[h_temp,HPP]
h_BAL_hourly[n+1,y,HPP] = calibrate_head[h_temp,HPP]
# [calculate] ramp rate restrictions (MW attainable) at next time step (eq. S16)
if n < len(hrs_year) - 1:
if (P_BAL_difference_hourly[n+1,y,HPP] - P_BAL_difference_hourly[n,y,HPP]) < 0:
temp_sgn_turb = 1
else:
temp_sgn_turb = -1
P_BAL_ramp_restr_hourly[n+1,y,HPP] = P_BAL_hydro_flexible_hourly[n,y,HPP] + temp_sgn_turb*P_r_turb[HPP]*dP_ramp_turb*mins_hr
if P_BAL_ramp_restr_hourly[n+1,y,HPP] < 0:
P_BAL_ramp_restr_hourly[n+1,y,HPP] = 0
# [calculate] whether lake levels have dropped so low as to require hydropower curtailment
# [calculate] for small HPPs: use "RoR" flow component to fill up reservoir in case water levels have dropped below f_restart*V_max
# (see explanation below eq. S33)
if HPP_category[HPP] == "B":
if V_BAL_hourly[n+1,y,HPP] < f_restart*V_max[HPP]:
if n < len(hrs_year) - 1:
Q_in_frac_hourly[n+1,y,HPP] = Q_in_frac_hourly[n+1,y,HPP] + Q_in_RoR_hourly[n+1,y,HPP]
Q_in_RoR_hourly[n+1,y,HPP] = 0
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
Q_in_frac_hourly[0,y+1,HPP] = Q_in_frac_hourly[0,y+1,HPP] + Q_in_RoR_hourly[0,y+1,HPP]
Q_in_RoR_hourly[0,y+1,HPP] = 0
# [calculate] for large and small HPPs: curtail hydropower generation in case water levels have dropped below f_stop*V_max
# (see Note 3.1)
if V_BAL_hourly[n+1,y,HPP] < f_stop*V_max[HPP]:
if n < len(hrs_year) - 1:
hydro_BAL_curtailment_factor_hourly[n+1,y,HPP] = 0
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
hydro_BAL_curtailment_factor_hourly[0,y+1,HPP] = 0
# [calculate] restart hydropower generation if reservoir levels have recovered
# (see Note 3.1)
if hydro_BAL_curtailment_factor_hourly[n,y,HPP] == 0 and V_BAL_hourly[n+1,y,HPP] > f_restart*V_max[HPP]:
if n < len(hrs_year) - 1:
hydro_BAL_curtailment_factor_hourly[n+1,y,HPP] = 1
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
hydro_BAL_curtailment_factor_hourly[0,y+1,HPP] = 1
elif hydro_BAL_curtailment_factor_hourly[n,y,HPP] == 0 and V_BAL_hourly[n+1,y,HPP] <= f_restart*V_max[HPP]:
if n < len(hrs_year) - 1:
hydro_BAL_curtailment_factor_hourly[n+1,y,HPP] = 0
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
hydro_BAL_curtailment_factor_hourly[0,y+1,HPP] = 0
# [arrange] complete time series of water volume for eq. S20
for y in range(len(simulation_years)):
V_BAL_hourly[int(hrs_byyear[y]),y,HPP] = np.nan
temp_volume_upper_BAL_series = V_BAL_hourly[:,:,HPP]
temp_volume_upper_BAL_series = (np.transpose(temp_volume_upper_BAL_series)).ravel()
temp_volume_upper_BAL_series = temp_volume_upper_BAL_series[np.isfinite(temp_volume_upper_BAL_series)]
# [CHANGED] [calculate] deviation between rule curve and BAL reservoir dynamics (eq. S21)
psi_BAL[f] = np.mean(np.abs(temp_volume_upper_BAL_series - rule_curve_volume_series[:,HPP]))/np.mean(rule_curve_volume_series[:,HPP])
# [check] see explanation below eq. S21: if droughts occur in CONV, BAL should have no MORE days of curtailed flow than CONV ...
# and curtailed flow should occur in less than 50% of the years in the simulation, so median yearly statistics represent normal operation
if np.nanmin(V_CONV_hourly[:,:,HPP]) < f_stop*V_max[HPP] and (np.nansum(Q_BAL_out_hourly[:,:,HPP] == 0) > np.nansum(Q_CONV_out_hourly[:,:,HPP] == 0) or np.sum(np.sum(Q_BAL_out_hourly[:,:,HPP] - Q_in_RoR_hourly[:,:,HPP] == 0, axis = 0) > 0) > np.floor(len(simulation_years)/2)):
psi_BAL[f] = np.nan
# [check] if droughts do not occur in CONV, then neither should they in BAL
elif np.nanmin(V_CONV_hourly[:,:,HPP]) >= f_stop*V_max[HPP] and np.nanmin(V_BAL_hourly[:,:,HPP]) < f_stop*V_max[HPP]:
psi_BAL[f] = np.nan
# [NEW] to speed up simulation: stop scanning f_demand as soon as psi stops decreasing or becomes NaN
if f > 0 and (psi_BAL[f] > psi_BAL[f-1] or (np.isnan(psi_BAL[f]))):
break
# [identify] minimum in psi (eq. S21)
if np.sum(np.isnan(psi_BAL)) == len(psi_BAL) and f_demand_BAL[0] == 0:
f_demand_opt_BAL = 0
psi_BAL_opt = 0
break
else:
crossing_BAL = np.where(psi_BAL == min(psi_BAL))[0][0]
f_demand_opt_BAL = f_demand_BAL[crossing_BAL]
psi_BAL_opt = np.abs(psi_BAL[crossing_BAL])
# [check] prevent negative results
if f_demand_opt_BAL == 0:
f_demand_opt_BAL = f_demand_BAL[crossing_BAL + 1]
psi_BAL_opt = np.abs(psi_BAL[crossing_BAL + 1])
# [check] determine if psi is low enough for this to qualify as optimum solution
if psi_BAL_opt < psi_min_threshold:
break
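# [note] each refinement step narrows the f_demand search window around the previous optimum and
# refines the step size by one digit of accuracy; the refinement loop exits early once psi falls
# below psi_min_threshold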
# [check] if range in which to identify ELCC is adequate
if f_demand_opt_BAL == f_demand_BAL[-1]:
print("Warning: parameter f_init_BAL_end likely set too low")
# [initialize] optimal value of total SW power (MWh/year) so that we can calculate optimal c_solar and c_wind (eq. S25)
E_SW_loop_BAL_opt[HPP] = np.mean(E_hydro_CONV_stable_yearly[:,HPP])*f_demand_opt_BAL
###############################################################
############------------ BAL optimized ------------############
###############################################################
# [display]
print("(iii) found optimum BAL solution - saving all variables")
# [preallocate] to test convergence towards P_stable (see explanation below eq. S19)
convergence_test_BAL = np.zeros(shape = (X_max_BAL))
# [loop] across all simulation years to identify realistic c_solar and c_wind values
for y in range(len(simulation_years)):
# [read] vector with hours in each year
hrs_year = range(int(hrs_byyear[y]))
# [calculate] determine realistic amount of SW capacity in MW (c_solar, c_wind) corresponding to generation equal to E_SW_loop_BAL
E_SW_per_MW_BAL_yearly[y,HPP] = np.sum(c_solar_relative[HPP]*CF_solar_hourly[hrs_year,y,HPP] + c_wind_relative[HPP]*CF_wind_hourly[hrs_year,y,HPP])
c_multiplier_BAL[y,HPP] = E_SW_loop_BAL_opt[HPP]/E_SW_per_MW_BAL_yearly[y,HPP]
# [loop] perform iterations to get converged estimate of P_stable (see explanation below eq. S19)
for x in range(X_max_BAL):
# [calculate] environmentally required outflow (eq. S14)
temp_Q_out_BAL = Q_in_nat_av*np.ones(shape = (len(Q_CONV_stable_hourly),len(Q_CONV_stable_hourly[0])))
temp_Q_out_BAL[np.isnan(Q_CONV_stable_hourly[:,:,HPP])] = np.nan
Q_BAL_stable_hourly[:,:,HPP] = Q_stable_ratio*temp_Q_out_BAL
# [initialize] ensure Q_in_frac_hourly and Q_in_RoR_hourly are written correctly at the beginning of each step in the loop
Q_in_frac_hourly[:,:,HPP] = Q_in_frac_store[:,:,HPP]
Q_in_RoR_hourly[:,:,HPP] = Q_in_RoR_store[:,:,HPP]
# [initialize] This variable is equal to unity by default, but set to zero in case of extreme droughts forcing a
# temporary curtailment of hydropower generation (Note 3.1)
hydro_BAL_curtailment_factor_hourly[:,:,HPP] = 1
# [loop] across all simulation years to initialize P_stable (see explanation below eq. S19)
for y in range(len(simulation_years)):
# [read] vector with hours in each year
hrs_year = range(int(hrs_byyear[y]))
# [initialize] stable hydropower generation P_stable in MW (see explanation below eq. S19)
# use estimate P_stable,BAL = (1 - C_OR)*P_stable,CONV as initial guess
if x == 0:
P_BAL_inflexible_hourly[hrs_year,y,HPP] = (np.mean(c_multiplier_BAL[:,HPP])*c_solar_relative[HPP]*CF_solar_hourly[hrs_year,y,HPP] + np.mean(c_multiplier_BAL[:,HPP])*c_wind_relative[HPP]*CF_wind_hourly[hrs_year,y,HPP] + Q_stable_ratio*np.nanmean(P_CONV_hydro_stable_hourly[:,:,HPP]))
elif x > 0:
# use estimate P_stable,BAL from previous iteration
P_BAL_inflexible_hourly[hrs_year,y,HPP] = (np.mean(c_multiplier_BAL[:,HPP])*c_solar_relative[HPP]*CF_solar_hourly[hrs_year,y,HPP] + np.mean(c_multiplier_BAL[:,HPP])*c_wind_relative[HPP]*CF_wind_hourly[hrs_year,y,HPP] + P_BAL_hydro_stable_hourly[hrs_year,y,HPP])
# [calculate] total solar and wind power generation by hour (eq. S12)
P_BAL_solar_hourly[hrs_year,y,HPP] = np.mean(c_multiplier_BAL[:,HPP])*c_solar_relative[HPP]*CF_solar_hourly[hrs_year,y,HPP]
P_BAL_wind_hourly[hrs_year,y,HPP] = np.mean(c_multiplier_BAL[:,HPP])*c_wind_relative[HPP]*CF_wind_hourly[hrs_year,y,HPP]
# [calculate] P_load according to constraints on overproduction (eq. S11)
P_load_BAL = np.nanpercentile(P_BAL_inflexible_hourly[:,:,HPP],f_size)
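# [note] P_load_BAL sets the absolute scale in MW of the normalized load profile L_norm; taking the
# f_size-th percentile of the hourly inflexible generation (stable hydropower plus solar and wind)
# keeps the load scale consistent with what the inflexible portfolio can deliver while limiting
# overproduction (eq. S11)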
# [loop] across all simulation years to perform optimization
for y in range(len(simulation_years)):
# [read] vector with hours in each year
hrs_year = range(int(hrs_byyear[y]))
# [calculate] hourly load time series in MW (eq. S10)
L_BAL_hourly[hrs_year,y,HPP] = P_load_BAL*L_norm[hrs_year,y,HPP]
# [calculate] load difference P_d (eq. S9)
P_BAL_difference_hourly[hrs_year,y,HPP] = P_BAL_inflexible_hourly[hrs_year,y,HPP] - L_BAL_hourly[hrs_year,y,HPP]
# [initialize] initial values of volume (m^3), area (m^2), hydraulic head (m) and ramp restrictions (MW/hr) for each simulation year
if y == 0:
V_BAL_hourly[0,y,HPP] = V_CONV_hourly[0,y,HPP]
A_BAL_hourly[0,y,HPP] = A_CONV_hourly[0,y,HPP]
h_BAL_hourly[0,y,HPP] = h_CONV_hourly[0,y,HPP]
# [calculate] ramping constraint (eq. S16)
temp_sgn_turb = 1
P_BAL_ramp_restr_hourly[0,y,HPP] = P_r_turb[HPP]*dP_ramp_turb*mins_hr
else:
temp = V_BAL_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
V_BAL_hourly[0,y,HPP] = temp[-1]
temp = A_BAL_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
A_BAL_hourly[0,y,HPP] = temp[-1]
temp = h_BAL_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
h_BAL_hourly[0,y,HPP] = temp[-1]
# [calculate] ramping constraint (eq. S16)
temp = P_BAL_hydro_flexible_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
temp_P_difference = P_BAL_difference_hourly[:,y-1,HPP]
temp_P_difference = temp_P_difference[np.isfinite(temp_P_difference)]
# [calculate] whether ramping up (temp_sgn = 1) or down (temp_sgn = -1)
if P_BAL_difference_hourly[0,y,HPP] - temp_P_difference[-1] < 0:
temp_sgn_turb = 1
else:
temp_sgn_turb = -1
P_BAL_ramp_restr_hourly[0,y,HPP] = temp[-1] + temp_sgn_turb*P_r_turb[HPP]*dP_ramp_turb*mins_hr
if P_BAL_ramp_restr_hourly[0,y,HPP] < 0:
P_BAL_ramp_restr_hourly[0,y,HPP] = 0
# [loop] over all time steps in each simulation year to calculate reservoir dynamics and hydropower generation
for n in hrs_year:
# [check] stable outflow is reduced to zero in case of droughts
Q_BAL_stable_hourly[n,y,HPP] = Q_BAL_stable_hourly[n,y,HPP] * hydro_BAL_curtailment_factor_hourly[n,y,HPP]
# [calculate] flexible hydropower generation in MW (eq. S16 & S17)
if P_BAL_difference_hourly[n,y,HPP] < 0:
Q_BAL_pot_turb_flexible[n,y,HPP] = np.max([0, Q_max_turb[HPP] - Q_BAL_stable_hourly[n,y,HPP]]) * hydro_BAL_curtailment_factor_hourly[n,y,HPP]
# [calculate] if ramping up
if temp_sgn_turb == 1:
P_BAL_hydro_flexible_hourly[n,y,HPP] = np.min([Q_BAL_pot_turb_flexible[n,y,HPP]*eta_turb*rho*g*h_BAL_hourly[n,y,HPP]/10**6, np.min([np.abs(P_BAL_difference_hourly[n,y,HPP]), P_BAL_ramp_restr_hourly[n,y,HPP]]) ])
# [calculate] if ramping down
elif temp_sgn_turb == -1:
P_BAL_hydro_flexible_hourly[n,y,HPP] = np.min([Q_BAL_pot_turb_flexible[n,y,HPP]*eta_turb*rho*g*h_BAL_hourly[n,y,HPP]/10**6, np.max([np.abs(P_BAL_difference_hourly[n,y,HPP]), P_BAL_ramp_restr_hourly[n,y,HPP]]) ])
# [check] flexible hydropower generation is zero when P_d >= 0 (eq. S16)
if P_BAL_difference_hourly[n,y,HPP] >= 0:
P_BAL_hydro_flexible_hourly[n,y,HPP] = 0
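# [note] flexible output is capped by the power obtainable from the remaining turbine capacity at the
# current head; within that cap it follows the smaller of the load deficit |P_d| and the ramp
# restriction when ramping up, and the larger of the two when ramping down, so that output cannot
# fall faster than the ramp limit allows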
# [calculate] stable hydropower generation in MW (eq. S15)
Q_pot_turb_BAL = np.min([Q_BAL_stable_hourly[n,y,HPP], Q_max_turb[HPP]])
P_BAL_hydro_stable_hourly[n,y,HPP] = Q_pot_turb_BAL*eta_turb*rho*g*h_BAL_hourly[n,y,HPP]/10**6
# [calculate] flexible turbined flow in m^3/s (eq. S18)
if h_BAL_hourly[n,y,HPP] > 0:
Q_BAL_flexible_hourly[n,y,HPP] = P_BAL_hydro_flexible_hourly[n,y,HPP]/(eta_turb*rho*g*h_BAL_hourly[n,y,HPP])*10**6
else:
# [check] cannot be negative
h_BAL_hourly[n,y,HPP] = 0
Q_BAL_flexible_hourly[n,y,HPP] = 0
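# [note] Q_flexible inverts the hydropower relation P = eta_turb*rho*g*Q*h/10^6 to recover the
# flexible turbined flow from the flexible power output (e.g. with eta_turb = 0.9, rho = 1000 kg/m^3,
# g = 9.81 m/s^2, Q = 100 m^3/s and h = 50 m, P is about 44 MW); non-positive heads are reset to zero
# and the flexible flow set to zero to avoid division by zero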
# [calculate] hydropower generation from RoR flow component in MW (eq. S32)
P_BAL_hydro_RoR_hourly[n,y,HPP] = np.min([Q_in_RoR_hourly[n,y,HPP], np.max([0, Q_max_turb[HPP] - Q_BAL_stable_hourly[n,y,HPP] - Q_BAL_flexible_hourly[n,y,HPP]]) ])*eta_turb*rho*g*h_CONV_hourly[n,y,HPP]/10**6
# [calculate] spilling component in m^3/s (eq. S19)
if V_BAL_hourly[n,y,HPP]/V_max[HPP] < f_spill:
Q_BAL_spill_hourly[n,y,HPP] = 0
else:
Q_BAL_spill_hourly[n,y,HPP] = (Q_in_frac_hourly[n,y,HPP] + (precipitation_flux_hourly[n,y,HPP] - evaporation_flux_hourly[n,y,HPP])*A_BAL_hourly[n,y,HPP]/rho)*(1 + mu) - Q_BAL_stable_hourly[n,y,HPP] - Q_BAL_flexible_hourly[n,y,HPP]
# [check] spilling component cannot be negative (eq. S7)
if Q_BAL_spill_hourly[n,y,HPP] < 0:
Q_BAL_spill_hourly[n,y,HPP] = 0
# [calculate] total net outflow in m^3/s (eq. S2)
Q_BAL_out_hourly[n,y,HPP] = Q_BAL_stable_hourly[n,y,HPP] + Q_BAL_flexible_hourly[n,y,HPP] + Q_BAL_spill_hourly[n,y,HPP] + Q_in_RoR_hourly[n,y,HPP]
# [calculate] reservoir volume in m^3 at next time step (eq. S3, S31)
V_BAL_hourly[n+1,y,HPP] = V_BAL_hourly[n,y,HPP] + (Q_in_frac_hourly[n,y,HPP] - Q_BAL_stable_hourly[n,y,HPP] - Q_BAL_flexible_hourly[n,y,HPP] - Q_BAL_spill_hourly[n,y,HPP] + (precipitation_flux_hourly[n,y,HPP] - evaporation_flux_hourly[n,y,HPP])*A_BAL_hourly[n,y,HPP]/rho)*secs_hr
# [check] prevent unrealistic (negative) volumes when lake levels drop low
if V_BAL_hourly[n+1,y,HPP] < 0:
Q_BAL_stable_hourly[n,y,HPP] = 0
P_BAL_hydro_stable_hourly[n,y,HPP] = 0
Q_BAL_flexible_hourly[n,y,HPP] = 0
P_BAL_hydro_flexible_hourly[n,y,HPP] = 0
Q_BAL_out_hourly[n,y,HPP] = Q_BAL_stable_hourly[n,y,HPP] + Q_BAL_flexible_hourly[n,y,HPP] + Q_BAL_spill_hourly[n,y,HPP] + Q_in_RoR_hourly[n,y,HPP]
A_BAL_hourly[n,y,HPP] = 0
V_BAL_hourly[n+1,y,HPP] = V_BAL_hourly[n,y,HPP] + (Q_in_frac_hourly[n,y,HPP] - Q_BAL_stable_hourly[n,y,HPP] - Q_BAL_flexible_hourly[n,y,HPP] - Q_BAL_spill_hourly[n,y,HPP] + (precipitation_flux_hourly[n,y,HPP] - evaporation_flux_hourly[n,y,HPP])*A_BAL_hourly[n,y,HPP]/rho)*secs_hr
# [calculate] reservoir lake area in m^2 and hydraulic head in m from bathymetric relationship
h_temp = np.where(abs(calibrate_volume[:,HPP] - V_BAL_hourly[n+1,y,HPP]) == min(abs(calibrate_volume[:,HPP] - V_BAL_hourly[n+1,y,HPP])))[0][0]
A_BAL_hourly[n+1,y,HPP] = calibrate_area[h_temp,HPP]
h_BAL_hourly[n+1,y,HPP] = calibrate_head[h_temp,HPP]
# [calculate] ramp rate restrictions (MW attainable) at next time step (eq. S16)
if n < len(hrs_year) - 1:
if (P_BAL_difference_hourly[n+1,y,HPP] - P_BAL_difference_hourly[n,y,HPP]) < 0:
temp_sgn_turb = 1
else:
temp_sgn_turb = -1
P_BAL_ramp_restr_hourly[n+1,y,HPP] = P_BAL_hydro_flexible_hourly[n,y,HPP] + temp_sgn_turb*P_r_turb[HPP]*dP_ramp_turb*mins_hr
if P_BAL_ramp_restr_hourly[n+1,y,HPP] < 0:
P_BAL_ramp_restr_hourly[n+1,y,HPP] = 0
# [calculate] whether lake levels have dropped so low as to require hydropower curtailment
# [calculate] for small HPPs: use "RoR" flow component to fill up reservoir in case water levels have dropped below f_restart*V_max
# (see explanation below eq. S33)
if HPP_category[HPP] == "B":
if V_BAL_hourly[n+1,y,HPP] < f_restart*V_max[HPP]:
if n < len(hrs_year) - 1:
Q_in_frac_hourly[n+1,y,HPP] = Q_in_frac_hourly[n+1,y,HPP] + Q_in_RoR_hourly[n+1,y,HPP]
Q_in_RoR_hourly[n+1,y,HPP] = 0
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
Q_in_frac_hourly[0,y+1,HPP] = Q_in_frac_hourly[0,y+1,HPP] + Q_in_RoR_hourly[0,y+1,HPP]
Q_in_RoR_hourly[0,y+1,HPP] = 0
# [calculate] for large and small HPPs: curtail hydropower generation in case water levels have dropped below f_stop*V_max
# (see Note 3.1)
if V_BAL_hourly[n+1,y,HPP] < f_stop*V_max[HPP]:
if n < len(hrs_year) - 1:
hydro_BAL_curtailment_factor_hourly[n+1,y,HPP] = 0
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
hydro_BAL_curtailment_factor_hourly[0,y+1,HPP] = 0
# [calculate] restart hydropower generation if reservoir levels have recovered
# (see Note 3.1)
if hydro_BAL_curtailment_factor_hourly[n,y,HPP] == 0 and V_BAL_hourly[n+1,y,HPP] > f_restart*V_max[HPP]:
if n < len(hrs_year) - 1:
hydro_BAL_curtailment_factor_hourly[n+1,y,HPP] = 1
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
hydro_BAL_curtailment_factor_hourly[0,y+1,HPP] = 1
elif hydro_BAL_curtailment_factor_hourly[n,y,HPP] == 0 and V_BAL_hourly[n+1,y,HPP] <= f_restart*V_max[HPP]:
if n < len(hrs_year) - 1:
hydro_BAL_curtailment_factor_hourly[n+1,y,HPP] = 0
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
hydro_BAL_curtailment_factor_hourly[0,y+1,HPP] = 0
##### IDENTIFY YEARLY ELCC #####
# [calculate] total supplied HSW generation under optimal BAL solution
total_power_supply_BAL = P_BAL_hydro_stable_hourly[hrs_year,y,HPP] + P_BAL_hydro_flexible_hourly[hrs_year,y,HPP] + np.mean(c_multiplier_BAL[:,HPP])*c_solar_relative[HPP]*CF_solar_hourly[hrs_year,y,HPP] + np.mean(c_multiplier_BAL[:,HPP])*c_wind_relative[HPP]*CF_wind_hourly[hrs_year,y,HPP]
N_power_supply_BAL = int(np.ceil(np.max(total_power_supply_BAL)))
# [preallocate] range in which to identify ELCC
P_followed_BAL_range[y,:,HPP] = np.linspace(0,N_power_supply_BAL,N_ELCC)
power_unmet_BAL = np.zeros(shape = N_ELCC)
# [loop] to identify ELCC under optimal BAL solution
for n in range(N_ELCC):
temp = total_power_supply_BAL - P_followed_BAL_range[y,n,HPP]*L_norm[hrs_year,y,HPP]
if np.abs(np.mean(temp[temp<=0])) > 0:
power_unmet_BAL[n] = np.abs(np.sum(temp[temp<=0]))/np.sum(P_followed_BAL_range[y,n,HPP]*L_norm[hrs_year,y,HPP])
# [identify] total P_followed given the constraint LOEE_allowed (default zero)
N_demand_covered_BAL_temp = np.where(power_unmet_BAL[power_unmet_BAL != np.inf] > LOEE_allowed)[0]
if N_demand_covered_BAL_temp.size == 0 or N_demand_covered_BAL_temp[0] - 1 == 0:
P_followed_BAL_index[y,HPP] = 1
else:
P_followed_BAL_index[y,HPP] = N_demand_covered_BAL_temp[0] - 1
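# [note] the ELCC sweep evaluates N_ELCC candidate load scales between zero and the peak of total
# HSW supply; power_unmet_BAL is the loss-of-energy fraction (unmet energy divided by total load)
# for each candidate, and the selected index is the last candidate before this fraction exceeds
# LOEE_allowed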
# [identify] hourly time series of L_followed (MW) (eq. S23)
L_followed_BAL_hourly[hrs_year,y,HPP] = P_followed_BAL_range[y,int(P_followed_BAL_index[y,HPP]),HPP]*L_norm[hrs_year,y,HPP]
# [calculate] difference between ELCC and total HSW generated (excl. RoR component) to obtain Residual Load Duration Curve (RLDC) (eq. S22)
L_res_BAL_hourly[hrs_year,y,HPP] = L_followed_BAL_hourly[hrs_year,y,HPP] - total_power_supply_BAL
# [arrange] mean fraction of unmet load by month
for m in range(months_yr):
temp1 = L_res_BAL_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP]
temp2 = L_followed_BAL_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP]
L_unmet_BAL_frac_bymonth[m,y,HPP] = np.sum(temp1[temp1>0])/np.sum(temp2)
# [check] convergence of the solution towards P_stable
convergence_test_BAL[x] = np.nanmean(P_BAL_hydro_stable_hourly[:,:,HPP])
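# [note] convergence_test_BAL[x] records the mean stable hydropower output of each outer iteration x,
# so that convergence of the P_stable estimate across iterations can be verified (see explanation
# below eq. S19)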
# [arrange] complete time series of water volume, area and levels
for y in range(len(simulation_years)):
V_BAL_hourly[int(hrs_byyear[y]),y,HPP] = np.nan
A_BAL_hourly[int(hrs_byyear[y]),y,HPP] = np.nan
h_BAL_hourly[int(hrs_byyear[y]),y,HPP] = np.nan
temp_volume_upper_BAL_series = V_BAL_hourly[:,:,HPP]
temp_volume_upper_BAL_series = (np.transpose(temp_volume_upper_BAL_series)).ravel()
temp_volume_upper_BAL_series = temp_volume_upper_BAL_series[np.isfinite(temp_volume_upper_BAL_series)]
V_BAL_series_hourly[:,HPP] = temp_volume_upper_BAL_series
temp_area_BAL_series = A_BAL_hourly[:,:,HPP]
temp_area_BAL_series = (np.transpose(temp_area_BAL_series)).ravel()
temp_area_BAL_series = temp_area_BAL_series[np.isfinite(temp_area_BAL_series)]
A_BAL_series_hourly[:,HPP] = temp_area_BAL_series
temp_head_BAL_series = h_BAL_hourly[:,:,HPP]
temp_head_BAL_series = (np.transpose(temp_head_BAL_series)).ravel()
temp_head_BAL_series = temp_head_BAL_series[np.isfinite(temp_head_BAL_series)]
h_BAL_series_hourly[:,HPP] = temp_head_BAL_series
# [display] once BAL simulation is complete
print("done")
###############################################################
############------ CHECK NEED TO RESIMULATE -------############
###############################################################
# [calculate] turbine exhaustion factor k_turb in BAL (eq. S28)
k_turb_hourly_BAL[:,:,HPP] = (Q_BAL_stable_hourly[:,:,HPP] + Q_BAL_flexible_hourly[:,:,HPP])/Q_max_turb[HPP]
# [check] if criterion on k_turb is met for BAL, wrap up simulation and write data
if np.median(np.nanpercentile(k_turb_hourly_BAL[:,:,HPP],99,0)) < 1:
break
else:
# [display] in case k_turb criterion was not met (eq. S28)
print("requires resimulating at lower C_OR...")
###############################################################
############----------- STOR iterations -----------############
###############################################################
# [initialize] STOR scenario is only relevant for large HPPs
if HPP_category[HPP] == "B":
STOR_break[HPP] = 1
elif option_storage == 0:
STOR_break[HPP] = 1
# [check] start loop if STOR scenario could be an option (0 = yes, 1 = no)
if STOR_break[HPP] == 0:
# [display] start of iterations to find optimal solution for STOR operation
print("(iv) finding optimal STOR solution")
for q in range(len(C_OR_range_STOR)):
# [calculate] ratio of stable (environmental) to average total outflow (see eq. S14)
Q_stable_ratio = 1 - C_OR_range_STOR[q]
# [display] refinement step in STOR simulation
print("C_OR = ", np.round(100*C_OR_range_STOR[q], decimals = 1), "%")
# [loop] across refinement steps to increase accuracy
for n_refine_STOR in range(N_refine_STOR):
# [initialize] range for current refinement step; each step increases accuracy by one digit
if n_refine_STOR == 0:
f_demand_STOR_start = f_init_STOR_start
f_demand_STOR_step = f_init_STOR_step
f_demand_STOR_end = f_init_STOR_end
elif n_refine_STOR > 0:
f_demand_STOR_start = f_demand_opt_STOR - f_demand_STOR_step
f_demand_STOR_end = f_demand_opt_STOR + f_demand_STOR_step
f_demand_STOR_step = f_demand_STOR_step/10
f_demand_STOR = np.arange(f_demand_STOR_start, f_demand_STOR_end + f_demand_STOR_step, f_demand_STOR_step)
# [preallocate] psi (eq. S21)
psi_STOR = np.full([len(f_demand_STOR)], np.nan)
# [loop] to find optimal values of E_solar and E_wind (eq. S25) by locating minimum in psi (eq. S21)
for f in range(len(f_demand_STOR)):
# [display] progress within each refinement step in STOR simulation
print("refinement step", n_refine_STOR + 1, "/", N_refine_STOR, "> scanning:", np.floor(100*(f + 1)/len(f_demand_STOR)), "%")
# [initialize] realistic value of total SW power (MWh/year) so that we can loop over realistic values of c_solar and c_wind (eq. S25)
E_SW_loop_STOR = np.mean(E_hydro_CONV_stable_yearly[:,HPP])*f_demand_STOR[f]*np.ones(shape = (len(E_hydro_CONV_stable_yearly[:,HPP])))
# [preallocate] stable hydropower generation P_stable in MW (see explanation below eq. S19)
P_STOR_hydro_stable_hourly[:,:,HPP] = np.nan
# [loop] across all simulation years to identify realistic c_solar and c_wind values
for y in range(len(simulation_years)):
# [read] vector with hours in each year
hrs_year = range(int(hrs_byyear[y]))
# [calculate] determine realistic amount of SW capacity in MW (c_solar, c_wind) corresponding to generation equal to E_SW_loop_STOR
E_SW_per_MW_STOR_yearly[y,HPP] = np.sum(c_solar_relative[HPP]*CF_solar_hourly[hrs_year,y,HPP] + c_wind_relative[HPP]*CF_wind_hourly[hrs_year,y,HPP])
c_multiplier_STOR[y,HPP] = E_SW_loop_STOR[y]/E_SW_per_MW_STOR_yearly[y,HPP]
# [loop] perform iterations to get converged estimate of P_stable (see explanation below eq. S19)
for x in range(X_max_STOR):
# [calculate] environmentally required outflow (eq. S14)
temp_Q_out_STOR = Q_in_nat_av*np.ones(shape = (len(Q_CONV_stable_hourly),len(Q_CONV_stable_hourly[0])))
temp_Q_out_STOR[np.isnan(Q_CONV_stable_hourly[:,:,HPP])] = np.nan
Q_STOR_stable_hourly[:,:,HPP] = Q_stable_ratio*temp_Q_out_STOR
# [initialize] This variable is equal to unity by default, but set to zero in case of extreme droughts forcing a
# temporary curtailment of hydropower generation (Note 3.1)
hydro_STOR_curtailment_factor_hourly[:,:,HPP] = 1
# [loop] across all simulation years to initialize P_stable (see explanation below eq. S19)
for y in range(len(simulation_years)):
# [read] vector with hours in each year
hrs_year = range(int(hrs_byyear[y]))
# [initialize] stable hydropower generation P_stable in MW (see explanation below eq. S19)
# use estimate P_stable,STOR = (1 - C_OR)*P_stable,CONV as initial guess
if x == 0:
P_STOR_inflexible_hourly[hrs_year,y,HPP] = (np.mean(c_multiplier_STOR[:,HPP])*c_solar_relative[HPP]*CF_solar_hourly[hrs_year,y,HPP] + np.mean(c_multiplier_STOR[:,HPP])*c_wind_relative[HPP]*CF_wind_hourly[hrs_year,y,HPP] + Q_stable_ratio*np.nanmean(P_CONV_hydro_stable_hourly[:,:,HPP]))
elif x > 0:
# use estimate P_stable,STOR from previous iteration
P_STOR_inflexible_hourly[hrs_year,y,HPP] = (np.mean(c_multiplier_STOR[:,HPP])*c_solar_relative[HPP]*CF_solar_hourly[hrs_year,y,HPP] + np.mean(c_multiplier_STOR[:,HPP])*c_wind_relative[HPP]*CF_wind_hourly[hrs_year,y,HPP] + P_STOR_hydro_stable_hourly[hrs_year,y,HPP])
# [calculate] P_load according to constraints on overproduction (eq. S11)
P_load_STOR = np.nanpercentile(P_STOR_inflexible_hourly[:,:,HPP],f_size)
# [loop] across all simulation years to perform optimization
for y in range(len(simulation_years)):
# [read] vector with hours in each year
hrs_year = range(int(hrs_byyear[y]))
# [calculate] hourly load time series in MW (eq. S10)
L_STOR_hourly[hrs_year,y,HPP] = P_load_STOR*L_norm[hrs_year,y,HPP]
# [calculate] load difference P_d (eq. S9)
P_STOR_difference_hourly[hrs_year,y,HPP] = P_STOR_inflexible_hourly[hrs_year,y,HPP] - L_STOR_hourly[hrs_year,y,HPP]
# [initialize] initial values of volume (m^3), area (m^2), hydraulic head (m) and ramp restrictions (MW/hr) for each simulation year
if y == 0:
V_STOR_hourly_upper[0,y,HPP] = V_CONV_hourly[0,y,HPP]
V_STOR_hourly_lower[0,y,HPP] = V_lower_max[HPP]*f_opt
A_STOR_hourly_upper[0,y,HPP] = A_CONV_hourly[0,y,HPP]
h_STOR_hourly[0,y,HPP] = h_CONV_hourly[0,y,HPP]
# [calculate] ramping constraint (eq. S16)
temp_sgn_turb = 1
P_STOR_ramp_restr_hourly[0,y,HPP] = P_r_turb[HPP]*dP_ramp_turb*mins_hr
# [calculate] ramping constraint for pump (eq. S37)
temp_sgn_pump = 1
P_STOR_ramp_restr_pump_hourly[0,y,HPP] = P_r_pump[HPP]*dP_ramp_pump*mins_hr
else:
temp = V_STOR_hourly_upper[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
V_STOR_hourly_upper[0,y,HPP] = temp[-1]
temp = V_STOR_hourly_lower[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
V_STOR_hourly_lower[0,y,HPP] = temp[-1]
temp = A_STOR_hourly_upper[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
A_STOR_hourly_upper[0,y,HPP] = temp[-1]
temp = h_STOR_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
h_STOR_hourly[0,y,HPP] = temp[-1]
# [calculate] ramping constraint (eq. S16)
temp = P_STOR_hydro_flexible_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
temp_P_difference = P_STOR_difference_hourly[:,y-1,HPP]
temp_P_difference = temp_P_difference[np.isfinite(temp_P_difference)]
# [calculate] whether ramping up (temp_sgn = 1) or down (temp_sgn = -1)
if P_STOR_difference_hourly[0,y,HPP] - temp_P_difference[-1] < 0:
temp_sgn_turb = 1
else:
temp_sgn_turb = -1
P_STOR_ramp_restr_hourly[0,y,HPP] = temp[-1] + temp_sgn_turb*P_r_turb[HPP]*dP_ramp_turb*mins_hr
if P_STOR_ramp_restr_hourly[0,y,HPP] < 0:
P_STOR_ramp_restr_hourly[0,y,HPP] = 0
# [calculate] ramping constraint for pump (eq. S37)
temp = P_STOR_pump_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
# [calculate] whether ramping up (temp_sgn = 1) or down (temp_sgn = -1)
if P_STOR_difference_hourly[0,y,HPP] - temp_P_difference[-1] < 0:
temp_sgn_pump = -1
else:
temp_sgn_pump = 1
P_STOR_ramp_restr_pump_hourly[0,y,HPP] = temp[-1] + temp_sgn_pump*P_r_pump[HPP]*dP_ramp_pump*mins_hr
if P_STOR_ramp_restr_pump_hourly[0,y,HPP] < 0:
P_STOR_ramp_restr_pump_hourly[0,y,HPP] = 0
# [loop] over all time steps in each simulation year to calculate reservoir dynamics and hydropower generation
for n in hrs_year:
# [check] stable outflow is reduced to zero in case of droughts
Q_STOR_stable_hourly[n,y,HPP] = Q_STOR_stable_hourly[n,y,HPP] * hydro_STOR_curtailment_factor_hourly[n,y,HPP]
# [calculate] flexible hydropower generation in MW (eq. S16, S17)
if P_STOR_difference_hourly[n,y,HPP] < 0:
Q_STOR_pot_turb_flexible[n,y,HPP] = np.max([0, Q_max_turb[HPP] - Q_STOR_stable_hourly[n,y,HPP]]) * hydro_STOR_curtailment_factor_hourly[n,y,HPP]
# [calculate] if ramping up
if temp_sgn_turb == 1:
P_STOR_hydro_flexible_hourly[n,y,HPP] = np.min([Q_STOR_pot_turb_flexible[n,y,HPP]*eta_turb*rho*g*h_STOR_hourly[n,y,HPP]/10**6, np.min([np.abs(P_STOR_difference_hourly[n,y,HPP]), P_STOR_ramp_restr_hourly[n,y,HPP]]) ])
# [calculate] if ramping down
elif temp_sgn_turb == -1:
P_STOR_hydro_flexible_hourly[n,y,HPP] = np.min([Q_STOR_pot_turb_flexible[n,y,HPP]*eta_turb*rho*g*h_STOR_hourly[n,y,HPP]/10**6, np.max([np.abs(P_STOR_difference_hourly[n,y,HPP]), P_STOR_ramp_restr_hourly[n,y,HPP]]) ])
# [calculate] pumping is not performed when P_d < 0 (eq. S37)
P_STOR_pump_hourly[n,y,HPP] = 0
# [calculate] pumping power in cases of surpluses (eq. S37, S38)
if P_STOR_difference_hourly[n,y,HPP] >= 0:
if V_STOR_hourly_upper[n,y,HPP]/V_max[HPP] < f_spill:
Q_STOR_pot_pump_hourly[n,y,HPP] = np.min([V_STOR_hourly_lower[n,y,HPP]/secs_hr, Q_max_pump[HPP]])
# [calculate] if ramping up
if temp_sgn_pump == 1:
P_STOR_pump_hourly[n,y,HPP] = np.min([Q_STOR_pot_pump_hourly[n,y,HPP]*eta_pump**(-1)*rho*g*h_STOR_hourly[n,y,HPP]/10**6, np.min([np.abs(P_STOR_difference_hourly[n,y,HPP]), P_STOR_ramp_restr_pump_hourly[n,y,HPP]]) ])
# [calculate] if ramping down
elif temp_sgn_pump == -1:
P_STOR_pump_hourly[n,y,HPP] = np.min([Q_STOR_pot_pump_hourly[n,y,HPP]*eta_pump**(-1)*rho*g*h_STOR_hourly[n,y,HPP]/10**6, np.max([np.abs(P_STOR_difference_hourly[n,y,HPP]), P_STOR_ramp_restr_pump_hourly[n,y,HPP]]) ])
else:
P_STOR_pump_hourly[n,y,HPP] = 0
# [check] flexible hydropower generation is zero when P_d >= 0 (eq. S16)
P_STOR_hydro_flexible_hourly[n,y,HPP] = 0
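# [note] pumping only takes place during surpluses (P_d >= 0) and only while the upper reservoir is
# below the spill threshold; the pumpable flow is limited by the water available in the lower
# reservoir during one hour and by the pump capacity Q_max_pump, and the pump power uses 1/eta_pump
# because lifting water consumes more electricity than the turbines would recover from it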
# [calculate] stable hydropower generation in MW (eq. S15)
Q_pot_turb_STOR = np.min([Q_STOR_stable_hourly[n,y,HPP], Q_max_turb[HPP]])
P_STOR_hydro_stable_hourly[n,y,HPP] = Q_pot_turb_STOR*eta_turb*rho*g*h_STOR_hourly[n,y,HPP]/10**6
# [calculate] flexible turbined flow (eq. S18) and pumped flow (eq. S39) in m^3/s
if h_STOR_hourly[n,y,HPP] > 0:
Q_STOR_flexible_hourly[n,y,HPP] = P_STOR_hydro_flexible_hourly[n,y,HPP]/(eta_turb*rho*g*h_STOR_hourly[n,y,HPP])*10**6
Q_STOR_pump_hourly[n,y,HPP] = P_STOR_pump_hourly[n,y,HPP]/(eta_pump**(-1)*rho*g*h_STOR_hourly[n,y,HPP])*10**6
else:
# [check] cannot be negative
h_STOR_hourly[n,y,HPP] = 0
Q_STOR_flexible_hourly[n,y,HPP] = 0
Q_STOR_pump_hourly[n,y,HPP] = 0
# [calculate] spilling component of upper reservoir in m^3/s (eq. S19)
if V_STOR_hourly_upper[n,y,HPP]/V_max[HPP] < f_spill:
Q_STOR_spill_hourly_upper[n,y,HPP] = 0
else:
Q_STOR_spill_hourly_upper[n,y,HPP] = (Q_in_frac_hourly[n,y,HPP] + (precipitation_flux_hourly[n,y,HPP] - evaporation_flux_hourly[n,y,HPP])*A_STOR_hourly_upper[n,y,HPP]/rho)*(1 + mu) - Q_STOR_stable_hourly[n,y,HPP] - Q_STOR_flexible_hourly[n,y,HPP]
# [check] spilling component cannot be negative (eq. S7)
if Q_STOR_spill_hourly_upper[n,y,HPP] < 0:
Q_STOR_spill_hourly_upper[n,y,HPP] = 0
# [calculate] spilling component of lower reservoir in m^3/s (eq. S40)
if (V_lower_max[HPP] - V_STOR_hourly_lower[n,y,HPP])/secs_hr < Q_STOR_flexible_hourly[n,y,HPP]:
Q_STOR_spill_hourly_lower[n,y,HPP] = Q_STOR_flexible_hourly[n,y,HPP] - (V_lower_max[HPP] - V_STOR_hourly_lower[n,y,HPP])/secs_hr
elif (V_lower_max[HPP] - V_STOR_hourly_lower[n,y,HPP])/secs_hr >= Q_STOR_flexible_hourly[n,y,HPP]:
Q_STOR_spill_hourly_lower[n,y,HPP] = 0
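# [note] the lower reservoir spills whenever the flexible turbined inflow exceeds its remaining
# storage headroom (V_lower_max - V_STOR_hourly_lower) within the hour (eq. S40)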
# [calculate] total net outflow in m^3/s (eq. S36)
Q_STOR_out_hourly[n,y,HPP] = Q_STOR_stable_hourly[n,y,HPP] + Q_STOR_spill_hourly_upper[n,y,HPP] + Q_STOR_spill_hourly_lower[n,y,HPP]
# [calculate] reservoir volume in m^3 at next time step (eq. S34, S35)
V_STOR_hourly_upper[n+1,y,HPP] = V_STOR_hourly_upper[n,y,HPP] + (Q_in_frac_hourly[n,y,HPP] - Q_STOR_stable_hourly[n,y,HPP] - Q_STOR_flexible_hourly[n,y,HPP] - Q_STOR_spill_hourly_upper[n,y,HPP] + Q_STOR_pump_hourly[n,y,HPP] + (precipitation_flux_hourly[n,y,HPP] - evaporation_flux_hourly[n,y,HPP])*A_STOR_hourly_upper[n,y,HPP]/rho)*secs_hr
V_STOR_hourly_lower[n+1,y,HPP] = V_STOR_hourly_lower[n,y,HPP] + (Q_STOR_flexible_hourly[n,y,HPP] - Q_STOR_pump_hourly[n,y,HPP] - Q_STOR_spill_hourly_lower[n,y,HPP])*secs_hr
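# [note] the upper reservoir balance counts the pumped flow as an inflow, while the lower reservoir
# receives the flexible turbined flow and loses the pumped and spilled flows (eq. S34, S35)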
# [check] prevent unrealistic (negative) volumes when lake levels drop low
if V_STOR_hourly_upper[n+1,y,HPP] < 0:
Q_STOR_stable_hourly[n,y,HPP] = 0
P_STOR_hydro_stable_hourly[n,y,HPP] = 0
Q_STOR_flexible_hourly[n,y,HPP] = 0
P_STOR_hydro_flexible_hourly[n,y,HPP] = 0
Q_STOR_out_hourly[n,y,HPP] = Q_STOR_stable_hourly[n,y,HPP] + Q_STOR_flexible_hourly[n,y,HPP] + Q_STOR_spill_hourly_upper[n,y,HPP] + Q_in_RoR_hourly[n,y,HPP]
A_STOR_hourly_upper[n,y,HPP] = 0
V_STOR_hourly_upper[n+1,y,HPP] = V_STOR_hourly_upper[n,y,HPP] + (Q_in_frac_hourly[n,y,HPP] - Q_STOR_stable_hourly[n,y,HPP] - Q_STOR_flexible_hourly[n,y,HPP] - Q_STOR_spill_hourly_upper[n,y,HPP] + Q_STOR_pump_hourly[n,y,HPP] + (precipitation_flux_hourly[n,y,HPP] - evaporation_flux_hourly[n,y,HPP])*A_STOR_hourly_upper[n,y,HPP]/rho)*secs_hr
# [calculate] reservoir lake area in m^2 and hydraulic head in m from bathymetric relationship
h_temp = np.where(abs(calibrate_volume[:,HPP] - V_STOR_hourly_upper[n+1,y,HPP]) == min(abs(calibrate_volume[:,HPP] - V_STOR_hourly_upper[n+1,y,HPP])))[0][0]
A_STOR_hourly_upper[n+1,y,HPP] = calibrate_area[h_temp,HPP]
h_STOR_hourly[n+1,y,HPP] = calibrate_head[h_temp,HPP]
# [calculate] ramp rate restrictions (MW attainable) at next time step (for turbine) (eq. S16)
if n < len(hrs_year) - 1:
if (P_STOR_difference_hourly[n+1,y,HPP] - P_STOR_difference_hourly[n,y,HPP]) < 0:
temp_sgn_turb = 1
else:
temp_sgn_turb = -1
P_STOR_ramp_restr_hourly[n+1,y,HPP] = P_STOR_hydro_flexible_hourly[n,y,HPP] + temp_sgn_turb*P_r_turb[HPP]*dP_ramp_turb*mins_hr
if P_STOR_ramp_restr_hourly[n+1,y,HPP] < 0:
P_STOR_ramp_restr_hourly[n+1,y,HPP] = 0
# [calculate] ramp rate restrictions (MW attainable) at next time step (for pump) (eq. S37)
if n < len(hrs_year) - 1:
if (P_STOR_difference_hourly[n+1,y,HPP] - P_STOR_difference_hourly[n,y,HPP]) < 0:
temp_sgn_pump = -1
else:
temp_sgn_pump = 1
P_STOR_ramp_restr_pump_hourly[n+1,y,HPP] = P_STOR_pump_hourly[n,y,HPP] + temp_sgn_pump*P_r_pump[HPP]*dP_ramp_pump*mins_hr
if P_STOR_ramp_restr_pump_hourly[n+1,y,HPP] < 0:
P_STOR_ramp_restr_pump_hourly[n+1,y,HPP] = 0
# [calculate] whether lake levels have dropped so low as to require hydropower curtailment
# curtail hydropower generation in case water levels have dropped below f_stop*V_max
# (see Note 3.1)
if V_STOR_hourly_upper[n+1,y,HPP] < f_stop*V_max[HPP]:
if n < len(hrs_year) - 1:
hydro_STOR_curtailment_factor_hourly[n+1,y,HPP] = 0
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
hydro_STOR_curtailment_factor_hourly[0,y+1,HPP] = 0
# [calculate] restart hydropower generation if reservoir levels have recovered
# (see Note 3.1)
if hydro_STOR_curtailment_factor_hourly[n,y,HPP] == 0 and V_STOR_hourly_upper[n+1,y,HPP] > f_restart*V_max[HPP]:
if n < len(hrs_year) - 1:
hydro_STOR_curtailment_factor_hourly[n+1,y,HPP] = 1
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
hydro_STOR_curtailment_factor_hourly[0,y+1,HPP] = 1
elif hydro_STOR_curtailment_factor_hourly[n,y,HPP] == 0 and V_STOR_hourly_upper[n+1,y,HPP] <= f_restart*V_max[HPP]:
if n < len(hrs_year) - 1:
hydro_STOR_curtailment_factor_hourly[n+1,y,HPP] = 0
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
hydro_STOR_curtailment_factor_hourly[0,y+1,HPP] = 0
# [arrange] complete time series of water volume for eq. S20
for y in range(len(simulation_years)):
V_STOR_hourly_upper[int(hrs_byyear[y]),y,HPP] = np.nan
temp_volume_upper_STOR_series = V_STOR_hourly_upper[:,:,HPP]
temp_volume_upper_STOR_series = (np.transpose(temp_volume_upper_STOR_series)).ravel()
temp_volume_upper_STOR_series = temp_volume_upper_STOR_series[np.isfinite(temp_volume_upper_STOR_series)]
# [CHANGED] [calculate] deviation between rule curve and STOR reservoir dynamics (eq. S21)
psi_STOR[f] = np.mean(np.abs(temp_volume_upper_STOR_series - rule_curve_volume_series[:,HPP]))/np.mean(rule_curve_volume_series[:,HPP])
# [check] see explanation below eq. S21: if droughts occur in CONV, STOR should have no MORE days of curtailed flow than CONV ...
# and curtailed flow should occur in less than 50% of the years in the simulation, so median yearly statistics represent normal operation
if np.nanmin(V_CONV_hourly[:,:,HPP]) < f_stop*V_max[HPP] and (np.nansum(Q_STOR_out_hourly[:,:,HPP] == 0) > np.nansum(Q_CONV_out_hourly[:,:,HPP] == 0) or np.sum(np.sum(Q_STOR_out_hourly[:,:,HPP] - Q_in_RoR_hourly[:,:,HPP] == 0, axis = 0) > 0) > np.floor(len(simulation_years)/2)):
psi_STOR[f] = np.nan
# [check] if droughts do not occur in CONV, then neither should they in STOR
elif np.nanmin(V_CONV_hourly[:,:,HPP]) >= f_stop*V_max[HPP] and np.nanmin(V_STOR_hourly_upper[:,:,HPP]) < f_stop*V_max[HPP]:
psi_STOR[f] = np.nan
# [NEW] to speed up simulation: stop scanning f_demand as soon as psi stops decreasing or becomes NaN
if f > 0 and (psi_STOR[f] > psi_STOR[f-1] or (np.isnan(psi_STOR[f]))):
break
# [identify] minimum in psi (eq. S21)
if np.sum(np.isnan(psi_STOR)) == len(psi_STOR) and f_demand_STOR[0] == 0:
f_demand_opt_STOR = 0
psi_STOR_opt = 0
break
else:
crossing_STOR = np.where(psi_STOR == min(psi_STOR))[0][0]
f_demand_opt_STOR = f_demand_STOR[crossing_STOR]
psi_STOR_opt = np.abs(psi_STOR[crossing_STOR])
# [check] prevent negative results
if f_demand_opt_STOR == 0:
f_demand_opt_STOR = f_demand_STOR[crossing_STOR + 1]
psi_STOR_opt = np.abs(psi_STOR[crossing_STOR + 1])
# [check] determine if psi is low enough for this to qualify as optimum solution
if psi_STOR_opt < psi_min_threshold:
break
# [check] if range in which to identify ELCC is adequate
if f_demand_opt_STOR == f_demand_STOR[-1]:
print("Warning: parameter f_init_STOR_end likely set too low")
# [initialize] optimal value of total SW power (MWh/year) so that we can calculate optimal c_solar and c_wind (eq. S25)
E_SW_loop_STOR_opt[HPP] = np.mean(E_hydro_CONV_stable_yearly[:,HPP])*f_demand_opt_STOR
###############################################################
############----------- STOR optimized ------------############
###############################################################
# [display]
print("(v) found optimum STOR solution - saving all variables")
# [preallocate] to test convergence towards P_stable (see explanation below eq. S19)
convergence_test_STOR = np.zeros(shape = (X_max_STOR))
# [loop] across all simulation years to identify realistic c_solar and c_wind values
for y in range(len(simulation_years)):
# [read] vector with hours in each year
hrs_year = range(int(hrs_byyear[y]))
# [calculate] determine realistic amount of SW capacity in MW (c_solar, c_wind) corresponding to generation equal to E_SW_loop_STOR
E_SW_per_MW_STOR_yearly[y,HPP] = np.sum(c_solar_relative[HPP]*CF_solar_hourly[hrs_year,y,HPP] + c_wind_relative[HPP]*CF_wind_hourly[hrs_year,y,HPP])
c_multiplier_STOR[y,HPP] = E_SW_loop_STOR_opt[HPP]/E_SW_per_MW_STOR_yearly[y,HPP]
# [loop] perform iterations to get converged estimate of P_stable (see explanation below eq. S19)
for x in range(X_max_STOR):
# [calculate] environmentally required outflow (eq. S14)
temp_Q_out_STOR = Q_in_nat_av*np.ones(shape = (len(Q_CONV_stable_hourly),len(Q_CONV_stable_hourly[0])))
temp_Q_out_STOR[np.isnan(Q_CONV_stable_hourly[:,:,HPP])] = np.nan
Q_STOR_stable_hourly[:,:,HPP] = Q_stable_ratio*temp_Q_out_STOR
# [initialize] This variable is equal to unity by default, but set to zero in case of extreme droughts forcing a
# temporary curtailment of hydropower generation (Note 3.1)
hydro_STOR_curtailment_factor_hourly[:,:,HPP] = 1
# [loop] across all simulation years to initialize P_stable (see explanation below eq. S19)
for y in range(len(simulation_years)):
# [read] vector with hours in each year
hrs_year = range(int(hrs_byyear[y]))
# [initialize] stable hydropower generation P_stable in MW (see explanation below eq. S19)
# use estimate P_stable,STOR = (1 - C_OR)*P_stable,CONV as initial guess
if x == 0:
P_STOR_inflexible_hourly[hrs_year,y,HPP] = (np.mean(c_multiplier_STOR[:,HPP])*c_solar_relative[HPP]*CF_solar_hourly[hrs_year,y,HPP] + np.mean(c_multiplier_STOR[:,HPP])*c_wind_relative[HPP]*CF_wind_hourly[hrs_year,y,HPP] + Q_stable_ratio*np.nanmean(P_CONV_hydro_stable_hourly[:,:,HPP]))
elif x > 0:
# use estimate P_stable,STOR from previous iteration
P_STOR_inflexible_hourly[hrs_year,y,HPP] = (np.mean(c_multiplier_STOR[:,HPP])*c_solar_relative[HPP]*CF_solar_hourly[hrs_year,y,HPP] + np.mean(c_multiplier_STOR[:,HPP])*c_wind_relative[HPP]*CF_wind_hourly[hrs_year,y,HPP] + P_STOR_hydro_stable_hourly[hrs_year,y,HPP])
# [calculate] total solar and wind power generation by hour (eq. S12)
P_STOR_solar_hourly[hrs_year,y,HPP] = np.mean(c_multiplier_STOR[:,HPP])*c_solar_relative[HPP]*CF_solar_hourly[hrs_year,y,HPP]
P_STOR_wind_hourly[hrs_year,y,HPP] = np.mean(c_multiplier_STOR[:,HPP])*c_wind_relative[HPP]*CF_wind_hourly[hrs_year,y,HPP]
# [calculate] P_load according to constraints on overproduction (eq. S11)
P_load_STOR = np.nanpercentile(P_STOR_inflexible_hourly[:,:,HPP],f_size)
# [loop] across all simulation years to perform optimization
for y in range(len(simulation_years)):
# [read] vector with hours in each year
hrs_year = range(int(hrs_byyear[y]))
# [calculate] hourly load time series in MW (eq. S10)
L_STOR_hourly[hrs_year,y,HPP] = P_load_STOR*L_norm[hrs_year,y,HPP]
# [calculate] load difference P_d (eq. S9)
P_STOR_difference_hourly[hrs_year,y,HPP] = P_STOR_inflexible_hourly[hrs_year,y,HPP] - L_STOR_hourly[hrs_year,y,HPP]
# [initialize] initial values of volume (m^3), area (m^2), hydraulic head (m) and ramp restrictions (MW/hr) for each simulation year
if y == 0:
V_STOR_hourly_upper[0,y,HPP] = V_CONV_hourly[0,y,HPP]
V_STOR_hourly_lower[0,y,HPP] = V_lower_max[HPP]*f_opt
A_STOR_hourly_upper[0,y,HPP] = A_CONV_hourly[0,y,HPP]
h_STOR_hourly[0,y,HPP] = h_CONV_hourly[0,y,HPP]
# [calculate] ramping constraint (eq. S16)
temp_sgn_turb = 1
P_STOR_ramp_restr_hourly[0,y,HPP] = P_r_turb[HPP]*dP_ramp_turb*mins_hr
# [calculate] ramping constraint for pump (eq. S37)
temp_sgn_pump = 1
P_STOR_ramp_restr_pump_hourly[0,y,HPP] = P_r_pump[HPP]*dP_ramp_pump*mins_hr
else:
temp = V_STOR_hourly_upper[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
V_STOR_hourly_upper[0,y,HPP] = temp[-1]
temp = V_STOR_hourly_lower[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
V_STOR_hourly_lower[0,y,HPP] = temp[-1]
temp = A_STOR_hourly_upper[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
A_STOR_hourly_upper[0,y,HPP] = temp[-1]
temp = h_STOR_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
h_STOR_hourly[0,y,HPP] = temp[-1]
# [calculate] ramping constraint (eq. S16)
temp = P_STOR_hydro_flexible_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
temp_P_difference = P_STOR_difference_hourly[:,y-1,HPP]
temp_P_difference = temp_P_difference[np.isfinite(temp_P_difference)]
# [calculate] whether ramping up (temp_sgn = 1) or down (temp_sgn = -1)
if P_STOR_difference_hourly[0,y,HPP] - temp_P_difference[-1] < 0:
temp_sgn_turb = 1
else:
temp_sgn_turb = -1
P_STOR_ramp_restr_hourly[0,y,HPP] = temp[-1] + temp_sgn_turb*P_r_turb[HPP]*dP_ramp_turb*mins_hr
if P_STOR_ramp_restr_hourly[0,y,HPP] < 0:
P_STOR_ramp_restr_hourly[0,y,HPP] = 0
# [calculate] ramping constraint for pump (eq. S37)
temp = P_STOR_pump_hourly[:,y-1,HPP]
temp = temp[np.isfinite(temp)]
# [calculate] whether ramping up (temp_sgn = 1) or down (temp_sgn = -1)
if P_STOR_difference_hourly[0,y,HPP] - temp_P_difference[-1] < 0:
temp_sgn_pump = -1
else:
temp_sgn_pump = 1
P_STOR_ramp_restr_pump_hourly[0,y,HPP] = temp[-1] + temp_sgn_pump*P_r_pump[HPP]*dP_ramp_pump*mins_hr
if P_STOR_ramp_restr_pump_hourly[0,y,HPP] < 0:
P_STOR_ramp_restr_pump_hourly[0,y,HPP] = 0
# [loop] over all time steps in each simulation year to calculate reservoir dynamics and hydropower generation
for n in hrs_year:
# [check] stable outflow is reduced to zero in case of droughts
Q_STOR_stable_hourly[n,y,HPP] = Q_STOR_stable_hourly[n,y,HPP] * hydro_STOR_curtailment_factor_hourly[n,y,HPP]
# [calculate] flexible hydropower generation in MW (eq. S16, S17)
if P_STOR_difference_hourly[n,y,HPP] < 0:
Q_STOR_pot_turb_flexible[n,y,HPP] = np.max([0, Q_max_turb[HPP] - Q_STOR_stable_hourly[n,y,HPP]]) * hydro_STOR_curtailment_factor_hourly[n,y,HPP]
# [calculate] if ramping up
if temp_sgn_turb == 1:
P_STOR_hydro_flexible_hourly[n,y,HPP] = np.min([Q_STOR_pot_turb_flexible[n,y,HPP]*eta_turb*rho*g*h_STOR_hourly[n,y,HPP]/10**6, np.min([np.abs(P_STOR_difference_hourly[n,y,HPP]), P_STOR_ramp_restr_hourly[n,y,HPP]]) ])
# [calculate] if ramping down
elif temp_sgn_turb == -1:
P_STOR_hydro_flexible_hourly[n,y,HPP] = np.min([Q_STOR_pot_turb_flexible[n,y,HPP]*eta_turb*rho*g*h_STOR_hourly[n,y,HPP]/10**6, np.max([np.abs(P_STOR_difference_hourly[n,y,HPP]), P_STOR_ramp_restr_hourly[n,y,HPP]]) ])
# [calculate] pumping is not performed when P_d < 0 (eq. S37)
P_STOR_pump_hourly[n,y,HPP] = 0
# [calculate] pumping power in cases of surpluses (eq. S37, S38)
if P_STOR_difference_hourly[n,y,HPP] >= 0:
if V_STOR_hourly_upper[n,y,HPP]/V_max[HPP] < f_spill:
Q_STOR_pot_pump_hourly[n,y,HPP] = np.min([V_STOR_hourly_lower[n,y,HPP]/secs_hr, Q_max_pump[HPP]])
# [calculate] if ramping up
if temp_sgn_pump == 1:
P_STOR_pump_hourly[n,y,HPP] = np.min([Q_STOR_pot_pump_hourly[n,y,HPP]*eta_pump**(-1)*rho*g*h_STOR_hourly[n,y,HPP]/10**6, np.min([np.abs(P_STOR_difference_hourly[n,y,HPP]), P_STOR_ramp_restr_pump_hourly[n,y,HPP]]) ])
# [calculate] if ramping down
elif temp_sgn_pump == -1:
P_STOR_pump_hourly[n,y,HPP] = np.min([Q_STOR_pot_pump_hourly[n,y,HPP]*eta_pump**(-1)*rho*g*h_STOR_hourly[n,y,HPP]/10**6, np.max([np.abs(P_STOR_difference_hourly[n,y,HPP]), P_STOR_ramp_restr_pump_hourly[n,y,HPP]]) ])
else:
P_STOR_pump_hourly[n,y,HPP] = 0
# [check] flexible hydropower generation is zero when P_d >= 0 (eq. S16)
P_STOR_hydro_flexible_hourly[n,y,HPP] = 0
# [calculate] stable hydropower generation in MW (eq. S15)
Q_pot_turb_STOR = np.min([Q_STOR_stable_hourly[n,y,HPP], Q_max_turb[HPP]])
P_STOR_hydro_stable_hourly[n,y,HPP] = Q_pot_turb_STOR*eta_turb*rho*g*h_STOR_hourly[n,y,HPP]/10**6
# [calculate] flexible turbined flow (eq. S18) and pumped flow (eq. S39) in m^3/s
if h_STOR_hourly[n,y,HPP] > 0:
Q_STOR_flexible_hourly[n,y,HPP] = P_STOR_hydro_flexible_hourly[n,y,HPP]/(eta_turb*rho*g*h_STOR_hourly[n,y,HPP])*10**6
Q_STOR_pump_hourly[n,y,HPP] = P_STOR_pump_hourly[n,y,HPP]/(eta_pump**(-1)*rho*g*h_STOR_hourly[n,y,HPP])*10**6
else:
# [check] cannot be negative
h_STOR_hourly[n,y,HPP] = 0
Q_STOR_flexible_hourly[n,y,HPP] = 0
Q_STOR_pump_hourly[n,y,HPP] = 0
# [calculate] spilling component of upper reservoir in m^3/s (eq. S19)
if V_STOR_hourly_upper[n,y,HPP]/V_max[HPP] < f_spill:
Q_STOR_spill_hourly_upper[n,y,HPP] = 0
else:
Q_STOR_spill_hourly_upper[n,y,HPP] = (Q_in_frac_hourly[n,y,HPP] + (precipitation_flux_hourly[n,y,HPP] - evaporation_flux_hourly[n,y,HPP])*A_STOR_hourly_upper[n,y,HPP]/rho)*(1 + mu) - Q_STOR_stable_hourly[n,y,HPP] - Q_STOR_flexible_hourly[n,y,HPP]
# [check] spilling component cannot be negative (eq. S7)
if Q_STOR_spill_hourly_upper[n,y,HPP] < 0:
Q_STOR_spill_hourly_upper[n,y,HPP] = 0
# [calculate] spilling component of lower reservoir in m^3/s (eq. S40)
if (V_lower_max[HPP] - V_STOR_hourly_lower[n,y,HPP])/secs_hr < Q_STOR_flexible_hourly[n,y,HPP]:
Q_STOR_spill_hourly_lower[n,y,HPP] = Q_STOR_flexible_hourly[n,y,HPP] - (V_lower_max[HPP] - V_STOR_hourly_lower[n,y,HPP])/secs_hr
elif (V_lower_max[HPP] - V_STOR_hourly_lower[n,y,HPP])/secs_hr >= Q_STOR_flexible_hourly[n,y,HPP]:
Q_STOR_spill_hourly_lower[n,y,HPP] = 0
# [calculate] total net outflow in m^3/s (eq. S36)
Q_STOR_out_hourly[n,y,HPP] = Q_STOR_stable_hourly[n,y,HPP] + Q_STOR_spill_hourly_upper[n,y,HPP] + Q_STOR_spill_hourly_lower[n,y,HPP]
# [calculate] reservoir volume in m^3 at next time step (eq. S34, S35)
V_STOR_hourly_upper[n+1,y,HPP] = V_STOR_hourly_upper[n,y,HPP] + (Q_in_frac_hourly[n,y,HPP] - Q_STOR_stable_hourly[n,y,HPP] - Q_STOR_flexible_hourly[n,y,HPP] - Q_STOR_spill_hourly_upper[n,y,HPP] + Q_STOR_pump_hourly[n,y,HPP] + (precipitation_flux_hourly[n,y,HPP] - evaporation_flux_hourly[n,y,HPP])*A_STOR_hourly_upper[n,y,HPP]/rho)*secs_hr
V_STOR_hourly_lower[n+1,y,HPP] = V_STOR_hourly_lower[n,y,HPP] + (Q_STOR_flexible_hourly[n,y,HPP] - Q_STOR_pump_hourly[n,y,HPP] - Q_STOR_spill_hourly_lower[n,y,HPP])*secs_hr
# [check] prevent unrealistic (negative) volumes when lake levels drop low
if V_STOR_hourly_upper[n+1,y,HPP] < 0:
Q_STOR_stable_hourly[n,y,HPP] = 0
P_STOR_hydro_stable_hourly[n,y,HPP] = 0
Q_STOR_flexible_hourly[n,y,HPP] = 0
P_STOR_hydro_flexible_hourly[n,y,HPP] = 0
Q_STOR_out_hourly[n,y,HPP] = Q_STOR_stable_hourly[n,y,HPP] + Q_STOR_flexible_hourly[n,y,HPP] + Q_STOR_spill_hourly_upper[n,y,HPP] + Q_in_RoR_hourly[n,y,HPP]
A_STOR_hourly_upper[n,y,HPP] = 0
V_STOR_hourly_upper[n+1,y,HPP] = V_STOR_hourly_upper[n,y,HPP] + (Q_in_frac_hourly[n,y,HPP] - Q_STOR_stable_hourly[n,y,HPP] - Q_STOR_flexible_hourly[n,y,HPP] - Q_STOR_spill_hourly_upper[n,y,HPP] + Q_STOR_pump_hourly[n,y,HPP] + (precipitation_flux_hourly[n,y,HPP] - evaporation_flux_hourly[n,y,HPP])*A_STOR_hourly_upper[n,y,HPP]/rho)*secs_hr
# [calculate] reservoir lake area in m^2 and hydraulic head in m from bathymetric relationship
h_temp = np.where(abs(calibrate_volume[:,HPP] - V_STOR_hourly_upper[n+1,y,HPP]) == min(abs(calibrate_volume[:,HPP] - V_STOR_hourly_upper[n+1,y,HPP])))[0][0]
A_STOR_hourly_upper[n+1,y,HPP] = calibrate_area[h_temp,HPP]
h_STOR_hourly[n+1,y,HPP] = calibrate_head[h_temp,HPP]
# [calculate] ramp rate restrictions (MW attainable) at next time step (for turbine) (eq. S16)
if n < len(hrs_year) - 1:
if (P_STOR_difference_hourly[n+1,y,HPP] - P_STOR_difference_hourly[n,y,HPP]) < 0:
temp_sgn_turb = 1
else:
temp_sgn_turb = -1
P_STOR_ramp_restr_hourly[n+1,y,HPP] = P_STOR_hydro_flexible_hourly[n,y,HPP] + temp_sgn_turb*P_r_turb[HPP]*dP_ramp_turb*mins_hr
if P_STOR_ramp_restr_hourly[n+1,y,HPP] < 0:
P_STOR_ramp_restr_hourly[n+1,y,HPP] = 0
# [calculate] ramp rate restrictions (MW attainable) at next time step (for pump) (eq. S37)
if n < len(hrs_year) - 1:
if (P_STOR_difference_hourly[n+1,y,HPP] - P_STOR_difference_hourly[n,y,HPP]) < 0:
temp_sgn_pump = -1
else:
temp_sgn_pump = 1
P_STOR_ramp_restr_pump_hourly[n+1,y,HPP] = P_STOR_pump_hourly[n,y,HPP] + temp_sgn_pump*P_r_pump[HPP]*dP_ramp_pump*mins_hr
if P_STOR_ramp_restr_pump_hourly[n+1,y,HPP] < 0:
P_STOR_ramp_restr_pump_hourly[n+1,y,HPP] = 0
# [calculate] whether lake levels have dropped so low as to require hydropower curtailment
# curtail hydropower generation in case water levels have dropped below f_stop*V_max
# (see Note 3.1)
if V_STOR_hourly_upper[n+1,y,HPP] < f_stop*V_max[HPP]:
if n < len(hrs_year) - 1:
hydro_STOR_curtailment_factor_hourly[n+1,y,HPP] = 0
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
hydro_STOR_curtailment_factor_hourly[0,y+1,HPP] = 0
# [calculate] restart hydropower generation if reservoir levels have recovered
# (see Note 3.1)
if hydro_STOR_curtailment_factor_hourly[n,y,HPP] == 0 and V_STOR_hourly_upper[n+1,y,HPP] > f_restart*V_max[HPP]:
if n < len(hrs_year) - 1:
hydro_STOR_curtailment_factor_hourly[n+1,y,HPP] = 1
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
hydro_STOR_curtailment_factor_hourly[0,y+1,HPP] = 1
elif hydro_STOR_curtailment_factor_hourly[n,y,HPP] == 0 and V_STOR_hourly_upper[n+1,y,HPP] <= f_restart*V_max[HPP]:
if n < len(hrs_year) - 1:
hydro_STOR_curtailment_factor_hourly[n+1,y,HPP] = 0
elif n == len(hrs_year) - 1 and y < len(simulation_years) - 1:
hydro_STOR_curtailment_factor_hourly[0,y+1,HPP] = 0
##### IDENTIFY YEARLY ELCC #####
# [calculate] total supplied HSW generation under optimal STOR solution
total_power_supply_STOR = P_STOR_hydro_stable_hourly[hrs_year,y,HPP] + P_STOR_hydro_flexible_hourly[hrs_year,y,HPP] - P_STOR_pump_hourly[hrs_year,y,HPP] + np.mean(c_multiplier_STOR[:,HPP])*c_solar_relative[HPP]*CF_solar_hourly[hrs_year,y,HPP] + np.mean(c_multiplier_STOR[:,HPP])*c_wind_relative[HPP]*CF_wind_hourly[hrs_year,y,HPP]
N_power_supply_STOR = int(np.ceil(np.max(total_power_supply_STOR)))
# [preallocate] range in which to identify ELCC
P_followed_STOR_range[y,:,HPP] = np.linspace(0,N_power_supply_STOR,N_ELCC)
power_unmet_STOR = np.zeros(shape = N_ELCC)
# [loop] to identify ELCC under optimal STOR solution
for n in range(N_ELCC):
temp = total_power_supply_STOR - P_followed_STOR_range[y,n,HPP]*L_norm[hrs_year,y,HPP]
if np.abs(np.mean(temp[temp<=0])) > 0:
power_unmet_STOR[n] = np.abs(np.sum(temp[temp<=0]))/np.sum(P_followed_STOR_range[y,n,HPP]*L_norm[hrs_year,y,HPP])
# [identify] total P_followed given the constraint LOEE_allowed (default zero)
N_demand_covered_STOR_temp = np.where(power_unmet_STOR[power_unmet_STOR != np.inf] > LOEE_allowed)[0]
if N_demand_covered_STOR_temp.size == 0 or N_demand_covered_STOR_temp[0] - 1 == 0:
P_followed_STOR_index[y,HPP] = 1
else:
P_followed_STOR_index[y,HPP] = N_demand_covered_STOR_temp[0] - 1
# [identify] hourly time series of L_followed (MW) (eq. S23)
L_followed_STOR_hourly[hrs_year,y,HPP] = P_followed_STOR_range[y,int(P_followed_STOR_index[y,HPP]),HPP]*L_norm[hrs_year,y,HPP]
# [calculate] difference between ELCC and total HSW generated (excl. RoR component) to obtain Residual Load Duration Curve (RLDC) (eq. S22)
L_res_STOR_hourly[hrs_year,y,HPP] = L_followed_STOR_hourly[hrs_year,y,HPP] - total_power_supply_STOR
# [arrange] mean fraction of unmet load by month
for m in range(months_yr):
temp1 = L_res_STOR_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP]
temp2 = L_followed_STOR_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP]
L_unmet_STOR_frac_bymonth[m,y,HPP] = np.sum(temp1[temp1>0])/np.sum(temp2)
# [check] convergence of the solution towards P_stable
convergence_test_STOR[x] = np.nanmean(P_STOR_hydro_stable_hourly[:,:,HPP])
# [arrange] complete time series of water volume, area and levels
for y in range(len(simulation_years)):
V_STOR_hourly_upper[int(hrs_byyear[y]),y,HPP] = np.nan
V_STOR_hourly_lower[int(hrs_byyear[y]),y,HPP] = np.nan
A_STOR_hourly_upper[int(hrs_byyear[y]),y,HPP] = np.nan
h_STOR_hourly[int(hrs_byyear[y]),y,HPP] = np.nan
temp_volume_upper_STOR_series = V_STOR_hourly_upper[:,:,HPP]
temp_volume_upper_STOR_series = (np.transpose(temp_volume_upper_STOR_series)).ravel()
temp_volume_upper_STOR_series = temp_volume_upper_STOR_series[np.isfinite(temp_volume_upper_STOR_series)]
V_STOR_series_hourly_upper[:,HPP] = temp_volume_upper_STOR_series
# [arrange] complete time series of lower reservoir water volume (assumes V_STOR_series_hourly_lower is preallocated analogously to V_STOR_series_hourly_upper)
temp_volume_lower_STOR_series = V_STOR_hourly_lower[:,:,HPP]
temp_volume_lower_STOR_series = (np.transpose(temp_volume_lower_STOR_series)).ravel()
temp_volume_lower_STOR_series = temp_volume_lower_STOR_series[np.isfinite(temp_volume_lower_STOR_series)]
V_STOR_series_hourly_lower[:,HPP] = temp_volume_lower_STOR_series
temp_area_STOR_series = A_STOR_hourly_upper[:,:,HPP]
temp_area_STOR_series = (np.transpose(temp_area_STOR_series)).ravel()
temp_area_STOR_series = temp_area_STOR_series[np.isfinite(temp_area_STOR_series)]
A_STOR_series_hourly_upper[:,HPP] = temp_area_STOR_series
temp_head_STOR_series = h_STOR_hourly[:,:,HPP]
temp_head_STOR_series = (np.transpose(temp_head_STOR_series)).ravel()
temp_head_STOR_series = temp_head_STOR_series[np.isfinite(temp_head_STOR_series)]
h_STOR_series_hourly[:,HPP] = temp_head_STOR_series
# [display] once STOR simulation is complete
print("done")
###############################################################
############------ CHECK NEED TO RESIMULATE -------############
###############################################################
# [calculate] turbine exhaustion factor k_turb in STOR (eq. S28)
k_turb_hourly_STOR[:,:,HPP] = (Q_STOR_stable_hourly[:,:,HPP] + Q_STOR_flexible_hourly[:,:,HPP])/Q_max_turb[HPP]
# [check] if criterion on k_turb is met for STOR, wrap up simulation and write data
if np.median(np.nanpercentile(k_turb_hourly_STOR[:,:,HPP],99,0)) < 1:
break
else:
# [display] in case k_turb criterion was not met (eq. S28)
print("requires resimulating at lower C_OR...")
else:
c_multiplier_STOR[:,HPP] = np.nan
# %% REVUB.5) Post-processing
# [initialize] use STOR equal to BAL for reservoirs where STOR is not modelled
for HPP in range(HPP_number):
if STOR_break[HPP] == 1:
P_STOR_hydro_stable_hourly[:,:,HPP] = P_BAL_hydro_stable_hourly[:,:,HPP]
P_STOR_hydro_flexible_hourly[:,:,HPP] = P_BAL_hydro_flexible_hourly[:,:,HPP]
P_STOR_wind_hourly[:,:,HPP] = P_BAL_wind_hourly[:,:,HPP]
P_STOR_solar_hourly[:,:,HPP] = P_BAL_solar_hourly[:,:,HPP]
P_STOR_pump_hourly[:,:,HPP] = 0
ELCC_STOR_yearly[:,HPP] = ELCC_BAL_yearly[:,HPP]
L_followed_STOR_hourly[:,:,HPP] = L_followed_BAL_hourly[:,:,HPP]
# [loop] across all HPPs
for HPP in range(HPP_number):
# [CHANGED] [calculate] yearly hydropower capacity factor for CONV
CF_hydro_CONV_yearly[:,HPP] = (E_hydro_CONV_stable_yearly[:,HPP] + E_hydro_CONV_RoR_yearly[:,HPP])/((P_r_turb[HPP]/f_power)*hrs_byyear)
# [CHANGED] [calculate] hourly hydropower capacity factor for BAL (eq. S42)
CF_hydro_BAL_hourly[:,:,HPP] = (P_BAL_hydro_stable_hourly[:,:,HPP] + P_BAL_hydro_flexible_hourly[:,:,HPP] + P_BAL_hydro_RoR_hourly[:,:,HPP])/(P_r_turb[HPP]/f_power)
# [CHANGED] [calculate] hourly hydropower capacity factor for STOR (eq. S42)
CF_hydro_STOR_hourly[:,:,HPP] = (P_STOR_hydro_stable_hourly[:,:,HPP] + P_STOR_hydro_flexible_hourly[:,:,HPP])/(P_r_turb[HPP]/f_power)
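# [note] capacity factors are computed relative to P_r_turb[HPP]/f_power, i.e. the rated turbine
# capacity rescaled by the factor f_power (assumed to be defined earlier in the script); yearly
# values divide total generation in MWh by this capacity times the hours in the year, hourly values
# divide the instantaneous output in MW by the rescaled capacity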
# [loop] across all simulation years
for y in range(len(simulation_years)):
# [loop] across all months of the year
for m in range(months_yr):
# [calculate] average monthly inflow (m^3/s)
Q_in_nat_monthly[m,y,HPP] = np.mean(Q_in_nat_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP])
# [calculate] average monthly outflows (m^3/s)
Q_CONV_out_monthly[m,y,HPP] = np.mean(Q_CONV_out_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP])
Q_BAL_out_monthly[m,y,HPP] = np.mean(Q_BAL_out_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP])
Q_STOR_out_monthly[m,y,HPP] = np.mean(Q_STOR_out_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP])
# [calculate] load profile by month
L_norm_bymonth[m,y,HPP] = np.mean(L_norm[int(np.sum(days_year[range(m),y])*hrs_day) : int(np.sum(days_year[range(m+1),y])*hrs_day),y,HPP])
# [calculate] power generation, converting hourly values (MW or MWh/h) to GWh/month
E_hydro_BAL_stable_bymonth[m,y,HPP] = 1e-3*np.sum(P_BAL_hydro_stable_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP])
E_solar_BAL_bymonth[m,y,HPP] = 1e-3*np.sum(P_BAL_solar_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP])
E_wind_BAL_bymonth[m,y,HPP] = 1e-3*np.sum(P_BAL_wind_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP])
E_hydro_BAL_flexible_bymonth[m,y,HPP] = 1e-3*np.sum(P_BAL_hydro_flexible_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP])
E_hydro_BAL_RoR_bymonth[m,y,HPP] = 1e-3*np.sum(P_BAL_hydro_RoR_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP])
E_hydro_STOR_stable_bymonth[m,y,HPP] = 1e-3*np.sum(P_STOR_hydro_stable_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP])
E_solar_STOR_bymonth[m,y,HPP] = 1e-3*np.sum(P_STOR_solar_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP])
E_wind_STOR_bymonth[m,y,HPP] = 1e-3*np.sum(P_STOR_wind_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP])
E_hydro_STOR_flexible_bymonth[m,y,HPP] = 1e-3*np.sum(P_STOR_hydro_flexible_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP])
E_hydro_pump_STOR_bymonth[m,y,HPP] = 1e-3*np.sum(P_STOR_pump_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP])
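# [note] worked example of the 1e-3 factor above: summing 720 hourly values of 100 MWh/h
# gives 72,000 MWh for that month, which the 1e-3 factor reports as 72 GWh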
# [calculate] binary variable indicating hydropower curtailment in given month
hydro_BAL_curtailment_factor_monthly[m,y,HPP] = np.min(hydro_BAL_curtailment_factor_hourly[int(np.sum(days_year[range(m),y])*hrs_day) : int(np.sum(days_year[range(m+1),y])*hrs_day),y,HPP])
hydro_STOR_curtailment_factor_monthly[m,y,HPP] = np.min(hydro_STOR_curtailment_factor_hourly[int(np.sum(days_year[range(m),y])*hrs_day) : int(np.sum(days_year[range(m+1),y])*hrs_day),y,HPP])
# [calculate] ELCC by month (MWh/h)
ELCC_BAL_bymonth[m,y,HPP] = np.sum(L_followed_BAL_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP])/days_year[m,y]/hrs_day
ELCC_STOR_bymonth[m,y,HPP] = np.sum(L_followed_STOR_hourly[int(positions[m,y]):int(positions[m+1,y]),y,HPP])/days_year[m,y]/hrs_day
# [read] vector with hours in each year
hrs_year = range(int(hrs_byyear[y]))
# [arrange] yearly average outflow under optimal BAL solution (m^3/s)
Q_BAL_out_yearly[y,HPP] = np.mean(Q_BAL_out_hourly[hrs_year,y,HPP])
# [calculate] total solar and wind power generation under optimal BAL solution in MWh/year (eq. S25)
E_solar_BAL_yearly[y,HPP] = np.sum(P_BAL_solar_hourly[hrs_year,y,HPP])
E_wind_BAL_yearly[y,HPP] = np.sum(P_BAL_wind_hourly[hrs_year,y,HPP])
# [calculate] total flexible hydropower generation under optimal BAL solution in MWh/year (eq. S24)
E_hydro_BAL_flexible_yearly[y,HPP] = np.sum(P_BAL_hydro_flexible_hourly[hrs_year,y,HPP])
# [calculate] total stable hydropower generation under optimal BAL solution in MWh/year (eq. S24)
E_hydro_BAL_stable_yearly[y,HPP] = np.sum(P_BAL_hydro_stable_hourly[hrs_year,y,HPP])
# [calculate] total stable + flexible hydropower generation under optimal BAL solution in MWh/year (eq. S24)
E_hydro_BAL_nonRoR_yearly[y,HPP] = E_hydro_BAL_flexible_yearly[y,HPP] + E_hydro_BAL_stable_yearly[y,HPP]
# [calculate] total RoR hydropower generation under optimal BAL solution in MWh/year (eq. S33)
E_hydro_BAL_RoR_yearly[y,HPP] = np.sum(P_BAL_hydro_RoR_hourly[hrs_year,y,HPP])
# [calculate] ELCC by year in MWh/year (eq. S23)
ELCC_BAL_yearly[y,HPP] = np.sum(L_followed_BAL_hourly[hrs_year,y,HPP])
# [arrange] yearly average outflow under optimal STOR solution (m^3/s)
Q_STOR_out_yearly[y,HPP] = np.mean(Q_STOR_out_hourly[hrs_year,y,HPP])
# [calculate] total solar and wind power generation under optimal STOR solution in MWh/year (eq. S24)
E_solar_STOR_yearly[y,HPP] = np.sum(P_STOR_solar_hourly[hrs_year,y,HPP])
E_wind_STOR_yearly[y,HPP] = np.sum(P_STOR_wind_hourly[hrs_year,y,HPP])
# [calculate] total flexible hydropower generation under optimal STOR solution in MWh/year (eq. S24)
E_hydro_STOR_flexible_yearly[y,HPP] = np.sum(P_STOR_hydro_flexible_hourly[hrs_year,y,HPP])
# [calculate] total stable hydropower generation under optimal STOR solution in MWh/year (eq. S24)
E_hydro_STOR_stable_yearly[y,HPP] = np.sum(P_STOR_hydro_stable_hourly[hrs_year,y,HPP])
# [calculate] total stable + flexible hydropower generation under optimal STOR solution in MWh/year (eq. S24)
E_hydro_STOR_yearly[y,HPP] = E_hydro_STOR_flexible_yearly[y,HPP] + E_hydro_STOR_stable_yearly[y,HPP]
# [calculate] total energy pumped up into reservoir in MWh/year
E_hydro_STOR_pump_yearly[y,HPP] = np.sum(P_STOR_pump_hourly[hrs_year,y,HPP])*eta_pump
# [calculate] ELCC by year in MWh/year (eq. S23)
ELCC_STOR_yearly[y,HPP] = np.sum(L_followed_STOR_hourly[hrs_year,y,HPP])
# [display] signal simulation end
print("simulation finished")
|
{"hexsha": "727975efd0c13eaca12b7ae369e2e5d571d283c7", "size": 133662, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/REVUB_Python_B_Suriname_Sterl_etal_2020.py", "max_stars_repo_name": "VUB-HYDR/2020_Sterl_etal_RSER", "max_stars_repo_head_hexsha": "672adf4f7676cb2e8be77e441a8a407f59930437", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/REVUB_Python_B_Suriname_Sterl_etal_2020.py", "max_issues_repo_name": "VUB-HYDR/2020_Sterl_etal_RSER", "max_issues_repo_head_hexsha": "672adf4f7676cb2e8be77e441a8a407f59930437", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/REVUB_Python_B_Suriname_Sterl_etal_2020.py", "max_forks_repo_name": "VUB-HYDR/2020_Sterl_etal_RSER", "max_forks_repo_head_hexsha": "672adf4f7676cb2e8be77e441a8a407f59930437", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 67.0320962889, "max_line_length": 375, "alphanum_fraction": 0.5743367599, "include": true, "reason": "import numpy", "num_tokens": 34434}
|
# -*- coding: utf-8 -*-
# Author: Daniel Yang <daniel.yj.yang@gmail.com>
#
# License: BSD 3 clause
#from ..datasets import public_dataset
from sklearn.naive_bayes import BernoulliNB, MultinomialNB, GaussianNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from textblob import TextBlob
import pandas as pd
import numpy as np
from ..base import classifier
from ..utils import convert_to_numpy_ndarray, convert_to_list
from sklearn.utils import check_X_y
from scipy.sparse import csr
class Multinomial_NB_classifier_from_scratch(classifier):
# reference: https://geoffruddock.com/naive-bayes-from-scratch-with-numpy/
# reference: http://kenzotakahashi.github.io/naive-bayes-from-scratch-in-python.html
def __init__(self, alpha=1.0, verbose=False):
super().__init__()
self.alpha = alpha # to avoid having zero probabilities for words not seen in our training sample.
self.y_classes = None # e.g., spam vs. no spam
self.prob_y = None # Our prior belief in the probability of any randomly selected message belonging to a particular class
self.prob_x_i_given_y = None # The likelihood of each word, conditional on message class.
self.is_fitted = False
self.verbose = verbose
def fit(self, X_train: np.ndarray, y_train: np.ndarray, feature_names: list = None, document: list = None):
"""
X_train: a matrix of samples x features, such as documents (row) x words (col)
"""
document = convert_to_list(document)
X_train = convert_to_numpy_ndarray(X_train)
y_train = convert_to_numpy_ndarray(y_train)
self.X_train, self.y_train = check_X_y(X_train, y_train)
n_samples, n_features = X_train.shape
if feature_names is None:
self.feature_names = [f"word_{i}" for i in range(1,n_features+1)]
else:
self.feature_names = feature_names
self.y_classes = np.unique(y_train)
self.classes_ = self.y_classes
columns = [f"y={c}" for c in self.y_classes]
self.y_mapper = {}
for idx, y_class in enumerate(self.y_classes):
self.y_mapper[idx] = f"class_idx[{idx}]=[{y_class}]"
X_train_by_y_class = np.array([X_train[y_train == this_y_class] for this_y_class in self.y_classes], dtype=object)
self.prob_y = np.array([X_train_for_this_y_class.shape[0] / n_samples for X_train_for_this_y_class in X_train_by_y_class])
if self.verbose:
print(f"\n------------------------------------------ fit() ------------------------------------------")
print(f"\nStep 1. the input:\n{pd.concat([pd.DataFrame(document,columns=['X_message_j',]),pd.Series(y_train,name='y')],axis=1).to_string(index=False)}")
print(f"\nStep 2. the prior probability of y within the observed sample, before X is observed\nprior prob(y):\n{pd.DataFrame(self.prob_y.reshape(1,-1), columns=columns).to_string(index=False)}")
# axis=0 means column-wise, axis=1 means row-wise
self.X_train_colSum_by_y_class = np.array([ X_train_for_this_y_class.sum(axis=0) for X_train_for_this_y_class in X_train_by_y_class ]) + self.alpha
self.prob_x_i_given_y = self.X_train_colSum_by_y_class / self.X_train_colSum_by_y_class.sum(axis=1).reshape(-1,1)
if self.verbose:
print(f"\nStep 3. prob(word_i|y):\ncolSum should be 1\n{pd.concat([ pd.DataFrame(feature_names, columns=['word_i',]), pd.DataFrame(self.prob_x_i_given_y.T, columns = columns)], axis=1).to_string(index=False)}")
assert (np.abs(self.prob_x_i_given_y.T.sum(axis=0) - np.ones((1, len(self.y_classes)))) < 1e-9).all(), "*** Error *** prob(word_i|y) colSum should be 1"
self.is_fitted = True
if self.verbose:
self.predict_proba(X_test = self.X_train, document = document)
return self
def predict_proba(self, X_test: np.ndarray, document: list = None) -> np.ndarray:
"""
p(y|X) = p(X|y)*p(y)/p(X)
p(X|y) = p(x_1|y) * p(x_2|y) * ... * p(x_J|y)
X: message (document), X_i: word
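Tiny numeric illustration (made-up numbers): with prior p(y) = [0.5, 0.5] and, for one
message, p(X|y) = [0.002, 0.008], the joint is [0.001, 0.004], p(X) = 0.005, and the
posterior p(y|X) = [0.2, 0.8].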
"""
document = convert_to_list(document)
X_test = convert_to_numpy_ndarray(X_test)
from sklearn.utils import check_array
self.X_test = check_array(X_test)
assert self.is_fitted, "model should be fitted first before predicting"
# to figure out prob(X|y)
self.prob_X_given_y = np.zeros(shape=(X_test.shape[0], self.prob_y.shape[0]))
# loop over each row to calculate the posterior probability
for row_index, this_x_sample in enumerate(X_test):
feature_presence_columns = this_x_sample.astype(bool)
# recall that this_x_sample is term frequency, and if a word appears n_times, it should be prob_x_i_given_y ** n_times, hence the "**" below
prob_x_i_given_y_for_feature_present = self.prob_x_i_given_y[:, feature_presence_columns] ** this_x_sample[feature_presence_columns]
# axis=0 means column-wise, axis=1 means row-wise
self.prob_X_given_y[row_index] = (prob_x_i_given_y_for_feature_present).prod(axis=1)
columns = [f"y={c}" for c in self.y_classes]
self.prob_joint_X_and_y = self.prob_X_given_y * self.prob_y
self.prob_X = self.prob_joint_X_and_y.sum(axis=1).reshape(-1, 1) # rowSum gives prob(X_message), as it sums across all possible y classes that can divide X_message
# normalization
self.prob_y_given_X = self.prob_joint_X_and_y / self.prob_X # the posterior probability of y, after X is observed
assert (np.abs(self.prob_y_given_X.sum(axis=1) - 1) < 1e-9).all(), "***Error*** each row should sum to 1"
if self.verbose:
print(f"\n------------------------------------------ predict_proba() ------------------------------------------")
if len(self.feature_names) <= 10:
print(f"\nStep 1. the 'term freq - inverse doc freq' matrix of X_test:\nNote: Each row has unit norm\n{pd.concat([pd.DataFrame(document, columns=['X_message_j',]),pd.DataFrame(X_test, columns = self.feature_names)], axis=1).to_string(index=False)}")
print(f"\nStep 2. prob(X_message|y) = prob(word_1|y) * prob(word_2|y) * ... * prob(word_J|y):\nNote: colSum may not = 1\n{pd.concat([pd.DataFrame(document, columns=['X_message_j',]),pd.DataFrame(self.prob_X_given_y, columns=columns)], axis=1).to_string(index=False)}")
print(f"\nStep 3. prob(X_message ∩ y) = prob(X_message|y) * prob(y):\nNote: rowSum gives prob(X_message), as it sums across all possible y classes that can divide X_message\n{pd.concat([pd.DataFrame(document, columns=['X_message_j',]),pd.DataFrame(self.prob_joint_X_and_y,columns=columns)],axis=1).to_string(index=False)}")
print(f"\nStep 4. prob(X_message), across all y_classes within the observed sample:\n{pd.concat([pd.DataFrame(document, columns=['X_message_j', ]),pd.DataFrame(self.prob_X,columns=['prob',])], axis=1).to_string(index=False)}")
print(f"\nStep 5. the posterior prob of y after X is observed:\nprob(y|X_message) = p(X_message|y) * p(y) / p(X_message):\nNote: rowSum = 1\n{pd.concat([pd.DataFrame(document, columns=['X_message_j', ]),pd.DataFrame(self.prob_y_given_X, columns=columns),pd.Series(self.prob_y_given_X.argmax(axis=1),name='predict').map(self.y_mapper)],axis=1).to_string(index=False)}")
# Compare with sklearn
model_sklearn = Multinomial_NB_classifier(alpha=self.alpha, class_prior=self.prob_y)
model_sklearn.fit(self.X_train, self.y_train)
prob_y_given_X_test_via_sklearn = model_sklearn.predict_proba(X_test)
assert (np.abs(prob_y_given_X_test_via_sklearn - self.prob_y_given_X) < 1e-9).all(), "*** Error *** different results via sklearn and from scratch"
self.y_pred_score = self.prob_y_given_X
return self.prob_y_given_X
def predict(self, X_test: np.ndarray, document: list = None) -> np.ndarray:
""" Predict class with highest probability """
document = convert_to_list(document)
return self.predict_proba(X_test, document = document).argmax(axis=1)
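# [usage sketch, added; variable names below are hypothetical] after vectorizing documents:
# vectorizer = TfidfVectorizer(); X_train_tfidf = vectorizer.fit_transform(train_docs)
# model = Multinomial_NB_classifier_from_scratch(alpha=1.0)
# model.fit(X_train_tfidf, y_train, feature_names=vectorizer.get_feature_names(), document=train_docs)
# proba = model.predict_proba(vectorizer.transform(test_docs), document=test_docs) # rows sum to 1
# labels = model.predict(vectorizer.transform(test_docs), document=test_docs) # indices into model.y_classes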
def show_model_attributes(self, fitted_tfidf_vectorizer, y_classes, top_n=10):
assert self.is_fitted, "model should be fitted first before predicting"
vocabulary_dict = fitted_tfidf_vectorizer.vocabulary_
terms = list(vocabulary_dict.keys())
X_test = fitted_tfidf_vectorizer.transform(terms)
verbose_old = self.verbose
self.verbose = False
for i, y_class in enumerate(y_classes):
term_proba_df = pd.DataFrame({'term': terms, 'proba': self.predict_proba(X_test=X_test,document=terms)[:, i]})
term_proba_df = term_proba_df.sort_values(by=['proba'], ascending=False)
top_n = top_n
df = pd.DataFrame.head(term_proba_df, n=top_n)
print(f"\nThe top {top_n} terms with highest probability of a document = {y_class}:")
for term, proba in zip(df['term'], df['proba']):
print(f" \"{term}\": {proba:4.2%}")
self.verbose = verbose_old
def evaluate_model(self, X_test: np.ndarray, y_test: np.ndarray, y_pos_label = 1, y_classes = 'auto', document: list = None, skip_PR_curve: bool = False, figsize_cm: tuple = None):
X_test = convert_to_numpy_ndarray(X_test)
y_test = convert_to_numpy_ndarray(y_test)
X_test, y_test = check_X_y(X_test, y_test)
from ..model_evaluation import plot_confusion_matrix, plot_ROC_and_PR_curves
model_name = 'Multinomial NB from scratch'
y_pred = self.predict(X_test, document = document)
if figsize_cm is None:
if len(y_classes) == 2:
figsize_cm = (10, 9)
if len(y_classes) > 2:
figsize_cm = (8, 8)
plot_confusion_matrix(y_test, y_pred, y_classes = y_classes, model_name = model_name, figsize = figsize_cm)
if len(y_classes) == 2:
verbose_old = self.verbose
self.verbose = False
plot_ROC_and_PR_curves(fitted_model=self, X=X_test, y_true=y_test, y_pred_score=self.y_pred_score[:, 1], y_pos_label=y_pos_label, model_name=model_name, skip_PR_curve = skip_PR_curve, figsize=(8,8))
self.verbose = verbose_old
#class naive_bayes_Bernoulli(BernoulliNB):
# """
# This class is used when X are independent binary variables (e.g., whether a word occurs in a document or not).
# """
# def __init__(self, *, alpha=1.0, binarize=0.0, fit_prior=True, class_prior=None):
# super().__init__(alpha=alpha, binarize=binarize, fit_prior=fit_prior, class_prior=class_prior)
#class naive_bayes_multinomial(MultinomialNB):
# """
# This class is used when X are independent discrete variables with 3+ levels (e.g., term frequency in the document).
# """
# # note: In Python 3, adding * to a function's signature forces calling code to pass every argument defined after the asterisk as a keyword argument
# def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None):
# super().__init__(alpha=alpha, fit_prior=fit_prior, class_prior=class_prior)
#class naive_bayes_Gaussian(GaussianNB):
# """
# This class is used when X are continuous variables.
# """
# def __init__(self, *, priors=None, var_smoothing=1e-09):
# super().__init__(priors=priors, var_smoothing=var_smoothing)
def Bernoulli_NB_classifier(*args, **kwargs):
"""
This function is used when X are independent binary variables (e.g., whether a word occurs in a document or not).
"""
return BernoulliNB(*args, **kwargs)
def Multinomial_NB_classifier(*args, **kwargs):
"""
This function is used when X are independent discrete variables with 3+ levels (e.g., term frequency in the document).
"""
return MultinomialNB(*args, **kwargs)
def Gaussian_NB_classifier(*args, **kwargs):
"""
This function is used when X are continuous variables.
"""
return GaussianNB(*args, **kwargs)
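# [usage sketch, added; data names are hypothetical] the three wrappers above simply forward
# to the corresponding scikit-learn estimators, e.g.:
# clf = Multinomial_NB_classifier(alpha=0.5)
# clf.fit(X_train_counts, y_train)
# y_pred = clf.predict(X_test_counts)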
class _naive_bayes_demo():
def __init__(self):
self.X = None
self.y = None
self.y_classes = None
self.test_size = 0.25
self.classifier_grid = None
self.random_state = 123
self.X_train = None
self.X_test = None
self.y_train = None
self.y_test = None
self.y_pred = None
self.y_pred_score = None
def build_naive_bayes_Gaussian_pipeline(self):
# create pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
pipeline = Pipeline(steps=[('scaler',
StandardScaler(with_mean=True, with_std=True)),
('classifier',
Gaussian_NB_classifier()),
])
# pipeline parameters to tune
hyperparameters = {
'scaler__with_mean': [True],
'scaler__with_std': [True],
}
grid = GridSearchCV(
pipeline,
hyperparameters, # parameters to tune via cross validation
refit=True, # fit using all data, on the best detected classifier
n_jobs=-1,
scoring='accuracy',
cv=5,
)
# train
print(
"Training a Gaussian naive bayes pipeline, while tuning hyperparameters...\n")
self.classifier_grid = grid.fit(self.X_train, self.y_train)
print(
f"Using a grid search and a Gaussian naive bayes classifier, the best hyperparameters were found to be as follows:\n"
f"Step1: scaler: StandardScaler(with_mean={repr(self.classifier_grid.best_params_['scaler__with_mean'])}, with_std={repr(self.classifier_grid.best_params_['scaler__with_std'])}).\n")
def _lemmas(self, X):
words = TextBlob(str(X).lower()).words
return [word.lemma for word in words]
def _tokens(self, X):
return TextBlob(str(X)).words
def build_naive_bayes_multinomial_pipeline(self):
# create pipeline
pipeline = Pipeline(steps=[('count_matrix_transformer',
CountVectorizer(ngram_range=(1, 1), analyzer=self._tokens)),
('count_matrix_normalizer',
TfidfTransformer(use_idf=True)),
('classifier',
Multinomial_NB_classifier()),
])
# pipeline parameters to tune
hyperparameters = {
'count_matrix_transformer__ngram_range': ((1, 1), (1, 2)),
'count_matrix_transformer__analyzer': (self._tokens, self._lemmas), # 'word',
'count_matrix_normalizer__use_idf': (True, False),
}
grid = GridSearchCV(
pipeline,
hyperparameters, # parameters to tune via cross validation
refit=True, # fit using all data, on the best detected classifier
n_jobs=-1,
scoring='accuracy',
cv=5,
)
# train
print(
"Training a multinomial naive bayes pipeline, while tuning hyperparameters...\n")
#import nltk
#nltk.download('punkt', quiet=True)
#nltk.download('wordnet', quiet=True)
#from ..datasets import public_dataset
#import os
#os.environ["NLTK_DATA"] = public_dataset("nltk_data_path")
# see also: https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html
# count_vect.fit_transform() in training vs. count_vect.transform() in testing
self.classifier_grid = grid.fit(self.X_train, self.y_train)
print(
f"Using a grid search and a multinomial naive bayes classifier, the best hyperparameters were found to be as follows:\n"
f"Step1: Tokenizing text: CountVectorizer(ngram_range = {repr(self.classifier_grid.best_params_['count_matrix_transformer__ngram_range'])}, analyzer = {repr(self.classifier_grid.best_params_['count_matrix_transformer__analyzer'])});\n"
f"Step2: Transforming from occurrences to frequency: TfidfTransformer(use_idf = {self.classifier_grid.best_params_['count_matrix_normalizer__use_idf']}).\n")
class _naive_bayes_demo_SMS_spam(_naive_bayes_demo):
def __init__(self):
super().__init__()
self.y_classes = ('ham (y=0)', 'spam (y=1)')
def getdata(self):
from ..datasets import public_dataset
data = public_dataset(name='SMS_spam')
n_spam = data.loc[data.label == 'spam', 'label'].count()
n_ham = data.loc[data.label == 'ham', 'label'].count()
print(
f"---------------------------------------------------------------------------------------------------------------------\n"
f"This demo uses a public dataset of SMS spam, which has a total of {len(data)} messages = {n_ham} ham (legitimate) and {n_spam} spam.\n"
f"The goal is to use 'term frequency in message' to predict whether a message is ham (class=0) or spam (class=1).\n")
self.X = data['message']
self.y = data['label']
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.X, self.y, test_size=self.test_size, random_state=self.random_state)
def show_model_attributes(self):
count_vect = self.classifier_grid.best_estimator_.named_steps['count_matrix_transformer']
vocabulary_dict = count_vect.vocabulary_
# clf = classifier_grid.best_estimator_.named_steps['classifier'] # clf = classifier fitted
term_proba_df = pd.DataFrame({'term': list(
vocabulary_dict), 'proba_spam': self.classifier_grid.predict_proba(vocabulary_dict)[:, 1]})
term_proba_df = term_proba_df.sort_values(
by=['proba_spam'], ascending=False)
top_n = 10
df = pd.DataFrame.head(term_proba_df, n=top_n)
print(
f"The top {top_n} terms with highest probability of a message being a spam (the classification is either spam or ham):")
for term, proba_spam in zip(df['term'], df['proba_spam']):
print(f" \"{term}\": {proba_spam:4.2%}")
def evaluate_model(self):
self.y_pred = self.classifier_grid.predict(self.X_test)
self.y_pred_score = self.classifier_grid.predict_proba(self.X_test)
from ..model_evaluation import plot_confusion_matrix, plot_ROC_and_PR_curves
plot_confusion_matrix(y_true=self.y_test, y_pred=self.y_pred,
y_classes=self.y_classes)
plot_ROC_and_PR_curves(fitted_model=self.classifier_grid, X=self.X_test,
y_true=self.y_test, y_pred_score=self.y_pred_score[:, 1], y_pos_label='spam', model_name='Multinomial NB')
def application(self):
custom_message = "URGENT! We are trying to contact U. Todays draw shows that you have won a 2000 prize GUARANTEED. Call 090 5809 4507 from a landline. Claim 3030. Valid 12hrs only."
custom_results = self.classifier_grid.predict([custom_message])[0]
print(
f"\nApplication example:\n- Message: \"{custom_message}\"\n- Probability of spam (class=1): {self.classifier_grid.predict_proba([custom_message])[0][1]:.2%}\n- Classification: {custom_results}\n")
def run(self):
"""
This function provides a demo of selected functions in this module using the SMS spam dataset.
Required arguments:
None
"""
# Get data
self.getdata()
# Create and train a pipeline
self.build_naive_bayes_multinomial_pipeline()
# model attributes
self.show_model_attributes()
# model evaluation
self.evaluate_model()
# application example
self.application()
# return classifier_grid
# return self.classifier_grid
# import numpy as np
# from sklearn.utils import shuffle
# True Positive
#X_test_subset = X_test[y_test == 'spam']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'spam')[0], n_samples=1, random_state=1234)[0] ] ]]
# False Negative
#X_test_subset = X_test[y_test == 'spam']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'ham')[0], n_samples=1, random_state=1234)[0] ] ]]
# False Positive
#X_test_subset = X_test[y_test == 'ham']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'spam')[0], n_samples=1, random_state=1234)[0] ] ]]
# True Negative
#X_test_subset = X_test[y_test == 'ham']
#y_pred_array = classifier_grid.predict( X_test_subset )
#X_test_subset.loc[[ X_test_subset.index[ shuffle(np.where(y_pred_array == 'ham')[0], n_samples=1, random_state=123)[0] ] ]]
class _naive_bayes_demo_20newsgroups(_naive_bayes_demo):
def __init__(self):
super().__init__()
self.y_classes = sorted(
['soc.religion.christian', 'comp.graphics', 'sci.med'])
def getdata(self):
print(
f"-------------------------------------------------------------------------------------------------------------------------------------\n"
f"This demo uses a public dataset of 20newsgroup and uses {len(self.y_classes)} categories of them: {repr(self.y_classes)}.\n"
f"The goal is to use 'term frequency in document' to predict which category a document belongs to.\n")
from sklearn.datasets import fetch_20newsgroups
from ..datasets import public_dataset
twenty_train = fetch_20newsgroups( #data_home=public_dataset("scikit_learn_data_path"),
subset='train', categories=self.y_classes, random_state=self.random_state)
twenty_test = fetch_20newsgroups( #data_home=public_dataset("scikit_learn_data_path"),
subset='test', categories=self.y_classes, random_state=self.random_state)
self.X_train = twenty_train.data
self.y_train = twenty_train.target
self.X_test = twenty_test.data
self.y_test = twenty_test.target
def show_model_attributes(self):
# model attributes
count_vect = self.classifier_grid.best_estimator_.named_steps['count_matrix_transformer']
vocabulary_dict = count_vect.vocabulary_
# clf = classifier_grid.best_estimator_.named_steps['classifier'] # clf = classifier fitted
for i in range(len(self.y_classes)):
term_proba_df = pd.DataFrame({'term': list(
vocabulary_dict), 'proba': self.classifier_grid.predict_proba(vocabulary_dict)[:, i]})
term_proba_df = term_proba_df.sort_values(
by=['proba'], ascending=False)
top_n = 10
df = pd.DataFrame.head(term_proba_df, n=top_n)
print(
f"The top {top_n} terms with highest probability of a document being {repr(self.y_classes[i])}:")
for term, proba in zip(df['term'], df['proba']):
print(f" \"{term}\": {proba:4.2%}")
def evaluate_model(self):
# model evaluation
self.y_pred = self.classifier_grid.predict(self.X_test)
from ..model_evaluation import plot_confusion_matrix
# the y_classes are in an alphabetic order
plot_confusion_matrix(y_true=self.y_test,
y_pred=self.y_pred, y_classes=self.y_classes)
def application(self):
pass
def run(self):
"""
This function provides a demo of selected functions in this module using the 20 newsgroup dataset.
It models after the tutorial https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html
Required arguments:
None
"""
# Get data
self.getdata()
# Create and train a pipeline
self.build_naive_bayes_multinomial_pipeline()
# model attributes
self.show_model_attributes()
# model evaluation
self.evaluate_model()
# application example
self.application()
# return classifier_grid
# return self.classifier_grid
class _naive_bayes_demo_Social_Network_Ads(_naive_bayes_demo):
def __init__(self):
super().__init__()
self.y_classes = ['not_purchased (y=0)', 'purchased (y=1)']
def getdata(self):
from ..datasets import public_dataset
data = public_dataset(name='Social_Network_Ads')
self.X = data[['Age', 'EstimatedSalary']].to_numpy()
self.y = data['Purchased'].to_numpy()
from sklearn.model_selection import train_test_split
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.X, self.y, test_size=0.25, random_state=123)
def show_model_attributes(self):
pass
def evaluate_model(self):
# model evaluation
self.y_pred = self.classifier_grid.predict(self.X_test)
self.y_pred_score = self.classifier_grid.predict_proba(self.X_test)
from ..model_evaluation import plot_confusion_matrix, plot_ROC_and_PR_curves, visualize_classifier_decision_boundary_with_two_features
plot_confusion_matrix(y_true=self.y_test,
y_pred=self.y_pred, y_classes=self.y_classes)
plot_ROC_and_PR_curves(fitted_model=self.classifier_grid, X=self.X_test,
y_true=self.y_test, y_pred_score=self.y_pred_score[:, 1], y_pos_label=1, model_name="Gaussian NB")
visualize_classifier_decision_boundary_with_two_features(
self.classifier_grid, self.X_train, self.y_train, self.y_classes, title="Gaussian Naive Bayes / training set", X1_lab='Age', X2_lab='Estimated Salary')
visualize_classifier_decision_boundary_with_two_features(
self.classifier_grid, self.X_test, self.y_test, self.y_classes, title="Gaussian Naive Bayes / testing set", X1_lab='Age', X2_lab='Estimated Salary')
def application(self):
pass
def run(self):
"""
This function provides a demo of selected functions in this module using the Social_Network_Ads dataset.
Required arguments:
None
"""
# Get data
self.getdata()
# Create and train a pipeline
self.build_naive_bayes_Gaussian_pipeline()
# model attributes
self.show_model_attributes()
# model evaluation
self.evaluate_model()
# application example
self.application()
# return classifier_grid
# return self.classifier_grid
def demo(dataset="SMS_spam"):
"""
This function provides a demo of selected functions in this module.
Required arguments:
dataset: A string. Possible values: "SMS_spam", "20newsgroups", "Social_Network_Ads"
"""
if dataset == "SMS_spam":
nb_demo = _naive_bayes_demo_SMS_spam()
elif dataset == "20newsgroups":
nb_demo = _naive_bayes_demo_20newsgroups()
elif dataset == "Social_Network_Ads":
nb_demo = _naive_bayes_demo_Social_Network_Ads()
else:
raise TypeError(f"dataset [{dataset}] is not defined")
return nb_demo.run()
def demo_from_scratch():
max_df = 1.0
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, _document_frequency
vectorizer = CountVectorizer(max_df = max_df)
y_classes = ['ham', 'spam']
X = document = ['BB AA', 'BB CC']
y = ['ham', 'spam']
print(f"1. document = {document}")
transformed_data = vectorizer.fit_transform(X)
term_frequency = transformed_data
print(f"\n2. Term frequency (tf) (the number of times a word appears in the document):\n{pd.DataFrame(term_frequency.toarray(), columns = vectorizer.get_feature_names()).to_string(index=False)}")
document_frequency = _document_frequency(term_frequency)
document_frequency_divided_by_n_documents = np.divide(document_frequency, len(X))
print(f"\n3a. Document frequency (df) (the number of documents in which a word appears):\n{pd.DataFrame(document_frequency.reshape(1,-1), columns = vectorizer.get_feature_names()).to_string(index=False)}")
print(f"\n3b. Document frequency (df) / n_documents (this is where min_df and max_df could affect):\n{pd.DataFrame(document_frequency_divided_by_n_documents.reshape(1,-1), columns = vectorizer.get_feature_names()).to_string(index=False)}")
# max_df: If float in range [0.0, 1.0], the parameter represents a proportion of documents
tfidf_vectorizer = TfidfVectorizer(max_df=max_df)
transformed_data = tfidf_vectorizer.fit_transform(X)
inverse_document_frequency = tfidf_vectorizer._tfidf._idf_diag
#print(f"\n3. Inverse document frequency (adjust for the fact that some words appear more frequently in the corpus):\n{list(zip(tfidf_vectorizer.get_feature_names(), np.ravel(tfidf_vectorizer.idf_)))}")
print(f"\n4a. Inverse document frequency (idf) (adjust for the fact that some words appear more frequently in the corpus):\n{pd.DataFrame(tfidf_vectorizer.idf_.reshape(1,-1), columns = tfidf_vectorizer.get_feature_names()).to_string(index=False)}")
print(f"\n4b. Inverse document frequency (diag):\n{inverse_document_frequency.toarray()}")
tf_times_idf = term_frequency * inverse_document_frequency
print(f"\n5. Term frequency * Inverse document frequency (diag):\n{tf_times_idf.toarray()}")
from sklearn.preprocessing import normalize
normalized_tf_times_idf = normalize(tf_times_idf, norm = 'l2', axis=1) # axis=0 means column-wise, axis=1 means row-wise
print(f"\n6. Document-wise normalized TF * IDF (each document has a unit length):\n{normalized_tf_times_idf.toarray()}")
sklearn_X_train = pd.DataFrame(transformed_data.toarray(), columns = tfidf_vectorizer.get_feature_names())
print(f"\n7. Compared to transformed matrix from TfidfVectorizer() [should be the same]:\n{sklearn_X_train.to_string(index=False)}")
assert (np.abs(normalized_tf_times_idf.toarray() - transformed_data.toarray()) < 1e-9).all(), "***Error*** Results of tf-idf should be the same"
y_train = pd.DataFrame(y, columns = ['y',])
print(f"\n8. y_train (target) and X_train (term frequency) together:\n{pd.concat([y_train, sklearn_X_train], axis=1).to_string(index=False)}")
#################
print("========================================================================================================================")
X_train = transformed_data
y_train = convert_to_numpy_ndarray(y)
X_test_doc = ['bb cc', 'bb aa', 'bb aa aa', 'bb aa aa aa', 'bb aa aa aa aa', 'bb aa aa aa aa aa']
y_test = np.array([1, 0, 0, 0, 0, 0])
X_test = tfidf_vectorizer.transform(X_test_doc)
model_from_scratch = Multinomial_NB_classifier_from_scratch(verbose=True)
model_from_scratch.fit(X_train, y_train, feature_names=tfidf_vectorizer.get_feature_names(), document=document)
model_from_scratch.show_model_attributes(fitted_tfidf_vectorizer = tfidf_vectorizer, y_classes=y_classes)
model_from_scratch.evaluate_model(X_test, y_test, y_classes=y_classes, document = X_test_doc, skip_PR_curve = True)
#################
print("========================================================================================================================")
# reference: https://scikit-learn.org/stable/auto_examples/text/plot_document_classification_20newsgroups.html#sphx-glr-auto-examples-text-plot-document-classification-20newsgroups-py
# reference: https://scikit-learn.org/stable/auto_examples/applications/plot_out_of_core_classification.html#sphx-glr-auto-examples-applications-plot-out-of-core-classification-py
y_classes = ['sci.space', 'soc.religion.christian']
from sklearn.datasets import fetch_20newsgroups
from ..datasets import public_dataset
# no need to specify # data_home=public_dataset("scikit_learn_data_path"),
twenty_train = fetch_20newsgroups( subset='train', categories=y_classes, random_state=1 )
twenty_test = fetch_20newsgroups( subset='test', categories=y_classes, random_state=1 )
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
X_train = vectorizer.fit_transform(twenty_train.data)
X_test = vectorizer.transform(twenty_test.data)
y_train, y_test = twenty_train.target, twenty_test.target
model_from_scratch = Multinomial_NB_classifier_from_scratch()
model_from_scratch.fit(X_train, y_train, feature_names=vectorizer.get_feature_names())
model_from_scratch.show_model_attributes(fitted_tfidf_vectorizer = vectorizer, y_classes=y_classes)
model_from_scratch.evaluate_model(X_test, y_test, y_classes=y_classes, document= twenty_test.data)
|
{"hexsha": "4adec05c97f6517f1bad563b0a2f16966d4b9e6f", "size": 33248, "ext": "py", "lang": "Python", "max_stars_repo_path": "machlearn/naive_bayes/_naive_bayes.py", "max_stars_repo_name": "daniel-yj-yang/pyml", "max_stars_repo_head_hexsha": "2328ae1d73eab39f2774331fcfaa10e8fa2fc0de", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-11-18T13:25:25.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-18T13:25:27.000Z", "max_issues_repo_path": "machlearn/naive_bayes/_naive_bayes.py", "max_issues_repo_name": "daniel-yj-yang/pyml", "max_issues_repo_head_hexsha": "2328ae1d73eab39f2774331fcfaa10e8fa2fc0de", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "machlearn/naive_bayes/_naive_bayes.py", "max_forks_repo_name": "daniel-yj-yang/pyml", "max_forks_repo_head_hexsha": "2328ae1d73eab39f2774331fcfaa10e8fa2fc0de", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-18T04:46:07.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-25T16:19:39.000Z", "avg_line_length": 50.7603053435, "max_line_length": 380, "alphanum_fraction": 0.6608517806, "include": true, "reason": "import numpy,from scipy", "num_tokens": 7840}
|
"""decodes and serializes frames from videos in a given directory
into TFRecord files to improve parallelized I/O and provide
prefetching benefits.
The program expects the folder containing the videos to have the following
structure:
-- class_name_1
-- video_1.mp4
-- video_2.mp4
-- class_name_2
-- video_1.mp4
-- video_2.mp4
A mapping of the class names and id needs to be provided as well.
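For example, a hypothetical label map JSON could look like:
{"class_name_1": 0, "class_name_2": 1}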
"""
import os
import json
import glob
import math
import imageio
import numpy as np
from tqdm import tqdm
import multiprocessing
import tensorflow as tf
from absl import app, flags, logging
SET = {'train', 'val', 'test'}
SUPPORTED_FILETYPES = {'.mp4', '.avi', '.mkv', '.webm', '.mov'}
flags.DEFINE_string('video_dir', None,
'Name of directory containing video dataset.')
flags.DEFINE_string('label_map', None,
'Path to .json file containing mapping between class name and id.')
flags.DEFINE_string('output_dir', None,
'Path to folder to write tfrecord files.')
flags.DEFINE_string('set', 'train',
'The subset of the dataset to write to tfrecord format (train, val or test).')
flags.DEFINE_list('extensions', list(SUPPORTED_FILETYPES),
'Video formats to search for and decode.')
flags.DEFINE_string('test_annotations', None,
'Path to .json file containing test labels (designed for Kinetics dataset).')
flags.DEFINE_integer('videos_per_record', 32,
'Number of videos to decode and store in a single tfrecord file.')
flags.mark_flags_as_required(['video_dir', 'label_map', 'output_dir'])
FLAGS = flags.FLAGS
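# [example invocation, added; paths are hypothetical]
# python create_tfrecords.py --video_dir=/data/videos --label_map=/data/label_map.json \
#     --output_dir=/data/tfrecords/mydataset --set=train --videos_per_record=32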
def to_tf_example(frames, class_id):
"""converts a list of frames and corresponding class id to bytes,
represented in a tf.train.SequenceExample object.
Args:
frames (np.ndarray): an array of decoded video frames with shape
(num_frames, height, width, 3).
class_id (int): an integer value representing the label of the
video.
Returns:
tf.train.SequenceExample: the frames and class id encoded in a
ProtocolMessage.
"""
# encode the frames as JPEGs to compress them
encoded_frames = [tf.image.encode_jpeg(frame, format='rgb', quality=90, optimize_size=True)
for frame in frames]
frame_bytes = [tf.train.Feature(bytes_list=tf.train.BytesList(value=[frame.numpy()]))
for frame in encoded_frames]
sequence = tf.train.FeatureLists(
feature_list = {
'video': tf.train.FeatureList(feature=frame_bytes)
})
context = tf.train.Features(
feature = {
'video/num_frames':
tf.train.Feature(int64_list=tf.train.Int64List(value=[frames.shape[0]])),
'video/class/label':
tf.train.Feature(int64_list=tf.train.Int64List(value=[class_id])),
})
tf_example = tf.train.SequenceExample(context=context, feature_lists=sequence)
return tf_example
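# [note, added] a minimal sketch of how a consumer might parse these records back; the
# feature keys match those written above, everything else is an assumption:
# context_desc = {'video/num_frames': tf.io.FixedLenFeature([], tf.int64),
#                 'video/class/label': tf.io.FixedLenFeature([], tf.int64)}
# sequence_desc = {'video': tf.io.FixedLenSequenceFeature([], tf.string)}
# context, sequence = tf.io.parse_single_sequence_example(serialized, context_desc, sequence_desc)
# frames = tf.map_fn(lambda f: tf.io.decode_jpeg(f, channels=3), sequence['video'], fn_output_signature=tf.uint8)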
def write_tfrecord(paths, label_map, annotations, process_id, num_shards):
"""writes a list of videos to a tfrecord file.
Args:
paths (np.array): a list of paths to video files
label_map (dict): a mapping of class labels and class ids
annotations (dict): the groundtruth annotations for the test dataset.
process_id (int): the index of the process that is currently writing
the tfrecord file.
num_shards (int): the total number of tfrecord files that will be created
for the dataset.
Returns:
int: 1 if the process runs successfully.
"""
tfr_options = tf.io.TFRecordOptions(compression_type='GZIP', compression_level=9)
tfr_path = FLAGS.output_dir + '-{}-{}-of-{}.tfrecord'.format(FLAGS.set, process_id, num_shards)
with tf.io.TFRecordWriter(tfr_path, tfr_options) as writer:
for path in paths:
logging.debug(f'writing {path}')
filename = os.path.basename(path).split('.')[0]
# get class label (string) and class id (integer)
if annotations: # test set
try:
class_label = annotations[filename]['annotations']['label']
class_label = class_label.replace(' ', '_') # replace space with underscore
class_id = label_map[class_label] # get the integer label for the video
except KeyError:
logging.info(f'{filename} not found! Skipping...')
continue
else: # training or validation set
class_label = os.path.basename(os.path.dirname(path))
class_id = label_map[class_label]
# decode video (skip videos that cannot be decoded)
# NOTE: this removes any guarantees that
# `videos_per_record` videos will fit into a single
# tfrecord file.
try:
vr = imageio.get_reader(path, 'ffmpeg') # read the video
# get all the frames
fps = math.ceil(vr.get_meta_data()['fps'])
frames = np.stack(list(vr.iter_data()))
# trim video down to 10 seconds, if possible
num_frames = min(frames.shape[0], fps*10)
frames = frames[:num_frames, :, :, :]
except Exception as e:
logging.info(e)
continue
tf_example = to_tf_example(frames, class_id)
writer.write(tf_example.SerializeToString())
return 1
def main(_):
video_dir = FLAGS.video_dir
if not video_dir or not os.path.isdir(video_dir):
raise ValueError('Please provide valid directory for videos.')
label_path = FLAGS.label_map
if not label_path or not '.json' in label_path:
raise ValueError('Please provide valid path to label map.')
with open(label_path, 'r') as f:
label_map = json.load(f)
# create tfrecord output directory if it does not exist
output_path = os.path.dirname(FLAGS.output_dir)
if not os.path.exists(output_path):
os.makedirs(output_path)
test_file = FLAGS.test_annotations
if test_file is not None and '.json' not in test_file:
raise ValueError('Please provide valid path to JSON test file.')
assert FLAGS.set in SET
annotations = None
if FLAGS.set == 'test':
with open(test_file, 'r') as j:
annotations = json.load(j)
videos_per_record = max(1, FLAGS.videos_per_record)
# get files with supported extension
files = []
for ext in FLAGS.extensions:
if ext in SUPPORTED_FILETYPES:
files.extend(glob.glob(os.path.join(video_dir, '**', '*' + ext),
recursive=True))
else:
logging.info(f'{ext} format not supported. Skipping...')
np.random.shuffle(files)
returns = []
process_id = 0
num_files = len(files)
num_workers = multiprocessing.cpu_count() # set to lower number if running out of memory
# The list of file paths is split twice
# so that the number of videos in a file is roughly
# equal to the `videos_per_record` parameter.
# This is based on https://gebob19.github.io/tfrecords/
# Splitting all the files into smaller chunks improves
# writing speed.
# NOTE: splitting further does not guarantee that
# `videos_per_record` videos will fit into a single
# tfrecord, especially for small datasets.
num_splits = max(1, round(num_files / (num_workers * videos_per_record))) # at least one split, even for small datasets
num_shards = num_workers * num_splits # total number of tfrecord files for dataset
file_split = np.array_split(files, num_splits) # first split
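# [note] worked example with hypothetical numbers: 10,000 files, 8 workers and 32 videos per
# record give num_splits = round(10000/256) = 39 and num_shards = 8*39 = 312, i.e. roughly
# 32 videos per tfrecord file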
pbar = tqdm(total=num_files, desc=f'Writing {FLAGS.set} set to TFRecord')
def update(*a):
"""updates the progress bar after a process exits successfully"""
pbar.update(videos_per_record)
for big_chunk in file_split:
smaller_chunk = np.array_split(big_chunk, num_workers) # second split
pool = multiprocessing.Pool(num_workers)
for chunk in smaller_chunk:
r = pool.apply_async(write_tfrecord,
args=(chunk, label_map, annotations, process_id, num_shards),
callback=update)
process_id += 1
returns.append(r)
pool.close()
for r in returns: r.get()
pool.join()
pbar.close()
if __name__ == "__main__":
app.run(main)
|
{"hexsha": "8523d8bd49c628a29704d22e2ad03b227939f5a0", "size": 7968, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/create_tfrecords.py", "max_stars_repo_name": "Chianugoogidi/X3D-tf", "max_stars_repo_head_hexsha": "45935c227896b83492b3c923af37d9746ab8a3c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-03-26T05:01:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T00:10:13.000Z", "max_issues_repo_path": "datasets/create_tfrecords.py", "max_issues_repo_name": "Chianugoogidi/X3D-tf", "max_issues_repo_head_hexsha": "45935c227896b83492b3c923af37d9746ab8a3c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-24T11:02:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-20T12:36:28.000Z", "max_forks_repo_path": "datasets/create_tfrecords.py", "max_forks_repo_name": "Chianugoogidi/X3D-tf", "max_forks_repo_head_hexsha": "45935c227896b83492b3c923af37d9746ab8a3c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-24T11:00:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-17T09:41:23.000Z", "avg_line_length": 35.8918918919, "max_line_length": 98, "alphanum_fraction": 0.6816014056, "include": true, "reason": "import numpy", "num_tokens": 1853}
|
from .OBJET import OBJET
import numpy as np
class Objet(object):
"""OBJET"""
def __init__(self, path_to_meta_json, width=500, height=500):
self._OBJET = OBJET(path_to_meta_json, width, height)
self.width = width
self.height = height
def draw(self, ):
self._OBJET.Draw()
def get_image(self, ):
img = np.array(self._OBJET.GetImage())
img = img.reshape([self.height, self.width, -1])
return np.flip(img, axis=0)
def get_depth_map(self, ):
img = np.array(self._OBJET.GetDepthMap())
img = img.reshape([self.height, self.width])
return np.flip(img, axis=0)
def to_image(self, path_to_image):
self._OBJET.ToImage(path_to_image)
def set_camera(self, position, target):
self._OBJET.SetCamera(position, target)
def set_object_position(self, object_name, position):
self._OBJET.SetObjectPosition(object_name, position)
def set_object_y_rotation(self, object_name, y_rotation):
self._OBJET.SetObjectYRotation(object_name, y_rotation)
def set_object_scale(self, object_name, scale):
self._OBJET.SetObjectScale(object_name, scale)
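# [usage sketch, added; the scene file below is hypothetical]
# objet = Objet("scene/meta.json", width=640, height=480)
# objet.set_camera([0.0, 1.0, 3.0], [0.0, 0.0, 0.0])
# objet.draw()
# rgb = objet.get_image()        # (480, 640, C) array, flipped to image row order
# depth = objet.get_depth_map()  # (480, 640) array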
|
{"hexsha": "87ac45215be36e7193a736c3acd7ac26e8d704b8", "size": 1189, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyobjet/objet.py", "max_stars_repo_name": "MahanFathi/Objet", "max_stars_repo_head_hexsha": "c6e2366327852c18b30dbf2f439931860dc26bf9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyobjet/objet.py", "max_issues_repo_name": "MahanFathi/Objet", "max_issues_repo_head_hexsha": "c6e2366327852c18b30dbf2f439931860dc26bf9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyobjet/objet.py", "max_forks_repo_name": "MahanFathi/Objet", "max_forks_repo_head_hexsha": "c6e2366327852c18b30dbf2f439931860dc26bf9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0, "max_line_length": 65, "alphanum_fraction": 0.6669470143, "include": true, "reason": "import numpy", "num_tokens": 293}
|
using Plots  # assumed to provide the plot() calls below
abstract type AbstractScheduler end
struct StepDecay <: AbstractScheduler
xmax
xmin
Δ
T
end
(d::StepDecay)(t) = max(d.xmin, d.xmax - div(t, d.T) * d.Δ)
d = StepDecay(1.0, 0.1, 0.1, 5)
plot(1:100, d.(1:100))
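# step values of the StepDecay above: d(0) == 1.0, d(5) == 0.9, d(10) == 0.8, ...,
# dropping by Δ = 0.1 every T = 5 steps until the floor xmin = 0.1 is reached at t = 45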
struct ExponentialDecay <: AbstractScheduler
xmax
xmin
ρ
end
(d::ExponentialDecay)(t) = max(d.xmin, d.xmax * d.ρ^t)
d = ExponentialDecay(1.0, 0.1, 0.99)
plot(1:250, d.(1:250))
|
{"hexsha": "ecdd89f5d69a4d0178d65283371d3ada85e191ab", "size": 419, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/schedulers.jl", "max_stars_repo_name": "cswaney/Minerva.jl", "max_stars_repo_head_hexsha": "5a925de2d2b483c317efd286eb81aa2d64d9a5a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/schedulers.jl", "max_issues_repo_name": "cswaney/Minerva.jl", "max_issues_repo_head_hexsha": "5a925de2d2b483c317efd286eb81aa2d64d9a5a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/schedulers.jl", "max_forks_repo_name": "cswaney/Minerva.jl", "max_forks_repo_head_hexsha": "5a925de2d2b483c317efd286eb81aa2d64d9a5a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.5185185185, "max_line_length": 59, "alphanum_fraction": 0.6229116945, "num_tokens": 172}
|
/* Copyright (C) 2012-2019 IBM Corp.
* This program is Licensed under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
#include <NTL/ZZ.h>
#include "FHEContext.h"
#include "Ctxt.h"
#include "permutations.h"
#include "EncryptedArray.h"
namespace helib {
std::ostream& operator<< (std::ostream &s, const PermNetwork &net)
{
s << "[";
for (long i=0; i< net.layers.length(); i++) {
const PermNetLayer& lyr = net.layers[i];
s << "[" << lyr.genIdx << " " << lyr.e << " " << lyr.isID << " "
<< lyr.shifts << "]\n";
}
return s << "]";
}
// Compute one or more layers corresponding to one network of one leaf
void PermNetwork::setLayers4Leaf(long lyrIdx, const ColPerm& p,
const NTL::Vec<long>& benesLvls, long gIdx,
const SubDimension& leafData,
const Permut& map2cube)
{
#ifdef DEBUG_PRINTOUT
std::cerr << "Layer "<<lyrIdx<<", column-permutation="<< p << std::endl;
#endif
// Compute the shift amounts for all the layers in this network
NTL::Vec<bool> isID;
NTL::Vec<Permut> shifts;
if (benesLvls.length()==1) {// Special case for a "trivial" 1-layer network
shifts.SetLength(1);
isID.SetLength(1);
isID[0] = !p.getShiftAmounts(shifts[0]);
}
else // The general case of a multi-layer Benes network
p.getBenesShiftAmounts(shifts,isID,benesLvls);
// Copy the shift amounts to the right place in the bigger network,
// renaming the slots from a linear array to the hyper cube
for (long i=0; i<benesLvls.length(); i++) {
PermNetLayer& lyr = layers[lyrIdx+i];
lyr.genIdx = gIdx;
lyr.isID = isID[i];
lyr.e = leafData.e;
if (!lyr.isID) {
#ifdef DEBUG_PRINTOUT
std::cerr << "layer "<<lyrIdx+i<<": "<<shifts[i]<<std::endl;
#endif
if (leafData.good) // For good leaves, shift by -x is the same as size-x
for (long k=0; k<shifts[i].length(); k++)
if (shifts[i][k]<0) shifts[i][k] += leafData.size;
applyPermToVec(lyr.shifts, shifts[i], map2cube); // do the renaming
#ifdef DEBUG_PRINTOUT
std::cerr << " : "<<lyr.shifts<<std::endl;
#endif
}
// else std::cerr << "layer "<<lyrIdx+i<<"= identity\n";
}
}
// Build a full permutation network
void PermNetwork::buildNetwork(const Permut& pi, const GeneratorTrees& trees)
{
if (trees.numTrees()==0) { // the identity permutation, nothing to do
layers.SetLength(0);
return;
}
NTL::Vec<long> dims;
trees.getCubeSubDims(dims);
// std::cerr << "pi = "<<pi<<std::endl;
// std::cerr << "map2cube ="<<trees.mapToCube()<<std::endl;
// std::cerr << "map2array="<<trees.mapToArray()<<std::endl;
// Compute the permutation on the cube, rho = map2cube o pi o map2array
Permut rho;
applyPermsToVec(rho, trees.mapToCube(), pi, trees.mapToArray());
// std::cerr << "rho = "<<rho<<std::endl;
// Break rho along the different dimensions
CubeSignature sig(dims); // make a cube-signature object
std::vector<ColPerm> perms;
breakPermByDim(perms, rho, sig);
// for (long i=0; i<(long)perms.size(); i++) { // debugging printouts
// Permut tmp;
// perms[i].makeExplicit(tmp);
// std::cerr << " prems["<<i<<"]="<<tmp<<std::endl;
// }
layers.SetLength(trees.numLayers()); // allocate space
// Go over the different permutations and build the corresponding layers
long dimIdx =0;
long frntLyr=0, backLyr=layers.length();
for (long g=0; g<trees.numTrees(); g++) { // go over all the generators/trees
const OneGeneratorTree &T = trees[g];
// In each tree, go over all the leaves
for (long leaf=T.firstLeaf(); leaf>=0; leaf=T.nextLeaf(leaf)) {
const SubDimension& leafData = T[leaf].getData();
// This leaf determines layers frntLyr...frntLyr+frstBenes.length()-1, and
// if it isn't the middle then also backLyr-scndBenes.length()...backLyr-1
// handle the first Benes network
setLayers4Leaf(/*1st-layer-index=*/frntLyr,
/*permutation =*/perms[dimIdx],
/*Benes levels =*/leafData.frstBenes,
/*generator index=*/T.getAuxKey(),
/*(size,good,e) =*/leafData,
/*hypercube renaming permutation=*/trees.mapToCube());
frntLyr += leafData.frstBenes.length(); // how many layers were used
dimIdx++;
if (leafData.scndBenes.length()>0) { // Also a second Benes network
long dimIdx2 = perms.size() -dimIdx; // dimIdx was incremented above
backLyr -= leafData.scndBenes.length();
setLayers4Leaf(/*1st-layer-index=*/backLyr,
/*permutation =*/perms[dimIdx2],
/*Benes levels =*/leafData.scndBenes,
/*generator index=*/T.getAuxKey(),
/*(size,good,e) =*/leafData,
/*hypercube renaming permutation=*/trees.mapToCube());
}
}
}
}
// Apply a permutation network to a hypercube, used mostly for debugging
void PermNetwork::applyToCube(HyperCube<long>& cube) const
{
if (layers.length()==0) return;
long n = cube.getSize();
NTL::Vec<long> tmp(NTL::INIT_SIZE, n); // temporary vector
// Apply the layers, one at a time
for (long i=0; i<layers.length(); i++) {
const PermNetLayer& lyr = layers[i];
if (lyr.isID) continue; // this layer is the identity permutation
//OLD: assert(lyr.shifts.length()==n);
helib::assertEq(lyr.shifts.length(), n, "layer has incorrect size");
// This layer shifts elements along the dimension lyr.genIdx
long dim = lyr.genIdx;
// Move elements as dictated by this layer
for (long j=0; j<n; j++) {
long shamt = lyr.e * lyr.shifts[j]; // how much to shift this slot
if (shamt<0) shamt += cube.getDim(dim); // addCoord expects shamt>=0
long j2 = cube.addCoord(j, dim, shamt); // new index for this slot
tmp[j2] = cube[j];
}
// Copy back to cube
for (long j=0; j<n; j++)
cube[j] = tmp[j];
#ifdef DEBUG_PRINTOUT
std::cerr << " after layer "<< i << ", cube=" << cube.getData()<<std::endl;
#endif
}
}
void PermNetwork::applyToPtxt(NTL::ZZX& p, const EncryptedArray& ea) const
{
throw helib::LogicError("PermNetwork::applyToPtxt is not implemented");
}
// Upon return, mask[i]=1 if haystack[i]=needle, 0 otherwise.
// Also set to 0 all the entries in haystack where mask[i]=1.
// Return the index of the first nonzero entry in haystack at the end
// of the pass (-1 if they are all zero). Also return a flag saying if
// any entries of the mask are nonzero.
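// Worked example (added for illustration): for haystack=[2,0,3,2] and needle=2, the pass
// sets mask=[1,0,0,1], leaves haystack=[0,0,3,0], and returns (2, true), since index 2
// holds the first remaining nonzero entry.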
static std::pair<long,bool>
makeMask(std::vector<long>& mask, NTL::Vec<long>& haystack, long needle)
{
long found = false;
long fstNonZeroIdx = -1;
for (long i=0; i<(long)mask.size(); i++) {
if (haystack[i] == needle) { // found a needle
found = true;
mask[i]=1;
haystack[i]=0; // remove this needle from haystack
} else { // no needle here
mask[i]=0;
if (haystack[i]!=0 && fstNonZeroIdx<0)
fstNonZeroIdx = i; // first nonzero entry in haystack
}
}
return std::make_pair(fstNonZeroIdx,found);
}
// Apply a permutation network to a ciphertext
void PermNetwork::applyToCtxt(Ctxt& c, const EncryptedArray& ea) const
{
const PAlgebra& al = ea.getPAlgebra();
// Apply the layers, one at a time
for (long i=0; i<layers.length(); i++) {
const PermNetLayer& lyr = layers[i];
if (lyr.isID) continue; // this layer is the identity permutation
// This layer is shifted via powers of g^e mod m
long g2e = NTL::PowerMod(al.ZmStarGen(lyr.genIdx), lyr.e, al.getM());
NTL::Vec<long> unused = lyr.shifts; // copy to a new vector
std::vector<long> mask(lyr.shifts.length()); // buffer to hold masks
Ctxt sum(c.getPubKey(), c.getPtxtSpace()); // an empty ciphertext
long shamt = 0;
bool frst = true;
while (true) {
std::pair<long,bool> ret=makeMask(mask, unused, shamt); // compute mask
if (ret.second) { // non-empty mask
Ctxt tmp = c;
NTL::ZZX maskPoly;
ea.encode(maskPoly, mask); // encode mask as polynomial
tmp.multByConstant(maskPoly); // multiply by mask
if (shamt!=0) // rotate if the shift amount is nonzero
tmp.smartAutomorph(NTL::PowerMod(g2e, shamt, al.getM()));
if (frst) {
sum = tmp;
frst = false;
}
else
sum += tmp;
}
if (ret.first >= 0)
shamt = unused[ret.first]; // next shift amount to use
else break; // unused is all-zero, done with this layer
}
c = sum; // update the ciphertext c before the next layer
}
}
}
|
{"hexsha": "590fbbc29a6577b81fa544ca21977aed63372e58", "size": 8862, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/PermNetwork.cpp", "max_stars_repo_name": "patrick-schwarz/HElib", "max_stars_repo_head_hexsha": "cd267e2ddc6e92886b89f3aa51c416d5c1d2dc59", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-12-01T07:18:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-01T07:18:47.000Z", "max_issues_repo_path": "src/PermNetwork.cpp", "max_issues_repo_name": "wangjinglin0721/HElib", "max_issues_repo_head_hexsha": "cd267e2ddc6e92886b89f3aa51c416d5c1d2dc59", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/PermNetwork.cpp", "max_forks_repo_name": "wangjinglin0721/HElib", "max_forks_repo_head_hexsha": "cd267e2ddc6e92886b89f3aa51c416d5c1d2dc59", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8897637795, "max_line_length": 79, "alphanum_fraction": 0.6429699842, "num_tokens": 2574}
|
"""
```
set_regime_val!(p::Parameter{S},
i::Int, v::S; override_bounds::Bool = false) where S <: Real
set_regime_val!(p::Parameter{S},
model_regime::Int, v::S, d::AbstractDict{Int, Int}; override_bounds::Bool = false) where S <: Real
```
sets the value in regime `i` of `p` to be `v`. By default, we enforce
the bounds that are currently in `p`, but the bounds can be ignored by
setting `override_bounds = true`.
The second method allows the user to pass a dictionary to permit the case where
there may be differences between the regimes of a regime-switching model and
the regimes for the parameters. For example, aside from regime-switching in parameters,
the model may also include other forms of regime-switching. To allow
estimation of regime-switching parameters in such a model, the dictionary `d`
maps each "model" regime to a "parameter" regime. In this way,
the second method specifies which "parameter" regime should be used at a given
"model" regime.
"""
function set_regime_val!(p::Parameter{S},
i::Int, v::S; override_bounds::Bool = false) where S <: Real
if !haskey(p.regimes, :value)
p.regimes[:value] = OrderedDict{Int,S}()
end
# First check if fixed and enforce valuebounds so
# `set_regime_val!` mirrors `parameter(p::Parameter, newvalue::S)` functionality
if haskey(p.regimes, :fixed) && haskey(p.regimes[:fixed], i) ? regime_fixed(p, i) : false
if haskey(p.regimes[:value], i)
return p.regimes[:value][i]
else # If it doesn't exist yet, then we want to set the value for this regime
p.regimes[:value][i] = v
p.regimes[:valuebounds][i] = (v, v)
end
elseif (haskey(p.regimes, :valuebounds) && haskey(p.regimes[:valuebounds], i) ?
((regime_valuebounds(p, i)[1] <= v <= regime_valuebounds(p, i)[2]) || override_bounds) : false)
p.regimes[:value][i] = v
elseif (p.valuebounds[1] <= v <= p.valuebounds[2]) || override_bounds
p.regimes[:value][i] = v
elseif p.fixed
# When parameters are initialized as non-regime-switching and fixed,
# the valuebounds is automatically set to (p.value, p.value).
# Unless valuebounds are set to regime-switching, it is not possible
# to add extra regimes without using `override_bounds = true` (unless using the same value).
# Note that the rest of ModelConstructors.jl assumes that if `p.fixed = true`
# and haskey(p.regimes, :fixed) is false, then all regimes (if any) are also fixed.
throw(ParamBoundsError("Parameter $(p.key) is fixed. Regimes cannot be added unless " *
"keyword `override_bounds` is set to `true`."))
else
throw(ParamBoundsError("New value of $(string(p.key)) ($(v)) is out of bounds ($(p.valuebounds))"))
end
return v
end
function set_regime_val!(p::Parameter{S}, model_regime::Int,
v::S, d::AbstractDict{Int, Int}; override_bounds::Bool = false) where S <: Real
return set_regime_val!(p, d[model_regime], v; override_bounds = override_bounds)
end
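# Illustrative usage sketch (not from the package documentation; `p` is a hypothetical,
# non-fixed Parameter{Float64} and the numbers are made up):
#     set_regime_val!(p, 1, 0.5)           # value of p in parameter regime 1
#     set_regime_val!(p, 2, 0.7)           # value of p in parameter regime 2
#     regime_val(p, 2)                     # returns 0.7
#     d = Dict(1 => 1, 2 => 1, 3 => 2)     # model regimes 1 and 2 map to parameter regime 1
#     set_regime_val!(p, 3, 0.9, d)        # writes into parameter regime d[3] == 2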
"""
```
regime_val(p::Parameter{S}, i::Int) where S <: Real
regime_val(p::Parameter{S}, model_regime::Int, d::AbstractDict{Int, Int}) where S <: Real
```
returns the value of `p` in regime `i` for the first method
and the value of `p` in regime `d[model_regime]` for the second.
"""
function regime_val(p::Parameter{S}, i::Int) where S <: Real
if !haskey(p.regimes, :value) || !haskey(p.regimes[:value], i)
@error "regime_val(), Input Error: No regime $(i)"
end
return p.regimes[:value][i]
end
regime_val(p::Parameter{S}, model_regime::Int, d::AbstractDict{Int, Int}) where S <: Real = regime_val(p, d[model_regime])
"""
```
set_regime_prior!(p::Parameter{S}, i::Int, v)
set_regime_prior!(p::Parameter{S}, model_regime::Int, v, d::AbstractDict{Int, Int})
```
sets the prior in regime `i` of `p` to be `v`. The type of `v`
can be a `NullablePriorUnivariate`, `NullablePriorMultivariate`,
`ContinuousUnivariateDistribution`, or `ContinuousMultivariateDistribution`.
The second method allows the user to pass a dictionary to permit the case where
there may be differences between the regimes of a regime-switching model and
the regimes for the parameters. For example, aside from regime-switching in parameters,
the model may also include other forms of regime-switching. To allow
estimation of regime-switching parameters in such a model, the dictionary `d`
maps each "model" regime to a "parameter" regime. In this way,
the second method specifies which "parameter" regime should be used at a given
"model" regime.
"""
function set_regime_prior!(p::Parameter, i::Int, v::S) where {S <: Union{NullablePriorUnivariate, NullablePriorMultivariate}}
if !haskey(p.regimes, :prior)
p.regimes[:prior] = OrderedDict{Int, Union{NullablePriorUnivariate, NullablePriorMultivariate}}()
end
p.regimes[:prior][i] = v
return v
end
function set_regime_prior!(p::Parameter, i::Int, v::S) where S <: ContinuousUnivariateDistribution
return set_regime_prior!(p, i, NullablePriorUnivariate(v))
end
function set_regime_prior!(p::Parameter, i::Int, v::S) where S <: ContinuousMultivariateDistribution
return set_regime_prior!(p, i, NullablePriorMultivariate(v))
end
function set_regime_prior!(p::Parameter, model_regime::Int, v::S,
d::AbstractDict{Int, Int}) where {S <: Union{NullablePriorUnivariate, NullablePriorMultivariate}}
return set_regime_prior!(p, d[model_regime], v)
end
function set_regime_prior!(p::Parameter, model_regime::Int,
v::S, d::AbstractDict{Int, Int}) where S <: ContinuousUnivariateDistribution
return set_regime_prior!(p, model_regime, NullablePriorUnivariate(v), d)
end
function set_regime_prior!(p::Parameter, model_regime::Int, v::S,
d::AbstractDict{Int, Int}) where S <: ContinuousMultivariateDistribution
return set_regime_prior!(p, model_regime, NullablePriorMultivariate(v), d)
end
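# Illustrative sketch (hypothetical parameter `p`; `Normal` comes from Distributions.jl and
# is not imported in this file): raw distributions are wrapped by the methods above, e.g.
#     set_regime_prior!(p, 2, Normal(0.0, 1.0))   # stored as a NullablePriorUnivariate
#     regime_prior(p, 2)                          # retrieves the regime-2 prior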
"""
```
regime_prior(p::Parameter{S}, i::Int) where S <: Real
regime_prior(p::Parameter{S}, model_regime::Int, d::AbstractDict{Int, Int}) where S <: Real
```
returns the prior of `p` in regime `i` for the first method
and the prior of `p` in regime `d[model_regime]` for the second.
"""
function regime_prior(p::Parameter{S}, i::Int) where S <: Real
if !haskey(p.regimes, :prior) || !haskey(p.regimes[:prior], i)
@error "regime_prior(), Input Error: No regime $(i)"
end
return p.regimes[:prior][i]
end
regime_prior(p::Parameter{S}, model_regime::Int, d::AbstractDict{Int, Int}) where S <: Real = regime_prior(p, d[model_regime])
"""
```
set_regime_fixed!(p::Parameter{S1}, i::Int, v::Bool; update_valuebounds::Interval = (NaN, NaN))
set_regime_fixed!(p::Parameter{S1}, model_regime::Int, v::Bool, d::AbstractDict{Int, Int};
    update_valuebounds::Interval = (NaN, NaN))
```
sets whether `p` is fixed in regime `i`. With the default `update_valuebounds = (NaN, NaN)`,
setting `v = true` also sets the valuebounds in regime `i` to `(regime_val(p, i), regime_val(p, i))`
(so the regime's value must already be set via `set_regime_val!`); passing an explicit interval
instead sets the valuebounds in regime `i` to that interval.
The second method allows the user to pass a dictionary to permit the case where
there may be differences between the regimes of a regime-switching model and
the regimes for the parameters. For example, aside from regime-switching in parameters,
the model may also include other forms of regime-switching. To allow
estimation of regime-switching parameters in such a model, the dictionary `d`
maps each "model" regime to a "parameter" regime. In this way,
the second method specifies which "parameter" regime should be used at a given
"model" regime.
"""
function set_regime_fixed!(p::Parameter{S1}, i::Int, v::S; update_valuebounds::Interval{S1} = (NaN, NaN)) where {S <: Bool, S1 <: Real}
if !haskey(p.regimes, :fixed)
p.regimes[:fixed] = OrderedDict{Int, Bool}()
end
p.regimes[:fixed][i] = v
if !haskey(p.regimes, :valuebounds)
        p.regimes[:valuebounds] = OrderedDict{Int, typeof(p.valuebounds)}()
end
if isnan(update_valuebounds[1]) || isnan(update_valuebounds[2])
# Do nothing unless `v` is true (fixed in regime `i`) or
# !haskey(p.regimes[:valuebounds], i) (no value for regime `i` for valuebounds dict)
if v
if (haskey(p.regimes, :value) ? haskey(p.regimes[:value], i) : false)
# Regime is fixed, so valuebounds should be set to (value, value), if it exists
p.regimes[:valuebounds][i] = (regime_val(p, i), regime_val(p, i))
else
error("Regime $(i) for parameter $(p.key) does not have a value. Set the value " *
"using `set_regime_val!` before making it a fixed value with `set_regime_fixed!`.")
end
elseif !haskey(p.regimes[:valuebounds], i)
# If valuebounds is nonexistent in regime i, initialize to p.valuebounds
p.regimes[:valuebounds][i] = p.valuebounds
end
else # Otherwise update the valuebounds. Note that there is no check that p.regimes[:value][i] lies inside the valuebounds
p.regimes[:valuebounds][i] = update_valuebounds
end
return v
end
function set_regime_fixed!(p::Parameter, model_regime::Int, v::S,
d::AbstractDict{Int, Int}; update_valuebounds::Interval{S1} = (NaN, NaN)) where {S <: Bool, S1 <: Real}
set_regime_fixed!(p, d[model_regime], v; update_valuebounds = update_valuebounds)
end
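# Illustrative sketch (hypothetical parameter `p`, not from the package documentation):
#     set_regime_val!(p, 2, 0.7)
#     set_regime_fixed!(p, 2, true)    # regime 2 is fixed; its valuebounds become (0.7, 0.7)
#     set_regime_fixed!(p, 3, false; update_valuebounds = (0.0, 1.0))  # regime 3 stays free, bounds set explicitly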
"""
```
regime_fixed(p::Parameter{S}, i::Int) where S <: Real
regime_fixed(p::Parameter{S}, model_regime::Int, d::AbstractDict{Int, Int}) where S <: Real
```
returns whether `p` is fixed in regime `i` for the first method
and whether `p` is fixed in regime `d[model_regime]` for the second method.
"""
function regime_fixed(p::Parameter{S}, i::Int) where S <: Real
if !haskey(p.regimes, :fixed) || !haskey(p.regimes[:fixed], i)
@error "regime_fixed(), Input Error: No regime $(i)"
end
return p.regimes[:fixed][i]
end
regime_fixed(p::Parameter{S}, model_regime::Int, d::AbstractDict{Int, Int}) where S <: Real = regime_fixed(p, d[model_regime])
"""
```
set_regime_valuebounds!(p::Parameter, i::Int, v::Interval{S}) where {S <: Real}
set_regime_valuebounds!(p::Parameter, model_regime::Int, v::Interval{S}, d::AbstractDict{Int, Int}) where {S <: Real}
```
sets valuebounds for `p` in regime `i` to `v`.
The second method allows the user to pass a dictionary to permit the case where
there may be differences between the regimes of a regime-switching model and
the regimes for the parameters. For example, aside from regime-switching in parameters,
the model may also include other forms of regime-switching. To allow
estimation of regime-switching parameters in such a model, the dictionary `d`
maps each "model" regime to a "parameter" regime. In this way,
the second method specifies which "parameter" regime should be used at a given
"model" regime.
"""
function set_regime_valuebounds!(p::Parameter, i::Int, v::Interval{S}) where {S <: Real}
if !haskey(p.regimes, :valuebounds)
        p.regimes[:valuebounds] = OrderedDict{Int, typeof(p.valuebounds)}()
end
p.regimes[:valuebounds][i] = v
return v
end
function set_regime_valuebounds!(p::Parameter, model_regime::Int, v::Interval{S},
d::AbstractDict{Int, Int}) where {S <: Real}
set_regime_valuebounds!(p, d[model_regime], v)
end
"""
```
regime_valuebounds(p::Parameter{S}, i::Int) where S <: Real
regime_valuebounds(p::Parameter{S}, model_regime::Int, d::AbstractDict{Int, Int}) where S <: Real
```
returns the `valuebounds` of `p` in regime `i` for the first method
and the `valuebounds` of `p` in regime `d[model_regime]` for the second method.
"""
function regime_valuebounds(p::Parameter{S}, i::Int) where S <: Real
if !haskey(p.regimes, :valuebounds) || !haskey(p.regimes[:valuebounds], i)
@error "regime_valuebounds(), Input Error: No regime $(i)"
end
return p.regimes[:valuebounds][i]
end
regime_valuebounds(p::Parameter{S}, model_regime::Int, d::AbstractDict{Int, Int}) where S <: Real = regime_valuebounds(p, d[model_regime])
"""
```
toggle_regime!(p::Parameter{S}, i::Int) where S <: Real
toggle_regime!(pvec::ParameterVector{S}, i::Int) where S <: Real
toggle_regime!(p::Parameter{S}, model_regime::Int, d::AbstractDict{Int, Int}) where S <: Real
toggle_regime!(pvec::ParameterVector{S}, model_regime::Int, d::AbstractDict{Symbol, <: AbstractDict{Int, Int}}) where S <: Real
toggle_regime!(pvec::ParameterVector{S}, model_regime::Int, d::AbstractDict{Int, Int}) where S <: Real
```
changes the fields of `p` to regime `i`.
For example, if
```
p.regimes[:value] = OrderedDict{Int, Any}(1 => 1, 2 => 3)
```
then `toggle_regime!(p, 1)` will cause `p.value = 1` and `toggle_regime!(p, 2)`
will cause `p.value = 3`.
The third method allows the user to pass a dictionary to permit the case where
there may be differences between the regimes of a regime-switching model and
the regimes for the parameters. For example, aside from regime-switching in parameters,
the model may also include other forms of regime-switching. To allow
estimation of regime-switching parameters in such a model, the dictionary `d`
maps each "model" regime to a "parameter" regime. In this way,
the third method specifies which "parameter" regime should be used at a given
"model" regime.
The fourth method extends the third to a ParameterVector, with the possibility
that each parameter may have different mappings to the model regimes. Each key
of `d` corresponds to the key of a parameter, and each value of `d` is
the mapping for model regimes to the parameter regimes of `p.key`.
The fifth method is similar to the fourth but assumes
any regime-switching parameter has the same mapping from model regimes
to parameter regimes, hence the use of a common dictionary.
"""
function toggle_regime!(p::Parameter{S}, i::Int) where S <: Real
if !isempty(p.regimes)
for field in [:value, :valuebounds, :transform_parameterization,
:transform, :prior, :fixed]
if haskey(p.regimes, field) && haskey(p.regimes[field], i)
if field == :value
p.value = p.regimes[field][i]
elseif field == :valuebounds
p.valuebounds = p.regimes[field][i]
elseif field == :transform_parameterization
p.transform_parameterization = p.regimes[field][i]
elseif field == :transform
p.transform = p.regimes[field][i]
elseif field == :prior
p.prior = p.regimes[field][i]
elseif field == :fixed
p.fixed = p.regimes[field][i]
end
elseif haskey(p.regimes, field) && !haskey(p.regimes[field], i)
error("Regime $i for field $field not found")
end
end
end
end
function toggle_regime!(pvec::ParameterVector{S}, i::Int) where {S <: Real}
for p in pvec
toggle_regime!(p, i)
end
end
toggle_regime!(p::Parameter{S}, model_regime::Int, d::AbstractDict{Int, Int}) where S <: Real = toggle_regime!(p, d[model_regime])
function toggle_regime!(pvec::ParameterVector{S}, model_regime::Int, d::AbstractDict{Symbol, <: AbstractDict{Int, Int}}) where S <: Real
for p in pvec
if haskey(d, p.key)
toggle_regime!(p, model_regime, d[p.key])
end
end
end
toggle_regime!(pvec::ParameterVector{S}, model_regime::Int, d::AbstractDict{Int, Int}) where {S <: Real} = toggle_regime!(pvec, d[model_regime])
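# Illustrative sketch (hypothetical parameter vector `pvec` holding parameters :α and :σ;
# not from the package documentation):
#     toggle_regime!(pvec, 2)                       # switch every parameter to its regime 2
#     d = Dict(:α => Dict(1 => 1, 2 => 2), :σ => Dict(1 => 1, 2 => 1))
#     toggle_regime!(pvec, 2, d)                    # :α toggles to regime 2, :σ stays in regime 1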
"""
```
get_values(pvec::ParameterVector{S}; regime_switching::Bool = true) where {S <: Real}
```
constructs a vector of the underlying values in a `ParameterVector`, including
regime-switching values when `regime_switching` is `true`.
"""
function get_values(pvec::ParameterVector{S}; regime_switching::Bool = true) where {S <: Real}
if regime_switching # Check if regime switching occurs
np_reg = n_parameters_regime_switching(pvec)
np = length(pvec)
if np == np_reg # No regime-switching
vals = [x.value for x in pvec]
else
vals = Vector{S}(undef, np_reg)
# An initial pass to find regime 1 values
for i in 1:np
if isempty(pvec[i].regimes)
vals[i] = pvec[i].value
elseif haskey(pvec[i].regimes, :value)
vals[i] = pvec[i].regimes[:value][1]
end
end
# A second loop to add in the extra regimes
ct = np # a counter to assist us
for i in 1:np
if !isempty(pvec[i].regimes)
if haskey(pvec[i].regimes, :value)
for j in 1:length(pvec[i].regimes[:value])
if j > 1
ct += 1
vals[ct] = pvec[i].regimes[:value][j]
end
end
end
end
end
end
else # Regime switching doesn't occur, so just directly map
vals = [x.value for x in pvec]
end
return vals
end
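# Illustrative note on the ordering produced by get_values (hypothetical two-parameter
# example, not from the package documentation): if `pvec = [p1, p2]` and only `p2` has two
# value regimes, the result is `[p1.value, regime_val(p2, 1), regime_val(p2, 2)]`, i.e. all
# regime-1 values in parameter order, followed by the additional regimes appended in order.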
|
{"hexsha": "0ecb5c56d7f4636f6278b5067c5270df710260d0", "size": 16984, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/regimes.jl", "max_stars_repo_name": "FRBNY-DSGE/ModelConstructors", "max_stars_repo_head_hexsha": "3c8e6ebbfd3a1c1ed8851bd84e876e595f3a9145", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-11-03T15:57:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T15:58:20.000Z", "max_issues_repo_path": "src/regimes.jl", "max_issues_repo_name": "FRBNY-DSGE/ModelConstructors", "max_issues_repo_head_hexsha": "3c8e6ebbfd3a1c1ed8851bd84e876e595f3a9145", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-09-04T17:20:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-12T14:10:19.000Z", "max_forks_repo_path": "src/regimes.jl", "max_forks_repo_name": "FRBNY-DSGE/ModelConstructors", "max_forks_repo_head_hexsha": "3c8e6ebbfd3a1c1ed8851bd84e876e595f3a9145", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:18:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-07T10:39:03.000Z", "avg_line_length": 42.7808564232, "max_line_length": 144, "alphanum_fraction": 0.6615638248, "num_tokens": 4505}
|
import os
from typing import Tuple, Sequence, Callable
import csv
import cv2
import numpy as np
import pandas as pd
from PIL import Image
from sklearn.model_selection import KFold
import torch
import torch.optim as optim
from torch import nn, Tensor
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
from torch.cuda.amp import autocast, GradScaler
from torchinfo import summary
from efficientnet_pytorch import EfficientNet
from torchvision import transforms
import ttach as tta
class MnistDataset(Dataset):
def __init__(
self,
dir: os.PathLike,
image_ids: os.PathLike,
transforms: Sequence[Callable]
) -> None:
self.dir = dir
self.transforms = transforms
self.labels = {}
with open(image_ids, 'r') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
self.labels[int(row[0])] = list(map(int, row[1:]))
self.image_ids = list(self.labels.keys())
def __len__(self) -> int:
return len(self.image_ids)
def __getitem__(self, index: int) -> Tuple[Tensor]:
image_id = self.image_ids[index]
image = Image.open(
os.path.join(
self.dir, f'{str(image_id).zfill(5)}.png')).convert('RGB')
target = np.array(self.labels.get(image_id)).astype(np.float32)
if self.transforms is not None:
image = self.transforms(image)
return image, target
transforms_train = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
[0.485, 0.456, 0.406],
[0.229, 0.224, 0.225]
)
])
transforms_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
[0.485, 0.456, 0.406],
[0.229, 0.224, 0.225]
)
])
class EfficientNetModel(nn.Module):
def __init__(self):
super(EfficientNetModel, self).__init__()
self.Effinet = EfficientNet.from_pretrained('efficientnet-b4')
self.classifier = nn.Linear(1000, 26)
nn.init.xavier_normal_(self.classifier.weight)
def forward(self, x):
x = F.relu(self.Effinet(x))
x = self.classifier(x)
return x
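# Illustrative note (not from the original script): for an input batch of shape (N, 3, H, W),
# the model above returns logits of shape (N, 26), one per label of the multi-label task;
# the training and test code below thresholds these logits at 0.0 to obtain binary predictions.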
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = EfficientNetModel().to(device)
def split_dataset(path: os.PathLike):
df = pd.read_csv(path)
kfold = KFold(n_splits=5)
for fold, (train, valid) in enumerate(kfold.split(df, df.index)):
df.loc[valid, 'kfold'] = int(fold)
return df
def train(fold: int, verbose: int = 30):
# create folds
df = split_dataset('data/dirty_mnist_2nd_answer.csv')
df_train = df[df['kfold'] != fold].reset_index(drop=True)
df_valid = df[df['kfold'] == fold].reset_index(drop=True)
df_train.drop(['kfold'], axis=1).to_csv(f'data/train-kfold-{fold}.csv', index=False)
df_valid.drop(['kfold'], axis=1).to_csv(f'data/valid-kfold-{fold}.csv', index=False)
trainset = MnistDataset('data/dirty_mnist_2nd', f'data/train-kfold-{fold}.csv', transforms_train)
train_loader = DataLoader(trainset, batch_size=64, shuffle=True, num_workers=0)
validset = MnistDataset('data/dirty_mnist_2nd', f'data/valid-kfold-{fold}.csv', transforms_test)
valid_loader = DataLoader(validset, batch_size=16, shuffle=False, num_workers=0)
num_epochs = 10
device = 'cuda'
scaler = GradScaler()
model = EfficientNetModel().to(device)
model.load_state_dict(torch.load(f'models/effi_b4_kfold_SAM/effinet_b4_SAM-f{fold}-8.pth'))
optimizer = torch.optim.AdamW(model.parameters(), lr=0.001)
criterion = nn.MultiLabelSoftMarginLoss()
for epoch in range(num_epochs):
model.train()
for i, (images, targets) in enumerate(train_loader):
optimizer.zero_grad()
images = images.to(device)
targets = targets.to(device)
with autocast():
outputs = model(images)
loss = criterion(outputs, targets)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
if (i+1) % verbose == 0:
outputs = outputs > 0.0
acc = (outputs == targets).float().mean()
print(f'Fold {fold} | Epoch {epoch} | L: {loss.item():.7f} | A: {acc:.7f}')
model.eval()
valid_acc = 0.0
valid_loss = 0.0
for i, (images, targets) in enumerate(valid_loader):
images = images.to(device)
targets = targets.to(device)
with autocast():
outputs = model(images)
loss = criterion(outputs, targets)
valid_loss += loss.item()
outputs = outputs > 0.0
valid_acc += (outputs == targets).float().mean()
print(f'Fold {fold} | Epoch {epoch} | L: {valid_loss/(i+1):.7f} | A: {valid_acc/(i+1):.7f}\n')
if epoch > num_epochs-20:
torch.save(model.state_dict(), f'models/effinet_b4_adamw-f{fold}-{epoch}.pth')
for i in range(5):
train(i)
def load_model(fold: int, epoch: int, device: torch.device = 'cuda'):
model = EfficientNetModel().to(device)
model.load_state_dict(torch.load(f'models/effinet_b4_adamw-f{fold}-{epoch}.pth'))
return model
transforms = tta.Compose(
[tta.HorizontalFlip(),
tta.VerticalFlip()]
)
tta_model = tta.ClassificationTTAWrapper(model, transforms)
def test(device: torch.device = 'cuda'):
submit = pd.read_csv('data/sample_submission.csv')
model1 = load_model(0, 8)
model2 = load_model(1, 8)
model3 = load_model(2, 8)
model4 = load_model(3, 8)
model5 = load_model(4, 8)
tta_model1 = tta.ClassificationTTAWrapper(model1, transforms)
tta_model2 = tta.ClassificationTTAWrapper(model2, transforms)
tta_model3 = tta.ClassificationTTAWrapper(model3, transforms)
tta_model4 = tta.ClassificationTTAWrapper(model4, transforms)
tta_model5 = tta.ClassificationTTAWrapper(model5, transforms)
tta_model1.eval()
tta_model2.eval()
tta_model3.eval()
tta_model4.eval()
tta_model5.eval()
testset = MnistDataset('data/test_dirty_mnist_2nd', 'data/sample_submission.csv', transforms_test)
test_loader = DataLoader(testset, batch_size=2, shuffle=False, num_workers=0)
batch_size = test_loader.batch_size
batch_index = 0
for i, (images, targets) in enumerate(test_loader):
images = images.to(device)
targets = targets.to(device)
outputs1 = tta_model1(images)
outputs2 = tta_model2(images)
outputs3 = tta_model3(images)
outputs4 = tta_model4(images)
outputs5 = tta_model5(images)
outputs = (outputs1 + outputs2 + outputs3 + outputs4 + outputs5) / 5
outputs = outputs > 0.0
batch_index = i * batch_size
submit.iloc[batch_index:batch_index+batch_size, 1:] = \
outputs.long().squeeze(0).detach().cpu().numpy()
submit.to_csv('effinet_b4-adamw-kfold.csv', index=False)
test()
|
{"hexsha": "d76a24d4b71e0652608398790a1c77acbfc07ad0", "size": 7153, "ext": "py", "lang": "Python", "max_stars_repo_path": "kaggle/leaves/tta.py", "max_stars_repo_name": "ioyy900205/PyTorch_mess-around", "max_stars_repo_head_hexsha": "90d255e17158699fd7902f7746b35fa18975112e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kaggle/leaves/tta.py", "max_issues_repo_name": "ioyy900205/PyTorch_mess-around", "max_issues_repo_head_hexsha": "90d255e17158699fd7902f7746b35fa18975112e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kaggle/leaves/tta.py", "max_forks_repo_name": "ioyy900205/PyTorch_mess-around", "max_forks_repo_head_hexsha": "90d255e17158699fd7902f7746b35fa18975112e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1, "max_line_length": 102, "alphanum_fraction": 0.6310638893, "include": true, "reason": "import numpy", "num_tokens": 1845}
|
import logging
import re
import numpy as np
import torch
from datasets import Metric, load_metric
from transformers import PreTrainedTokenizer
__all__ = [
"CodeGenerationEvaluator"
]
# From https://github.com/neulab/external-knowledge-codegen/blob/datasets/conala/conala_eval.py#L94
special_chars = re.compile(r'([^A-Za-z0-9_])')
lower_upper = re.compile(r'([a-z])([A-Z])')
double_space = re.compile(r'(\s)+')
QUOTED_TOKEN_RE = re.compile(r"(?P<quote>''|[`'\"])(?P<string>.*?)(?P=quote)")
class CodeGenerationEvaluator:
"""
    Helper class for calculating NORMAL BLEU scores. Calculates both BLEU and SacreBLEU.
"""
def __init__(self, tokenizer: PreTrainedTokenizer,
device: torch.device,
logger: logging.Logger = None,
minimal: bool = False,
smooth_bleu: bool = False,
get_high_rouge: bool = False,
only_alphanumeric_chars:bool=False):
self.sacre_bleu: Metric = load_metric('sacrebleu')
self.normal_bleu: Metric = load_metric('bleu')
self.rouge: Metric = load_metric('rouge')
self.tokenizer: PreTrainedTokenizer = tokenizer
self.logger = logger or logging.getLogger(__name__)
self.device = device
self.minimal = minimal
self.smooth_bleu = smooth_bleu
self.get_high_rouge = get_high_rouge
self.only_alphanumeric_chars = only_alphanumeric_chars
def postprocessText(self, preds, labels):
preds = list(map(self.postprocessSingle, preds))
labels = list(map(self.postprocessSingle, labels))
return preds, labels
def postprocessSingle(self, s):
if not self.only_alphanumeric_chars:
out = special_chars.sub(r' \1 ', s.strip())
else:
out = special_chars.sub(r' ', s.strip())
out = lower_upper.sub(r'\1 \2', out)
out = double_space.sub(r'\1', out)
return out.replace('"', '`').replace("\'", "`")
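    # Illustrative trace of postprocessSingle (hypothetical input, not from the original code):
    #     "df.groupBy('x')"  ->  "df . group By ( ` x ` )"
    # i.e. non-identifier characters are padded with spaces, camelCase is split,
    # repeated whitespace is collapsed, and quotes are normalized to backticks.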
def __call__(self, preds):
preds, labels = preds
if isinstance(preds, tuple):
preds = preds[0]
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, self.tokenizer.pad_token_id)
preds = np.where(preds != -100, preds, self.tokenizer.pad_token_id)
decoded_preds = self.tokenizer.batch_decode(preds, skip_special_tokens=True)
decoded_labels = self.tokenizer.batch_decode(labels, skip_special_tokens=True)
return self.evaluate(decoded_preds, decoded_labels)
def evaluate(self, decoded_preds, decoded_labels):
# Postprocess the both the labels and the predictions
decoded_preds, decoded_labels = self.postprocessText(decoded_preds, decoded_labels)
if self.minimal:
bleu_scores = self.calcBLEU(decoded_preds, decoded_labels)
return {
'BLEU' : bleu_scores['bleu'] * 100,
'BLEU-Unigram-Precision': 100 * bleu_scores['precisions'][0],
'BLEU-Bigram-Precision' : 100 * bleu_scores['precisions'][1],
}
sacre_scores, bleu_scores, rogue_scores = self.calcMetrics(decoded_preds, decoded_labels)
self.logger.info(
f"Got BLEU of {bleu_scores['bleu'] * 100:.2f} and SacreBLEU of "
f"{sacre_scores['score']:.2f}")
if self.get_high_rouge:
rouge_2 = rogue_scores['rouge2'].high
rouge_l = rogue_scores['rougeL'].high
else:
rouge_2 = rogue_scores['rouge2'].mid
rouge_l = rogue_scores['rougeL'].mid
out = {
"BLEU" : bleu_scores['bleu'] * 100,
'SacreBLEU' : sacre_scores['score'],
'BLEU-Unigram-Precision' : 100 * bleu_scores['precisions'][0],
'BLEU-Bigram-Precision' : 100 * bleu_scores['precisions'][1],
'BLEU-Trigram-Precision' : 100 * bleu_scores['precisions'][2],
"ROUGE-2" : rouge_2.fmeasure * 100,
"ROUGE-L" : rouge_l.fmeasure * 100,
'Sacre-Unigram-Precision': sacre_scores['precisions'][0],
'Sacre-Bigram-Precision' : sacre_scores['precisions'][1],
'Sacre-Trigram-Precision': sacre_scores['precisions'][2]
}
return out
# return {k: round(v, 4) for k, v in out.items()}
def calcBLEU(self, decoded_preds, decoded_labels):
# Calculate the BLEU scores then return them.
def bleuTok(arr):
return list(map(lambda x: x.split(' '), arr))
bleu_toked_preds = bleuTok(decoded_preds)
blue_toked_labels = [[x] for x in bleuTok(decoded_labels)]
return self.normal_bleu.compute(
predictions=bleu_toked_preds,
references=blue_toked_labels,
smooth=self.smooth_bleu
)
def calcMetrics(self, decoded_preds, decoded_labels):
sacre_scores = self.sacre_bleu.compute(predictions=decoded_preds,
references=[[l] for l in decoded_labels])
rogue_scores = self.rouge.compute(predictions=decoded_preds, references=decoded_labels)
return sacre_scores, self.calcBLEU(decoded_preds, decoded_labels), rogue_scores
def evaluateSingle(self, prediction, label):
return self.evaluate([prediction], [label])
|
{"hexsha": "3b83d694b4ca68c6c4e7d1e85f86876cb5400d8e", "size": 5410, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/evaluation/seq_to_seq.py", "max_stars_repo_name": "jhk16/stackoverflow-encourages-cheating", "max_stars_repo_head_hexsha": "425fa92e7defc783d34f4bd3366cd96990d3c037", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2021-06-11T18:54:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T08:15:01.000Z", "max_issues_repo_path": "src/evaluation/seq_to_seq.py", "max_issues_repo_name": "jhk16/stackoverflow-encourages-cheating", "max_issues_repo_head_hexsha": "425fa92e7defc783d34f4bd3366cd96990d3c037", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/evaluation/seq_to_seq.py", "max_forks_repo_name": "jhk16/stackoverflow-encourages-cheating", "max_forks_repo_head_hexsha": "425fa92e7defc783d34f4bd3366cd96990d3c037", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-27T07:01:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-27T07:01:07.000Z", "avg_line_length": 41.6153846154, "max_line_length": 99, "alphanum_fraction": 0.6188539741, "include": true, "reason": "import numpy", "num_tokens": 1300}
|
!
! Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
!
! Licensed under the Apache License, Version 2.0 (the "License");
! you may not use this file except in compliance with the License.
! You may obtain a copy of the License at
!
! http://www.apache.org/licenses/LICENSE-2.0
!
! Unless required by applicable law or agreed to in writing, software
! distributed under the License is distributed on an "AS IS" BASIS,
! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
! See the License for the specific language governing permissions and
! limitations under the License.
!
program main
!use cudafor
!use cublas
use matmul_m
use matmul_cublas_m
integer :: i, j, m, n, k
integer :: AllocateStatus
real, allocatable :: A(:,:) ! A(m,k)
real, allocatable :: B(:,:) ! B(k,n)
real, allocatable :: C(:,:) ! C(m,n)
real, allocatable :: D(:,:) ! D(m,n)
m = 2
n = 2
k = 2
write(*,*) 'Running with m,n,k: ', m, n, k
allocate( A(m,k), stat = AllocateStatus)
IF (AllocateStatus /= 0) STOP "*** Not enough memory ***"
allocate( B(k,n), stat = AllocateStatus)
IF (AllocateStatus /= 0) STOP "*** Not enough memory ***"
allocate( C(m,n), stat = AllocateStatus)
IF (AllocateStatus /= 0) STOP "*** Not enough memory ***"
allocate( D(m,n), stat = AllocateStatus)
IF (AllocateStatus /= 0) STOP "*** Not enough memory ***"
call srand(0)
do i=1,m
do j=1,k
A(i,j) = rand()
enddo
enddo
do i=1,k
do j=1,n
B(i,j) = rand()
enddo
enddo
call matmul(A, B, C, m, n, k);
  !$acc enter data copyin(A, B) create(D)
!$acc host_data use_device(A, B, D)
call matmul_cublas(A, B, D, m, n, k)
!$acc end host_data
  !$acc exit data delete(A, B) copyout(D)
! Check accuracy
write(*,*) 'maxval(abs(C-D)): ',maxval(abs(C-D))
write(*,*) "Program finished sucessfully."
end program
|
{"hexsha": "00657496791229ce8c40efc924cd1d021209d259", "size": 1956, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "indevelopment/module8/English/Fortran/cublas/Solution/main.f90", "max_stars_repo_name": "mozhgan-kch/openacc-training-materials", "max_stars_repo_head_hexsha": "099643c50f41415943035daa64a698db7b45e409", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 59, "max_stars_repo_stars_event_min_datetime": "2020-01-16T21:26:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T05:32:38.000Z", "max_issues_repo_path": "indevelopment/module8/English/Fortran/cublas/Solution/main.f90", "max_issues_repo_name": "afzal442/openacc-training-materials", "max_issues_repo_head_hexsha": "1047652d8c075333428dfc7d398276732d9aef78", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-06-02T15:04:35.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-02T15:04:35.000Z", "max_forks_repo_path": "indevelopment/module8/English/Fortran/cublas/Solution/main.f90", "max_forks_repo_name": "afzal442/openacc-training-materials", "max_forks_repo_head_hexsha": "1047652d8c075333428dfc7d398276732d9aef78", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 22, "max_forks_repo_forks_event_min_datetime": "2020-03-30T19:50:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T05:50:34.000Z", "avg_line_length": 25.4025974026, "max_line_length": 74, "alphanum_fraction": 0.6124744376, "num_tokens": 589}
|
from astropy.table import Table
import yaml
import os
from db_tables import load_connection, open_settings
SETTINGS = yaml.load(open(os.path.join(os.environ['HOME'], 'dd_configure.yaml')))
print(SETTINGS)
Session, engine = load_connection(SETTINGS['CONNECTION_STRING'], echo=False)
results = engine.execute("""SELECT
sed.designation,
GROUP_CONCAT(DISTINCT(classifications.classified_as) SEPARATOR ';') classification,
subjects.ddid,
subjects.state,
subjects.im_2massj as 2mass_imagj,
subjects.im_2massk as 2mass_image_k,
sed.ra,
sed.dec,
sed.glon,
sed.glat,
sed.w1mpro,
sed.w2mpro,
sed.w3mpro,
sed.w4mpro,
sed.j_m_2mass,
sed.h_m_2mass,
sed.k_m_2mass,
urat.pmRA,
urat.pmDE,
urat.Bmag,
urat.Vmag,
urat.rmag,
urat.imag,
urat.gmag
FROM sed
JOIN subjects ON sed.designation = subjects.wise_id
JOIN classifications ON subjects.ddid = classifications.ddid
LEFT JOIN urat ON sed.id = urat.file_id
WHERE classifications.classified_as IS NOT NULL
GROUP BY sed.designation;""")
keys = results.keys()
print(keys)
datarows = []
for item in results:
item = item.values()
for i, column in enumerate(item):
if str(column).strip().startswith('http'):
            print(column)
item[i] = """<a href="{0}" target="_blank"><img width="100" src="{0}"><a>""".format(column.strip())
datarows.append(item)
#datarows = [item for item in results]
t = Table(rows=datarows, names=keys, meta={'Name':'COS HEADER TABLE'})
t.write('composite_table.html', format='jsviewer')
|
{"hexsha": "67a2ba11699dd45bcf12448d269edf764d04fc91", "size": 1663, "ext": "py", "lang": "Python", "max_stars_repo_path": "pull.py", "max_stars_repo_name": "justincely/disk_detective_data", "max_stars_repo_head_hexsha": "5e8ceadf5708a525287357f83ca927edd519b0dd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pull.py", "max_issues_repo_name": "justincely/disk_detective_data", "max_issues_repo_head_hexsha": "5e8ceadf5708a525287357f83ca927edd519b0dd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pull.py", "max_forks_repo_name": "justincely/disk_detective_data", "max_forks_repo_head_hexsha": "5e8ceadf5708a525287357f83ca927edd519b0dd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.186440678, "max_line_length": 111, "alphanum_fraction": 0.6554419723, "include": true, "reason": "from astropy", "num_tokens": 435}
|
from xml.dom import minidom
import re
import numpy as np
from BezierCurve import GetBezierPoints as bp
import pygame
import time
import triangulate
import stl
from stl import mesh
import copy
import earcut
# import geopandas as gpd
from shapely.geometry import Polygon
import pathlib
# read the SVG file
file_name = "new-york-yankees-logo.svg"
doc = minidom.parse(file_name)
# path strings is an array containing the d values from the path in the svg file
path_strings = [path.getAttribute('d') for path
in doc.getElementsByTagName('path')]
# path strings is an array containing the d values from the path in the svg file
transform_strings = [path.getAttribute('transform') for path
in doc.getElementsByTagName('g')]
allPaths = []
for path in doc.getElementsByTagName('g'):
paths = []
paths.append(path.getAttribute('transform'))
if path.hasChildNodes():
for p in path.childNodes:
try:
paths.append(p.getAttribute('d'))
except:
continue
allPaths.append(paths)
if len(allPaths) == 0:
allPaths.append([])
allPaths[0] = path_strings
# for p in path.getElementByTagName('path'):
# print(p)
# print(path)
# the color value for each path, not used
color_strings = [path.getAttribute('style') for path
in doc.getElementsByTagName('path')]
doc.unlink()
# amount of detail put into the curves, higher number is more detail
detail = 10
# List of list of vertices
global lolov
lolov = []
# List of vertices
global lov
lov = []
# Method to save multiple meshes into one stl
# def combined_stl(meshes, save_path ="./combined.stl"):
def combined_stl(meshes):
# gmsh.write(str(pathlib.Path(args.svg_path).with_suffix(".stl")))
save_path = str(pathlib.Path(file_name).with_suffix(".stl"))
combined = mesh.Mesh(np.concatenate([m.data for m in meshes]))
combined.save(save_path, mode=stl.Mode.ASCII)
# Method update the currPoint and add currPoint to vertices
# Returns updated currPoint
def createLine(x, y, vertices):
arr = np.array([float(x), float(y)])
currPoint = arr
vertices = np.append(vertices, currPoint)
return currPoint, vertices
# Draw all the vertices as lines, used mainly for debugging
def drawLines():
pygame.init()
surface = pygame.display.set_mode((800, 800))
surface.fill((255, 255, 255))
color = (0, 0, 0)
for listOfVer in lolov:
for vert in listOfVer:
for i in range(0, len(vert) - 3, 2):
pygame.draw.line(surface, color, (vert[i], vert[i + 1]), (vert[i + 2], vert[i + 3]))
pygame.display.flip()
time.sleep(.5)
time.sleep(10)
def drawPolygons(polygons):
pygame.init()
surface = pygame.display.set_mode((800, 800))
surface.fill((255, 255, 255))
RED = (255, 0, 0)
for x in polygons:
pygame.draw.polygon(surface, RED, x, 1)
pygame.display.update()
time.sleep(3)
while True:
time.sleep(1)
# Draws line for the current vertice, used mainly for debugging
def drawLineCurr():
pygame.init()
surface = pygame.display.set_mode((800, 800))
surface.fill((255, 255, 255))
color = (0, 0, 0)
for i in range(0, len(vertices) - 3, 2):
pygame.draw.line(surface, color, (vertices[i], vertices[i + 1]), (vertices[i + 2], vertices[i + 3]))
pygame.display.flip()
input("press enter to continue")
def getLines(newStr, i):
# remove all spaces before and after commas and -
newStr[i + 1] = newStr[i + 1].replace(' ,', ',')
newStr[i + 1] = newStr[i + 1].replace(', ', ',')
newStr[i + 1] = newStr[i + 1].replace(' -', '-')
newStr[i + 1] = newStr[i + 1].replace('- ', '-')
# add a space before -
newStr[i + 1] = newStr[i + 1].replace('-', ' -')
# remove spaces after comma (only applicable if - right after comma)
newStr[i + 1] = newStr[i + 1].replace(', ', ',')
# change commma to space
newStr[i + 1] = newStr[i + 1].replace(',', ' ')
# make sure to reconnect e and -
newStr[i + 1] = newStr[i + 1].replace('e -', 'e-')
# now only have to split based on spaces
line = newStr[i + 1].strip().split()
newLine = []
for l in range(0, len(line), 2):
newLine.append((line[l] + "," + line[l + 1]))
line = newLine
return line
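# Illustrative trace of getLines (hypothetical path fragment, not from the original file):
# if newStr[i + 1] is "10,20 30-40", the cleanup above turns it into "10 20 30 -40" and the
# pairing loop returns ['10,20', '30,-40'].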
def printMaxes(lov):
maxX = 0
minX = 100000
maxY = 0
minY = 100000
for vert in lov:
for v in range(len(vert)):
if v % 2 == 1:
# odd // y
if maxY < vert[v]:
maxY = vert[v]
if minY > vert[v]:
minY = vert[v]
else:
# even // x
if maxX < vert[v]:
maxX = vert[v]
if minX > vert[v]:
minX = vert[v]
print(minX, maxX, minY, maxY)
# Loop through each path in path_strings
global currPoint
currPoint = np.array([0.0, 0.0])
global initialPoint
initialPoint = np.array([0.0, 0.0])
global startPoint
startPoint = np.array([0.0, 0.0])
for g in allPaths:
matrices = [1, 0, 0, 1, 0, 0]
for u in range(len(g)):
if len(g[u]) == 0:
continue
if g[u].split('(')[0] == 'matrix':
if ',' in g[0]:
matrices = g[0].split('(')[1].split(')')[0].split(',')
else:
matrices = g[0].split('(')[1].split(')')[0].split(' ')
continue
if 'translate' in g[u]:
continue
currPoint = np.array([0.0, 0.0])
initialPoint = currPoint.copy()
path = g[u]
# for path in path_strings:
# separate each path in path_strings out by letter
newStr = re.split('([a-df-zA-DF-Z])', path)
# ^[a-df-zA-DF-Z]+$
global vertices
vertices = np.array([0.0, 0.0])
savedCurve = np.array([0.0, 0.0])
draw = 0
# loop through each letter in the path
for i in range(len(newStr)):
if newStr[i].strip() == 'm':
savedCurve = np.array([0.0, 0.0])
if (len(vertices) > 2):
lov.append(vertices.copy())
lines = getLines(newStr, i)
if (currPoint[0] != 0 or currPoint[1] != 0):
tempPoint = np.asarray(np.array(lines[0].split(',')), dtype=float)
currPoint = np.add(tempPoint, currPoint)
initialPoint = currPoint.copy()
else:
initialPoint = np.asarray(np.array(lines[0].split(',')), dtype=float)
currPoint = np.asarray(np.array(lines[0].split(',')), dtype=float)
vertices = np.empty(1)
vertices = currPoint.copy()
for b in range(1, len(lines)):
currPoint, vertices = createLine(float(lines[b].split(',')[0]) + currPoint[0],
float(lines[b].split(',')[1]) + currPoint[1], vertices)
elif newStr[i].strip() == 'M':
savedCurve = np.array([0.0, 0.0])
if (len(vertices) > 2):
lov.append(vertices.copy())
vertices = np.empty(1)
initialPoint = np.empty(1)
currPoint = np.empty(1)
lines = getLines(newStr, i)
initialPoint = np.asarray(np.array(lines[0].split(',')), dtype=float)
currPoint = np.asarray(np.array(lines[0].split(',')), dtype=float)
vertices = np.asarray(np.array(lines[0].split(',')), dtype=float)
for b in range(1, len(lines)):
                    currPoint, vertices = createLine(float(lines[b].split(',')[0]), float(lines[b].split(',')[1]), vertices)
elif newStr[i].strip() == 'h':
savedCurve = np.array([0.0, 0.0])
for x in range(0, len(newStr[i + 1].strip().split())):
currPoint, vertices = createLine(float(newStr[i + 1].strip().split()[x]) + currPoint[0],
currPoint[1], vertices)
elif newStr[i].strip() == 'H':
savedCurve = np.array([0.0, 0.0])
for x in range(0, len(newStr[i + 1].strip().split())):
currPoint, vertices = createLine(float(newStr[i + 1].strip().split()[x]), currPoint[1], vertices)
elif newStr[i].strip() == 'v':
savedCurve = np.array([0.0, 0.0])
for x in range(0, len(newStr[i + 1].strip().split())):
currPoint, vertices = createLine(currPoint[0],
float(newStr[i + 1].strip().split()[x]) + currPoint[1], vertices)
elif newStr[i].strip() == 'V':
savedCurve = np.array([0.0, 0.0])
for x in range(0, len(newStr[i + 1].strip().split())):
currPoint, vertices = createLine(currPoint[0], float(newStr[i + 1].strip().split()[x]), vertices)
elif newStr[i].strip() == 'l':
savedCurve = np.array([0.0, 0.0])
line = getLines(newStr, i)
for l in line:
if ',' in l:
arr = np.asarray(np.array(l.strip().split(',')), dtype=float)
else:
l = l.replace('-', ' -')
arr = np.asarray(np.array(l.strip().split(' ')), dtype=float)
currPoint = np.add(arr, currPoint)
currPoint, vertices = createLine(currPoint[0], currPoint[1], vertices)
elif newStr[i].strip() == 'L':
savedCurve = np.array([0.0, 0.0])
line = getLines(newStr, i)
for l in line:
if ',' in l:
arr = np.asarray(np.array(l.strip().split(',')), dtype=float)
else:
l = l.replace('-', ' -')
arr = np.asarray(np.array(l.strip().split(' ')), dtype=float)
points = l.split(',')
currPoint, vertices = createLine(points[0], points[1], vertices)
elif newStr[i].strip() == 'c':
line = getLines(newStr, i)
if len(line) % 3 != 0:
# print("Some error idk c should be divisible by 3")
if len(line) == 1:
for l in line:
if ',' in l:
arr = np.asarray(np.array(l.strip().split(',')), dtype=float)
else:
l = l.replace('-', ' -')
arr = np.asarray(np.array(l.strip().split(' ')), dtype=float)
currPoint = np.add(arr, currPoint)
currPoint, vertices = createLine(currPoint[0], currPoint[1], vertices)
else:
for x in range(0, len(line), 3):
x1 = np.add(currPoint, np.asarray(np.array(line[x].strip().split(',')), dtype=float))
x2 = np.add(currPoint, np.asarray(np.array(line[x + 1].strip().split(',')), dtype=float))
x3 = np.add(currPoint, np.asarray(np.array(line[x + 2].strip().split(',')), dtype=float))
savedCurve = x2.copy()
newVertices = bp(currPoint, x1, x2, x3, detail)
vertices = np.append(vertices, newVertices)
currPoint = x3
elif newStr[i].strip() == 'C':
line = getLines(newStr, i)
# if ',' in l:
# arr = np.asarray(np.array(l.strip().split(',')), dtype = float)
# else:
# l = l.replace('-', ' -')
# arr = np.asarray(np.array(l.strip().split(' ')), dtype = float)
for x in range(0, len(line), 3):
x1 = np.asarray(np.array(line[x].strip().split(',')), dtype=float)
x2 = np.asarray(np.array(line[x + 1].strip().split(',')), dtype=float)
x3 = np.asarray(np.array(line[x + 2].strip().split(',')), dtype=float)
savedCurve = x2.copy()
newVertices = bp(currPoint, x1, x2, x3, detail)
vertices = np.append(vertices, newVertices)
currPoint = x3
elif newStr[i].strip() == 's':
line = getLines(newStr, i)
if len(line) % 2 != 0:
# print("Some error idk c should be divisible by 3")
if len(line) == 1:
for l in line:
if ',' in l:
arr = np.asarray(np.array(l.strip().split(',')), dtype=float)
else:
l = l.replace('-', ' -')
arr = np.asarray(np.array(l.strip().split(' ')), dtype=float)
currPoint = np.add(arr, currPoint)
currPoint, vertices = createLine(currPoint[0], currPoint[1], vertices)
else:
for x in range(0, len(line), 2):
x1 = np.add(currPoint, np.asarray(np.array(line[x].strip().split(',')), dtype=float))
x2 = np.add(currPoint, np.asarray(np.array(line[x + 1].strip().split(',')), dtype=float))
if savedCurve[0] == 0 and savedCurve[1] == 0:
newVertices = bp(currPoint, x1, x1, x2, detail)
else:
reflect = np.subtract(currPoint * 2, savedCurve)
# reflect = np.subtract(savedCurve * 2, currPoint)
# x1 = np.add(reflect, np.asarray(np.array(line[x].strip().split(',')), dtype = float))
newVertices = bp(currPoint, reflect, x1, x2, detail)
vertices = np.append(vertices, newVertices)
savedCurve = x1
currPoint = x2
elif newStr[i].strip() == 'S':
print("S")
elif newStr[i].strip() == 'z' or newStr[i].strip() == 'Z':
if (len(vertices) > 2):
vertices = np.append(vertices, initialPoint)
currPoint = initialPoint.copy()
else:
draw = 1
if draw == 1:
draw = 0
lov.append(vertices)
# print(len(vertices))
for vert in range(len(lov)):
for v in range(len(lov[vert])):
if v % 2 == 1:
# odd // y
lov[vert][v] = float(matrices[1]) * lov[vert][v - 1] + float(matrices[3]) * lov[vert][v] + float(
matrices[5])
else:
# even // x
lov[vert][v] = float(matrices[0]) * lov[vert][v] + float(matrices[2]) * lov[vert][v + 1] + float(
matrices[4])
lolov.append(lov.copy())
lov.clear()
def drawTriangles(triangles):
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
screen = pygame.display.set_mode((500, 500))
for x in triangles:
pygame.draw.polygon(screen, RED, x, 1)
pygame.display.update()
while True:
time.sleep(1)
def drawNewTri(newTri, pts):
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
screen = pygame.display.set_mode((500, 500))
for i in range(0, len(newTri), 3):
pygame.draw.polygon(screen, RED, [pts[newTri[i]], pts[newTri[i + 1]], pts[newTri[i + 2]]], 1)
pygame.display.update()
time.sleep(5)
def getScreenReady():
return pygame.display.set_mode((500, 500))
# drawLines()
def getSideTri(pt1, pt2, top, bot):
p1t = pt1.copy()
p2t = pt2.copy()
p1b = pt1.copy()
p2b = pt2.copy()
p2t.append(top)
p2b.append(bot)
p1t.append(top)
p1b.append(bot)
retVal = []
retVal.append([p1t, p1b, p2t])
retVal.append([p1b, p2b, p2t])
return retVal
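# Illustrative sketch (hypothetical coordinates, not from the original file): for the edge from
# pt1 = [0, 0] to pt2 = [1, 0] extruded between bot = 0 and top = 2, getSideTri returns the two
# triangles [[0, 0, 2], [0, 0, 0], [1, 0, 2]] and [[0, 0, 0], [1, 0, 0], [1, 0, 2]], which
# together form the rectangular side wall of that edge.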
def getSides(pts, holes, topHeight, botHeight):
retVal = []
splitPoints = []
if len(holes) > 0:
splitPoints.append(pts[0:holes[0]])
for i in range(len(holes)):
splitPoints.append(pts[holes[i]:holes[i + 1]]) if len(holes) - 1 > i else splitPoints.append(
pts[holes[i]:len(pts)])
else:
splitPoints.append(pts)
for p in splitPoints:
sideTri = []
for i in range(0, len(p)):
if i == len(p) - 1:
sideTri.append(getSideTri(p[i], p[0], topHeight, botHeight))
else:
sideTri.append(getSideTri(p[i], p[i + 1], topHeight, botHeight))
cubeSide = mesh.Mesh(np.zeros(len(sideTri) * len(sideTri[0]), dtype=mesh.Mesh.dtype))
for i in range(0, len(sideTri)):
for j in range(0, len(sideTri[i])):
cubeSide.vectors[i * 2 + j] = sideTri[i][j]
retVal.append(cubeSide)
return retVal
def getHoles(allPts, polys, holes):
isActive = []
contains = []
for i in range(0, len(allPts)):
contains.append([])
isActive.append(1)
for i in range(len(polys) - 1):
for j in range(i + 1, len(polys)):
if polys[i].contains(polys[j]):
contains[i].append(j)
for i in range(len(contains)):
for j in contains[i]:
for k in contains[j]:
contains[i].remove(k)
contains[j] = []
for i in range(len(contains)):
for j in contains[i]:
holes[i].append(len(allPts[i]))
for k in allPts[j]:
allPts[i].append(k)
allContains = []
for i in contains:
for j in i:
allContains.append(j)
allContains.sort()
allContains = allContains[::-1]
for i in range(len(allContains)):
allPts.pop(allContains[i])
return allPts
def normalizePoints():
allVerts = []
for listOfVert in lolov:
for vert in listOfVert:
for v in vert:
allVerts.append(v)
min = 0.0
for allVert in allVerts:
if allVert < min:
min = allVert
return min
def createStl():
topHeight = 0
botHeight = 0
incrHeight = 1
smallerInc = 1
cubes = []
# for lov in lolov:
# printMaxes(lov)
# min = normalizePoints()
# for listOfVert in lolov:
# for vert in listOfVert:
# for v in vert:
# v = v - min
for listOfVert in lolov:
topHeight = topHeight + incrHeight
allVertPts = []
pts = []
holes = []
for v in listOfVert:
holes.append([])
for vert in listOfVert:
pts = []
for i in range(0, len(vert), 2):
pts.append([vert[i], vert[i + 1]])
# print("(" + str(vert[i]) + "," + str(vert[i+1]) + ")")
allVertPts.append(pts.copy())
for j in range(0, len(allVertPts)):
for i in range(0, len(allVertPts) - 1):
if triangulate.GetArea(allVertPts[i]) < triangulate.GetArea(allVertPts[i + 1]):
tempPt = allVertPts[i].copy()
allVertPts[i] = allVertPts[i + 1].copy()
allVertPts[i + 1] = tempPt.copy()
polygons = []
for pt in allVertPts:
polygons.append(Polygon(pt.copy()))
# drawPolygons(allVertPts)
allVertPts = getHoles(allVertPts, polygons, holes)
for c in range(len(allVertPts)):
# print(1)
verts = []
for b in range(len(allVertPts[c])):
verts.append(allVertPts[c][b][0])
verts.append(allVertPts[c][b][1])
newTri = earcut.earcut(verts, holes[c])
botTri = []
topTri = []
topPts = copy.deepcopy(allVertPts[c])
botPts = copy.deepcopy(allVertPts[c])
for tp in topPts:
tp.append(topHeight)
for bp in botPts:
bp.append(botHeight)
for i in range(0, len(newTri), 3):
topTri.append((topPts[newTri[i]], topPts[newTri[i + 1]], topPts[newTri[i + 2]]))
botTri.append((botPts[newTri[i]], botPts[newTri[i + 2]], botPts[newTri[i + 1]]))
cubeTop = mesh.Mesh(np.zeros(len(topTri), dtype=mesh.Mesh.dtype))
for i in range(0, len(topTri)):
cubeTop.vectors[i] = topTri[i]
            cubeBot = mesh.Mesh(np.zeros(len(botTri), dtype=mesh.Mesh.dtype))
for i in range(0, len(botTri)):
cubeBot.vectors[i] = botTri[i]
cubes.append(cubeTop)
cubes.append(cubeBot)
sideCubes = getSides(allVertPts[c], holes[c], topHeight, botHeight)
for side in sideCubes:
cubes.append(side)
combined_stl(cubes)
createStl()
|
{"hexsha": "4c1081c73b13dcd9367e522df99a7efdb5a3283a", "size": 21275, "ext": "py", "lang": "Python", "max_stars_repo_path": "SvgToStl.py", "max_stars_repo_name": "daekwon00/SvgToStl", "max_stars_repo_head_hexsha": "170d81ea689b505dbba9e2293b2dbe0e723da6ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SvgToStl.py", "max_issues_repo_name": "daekwon00/SvgToStl", "max_issues_repo_head_hexsha": "170d81ea689b505dbba9e2293b2dbe0e723da6ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SvgToStl.py", "max_forks_repo_name": "daekwon00/SvgToStl", "max_forks_repo_head_hexsha": "170d81ea689b505dbba9e2293b2dbe0e723da6ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5416666667, "max_line_length": 118, "alphanum_fraction": 0.5004465335, "include": true, "reason": "import numpy", "num_tokens": 5337}
|
import numpy as np
import pandas as pd
import seaborn as sns
from abc import ABC, abstractmethod
from matplotlib import pyplot as plt
from typing import Generic, TypeVar, List, Union, Dict, Sequence, Optional
from ...util.string import ToStringMixin, dictString
from ...vector_model import VectorModel
# Note: in the 2020.2 version of PyCharm, passing strings to `bound` is highlighted as an error.
# It does not cause runtime errors and the static type checker ignores the bound anyway, so it does not matter for now.
# However, this might cause problems with type checking in the future. Therefore, I moved the definition of TEvalStats
# below the definition of EvalStats. Unfortunately, the dependency in generics between EvalStats and Metric
# does not allow defining both TMetric and TEvalStats properly. For now we have to leave the bound as a string
# and hope for the best in the future
TMetric = TypeVar("TMetric", bound="Metric")
TVectorModel = TypeVar("TVectorModel", bound=VectorModel)
PredictionArray = Union[np.ndarray, pd.Series, pd.DataFrame, list]
class EvalStats(Generic[TMetric], ToStringMixin):
def __init__(self, metrics: List[TMetric], additionalMetrics: List[TMetric] = None):
if len(metrics) == 0:
raise ValueError("No metrics provided")
self.metrics = metrics
# Implementations of EvalStats will typically provide default metrics, therefore we include
# the possibility for passing additional metrics here
if additionalMetrics is not None:
self.metrics = self.metrics + additionalMetrics
self.name = None
def setName(self, name: str):
self.name = name
def addMetric(self, metric: TMetric):
self.metrics.append(metric)
def computeMetricValue(self, metric: TMetric) -> float:
return metric.computeValueForEvalStats(self)
def getAll(self) -> Dict[str, float]:
"""Gets a dictionary with all metrics"""
d = {}
for metric in self.metrics:
d[metric.name] = self.computeMetricValue(metric)
return d
def _toStringObjectInfo(self) -> str:
return dictString(self.getAll())
TEvalStats = TypeVar("TEvalStats", bound=EvalStats)
class Metric(Generic[TEvalStats], ABC):
name: str
def __init__(self, name: str = None):
"""
:param name: the name of the metric; if None use the class' name attribute
"""
# this raises an attribute error if a subclass does not specify a name as a static attribute nor as parameter
self.name = name if name is not None else self.__class__.name
@abstractmethod
def computeValueForEvalStats(self, evalStats: TEvalStats) -> float:
pass
class EvalStatsCollection(Generic[TEvalStats], ABC):
def __init__(self, evalStatsList: List[TEvalStats]):
self.statsList = evalStatsList
metricsList = [es.getAll() for es in evalStatsList]
metricNames = sorted(metricsList[0].keys())
self.metrics = {metric: [d[metric] for d in metricsList] for metric in metricNames}
def getValues(self, metric):
return self.metrics[metric]
def aggStats(self):
agg = {}
for metric, values in self.metrics.items():
agg[f"mean[{metric}]"] = float(np.mean(values))
agg[f"std[{metric}]"] = float(np.std(values))
return agg
def meanStats(self):
metrics = {metric: np.mean(values) for (metric, values) in self.metrics.items()}
metrics.update({f"StdDev[{metric}]": np.std(values) for (metric, values) in self.metrics.items()})
return metrics
def plotDistribution(self, metric):
values = self.metrics[metric]
plt.figure()
plt.title(metric)
sns.distplot(values)
def toDataFrame(self) -> pd.DataFrame:
"""
:return: a DataFrame with the evaluation metrics from all contained EvalStats objects;
the EvalStats' name field being used as the index if it is set
"""
data = dict(self.metrics)
index = [stats.name for stats in self.statsList]
if len([n for n in index if n is not None]) == 0:
index = None
return pd.DataFrame(data, index=index)
@abstractmethod
def getGlobalStats(self) -> TEvalStats:
pass
def __str__(self):
return f"{self.__class__.__name__}[" + \
", ".join([f"{key}={self.aggStats()[key]:.4f}" for key in self.metrics]) + "]"
class PredictionEvalStats(EvalStats[TMetric], ABC):
"""
Collects data for the evaluation of predicted values (including multi-dimensional predictions)
and computes corresponding metrics
"""
def __init__(self, y_predicted: Optional[PredictionArray], y_true: Optional[PredictionArray],
metrics: List[TMetric], additionalMetrics: List[TMetric] = None):
"""
:param y_predicted: sequence of predicted values, or, in case of multi-dimensional predictions, either a data frame with
one column per dimension or a nested sequence of values
:param y_true: sequence of ground truth labels of same shape as y_predicted
:param metrics: list of metrics to be computed on the provided data
        :param additionalMetrics: metrics to compute in addition to the ones in metrics; useful when a subclass passes its own default metrics and further metrics shall be added
"""
self.y_true = []
self.y_predicted = []
self.y_true_multidim = None
self.y_predicted_multidim = None
if y_predicted is not None:
self.addAll(y_predicted, y_true)
super().__init__(metrics, additionalMetrics=additionalMetrics)
def add(self, y_predicted, y_true):
"""
Adds a single pair of values to the evaluation
Parameters:
y_predicted: the value predicted by the model
y_true: the true value
"""
self.y_true.append(y_true)
self.y_predicted.append(y_predicted)
def addAll(self, y_predicted: PredictionArray, y_true: PredictionArray):
"""
:param y_predicted: sequence of predicted values, or, in case of multi-dimensional predictions, either a data frame with
one column per dimension or a nested sequence of values
:param y_true: sequence of ground truth labels of same shape as y_predicted
"""
def isSequence(x):
return isinstance(x, pd.Series) or isinstance(x, list) or isinstance(x, np.ndarray)
if isSequence(y_predicted) and isSequence(y_true):
a, b = len(y_predicted), len(y_true)
if a != b:
raise Exception(f"Lengths differ (predicted {a}, truth {b})")
if a > 0:
firstItem = y_predicted.iloc[0] if isinstance(y_predicted, pd.Series) else y_predicted[0]
isNestedSequence = isSequence(firstItem)
if isNestedSequence:
for y_true_i, y_predicted_i in zip(y_true, y_predicted):
self.addAll(y_predicted=y_predicted_i, y_true=y_true_i)
else:
self.y_true.extend(y_true)
self.y_predicted.extend(y_predicted)
elif isinstance(y_predicted, pd.DataFrame) and isinstance(y_true, pd.DataFrame):
# keep track of multidimensional data (to be used later in getEvalStatsCollection)
y_predicted_multidim = y_predicted.values
y_true_multidim = y_true.values
dim = y_predicted_multidim.shape[1]
if dim != y_true_multidim.shape[1]:
raise Exception("Dimension mismatch")
if self.y_true_multidim is None:
self.y_predicted_multidim = [[] for _ in range(dim)]
self.y_true_multidim = [[] for _ in range(dim)]
if len(self.y_predicted_multidim) != dim:
raise Exception("Dimension mismatch")
for i in range(dim):
self.y_predicted_multidim[i].extend(y_predicted_multidim[:, i])
self.y_true_multidim[i].extend(y_true_multidim[:, i])
# convert to flat data for this stats object
y_predicted = y_predicted_multidim.reshape(-1)
y_true = y_true_multidim.reshape(-1)
self.y_true.extend(y_true)
self.y_predicted.extend(y_predicted)
else:
raise Exception(f"Unhandled data types: {type(y_predicted)}, {type(y_true)}")
def _toStringObjectInfo(self) -> str:
return f"{super()._toStringObjectInfo()}, N={len(self.y_predicted)}"
def meanStats(evalStatsList: Sequence[EvalStats]) -> Dict[str, float]:
"""
For a list of EvalStats objects compute the mean values of all metrics in a dictionary.
Assumes that all provided EvalStats have the same metrics
"""
dicts = [s.getAll() for s in evalStatsList]
metrics = dicts[0].keys()
return {m: np.mean([d[m] for d in dicts]) for m in metrics}
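# --- Hedged usage sketch (not part of the original module) ---
# The two classes below are hypothetical examples showing how the abstractions above fit together:
# a concrete Metric computes a value from the data collected in a PredictionEvalStats, and getAll()
# gathers all metric values by name. numpy is assumed to be imported as np, as elsewhere in this module.
class ExampleMAEMetric(Metric):
    name = "MAE"
    def computeValueForEvalStats(self, evalStats: "ExamplePredictionEvalStats") -> float:
        y_predicted, y_true = np.array(evalStats.y_predicted), np.array(evalStats.y_true)
        return float(np.mean(np.abs(y_predicted - y_true)))
class ExamplePredictionEvalStats(PredictionEvalStats[ExampleMAEMetric]):
    def __init__(self, y_predicted=None, y_true=None, additionalMetrics=None):
        super().__init__(y_predicted, y_true, metrics=[ExampleMAEMetric()],
            additionalMetrics=additionalMetrics)
if __name__ == "__main__":
    stats = ExamplePredictionEvalStats(y_predicted=[2.5, 0.0, 2.0], y_true=[3.0, -0.5, 2.0])
    print(stats.getAll())  # {'MAE': 0.333...}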
|
{"hexsha": "48ee2cc91cc5a2948143eea2e7037d786c47e9dc", "size": 8955, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/sensai/evaluation/eval_stats/eval_stats_base.py", "max_stars_repo_name": "schroedk/sensAI", "max_stars_repo_head_hexsha": "a2d6d7c6ab7bed9ccd5eac216dd988c49d69aec7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2020-02-19T09:16:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T16:19:33.000Z", "max_issues_repo_path": "src/sensai/evaluation/eval_stats/eval_stats_base.py", "max_issues_repo_name": "schroedk/sensAI", "max_issues_repo_head_hexsha": "a2d6d7c6ab7bed9ccd5eac216dd988c49d69aec7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 47, "max_issues_repo_issues_event_min_datetime": "2020-03-11T16:26:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-04T15:29:40.000Z", "max_forks_repo_path": "src/sensai/evaluation/eval_stats/eval_stats_base.py", "max_forks_repo_name": "schroedk/sensAI", "max_forks_repo_head_hexsha": "a2d6d7c6ab7bed9ccd5eac216dd988c49d69aec7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-03-12T21:33:22.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-21T14:43:04.000Z", "avg_line_length": 42.8468899522, "max_line_length": 128, "alphanum_fraction": 0.6542713568, "include": true, "reason": "import numpy", "num_tokens": 1983}
|
function [ f,qualMeasOut] = PCSD(proj,geo,angles,maxiter,varargin)
%PCSD solves the reconstruction problem using projection-controlled steepest descent method
%
% PCSD(PROJ,GEO,ALPHA,NITER) solves the reconstruction problem using
% the projection data PROJ taken over ALPHA angles, corresponding to the
% geometry described in GEO, using NITER iterations.
%
% PCSD(PROJ,GEO,ALPHA,NITER,OPT,VAL,...) uses options and values for solving. The
% possible options in OPT are:
%
%
% 'lambda': Sets the value of the hyperparameter for the SART iterations.
% Default is 1
%
%   'lambda_red':  Reduction factor for lambda. Every iteration
%                  lambda=lambda_red*lambda. Default is 0.99
%
%   'init':    Describes different initialization techniques.
%              •  'none'     : Initializes the image to zeros (default)
%              •  'FDK'      : Initializes the image to the FDK reconstruction
%
% 'TViter': Defines the amount of TV iterations performed per SART
% iteration. Default is 20
%
%   'maxL2err' Maximum L2 error to accept an image as valid. This
%              parameter is crucial for the algorithm; it determines at
%              what point an image should not be updated further.
%              Default is 20% of the FDK L2 norm.
% 'Verbose' 1 or 0. Default is 1. Gives information about the
% progress of the algorithm.
% 'redundancy_weighting': true or false. Default is true. Applies data
% redundancy weighting to projections in the update step
% (relevant for offset detector geometry)
%   'groundTruth'  an image to use as ground truth when quality measures
%                  are requested, to plot their change w.r.t. this known
%                  data.
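%
%  Example (illustrative sketch only; assumes projection data PROJ, a TIGRE
%  geometry struct GEO and an angle vector ANGLES already exist, and that
%  'RMSE' is among the quality measures supported by Measure_Quality):
%
%     [img,qualMeas]=PCSD(proj,geo,angles,30,...
%                         'lambda',1,'lambda_red',0.99,...
%                         'TViter',20,'nonneg',true,'qualmeas',{'RMSE'});
%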
%--------------------------------------------------------------------------
%--------------------------------------------------------------------------
% This file is part of the TIGRE Toolbox
%
% Copyright (c) 2015, University of Bath and
% CERN-European Organization for Nuclear Research
% All rights reserved.
%
% License: Open Source under BSD.
% See the full license at
% https://github.com/CERN/TIGRE/blob/master/LICENSE
%
% Contact: tigre.toolbox@gmail.com
% Codes: https://github.com/CERN/TIGRE/
% Coded by: Ander Biguri and Manasavee Lohvithee
%--------------------------------------------------------------------------
%% parse inputs
[beta,beta_red,f,ng,verbose,epsilon,QualMeasOpts,nonneg,gpuids,redundancy_weights,gt]=parse_inputs(proj,geo,angles,varargin);
measurequality=~isempty(QualMeasOpts) | ~any(isnan(gt(:)));
if ~any(isnan(gt(:)))
QualMeasOpts{end+1}='error_norm';
res_prev=gt;
clear gt
end
if nargout<2 && measurequality
    warning("Image metrics requested but none caught as output. Call the algorithm with 2 outputs to store them")
    measurequality=false;
end
qualMeasOut=zeros(length(QualMeasOpts),maxiter);
% does detector rotation exists?
if ~isfield(geo,'rotDetector')
geo.rotDetector=[0;0;0];
end
%% Create weighting matrices for the SART step
% The reason we do this, instead of calling the SART function, is to avoid
% recomputing the weights on every iteration, which would effectively double
% the computational time
% Projection weight, W
geoaux=geo;
geoaux.sVoxel([1 2])=geo.sVoxel([1 2])*1.1; % a bit bigger, to avoid numerical division by zero (small number)
geoaux.sVoxel(3)=max(geo.sDetector(2),geo.sVoxel(3)); % make sure lines are not cropped. One is for when the image is bigger than the detector and vice versa
geoaux.nVoxel=[2,2,2]'; % accurate enough?
geoaux.dVoxel=geoaux.sVoxel./geoaux.nVoxel;
W=Ax(ones(geoaux.nVoxel','single'),geoaux,angles,'Siddon','gpuids',gpuids);
W(W<min(geo.dVoxel)/4)=Inf;
W=1./W;
% Compute V
V=computeV(geo,angles,num2cell(angles),num2cell(1:length(angles)),'gpuids',gpuids);
if redundancy_weights
% Data redundancy weighting, W_r implemented using Wang weighting
% reference: https://iopscience.iop.org/article/10.1088/1361-6560/ac16bc
num_frames = size(proj,3);
W_r = redundancy_weighting(geo);
W_r = repmat(W_r,[1,1,num_frames]);
% disp('Size of redundancy weighting matrix');
% disp(size(W_r));
W = W.*W_r; % include redundancy weighting in W
end
%Initialize image.
%f=zeros(geo.nVoxel','single');
f0=f; % previous-image copy, needed to compute dp_vec on the first iteration
iter=0;
offOrigin=geo.offOrigin;
offDetector=geo.offDetector;
rotDetector=geo.rotDetector;
stop_criteria=0;
DSD=geo.DSD;
DSO=geo.DSO;
%%
while ~stop_criteria %POCS
% If quality is going to be measured, then we need to save previous image
    if measurequality && ~any(strcmp(QualMeasOpts,'error_norm'))
        res_prev = f; % only store if necessary
end
if (iter==0 && verbose==1);tic;end
iter=iter+1;
%Estimation error in the projection domain
est_proj=Ax(f,geo,angles,'interpolated','gpuids',gpuids);
delta_p=im3Dnorm(est_proj-proj,'L2');
%Enforcing ART along all projections if squared delta_p > epsilon
if (delta_p^2)>epsilon
for jj=1:size(angles,2)
if size(offOrigin,2)==size(angles,2)
geo.offOrigin=offOrigin(:,jj);
end
if size(offDetector,2)==size(angles,2)
geo.offDetector=offDetector(:,jj);
end
if size(rotDetector,2)==size(angles,2)
geo.rotDetector=rotDetector(:,jj);
end
if size(DSD,2)==size(angles,2)
geo.DSD=DSD(jj);
end
if size(DSO,2)==size(angles,2)
geo.DSO=DSO(jj);
end
f=f+beta* bsxfun(@times,1./V(:,:,jj),Atb(W(:,:,jj).*(proj(:,:,jj)-Ax(f,geo,angles(:,jj),'gpuids',gpuids)),geo,angles(:,jj),'gpuids',gpuids));
end
end
%Non-negativity projection on all pixels
if nonneg
f=max(f,0);
end
geo.offDetector=offDetector;
geo.offOrigin=offOrigin;
geo.DSD=DSD;
geo.DSO=DSO;
geo.rotDetector=rotDetector;
if measurequality
qualMeasOut(:,iter)=Measure_Quality(res_prev,f,QualMeasOpts);
end
% Compute L2 error of actual image. Ax-b
dd=im3Dnorm(Ax(f,geo,angles,'gpuids',gpuids)-proj,'L2');
% Compute change in the image after last SART iteration
dp_vec=(f-f0);
if iter==1
step=1;
else
step=delta_p/delta_p_first;
end
f0=f;
% TV MINIMIZATION
% =========================================================================
% Call GPU to minimize TV
    f=minimizeTV(f0,step,ng,'gpuids',gpuids); % The MATLAB implementation of these functions is still in the library, but CUDA is used nowadays
% for ii=1:ng
% %delta=-0.00038 for thorax phantom
% df=weighted_gradientTVnorm(f,delta);
% df=df./im3Dnorm(df,'L2');
% f=f-(step.*df);
% end
% Compute change by TV min
dg_vec=(f-f0);
if iter==1
delta_p_first=im3Dnorm((Ax(f0,geo,angles,'interpolated','gpuids',gpuids))-proj,'L2');
end
% Reduce SART step
beta=beta*beta_red;
% Check convergence criteria
% ==========================================================================
%Define c_alpha as in equation 21 in the journal
c=dot(dg_vec(:),dp_vec(:))/(norm(dg_vec(:),2)*norm(dp_vec(:),2));
%This c is examined to see if it is close to -1.0
    if (c<-0.99 && dd<=epsilon) || beta<0.005 || iter>maxiter
if verbose
disp(['Stopping criteria met']);
disp([' c = ' num2str(c)]);
disp([' beta = ' num2str(beta)]);
disp([' iter = ' num2str(iter)]);
end
stop_criteria=true;
end
if (iter==1 && verbose==1)
expected_time=toc*maxiter;
disp('PCSD');
disp(['Expected duration : ',secs2hms(expected_time)]);
disp(['Expected finish time: ',datestr(datetime('now')+seconds(expected_time))]);
disp('');
end
end
end
function [beta,beta_red,f0,ng,verbose,epsilon,QualMeasOpts,nonneg,gpuids,redundancy_weights,gt]=parse_inputs(proj,geo,angles,argin)
opts= {'lambda','lambda_red','init','tviter','verbose','maxl2err','qualmeas','nonneg','gpuids','redundancy_weighting','groundtruth'};
defaults=ones(length(opts),1);
% Check inputs
nVarargs = length(argin);
if mod(nVarargs,2)
error('CBCT:PCSD:InvalidInput','Invalid number of inputs')
end
% check if option has been passed as input
for ii=1:2:nVarargs
ind=find(ismember(opts,lower(argin{ii})));
if ~isempty(ind)
defaults(ind)=0;
else
error('CBCT:PCSD:InvalidInput',['Optional parameter "' argin{ii} '" does not exist' ]);
end
end
for ii=1:length(opts)
opt=opts{ii};
default=defaults(ii);
    % if an option is not default, then extract its value from the input
if default==0
ind=double.empty(0,1);jj=1;
while isempty(ind)
ind=find(isequal(opt,lower(argin{jj})));
jj=jj+1;
end
if isempty(ind)
error('CBCT:PCSD:InvalidInput',['Optional parameter "' argin{jj} '" does not exist' ]);
end
val=argin{jj};
end
% parse inputs
switch opt
% Verbose
% =========================================================================
case 'verbose'
if default
verbose=1;
else
verbose=val;
end
if ~is2014bOrNewer
warning('Verbose mode not available for older versions than MATLAB R2014b');
verbose=false;
end
% Lambda
% =========================================================================
case 'lambda'
if default
beta=1;
else
if length(val)>1 || ~isnumeric( val)
error('TIGRE:PCSD:InvalidInput','Invalid lambda')
end
beta=val;
end
% Lambda reduction
% =========================================================================
case 'lambda_red'
if default
beta_red=0.99;
else
if length(val)>1 || ~isnumeric( val)
error('TIGRE:PCSD:InvalidInput','Invalid lambda')
end
beta_red=val;
end
% Initial image
% =========================================================================
case 'init'
if default || strcmp(val,'none')
f0=zeros(geo.nVoxel','single');
else
if strcmp(val,'FDK')
f0=FDK(proj, geo, angles);
else
error('TIGRE:PCSD:InvalidInput','Invalid init')
end
end
% Number of iterations of TV
% =========================================================================
case 'tviter'
if default
ng=20;
else
ng=val;
end
% Maximum L2 error to have a "good image"
% =========================================================================
case 'maxl2err'
if default
epsilon=im3Dnorm(FDK(proj,geo,angles))*0.2; %heuristic
else
epsilon=val;
end
%Image Quality Measure
% =========================================================================
case 'qualmeas'
if default
QualMeasOpts={};
else
if iscellstr(val)
QualMeasOpts=val;
else
error('TIGRE:PCSD:InvalidInput','Invalid quality measurement parameters');
end
end
% Non negative
% =========================================================================
case 'nonneg'
if default
nonneg=true;
else
nonneg=val;
end
% GPU Ids
% =========================================================================
case 'gpuids'
if default
gpuids = GpuIds();
else
gpuids = val;
end
case 'redundancy_weighting'
if default
redundancy_weights = true;
else
redundancy_weights = val;
            end
        % Ground truth image (used for quality measures)
        % =========================================================================
        case 'groundtruth'
            if default
                gt=nan;
            else
                gt=val;
            end
        otherwise
error('TIGRE:PCSD:InvalidInput',['Invalid input name:', num2str(opt),'\n No such option in PCSD()']);
end
end
end
|
{"author": "CERN", "repo": "TIGRE", "sha": "8df632662228d1b1c52afd95c90d0f7a9f8dc4b3", "save_path": "github-repos/MATLAB/CERN-TIGRE", "path": "github-repos/MATLAB/CERN-TIGRE/TIGRE-8df632662228d1b1c52afd95c90d0f7a9f8dc4b3/MATLAB/Algorithms/PCSD.m"}
|
[STATEMENT]
lemma start_end_implies_terminating:
assumes "has_start_points x"
and "has_end_points x"
shows "terminating x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. terminating x
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
has_start_points x
has_end_points x
goal (1 subgoal):
1. terminating x
[PROOF STEP]
by simp
|
{"llama_tokens": 141, "file": "Relational_Paths_Paths", "length": 2}
|
## testing function (for notebooks e.g.)
function __plot_check(dfcart,plotdir,plotfile, showplot=true)
cart= DataFrame(X=dfcart.data[1,:], Y=dfcart.data[2,:], Z=dfcart.data[3,:])
println("## check plot subtraction ...")
PyPlot.plt.figure(figsize=(9.0,8.0))
PyPlot.plt.subplot(1, 1, 1 , xlim=[-100,100])
PyPlot.plt.scatter(cart.Y, cart.X, s = 0.1 )
PyPlot.plt.xlabel("Y (pc)")
PyPlot.plt.ylabel("X (pc)")
PyPlot.plt.grid(true)
PyPlot.plt.savefig(plotdir*"/"*plotfile)
if showplot PyPlot.plt.show() end
end
function __plot_nstars(nstarh,plotfile="test-stats-votable.png", plotdir= ".")
println("## plotting distribution...")
PyPlot.plt.figure(figsize=(9.0,8.0))
PyPlot.plt.subplot(1, 1, 1 )
nbins = 50
PyPlot.plt.hist(nstarh,nbins, range = [0,3e5], color = "g", alpha=0.8 , label = "Votable stars",density=false)
PyPlot.plt.xlabel("Stars")
PyPlot.plt.ylabel("N")
PyPlot.plt.grid(true)
PyPlot.plt.savefig(plotdir*"/"*plotfile)
PyPlot.plt.show()
end
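## Hedged usage sketch (not part of the original file). The container type for
## `dfcart` below is an assumption: __plot_check only reads a 3xN matrix (rows
## X, Y, Z in pc) from its `data` field, so any object shaped like this works.
# struct CartSample
#     data::Matrix{Float64}
# end
# cart_sample = CartSample(randn(3, 1000) .* 30.0)
# __plot_check(cart_sample, ".", "check-plot.png", false)
# __plot_nstars(rand(1:300_000, 500), "nstars-hist.png", ".")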
|
{"hexsha": "8898ddea603d38d60f53559b05a388c7870e4a83", "size": 1032, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/testing.jl", "max_stars_repo_name": "bosscha/gaia-shock", "max_stars_repo_head_hexsha": "61327854c998651e16a9a020a6008439e2217620", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-08-31T09:56:38.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-31T09:56:38.000Z", "max_issues_repo_path": "src/testing.jl", "max_issues_repo_name": "bosscha/gaia-shock", "max_issues_repo_head_hexsha": "61327854c998651e16a9a020a6008439e2217620", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2018-11-10T12:07:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-27T11:06:25.000Z", "max_forks_repo_path": "src/testing.jl", "max_forks_repo_name": "bosscha/gaia-shock", "max_forks_repo_head_hexsha": "61327854c998651e16a9a020a6008439e2217620", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-08-22T08:54:30.000Z", "max_forks_repo_forks_event_max_datetime": "2018-08-22T08:54:30.000Z", "avg_line_length": 33.2903225806, "max_line_length": 115, "alphanum_fraction": 0.6492248062, "num_tokens": 341}
|
import pytest
from cfl.density_estimation_methods.condExpMod import CondExpMod
import tensorflow as tf
# tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import visual_bars.generate_visual_bars_data as vbd
from cfl.dataset import Dataset
import os
import numpy as np
############################### SETUP #################################
# helper functions
def get_data_helper(n_samples):
im_shape = (10, 10)
noise_lvl= 0.03
set_seed = 180
# create visual bars data
vb_data = vbd.VisualBarsData( n_samples=n_samples,
im_shape = im_shape,
noise_lvl=noise_lvl,
set_random_seed=set_seed)
# retrieve the images and the target
X = vb_data.getImages()
Y = vb_data.getTarget()
X = np.reshape(X, (X.shape[0], X.shape[1]*X.shape[2]))
Y = np.expand_dims(Y, -1)
assert X.shape == (n_samples,100), 'X data shape is incorrect: {}'.format(X.shape)
assert Y.shape == (n_samples, 1), 'Y data shape is incorrect {}'.format(Y.shape)
return X, Y
# parameters to use across tests
N_TRAIN = 1000
N_PRED = 100
TR_SPLIT = 750
TS_SPLIT = N_TRAIN - TR_SPLIT
X_DIM = 100
Y_DIM = 1
WEIGHTS_PATH = 'testing/test_results/test_condExpBase_resources/experiment0000/dataset0/checkpoints/best_weights'
DATA_INFO = { 'X_dims' : (N_TRAIN,X_DIM),
'Y_dims' : (N_TRAIN,Y_DIM),
'Y_type' : 'categorical' }
CDE_PARAMS = { 'batch_size' : 32,
'optimizer' : 'adam',
'n_epochs' : 30,
'verbose' : 0,
'opt_config' : {'lr': 1e-3},
'dense_units' : [20, DATA_INFO['Y_dims'][1]],
'activations' : ['relu', 'sigmoid'],
'dropouts' : [0.2, 0],
'show_plot' : False }
CDE_PARAMS_WP = CDE_PARAMS.copy()
CDE_PARAMS_WP['weights_path'] = WEIGHTS_PATH
# generate results to test when weights_path is not supplied
X, Y = get_data_helper(N_TRAIN)
dtrain = Dataset(X, Y, name='dtrain')
ceb_obj = CondExpMod( data_info=DATA_INFO,
params=CDE_PARAMS
)
results_dict = ceb_obj.train(dataset=dtrain)
tr_loss = results_dict['train_loss']
ts_loss = results_dict['val_loss']  # TODO: see the val_loss/test_loss question below
dtest = Dataset(X[:N_PRED,:], Y[:N_PRED,:], name='dtest')
pred = ceb_obj.predict(dtest)['pyx']  # index into 'pyx' so pred.shape can be checked, consistent with pred_wp below
# generate results to test when weights_path is supplied
dtrain_wp = Dataset(X, Y, name='dtrain_wp')
ceb_obj_wp = CondExpMod( data_info=DATA_INFO,
params=CDE_PARAMS_WP
)
results_dict = ceb_obj_wp.train(dataset=dtrain_wp)
tr_loss_wp = results_dict['train_loss']
ts_loss_wp = results_dict['val_loss']  # TODO (Jenna): changed this, but not sure whether the test loss should be the same as the validation loss
dtest_wp = Dataset(X[:N_PRED,:], Y[:N_PRED,:], name='dtest_wp')
pred_wp = ceb_obj_wp.predict(dtest_wp)['pyx']  # use the weights-path model for the weights-path prediction
############################### TESTS #################################
def test_init():
''' tests the following:
- was the model successfully built?
- since no weights_path was specified, model should be untrained
'''
ceb_obj_tmp = CondExpMod( data_info=DATA_INFO,
params=CDE_PARAMS
)
assert ceb_obj_tmp.trained==False, "No weights_path was specified, so model shouldn't be trained yet."
assert ceb_obj_tmp.weights_loaded==False, "No weights_path was specified, so self.weights_loaded should be false."
def test_init_wp():
''' tests the following:
- was the model successfully built?
        - since weights_path was specified, model should already be trained
'''
ceb_obj_tmp = CondExpMod( data_info=DATA_INFO,
params=CDE_PARAMS_WP
)
assert ceb_obj_tmp.trained==True, "Since weights_path was supplied, model is already trained."
assert ceb_obj_tmp.weights_loaded==True, "Since weights_path was supplied, weights_loaded should be true."
def test_train_test_split():
''' tests the following:
- train-test-split is the right shape
'''
assert dtrain.split_data[0].shape==(TR_SPLIT, X_DIM), 'Xtr shape is incorrect'
assert dtrain.split_data[1].shape==(TS_SPLIT, X_DIM), 'Xts shape is incorrect'
assert dtrain.split_data[2].shape==(TR_SPLIT, Y_DIM), 'Ytr shape is incorrect'
assert dtrain.split_data[3].shape==(TS_SPLIT, Y_DIM), 'Yts shape is incorrect'
def test_train():
''' tests the following:
- train loss is right shape
- test loss is right shape
- model.trained is true
'''
assert len(tr_loss)==CDE_PARAMS['n_epochs'], 'tr_loss shape is incorrect'
assert len(ts_loss)==CDE_PARAMS['n_epochs'], 'ts_loss shape is incorrect'
    assert ceb_obj.trained, 'Model was trained but model.trained is false.'
def test_train_wp():
''' tests the following:
- whether train() will just return [],[] because model does not require training.
'''
assert tr_loss_wp==[], 'tr_loss was not [] when weights_path was supplied: {}'.format(tr_loss_wp)
assert ts_loss_wp==[], 'ts_loss was not [] when weights_path was supplied: {}'.format(ts_loss_wp)
def test_predict():
''' tests the following:
- prediction is correct size when weights_path not used
'''
assert pred.shape==(N_PRED, Y_DIM), 'Prediction size incorrect'
def test_predict_wp():
''' tests the following:
- prediction is correct size when weights_path used
'''
assert pred_wp.shape==(N_PRED, Y_DIM), 'Prediction size incorrect'
def test_evaluate():
''' tests the following:
- eval function runs
'''
ceb_obj.evaluate(dtest)
def test_load_parameters():
''' tests the following:
- self.trained is true after loading parameters
'''
ceb_obj_tmp = CondExpMod( data_info=DATA_INFO,
params=CDE_PARAMS
)
ceb_obj_tmp.load_parameters(WEIGHTS_PATH)
assert ceb_obj_tmp.trained, 'Parameters loaded but self.trained is false.'
def test_save_parameters():
''' tests the following:
- file exists at file_path after saving parameters
'''
ceb_obj_tmp = CondExpMod( data_info=DATA_INFO,
params=CDE_PARAMS
)
ceb_obj_tmp.load_parameters(WEIGHTS_PATH)
new_path = 'testing/test_results/tmp_weights.h5'
ceb_obj_tmp.save_parameters(new_path)
assert os.path.exists(new_path), 'File for saved parameters does not exist.'
os.remove(new_path)
def test_check_save_model_params():
''' tests the following:
- all keys in self.default_params show up in self.params
'''
ceb_obj_tmp = CondExpMod( data_info=DATA_INFO,
params=CDE_PARAMS
)
assert set(ceb_obj_tmp.default_params.keys())==set(ceb_obj_tmp.params.keys()), \
'self.model_params keys do not match self.default_params keys.'
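# --- Hedged usage note (not part of the original tests) ---
# The module-level setup above trains models at import time, so the suite is
# normally run as a whole, e.g. (invocation is illustrative; adjust to the
# repo layout):
#   pytest testing/test_condExpBase.py -q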
|
{"hexsha": "b494882e0a6c6e7c4f37f9834831d3cbf83f95b9", "size": 7156, "ext": "py", "lang": "Python", "max_stars_repo_path": "testing/test_condExpBase.py", "max_stars_repo_name": "eberharf/cfl", "max_stars_repo_head_hexsha": "077b99a05824f1371ac47d76dfed6bb160222668", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-01-09T04:46:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T22:27:13.000Z", "max_issues_repo_path": "testing/test_condExpBase.py", "max_issues_repo_name": "eberharf/cfl", "max_issues_repo_head_hexsha": "077b99a05824f1371ac47d76dfed6bb160222668", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2021-01-11T16:32:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-19T13:21:30.000Z", "max_forks_repo_path": "testing/test_condExpBase.py", "max_forks_repo_name": "eberharf/cfl", "max_forks_repo_head_hexsha": "077b99a05824f1371ac47d76dfed6bb160222668", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0784313725, "max_line_length": 138, "alphanum_fraction": 0.6274455003, "include": true, "reason": "import numpy", "num_tokens": 1689}
|
import numpy as np
from deformations.utility.mesh3d import mesh3d
from deformations.utility.bernstein import get_bernstein_polynomial
def get_min_max(x, *args, **kwargs):
return np.min(x, *args, **kwargs), np.max(x, *args, **kwargs)
def stu_to_xyz(stu_points, stu_origin, stu_axes):
return stu_origin + stu_points*stu_axes
def get_stu_control_points(dims):
stu_lattice = mesh3d(*(np.linspace(0, 1, d+1) for d in dims),
dtype=np.float32)
stu_points = np.reshape(stu_lattice, (-1, 3))
return stu_points
def get_control_points(dims, stu_origin, stu_axes):
stu_points = get_stu_control_points(dims)
xyz_points = stu_to_xyz(stu_points, stu_origin, stu_axes)
return xyz_points
def xyz_to_stu(xyz, origin, stu_axes):
if stu_axes.shape == (3,):
stu_axes = np.diag(stu_axes)
assert(stu_axes.shape == (3, 3))
s, t, u = stu_axes
tu = np.cross(t, u)
su = np.cross(s, u)
st = np.cross(s, t)
diff = xyz - origin
stu = np.stack([
np.dot(diff, tu) / np.dot(s, tu),
np.dot(diff, su) / np.dot(t, su),
np.dot(diff, st) / np.dot(u, st)
], axis=-1)
return stu
def get_stu_params(xyz):
minimum, maximum = get_min_max(xyz, axis=0)
stu_origin = minimum
stu_axes = maximum - minimum
return stu_origin, stu_axes
def get_stu_deformation_matrix(stu, dims):
    """Builds the FFD deformation matrix for points given in stu coordinates.
    v has shape (l+1, m+1, n+1, 3): all possible (i, j, k) control-point index
    combinations, with the last axis holding the indices for the three stu
    dimensions."""
v = mesh3d(*(np.arange(0, d+1, dtype=np.int32) for d in dims),
dtype=np.int32)
v = np.reshape(v, (-1, 3))
weights = get_bernstein_polynomial(n=np.array(dims, dtype=np.int32),
v=v,
x=np.expand_dims(stu, axis=-2))
b = np.prod(weights, axis=-1)
return b
def get_deformation_matrix(xyz, dims, stu_origin, stu_axis):
stu = xyz_to_stu(xyz, stu_origin, stu_axis)
return get_stu_deformation_matrix(stu, dims)
def get_ffd(xyz, dims, stu_origin=None, stu_axis=None):
if stu_origin is None or stu_axis is None:
print("Generating origin and axis")
stu_origin, stu_axis = get_stu_params(xyz)
b = get_deformation_matrix(xyz, dims, stu_origin, stu_axis)
p = get_control_points(dims, stu_origin, stu_axis)
return b, p
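# --- Hedged usage sketch (not part of the original module) ---
# Because the trivariate Bernstein basis has linear precision, the deformation
# matrix b applied to the undeformed control points p reproduces the input
# cloud (b @ p equals xyz up to floating-point error); displacing the control
# points then deforms the cloud smoothly. The point cloud below is synthetic
# and purely illustrative.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    xyz = rng.random((500, 3)).astype(np.float32)
    dims = (2, 2, 2)  # lattice resolution per axis -> (2+1)**3 = 27 control points
    b, p = get_ffd(xyz, dims)  # b: (500, 27), p: (27, 3)
    assert np.allclose(b @ p, xyz, atol=1e-4)
    p_deformed = p + rng.normal(scale=0.05, size=p.shape)
    xyz_deformed = b @ p_deformed  # smoothly deformed copy of the cloud
    print("max reconstruction error:", np.abs(b @ p - xyz).max())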
|
{"hexsha": "fd4624f972eff8e01efaf0e95613b6060436638e", "size": 2393, "ext": "py", "lang": "Python", "max_stars_repo_path": "implementation/deformations/utility/deform.py", "max_stars_repo_name": "saurabbhsp/mesh-3d-reconstruction", "max_stars_repo_head_hexsha": "c52312bce7e3121643189f6b67192ffe28b08565", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "implementation/deformations/utility/deform.py", "max_issues_repo_name": "saurabbhsp/mesh-3d-reconstruction", "max_issues_repo_head_hexsha": "c52312bce7e3121643189f6b67192ffe28b08565", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "implementation/deformations/utility/deform.py", "max_forks_repo_name": "saurabbhsp/mesh-3d-reconstruction", "max_forks_repo_head_hexsha": "c52312bce7e3121643189f6b67192ffe28b08565", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2911392405, "max_line_length": 72, "alphanum_fraction": 0.63602173, "include": true, "reason": "import numpy", "num_tokens": 684}
|
import Data.Vect
-- Exercise 1
myPlusCommutes : (n : Nat) -> (m : Nat) -> n + m = m + n
myPlusCommutes Z m = rewrite plusZeroRightNeutral m in Refl
myPlusCommutes (S k) m = rewrite myPlusCommutes k m in
rewrite plusSuccRightSucc m k in Refl
-- Exercise 2
reverseProof_nil : (acc : Vect n1 a) -> Vect (plus n1 0) a
reverseProof_nil {n1} acc = rewrite plusZeroRightNeutral n1 in acc
reverseProof_xs : Vect (S n + k) a -> Vect (plus n (S k)) a
reverseProof_xs {n} {k} xs = rewrite sym (plusSuccRightSucc n k) in xs
myReverse : Vect n a -> Vect n a
myReverse xs = reverse' [] xs
where
reverse' : Vect n a -> Vect m a -> Vect (n + m) a
reverse' acc [] = reverseProof_nil acc
reverse' acc (x :: xs) = reverseProof_xs (reverse' (x::acc) xs)
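-- Hedged usage sketch (not part of the exercises): since myReverse preserves
-- the length index n, its result can be used wherever a Vect n a of the same
-- length is expected, e.g.
-- reverseExample : Vect 4 Int
-- reverseExample = myReverse [1, 2, 3, 4]   -- evaluates to [4, 3, 2, 1]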
|
{"hexsha": "9926e25b80dc673445dabbadb28dddda0f881ae9", "size": 778, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "TypeDrivenDevelopment/Chapter08/Exercises/ex_8_2.idr", "max_stars_repo_name": "lambdaxymox/type-driven-development-with-idris", "max_stars_repo_head_hexsha": "e5e55715cd7418f3e6fab8e5658d7518da3fdce7", "max_stars_repo_licenses": ["Apache-2.0", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TypeDrivenDevelopment/Chapter08/Exercises/ex_8_2.idr", "max_issues_repo_name": "lambdaxymox/type-driven-development-with-idris", "max_issues_repo_head_hexsha": "e5e55715cd7418f3e6fab8e5658d7518da3fdce7", "max_issues_repo_licenses": ["Apache-2.0", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TypeDrivenDevelopment/Chapter08/Exercises/ex_8_2.idr", "max_forks_repo_name": "lambdaxymox/type-driven-development-with-idris", "max_forks_repo_head_hexsha": "e5e55715cd7418f3e6fab8e5658d7518da3fdce7", "max_forks_repo_licenses": ["Apache-2.0", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8260869565, "max_line_length": 70, "alphanum_fraction": 0.64781491, "num_tokens": 258}
|