text_prompt
stringlengths 168
30.3k
| code_prompt
stringlengths 67
124k
|
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We are going to apply this technique to a database of wine samples. The inputs are 13 chemical descriptors, the output is the index of its class (cheap, ok, good). In principle we do not know the output.
Step2: The transform had no idea about the output classes, and still three clusters of points can be seen. We can overlay the knowledge of correct classification to check if the clusters correspond to what we know
Step3: Exercises
Step4: Plot the t-SNE projection of MBTR output and see if you can see the two classes of structures accurately
Step5: Plot the original MBTR descriptors and see if the structural differences are visible there
Step6: Try changing the MBTR and t-SNE parameters and see how the projection changes
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import numpy
import pickle
from dscribe.descriptors import MBTR
from visualise import view
dataIn = numpy.genfromtxt('./data/wineInputs.txt', delimiter=',')
dataOut = numpy.genfromtxt('./data/wineOutputs.txt', delimiter=',')
# find indexes of wines for each class
idx1 = numpy.where(dataOut==1)
idx2 = numpy.where(dataOut==2)
idx3 = numpy.where(dataOut==3)
# compute the tSNE transformation of the inputs in 2 dimensions
comp = TSNE(n_components=2).fit_transform(dataIn)
# plot the resulting 2D points
plt.plot(comp[:,0],comp[:,1],'ro')
plt.xlabel('X1')
plt.ylabel('X2')
plt.show()
plt.plot(comp[idx1,0],comp[idx1,1],'go')
plt.plot(comp[idx2,0],comp[idx2,1],'ro')
plt.plot(comp[idx3,0],comp[idx3,1],'bo')
plt.xlabel('X1')
plt.ylabel('X2')
plt.show()
import ase.io
# load the database
samples = ase.io.read("data/clusters.extxyz", index=':')
# samples is now a list of ASE Atoms objects, ready to use!
# the first 55 clusters are FCC, the last 55 are BCC
# define MBTR setup
mbtr = MBTR(
species=["Fe"],
periodic=False,
k2={
"geometry": {"function": "distance"},
"grid": { "min": 0, "max": 2, "sigma": 0.01, "n": 200 },
"weighting": {"function": "exp", "scale": 0.4, "cutoff": 1e-2}
},
k3={
"geometry": {"function": "cosine"},
"grid": { "min": -1.0, "max": 1.0, "sigma": 0.02, "n": 200 },
"weighting": {"function": "exp", "scale": 0.4, "cutoff": 1e-2}
},
flatten=True,
sparse=False,
)
# calculate MBTR descriptor for each sample - takes a few secs
mbtrs = mbtr.create(samples)
print(mbtrs.shape)
# ...
# ...
# ...
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup
Step2: Directories
Step3: Extract list of files
Step4: Import raw data and perform preprocessing
Step5: Example alignment
Step6: Process remaining samples
Step7: Wrapping up
|
<ASSISTANT_TASK:>
Python Code:
import deltascope.alignment as ut
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import h5py
import os
import re
import time
import tqdm
# --------------------------------
# -------- User input ------------
# --------------------------------
param = {
'gthresh':0.5,
'scale':[1,1,1],
'microns':[0.16,0.16,0.21],
'mthresh':0.5,
'radius':10,
'comp_order':[0,2,1],
'fit_dim':['x','z'],
'deg':2,
# Don't forget to modify this with a specific sample name
'expname':'expname'
}
# --------------------------------
# -------- User input ------------
# --------------------------------
# Specify file paths to directories containing probability files
# after processing by ilastik.
# Raw strings: bare backslashes in ordinary string literals are invalid
# escape sequences (SyntaxWarning in Python 3.12+, error in the future);
# the raw-string form has the exact same value.
gfap = os.path.abspath(r'..\expname\GFAP\Prob')
at = os.path.abspath(r'..\expname\AT\Prob')
# Specify root directory where output will be saved
root = os.path.abspath(r'..\expname')
# Output directory with timestamp, e.g. Output_05-17-13-45
outname = 'Output_'+time.strftime("%m-%d-%H-%M",
        time.localtime())
# Create output directory
outdir = os.path.join(root,outname)
os.mkdir(outdir)
Dat = {}
for f in os.listdir(at):
if 'h5' in f:
num = re.findall(r'\d+',f.split('.')[0])[-1]
Dat[num] = os.path.join(at,f)
Dzrf = {}
for f in os.listdir(gfap):
if 'h5' in f:
num = re.findall(r'\d+',f.split('.')[0])[-1]
Dzrf[num] = os.path.join(gfap,f)
# Extract list of filename keys
klist = Dat.keys()
# Create dictionaries to contain the deltascope brain object for each sample
Dbat = {}
Dbzrf = {}
%%time
for k in tqdm.tqdm(klist):
if k not in list(Dbat.keys()):
Dbat[k] = ut.preprocess(Dat[k],param)
Dbzrf[k] = ut.preprocess(Dzrf[k],param,pca=Dbat[k].pcamed,
mm=Dbat[k].mm,vertex=Dbat[k].vertex)
else:
print(k,'already processed')
''' Define wrapper functions for starting and saving to minimize the number
of inputs that the user needs to type for each call of the function.'''
def start(k):
    """Wrapper around ut.start that fixes the module-level data dicts.

    k: sample key into the module-level Dbat/Dbzrf dictionaries.
    Returns whatever ut.start returns -- presumably an interactive
    alignment session/figure for sample k (confirm against the
    deltascope.alignment documentation).
    """
    return(ut.start(k,Dbat,[Dbzrf],im=True))
def save_both(k,dfa,dfb):
    """Save both aligned dataframes for sample `k` into the output directory.

    k:   sample key (used in the output file names).
    dfa: aligned AT dataframe.
    dfb: aligned GFAP (zrf) dataframe.

    Fix: `param` is a plain dict, so the experiment name must be read with
    subscript access -- `param.expname` raised AttributeError.
    """
    ut.save_both(k,dfa,dfb,outdir,param['expname'])
'''Save model parameters for each file to a dataframe that can be
exported for later reference.'''
model = pd.DataFrame({'a':[],'b':[],'c':[]})
def save_model(k,mm,model):
    """Record the quadratic fit coefficients `mm` = (a, b, c) for sample `k`
    as a new row of `model` and return the resulting dataframe.

    Uses pd.concat because DataFrame.append was deprecated in pandas 1.4
    and removed in pandas 2.0; the returned dataframe is equivalent.
    """
    row = pd.Series({'a':mm[0],'b':mm[1],'c':mm[2]},name=k)
    return(pd.concat([model, row.to_frame().T]))
'''Define a function that can both fit a model and plot it on an existing plot'''
def fit_model(axi,df,mm=None):
    """Fit (or reuse) a quadratic model z = f(x) and draw it on axis `axi`.

    axi: matplotlib axis (anything with a .plot method) to draw on.
    df:  object with 'x' and 'z' columns/attributes.
    mm:  optional polynomial coefficients (as from np.polyfit); fitted from
         df when None.
    Returns the coefficient array.

    Fix: use an identity check against None -- `mm == None` performs an
    elementwise comparison (and raises "truth value is ambiguous") when mm
    is the numpy array that np.polyfit returns.
    """
    if mm is None:
        mm = np.polyfit(df.x, df.z, 2)
    p = np.poly1d(mm)
    xrange = np.arange(np.min(df.x), np.max(df.x))
    axi.plot(xrange, p(xrange), c='m')
    return(mm)
'''Take a set of points and transform to a dataframe format for ease of access.'''
def pick_pts(x1,z1,vx,vz,x2,z2):
    """Collect three picked (x, z) points -- two endpoints and the vertex --
    into a dataframe for convenient column access."""
    xs = [x1, vx, x2]
    zs = [z1, vz, z2]
    return pd.DataFrame({'x': xs, 'z': zs})
klist
model.to_csv(os.path.join(outdir,'model.csv'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Spearman correlation
Step2: correlation
Step3: OHVD3
Step4: OHV1D3
Step5: OHV24D3
Step6: ratio_activation
Step7: ratio_catabolism
Step8: Lasso Regression
Step9: OHVD3
Step10: OHV1D3
Step11: OHV24D3
Step12: ratio_activation
Step13: ratio_catabolism
|
<ASSISTANT_TASK:>
Python Code:
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
from scipy.stats import spearmanr, pearsonr
from statsmodels.sandbox.stats.multicomp import multipletests
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LassoLarsCV
from sklearn.preprocessing import StandardScaler
import matplotlib.pylab as plt
import seaborn as sns
%matplotlib inline
mf = pd.read_csv('../data/mapping_cleaned_MrOS.txt', sep='\t', dtype=str, index_col='#SampleID')
bt = pd.read_csv('../data/biomtable.txt', sep='\t', dtype=str, index_col='#OTU ID')
bt = bt.transpose()
print(mf.shape, bt.shape) # bt has an additional row of 'taxonomy'
mf.head()
bt.head()
dat = pd.merge(mf, bt, left_index=True, right_index=True)
dat.shape
dat.head()
vars_vd = np.array(['OHVD3', 'OHV1D3', 'OHV24D3', 'ratio_activation', 'ratio_catabolism'])
dat[vars_vd] = dat[vars_vd].apply(pd.to_numeric, errors='coerce')
dat[vars_vd].describe()
otu_cols = dat.columns[mf.shape[1]:dat.shape[1]]
len(otu_cols)
results= []
i = 0
for j in range(len(otu_cols)):
tmp = dat[[vars_vd[i], otu_cols[j]]].dropna(axis=0, how='any')
rho, pval = spearmanr(tmp[vars_vd[i]], tmp[otu_cols[j]])
tax = bt.loc['taxonomy'][otu_cols[j]]
results.append([vars_vd[i], otu_cols[j], tax, rho, pval])
# output table
results = pd.DataFrame(results, columns=['vars', 'otu',
'taxonomy', 'rho', 'pval']).dropna(axis=0, how='any')
results['fdr pval'] = multipletests(results['pval'], method = 'fdr_bh')[1]
results = results.sort_values(['fdr pval'], ascending=True)
# specific bacteria
index = results.loc[results['fdr pval'] <= 0.05].index
for i in range(len(index)):
print(results.taxonomy[index[i]], results['fdr pval'][index[i]])
# check
results.head(5)
results_spearman_OHV1D3 = []
i = 1
for j in range(len(otu_cols)):
tmp = dat[[vars_vd[i], otu_cols[j]]].dropna(axis=0, how='any')
rho, pval = spearmanr(tmp[vars_vd[i]], tmp[otu_cols[j]])
tax = bt.loc['taxonomy'][otu_cols[j]]
results_spearman_OHV1D3.append([vars_vd[i], otu_cols[j], tax, rho, pval])
# output table
results_spearman_OHV1D3 = pd.DataFrame(results_spearman_OHV1D3, columns=['vars', 'otu', 'taxonomy', 'rho',
'pval']).dropna(axis=0, how='any')
results_spearman_OHV1D3['fdr pval'] = multipletests(results_spearman_OHV1D3['pval'], method = 'fdr_bh')[1]
results_spearman_OHV1D3 = results_spearman_OHV1D3.sort_values(['fdr pval'], ascending=True)
# specific bacteria
index_OHV1D3 = results_spearman_OHV1D3.loc[results_spearman_OHV1D3['fdr pval'] <= 0.05].index
for i in range(len(index_OHV1D3)):
print(results_spearman_OHV1D3.taxonomy[index_OHV1D3[i]],
results_spearman_OHV1D3['rho'][index_OHV1D3[i]],
results_spearman_OHV1D3['fdr pval'][index_OHV1D3[i]])
# check
results_spearman_OHV1D3.head(10)
results= []
i = 2
for j in range(len(otu_cols)):
tmp = dat[[vars_vd[i], otu_cols[j]]].dropna(axis=0, how='any')
rho, pval = spearmanr(tmp[vars_vd[i]], tmp[otu_cols[j]])
tax = bt.loc['taxonomy'][otu_cols[j]]
results.append([vars_vd[i], otu_cols[j], tax, rho, pval])
# output table
results = pd.DataFrame(results, columns=['vars', 'otu',
'taxonomy', 'rho', 'pval']).dropna(axis=0, how='any')
results['fdr pval'] = multipletests(results['pval'], method = 'fdr_bh')[1]
results = results.sort_values(['fdr pval'], ascending=True)
# specific bacteria
index = results.loc[results['fdr pval'] <= 0.05].index
for i in range(len(index)):
print(results.taxonomy[index[i]],
results['rho'][index[i]],
results['fdr pval'][index[i]])
# check
results.head(3)
results= []
i = 3
for j in range(len(otu_cols)):
tmp = dat[[vars_vd[i], otu_cols[j]]].dropna(axis=0, how='any')
rho, pval = spearmanr(tmp[vars_vd[i]], tmp[otu_cols[j]])
tax = bt.loc['taxonomy'][otu_cols[j]]
results.append([vars_vd[i], otu_cols[j], tax, rho, pval])
# output table
results = pd.DataFrame(results, columns=['vars', 'otu',
'taxonomy', 'rho', 'pval']).dropna(axis=0, how='any')
results['fdr pval'] = multipletests(results['pval'], method = 'fdr_bh')[1]
results = results.sort_values(['fdr pval'], ascending=True)
# specific bacteria
index = results.loc[results['fdr pval'] <= 0.05].index
for i in range(len(index)):
print(results.taxonomy[index[i]],
results['rho'][index[i]],
results['fdr pval'][index[i]])
# store result for future check with lasso
results_spearman_activation = results
# check
results.head(10)
results= []
i = 4
for j in range(len(otu_cols)):
tmp = dat[[vars_vd[i], otu_cols[j]]].dropna(axis=0, how='any')
rho, pval = spearmanr(tmp[vars_vd[i]], tmp[otu_cols[j]])
tax = bt.loc['taxonomy'][otu_cols[j]]
results.append([vars_vd[i], otu_cols[j], tax, rho, pval])
# output table
results = pd.DataFrame(results, columns=['vars', 'otu',
'taxonomy', 'rho', 'pval']).dropna(axis=0, how='any')
results['fdr pval'] = multipletests(results['pval'], method = 'fdr_bh')[1]
results = results.sort_values(['fdr pval'], ascending=True)
# specific bacteria
index = results.loc[results['fdr pval'] <= 0.05].index
for i in range(len(index)):
print(results.taxonomy[index[i]],
results['fdr pval'][index[i]])
# check
results.head(3)
tmp = dat[np.append(vars_vd, otu_cols)].dropna()
print(tmp.shape)
tmp.head()
X = tmp[otu_cols]
y = tmp[vars_vd[0]]
# split data into train and test sets
pred_train, pred_test, tar_train, tar_test = train_test_split(X, y, test_size=.3, random_state=123)
# specify the lasso regression model
model=LassoLarsCV(cv=10, precompute=True).fit(pred_train,tar_train)
np.sum(model.coef_)
X = tmp[otu_cols]
y = tmp[vars_vd[0]]
# split data into train and test sets
pred_train, pred_test, tar_train, tar_test = train_test_split(X, y, test_size=.3, random_state=123)
# specify the lasso regression model
model=LassoLarsCV(cv=10, precompute=True).fit(pred_train,tar_train)
np.sum(model.coef_)
X = tmp[otu_cols]
y = tmp[vars_vd[0]]
# split data into train and test sets
pred_train, pred_test, tar_train, tar_test = train_test_split(X, y, test_size=.3, random_state=123)
# specify the lasso regression model
model=LassoLarsCV(cv=10, precompute=True).fit(pred_train,tar_train)
np.sum(model.coef_)
X = tmp[otu_cols]
# split data into train and test sets
pred_train, pred_test, tar_train, tar_test = train_test_split(X, y, test_size=.3, random_state=123)
# specify the lasso regression model
model=LassoLarsCV(cv=20, precompute=True).fit(pred_train,tar_train)
np.sum(model.coef_)
reg = dict(zip(X.columns, model.coef_))
reg = pd.DataFrame.from_dict(reg, orient='index').rename(
columns={0: 'lasso coef'})
reg['taxonomy'] = bt.loc['taxonomy'][reg.index]
subset = reg.loc[reg['lasso coef'] != 0]
print(subset.shape)
print(subset.taxonomy.values, subset['lasso coef'].values)
# store result for future need
subset_activation = subset
subset_activation
# check whether the same as in spearman result
same = results_spearman_activation.loc[results_spearman_activation['otu'].isin (subset_activation.index)]
print(same.taxonomy.values)
print(same['fdr pval'])
results_spearman_activation.loc[results['fdr pval'] <= 0.05].index
# plot coefficient progression
m_log_alphas = -np.log10(model.alphas_)
ax = plt.gca()
plt.plot(m_log_alphas, model.coef_path_.T)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.ylabel('Regression Coefficients')
plt.xlabel('-log(alpha)')
plt.title('Regression Coefficients Progression for Lasso Paths')
#plt.savefig('../figures/lasso_coef.png', bbox_inches='tight')
# plot mean square error for each fold
m_log_alphascv = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphascv, model.cv_mse_path_, ':')
plt.plot(m_log_alphascv, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean squared error')
plt.title('Mean squared error on each fold')
#plt.savefig('../figures/lasso_mse.png', bbox_inches='tight')
# MSE from training and test data
from sklearn.metrics import mean_squared_error
train_error = mean_squared_error(tar_train, model.predict(pred_train))
test_error = mean_squared_error(tar_test, model.predict(pred_test))
print ('training data MSE')
print(train_error)
print ('test data MSE')
print(test_error)
# R-square from training and test data
rsquared_train=model.score(pred_train,tar_train)
rsquared_test=model.score(pred_test,tar_test)
print ('training data R-square')
print(rsquared_train)
print ('test data R-square')
print(rsquared_test)
X = tmp[otu_cols]
y = tmp[vars_vd[4]]
# split data into train and test sets
pred_train, pred_test, tar_train, tar_test = train_test_split(X, y, test_size=.3, random_state=123)
# specify the lasso regression model
model=LassoLarsCV(cv=10, precompute=True).fit(pred_train,tar_train)
np.sum(model.coef_)
# check
reg = dict(zip(X.columns, model.coef_))
reg = pd.DataFrame.from_dict(reg, orient='index').rename(
columns={0: 'lasso coef'})
reg['taxonomy'] = bt.loc['taxonomy'][reg.index]
subset = reg.loc[reg['lasso coef'] != 0]
print(subset.shape)
#### previous scratch on lasso
from sklearn.linear_model import Lasso
from sklearn.preprocessing import StandardScaler
small= tmp[np.append(vars_vd[1], otu_cols)].dropna(axis=0, how='any')
X = scaler.fit_transform(small[otu_cols])
Y = small[vars_vd[1]]
# drop all missing values
tmp = dat[np.append(vars_vd, otu_cols)]
tmp.head()
scaler = StandardScaler()
small= tmp[np.append(vars_vd[0], otu_cols)].dropna(axis=0, how='any')
X = scaler.fit_transform(small[otu_cols])
Y = small[vars_vd[0]]
names = otu_cols
# Create a function called lasso,
def lasso(alphas):
    '''
    Fit one sklearn Lasso regression per value in `alphas` and collect the
    coefficients.

    Relies on module-level globals that must be set before calling:
    `X` (standardized OTU matrix), `Y` (response vector) and `names`
    (OTU column names). Returns a dataframe with an 'OTU names' column
    plus one 'Alpha = <value>' column of coefficients per alpha.
    '''
    # Create an empty data frame
    df = pd.DataFrame()
    # Create a column of feature names
    df['OTU names'] = names
    # For each alpha value in the list of alpha values,
    for alpha in alphas:
        # Create a lasso regression with that alpha value
        # (the local name shadows this function -- harmless here).
        lasso = Lasso(alpha=alpha)
        # Fit the lasso regression
        lasso.fit(X, Y)
        # Create a column name for that alpha value
        column_name = 'Alpha = %f' % alpha
        # Create a column of coefficient values
        df[column_name] = lasso.coef_
    # Return the dataframe
    return df
table = lasso([0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5])
table.head()
table.loc[table['Alpha = 0.500000'] != 0]
# Indices of the OTUs selected at alpha = 0.5.
# Renamed from `list`, which shadowed the builtin of the same name.
otu_ids = np.array([63, 131, 188, 237, 384, 505, 2116, 2545, 3484, 3598])
for otu_id in otu_ids:
    print(bt.loc['taxonomy'][otu_id])
scaler = StandardScaler()
small= tmp[np.append(vars_vd[1], otu_cols)].dropna(axis=0, how='any')
X = scaler.fit_transform(small[otu_cols])
Y = small[vars_vd[1]]
names = otu_cols
table = lasso([0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5])
table.loc[table['Alpha = 3.000000'] != 0]
bt.loc['taxonomy'][3323]
scaler = StandardScaler()
small= tmp[np.append(vars_vd[2], otu_cols)].dropna(axis=0, how='any')
X = scaler.fit_transform(small[otu_cols])
Y = small[vars_vd[2]]
names = otu_cols
table = lasso([0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5])
table.loc[table['Alpha = 2.000000'] != 0]
scaler = StandardScaler()
small= tmp[np.append(vars_vd[3], otu_cols)].dropna(axis=0, how='any')
X = scaler.fit_transform(small[otu_cols])
Y = small[vars_vd[3]]
names = otu_cols
table = lasso([0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5])
table.loc[table['Alpha = 2.000000'] != 0]
scaler = StandardScaler()
small= tmp[np.append(vars_vd[3], otu_cols)].dropna(axis=0, how='any')
X = scaler.fit_transform(small[otu_cols])
Y = small[vars_vd[3]]
names = otu_cols
table = lasso([0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5])
table.loc[table['Alpha = 1.000000'] != 0]
scaler = StandardScaler()
small= tmp[np.append(vars_vd[4], otu_cols)].dropna(axis=0, how='any')
X = scaler.fit_transform(small[otu_cols])
Y = small[vars_vd[4]]
names = otu_cols
table = lasso([0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5])
table.loc[table['Alpha = 1.000000'] != 0]
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
Lasso(alpha=0.5, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
print(clf.coef_)
sum(np.abs(clf.coef_))
print(clf.intercept_)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the second line above, we imported also the linear_autonomous_pool_model module which contains most of the functions required for the examples in this notebook.
Step2: Other useful system diagnostics are
|
<ASSISTANT_TASK:>
Python Code:
from sympy import *
from LAPM import *
from LAPM.linear_autonomous_pool_model import LinearAutonomousPoolModel
B=Matrix([[-2.927714, 0, 0, 0, 0, 0, 0.000000],
[0, -14.560, 0, 0, 0, 0, 0],
[0, 0, -3.6211195, 0, 0, 0, 0],
[0, 0, 0, -18.20, 0, 0, 0],
[1.449218, 6.552, 1.4665534, 8.19, -3.731, 0.082992, 0.003042],
[0.204940, 0, 0.2534784, 0, 2.193828, -0.1976, 0],
[0, 0, 0, 0, 0.014924, 0.005928, -0.006760]])
u=Matrix(7,1,[3.58800, 18.97804, 4.68, 24.75396, 0, 0, 0])
M=LinearAutonomousPoolModel(u, B, True)
M.A_expected_value
M.A_quantile(0.5) # Median (50% quantile) of the system age distribution
M.T_expected_value #Mean transit time
M.T_quantile(0.95) # Median (50% quantile) of the transit time distribution
M.a_expected_value # Mean age vector of individual pools
M.a_quantile(0.95) # 95% quantile of individual pools
M.a_quantile(0.05) # 5% quantiles of individual pools
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: L'algorithme optimal est exposé en dernier, la correction propose un cheminement jusqu'à cette solution en introduisant au fur et à mesure les idées qui aboutissent à cette solution. A partir de la première solution, on élague peu à peu
Step2: Avant de passer à l'algorithme dichotomique, on va d'abord suivre un chemin plus facile et plus lent. Supposons qu'on connaît la meilleure séquence croissante de longueur $n
Step3: Le coût de cet algorithme est en $O(n^2)$. L'énoncé de l'exercice suggère qu'on peut faire mieux en utilisant la dichotomie. En coupant l'ensemble $E$ en deux, $A=E (1 \rightarrow k)$ et $B=E(1 \rightarrow k+1)$, soit la plus grande séquence croissante est dans $A$, soit dans $B$, soit elle commence avant la position $k$ et se termine après. Les deux premiers cas sont très simples à traiter par récurrence. Le dernier l'est moins mais on sait deux choses
Step4: On compare les coûts. La seconde fonction est un peu plus rapide à un facteur multiplicatif près. Le coût des deux fonctions est $O(n^2)$.
Step5: Un peu plus près de la solution optimale
Step6: On compare les coûts. La seconde fonction est un peu plus rapide à un facteur multiplicatif près. Le coût de la première fonction est en $O(n^2)$. La seconde est en $O(nL)$ où $L$ est la longueur de la plus longueur séquence. On majore ce coût par $O(n^2)$ mais dans les faits, c'est plus court.
Step7: Solution optimale
Step8: On compare avec la version précédente et on vérifie qu'elle est plus rapide.
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
E = [10, 15, 7, 19, 2, 5, 7, 16, 3, 9, 15, 0, 1, 15, 6, 11, 0, 14, 7, 9]
E
def plus_grande_sequence_position_k(E, k=None):
    """For every position i <= k, compute the longest non-decreasing
    subsequence of E ending exactly at i, as a list of indices.

    E: sequence of comparable values (assumed non-empty, as in the original).
    k: last position to process; defaults to len(E) - 1.
    Returns S where S[i] is the index list of the best subsequence ending
    at i. Cost is O(k^2).

    Fix: the original recursive formulation hit Python's recursion limit
    for len(E) greater than ~1000; this iterative version is equivalent
    (same tie-breaking: the first strictly longer candidate wins).
    """
    if k is None:
        k = len(E) - 1
    S = [[0]]
    for i in range(1, k + 1):
        best = []
        for s in S:
            # Keep the longest sequence whose last value E[i] can extend.
            if len(s) > len(best) and E[i] >= E[s[-1]]:
                best = s
        S.append(best + [i])
    return S
def plus_grande_sequence(E):
    """Return, as a list of indices, the longest non-decreasing subsequence
    of E, using the per-position table from plus_grande_sequence_position_k.
    Ties go to the earliest ending position, as before."""
    if not E:
        return E
    sequences = plus_grande_sequence_position_k(E)
    return max(sequences, key=len)
b = plus_grande_sequence(E)
"E",E,"indice:",b, "valeurs:", [ E[i] for i in b ]
def plus_grande_sequence_2(E):
    """Longest non-decreasing subsequence of E, returned as a list of indices.

    Classic O(n^2) dynamic programming: for every position k we store the
    length of the best subsequence ending at k together with the index of
    its predecessor, then walk the predecessor chain back from the best
    ending position (first one in case of ties).
    """
    if not E:
        return E
    n = len(E)
    prev = [-1] * n
    length = [0] * n
    length[0] = 1
    for k in range(1, n):
        best_len, best_prev = 1, -1
        for j in range(k):
            if E[j] <= E[k] and length[j] + 1 > best_len:
                best_len, best_prev = length[j] + 1, j
        prev[k] = best_prev
        length[k] = best_len
    # First index achieving the maximum length (matches the original scan).
    end = max(range(n), key=lambda i: length[i])
    seq = []
    i = end
    while i != -1:
        seq.append(i)
        i = prev[i]
    seq.reverse()
    return seq
E = [10, 15, 7, 19, 2, 5, 7, 16, 3, 9, 15, 0, 1, 15, 6, 11, 0, 14, 7, 9]
b = plus_grande_sequence_2(E)
"E",E,"indice:",b, "valeurs:", [ E[i] for i in b ]
import random
for n in (20,50,100,200) :
E = [ random.randint(0,n) for i in range(n) ]
print("n=",n)
%timeit plus_grande_sequence(E)
%timeit plus_grande_sequence_2(E)
def plus_grande_sequence_2L(E):
    """Longest non-decreasing subsequence of E, returned as a list of indices.

    Maintains `dernier`, where dernier[j] is the index of the current best
    (smallest) last element among subsequences of length j+1, and
    `precedent`, the predecessor links used to rebuild the winning
    subsequence at the end. Cost is O(n*L) with L the final subsequence
    length (the inner scan over `dernier` is linear, not dichotomic).
    """
    if len(E) == 0:
        return E
    dernier = [0]
    precedent = [-1 for e in E]
    for k in range(1,len(E)):
        if E[k] >= E [dernier [-1]]:
            # E[k] extends the longest subsequence found so far.
            precedent[k] = dernier[-1]
            dernier.append( k )
        else :
            # E[k] cannot extend the longest subsequence: use it to lower
            # the tail of an existing, shorter subsequence instead.
            for j in range(len(dernier)-1, -1, -1):
                if E[k] < E [dernier[j]]:
                    if precedent[dernier[j]] == -1:
                        # Length-1 subsequence: simply restart it at k.
                        dernier [j] = k
                    elif E[k] >= E[dernier[j-1]]:
                        # NOTE(review): when j == 0 the condition above peeks
                        # at dernier[-1] via Python's negative indexing; the
                        # break below keeps that harmless.
                        if j == 0:
                            break
                        precedent[k] = dernier[j-1]
                        # Not exactly precedent[dernier[j]], but this value
                        # is admissible by construction.
                        dernier[j] = k
                        break # no point in scanning further left
    # Rebuild the longest subsequence by following the predecessor links.
    seq = [dernier[-1]]
    while precedent[seq[-1]] != -1:
        p = precedent[seq[-1]]
        seq.append(p)
    seq.reverse()
    return seq
E = [10, 15, 7, 19, 2, 5, 7, 16, 3, 9, 15, 0, 1, 15, 6, 11, 0, 14, 7, 9]
b = plus_grande_sequence_2L(E)
"E",E,"indice:",b, "valeurs:", [ E[i] for i in b ]
import random
for n in (20,50,100,200) :
E = [random.randint(0,n) for i in range(n)]
%timeit plus_grande_sequence_2(E)
%timeit plus_grande_sequence_2L(E)
def plus_grande_sequence_wikipedia(E):
    """Longest strictly increasing subsequence of E (as a list of indices)
    in O(n log n), following the binary-search algorithm described on the
    Wikipedia "Longest increasing subsequence" page.

    M[l] holds the index of the smallest possible tail of an increasing
    subsequence of length l; P[i] is the predecessor of i.

    Fixes: M needs len(E)+1 slots, because the length index can reach
    len(E) for a fully increasing input (the original raised IndexError
    on e.g. [1, 2, 3]); an empty input no longer crashes on M[0].
    """
    if len(E) == 0:
        return []
    P = [-1 for _ in E]
    M = [-1 for _ in range(len(E) + 1)]
    L = 0
    for i in range(0, len(E)):
        # Binary search for the largest newL with E[M[newL - 1]] < E[i].
        lo = 1
        hi = L
        while lo <= hi:
            mid = (lo + hi) // 2
            if E[M[mid]] < E[i]:
                lo = mid + 1
            else:
                hi = mid - 1
        newL = lo
        P[i] = M[newL - 1]
        if newL > L:
            M[newL] = i
            L = newL
        elif E[i] < E[M[newL]]:
            # Same length, smaller tail: keep the better candidate.
            M[newL] = i
    # Reconstruct the subsequence by walking the predecessor links.
    S = [-1 for i in range(L)]
    k = M[L]
    for i in range(L-1, -1, -1) :
        S[i] = k
        k = P[k]
    return S
E = [10, 15, 7, 19, 2, 5, 7, 16, 3, 9, 15, 0, 1, 15, 6, 11, 0, 14, 7, 9]
b = plus_grande_sequence_wikipedia(E)
"E",E,"...","indice:",b, "valeurs:", [ E[i] for i in b ]
import random
for n in (20,50,100,200) :
E = [ random.randint(0,n) for i in range(n) ]
print("n=",n)
%timeit plus_grande_sequence_2L(E)
%timeit plus_grande_sequence_wikipedia(E)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Input Parameters
Step2: Run SYGMA
Step3: Plot results
Step4: 3. Multiple Decay Channels
Step5: Example with U-238
Step6: In the case of U-238, there are many isotopes that are resulting from the multiple decay channels. Those new radioactive isotopes are added automatically in the list of isotopes in NuPyCEE.
|
<ASSISTANT_TASK:>
Python Code:
# Import python modules
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Import the NuPyCEE codes
from NuPyCEE import sygma
from NuPyCEE import omega
# Number of timesteps in the simulaton.
# See https://github.com/NuGrid/NuPyCEE/blob/master/DOC/Capabilities/Timesteps_size_management.ipynb
special_timesteps = -1
nb_dt = 100
tend = 2.0e6
dt = tend / float(nb_dt)
# No star formation.
no_sf = True
# Dummy neutron star merger yields to activate the radioactive option.
nsmerger_table_radio = 'yield_tables/extra_table_radio_dummy.txt'
# Add 1 Msun of radioactive Al-26 in the gas.
# The indexes of this array reflect the order seen in the yield_tables/decay_file.txt file
# Index 0, 1, 2 --> Al-26, K-40, U-238
ism_ini_radio = [1.0, 0.0, 0.0]
# Run SYGMA (or in this case, the decay process)
s = sygma.sygma(iniZ=0.02, no_sf=no_sf, ism_ini_radio=ism_ini_radio,\
special_timesteps=special_timesteps, tend=tend, dt=dt,\
decay_file='yield_tables/decay_file.txt', nsmerger_table_radio=nsmerger_table_radio)
# Get the Al-26 (radioactive) and Mg-26 (stable) indexes in the gas arrays
i_Al_26 = s.radio_iso.index('Al-26')
i_Mg_26 = s.history.isotopes.index('Mg-26')
# Extract the evolution of these isotopes as a function of time
Al_26 = np.zeros(s.nb_timesteps+1)
Mg_26 = np.zeros(s.nb_timesteps+1)
for i_t in range(s.nb_timesteps+1):
Al_26[i_t] = s.ymgal_radio[i_t][i_Al_26]
Mg_26[i_t] = s.ymgal[i_t][i_Mg_26]
# Plot the evolution of Al-26 and Mg-26
%matplotlib nbagg
plt.figure(figsize=(8,4.5))
plt.plot( np.array(s.history.age)/1e6, Al_26, '--b', label='Al-26' )
plt.plot( np.array(s.history.age)/1e6, Mg_26, '-r', label='Mg-26' )
plt.plot([0,2.0], [0.5,0.5], ':k')
plt.plot([0.717,0.717], [0,1], ':k')
# Labels and fontsizes
plt.xlabel('Time [Myr]', fontsize=16)
plt.ylabel('Mass of isotope [M$_\odot$]', fontsize=16)
plt.legend(fontsize=14, loc='center left', bbox_to_anchor=(1, 0.5))
plt.subplots_adjust(top=0.96)
plt.subplots_adjust(bottom=0.15)
plt.subplots_adjust(right=0.75)
matplotlib.rcParams.update({'font.size': 14.0})
# Add 1 Msun of radioactive K-40 in the gas.
# The indexes of this array reflect the order seen in the yield_tables/decay_file.txt file
# Index 0, 1, 2 --> Al-26, K-40, U-238
ism_ini_radio = [0.0, 1.0, 0.0]
# Number of timesteps in the simulaton.
# See https://github.com/NuGrid/NuPyCEE/blob/master/DOC/Capabilities/Timesteps_size_management.ipynb
special_timesteps = -1
nb_dt = 100
tend = 5.0e9
dt = tend / float(nb_dt)
# Run SYGMA (or in this case, the decay process)
# with the decay module
s = sygma.sygma(iniZ=0.0, sfr=sfr, starbursts=starbursts, ism_ini_radio=ism_ini_radio,\
special_timesteps=special_timesteps, tend=tend, dt=dt,\
decay_file='yield_tables/decay_file.txt', nsmerger_table_radio=nsmerger_table_radio,\
use_decay_module=True, radio_refinement=1)
# Get the K-40 (radioactive) and Ca-40 and Ar-40 (stable) indexes in the gas arrays
i_K_40 = s.radio_iso.index('K-40')
i_Ca_40 = s.history.isotopes.index('Ca-40')
i_Ar_40 = s.history.isotopes.index('Ar-40')
# Extract the evolution of these isotopes as a function of time
K_40 = np.zeros(s.nb_timesteps+1)
Ca_40 = np.zeros(s.nb_timesteps+1)
Ar_40 = np.zeros(s.nb_timesteps+1)
for i_t in range(s.nb_timesteps+1):
K_40[i_t] = s.ymgal_radio[i_t][i_K_40]
Ca_40[i_t] = s.ymgal[i_t][i_Ca_40]
Ar_40[i_t] = s.ymgal[i_t][i_Ar_40]
# Plot the evolution of Al-26 and Mg-26
%matplotlib nbagg
plt.figure(figsize=(8,4.5))
plt.plot( np.array(s.history.age)/1e6, K_40, '--b', label='K-40' )
plt.plot( np.array(s.history.age)/1e6, Ca_40, '-r', label='Ca-40' )
plt.plot( np.array(s.history.age)/1e6, Ar_40, '-g', label='Ar-40' )
# Labels and fontsizes
plt.xlabel('Time [Myr]', fontsize=16)
plt.ylabel('Mass of isotope [M$_\odot$]', fontsize=16)
plt.legend(fontsize=14, loc='center left', bbox_to_anchor=(1, 0.5))
plt.subplots_adjust(top=0.96)
plt.subplots_adjust(bottom=0.15)
plt.subplots_adjust(right=0.75)
matplotlib.rcParams.update({'font.size': 14.0})
# Add 1 Msun of radioactive U-238 in the gas.
# The indexes of this array reflect the order seen in the yield_tables/decay_file.txt file
# Index 0, 1, 2 --> Al-26, K-40, U-238
ism_ini_radio = [0.0, 0.0, 1.0]
# Number of timesteps in the simulaton.
# See https://github.com/NuGrid/NuPyCEE/blob/master/DOC/Capabilities/Timesteps_size_management.ipynb
special_timesteps = -1
nb_dt = 100
tend = 5.0e9
dt = tend / float(nb_dt)
# Run SYGMA (or in this case, the decay process)
# with the decay module
s = sygma.sygma(iniZ=0.0, sfr=sfr, starbursts=starbursts, ism_ini_radio=ism_ini_radio,\
special_timesteps=special_timesteps, tend=tend, dt=dt,\
decay_file='yield_tables/decay_file.txt', nsmerger_table_radio=nsmerger_table_radio,\
use_decay_module=True, radio_refinement=1)
print(s.radio_iso)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Correct Zotero RDF
Step2: Parse RDF
Step3: Create a GraphML file
Step4: The code-cell below will create a new GraphML file that we can import in Cytoscape.
|
<ASSISTANT_TASK:>
Python Code:
import rdflib
import networkx as nx
import os
rdf_path = 'data/example.rdf'
# Zotero emits duplicated attribute names ('rdf:resource rdf:resource')
# that break strict RDF/XML parsers; rewrite them before parsing.
with open(rdf_path, 'r') as f:
    corrected = f.read().replace('rdf:resource rdf:resource',
                                 'link:link rdf:resource')
# The corrected graph will be saved to a file with `_corrected`
# added to the name. E.g. if the original RDF document was
# called `example.rdf`, the new file will be called
# `example_corrected.rdf`.
base, name = os.path.split(rdf_path)
# os.path.splitext is robust to file names containing extra dots (the old
# `tuple(name.split('.'))` crashed on them), and the resulting name now
# matches the comment above (no stray '_.' infix).
stem, ext = os.path.splitext(name)
corrected_name = '%s_corrected%s' % (stem, ext)
corrected_rdf_path = os.path.join(base, corrected_name)
with open(corrected_rdf_path, 'w') as f:
    f.write(corrected)
rdf_graph = rdflib.Graph()
rdf_graph.load(corrected_rdf_path)
graph = nx.DiGraph() # Metadata is `directed`.
for s, p, o in rdf_graph.triples((None, None, None)):
    # The .toPython() method converts rdflib objects into objects
    # that any Python module can understand (e.g. str, int, float).
    graph.add_edge(s.toPython(),
                   o.toPython(),
                   attr_dict={'predicate': p.toPython()})
# print() function: the original used the Python 2 print statement,
# which is a SyntaxError under Python 3.
print('Added %i nodes and %i edges to the graph' % (graph.order(),
                                                    graph.size()))
graphml_path = 'output/example.graphml'
nx.write_graphml(graph, graphml_path)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a href="https
Step2: Optimization Through Dynamics
Step3: Next we need to posit interactions between our model bubbles. We'll assume that we can model a pair of bubbles by defining an energy function for the pair. We use an energy that is zero if the bubbles are not touching and increases gradually as they get pushed together. Specifically, we will consider two bubbles, $i$ and $j$, with radii $\sigma_i$ and $\sigma_j$ respectively. If $r_{ij}$ is the distance between the bubbles, we'll posit a function that looks like
Step4: Once we've assigned an energy, $U(r_{ij}, \sigma_{ij})$, to pairs of bubbles, we have to assign an energy to to the whole system, $\mathcal U$. As we will see, our simulation will make use of the forces, $\vec F_i = -\nabla_{\vec r_i}\mathcal U$. This is usually done by just summing up all the pairwise energies,
Step5: To compute the energy of the whole bubble raft in JAX, M.D. we use the helper function energy_fun = energy.soft_sphere_pair which returns a function that computes the soft-sphere energy for our bubble raft in its periodic box. We will feed the species and sigma definitions into the energy.soft_sphere_pair function.
Step6: Now let's run simulations with two different values of $D$ and draw them side-by-side. In one case we will use $D = 1$ so that both species are the same and this will result in the same hexagonal configuration as in the warm up. However, we will then run a simulation with $D = 0.8$ to see what happens when all the bubbles don't have the same size.
Step7: The hexagonal structure when the bubbles have the same size has broken down in the two-species case. To get a bit more insight into this we will use JAX's automatic vectorization capabilities (via vmap) to see how the structure and energy of bubble rafts depends on diameter.
Step8: At small values of the diameter, the energy is low and increases with increasing diameter until a maximum in the energy at which point it drops precipitously. We can visualize an example system at each of these diameter disparities to see what is happening. We will color the bubble rafts by their energy.
Step9: When the diameter of the small bubbles is close to the diamater of the large bubbles the structure is close to hexagonal. Likewise, when the small bubble diameter is very small, the small bubbles fit into the interstices and regions retain the hexagonal order. However, when the diameter is in-between (where the energy is high) the bubbles end up looking like they are arranged randomly. This is an effect known as the Jamming transition. However, could we have found this optimally disordered region without brute force? Let's try to maximize the energy with respect to the diameter, $D$, directly.
Step10: Next, let's take the derivative of the energy with respect to the particle diameters and vectorize it over the ensemble of initial positions.
Step11: Now, we run the simulation for each diameter and record the gradients over each member of the ensemble. We then plot the average gradient against diameter and compare it with the energy that we found by directly simulating the bubble raft.
|
<ASSISTANT_TASK:>
Python Code:
#@title Imports & Utils
!pip install -q git+https://www.github.com/google/jax-md
import time
from functools import partial
import numpy as onp
import jax.numpy as np
from jax import jit
from jax import grad
from jax import vmap
from jax import random
from jax import lax
from jax.experimental import stax
from jax.experimental import optimizers
from jax.config import config
config.update('jax_enable_x64', True)
from jax_md import space
from jax_md import minimize
from jax_md import simulate
from jax_md import space
from jax_md import energy
from jax_md import quantity
from jax_md.util import f32, f64
# Progress Bars.
from IPython.display import HTML, display
import time
def ProgressIter(iter_fun, iter_len=0):
    """Wrap an iterable with a live HTML progress bar.

    Falls back to len(iter_fun) when no explicit length is supplied.
    """
    total = iter_len if iter_len else len(iter_fun)
    out = display(progress(0, total), display_id=True)
    for done, item in enumerate(iter_fun, start=1):
        yield item
        out.update(progress(done, total))
def progress(value, max):
    """Return an IPython HTML progress bar showing `value` out of `max`.

    NOTE: `max` shadows the builtin; name kept for caller compatibility.
    The template is a triple-quoted string (it was mangled in a previous
    export) and the stray comma after the max attribute has been removed
    so the emitted HTML is well-formed.
    """
    return HTML("""
        <progress
            value='{value}'
            max='{max}'
            style='width: 45%'
        >
            {value}
        </progress>
    """.format(value=value, max=max))
# Matplotlib settings.
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style(style='white')
sns.set(font_scale=1.6)
def format_plot(x, y):
    """Label the current axes with x/y axis titles at font size 20."""
    for set_label, text in ((plt.xlabel, x), (plt.ylabel, y)):
        set_label(text, fontsize=20)
def finalize_plot(shape=(1, 1)):
    """Resize the current figure to `shape` (in units of 1.5x its current
    height) and apply a tight layout."""
    fig = plt.gcf()
    base_height = fig.get_size_inches()[1]
    fig.set_size_inches(shape[0] * 1.5 * base_height,
                        shape[1] * 1.5 * base_height)
    plt.tight_layout()
def draw_system(R, box_size, marker_size, color=None):
    """Scatter-plot particle positions `R` (shape (N, 2)) in a periodic box.

    Draws the system plus the periodic images needed to fill the frame,
    then clips the view to one box and hides the axes.

    Args:
      R: particle positions; anything convertible by onp.array.
      box_size: side length of the (square) periodic box.
      marker_size: marker size before normalization by box_size.
      color: optional RGB triple; defaults to a medium grey.
    """
    # `is None` (identity) instead of `== None`; also avoids surprises if a
    # caller passes an array-like color.
    if color is None:
        color = [64 / 256] * 3
    ms = marker_size / box_size
    R = onp.array(R)
    marker_style = dict(
        linestyle='none',
        markeredgewidth=3,
        marker='o',
        markersize=ms,
        color=color,
        fillstyle='none')
    # The system itself plus the six periodic images the original drew
    # (offsets in units of the box size).
    for ox, oy in ((0, 0), (1, 0), (0, 1), (1, 1), (-1, 0), (0, -1), (-1, -1)):
        plt.plot(R[:, 0] + ox * box_size, R[:, 1] + oy * box_size,
                 **marker_style)
    plt.xlim([0, box_size])
    plt.ylim([0, box_size])
    plt.axis('off')
    plt.gca().set_facecolor([1, 1, 1])
# System property utilities.
def box_size_at_number_density(particle_count, number_density):
return f32((particle_count / number_density) ** 0.5)
N = 128
N_2 = N // 2
simulation_steps = np.arange(300)
dt = 1e-2
temperature = 1e-5
key = random.PRNGKey(0)
packing_fraction = 0.98
markersize = 260
def box_size_at_packing_fraction(diameter):
    """Side of the square box holding N_2 small (diameter `diameter`) and
    N_2 unit-diameter bubbles at the global `packing_fraction`."""
    occupied_area = N_2 * np.pi * (diameter ** 2 + 1) / 4
    return np.sqrt(occupied_area / packing_fraction)
plt.gca().axhline(y=0, color='k')
plt.xlim([0, 1.5])
plt.ylim([-0.2, 0.8])
dr = np.linspace(0, 3.0, num=80)
plt.plot(dr, energy.soft_sphere(dr, sigma=1), 'b-', linewidth=3)
plt.fill_between(dr, energy.soft_sphere(dr), alpha=0.4)
format_plot('$r$', '$U(r)$')
species = np.array([0] * (N_2) + [1] * (N_2), dtype=np.int32)
def species_sigma(diameter):
    """2x2 matrix of pairwise interaction diameters for species A (small,
    diameter `diameter`) and B (unit diameter); the off-diagonal entry is
    the arithmetic mean of the two."""
    cross = 0.5 * (diameter + 1)
    return np.array(
        [[diameter, cross],
         [cross, 1]]
    )
@jit
def simulation(diameter, key):
    """Relax a random bidisperse bubble raft to its nearest energy minimum.

    Args:
      diameter: diameter of the small species (the large species has
        diameter 1; see species_sigma).
      key: PRNG key used to draw the random initial positions.
    Returns:
      (box_size, final energy, final particle positions).
    """
    # Create the simulation environment.
    box_size = box_size_at_packing_fraction(diameter)
    displacement, shift = space.periodic(box_size)
    # Create the energy function.
    sigma = species_sigma(diameter)
    energy_fun = energy.soft_sphere_pair(
        displacement, species=species, sigma=sigma)
    # Randomly initialize the system.
    R = random.uniform(key, (N, 2), maxval=box_size)
    # Minimize to the nearest minimum using FIRE descent.
    init_fn, apply_fn = minimize.fire_descent(energy_fun, shift)
    state = init_fn(R)
    do_step = lambda state, t: (apply_fn(state, t=t), ())
    # lax.scan keeps the minimization loop inside the jitted computation.
    state, _ = lax.scan(do_step, state, simulation_steps)
    return box_size, energy_fun(state.position), state.position
plt.subplot(1, 2, 1)
box_size, raft_energy, bubble_positions = simulation(1.0, key)
draw_system(bubble_positions, box_size, markersize)
finalize_plot((0.5, 0.5))
plt.subplot(1, 2, 2)
box_size, raft_energy, bubble_positions = simulation(0.8, key)
draw_system(bubble_positions[:N_2], box_size, 0.8 * markersize)
draw_system(bubble_positions[N_2:], box_size, markersize)
finalize_plot((2.0, 1))
ensemble_size = 40
# Vectorize over both random keys and diameter.
vec_simulation = vmap(vmap(simulation, (None, 0)), (0, None))
sim_keys = random.split(key, ensemble_size)
diameter = np.linspace(0.4, 1.0, 10)
box_size, raft_energy, bubble_positions = vec_simulation(diameter, sim_keys)
E_mean = np.mean(raft_energy, axis=1)
E_std = np.std(raft_energy, axis=1)
plt.plot(diameter, E_mean, linewidth=3)
plt.fill_between(diameter, E_mean + E_std, E_mean - E_std, alpha=0.4)
plt.xlim([0.4, 1.0])
format_plot('$D$', '$\\langle E\\rangle$')
ms = 185
for i, d in enumerate(diameter):
plt.subplot(2, 5, i + 1)
c = min(1, max(0, (E_mean[i] - 0.4) * 4))
color = [c, 0, 1 - c]
draw_system(bubble_positions[i, 0, :N_2], box_size[i, 0], d * ms, color=color)
draw_system(bubble_positions[i, 0, N_2:], box_size[i, 0], ms, color=color)
finalize_plot((2.5, 1))
# Only a few descent steps: enough to make the energy differentiable in
# `diameter` without re-running a full minimization.
short_simulation_steps = np.arange(10)
def short_simulation(diameter, positions, key):
    """Energy after a few gradient-descent steps, differentiable in `diameter`.

    NOTE(review): `key` is unused here — presumably kept so the signature
    lines up for vmap over the ensemble; confirm before removing.
    """
    L = box_size_at_packing_fraction(diameter)
    # periodic_general keeps the box matrix in the computation graph so
    # d(energy)/d(diameter) also flows through the box size L.
    displacement, shift = space.periodic_general(L * np.eye(2),
                                                 fractional_coordinates=False)
    sigma = species_sigma(diameter)
    energy_fun = energy.soft_sphere_pair(displacement, species, sigma)
    init_fn, apply_fn = minimize.gradient_descent(energy_fun, shift, 1e-3)
    state = init_fn(positions)
    do_step = lambda state, t: (apply_fn(state), ())
    state, _ = lax.scan(do_step, state, short_simulation_steps)
    # NOTE(review): simulation() above evaluates energy_fun(state.position);
    # here the whole state object is passed — confirm this is intended.
    return energy_fun(state)
dE_dD_fun = grad(short_simulation)
dE_dD_fun = jit(vmap(dE_dD_fun, (None, 0, 0)))
dE_dD = []
for i, d in ProgressIter(enumerate(diameter), len(diameter)):
key, split = random.split(key)
split = random.split(split, ensemble_size)
dE_dD += [dE_dD_fun(d, bubble_positions[i], split)]
dE_dD = np.array(dE_dD)
plt.subplot(2, 1, 1)
dE_mean = np.mean(dE_dD, axis=1)
dE_std = np.std(dE_dD, axis=1)
plt.plot(diameter, dE_mean, linewidth=3)
plt.fill_between(diameter, dE_mean + dE_std, dE_mean - dE_std, alpha=0.4)
plt.xlim([0.4, 1.0])
format_plot('$D$', '$\\langle \\partial E/\\partial D\\rangle$')
plt.subplot(2, 1, 2)
plt.plot(diameter, E_mean, linewidth=3)
plt.fill_between(diameter, E_mean + E_std, E_mean - E_std, alpha=0.4)
plt.xlim([0.4, 1.0])
format_plot('$D$', '$\\langle E\\rangle$')
finalize_plot((1.25, 1))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Numpy and Scipy
Step2: Matplotlib
Step3: Miscellaneous definitions
Step4: 3 DOF System
Step5: The plots
Step6: Equation of Motion
Step7: The left beam is constrained by a roller and by the right beam, the first requires that the Centre of Instantaneous Rotation (CIR) belongs to the vertical line in $A$, while the second requires that the CIR belongs to the line that connects the hinges
Step8: Structural Matrices
Step9: Compute the 3x3 flexibility using the Principle of Virtual Displacements and the 3x3 stiffness using inversion, while the mass matrix is directly assembled with the understanding that the lumped mass on $x_1$ is $2m$.
Step10: The eigenvalues problem
Step11: Numerical Integration
Step12: The constants that we need in the linear acceleration algorithm — note that we have an undamped system or, in other words, $\boldsymbol C = \boldsymbol 0$
Step13: The integration loop
Step14: Plotting
Step15: Equation of Motion
Step16: Substituting a particular integral $\xi_i=C_i\sin(\lambda_0 a)$ in the
Step17: The modal response, taking into account that we start from rest conditions, is
Step18: Free vibration phase, $a\ge 3\pi/2 = a_1$
Step19: Analytical expressions
Step20: Stitching the two responses
Step21: Plotting the Analytical Response
Step22: next, the modal responses over the interval $0 \le a \le 16\pi$
Step23: Nodal responses
Step24: Why x = q@Psi.T rather than x = Psi@q? Because for different reasons (mostly, ease of use with the plotting libraries) we have all the response arrays organized in the shape of (Nsteps × 3).
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import display, Latex, HTML
display(HTML(open('01.css').read()))
import numpy as np
from numpy import array, cos, diag, eye, linspace, pi
from numpy import poly1d, sign, sin, sqrt, where, zeros
from scipy.linalg import eigh, inv, det
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('seaborn-paper')
plt.rcParams['figure.dpi'] = 115
plt.rcParams['figure.figsize'] = (7.5, 2.5)
plt.rcParams['axes.grid'] = True
def ld(*items):
    """Join the given LaTeX fragments and render them as one display equation."""
    body = ' '.join(items)
    display(Latex('$$' + body + '$$'))
def pmat(mat, env='bmatrix', fmt='%+f'):
opener = '\\begin{'+env+'}\n '
closer = '\n\\end{'+env+'}'
formatted = '\\\\\n '.join('&'.join(fmt%elt for elt in row) for row in mat)
return opener+formatted+closer
def rounder(mat): return mat+0.01*sign(mat)
def p(*l): return poly1d(l)
def vw(emme, chi, L):
    """Virtual-work integral: sum over spans of the definite integral of
    m(x)*c(x) from 0 to l, for paired moment/curvature polynomials."""
    total = 0
    for m, c, l in zip(emme, chi, L):
        antiderivative = (m * c).integ()
        total += antiderivative(l) - antiderivative(0)
    return total
def p0_p1(p):
    """Yield consecutive overlapping pairs (p[i], p[i+1]) from iterable p."""
    from itertools import tee
    left, right = tee(p)
    next(right, None)
    return zip(left, right)
l0 = 4/3
# Returns the nondimensional time grid and the imposed support displacement,
# velocity and acceleration (ramped up to a = 3*pi/2, constant thereafter).
def a_uA_vA_aA(t0, t1, npoints):
    """Support-motion kinematics on a uniform grid of npoints over [t0, t1]."""
    a = linspace(t0, t1, npoints)
    ramping = a < 3*pi/2
    uA = where(ramping, (l0*a - sin(l0*a)) / (2*pi), 1)
    vA = where(ramping, (1 - cos(l0*a)) / (2*pi), 0)
    aA = where(ramping, 16*sin(l0*a) / (18*pi), 0)
    return a, uA, vA, aA
# and use it
a, uA, vA, aA = a_uA_vA_aA(0, 2*pi, 501)
plt.plot(a/pi, uA)
plt.xlabel(r'$\omega_0 t/\pi$')
plt.ylabel(r'$u_A/\delta$')
plt.title('Imposed support motion');
plt.plot(a/pi, vA)
plt.xlabel(r'$\omega_0 t/\pi$')
plt.ylabel(r'$\dot u_A/\delta\omega_0$')
plt.title('Imposed support velocity');
plt.plot(a/pi, aA)
plt.xlabel(r'$\omega_0 t/\pi$')
plt.ylabel(r'$\ddot u_A/\delta\omega_0^2$')
plt.title('Imposed support acceleration');
display(HTML(open('figures/trab1kin_conv.svg').read()))
e = array((2.0, 2.0, 2.0))
display(HTML(open('figures/trab1_conv.svg').read()))
# Span lengths of the six beam segments used in the virtual-work integrals.
l = [1, 2, 2, 1, 1, 1]
h = 0.5 ; t = 3*h
# Bending-moment polynomials (highest power first, via p()) for each of the
# three unit-load cases, one row per DOF, one column per beam segment.
m = [[p(2,0),p(h,0),p(h,1),p(h,0),p(h,h),p(1,0)],
     [p(2,0),p(1,0),p(0,2),p(1,0),p(1,1),p(2,0)],
     [p(2,0),p(h,0),p(h,1),p(h,0),p(t,h),p(2,0)]]
# Flexibility by the Principle of Virtual Displacements: F[i][j] = integral
# of m_i * m_j over every span; stiffness is its inverse.
F = array([[vw(emme, chi, l) for emme in m] for chi in m])
K = inv(F)
# Lumped mass matrix: 2m on the first DOF, m on the others.
M = array(((2.0, 0.0, 0.0),
           (0.0, 1.0, 0.0),
           (0.0, 0.0, 1.0)))
iM = inv(M)
# Pretty-print the (rescaled, integer-rounded) matrices as LaTeX.
ld('\\boldsymbol F = \\frac{L^3}{12EJ}\\,', pmat(rounder(F*12), fmt='%+d'))
ld('\\boldsymbol K = \\frac{3 EJ}{1588L^3}\\,',
   pmat(rounder(K*1588/3), fmt='%+d'),
   '= \\frac{EJ}{L^3}\\;\\hat{\\boldsymbol K}.')
ld('\\boldsymbol M = m\\,', pmat(M, fmt='%d'),
   '= m\\;\\hat{\\boldsymbol M}.')
wn2, Psi = eigh(K, M)
wn = sqrt(wn2)
li = wn
Lambda2 = diag(wn2)
Lambda = diag(wn)
# eigenvectors are normalized → M* is a unit matrix, as well as its inverse
Mstar, iMstar = eye(3), eye(3)
ld(r'\boldsymbol\Omega^2 = \omega_0^2\,', pmat(Lambda2),
r'=\omega_0^2\,\boldsymbol\Lambda^2.')
ld(r'\boldsymbol\Omega=\omega_0\,', pmat(Lambda),
r'=\omega_0\,\boldsymbol\Lambda.')
ld(r'\boldsymbol T_\text{n}=\frac{2\pi}{\omega_0}\,', pmat(inv(Lambda)),
r'= t_0\,\boldsymbol\Theta.')
ld(r'\Psi=', pmat(Psi), '.')
# Time grid: nsppi steps per pi of nondimensional time, over 0..16*pi.
nsppi = 200
a, _, _, aA = a_uA_vA_aA(0, 16*pi, nsppi*16+1)
# Effective load from support acceleration: p_eff = -M e * a_A(t).
peff = (- M @ e) * aA[:,None]
h = pi/nsppi
# Linear-acceleration (constant time step) operator for an undamped system:
# K_effective = K + 6 M / h^2, inverted once outside the loop.
K_ = K + 6*M/h**2
F_ = inv(K_)
# Incremental-load contributions from current velocity and acceleration.
dp_v = 6*M/h
dp_a = 3*M
Xl, Vl = [zeros(3)], [zeros(3)]
# Step through consecutive load pairs (p0, p1) starting from rest.
for p0, p1 in p0_p1(peff):
    x0, v0 = Xl[-1], Vl[-1]
    # Current acceleration from equilibrium: a0 = M^-1 (p0 - K x0).
    a0 = iM @ (p0 -K@x0)
    dp = (p1-p0) + dp_a@a0 + dp_v@v0
    dx = F_@dp
    dv = 3*dx/h - 3*v0 - a0*h/2
    Xl.append(x0+dx), Vl.append(v0+dv)
Xl = array(Xl) ; Vl = array(Vl)
# Plot the three nodal displacement histories.
for i, line in enumerate(plt.plot(a/pi, Xl), 1):
    line.set_label(r'$x_{%d}$'%i)
plt.xlabel(r'$\omega_0 t/\pi$')
plt.ylabel(r'$x_i/\delta$')
plt.title('Response — numerical integration — lin.acc.')
plt.legend();
G = - Psi.T @ M @ e
C = G*l0**2/(li**2-l0**2)/2/pi
for n in range(3):
i = n+1
ld(r'q_%d=%+10f\left(\sin\frac43a-%10f\sin%1fa\right)' % (i,C[n],l0/li[n],li[n]),
r'\qquad\text{for }0 \le a \le \frac32\pi')
a1 = 3*pi/2
q_a1 = C*(sin(l0*a1)-l0*sin(li*a1)/li)
v_a1 = C*l0*(cos(l0*a1)-cos(li*a1))
ABs = []
for i in range(3):
b = array((q_a1[i], v_a1[i]/li[i]))
A = array(((+cos(li[i]*a1), -sin(li[i]*a1)),
(+sin(li[i]*a1), +cos(li[i]*a1))))
ABs.append(A@b)
ABs = array(ABs)
display(Latex(r'Modal responses for $a_1 \le a$.'))
for n in range(3):
i, l, A_, B_ = n+1, li[n], *ABs[n]
display(Latex((r'$$q_{%d} = '+
r'%+6.3f\cos%6.3fa '+
r'%+6.3f\sin%6.3fa$$')%(i, A_, l, B_, l)))
ac = a[:,None]
q = where(ac<=a1,
C*(sin(l0*ac)-l0*sin(li*ac)/li),
ABs[:,0]*cos(li*ac) + ABs[:,1]*sin(li*ac))
# #### Plot zooming around a1
low, hi = int(0.8*a1*nsppi/pi), int(1.2*a1*nsppi/pi)
for i, line in enumerate(plt.plot(a[low:hi]/pi, q[low:hi]), 1):
line.set_label('$q_{%d}$'%i)
plt.title('Modal Responses, zoom on transition zone')
plt.xlabel(r'$\omega_0 t/\pi$')
plt.legend(loc='best')
plt.show()
# #### Plot in 0 ≤ a ≤ 16 pi
for i, line in enumerate(plt.plot(a/pi, q), 1):
line.set_label('$q_{%d}$'%i)
plt.title('Modal Responses')
plt.xlabel(r'$\omega_0 t/\pi$')
plt.legend(loc='best');
plt.xticks()
plt.show();
x = q@Psi.T
for i, line in enumerate(plt.plot(a/pi, x), 1):
line.set_label('$x_{%d}/\delta$'%i)
plt.title('Normalized Nodal Displacements — analytical solution')
plt.xlabel(r'$\omega_0 t / \pi$')
plt.legend(loc='best')
plt.show();
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preparing the Data
Step2: Once we have the data ready, we need to put it into an iterator and specify parameters such as the 'batch_size', and 'shuffle' which will determine the size of data the iterator feeds during each pass, and whether or not the data will be shuffled respectively.
Step3: In the above example, we have made use of NDArrayIter, which is used to iterate over numpy arrays. In general, there are many different types of iterators in MXNet based on the type of data you will be using. Their complete documentation can be found at
Step4: The above network uses the following layers
Step5: We can plot the network we have created in order to visualize it.
Step6: Training the model
Step7: Using a trained model
Step8: We can also evaluate our model against some metric. In this example, we are evaluating our model's mean squared error on the evaluation data.
Step9: Let us try to add some noise to the evaluation data and see how the MSE changes
|
<ASSISTANT_TASK:>
Python Code:
import mxnet as mx
import numpy as np
#Training data
train_data = np.array([[1,2],[3,4],[5,6],[3,2],[7,1],[6,9]])
train_label = np.array([5,11,17,7,9,24])
batch_size = 1
#Evaluation Data
eval_data = np.array([[7,2],[6,10],[12,2]])
eval_label = np.array([11,26,16])
train_iter = mx.io.NDArrayIter(train_data,train_label, batch_size, shuffle=True,label_name='lin_reg_label')
eval_iter = mx.io.NDArrayIter(eval_data, eval_label, batch_size, shuffle=False)
X = mx.sym.Variable('data')
Y = mx.symbol.Variable('lin_reg_label')
fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden = 1)
lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro")
model = mx.mod.Module(
symbol = lro ,
data_names=['data'],
label_names = ['lin_reg_label']# network structure
)
mx.viz.plot_network(symbol=lro)
import logging
logging.basicConfig(level=logging.INFO)
model.fit(train_iter, eval_iter,
optimizer_params={'learning_rate':0.00005, 'momentum': 0.9},
num_epoch=10,
eval_metric='mse',
batch_end_callback = mx.callback.Speedometer(batch_size, 2))
#Inference
model.predict(eval_iter).asnumpy()
#Evaluation
metric = mx.metric.MSE()
model.score(eval_iter, metric)
#Evaluation Data
eval_data = np.array([[7,2],[6,10],[12,2]])
eval_label = np.array([11.1,26.1,16.1]) #Adding 0.1 to each of the values
eval_iter = mx.io.NDArrayIter(eval_data, eval_label, batch_size, shuffle=False)
model.score(eval_iter, metric)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Read data
Step2: Convert data to monthly time series
Step3: Have a quick plot
Step4: 3. Carry out SSA analysis
Step5: We can view the contribution of each of the signals (corresponding to each singular value) as follows
Step6: From the above we can see that most of the variation is contained in the first signal (corresponding to the first singular vector). The scale in the image above makes it difficult to see what's going on with the remaining singular values.
Step7: Now that we have the signal components (there are 15 additive components to the signal) we can loop over that range and look at each one individually. We only look at the first 5 elements below
Step8: Let's see the result if we reconstruct the signal from these 5 streams. We add the first 5 signals and pass the result to object.view_reconstruction.
Step9: That looks very reasonable! We can get closer to the original series by adding additional signals from the remaining singular values. Let's use the first 15 singular values
Step10: 4. Forecast
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
from mySSA import mySSA # private lib
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 11, 4
df = pd.read_csv(r'data\NH.Ts.csv', parse_dates=True, index_col='Year', skiprows=1)
df = df['1881':'2016']
df.drop(['SON', 'MAM', 'DJF', 'JJA', 'J-D', 'D-N'], axis=1, inplace=True)
df1 = df.apply(pd.to_numeric, errors='coerce')
df1.tail()
tmp = df1.values.astype(np.float)
tmp = tmp.reshape(tmp.shape[0]*12,1)
print(tmp.shape)
ds = pd.date_range(start='1/1/1881', end='12/1/2016', freq='MS')
ts = pd.DataFrame(tmp, columns=['TEMP'])
ts = ts.set_index(ds)
ts.plot()
ssa = mySSA(ts)
K = 40
suspected_seasonality = 12
ssa.embed(embedding_dimension=40, suspected_frequency=suspected_seasonality, verbose=True)
ssa.decompose(verbose=True)
ssa.view_s_contributions()
ssa.view_s_contributions(adjust_scale=True)
rcParams['figure.figsize'] = 11, 2
for i in range(5):
ssa.view_reconstruction(ssa.Xs[i], names=i, symmetric_plots=i!=0)
rcParams['figure.figsize'] = 11, 4
streams5 = [i for i in range(5)]
reconstructed5 = ssa.view_reconstruction(*[ssa.Xs[i] for i in streams5], names=streams5, return_df=True, plot=False)
ts_copy5 = ssa.ts.copy()
ts_copy5['Reconstruction'] = reconstructed5.Reconstruction.values
ts_copy5.plot(title='Original vs. Reconstructed Time Series[First 5 RCs]');
streams15 = [i for i in range(15)]
reconstructed15 = ssa.view_reconstruction(*[ssa.Xs[i] for i in streams15],
names=streams15, return_df=True, plot=False)
ts_copy15 = ssa.ts.copy()
ts_copy15['Reconstruction'] = reconstructed15.Reconstruction.values
ts_copy15.plot(title='Original vs. Reconstructed Time Series [First 15 RCs]');
ssa.forecast_recurrent(steps_ahead=120, singular_values=streams15, plot=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Každý řádek představuje cenu pro daný den a to nejvyšší (High), nejnižší (Low), otevírací (Open - začátek dne) a uzavírací (Close - konec dne). Volatilní pohyb pro daný den pak vidím v grafu na první pohled jako výrazné velké svíce (viz. graf 1). Abych je mohl automaticky v mém analytickém softwaru (python - pandas) označit, definuji volatilní svíce pomocí pravidla například jako
Step2: Nyní znám přesnou změnu ceny každý den. Abych mohl porovnávat velikosti, aniž by mi záleželo na tom, zda se daný den cena propadla, nebo stoupla, aplikuji absolutní hodnotu.
Step3: Identifikace volatilní úsečky
Step4: Které svíce jsou volatilnější, než 4 předchozí, si zobrazím pomocí jednoduché selekce, kde ve sloupečku VolBar == 1.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import pandas_datareader.data as web
import datetime
start = datetime.datetime(2015, 1, 1)
end = datetime.datetime(2018, 8, 31)
spy_data = web.DataReader('SPY', 'yahoo', start, end)
spy_data = spy_data.drop(['Volume', 'Adj Close'], axis=1) # sloupce 'Volume' a 'Adj Close' nebudu potřebovat
spy_data.tail()
spy_data['C-O'] = spy_data['Close'] - spy_data['Open']
spy_data.tail()
spy_data['Abs(C-O)'] = spy_data['C-O'].abs()
spy_data.tail()
def is_bigger(rows):
    """True when the last value in `rows` strictly exceeds the maximum of
    all preceding values (used as a rolling-window predicate)."""
    previous_peak = rows[:-1].max()
    return rows[-1] > previous_peak
spy_data['VolBar'] = spy_data['Abs(C-O)'].rolling(4).apply(is_bigger,raw=True)
spy_data.tail(10)
spy_data[spy_data['VolBar'] == 1].tail()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fitting a decaying oscillation
Step2: Now, using curve_fit to fit this model and determine the estimates and uncertainties for the parameters
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
data = np.load('decay_osc.npz')
tdata = data['tdata']
ydata = data['ydata']
dy = data['dy']
plt.errorbar(tdata, ydata, dy, fmt='.k', ecolor='grey')
plt.xlabel('t')
plt.ylabel('y');
assert True # leave this to grade the data import and raw data plot
def model(t, a, lam, omega, delta):
    """Exponentially decaying oscillation: a * exp(-lam*t) * cos(omega*t) + delta."""
    envelope = a * np.exp(-lam * t)
    return envelope * np.cos(omega * t) + delta
# Fit the decaying-oscillation model, weighting by the measurement errors
# `dy` (absolute_sigma=True keeps the covariance in data units).
theta_best, theta_cov = opt.curve_fit(model, tdata, ydata, p0=[-5, 0, -1, 0],
                                      sigma=dy, absolute_sigma=True)
Afit, lamfit, omegafit, deltafit = theta_best
# 1-sigma parameter uncertainties are the SQUARE ROOTS of the covariance
# diagonal; the original printed the raw variances as "+/-" values.
Aerr, lamerr, omegaerr, deltaerr = np.sqrt(np.diag(theta_cov))
print('A = {0:.3f} +/- {1:.3f}'.format(Afit, Aerr))
print('lambda = {0:.3f} +/- {1:.3f}'.format(lamfit, lamerr))
print('omega = {0:.3f} +/- {1:.3f}'.format(omegafit, omegaerr))
print('delta = {0:.3f} +/- {1:.3f}'.format(deltafit, deltaerr))
# Overlay the best-fit curve on the data.
tfit = np.linspace(0, 20, 61)
yfit = model(tfit, Afit, lamfit, omegafit, deltafit)
plt.errorbar(tdata, ydata, dy, fmt='.k', ecolor='grey')
plt.plot(tfit, yfit, 'r-')
plt.xlabel('t')
plt.ylabel('y');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Once the database is retrieved, collections can be accessed as attributes of the database itself.
Step2: Each inserted document will receive an ObjectId which is a uniquue identifier of the document, the ObjectId is based on some data like the current timestamp, server identifier process id and other data that guarantees it to be unique across multiple servers.
Step3: Fetching back inserted document can be done using find and find_one methods of collections. Both methods accept a query expression that filters the returned documents. Omitting it means retrieving all the documents (or in case of find_one the first document).
Step4: Filters in mongodb are described by Documents themselves, so in case of PyMongo they are dictionaries too.
Step5: Updating Documents
Step6: SubDocuments
Step7: Indexing
Step8: Checking which index MongoDB is using to perform a query can be done using the explain method, forcing an index into a query can be done using the hint method.
Step9: Aggregation Pipeline
Step10: MapReduce
Step11: Exporting from MongoDB
Step12: You will notice that the map_reduce command has now been properly split across the nodes of our cluster. Our shards should report in their logs something like
Step13: When working on one value use map.
|
<ASSISTANT_TASK:>
Python Code:
from pymongo import MongoClient
client = MongoClient('mongodb://localhost:27017/')
db = client.phonebook
print db.collection_names()
data = {'name': 'Alessandro', 'phone': '+39123456789'}
db.people.insert(data)
print db.collection_names()
db.people.insert({'name': 'Puria', 'phone': '+39123456788', 'other_phone': '+3933332323'}, w=0)
try:
db.people.insert({'name': 'Puria', 'phone': '+39123456789'}, w=2)
except Exception as e:
print e
db.people.find_one({'name': 'Alessandro'})
from bson import ObjectId
db.people.find_one({'_id': {'$gt': ObjectId('55893a1d7ab71c669f4c149e')}})
doc = db.people.find_one({'name': 'Alessandro'})
print '\nBefore Updated:', doc
db.people.update({'name': 'Alessandro'}, {'name': 'John Doe'})
doc = db.people.find_one({'name': 'John Doe'})
print '\nAfter Update:', doc
# Go back to previous state
db.people.update({'name': 'John Doe'}, {'$set': {'phone': '+39123456789'}})
print '\nAfter $set phone:', db.people.find_one({'name': 'John Doe'})
db.people.update({'name': 'John Doe'}, {'$set': {'name': 'Alessandro'}})
print '\nAfter $set name:', db.people.find_one({'name': 'Alessandro'})
db.blog.insert({'title': 'MongoDB is great!',
'author': {'name': 'Alessandro',
'surname': 'Molina',
'avatar': 'http://www.gravatar.com/avatar/7a952cebb086d2114080b4b39ed83cad.png'},
'tags': ['mongodb', 'web', 'scaling']})
db.blog.find_one({'title': 'MongoDB is great!'})
db.blog.find_one({'tags': 'mongodb'})
db.blog.find_one({'author.name': 'Alessandro'})
TAGS = ['mongodb', 'web', 'scaling', 'cooking']
import random
for postnum in range(1, 5):
db.blog.insert({'title': 'Post %s' % postnum,
'author': {'name': 'Alessandro',
'surname': 'Molina',
'avatar': 'http://www.gravatar.com/avatar/7a952cebb086d2114080b4b39ed83cad.png'},
'tags': random.sample(TAGS, 2)})
for post in db.blog.find({'tags': {'$in': ['scaling', 'cooking']}}):
print post['title'], '->', ', '.join(post['tags'])
db.blog.ensure_index([('tags', 1)])
db.blog.find({'tags': 'mongodb'}).explain()['queryPlanner']['winningPlan']
db.blog.find({'tags': 'mongodb'}).hint([('_id', 1)]).explain()['queryPlanner']['winningPlan']
db.blog.find({'title': 'Post 1'}).explain()['queryPlanner']['winningPlan']
db.blog.ensure_index([('author.name', 1), ('title', 1)])
db.blog.find({'author.name': 'Alessandro'}, {'title': True, '_id': False}).explain()['queryPlanner']['winningPlan']
from pymongo import MongoClient
client = MongoClient('mongodb://localhost:27017/')
db = client.twitter
# How many professors wrote a tweet?
print len(list(db.tweets.aggregate([
{'$match': {'user.description': {'$regex': 'Professor'}}}
])))
# Count them using only the pipeline
print db.tweets.aggregate([
{'$match': {'user.description': {'$regex': 'Professor'}}},
{'$group': {'_id': 'count', 'count': {'$sum': 1}}}
]).next()['count']
# Hashtags frequency
print list(db.tweets.aggregate([
{'$project': {'tags': '$entities.hashtags.text', '_id': 0}},
{'$unwind': '$tags'},
{'$group': {'_id': '$tags', 'count': {'$sum': 1}}},
{'$match': {'count': {'$gt': 20}}}
]))
freqs = db.tweets.map_reduce(
map='''function() {
var tags = this.entities.hashtags;
for(var i=0; i<tags.length; i++)
emit(tags[i].text, 1);
}''',
reduce='''function(key, values) {
return Array.sum(values);
}''',
out='tagsfrequency'
)
print(list(
db.tagsfrequency.find({'value': {'$gt': 10}}).sort([('value', -1)])
))
print freqs
db.tweets.find_one()
freqs = db.tweets.map_reduce(
map='''function() {
var tags = this.user.screen_name;
emit(tags, 1);
}''',
reduce='''function(key, values) {
return Array.sum(values);
}''',
out='namefrequency'
)
print(list(
db.namefrequency.find().sort([('value', -1)]).limit(10)
))
from pymongo import MongoClient
client = MongoClient('mongodb://localhost:27017/')
db = client.twitter
db.tweets.map_reduce(
map='''function() {
var tags = this.entities.hashtags;
for(var i=0; i<tags.length; i++)
emit(tags[i].text, 1);
}''',
reduce='''function(key, values) {
return Array.sum(values);
}''',
out='tagsfrequency'
)
print(list(
db.tagsfrequency.find({'value': {'$gt': 10}})
))
sc
data = sc.parallelize([1,2,3,4,5,6])
data.first()
def multiply(value):
    """Double the input value (used with RDD.map)."""
    doubled = value * 2
    return doubled
multdata = data.map(multiply)
print multdata
multdata.collect()
sc.defaultParallelism
values = range(20)
print values
rddvalues = sc.parallelize(values)
def sumall(partition):
    """Yield the whole partition as a single list (despite the name, it does
    not sum anything — used with RDD.mapPartitions)."""
    yield list(partition)
rddvalues.mapPartitions(sumall).collect()
def summap(v):
    """Increment a value by one (used with RDD.map)."""
    incremented = v + 1
    return incremented
rddvalues.map(summap).collect()
def sumtwo(a, b):
    """Add two values (used as the RDD.reduce combiner)."""
    total = a + b
    return total
rddvalues.reduce(sumtwo)
low = sc.parallelize(range(1,10))
high = sc.parallelize(range(5,15))
low.union(high).collect()
low.intersection(high).collect()
low.union(high).distinct().collect()
text = sc.textFile('./Jungle_Book.txt')
print text
text.count()
def splitlines(line):
    """Whitespace-tokenize one line of text (used with RDD.flatMap)."""
    tokens = line.split()
    return tokens
words = text.flatMap(splitlines)
words.count()
words.take(15)
def get_freq(word):
    """Pair a word with an initial count of 1 (map step)."""
    return (word, 1)
def get_count(a, b): # this will be used with reduceByKey
    """Combine two partial counts."""
    return a + b
def switch_tuple(t): # this is needed to sort by Key (flips the passed tuple)
    """Swap a (key, value) pair to (value, key) so sortByKey orders by count."""
    value, key = t[1], t[0]
    return value, key
print text.flatMap(splitlines).map(get_freq).reduceByKey(get_count).map(switch_tuple).sortByKey(0, 1).collect()[:20]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The model depicts an economy with multiple periods and an infinite number of producers, all of whom are trying to set prices as close as possible to some target price. This target price is given by a linear combination of demand and average price
Step2: What we have now is a pandas dataframe containing the randomly-generated demand shocks and the demand, which is the result of the accumulation of those shocks in each period. Every time the cell is run, a new demand series will be generated.
Step3: The next step is to model the target price, which can be quite nasty.
Step4: With the pattern of updating in the macroeconomy thus defined, we can evaluate the value of average price, which at each period is a weighted accumulation of the epsilon shocks. Each period's shock is weighed both by how important coordination is in the target price (value of r) and how many other people are up to date on demand (L), the idea being that one period's shock may not matter that much if, during that period, not many other people are aware of it.
Step5: We can thus define target price as above.
Step6: But tables can only tell us so much. Graphs are much more revealing.
Step7: The shocks to cumulative demand are reflected in the variations of target price, but much less extremely, as would be expected from the formula for target price which weighs cumulative demand less than fully.
Step8: It is this lag that ultimately makes up the most important result of Reis. It demonstrates that, in this economy, firms do not respond elastically to changes in demand -- a lot of the time it's better to just go with the flow. And, in the real world, a lot of the time this is the case. The average supermarket, for example, isn't looking so much at factors in the macroeconomy when setting prices, it's looking more at the prices of their competitor down the street. Those macro factors do play in, but in a muted way, and only after other people in the economy have started to get wind of them.
Step9: The formula for expected loss -- that is, the possible disutility that one could get from not updating between tau_hat and t, is given by the following
Step10: Whenever the agent updates, tau_hat resets to that period, and Loss for that period becomes 0 minus the cost c.
Step11: With these updates cemented, we can determine what the agent's price is at that given time -- they just set it to the target price, which they can derive due to their observation of demand. Once they update, however, they don't change their target price at all. After all, without information, their expectation of epsilon, the change in demand each period, is 0.
|
<ASSISTANT_TASK:>
Python Code:
import numpy.random as rand
import matplotlib.pyplot as plt
import pandas as pd
import sys
%matplotlib inline
print('Python version: ', sys.version)
print('Pandas version: ', pd.__version__)
plt.style.use('seaborn-dark-palette')
variance = .25 #sets variance of epsilon (NOTE: passed to rand.normal as `scale`, i.e. used as a standard deviation)
pds = 100 #sets number of periods
r = .7 #sets relative weight of average price on target price
# draw one i.i.d. normal shock per period, then accumulate the shocks into the
# random walk that constitutes (log) cumulative demand
shocklist = [rand.normal(loc=0.0, scale=variance) for _ in range(pds)]
cumulist = []
running_total = 0.0
for shock in shocklist:
    running_total += shock
    cumulist.append(running_total)
demand = pd.DataFrame({'Period shocks': shocklist, 'Cumulative demand': cumulist})
demand.head()
# plot the cumulative (random-walk) demand path over time
fig, ax = plt.subplots()
ax.set_title('Demand')
ax.set_xlabel('t (Period)')
ax.set_ylabel('Demand (Natural Log, Normalized)')
demand['Cumulative demand'].plot(kind = 'line',
                                 ax = ax,
                                 legend = False,)
T = 10  # period by which everyone is guaranteed to have updated at least once; adjustable

def L(t, tau):
    """Fraction of agents at period t whose information is no older than period tau.

    Zero before period T (no mass has updated yet); one for tau <= T, since
    everyone has updated at least once by T; otherwise it follows from each
    period's update proportion (1/T) being independent.
    """
    if t < T:
        return 0
    if tau <= T:
        return 1
    return 1 - (1 - 1 / T) ** (t - tau)
def eps(t): #finds the float value of the period t shock
    """Return the period-t demand shock epsilon_t as a plain float."""
    return float(demand['Period shocks'][demand.index == t])
def p_avg(t): #average price
    """Average price across firms at period t, aggregating shocks over
    information vintages via the updating fractions L(t, s)."""
    return sum([((L(t,s)*(1-r))/(1-(r*L(t,s))))*eps(s) for s in range(t)])
    #formula for average price was outlined in Veldkamp (2014)
demand['Average price'] = [p_avg(t) for t in range(pds)]
def p_target(t):
    """Target price at period t: cumulative demand m_t weighted by (1-r)
    plus the average price weighted by r."""
    m = float(demand['Cumulative demand'][demand.index == t])
    tar = (1-r)*m + r*p_avg(t)
    #p_target as defined in the original problem; linear combination of demand and p_avg
    return tar
demand['Target price'] = [p_target(t) for t in range(pds)]
#Now the table for demand has four columns:
#Cumulative demand, period shocks, average prices, target prices
demand.head(2 * T)
# cumulative demand vs. the (smoother) target price
fig1, ax1 = plt.subplots()
ax1.set_xlabel('Period')
ax1.set_ylabel('Demand/Price (Natural Log, Normalized)')
demand[["Cumulative demand", "Target price"]].plot(kind = 'line', ax = ax1)
# target price vs. average price, on the same y-limits for comparability
fig2, ax2 = plt.subplots()
ax2.set_xlabel('Period')
ax2.set_ylabel('Price (Natural Log)')
ax2.set_ylim(ax1.get_ylim())
demand[["Target price", "Average price"]].plot(kind = 'line', ax = ax2)
c = .5  # fixed cost paid whenever the agent updates its information
def Loss(t, tau_hat):
    """Expected disutility at period t from not having updated since tau_hat.

    Sums the variance-weighted terms for every period since the last update;
    grows with the time elapsed since tau_hat.
    """
    sm = 0
    for k in range(tau_hat + 1, t):
        sm += (1-r)/(1-(r*L(t, k)))
    return sm * variance
loss_list = [Loss(t, T) for t in range(pds)]  # baseline loss assuming last update at T
loss_list1 = loss_list.copy()  # working copy, mutated as updates occur
update_pds = []  # periods in which the agent pays c and updates
for x in range(pds):
    if x == T:
        tau_hat = x #predetermined point at which everyone updates
        update_pds.append(x)
    elif loss_list1[x] > c:
        tau_hat = x #x becomes the new "last period updated"
        update_pds.append(x)
        loss_list1[x] = 0.0
        for s in range(x+1, pds): #all future periods are affected by this new tau_hat
            loss_list1[s] = Loss(s, tau_hat)
# mark each period "Yes"/"No" according to whether an update happened then
update_check = []
for x in range(pds):
    if x in update_pds: update_check.append("Yes")
    else: update_check.append("No")
demand['Update?'] = update_check
demand.tail(10)
# sticky-information pricing: set price to the current target whenever the
# agent updates, and hold the last set price otherwise
agent_price = []
curr_price = 0
for t in range(pds):
    if update_check[t] == "Yes":
        agent_price.append(p_target(t))
        curr_price = p_target(t)
    else: agent_price.append(curr_price)
demand["Agent price"] = agent_price
demand.tail(10)
# compare the sticky agent price against the target and average price paths
fig4, ax4 = plt.subplots()
ax4.set_xlabel('Period')
ax4.set_ylabel('Price (Natural Log)')
ax4.set_ylim(ax1.get_ylim())
fig3, ax3 = plt.subplots()
ax3.set_xlabel('Period')
ax3.set_ylabel('Price (Natural Log)')
ax3.set_ylim(ax1.get_ylim())
demand[["Agent price", "Target price"]].plot(kind = 'line', ax = ax4)
demand[["Agent price", "Average price"]].plot(kind = 'line', ax = ax3)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Gaussian Mixture Models and Expectation Maximisation in Shogun
Step2: Set up the model in Shogun
Step3: Sampling from mixture models
Step4: Evaluating densities in mixture Models
Step5: Density estimating with mixture models
Step6: Imagine you did not know the true generating process of this data. What would you think just looking at it? There are clearly at least two components (or clusters) that might have generated this data, but three also looks reasonable. So let us try to learn a Gaussian mixture model on those.
Step7: So far so good, now lets plot the density of this GMM using the code from above
Step8: It is also possible to access the individual components of the mixture distribution. In our case, we can for example draw 95% ellipses for each of the Gaussians using the method from above. We will do this (and more) below.
Step9: Clustering with mixture models
Step10: These are clusterings obtained via the true mixture model and the one learned via EM. There is a slight subtlety here
Step11: Note how the lower left and middle cluster are overlapping in the sense that points at their intersection have similar likelihoods. If you do not care at all about this and are just interested in a partitioning of the space, simply choose the maximum.
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
%matplotlib inline
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
# import all Shogun classes
from shogun import *
from matplotlib.patches import Ellipse
# a tool for visualisation
def get_gaussian_ellipse_artist(mean, cov, nstd=1.96, color="red", linewidth=3):
    """Return an Ellipse artist covering ``nstd`` standard deviations of the
    2D Gaussian specified by ``mean`` and covariance ``cov``.

    Fix: the docstring's triple quotes were missing (a syntax error); they
    are restored here.  Relies on ``eigh``/``arctan2``/``sqrt``/``Ellipse``
    being in scope via ``%pylab`` and the matplotlib import above.
    """
    # compute eigenvalues (ordered, largest first) -> principal axes of the ellipse
    vals, vecs = eigh(cov)
    order = vals.argsort()[::-1]
    vals, vecs = vals[order], vecs[:, order]
    # rotation of the principal axis relative to the x-axis, in degrees
    theta = numpy.degrees(arctan2(*vecs[:, 0][::-1]))
    # width and height are "full" widths, not radius
    width, height = 2 * nstd * sqrt(vals)
    e = Ellipse(xy=mean, width=width, height=height, angle=theta,
                edgecolor=color, fill=False, linewidth=linewidth)
    return e
# create mixture of three Gaussians
num_components=3
num_max_samples=100
gmm=GMM(num_components)
dimension=2
# set means (TODO interface should be to construct mixture from individuals with set parameters)
means=zeros((num_components, dimension))
means[0]=[-5.0, -4.0]
means[1]=[7.0, 3.0]
means[2]=[0, 0.]
[gmm.set_nth_mean(means[i], i) for i in range(num_components)]
# set covariances
# NOTE(review): covs[0] is not symmetric (1.3 vs .6) -- possibly a typo; confirm
covs=zeros((num_components, dimension, dimension))
covs[0]=array([[2, 1.3],[.6, 3]])
covs[1]=array([[1.3, -0.8],[-0.8, 1.3]])
covs[2]=array([[2.5, .8],[0.8, 2.5]])
[gmm.set_nth_cov(covs[i],i) for i in range(num_components)]
# set mixture coefficients, these have to sum to one (TODO these should be initialised automatically)
weights=array([0.5, 0.3, 0.2])
gmm.put('m_coefficients', weights)
# now sample from each component separately first, then from the joint model
hold(True)
colors=["red", "green", "blue"]
for i in range(num_components):
    # draw a number of samples from current component and plot
    num_samples=int(rand()*num_max_samples)+1
    # emulate sampling from one component by giving it all the mixture weight
    # (TODO fix interface of GMM to handle this)
    w=zeros(num_components)
    w[i]=1.
    gmm.put('m_coefficients', w)
    # sample and plot (TODO fix interface to have loop within)
    X=array([gmm.sample() for _ in range(num_samples)])
    plot(X[:,0], X[:,1], "o", color=colors[i])
    # draw 95% ellipsoid for current component
    gca().add_artist(get_gaussian_ellipse_artist(means[i], covs[i], color=colors[i]))
hold(False)
_=title("%dD Gaussian Mixture Model with %d components" % (dimension, num_components))
# restore the true mixture weights (per-component sampling above was a hack)
gmm.put('m_coefficients', weights)
# generate a grid over the full space and evaluate components PDF
resolution=100
Xs=linspace(-10,10, resolution)
Ys=linspace(-8,6, resolution)
pairs=asarray([(x,y) for x in Xs for y in Ys])
# index [3] (== num_components) appears to hold the joint log-likelihood
# returned by cluster() -- confirm against the Shogun GMM docs
D=asarray([gmm.cluster(pairs[i])[3] for i in range(len(pairs))]).reshape(resolution,resolution)
figure(figsize=(18,5))
subplot(1,2,1)
pcolor(Xs,Ys,D)
xlim([-10,10])
ylim([-8,6])
title("Log-Likelihood of GMM")
subplot(1,2,2)
pcolor(Xs,Ys,exp(D))
xlim([-10,10])
ylim([-8,6])
_=title("Likelihood of GMM")
# sample and plot (TODO fix interface to have loop within)
X=array([gmm.sample() for _ in range(num_max_samples)])
plot(X[:,0], X[:,1], "o")
_=title("Samples from GMM")
def estimate_gmm(X, num_components):
    """Fit a GMM with ``num_components`` components to data ``X`` via EM.

    X: array of shape (n_samples, n_features); transposed below because
    Shogun stores features column-wise.
    """
    # bring data into shogun representation (note that Shogun data is in column vector form, so transpose)
    feat=features(X.T)
    gmm_est=GMM(num_components)
    gmm_est.set_features(feat)
    # learn GMM
    gmm_est.train_em()
    return gmm_est
component_numbers=[2,3]  # candidate model sizes to fit with EM
# plot true likelihood
D_true=asarray([gmm.cluster(pairs[i])[num_components] for i in range(len(pairs))]).reshape(resolution,resolution)
figure(figsize=(18,5))
subplot(1,len(component_numbers)+1,1)
pcolor(Xs,Ys,exp(D_true))
xlim([-10,10])
ylim([-8,6])
title("True likelihood")
for n in range(len(component_numbers)):
    # TODO get rid of these hacks and offer nice interface from Shogun
    # learn GMM with EM
    gmm_est=estimate_gmm(X, component_numbers[n])
    # evaluate at a grid of points
    D_est=asarray([gmm_est.cluster(pairs[i])[component_numbers[n]] for i in range(len(pairs))]).reshape(resolution,resolution)
    # visualise densities
    subplot(1,len(component_numbers)+1,n+2)
    pcolor(Xs,Ys,exp(D_est))
    xlim([-10,10])
    ylim([-8,6])
    _=title("Estimated likelihood for EM with %d components"%component_numbers[n])
# function to draw ellipses for all components of a GMM
def visualise_gmm(gmm, color="blue"):
    """Overlay a 95% ellipse for each Gaussian component of ``gmm`` on the current axes."""
    for i in range(gmm.get_num_components()):
        component=Gaussian.obtain_from_generic(gmm.get_component(i))
        gca().add_artist(get_gaussian_ellipse_artist(component.get_mean(), component.get_cov(), color=color))
# multiple runs to illustrate random initialisation matters
for _ in range(3):
    figure(figsize=(18,5))
    subplot(1, len(component_numbers)+1, 1)
    plot(X[:,0],X[:,1], 'o')
    # NOTE(review): this panel is titled "True components" but draws gmm_est
    # (the last estimate), not gmm -- confirm whether gmm was intended
    visualise_gmm(gmm_est, color="blue")
    title("True components")
    for i in range(len(component_numbers)):
        gmm_est=estimate_gmm(X, component_numbers[i])
        subplot(1, len(component_numbers)+1, i+2)
        plot(X[:,0],X[:,1], 'o')
        visualise_gmm(gmm_est, color=colors[i])
        # TODO add a method to get likelihood of full model, retraining is inefficient
        likelihood=gmm_est.train_em()
        _=title("Estimated likelihood: %.2f (%d components)"%(likelihood,component_numbers[i]))
def cluster_and_visualise(gmm_est):
    """Assign each training point to its most likely component of ``gmm_est``
    and scatter-plot the points coloured by cluster index."""
    # obtain cluster index for each point of the training data
    # TODO another hack here: Shogun should allow to pass multiple points and only return the index
    # as the likelihood can be done via the individual components
    # In addition, argmax should be computed for us, although log-pdf for all components should also be possible
    clusters=asarray([argmax(gmm_est.cluster(x)[:gmm.get_num_components()]) for x in X])
    # visualise points by cluster
    hold(True)
    for i in range(gmm.get_num_components()):
        indices=clusters==i
        plot(X[indices,0],X[indices,1], 'o', color=colors[i])
    hold(False)
# learn gmm again
gmm_est=estimate_gmm(X, num_components)
# side-by-side clusterings under the true and the EM-estimated model
figure(figsize=(18,5))
subplot(121)
cluster_and_visualise(gmm)
title("Clustering under true GMM")
subplot(122)
cluster_and_visualise(gmm_est)
_=title("Clustering under estimated GMM")
# colour the data by per-component likelihood to show soft assignments
figure(figsize=(18,5))
for comp_idx in range(num_components):
    subplot(1,num_components,comp_idx+1)
    # evaluated likelihood under current component
    # TODO Shogun should do the loop and allow to specify component indices to evaluate pdf for
    # TODO distribution interface should be the same everywhere
    component=Gaussian.obtain_from_generic(gmm.get_component(comp_idx))
    cluster_likelihoods=asarray([component.compute_PDF(X[i]) for i in range(len(X))])
    # normalise to [0, 1] for the colour map
    cluster_likelihoods-=cluster_likelihoods.min()
    cluster_likelihoods/=cluster_likelihoods.max()
    # plot, coloured by likelihood value
    cm=get_cmap("jet")
    hold(True)
    for j in range(len(X)):
        color = cm(cluster_likelihoods[j])
        plot(X[j,0], X[j,1] ,"o", color=color)
    hold(False)
    title("Data coloured by likelihood for component %d" % comp_idx)
# compute cluster index for every point in space
D_est=asarray([gmm_est.cluster(pairs[i])[:num_components].argmax() for i in range(len(pairs))]).reshape(resolution,resolution)
# visualise clustering
cluster_and_visualise(gmm_est)
# visualise space partitioning
hold(True)
pcolor(Xs,Ys,D_est)
hold(False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: <h3> Create a simple machine learning model </h3>
Step4: <h3> What is wrong with calculating RMSE on the training and test data as follows? </h3>
Step6: Hint
Step8: <h2> Using HASH of date to split the data </h2>
Step10: We can now use the alpha to compute RMSE. Because the alpha value is repeatable, we don't need to worry that the alpha in the compute_rmse will be different from the alpha computed in the compute_alpha.
|
<ASSISTANT_TASK:>
Python Code:
from google.cloud import bigquery
# SQL string: the surrounding triple quotes were missing (syntax error) and are
# restored here.  Estimates alpha = sum(y*x)/sum(x*x) on a random ~80% split.
# NOTE: RAND() makes the split non-repeatable between queries.
compute_alpha = """
#standardSQL
SELECT
  SAFE_DIVIDE(
    SUM(arrival_delay * departure_delay),
    SUM(departure_delay * departure_delay)) AS alpha
FROM
(
  SELECT
    RAND() AS splitfield,
    arrival_delay,
    departure_delay
  FROM
    `bigquery-samples.airline_ontime_data.flights`
  WHERE
    departure_airport = 'DEN'
    AND arrival_airport = 'LAX'
)
WHERE
  splitfield < 0.8
"""
results = bigquery.Client().query(compute_alpha).to_dataframe()
alpha = results['alpha'][0]
print(alpha)
# SQL string: triple quotes restored (they were missing, a syntax error).
# Computes RMSE of the alpha model per dataset; the random split here is
# DIFFERENT from the one used to compute alpha -- the point of this lesson.
compute_rmse = """
#standardSQL
SELECT
  dataset,
  SQRT(
    AVG(
      (arrival_delay - ALPHA * departure_delay) *
      (arrival_delay - ALPHA * departure_delay)
    )
  ) AS rmse,
  COUNT(arrival_delay) AS num_flights
FROM (
  SELECT
    IF (RAND() < 0.8, 'train', 'eval') AS dataset,
    arrival_delay,
    departure_delay
  FROM
    `bigquery-samples.airline_ontime_data.flights`
  WHERE
    departure_airport = 'DEN'
    AND arrival_airport = 'LAX' )
GROUP BY
  dataset
"""
# ALPHA placeholder is substituted with the previously estimated value
bigquery.Client().query(compute_rmse.replace('ALPHA', str(alpha))).to_dataframe()
# SQL string: triple quotes restored (they were missing, a syntax error).
# Single query: split once via a WITH clause, train alpha on the train split,
# then evaluate RMSE on both splits with the SAME split assignment.
train_and_eval_rand = """
#standardSQL
WITH
  alldata AS (
    SELECT
      IF (RAND() < 0.8, 'train', 'eval') AS dataset,
      arrival_delay,
      departure_delay
    FROM
      `bigquery-samples.airline_ontime_data.flights`
    WHERE
      departure_airport = 'DEN'
      AND arrival_airport = 'LAX' ),
  training AS (
    SELECT
      SAFE_DIVIDE(
        SUM(arrival_delay * departure_delay),
        SUM(departure_delay * departure_delay)) AS alpha
    FROM
      alldata
    WHERE
      dataset = 'train' )
SELECT
  MAX(alpha) AS alpha,
  dataset,
  SQRT(
    AVG(
      (arrival_delay - alpha * departure_delay) *
      (arrival_delay - alpha * departure_delay)
    )
  ) AS rmse,
  COUNT(arrival_delay) AS num_flights
FROM
  alldata,
  training
GROUP BY
  dataset
"""
bigquery.Client().query(train_and_eval_rand).to_dataframe()
# SQL string: triple quotes restored (they were missing, a syntax error).
# Repeatable split: hash the date with FARM_FINGERPRINT so the same rows land
# in the training set every time the query runs.
compute_alpha = """
#standardSQL
SELECT
  SAFE_DIVIDE(
    SUM(arrival_delay * departure_delay),
    SUM(departure_delay * departure_delay)) AS alpha
FROM
  `bigquery-samples.airline_ontime_data.flights`
WHERE
  departure_airport = 'DEN'
  AND arrival_airport = 'LAX'
  AND ABS(MOD(FARM_FINGERPRINT(date), 10)) < 8
"""
results = bigquery.Client().query(compute_alpha).to_dataframe()
alpha = results['alpha'][0]
print(alpha)
# SQL string: triple quotes restored (they were missing, a syntax error).
# Because the hash split is deterministic, the eval RMSE uses exactly the rows
# that were held out when alpha was computed.
compute_rmse = """
#standardSQL
SELECT
  IF(ABS(MOD(FARM_FINGERPRINT(date), 10)) < 8, 'train', 'eval') AS dataset,
  SQRT(
    AVG(
      (arrival_delay - ALPHA * departure_delay) *
      (arrival_delay - ALPHA * departure_delay)
    )
  ) AS rmse,
  COUNT(arrival_delay) AS num_flights
FROM
  `bigquery-samples.airline_ontime_data.flights`
WHERE
  departure_airport = 'DEN'
  AND arrival_airport = 'LAX'
GROUP BY
  dataset
"""
print(bigquery.Client().query(compute_rmse.replace('ALPHA', str(alpha))).to_dataframe().head())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: FMO dynamics simulated with Redfield theory
Step2: Excited state dynamics
Step3: The long-time evolution indeed approaches thermal equilibrium
Step4: Gaussian pump pulse
Step5: Absorption spectra
Step6: Pump-probe spectra
|
<ASSISTANT_TASK:>
Python Code:
import qspectra as qs
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# 7x7 FMO electronic Hamiltonian in cm^-1 (site energies on the diagonal,
# couplings off-diagonal).  Fix: the string argument to np.mat had lost its
# quotes (a syntax error); rebuilt as a plain, non-deprecated np.array.
electronic_fmo = np.array([
    [12400., -87.7, 5.5, -5.9, 6.7, -13.7, -9.9],
    [-87.7, 12520., 30.8, 8.2, 0.7, 11.8, 4.3],
    [5.5, 30.8, 12200., -53.5, -2.2, -9.6, 6.],
    [-5.9, 8.2, -53.5, 12310., -70.7, -17., -63.3],
    [6.7, 0.7, -2.2, -70.7, 12470., 81.1, -1.3],
    [-13.7, 11.8, -9.6, -17., 81.1, 12620., 39.7],
    [-9.9, 4.3, 6., -63.3, -1.3, 39.7, 12430.],
])
# unit transition-dipole vectors for the 7 sites (one row per site, normalized)
dipoles_fmo = np.array([d / np.linalg.norm(d) for d in
                        np.array([[3.019, 3.442, 0.797, 3.213, 2.969, 0.547, 1.983],
                                  [2.284, -2.023, -3.871, 2.145, -2.642, 3.562, 2.837],
                                  [1.506, 0.431, 0.853, 1.112, -0.661, -1.851, 2.015]]).T])
# Hamiltonian with a Debye bath at 77 K and static disorder of 100 (same units as the couplings)
fmo_hamiltonian = qs.ElectronicHamiltonian(electronic_fmo,
                                           bath=qs.DebyeBath(qs.CM_K * 77, 35, 106),
                                           dipoles=dipoles_fmo,
                                           disorder=100)
# Redfield model over ground/excited/doubly-excited subspaces; CM_FS converts units
dynamical_model = qs.RedfieldModel(fmo_hamiltonian, hilbert_subspace='gef', unit_convert=qs.CM_FS)
# NOTE: Python 2 notebook (print statements, xrange)
%time t, rho = qs.simulate_dynamics(dynamical_model, [1,0,0,0,0,0,0], 100000)
plt.title('Excitation from site 1')
# diagonal of the 7x7 density matrix at each time step = site populations
plt.plot(t, np.einsum('tii->ti', rho.reshape(-1, 7, 7)).real)
plt.xlim(0, 1000)
plt.xlabel('Time (fs)')
plt.ylabel('Population')
plt.legend(['Site {}'.format(n) for n in xrange(1, 8)]);
# compare long-time populations against the thermal-equilibrium state
print np.diag(qs.ket_vec_to_matrix(rho[-1])).real
print np.diag(fmo_hamiltonian.thermal_state('e')).real
%%time
# Gaussian pump pulse centred at 12500 cm^-1
pump = qs.GaussianPulse(12500, 50, scale=1e-3, freq_convert=qs.CM_FS)
t, rho = qs.simulate_pump(dynamical_model, pump, 'x', 5000)
plt.title('Laser excitation')
plt.plot(t, 2 * pump(t, 12500).real, 'k--')
# site populations of the excited-state block (index 0 = ground state, dropped)
plt.plot(t, np.einsum('tii->ti', rho.reshape(-1, 8, 8)[:, 1:, 1:]).real)
plt.xlabel('Time (fs)')
plt.ylabel('Population')
plt.xlim(pump.t_init, 1000)
plt.legend(np.concatenate([['Laser (arb. units)'], ['Site {}'.format(n) for n in xrange(1, 8)]]));
# absorption: exact isotropic average vs. a small disorder ensemble
%time f, X = qs.absorption_spectra(dynamical_model, 2000, exact_isotropic_average=True)
%time f, X2 = qs.absorption_spectra(dynamical_model, 2000, exact_isotropic_average=True, ensemble_size=5)
plt.plot(f, X)
plt.plot(f, X2)
plt.xlim(12000, 12800);
%%time
# pump-probe contributions: ground-state bleach, stimulated emission, excited-state absorption
X = {}
for signal in ['GSB', 'ESE', 'ESA']:
    f, X[signal] = qs.impulsive_probe(dynamical_model, rho[-1], 5000, 'xx', include_signal=signal)
plt.plot(f, X['GSB'].real, 'b-')
plt.plot(f, X['ESE'].real, 'g-')
plt.plot(f, X['ESA'].real, 'r-')
plt.plot(f, np.sum(X.values(), 0).real, 'k-')
plt.legend(['GSB', 'ESE', 'ESA', 'Total'])
plt.xlim(12000, 12800);
%%time
# time-resolved ESE signal along the whole pumped trajectory
f, X = qs.impulsive_probe(dynamical_model, rho, 5000, 'xx',
                          exact_isotropic_average=True, include_signal='ESE')
plt.title('Pump-probe spectrum (ESE only)')
plt.contourf(t, f, X.real.T, 20)
plt.plot([pump.t_final, pump.t_final], [12000, 12600], 'k--')
plt.ylim(12000, 12600)
plt.xlabel('Population time (fs)')
plt.ylabel('Emission frequency (cm$^{-1}$)');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Linear Regression
Step2: Ridge Regression (L2 penalty)
Step3: Lasso (L1 penalty)
Step4: Linear models for classification
Step5: Multi-Class linear classification
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
# synthetic regression problem: 30 features, only 10 informative, heavy noise
X, y, true_coefficient = make_regression(n_samples=80, n_features=30, n_informative=10, noise=100, coef=True, random_state=5)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=5)
print(X_train.shape)
print(y_train.shape)
from sklearn.linear_model import LinearRegression
linear_regression = LinearRegression().fit(X_train, y_train)
print("R^2 on training set: %f" % linear_regression.score(X_train, y_train))
print("R^2 on test set: %f" % linear_regression.score(X_test, y_test))
from sklearn.metrics import r2_score
# R^2 of the noiseless linear model against the noisy targets, for reference
print(r2_score(np.dot(X, true_coefficient), y))
# compare learned coefficients to the true ones, sorted by true magnitude
plt.figure(figsize=(10, 5))
coefficient_sorting = np.argsort(true_coefficient)[::-1]
plt.plot(true_coefficient[coefficient_sorting], "o", label="true")
plt.plot(linear_regression.coef_[coefficient_sorting], "o", label="linear regression")
plt.legend()
from sklearn.linear_model import Ridge
# fit Ridge (L2 penalty) models over a range of regularization strengths
ridge_models = {}
training_scores = []
test_scores = []
for alpha in [100, 10, 1, .01]:
    ridge = Ridge(alpha=alpha).fit(X_train, y_train)
    training_scores.append(ridge.score(X_train, y_train))
    test_scores.append(ridge.score(X_test, y_test))
    ridge_models[alpha] = ridge
plt.figure()
plt.plot(training_scores, label="training scores")
plt.plot(test_scores, label="test scores")
plt.xticks(range(4), [100, 10, 1, .01])
plt.legend(loc="best")
# coefficient shrinkage: larger alpha pulls coefficients toward zero
plt.figure(figsize=(10, 5))
plt.plot(true_coefficient[coefficient_sorting], "o", label="true", c='b')
for i, alpha in enumerate([100, 10, 1, .01]):
    plt.plot(ridge_models[alpha].coef_[coefficient_sorting], "o", label="alpha = %.2f" % alpha, c=plt.cm.summer(i / 3.))
plt.legend(loc="best")
from sklearn.linear_model import Lasso
# fit Lasso (L1 penalty) models; L1 drives many coefficients exactly to zero
lasso_models = {}
training_scores = []
test_scores = []
for alpha in [30, 10, 1, .01]:
    lasso = Lasso(alpha=alpha).fit(X_train, y_train)
    training_scores.append(lasso.score(X_train, y_train))
    test_scores.append(lasso.score(X_test, y_test))
    lasso_models[alpha] = lasso
plt.figure()
plt.plot(training_scores, label="training scores")
plt.plot(test_scores, label="test scores")
plt.xticks(range(4), [30, 10, 1, .01])
plt.legend(loc="best")
# sparsity pattern of the learned coefficients for each alpha
plt.figure(figsize=(10, 5))
plt.plot(true_coefficient[coefficient_sorting], "o", label="true", c='b')
for i, alpha in enumerate([30, 10, 1, .01]):
    plt.plot(lasso_models[alpha].coef_[coefficient_sorting], "o", label="alpha = %.2f" % alpha, c=plt.cm.summer(i / 3.))
plt.legend(loc="best")
# helper from the course repository; shows the effect of regularization on a linear SVC
from plots import plot_linear_svc_regularization
plot_linear_svc_regularization()
from sklearn.datasets import make_blobs
plt.figure()
X, y = make_blobs(random_state=42)
plt.scatter(X[:, 0], X[:, 1], c=y)
# multi-class: LinearSVC fits one hyperplane (coef_, intercept_) per class
from sklearn.svm import LinearSVC
linear_svm = LinearSVC().fit(X, y)
print(linear_svm.coef_.shape)
print(linear_svm.intercept_.shape)
plt.scatter(X[:, 0], X[:, 1], c=y)
line = np.linspace(-15, 15)
for coef, intercept in zip(linear_svm.coef_, linear_svm.intercept_):
    # decision boundary: coef[0]*x + coef[1]*y + intercept = 0, solved for y
    plt.plot(line, -(line * coef[0] + intercept) / coef[1])
plt.ylim(-10, 15)
plt.xlim(-10, 8)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Вытаскивание даных из файла!
|
<ASSISTANT_TASK:>
Python Code:
def mm(x, k):
    """Multiply the (row-vector) input by a freshly drawn random matrix.

    An input with more than one row is transposed first so it becomes a row
    vector; the random (n_cols, k) factor is printed before the product
    (shape (1, k)) is returned.
    """
    row_vec = x if x.shape[0] <= 1 else x.T
    rand_factor = np.random.rand(row_vec.shape[1], k)
    print(rand_factor)
    return row_vec.dot(rand_factor)
mm(np.array([[1,2, 1,132, 1,2]]), 5)
# decision-surface visualisation on the first two (scaled) features
# NOTE(review): assumes X_train, X_test, y_train, y_test and
# RandomForestClassifier were defined in earlier cells -- confirm
from sklearn.preprocessing import scale
X_train_draw = scale(X_train[::, 0:2])
X_test_draw = scale(X_test[::, 0:2])
clf = RandomForestClassifier(n_estimators=100, n_jobs=-1)
clf.fit(X_train_draw, y_train)
# bounding box of the training data, padded by 1; h is the mesh step
x_min, x_max = X_train_draw[:, 0].min() - 1, X_train_draw[:, 0].max() + 1
y_min, y_max = X_train_draw[:, 1].min() - 1, X_train_draw[:, 1].max() + 1
h = 0.02
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# predict a class for every mesh point, then reshape back to the grid
pred = clf.predict(np.c_[xx.ravel(), yy.ravel()])
pred = pred.reshape(xx.shape)
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
plt.figure()
plt.pcolormesh(xx, yy, pred, cmap=cmap_light)
plt.scatter(X_train_draw[:, 0], X_train_draw[:, 1],
            c=y_train, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("Score: %.0f percents" % (clf.score(X_test_draw, y_test) * 100))
plt.show()
# 1x18 row from an identity slice (leading 1), then mark column 12 with a 5
x = np.eye(1, 18)
x[0, 12] = 5
print(x)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Präambel generieren (BPSK)
Step2: IFFT
Step3: Cyclic Prefix einfügen
Step4: Kanal
Step5: Synchronisation
Step6: Entzerrung
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import random
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
matplotlib.rcParams.update({'font.size': 14})
L = 1024 # number of DFT points
K = 3*L//4 # number of occupied subcarriers, must be even
n_CP = L//4 # cyclic prefix length
threshold = 0.8 # Threshold for presence detection
t_off = 1000 # time offset in samples
f_off = 11.3 # frequency offset in multiples of the subcarrier spacing
h_awgn = [1.,] # channel impulse response for plain AWGN
h_multipath = np.linspace(1, 0, n_CP//2) * np.exp(1j*np.random.uniform(-np.pi, np.pi, n_CP//2)) # multipath channel
h_multipath /= np.sqrt(np.mean(np.abs(h_multipath)**2)) # normalize to unit average power
h = h_awgn # select the channel
SNR = 10 # SNR in dB
pn1 = np.random.choice([-np.sqrt(2), np.sqrt(2)], K//2) # first PN (BPSK) sequence
pn2 = np.random.choice([-1, 1], K) # second PN (BPSK) sequence
c1 = np.zeros((L,))
c1[L//2-K//2:L//2+K//2:2] = pn1 # only even subcarriers are occupied
s1 = np.fft.ifft(np.fft.fftshift(c1))
c2 = np.zeros((L,))
c2[L//2-K//2:L//2+K//2] = pn2 # all subcarriers are occupied
s2 = np.fft.ifft(np.fft.fftshift(c2))
# differential sequence between the even carriers of both symbols (used later for the integer CFO metric)
v = np.sqrt(2)*pn2[::2]/pn1
# transmit frame: CP + symbol 1, CP + symbol 2
s = np.concatenate((s1[-n_CP:], s1, s2[-n_CP:], s2))
# normalized frequency axis and power spectra of both preamble symbols
f = np.linspace(-0.5, 0.5, L, endpoint=False)
S1 = np.abs(np.fft.fftshift(np.fft.fft(s[:L])))**2
plt.plot(f, np.abs(S1))
plt.title("|S1|"); plt.xlabel("Normalisierte Frequenz"); plt.xlim([-0.5, 0.5])
plt.show()
S2 = np.abs(np.fft.fftshift(np.fft.fft(s[L+2*n_CP:])))**2
plt.plot(f, np.abs(S2))
plt.title("|S2|"); plt.xlabel("Normalisierte Frequenz"); plt.xlim([-0.5, 0.5])
plt.show()
# time-domain view of the first synchronization symbol (without CP, real part only)
plt.plot(np.real(s[n_CP:n_CP+L]))
plt.title("Erstes Synchronisationssymbol (ohne CP, nur Realteil)");
plt.ylabel("Amplitude"); plt.xlabel("Sample Index");plt.xlim([0, L])
plt.show()
# convolve with the channel impulse response
r = np.convolve(s, h, mode="full")
filter_delay = (len(h)-1)//2
r = r[filter_delay:] # remove the filter transient
# apply the carrier frequency offset
r *= np.exp(1j*2*np.pi*f_off*np.arange(len(r))/L)
# apply the time offset (prepend t_off zero samples)
r = np.concatenate((np.zeros((t_off,), dtype=complex), r))
# add AWGN scaled to the requested SNR
P_s = np.mean(np.abs(r[t_off:])**2) # average signal power
sigma = 10**(-SNR/20) * np.sqrt(P_s)
n = sigma * (np.sqrt(1/2) * np.random.randn(len(r)) + 1j * np.sqrt(1/2) * np.random.randn(len(r)))
r += n
# Fixed Lag Correlation: the first symbol's half-period repetition makes
# r[i:i+L/2] correlate with r[i+L/2:i+L] at the preamble position
Pd = np.asarray([np.sum(np.conj(r[i:i+L//2])*r[i+L//2:i+L]) for i in range(len(r)-L)])
Rd = np.asarray([np.sum(np.abs(r[i+L//2:i+L])**2) for i in range(len(r) - L)])
M = np.abs(Pd/Rd)**2
plt.plot(np.abs(M)); plt.title("M(d)"); plt.xlabel("Verschiebung"); plt.xlim([0, len(M)]); plt.ylim([0,1.1]); plt.show()
plt.plot(np.angle(Pd)); plt.title("arg(P(d))"); plt.xlabel("Verschiebung"); plt.xlim([0, len(M)]); plt.show()
# signal detection: preamble present if the metric exceeds the threshold anywhere
signal_detected = any(M > threshold)
print("Präambel erkannt!" if signal_detected else "Keine Präambel erkannt.")
# time-offset estimation
if signal_detected:
    # t_est = np.argmax(M)  # not a good idea because of the metric's plateau
    M_over_threshold = [idx for idx in range(len(M)) if M[idx] > threshold]
    t_est = M_over_threshold[0] + 3*n_CP//4 # "ad-hoc" approach: first threshold crossing plus 3/4 CP
    t_valid = [t_off+len(h), t_off+n_CP] # in a real system t_off is of course unknown!
    t_est_valid = t_valid[0] <= t_est < t_valid[1]
    print("Geschätzter Zeitoffset: " + str(t_est) + " Samples. Gültiger Bereich: " + str(t_valid) + ". Schätzung im gültigen Bereich: " + str(t_est_valid))
# fractional frequency offset estimation and correction
if signal_detected:
    # the phase of the fixed-lag correlation at t_est encodes the fractional CFO
    f_F = np.angle(Pd[t_est])/np.pi
    r *= np.exp(-1j*2*np.pi*f_F*np.arange(len(r))/L)
    print("Geschätzter Fractional Frequency Offset: " + str(f_F))
# integer frequency offset estimation and correction
if signal_detected:
    # extract both preamble symbols, cutting off the cyclic prefix
    s1_est = r[t_est:t_est+L]
    s2_est = r[t_est+L+n_CP:t_est+2*L+n_CP]
    # transform to the frequency domain and shift to the natural ordering
    x1 = np.fft.fftshift(np.fft.fft(s1_est))
    x2 = np.fft.fftshift(np.fft.fft(s2_est))
    # compute the metric B(g) over all candidate integer offsets g
    k = np.concatenate((np.arange(-K//2, 0, 2), np.arange(2, K//2, 2))) + L//2
    v_nodc = np.delete(v, K//2//2)
    E_x2 = np.sum(np.abs(x2[k])**2)
    possible_offsets = range(-(L-K)//2//2, (L-K)//2//2, 1)
    # NOTE(review): np.complex is deprecated/removed in modern NumPy -- use complex
    B = np.zeros((len(possible_offsets,)), dtype=np.complex)
    for i, g in enumerate(possible_offsets):
        B[i] = (np.abs(np.sum(np.conj(x1[k+2*g])*np.conj(v_nodc)*x2[k+2*g]))**2)/(2*E_x2**2)
    # integer frequency offset = twice the argmax position, re-centred
    g_est = np.argmax(B)
    f_I = 2*g_est - (L-K)//2
    print("Geschätzter Integer Frequency Offset: " + str(f_I))
    print("Geschätzter Frequenzoffset (integer + fractional): " + str(f_I + f_F))
    plt.plot(possible_offsets, np.abs(B)); plt.title("|B(g)|"); plt.xlabel("g"); plt.xlim([possible_offsets[0], possible_offsets[-1]]); plt.show()
    plt.plot(f, np.abs(x1)); plt.title("|x1|"); plt.xlabel("Normalisierte Frequenz"); plt.xlim([f[0], f[-1]]); plt.show()
    plt.plot(f, np.abs(x2)); plt.title("|x2|"); plt.xlabel("Normalisierte Frequenz"); plt.xlim([f[0], f[-1]]); plt.show()
if signal_detected:
    # occupied band after shifting by the estimated integer offset
    band_start_idx = L//2 - K//2 + f_I
    band_stop_idx = band_start_idx + K
    f_I_valid = band_start_idx >= 0 and band_stop_idx < L
    if(not f_I_valid):
        print("Ungültiger Frequenzoffset!")
    else:
        c1_est = x1[band_start_idx : band_stop_idx]
        c2_est = x2[band_start_idx : band_stop_idx]
        plt.scatter(c1_est.real, c1_est.imag); plt.title("c1 geschätzt, ohne Entzerrung"); plt.xlabel("I"); plt.ylabel("Q"); plt.show()
        plt.scatter(c2_est.real, c2_est.imag); plt.title("c2 geschätzt, ohne Entzerrung"); plt.xlabel("I"); plt.ylabel("Q"); plt.show()
        ZF_eq = 1 / (c2_est / pn2) # compute zero-forcing equalizer coefficients to correct the remaining phase offset
        c1_est_eq = c1_est * ZF_eq
        c2_est_eq = c2_est * ZF_eq
        plt.scatter(ZF_eq.real, ZF_eq.imag); plt.title("Entzerrerkoeffizienten")
        plt.xlabel("I"); plt.ylabel("Q"); plt.show()
        plt.plot(np.abs(ZF_eq)); plt.title("Betrag der Entzerrerkoeffizienten")
        plt.xlabel("Index"); plt.show()
        plt.scatter(c1_est_eq.real, c1_est_eq.imag); plt.title("c1 geschätzt, mit Entzerrung")
        plt.xlabel("I"); plt.ylabel("Q"); plt.show()
        plt.scatter(c2_est_eq.real, c2_est_eq.imag); plt.title("c2 geschätzt, mit Entzerrung")
        plt.xlabel("I"); plt.ylabel("Q"); plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lets see which files we have
Step2: First step is to run the configure step (--bundle-path refers to where external dependencies are downloaded)
Step3: You can see all the available options by running ./waf --help.
Step4: Parsing a mp4 file
Step5: To see the mp4 file content we can use the complete_parser example
|
<ASSISTANT_TASK:>
Python Code:
%%bash
# fetch a clean checkout of the petro mp4-parsing library
rm -rf petro
git clone git@github.com:steinwurf/petro.git
%%bash
ls petro
%%bash
# configure step; --bundle-path is where external dependencies are downloaded
cd petro
./waf configure --bundle-path=~/dev/bundle_dependencies
%%bash
cd petro
./waf build
%%bash
# sample mp4 file to parse
wget http://download.blender.org/peach/bigbuckbunny_movies/BigBuckBunny_320x180.mp4
%%bash
# dump the mp4 box structure with the bundled example parser
./petro/build/linux/examples/complete_parser BigBuckBunny_320x180.mp4
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: All hypotheses discussed herein will be expressed with Gaussian / normal distributions. Let's look at the properties of this distribution.
Step2: Now look at the cumulative distribution function of the standard normal, which integrates from negative infinity up to the function argument, on a unit-normalized distribution.
Step3: The function also accepts a list.
Step4: Now let's be more explicit about the parameters of the distribution.
Step5: In addition to exploring properties of the exact function, we can sample points from it.
Step6: We can also approximate the exact distribution by sampling a large number of points from it.
Step7: Data samples
Step8: Now let's generate a large number of data samples and plot the corresponding distribution of sample means.
Step9: Note that by increasing the number of data points, the variation on the mean decreases.
Step10: Let's numerically determine the sampling distribution under the hypothesis
Step11: With this sampling distribution (which can be calculated exactly), we know exectly how likely a particular result $d(X_0)$ is. We also know how likely it is to observe a result that is even less probable than $d(X_0)$, $P(d(X) > d(X_0); \mu)$.
Step12: Now, imagine that we observe $\bar X_0 = 0.4$. The probability of $\bar X > 0.4$ is less than $2\%$ under $H_0$, so let's say we've rejected $H_0$.
Step13: Calculate the severity of an outcome that is rather unlike (is lower) than the lower bound of a range of alternate hypotheses ($\mu > \mu_1$).
Step14: Calculate the severity for a set of observations.
|
<ASSISTANT_TASK:>
Python Code:
from scipy.stats import norm # properties of the distribution
from numpy.random import normal # samples from the distribution
import numpy as np
import scipy
from matplotlib import pyplot as plt
%matplotlib inline
# plot the standard normal pdf on a fine grid
x = np.arange(-10, 10, 0.001)
plt.plot(x,norm.pdf(x,0,1)) # final arguments are mean and width (the standard deviation)
norm.cdf(0)
norm.cdf([-1., 0, 1])
mu = 0
sigma = 1
norm(loc=mu, scale=sigma)
norm.cdf([-1., 0, 1])
sigma=2
mu = 0
n = norm(loc=mu, scale=sigma)
n.cdf([-1., 0, 1])
[normal() for _ in range(5)]
size = 1000000
num_bins = 300
plt.hist([normal() for _ in range(size)],num_bins)
plt.xlim([-10,10])
n = 10
my_sample = [normal() for _ in range(n)]
my_sample_mean = np.mean(my_sample)
print(my_sample_mean)
n = 10
means_10 = []
for _ in range(10000):
my_sample = [normal() for _ in range(n)]
my_sample_mean = np.mean(my_sample)
means_10.append(my_sample_mean)
plt.hist(means_10,100)
plt.xlim([-1.5,1.5])
plt.xlabel("P(mean(X))")
plt.show()
n = 100
means_100 = []
for _ in range(10000):
my_sample = [normal() for _ in range(n)]
my_sample_mean = np.mean(my_sample)
means_100.append(my_sample_mean)
plt.hist(means_100,100)
plt.xlim([-1.5,1.5])
plt.xlabel("P(mean(X))")
plt.show()
# show 1/sqrt(n) scaling of deviation
n_s = []
std_100 = []
for i in range(1, 1000, 50):
means_100 = []
for _ in range(5000):
my_sample = [normal() for _ in range(i)]
my_sample_mean = np.mean(my_sample)
means_100.append(my_sample_mean)
my_sample_std = np.std(means_100)
std_100.append(1./(my_sample_std*my_sample_std))
n_s.append(i)
plt.scatter(n_s,std_100)
plt.xlim([0,1000])
plt.ylabel("std(mean(X;sample))")
plt.xlabel("sample")
plt.show()
def d(X=(0,), mu=0, sigma=1):
    """Standardized test statistic for the sample mean of X.

    Computes d = (mean(X) - mu) / sigma * sqrt(len(X)), which follows a
    unit normal distribution when X is drawn from N(mu, sigma).

    Parameters
    ----------
    X : sequence of observed values. (Default changed from a mutable list
        ``[0]`` to an equivalent immutable tuple.)
    mu : hypothesized mean of the data distribution.
    sigma : known standard deviation of the data distribution.
    """
    X_bar = np.mean(X)
    return (X_bar - mu) / sigma * np.sqrt(len(X))
n = 10
my_sample = [normal() for _ in range(n)]
d(my_sample)
size = 100000
n = 10
d_sample = []
for _ in range(size):
my_sample = [normal() for _ in range(n)] # get a sample of size n
d_sample.append(d(my_sample)) # add test statistic for this sample to the list
plt.hist(d_sample,100)
plt.xlabel("P(d(X);H0)")
# look at the distributions of sample means for two hypotheses
def make_histograms(mu0=0,mu1=1,num_samples=10000,n=100,sigma=1):
    """Overlay histograms of the sample-mean distribution under two hypotheses.

    Draws `num_samples` samples of size `n` from N(mu0, sigma) (labeled H0)
    and from N(mu1, sigma) (labeled H1), and plots a histogram of the sample
    means from each. Relies on `normal` (numpy.random.normal) and
    matplotlib's `plt` being in scope.
    """
    #d0_sample = []
    #d1_sample = []
    m0_sample = []
    m1_sample = []
    for _ in range(num_samples):
        H0_sample = [normal(loc=mu0,scale=sigma) for _ in range(n)] # get a sample of size n from H0
        H1_sample = [normal(loc=mu1,scale=sigma) for _ in range(n)] # get a sample of size n from H1
        m0_sample.append( np.mean(H0_sample) ) # add mean for this sample to the m0 list
        m1_sample.append( np.mean(H1_sample) ) # add mean for this sample to the m1 list
        # remember that the test statistic is unit-normal-distributed for Gaussian hypotheses,
        # so these distributions should be identical
        #d0_sample.append( d(H0_sample,mu0,sigma) ) # add test statistic for this sample to the d0 list
        #d1_sample.append( d(H1_sample,mu1,sigma) ) # add test statistic for this sample to the d1 list
    plt.hist(m0_sample,100,label="H0")
    plt.hist(m1_sample,100,label="H1")
    plt.xlabel(r"$\bar{X}$")
    plt.legend()
num_samples = 10000
n = 100
mu0 = 0
mu1 = 1
sigma=2
make_histograms(mu0=mu0,mu1=mu1,num_samples=num_samples,n=n,sigma=sigma)
# severity for the interval: mu > mu_1
# note that we calculate the probability in terms of the _lower bound_ of the interval,
# since it will provide the _lowest_ severity
def severity(mu_1=0, x=(0,), mu0=0, sigma=2, n=100):
    """Severity of the inference mu > mu_1 given the observed sample x.

    Evaluated at the lower bound mu_1 of the interval, which yields the
    lowest severity over the interval.

    Parameters
    ----------
    mu_1 : lower bound of the range of alternate hypotheses.
    x : observed data sample. (Default changed from a mutable list to a tuple.)
    mu0 : null-hypothesis mean; kept for interface compatibility but unused.
    sigma : known standard deviation. The original default was ``sigma=sigma``,
        which captured the notebook-global value (2) at definition time; it is
        now an explicit 2 so the function no longer depends on a global.
    n : sample size used to scale the test statistic.

    Returns
    -------
    float : P(d(X) <= d(x); mu = mu_1), i.e. the severity.
    """
    # find the mean of the observed data
    x_bar = np.mean(x)
    # calculate the test statistic w.r.t. mu_1
    dx = (x_bar - mu_1)/sigma*np.sqrt(n)
    # The test statistic is distributed as a unit normal. Use a distinct
    # name so the sample-size parameter ``n`` is not shadowed.
    std_normal = norm()
    return std_normal.cdf(dx)
sigma = 2
mu_1 = 0.2
x = [0.4]
severity(mu_1=mu_1,x=x,sigma=sigma)
num_samples = 10000
n = 100
mu0 = 0
mu1 = 0.2
sigma=2
make_histograms(mu0=mu0,mu1=mu1,num_samples=num_samples,n=n,sigma=sigma)
x_bar_values = [[0.4],[0.6],[1.]]
color_indices = ["b","k","r"]
for x,color_idx in zip(x_bar_values,color_indices):
mu_values = scipy.linspace(0,1,100)
sev = [severity(mu_1=mu_1,x=x,sigma=sigma) for mu_1 in mu_values]
plt.plot(mu_values,sev,color_idx,label=x)
plt.ylim(0,1.1)
plt.ylabel("severity for $H: \mu > \mu_1$")
plt.legend(loc="lower left")
plt.xlabel(r"$\mu_1$")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can do a lot with the numpy module. Below is an example to jog your memory
Step2: Do you remember the Fibonacci sequence from yesterday's Lecture 1? Let's define our own function that will help us to write the Fibonacci sequence.
Step3: Remember loops too? Let's get the first 10 numbers in the Fibonacci sequence.
Step4: There's your quick review of numpy and functions along with a while loop thrown in. Now we can move on to the content of Lecture 3.
Step5: Let's generate a vector of length 5 populated with uniformly distributed random numbers. The function np.random.rand takes the array output size as an argument (in this case, 5).
Step6: Additionally, you are not limited to one-dimensional arrays! Let's make a 5x5, two-dimensional array
Step7: Great, so now you have a handle on generating uniform distributions. Let's quickly look at one more type of distribution.
Step8: So these numbers probably don't mean that much to you. Don't worry; they don't mean much to me either!
Step9: Now, let's plot a uniform distribution and take a look.
Step10: Now, let's use plt.hist to see what X looks like. First, run the cell below. Then, vary bins -- doing so will either increase or decrease the apparent effect of noise in your distribution.
Step11: Nice job! Do you see why the "uniform distribution" is referred to as such?
Step12: Nice job! You just plotted a Gaussian distribution with mean of 0 and a standard deviation of 1.
Step13: Before moving onto the next section, vary the values of mu and sigma in the above code to see how your histogram changes. You should find that changing mu (the mean) affects the center of the distribution while changing sigma (the standard deviation) affects the width of the distribution.
Step14: For simplicity's sake, we've used plt.hist without generating any return variables. Remember that plt.hist takes in your data (X) and the number of bins, and it makes histograms from it. In the process, plt.hist generates variables that you can store; we just haven't thus far. Run the cell below to see -- it should replot the Gaussian from above while also generating the output variables.
Step15: Something that might be useful to you is that you can make use of variables outputted by plt.hist -- particularly bins and N.
Step16: The plot above (red stars) should look like it overlays the histogram plot above it. If that's what you see, nice job! If not, let your instructor and/or TAs know before moving onto the next section.
Step17: So you've learned how to generate distributions of numbers, plot them, and generate statistics on them. This is a great starting point, but let's try working with some real data!
Step18: Next, plot a histogram of this data set (play around with the number of bins, too).
Step19: Now, calculate and print the mean and standard deviation of this distribution.
Step20: Nice job! Now that you're used to working with real data, we're going to try to fit some more real data to known functions to gain a better understanding of that data.
Step21: We will show you an example, and then you get to try it out for yourself!
Step22: Next, we will define a function $f(x) = \frac 1 3x^2+3$ that will square the elements in x and add an offset. Call this function f, and implement it below.
Step23: Now set y equal to f called with the numpy array x as the input.
Step24: Now we will add some noise to the array y using the np.random.rand() function and store it in a new variable called y_noisy.
Step25: Let's see what the y values look like now
Step26: It seems like there's still a rough parabolic shape, so let's see if we can recover the original y values without any noise.
Step27: Then, we want to find the optimal values of a, b, and c that will give a function that fits best to y_noisy.
Step28: Now that we have the fitted parameters, let's use quadratic to plot the fitted parabola alongside the noisy y values.
Step29: And we can also compare y_fitted to the original y values without any noise
Step30: Not a bad job for your first fit function!
Steps for using curve_fit
Step31: So you've imported your data and plotted it. It should look similar to the figure below. Run the next cell to see.
Step32: What type of function would you say this is? Think back to the distributions we've learned about today. Any ideas?
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.linspace(0,10,11)
def myFib(a, b):
    """Return the next Fibonacci number given the two preceding terms a and b."""
    next_term = a + b
    return next_term
fibLength = 10  # the length we want for our Fibonacci sequence
fibSeq = np.zeros(fibLength)  # numpy array of fibLength zeros to fill in
# Define the first 2 elements of the Fibonacci sequence.
fibSeq[0] = 0
fibSeq[1] = 1
# With the first 2 elements defined, compute the rest starting at index 2.
i = 2
while i < fibLength:  # equivalent to the original `i-1 < fibLength-1`, just clearer
    fibSeq[i] = myFib(fibSeq[i-2], fibSeq[i-1])
    i = i + 1
print(fibSeq)
import numpy as np
np.random.rand(5)
np.random.rand(5,5)
np.random.randn(5)
import matplotlib.pyplot as plt
%matplotlib inline
#your code here
X = np.random.rand(5000)
plt.hist(X, bins=20);
#your code here
X = np.random.randn(5000)
plt.hist(X, bins=50);
mu = 5 #the mean of the distribution
sigma = 3 #the standard deviation
Y = sigma * np.random.randn(5000) + mu
plt.hist([X,Y],bins=50,histtype='step');
#write your observations here
N,bins, = plt.hist(X, bins=50)
_
bin_avg = (bins[1:]+bins[:-1])/2
plt.plot(bin_avg, N, 'r*')
plt.show()
mean = np.mean(X)
std = np.std(X)
print('mean: '+ repr(mean) )
print('standard deviation: ' + repr(std))
lifetimes = np.loadtxt('Data/LifetimeData.txt')
#your code here
N,bins,patches = plt.hist(lifetimes,bins=10)
#your code here
mean = np.mean(lifetimes)
std = np.std(lifetimes)
print("mean: "+repr(mean))
print("standard deviation: "+repr(std))
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
%matplotlib inline
# your code here
x = np.linspace(-5,5,100)
# your code here
def f(x):
    """Evaluate f(x) = (1/3)*x**2 + 3 (elementwise when x is a numpy array)."""
    scaled_square = 1/3 * x**2
    return scaled_square + 3
y = f(x)
# your code here
y_noisy = y + np.random.rand(100)
plt.plot(x,y_noisy)
def quadratic(x, a, b, c):
    """Second-degree polynomial a*x**2 + b*x + c, used as the curve_fit model."""
    square_term = a * x**2
    linear_term = b * x
    return square_term + linear_term + c
optimal_values, _ = curve_fit(quadratic,x,y_noisy)
a = optimal_values[0]
b = optimal_values[1]
c = optimal_values[2]
print(a, b, c)
y_fitted = quadratic(x,a,b,c)
plt.plot(x,y_fitted)
plt.plot(x,y_noisy)
plt.plot(x,y_fitted)
plt.plot(x,y)
# Step 1: Import the data
# Step 2: Plot the data to see what it looks like
xData,yData = np.loadtxt('Data\photopeak.txt', usecols=(0,1), unpack=True)
plt.plot(xData,yData,'*')
plt.title('xData and yData')
from IPython.display import display, Image
display(Image(filename='Data\photopeak.png'))
# Step 3: Define your function here
def myGaussian(Xvals, A, mu, sigma):
    """Gaussian of amplitude A, mean mu, and standard deviation sigma at Xvals."""
    norm_const = A / np.sqrt(2 * np.pi * sigma**2)
    exponent = -((Xvals - mu)**2 / (2 * sigma**2))
    return norm_const * np.exp(exponent)
# Step 3.5: SANITY CHECK! Use this step as a way to check that the function you defined above is mathematically correct.
mu = 0.66 #the mean of the distribution
sigma = 0.04 #the standard deviation
A = 10;
Xvals = np.linspace(0.50,0.80,100)
Yvals = A*myGaussian(Xvals,A,mu,sigma)
plt.plot(Xvals,Yvals)
# Step 4: Use curve_fit to generate your output parameters
popt, pcov = curve_fit(myGaussian, xData, yData, p0=[0.5, 0.1, 5])
#perr = np.sqrt(np.diag(pcov))
# Step 5: Generate your model fit
xFit = np.linspace(min(xData),max(xData),100) #give this
line_fit = myGaussian(xFit, *popt)
# Step 6: Plot the best fit function and the scatter plot of data
plt.plot(xData, yData, 'r*')
plt.plot(xFit, line_fit)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <font color='red'>Please put your datahub API key into a file called APIKEY and place it to the notebook folder or assign your API key directly to the variable API_key!</font>
Step2: At first, we need to define the dataset name and a variable we want to use.
Step3: Then we define the spatial range. We decided to analyze the US, where unfortunately catastrophic wildfires are taking place at the moment and influencing air quality.
Step4: Download the data with package API
Step5: Work with the downloaded files
Step6: Here we are making a Basemap of the US that we will use for showing the data.
Step7: Now it is time to plot all the data. A great way to do it is to make an interactive widget, where you can choose time stamp by using a slider.
Step8: Let's include an image from the last time-step as well, because GitHub Preview doesn't show the time slider images.
Step9: With the function below we will save images you saw above to the local filesystem as a GIF, so it is easily shareable with others.
Step10: As we are interested in California fires right now, it would make sense to make animation of only California area as well. So people can be prepared when smoke hits their area. The model has pretty good spatial resolution as well - 3 km, which makes tracking the smoke easier.
Step11: Finally, we will remove the package we downloaded.
Step12: Data about Burned Area from Cal Fire
Step13: Here we convert incident_dateonly_created column to datetime, so it's easier to group data by year.
Step14: Below you can see the data from acres_burned.csv file. It has information about each incident. This time we only compute total acres burned each year.
Step15: Computing yearly sums. For some reason there are many years without much data, so we will filter them out. We also reset the index, since we don't want dates as the index, and create a year column.
Step16: We can see the computed data below.
Step17: Finally we will make a bar chart of the data. We are using seaborn this time for plotting the data and to visualize it better, we added colormap to bar chart as well.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
%matplotlib inline
import numpy as np
import dh_py_access.lib.datahub as datahub
import xarray as xr
import matplotlib.pyplot as plt
import ipywidgets as widgets
from mpl_toolkits.basemap import Basemap,shiftgrid
import dh_py_access.package_api as package_api
import matplotlib.colors as colors
import warnings
import datetime
import shutil
import imageio
import seaborn as sns
import pandas as pd
import os
import matplotlib as mpl
import wget
warnings.filterwarnings("ignore")
server = 'api.planetos.com'
API_key = open('APIKEY').readlines()[0].strip() #'<YOUR API KEY HERE>'
version = 'v1'
dh = datahub.datahub(server,version,API_key)
dataset = 'noaa_hrrr_wrf_smoke'
variable_name1 = 'Mass_density_concentration_height_above_ground'
# reftime = datetime.datetime.strftime(datetime.datetime.today(), '%Y-%m-%d') + 'T00:00:00'
area_name = 'usa'
today_hr = datetime.datetime.strftime(datetime.datetime.today(),'%Y%m%dT%H')
latitude_north = 49; longitude_west = -127
latitude_south = 26; longitude_east = -70.5
package_hrrr = package_api.package_api(dh,dataset,variable_name1,longitude_west,longitude_east,latitude_south,latitude_north,area_name=area_name+today_hr)
package_hrrr.make_package()
package_hrrr.download_package()
dd1 = xr.open_dataset(package_hrrr.local_file_name)
dd1['longitude'] = ((dd1.lon+180) % 360) - 180
dd1[variable_name1].data[dd1[variable_name1].data < 0] = 0
dd1[variable_name1].data[dd1[variable_name1].data == np.nan] = 0
m = Basemap(projection='merc', lat_0 = 55, lon_0 = -4,
resolution = 'h', area_thresh = 0.05,
llcrnrlon=longitude_west, llcrnrlat=latitude_south,
urcrnrlon=longitude_east, urcrnrlat=latitude_north)
lons,lats = np.meshgrid(dd1.longitude.data,dd1.lat.data)
lonmap,latmap = m(lons,lats)
vmax = np.nanmax(dd1[variable_name1].data)
vmin = 2
cmap = mpl.cm.twilight.colors[:-100]
tmap = mpl.colors.LinearSegmentedColormap.from_list('twilight_edited', cmap)
def loadimg(k):
    """Plot the near-surface smoke field for time step `k` of the HRRR data.

    Intended as the callback for the ipywidgets time slider. Uses notebook
    globals: `m` (Basemap), `lonmap`/`latmap` (projected grid), `dd1`
    (xarray dataset), `variable_name1`, `vmin`/`vmax`, and `tmap` (colormap).
    """
    fig=plt.figure(figsize=(10,7))
    ax = fig.add_subplot(111)
    # Logarithmic color scale: concentrations span several orders of magnitude.
    pcm = m.pcolormesh(lonmap,latmap,dd1[variable_name1].data[k][0],
                       norm = colors.LogNorm(vmin=vmin, vmax=vmax),cmap = tmap)
    # NOTE(review): ilat/ilon (indices of the maximum) are computed but never used below.
    ilat,ilon = np.unravel_index(np.nanargmax(dd1[variable_name1].data[k][0]),dd1[variable_name1].data[k][0].shape)
    cbar = plt.colorbar(pcm,fraction=0.024, pad=0.040,ticks=[10**0, 10**1, 10**2,10**3])
    cbar.ax.set_yticklabels([0,10,100,1000])
    # [:-10] trims trailing digits from the numpy datetime string for a cleaner title.
    ttl = plt.title('Near Surface Smoke ' + str(dd1[variable_name1].time[k].data)[:-10],fontsize=20,fontweight = 'bold')
    ttl.set_position([.5, 1.05])
    cbar.set_label(dd1[variable_name1].units)
    m.drawcountries()
    m.drawstates()
    m.drawcoastlines()
    # Report the peak concentration for this time step.
    print("Maximum: ","%.2f" % np.nanmax(dd1[variable_name1].data[k][0]))
    plt.show()
widgets.interact(loadimg, k=widgets.IntSlider(min=0,max=len(dd1[variable_name1].data)-1,step=1,value=0, layout=widgets.Layout(width='100%')))
loadimg(9)
def make_ani(m,lonmap,latmap,aniname,smaller_area=False):
    """Render one PNG per time step of the smoke field and assemble a GIF.

    Frames are written to a temporary ./anim/ folder (created on demand,
    removed at the end) and combined into `aniname`. Existing frame files
    are reused, so re-running skips already-rendered frames.

    Parameters
    ----------
    m : Basemap instance to draw on.
    lonmap, latmap : projected grid coordinates matching `m`.
    aniname : output GIF file name.
    smaller_area : if True, use colorbar/title sizes tuned for a zoomed map.

    Uses notebook globals: dd1, variable_name1, vmin, vmax, tmap, colors.
    """
    # Layout constants differ between the CONUS view and the zoomed view.
    if smaller_area==True:
        fraction = 0.035
        fontsize = 13
    else:
        fraction = 0.024
        fontsize = 20
    folder = './anim/'
    for k in range(len(dd1[variable_name1])):
        filename = folder + 'ani_' + str(k).rjust(3,'0') + '.png'
        # Skip frames already rendered on a previous run.
        if not os.path.exists(filename):
            fig=plt.figure(figsize=(10,7))
            ax = fig.add_subplot(111)
            pcm = m.pcolormesh(lonmap,latmap,dd1[variable_name1].data[k][0],
                           norm = colors.LogNorm(vmin=vmin, vmax=vmax),cmap = tmap)
            m.drawcoastlines()
            m.drawcountries()
            m.drawstates()
            cbar = plt.colorbar(pcm,fraction=fraction, pad=0.040,ticks=[10**0, 10**1, 10**2,10**3])
            cbar.ax.set_yticklabels([0,10,100,1000])
            # [:-10] trims trailing digits from the numpy datetime string.
            ttl = plt.title('Near Surface Smoke ' + str(dd1[variable_name1].time[k].data)[:-10],fontsize=fontsize,fontweight = 'bold')
            ttl.set_position([.5, 1.05])
            cbar.set_label(dd1[variable_name1].units)
            ax.set_xlim()
            # Create the frame folder lazily, before the first savefig.
            if not os.path.exists(folder):
                os.mkdir(folder)
            plt.savefig(filename,bbox_inches = 'tight',dpi=150)
            plt.close()
    # Collect the rendered frames in name order, skipping hidden files.
    files = sorted(os.listdir(folder))
    images = []
    for file in files:
        if not file.startswith('.'):
            filename = folder + file
            images.append(imageio.imread(filename))
    kargs = { 'duration': 0.3,'quantizer':2,'fps':5.0}
    imageio.mimsave(aniname, images, **kargs)
    print ('GIF is saved as {0} under current working directory'.format(aniname))
    # Remove the temporary frame folder now that the GIF is written.
    shutil.rmtree(folder)
make_ani(m,lonmap,latmap,'hrrr_smoke.gif')
latitude_north_cal = 43; longitude_west_cal = -126.
latitude_south_cal = 30.5; longitude_east_cal = -113
m2 = Basemap(projection='merc', lat_0 = 55, lon_0 = -4,
resolution = 'h', area_thresh = 0.05,
llcrnrlon=longitude_west_cal, llcrnrlat=latitude_south_cal,
urcrnrlon=longitude_east_cal, urcrnrlat=latitude_north_cal)
lons2,lats2 = np.meshgrid(dd1.longitude.data,dd1.lat.data)
lonmap_cal,latmap_cal = m2(lons2,lats2)
make_ani(m2,lonmap_cal,latmap_cal,'hrrr_smoke_california.gif',smaller_area=True)
os.remove(package_hrrr.local_file_name)
if not os.path.exists('acres_burned.csv'):
wget.download('https://www.fire.ca.gov/imapdata/mapdataall.csv',out='acres_burned.csv')
datain = pd.read_csv('acres_burned.csv')
datain['incident_dateonly_created'] = pd.to_datetime(datain['incident_dateonly_created'])
datain
burned_acres_yearly = datain.resample('1AS', on='incident_dateonly_created')['incident_acres_burned'].sum()
burned_acres_yearly = burned_acres_yearly[burned_acres_yearly.index > datetime.datetime(2012,1,1)]
burned_acres_yearly = burned_acres_yearly.reset_index()
burned_acres_yearly['year'] = pd.DatetimeIndex(burned_acres_yearly.incident_dateonly_created).year
burned_acres_yearly
fig,ax = plt.subplots(figsize=(10,6))
pal = sns.color_palette("YlOrRd_r", len(burned_acres_yearly))
rank = burned_acres_yearly['incident_acres_burned'].argsort().argsort()
sns.barplot(x='year',y='incident_acres_burned',data=burned_acres_yearly,ci=95,ax=ax,palette=np.array(pal[::-1])[rank])
ax.set_xlabel('Year',fontsize=15)
ax.set_ylabel('Burned Area [acres]',fontsize=15)
ax.grid(color='#C3C8CE',alpha=1)
ax.set_axisbelow(True)
ax.spines['bottom'].set_color('#C3C8CE')
ax.spines['top'].set_color('#C3C8CE')
ax.spines['left'].set_color('#C3C8CE')
ax.spines['right'].set_color('#C3C8CE')
ttl = ax.set_title('Burned Area in California',fontsize=20,fontweight = 'bold')
ttl.set_position([.5, 1.05])
ax.tick_params(labelsize=15,length=0)
plt.savefig('acres_burned_cali.png',dpi=300)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Data
Step2: Recreate Input
Step3: Recreate Style
Step4: Style Transfer
Step5: Different Approach (TOFIX)
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import time
from PIL import Image
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
import keras
from keras import backend as K
from keras.models import Model
from keras import metrics
from keras.applications.vgg16 import VGG16
import scipy
from scipy.optimize import fmin_l_bfgs_b
from scipy.misc import imsave
#backend.set_image_data_format('channels_last')
#keras.backend.set_image_dim_ordering('tf')
import os
import sys
sys.path.append(os.path.join(os.getcwd(), os.pardir))
from utils.vgg_utils import preprocess, deprocess, gram_matrix
from utils.vgg16_avg import VGG16_Avg
RES_DIR = os.path.join('resources')
%load_ext autoreload
%autoreload 2
height = 256
width = 256
# load content image
content_image = None
with Image.open(os.path.join(RES_DIR, 'superman.jpg')) as img:
img = img.resize((height, width))
content_image = np.asarray(img, dtype='float32')
plt.imshow(img.convert(mode='RGB'))
plt.show()
# load style image
style_image = None
with Image.open(os.path.join(RES_DIR, 'comics_style.jpg')) as img:
img = img.resize((height, width))
style_image = np.asarray(img, dtype='float32')
plt.imshow(img.convert(mode='RGB'))
plt.show()
content_image.shape
# define input image
img_arr = preprocess(np.expand_dims(style_image, axis=0))
#img_arr = preproc(np.expand_dims(np.array(Image.open(os.path.join(RES_DIR, 'simpsons_style.jpg'))), axis=0))
shp = img_arr.shape
print(shp)
# get VGG model
model = VGG16(include_top=False)
# define layer model (VGG model input and intermediate layer output)
layer = model.get_layer('block5_conv1').output
layer_model = Model(model.input, layer)
targ = K.variable(layer_model.predict(img_arr))
# define our loss and gradients
loss = metrics.mse(layer, targ)
grads = K.gradients(loss, model.input)
fn = K.function([model.input], [loss]+grads)
# utility function to hold loss and gradients
class Evaluator(object):
    """Adapter that exposes separate loss() and grads() callbacks, as required
    by scipy's fmin_l_bfgs_b, from a single function returning [loss, gradient].

    Note: grads(x) returns the gradient cached by the most recent loss(x)
    call, so loss() must always be invoked first for the same x.
    """

    def __init__(self, f, shp):
        self.f = f
        self.shp = shp

    def loss(self, x):
        """Reshape x to shp, evaluate f, cache the gradient, return the loss."""
        loss_value, self.grad_values = self.f([x.reshape(self.shp)])
        return loss_value.astype(np.float64)

    def grads(self, x):
        """Return the gradient cached by the preceding loss() call, flattened."""
        return self.grad_values.flatten().astype(np.float64)
evaluator = Evaluator(fn, shp)
# run optimization process and save result image at each iteration
def solve_image(eval_obj, iterations, x, img_shape, dest_dir=''):
    """Run L-BFGS optimization on the image, saving a snapshot each iteration.

    Parameters
    ----------
    eval_obj : Evaluator supplying .loss and .grads callbacks.
    iterations : number of L-BFGS restarts to perform.
    x : initial image array (flattened internally for the optimizer).
    img_shape : shape passed to deprocess() when rendering snapshots.
    dest_dir : directory for the per-iteration PNGs; created if missing.

    Returns
    -------
    The optimized (flattened, clipped) image array.
    """
    # Bug fix: ensure the output directory exists before the first imsave.
    if dest_dir:
        os.makedirs(dest_dir, exist_ok=True)
    for i in range(iterations):
        start_time = time.time()
        x, min_val, info = fmin_l_bfgs_b(eval_obj.loss, x.flatten(),
                                         fprime=eval_obj.grads, maxfun=20)
        # Keep pixel values within the valid mean-subtracted range.
        x = np.clip(x, -127, 127)
        print('Current loss value:', min_val)
        end_time = time.time()
        print('Iteration {} completed in {:.1f}s'.format(i, end_time - start_time))
        # Convert back to a displayable image and save a per-iteration snapshot.
        img = deprocess(x.copy(), img_shape)[0]
        img_filepath = os.path.join(dest_dir, "res_at_iteration_{}.png".format(i))
        imsave(img_filepath, img)
    return x
x = np.random.uniform(-2.5, 2.5, shp)
#x = np.random.uniform(0, 255, shp) - 128.
plt.imshow(x[0]);
# Bug fix: solve_image's 4th positional argument is img_shape, not the output
# directory, and the deprocessing helper imported above is named `deprocess`.
x = solve_image(evaluator, 5, x, shp, dest_dir='recreate_input')
plt.imshow(deprocess(x, shp)[0].astype('uint8'))
# load and process input content
style_arr = preprocess(np.expand_dims(style_image, axis=0)[:,:,:,:3])
shp = style_arr.shape
print(shp)
# get VGG model
#model = VGG16(include_top=False, pooling='avg', input_shape=shp[1:]) #input_tensor=input_tensor
model = VGG16_Avg(include_top=False, input_shape=shp[1:])
model.summary()
model.summary()
outputs = {l.name: l.output for l in model.layers}
layers = [outputs['block{}_conv1'.format(o)] for o in range(1,3)]
layers_model = Model(model.input, layers)
targs = [K.variable(o) for o in layers_model.predict(style_arr)]
def style_loss(x, targ):
    """Style loss: MSE between the Gram matrices of two feature maps.

    Matching Gram matrices compares channel-to-channel correlations while
    discarding spatial arrangement, which is what captures "style".
    """
    return metrics.mse(gram_matrix(x), gram_matrix(targ))
    # Unreachable alternative implementation kept from an earlier draft:
    #S = gram_matrix(style)
    #C = gram_matrix(combination)
    #channels = 3
    #size = height * width
    #return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))
loss = sum(style_loss(l1[0], l2[0]) for l1,l2 in zip(layers, targs))
grads = K.gradients(loss, model.input)
style_fn = K.function([model.input], [loss]+grads)
evaluator = Evaluator(style_fn, shp)
# PEP 8: prefer `def` over assigning a lambda to a name; the redundant
# trailing `/1` is dropped (it changed nothing).
def rand_img(shape):
    """Uniform random image in [-2.5, 2.5) with the given shape."""
    return np.random.uniform(-2.5, 2.5, shape)
x = rand_img(shp)
#x = scipy.ndimage.filters.gaussian_filter(x, [0,2,2,0])
plt.imshow(x[0]);
iterations=10
x = rand_img(shp)
# Bug fix: solve_image has no `folder_name` parameter and requires the image
# shape as its 4th argument; the output directory parameter is `dest_dir`.
x = solve_image(evaluator, iterations, x, shp, dest_dir='recreate_style')
# load and process input content
content_arr = preprocess(np.expand_dims(content_image, axis=0))
style_arr = preprocess(np.expand_dims(style_image, axis=0))
shp = content_arr.shape
print(content_arr.shape)
print(style_arr.shape)
# get VGG model
# later versions of Keras use pooling='avg'
model = VGG16(include_top=False, input_shape=shp[1:])
outputs = {l.name: l.output for l in model.layers}
style_layers = [outputs['block{}_conv2'.format(o)] for o in range(1,6)]
content_name = 'block4_conv2'
content_layer = outputs[content_name]
style_model = Model(model.input, style_layers)
style_targs = [K.variable(o) for o in style_model.predict(style_arr)]
content_model = Model(model.input, content_layer)
content_targ = K.variable(content_model.predict(content_arr))
style_wgts = [0.05,0.2,0.2,0.25,0.3]
loss = sum(style_loss(l1[0], l2[0])*w
for l1,l2,w in zip(style_layers, style_targs, style_wgts))
loss += metrics.mse(content_layer, content_targ)/2
grads = K.gradients(loss, model.input)
transfer_fn = K.function([model.input], [loss]+grads)
evaluator = Evaluator(transfer_fn, shp)
iterations=10
x = np.random.uniform(-2.5, 2.5, shp)
plt.imshow(x[0]);
x = solve_image(evaluator, iterations, x, shp, dest_dir=os.path.join('results', 'style_transfer'))
feature_layers = ['block1_conv2', 'block2_conv2',
'block3_conv3', 'block4_conv3',
'block5_conv3']
for layer_name in feature_layers:
layer_features = layers[layer_name]
style_features = layer_features[1, :, :, :]
combination_features = layer_features[2, :, :, :]
sl = style_loss(style_features, combination_features)
loss += (style_weight / len(feature_layers)) * sl
content_image = backend.variable(content_array)
style_image = backend.variable(style_array)
combination_image = backend.placeholder((1, height, width, 3))
#if backend.image_data_format() == 'channels_first':
# combination_image = backend.placeholder((1, 3, height, width))
#else:
# combination_image = backend.placeholder((1, height, width, 3))
input_tensor = backend.concatenate([content_image,
style_image,
combination_image], axis=0)
content_weight = 0.025
style_weight = 5.0
total_variation_weight = 1.0
loss = backend.variable(0.)
layer_features = layers['block2_conv2']
content_image_features = layer_features[0, :, :, :]
combination_features = layer_features[2, :, :, :]
loss += content_weight * content_loss(content_image_features,
combination_features)
def total_variation_loss(x):
    """Total-variation regularizer: penalizes differences between neighboring
    pixels so the generated image stays locally smooth.

    NOTE(review): this "TOFIX" section refers to `backend`, but this file
    imports the Keras backend as `K` — as written this raises NameError.
    """
    # Squared vertical and horizontal neighbor differences.
    a = backend.square(x[:, :height-1, :width-1, :] - x[:, 1:, :width-1, :])
    b = backend.square(x[:, :height-1, :width-1, :] - x[:, :height-1, 1:, :])
    return backend.sum(backend.pow(a + b, 1.25))
loss += total_variation_weight * total_variation_loss(combination_image)
grads = backend.gradients(loss, combination_image)
outputs = [loss]
outputs += grads
f_outputs = backend.function([combination_image], outputs)
def eval_loss_and_grads(x):
    """Evaluate the total loss and its gradient w.r.t. the combination image.

    Reshapes the flat optimizer vector back to image form, runs the notebook
    global `f_outputs`, and returns (loss, flat float64 gradient) in the form
    scipy's L-BFGS interface expects. Uses notebook globals height and width.
    """
    x = x.reshape((1, height, width, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    # f_outputs returns [loss, grad, ...]; flatten the gradient(s) into one vector.
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values
x = np.random.uniform(0, 255, (1, height, width, 3)) - 128.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ipynb 라는 확장자로 끝나는 파일들만 가지고 오려면
Step2: 파일에 대한 경로를 생성할 때
Step3: os.curdir #current directory
Step4: 폴더를 만들 때, os.listdir()로 특정 폴더가 있는지 확인한 후에, 만약 있으면 삭제하고 새로운 폴더를 생성한다.
Step5: 설정 파일 같은 것을 수정하거나 삭제할 때
Step6: os - low-level (저수준)으로 파일/폴더/운영체제를 관리했다면
Step7: 1. 국가명.csv 파일로 만들기 => world.tar.gz (world.zip) 압축하기
Step9: df.to_csv(os.path.join(, , ___.csv))
|
<ASSISTANT_TASK:>
Python Code:
import os
#os 모듈을 통해서
#운영체제 레벨(서버는 ex.우분투)에서 다루는 파일 폴더 생성하고 삭제하기가 가능
#기존에는 ("../../~~") 이런 식으로 경로를 직접 입력 했으나
os.listdir()
#현재 폴더 안에 있는 파일들을 리스트로 뽑는 것
os.listdir("../")
for csv_file in os.listdir("../"):
pass
[
file_name
for file_name
in os.listdir("../01일차.수_입문/")
if file_name.endswith(".ipynb") # csv 파일 가져오기, 엑셀 파일 가져오기로 사용
]
os.path.join("data", "data.csv")
os.curdir
os.path.join(os.curdir, "data", "data.csv")
# 이렇게 하면 경로를 알려줘. 앞으로 만들 때는 무조건 이렇게 만들겠다.
# os.path.join(os.curdir, "data", file_name)
os.makedirs("data") #잠재적인 문제가 있다.
os.listdir() #폴더 만들기는 쉽게 됩니다.
os.rmdir("data") #잠재적인 문제가 있다.
os.listdir()
os.makedirs("data") # DATA라는 폴더 안에 간단한 텍스트 파일 만들기
os.listdir(os.path.join(os.curdir,"data"))
os.rmdir("data")
# 폴더 안에 파일이 있으면 삭제가 안 된다
# os.listdir()로 찾아본 다음에 폴더면 또 들어가서 다시 재귀적으로 찾아보고,
# 파일이면 삭제하고 상위폴더로 올라와서 그리고 rmdir() ...
import shutil
os.listdir(os.path.join(os.curdir, "data"))
shutil.rmtree(os.path.join(os.curdir, "data"))
os.listdir(os.path.join(os.curdir, "data"))
os.makedirs(os.path.join(os.curdir, "data"))
shutil.rmtree(os.path.join(os.curdir, "data"))
os.makedirs(os.path.join(os.curdir, "data"))
os.makedirs(os.path.join(os.curdir, "data", "world"))
# 만약 "data", "world"라는 폴더가 있으면, 삭제하는 기능 ...
# 폴더의 유무를 확인하고, 있으면 삭제한다.
if "data" in os.listdir():
print("./data/ 폴더를 삭제합니다.")
shutil.rmtree(os.path.join(os.curdir, "data"))
# "data"라는 폴더를 생성하고, 그 안에 "world"라는 폴더를 생성한다.
print("./data/ 폴더를 생성합니다.")
os.makedirs(os.path.join(os.curdir, "data"))
os.makedirs(os.path.join(os.curdir, "data", "world"))
import pymysql
db = pymysql.connect(
"db.fastcamp.us",
"root",
"dkstncks",
"world",
charset='utf8'
)
country_df = pd.read_sql("SELECT * FROM Country;", db)
city_df = pd.read_sql("SELECT * FROM City;", db)
#Country.Code를 바탕으로, City.CountryCode와 매칭해서 찾아야 함
#Country.Name은 반드시 가지고 와야지 파일명으로 저장이 가능
city_groups = city_df.groupby("CountryCode")
for index, row in country_df.iterrows():
country_code = row["Code"]
country_name = row["Name"]
city_df = city_groups.get_group(country_code)
city_df.to_csv(os.path.join("data", "world", "{country_name},csv".format(country_name=country_name)))
#"ATA"라는 애가 없다고 나오니까 테스트
SQL_QUERY =
SELECT *
FROM City
WHERE CountryCode = "ATA"
;
pd.read_sql(SQL_QUERY, db)
city_groups.get_group("ATA")
"ATA" in city_groups["CountryCode"].unique()
#없는게 증명 됐으니 if문 첨가
for index, row in country_df.iterrows():
country_code = row["Code"]
country_name = row["Name"]
if country_code in city_df["CountryCode"].unique():
one_city_df = city_groups.get_group(country_code)
one_city_df.to_csv(os.path.join(os.curdir, "data", "world", "{country_name}.csv".format(country_name=country_name)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Two indicator variables are equivalent to using a single variable, so we
Step2: That's about the same ratio of 0's and 1's.
Step3: and in the original data set we had
Step4: Least Squares Solution
Step5: Which ones appear to be most important?
Step6: What visualization would you use to check the results?
Step7: Might also be revealing to add the continuously-valued output of the
Step8: Shape of Boundary
Step9: Let's include the row and column names as lists, and write a function to print the table.
Step10: Calculate the sums of fruits in each jar by
Step11: Now we can calculate the probability of drawing each type of fruit, given that we have already chosen a jar.
Step12: We can do more if we code the probability of selecting a jar.
Step13: Now we can calculate the joint probabilities, or the probabilities of each pair of a jar and a fruit occurring.
Step14: The sum at the lower right had better be 1, because this table is all possible results.
Step15: Now the probability of a jar given that you know which fruit was drawn, is
Step16: Bayes Rule for Classification
Step17: Now imagine we only have data and don't know how it was
Step18: Start as before. Separate into input columns and target column. The
Step19: Now we need a QDA discriminant function. Here is the math again.
Step20: To use this, we must calculate the mean, covariance, and prior
Step21: Now let's apply our discriminant function to some new data.
Step23: and look at it. If data is more than one dimensional, let's just plot
Step24: Since there are only 10 training samples per class, results will
Step25: What happened? $\Sigma$ is very close to singular, meaning columns of $\Xv$ are
Step26: Let's write a function to do this and run it multiple times (for different divisions into training and testing sets).
Step27: Review. How would you get the values of
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
f = open("parkinsons.data","r")
header = f.readline()
names = header.strip().split(',')[1:]
data = np.loadtxt(f ,delimiter=',', usecols=1 + np.arange(23))
data.shape
names
x=np.arange(10).reshape((2, 5))
x
np.delete?
np.delete(x, 2, 1)
targetColumn = names.index("status")
XColumns = np.arange(23)
XColumns = np.delete(XColumns, targetColumn)
X = data[:, XColumns]
T = data[:, targetColumn].reshape((-1, 1)) # to keep 2-d matrix form
names.remove("status")
X.shape, T.shape
print('{:20s} {:9s} {:9s}'.format(' ','mean','stdev'))
for i in range(X.shape[1]):
print('{:20s} {:9.3g} {:9.3g}'.format(names[i], np.mean(X[:,i]), np.std(X[:,i])))
uniq = np.unique(T)
print(' Value Occurrences')
for i in uniq:
print('{:7.1g} {:10d}'.format(i, np.sum(T==i)))
# Stratified train/test split: permute each class separately so both the
# healthy (status 0) and Parkinsons (status 1) samples keep the chosen
# train fraction.
trainf = 0.8
healthyI,_ = np.where(T == 0)
parkI,_ = np.where(T == 1)
healthyI = np.random.permutation(healthyI)
parkI = np.random.permutation(parkI)
nHealthy = round(trainf * len(healthyI))
nPark = round(trainf * len(parkI))
rowsTrain = np.hstack((healthyI[:nHealthy], parkI[:nPark]))
Xtrain = X[rowsTrain,:]
Ttrain = T[rowsTrain,:]
rowsTest = np.hstack((healthyI[nHealthy:], parkI[nPark:]))
Xtest = X[rowsTest,:]
Ttest = T[rowsTest,:]
print()  # BUG FIX: bare `print` is a no-op expression in Python 3; was meant to print a blank line
print('Xtrain is {:d} by {:d}. Ttrain is {:d} by {:d}'.format(*(Xtrain.shape + Ttrain.shape)))
uniq = np.unique(Ttrain)
print('  Value  Occurrences')
for i in uniq:
    print('{:7.1g} {:10d}'.format(i, np.sum(Ttrain==i)))
print('Xtest is {:d} by {:d}. Ttest is {:d} by {:d}'.format(*(Xtest.shape + Ttest.shape)))
uniq = np.unique(Ttest)
print('  Value  Occurrences')
for i in uniq:
    print('{:7.1g} {:10d}'.format(i, np.sum(Ttest==i)))
def train(X, T, lamb=0):
    """Fit a ridge-regularized linear least-squares model.

    X: n x d input matrix; T: n x k target matrix; lamb: ridge penalty.
    Inputs are standardized (mean 0, std 1 per column) and a bias column is
    prepended; the bias weight is not penalized.  Returns a dict holding the
    weights plus the standardization statistics that use() needs.
    """
    means = X.mean(0)
    stds = X.std(0)
    n, d = X.shape
    Xs = (X - means) / stds
    Xs1 = np.insert(Xs, 0, 1, axis=1)   # prepend constant 1 bias column
    lambDiag = np.eye(d + 1) * lamb
    lambDiag[0, 0] = 0                  # do not regularize the bias weight
    # rcond=None pins numpy's documented default and silences the
    # FutureWarning that the bare lstsq call emits.
    w = np.linalg.lstsq(Xs1.T @ Xs1 + lambDiag, Xs1.T @ T, rcond=None)[0]
    return {'w': w, 'means': means, 'stds': stds}
def use(model, X):
    """Apply a model produced by train() to new inputs X."""
    standardized = (X - model['means']) / model['stds']
    with_bias = np.insert(standardized, 0, 1, axis=1)
    return with_bias @ model['w']
model = train(Xtrain, Ttrain)
names.insert(0,'bias')
for i in range(len(names)):
print('{:2d} {:>20s} {:10.3g}'.format(i, names[i], model['w'][i][0]))
def convertTo01(Y):
    """Map continuous predictions to the nearer of the targets 0 and 1."""
    # The column index of the closer target is itself the 0/1 class label.
    closest = np.argmin(np.abs(Y - [0, 1]), axis=1)
    return closest.reshape((-1, 1))
convertTo01(np.array([0.1, 1.1, -0.5, 0.56]).reshape((-1,1)))
Ytrain = use(model, Xtrain)
predictedTrain = convertTo01(Ytrain)
percentCorrectTrain = np.sum(predictedTrain == Ttrain) / Ttrain.shape[0] * 100.0
Ytest = use(model, Xtest)
predictedTest = convertTo01(Ytest)
percentCorrectTest = np.sum(predictedTest == Ttest) / float(Ttest.shape[0]) * 100.0
print('Percent Correct: Training {:6.1f} Testing {:6.1f}'.format(percentCorrectTrain, percentCorrectTest))
plt.figure(figsize=(8, 8))
plt.subplot(2, 1 ,1)
plt.plot(np.hstack((Ttrain, predictedTrain)), 'o-', alpha=0.5)
plt.ylim(-0.1, 1.1) # so markers will show
plt.xlabel('Sample Index')
plt.ylabel('Class')
plt.title('Training Data')
plt.legend(('Actual', 'Predicted'), loc='center')
plt.subplot(2, 1, 2)
plt.plot(np.hstack((Ttest, predictedTest)), 'o-', alpha=0.5)
plt.ylim(-0.1, 1.1)
plt.xlabel('Sample Index')
plt.ylabel('Class')
plt.title('Testing Data')
plt.legend(('Actual', 'Predicted'), loc='center');
plt.tight_layout()
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(np.hstack((Ttrain, predictedTrain, Ytrain)),'o-', alpha=0.5)
plt.ylim(-0.1, 1.1) # so markers will show
plt.xlabel('Sample Index')
plt.ylabel('Class')
plt.title('Training Data')
plt.legend(('Actual', 'Predicted', 'Cont. Val.'), loc='center')
plt.subplot(2, 1, 2)
plt.plot(np.hstack((Ttest, predictedTest, Ytest)), 'o-', alpha=0.5)
plt.ylim(-0.1, 1.1)
plt.xlabel('Sample Index')
plt.ylabel('Class')
plt.title('Testing Data')
plt.legend(('Actual', 'Predicted', 'Cont. Val.'), loc='center');
counts = np.array([[2, 6, 4], [3, 1, 2]])
counts
jarNames = ['red', 'blue']
fruitNames = ['apple', 'orange', 'strawberry']
def printTable(label, data):
    """Print a labeled jars-by-fruits table with row and column totals.

    data is a 2 x 3 array of counts or probabilities; headers come from the
    module-level jarNames and fruitNames lists.
    """
    print()  # BUG FIX: bare `print` is a no-op expression in Python 3; was meant to print a blank line
    print(label)
    print('     {:>9s} {:>7s} {:>9s}'.format(*fruitNames))
    for i in [0, 1]:
        d = data[i, :].tolist()
        print('{:4s} {:7.3g} {:7.3g} {:7.3g} {:7.3g}'.format(*([jarNames[i]] + d + [sum(d)])))
    colTotals = np.sum(data, axis=0).tolist()
    print('     {:7.3g} {:7.3g} {:7.3g} {:7.3g}'.format(*(colTotals + [sum(colTotals)])))
printTable('counts', counts)
jarSums = np.sum(counts, axis=1).reshape((2, 1))
jarSums
pFruitGivenJar = counts / jarSums
printTable('Prob(Fruit|Jar)', pFruitGivenJar)
pJar = np.array([[0.6], [0.4]])
pJar
pFruitAndJar = pFruitGivenJar * pJar
printTable('Prob(Fruit,Jar)', pFruitAndJar)
pFruit = np.sum(pFruitAndJar, axis=0)
pFruit
pJarGivenFruit = pFruitAndJar / pFruit
printTable('Prob(Jar|Fruit)', pJarGivenFruit)
D = 1 # number of components in each sample
N = 10 # number of samples in each class
X1 = np.random.normal(0.0, 1.0, (N, D))
T1 = np.array([1]*N).reshape((N, 1))
X2 = np.random.normal(4.0, 1.5, (N, D)) # wider variance
T2 = np.array([2]*N).reshape((N, 1))
data = np.hstack(( np.vstack((X1, X2)), np.vstack((T1, T2))))
data.shape
data
X = data[:, 0:D]
T = data[:, -1:]
means = np.mean(X, 0)
stds = np.std(X, 0)
Xs = (X - means) / stds
Xs.mean(0), Xs.std(0)
Sigma = np.array([[1, 2], [2, 1]])
Sigma @ np.linalg.inv(Sigma)
Sigma @ np.linalg.pinv(Sigma)
Sigma = np.array([[1, 2], [1, 2]])
Sigma
np.linalg.inv(Sigma)
np.linalg.pinv?
Sigma @ np.linalg.pinv(Sigma)
def discQDA(X, means, stds, mu, Sigma, prior):
    """QDA discriminant value for each row of X.

    X is standardized with (means, stds) and centered on the class mean mu
    (given in standardized coordinates); Sigma is the class covariance and
    prior the class prior probability.  Returns an (n, 1) column of
    log P(x|class) + log P(class), up to the shared -D/2 log(2 pi) term.
    """
    Xc = (X - means) / stds - mu
    if Sigma.size == 1:
        Sigma = np.asarray(Sigma).reshape((1, 1))
    det = np.linalg.det(Sigma)
    if det == 0:
        # Floor a singular covariance's determinant so log(det) stays
        # finite instead of producing -inf (raising here was tried and
        # abandoned in an earlier revision).
        det = np.finfo(float).eps
    SigmaInv = np.linalg.pinv(Sigma)  # pinv in case Sigma is singular
    return -0.5 * np.log(det) \
           - 0.5 * np.sum(np.dot(Xc, SigmaInv) * Xc, axis=1).reshape((-1, 1)) \
           + np.log(prior)
(T==1).reshape((-1))
class1rows = (T==1).reshape((-1))
class2rows = (T==2).reshape((-1))
mu1 = np.mean(Xs[class1rows, :], axis=0)
mu2 = np.mean(Xs[class2rows, :], axis=0)
Sigma1 = np.cov(Xs[class1rows, :].T)
Sigma2 = np.cov(Xs[class2rows, :].T)
N1 = np.sum(class1rows)
N2 = np.sum(class2rows)
N = len(T)
prior1 = N1 / float(N)
prior2 = N2 / float(N)
Sigma1
nNew = 100
newData = np.linspace(-5.0, 10.0, nNew).repeat(D).reshape((nNew, D))
d1 = discQDA(newData, means, stds, mu1, Sigma1, prior1)
d2 = discQDA(newData, means, stds, mu2, Sigma2, prior2)
d1.shape, d2.shape
mu1, mu2, Sigma1, Sigma2
def normald(X, mu, sigma):
    """Multivariate normal density evaluated at each row of X.

    X contains samples, one per row, N x D.
    mu is mean vector, D x 1.
    sigma is covariance matrix, D x D.

    Returns an N x 1 column of density values.
    Raises LinAlgError if sigma is singular.
    """
    # NOTE: the original docstring had lost its triple quotes, leaving the
    # description lines as bare statements -- a syntax error.  Restored here.
    D = X.shape[1]
    detSigma = sigma if D == 1 else np.linalg.det(sigma)
    if detSigma == 0:
        raise np.linalg.LinAlgError('normald(): Singular covariance matrix')
    sigmaI = 1.0/sigma if D == 1 else np.linalg.inv(sigma)
    normConstant = 1.0 / np.sqrt((2*np.pi)**D * detSigma)
    diffv = X - mu.T  # change column vector mu to be row vector
    return normConstant * np.exp(-0.5 * np.sum(np.dot(diffv, sigmaI) * diffv, axis=1))[:,np.newaxis]
mu1, mu2
plt.figure(figsize=(10, 10))
plt.subplot(3, 1, 1)
plt.plot(newData[:, 0],np.hstack((d1, d2)))
plt.ylabel("QDA Discriminant Functions")
# Plot generative distributions p(x | Class=k) starting with discriminant functions
plt.subplot(3, 1, 2)
probs = np.exp( np.hstack((d1, d2)) - 0.5 *D * np.log(2 * np.pi) - np.log(np.array([[prior1, prior2]])))
plt.plot(newData[:,0], probs)
plt.ylabel("QDA P(x|Class=k)\n from disc funcs", multialignment="center")
# Plot generative distributions p(x | Class=k) using normald ERROR HERE
plt.subplot(3, 1 ,3)
newDataS = (newData - means) / stds
probs = np.hstack((normald(newDataS, mu1, Sigma1),
normald(newDataS, mu2, Sigma2)))
plt.plot(newData, probs)
plt.ylabel("QDA P(x|Class=k)\n using normald", multialignment="center");
D = 20 # number of components in each sample
N = 10 # number of samples in each class
X1 = np.random.normal(0.0, 1.2, (N, D))
T1 = np.array([1]*N).reshape((N, 1))
X2 = np.random.normal(4.0, 1.8, (N, D)) # wider variance
T2 = np.array([2]*N).reshape((N, 1))
data = np.hstack(( np.vstack((X1, X2)), np.vstack((T1, T2))))
X = data[:, 0:D]
T = data[:, -1]
means, stds = np.mean(X,0), np.std(X,0)
Xs = (X-means)/stds
class1rows = T==1
class2rows = T==2
mu1 = np.mean(Xs[class1rows,:],axis=0)
mu2 = np.mean(Xs[class2rows,:],axis=0)
Sigma1 = np.cov(Xs[class1rows,:].T)
Sigma2 = np.cov(Xs[class2rows,:].T)
N1 = np.sum(class1rows)
N2 = np.sum(class2rows)
N = len(T)
prior1 = N1 / float(N)
prior2 = N2 / float(N)
nNew = 100
newData = np.linspace(-5.0,10.0,nNew).repeat(D).reshape((nNew,D))
d1 = discQDA(newData,means,stds,mu1,Sigma1,prior1)
d2 = discQDA(newData,means,stds,mu2,Sigma2,prior2)
plt.figure(figsize=(10,10))
plt.subplot(3,1,1)
plt.plot(newData[:,0],np.hstack((d1,d2)))
plt.ylabel("QDA Discriminant Functions")
# Plot generative distributions p(x | Class=k) starting with discriminant functions
plt.subplot(3,1,2)
probs = np.exp( np.hstack((d1,d2)) - 0.5*D*np.log(2*np.pi) - np.log(np.array([[prior1,prior2]])))
plt.plot(newData[:,0],probs)
plt.ylabel("QDA P(x|Class=k)\n from disc funcs", multialignment="center")
# Plot generative distributions p(x | Class=k) using normald
plt.subplot(3,1,3)
newDataS = (newData-means)/stds
probs = np.hstack((normald(newDataS,mu1,Sigma1),
normald(newDataS,mu2,Sigma2)))
plt.plot(newData[:,0],probs)
plt.ylabel("QDA P(x|Class=k)\n using normald", multialignment="center");
Xtrain.shape, Ttrain.shape, Xtest.shape, Ttest.shape
# Fit generative models (Normal distributions) to each class
means,stds = np.mean(Xtrain, 0), np.std(Xtrain, 0)
Xtrains = (Xtrain - means) / stds
Ttr = (Ttrain==0).reshape((-1))
mu1 = np.mean(Xtrains[Ttr, :], axis=0)
cov1 = np.cov(Xtrains[Ttr, :].T)
Ttr = (Ttrain.ravel()==1).reshape((-1))
mu2 = np.mean(Xtrains[Ttr, :],axis=0)
cov2 = np.cov(Xtrains[Ttr, :].T)
d1 = discQDA(Xtrain, means, stds, mu1, cov1, float(nHealthy)/(nHealthy+nPark))
d2 = discQDA(Xtrain, means, stds, mu2, cov2, float(nPark)/(nHealthy+nPark))
predictedTrain = np.argmax(np.hstack((d1, d2)), axis=1)
d1t = discQDA(Xtest, means, stds, mu1, cov1, float(nHealthy)/(nHealthy+nPark))
d2t = discQDA(Xtest, means, stds, mu2, cov2, float(nPark)/(nHealthy+nPark))
predictedTest = np.argmax(np.hstack((d1t, d2t)), axis=1)
def percentCorrect(p, t):
    """Percentage of entries where prediction p equals target t."""
    matches = p.ravel() == t.ravel()
    return np.sum(matches) / float(len(t)) * 100
print('Percent correct: Train', percentCorrect(predictedTrain,Ttrain), 'Test', percentCorrect(predictedTest,Ttest))
def runPark(filename, trainFraction):
    """Load the Parkinsons data, make a stratified trainFraction split,
    fit one normal distribution per class, and print QDA train/test
    percent correct.  Uses the module-level discQDA and percentCorrect.
    """
    with open(filename, "r") as f:  # `with` so the file handle is always closed
        header = f.readline()
        names = header.strip().split(',')[1:]
        data = np.loadtxt(f, delimiter=',', usecols=1 + np.arange(23))
    targetColumn = names.index("status")
    XColumns = np.arange(23)
    XColumns = np.delete(XColumns, targetColumn)
    X = data[:, XColumns]
    T = data[:, targetColumn].reshape((-1, 1))  # to keep 2-d matrix form
    names.remove("status")
    # Stratified split: permute and partition each class separately.
    healthyI, _ = np.where(T == 0)
    parkI, _ = np.where(T == 1)
    healthyI = np.random.permutation(healthyI)
    parkI = np.random.permutation(parkI)
    nHealthy = round(trainFraction * len(healthyI))
    # BUG FIX: previously multiplied by the global `trainf` instead of the
    # trainFraction parameter, silently ignoring the argument for one class.
    nPark = round(trainFraction * len(parkI))
    rowsTrain = np.hstack((healthyI[:nHealthy], parkI[:nPark]))
    Xtrain = X[rowsTrain, :]
    Ttrain = T[rowsTrain, :]
    rowsTest = np.hstack((healthyI[nHealthy:], parkI[nPark:]))
    Xtest = X[rowsTest, :]
    Ttest = T[rowsTest, :]
    # Fit per-class mean and covariance in standardized coordinates.
    means, stds = np.mean(Xtrain, 0), np.std(Xtrain, 0)
    Xtrains = (Xtrain - means) / stds
    Ttr = (Ttrain == 0).reshape((-1))
    mu1 = np.mean(Xtrains[Ttr, :], axis=0)
    cov1 = np.cov(Xtrains[Ttr, :].T)
    Ttr = (Ttrain.ravel() == 1).reshape((-1))
    mu2 = np.mean(Xtrains[Ttr, :], axis=0)
    cov2 = np.cov(Xtrains[Ttr, :].T)
    d1 = discQDA(Xtrain, means, stds, mu1, cov1, float(nHealthy) / (nHealthy + nPark))
    d2 = discQDA(Xtrain, means, stds, mu2, cov2, float(nPark) / (nHealthy + nPark))
    predictedTrain = np.argmax(np.hstack((d1, d2)), axis=1)
    d1t = discQDA(Xtest, means, stds, mu1, cov1, float(nHealthy) / (nHealthy + nPark))
    d2t = discQDA(Xtest, means, stds, mu2, cov2, float(nPark) / (nHealthy + nPark))
    predictedTest = np.argmax(np.hstack((d1t, d2t)), axis=1)
    print('Percent correct: Train', percentCorrect(predictedTrain, Ttrain), 'Test', percentCorrect(predictedTest, Ttest))
# Duplicate redefinition of percentCorrect from the earlier cell (notebook
# artifact); behavior is identical to the previous definition.
def percentCorrect(p, t):
    # Fraction of matching entries, as a percentage.
    return np.sum(p.ravel()==t.ravel()) / float(len(t)) * 100
runPark('parkinsons.data', 0.8)
runPark('parkinsons.data',0.8)
runPark('parkinsons.data',0.8)
runPark('parkinsons.data',0.8)
import pdb
def discLDA(X, means, stds, mu, Sigma, prior):
    """LDA discriminant value for each row of X.

    Linear in x because every class shares the covariance Sigma.  X is
    standardized with (means, stds); mu is the class mean in standardized
    coordinates and prior the class prior probability.  Returns an (n, 1)
    column of discriminant values.
    """
    X = (X - means) / stds
    if Sigma.size == 1:
        Sigma = np.asarray(Sigma).reshape((1, 1))
    # (Removed a dead np.linalg.det(Sigma) computation and a commented-out
    # pdb breakpoint from the original.)
    SigmaInv = np.linalg.pinv(Sigma)  # pinv in case Sigma is singular
    mu = mu.reshape((-1, 1))  # make mu a column vector
    return np.dot(np.dot(X, SigmaInv), mu) - 0.5 * np.dot(np.dot(mu.T, SigmaInv), mu) + np.log(prior)
def runPark(filename, trainFraction):
    """Load the Parkinsons data, make a stratified trainFraction split, and
    print train/test percent correct for both QDA (per-class covariances)
    and LDA (shared covariance).  Uses the module-level discQDA, discLDA
    and percentCorrect.
    """
    with open(filename, "r") as f:  # `with` so the file handle is always closed
        header = f.readline()
        names = header.strip().split(',')[1:]
        data = np.loadtxt(f, delimiter=',', usecols=1 + np.arange(23))
    targetColumn = names.index("status")
    XColumns = np.arange(23)
    XColumns = np.delete(XColumns, targetColumn)
    X = data[:, XColumns]
    T = data[:, targetColumn].reshape((-1, 1))  # to keep 2-d matrix form
    names.remove("status")
    # Stratified split: permute and partition each class separately.
    healthyI, _ = np.where(T == 0)
    parkI, _ = np.where(T == 1)
    healthyI = np.random.permutation(healthyI)
    parkI = np.random.permutation(parkI)
    nHealthy = round(trainFraction * len(healthyI))
    # BUG FIX: previously multiplied by the global `trainf` instead of the
    # trainFraction parameter, silently ignoring the argument for one class.
    nPark = round(trainFraction * len(parkI))
    rowsTrain = np.hstack((healthyI[:nHealthy], parkI[:nPark]))
    Xtrain = X[rowsTrain, :]
    Ttrain = T[rowsTrain, :]
    rowsTest = np.hstack((healthyI[nHealthy:], parkI[nPark:]))
    Xtest = X[rowsTest, :]
    Ttest = T[rowsTest, :]
    # Fit per-class mean and covariance in standardized coordinates.
    means, stds = np.mean(Xtrain, 0), np.std(Xtrain, 0)
    Xtrains = (Xtrain - means) / stds
    Ttr = (Ttrain == 0).reshape((-1))
    mu1 = np.mean(Xtrains[Ttr, :], axis=0)
    cov1 = np.cov(Xtrains[Ttr, :].T)
    Ttr = (Ttrain.ravel() == 1).reshape((-1))
    mu2 = np.mean(Xtrains[Ttr, :], axis=0)
    cov2 = np.cov(Xtrains[Ttr, :].T)
    # QDA: each class keeps its own covariance.
    d1 = discQDA(Xtrain, means, stds, mu1, cov1, float(nHealthy) / (nHealthy + nPark))
    d2 = discQDA(Xtrain, means, stds, mu2, cov2, float(nPark) / (nHealthy + nPark))
    predictedTrain = np.argmax(np.hstack((d1, d2)), axis=1)
    d1t = discQDA(Xtest, means, stds, mu1, cov1, float(nHealthy) / (nHealthy + nPark))
    d2t = discQDA(Xtest, means, stds, mu2, cov2, float(nPark) / (nHealthy + nPark))
    predictedTest = np.argmax(np.hstack((d1t, d2t)), axis=1)
    print('QDA Percent correct: Train', percentCorrect(predictedTrain, Ttrain), 'Test', percentCorrect(predictedTest, Ttest))
    # LDA: single covariance, the sample-size-weighted mean of the class covariances.
    covMean = (cov1 * nHealthy + cov2 * nPark) / (nHealthy + nPark)
    d1 = discLDA(Xtrain, means, stds, mu1, covMean, float(nHealthy) / (nHealthy + nPark))
    d2 = discLDA(Xtrain, means, stds, mu2, covMean, float(nPark) / (nHealthy + nPark))
    predictedTrain = np.argmax(np.hstack((d1, d2)), axis=1)
    d1t = discLDA(Xtest, means, stds, mu1, covMean, float(nHealthy) / (nHealthy + nPark))
    d2t = discLDA(Xtest, means, stds, mu2, covMean, float(nPark) / (nHealthy + nPark))
    predictedTest = np.argmax(np.hstack((d1t, d2t)), axis=1)
    print('LDA Percent correct: Train', percentCorrect(predictedTrain, Ttrain), 'Test', percentCorrect(predictedTest, Ttest))
# Duplicate redefinition of percentCorrect (notebook artifact); identical to
# the two earlier definitions.
def percentCorrect(p, t):
    # Fraction of matching entries, as a percentage.
    return np.sum(p.ravel()==t.ravel()) / float(len(t)) * 100
runPark('parkinsons.data', 0.8)
for i in range(5):
runPark('parkinsons.data', 0.8)
print()
import sys
sys.float_info.epsilon, np.log(sys.float_info.epsilon)
%%writefile qdalda.py
import numpy as np
import sys # for sys.float_info.epsilon
######################################################################
### class QDA
######################################################################
class QDA(object):
    """Quadratic Discriminant Analysis classifier.

    train() fits one multivariate normal N(mu_k, Sigma_k) per class to the
    standardized inputs; use() classifies by the largest discriminant value
    log P(x | Class=k) + log P(Class=k).
    """

    def __init__(self):
        # All attributes are assigned by train(); created here only so they
        # exist (as None) before training.
        self.means = None
        self.stds = None
        self.mu = None
        self.sigma = None
        self.sigmaInv = None
        self.prior = None
        self.determinant = None
        self.discriminantConstant = None

    def train(self, X, T):
        """Fit per-class means, covariances and priors.

        X: n x d samples; T: n x 1 class labels.
        """
        self.classes = np.unique(T)
        self.means, self.stds = np.mean(X, 0), np.std(X, 0)
        Xs = (X - self.means) / self.stds
        self.mu = []
        self.sigma = []
        self.sigmaInv = []
        self.determinant = []
        self.prior = []
        nSamples = X.shape[0]
        for k in self.classes:
            rowsThisClass = (T == k).reshape((-1))
            self.mu.append(np.mean(Xs[rowsThisClass, :], 0).reshape((-1, 1)))
            self.sigma.append(np.cov(Xs[rowsThisClass, :], rowvar=0))
            if self.sigma[-1].size == 1:
                self.sigma[-1] = self.sigma[-1].reshape((1, 1))
            det = np.linalg.det(self.sigma[-1])
            if det == 0:
                # Floor the determinant so log(det) stays finite.
                det = sys.float_info.epsilon
            self.determinant.append(det)
            self.sigmaInv.append(np.linalg.pinv(self.sigma[-1]))  # pinv in case Sigma is singular
            self.prior.append(np.sum(rowsThisClass) / float(nSamples))
        self._finishTrain()

    def _finishTrain(self):
        # Precompute the x-independent part of each class discriminant.
        self.discriminantConstant = []
        for ki in range(len(self.classes)):
            self.discriminantConstant.append(np.log(self.prior[ki]) - 0.5 * np.log(self.determinant[ki]))

    def use(self, X):
        """Classify the rows of X.

        Returns (predictedClass, probabilities, discriminants), each with
        one row per sample.
        """
        Xs = (X - self.means) / self.stds
        discriminants, probabilities = self._discriminantFunction(Xs)
        predictedClass = self.classes[np.argmax(discriminants, axis=1)]
        predictedClass = predictedClass.reshape((-1, 1))
        return predictedClass, probabilities, discriminants

    def _discriminantFunction(self, Xs):
        # BUG FIXES vs the original: the dimension D was read from an
        # undefined global, and Xs was centered with the (d,1) column
        # vector self.mu[ki], which only broadcasts correctly when d == 1.
        nSamples, nDims = Xs.shape
        discriminants = np.zeros((nSamples, len(self.classes)))
        for ki in range(len(self.classes)):
            Xc = Xs - self.mu[ki].T  # row-vector mean broadcasts over samples
            discriminants[:, ki:ki+1] = self.discriminantConstant[ki] - 0.5 * \
                np.sum(np.dot(Xc, self.sigmaInv[ki]) * Xc, axis=1).reshape((-1, 1))
        probabilities = np.exp(discriminants - 0.5 * nDims * np.log(2 * np.pi))
        return discriminants, probabilities

    def __repr__(self):
        if self.mu is None:
            return 'QDA not trained.'
        else:
            return 'QDA trained for classes {}'.format(self.classes)
######################################################################
### class LDA
######################################################################
class LDA(QDA):
    """Linear Discriminant Analysis: QDA with one shared covariance.

    The shared covariance is the prior-weighted average of the per-class
    covariances estimated by QDA.train, which makes the discriminant
    functions linear in x.
    """

    def _finishTrain(self):
        # Prior-weighted average of the per-class covariance matrices.
        self.sigmaMean = np.sum(np.stack(self.sigma) * np.array(self.prior)[:, np.newaxis, np.newaxis], axis=0)
        self.sigmaMeanInv = np.linalg.pinv(self.sigmaMean)
        detMean = np.linalg.det(self.sigmaMean)
        if detMean == 0:
            detMean = sys.float_info.epsilon  # keep log(det) finite, as in QDA.train
        self.determinantMean = detMean
        self.discriminantConstant = []
        self.discriminantCoefficient = []
        for ki in range(len(self.classes)):
            sigmaMu = np.dot(self.sigmaMeanInv, self.mu[ki])
            self.discriminantConstant.append(-0.5 * np.dot(self.mu[ki].T, sigmaMu))
            self.discriminantCoefficient.append(sigmaMu)

    def _discriminantFunction(self, Xs):
        # BUG FIXES vs the original: the dimension D was read from an
        # undefined global, and the density used self.determinant[ki] with
        # the loop variable after the loop ended (i.e. the last class's
        # per-class determinant).  LDA's density uses the shared covariance,
        # so the shared determinant belongs here.
        nSamples, nDims = Xs.shape
        discriminants = np.zeros((nSamples, len(self.classes)))
        for ki in range(len(self.classes)):
            discriminants[:, ki:ki+1] = self.discriminantConstant[ki] + \
                np.dot(Xs, self.discriminantCoefficient[ki])
        probabilities = np.exp(discriminants - 0.5 * nDims * np.log(2 * np.pi)
                               - 0.5 * np.log(self.determinantMean)
                               - 0.5 * np.sum(np.dot(Xs, self.sigmaMeanInv) * Xs, axis=1).reshape((-1, 1)))
        return discriminants, probabilities
######################################################################
### Example use
######################################################################
# Example use / smoke test: two 1-D normal classes, then train and evaluate
# both classifiers on the same data they were trained on.
if __name__ == '__main__':
    D = 1  # number of components in each sample
    N = 10  # number of samples in each class
    # Class 1 ~ N(0, 1), Class 2 ~ N(4, 1.5), stacked into one data matrix.
    X = np.vstack((np.random.normal(0.0, 1.0, (N, D)),
                   np.random.normal(4.0, 1.5, (N, D))))
    T = np.vstack((np.array([1]*N).reshape((N, 1)),
                   np.array([2]*N).reshape((N, 1))))
    qda = QDA()
    qda.train(X,T)
    c,prob,_ = qda.use(X)
    # Training-set accuracy only; this demo has no held-out test data.
    print('QDA', np.sum(c==T)/X.shape[0] * 100, '% correct')
    print('{:>3s} {:>4s} {:>14s}'.format('T','Pred','prob(C=k|x)'))
    for row in np.hstack((T,c,prob)):
        print('{:3.0f} {:3.0f} {:8.4f} {:8.4f}'.format(*row))
    lda = LDA()
    lda.train(X,T)
    c,prob,d = lda.use(X)
    print('LDA', np.sum(c==T)/X.shape[0] * 100, '% correct')
    print('{:>3s} {:>4s} {:>14s}'.format('T','Pred','prob(C=k|x)'))
    for row in np.hstack((T,c,prob)):
        print('{:3.0f} {:3.0f} {:8.4f} {:8.4f}'.format(*row))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we need some benchmarking code at various dataset sizes. Because some clustering algorithms have performance that can vary quite a lot depending on the exact nature of the dataset we'll also need to run several times on randomly generated datasets of each size so as to get a better idea of the average case performance.
Step2: Comparison of all ten implementations
Step3: Now it is just a matter of running all the clustering algorithms via our benchmark function to collect up all the requsite data. This could be prettier, rolled up into functions appropriately, but sometimes brute force is good enough. More importantly (for me) since this can take a significant amount of compute time, I wanted to be able to comment out algorithms that were slow or I was uninterested in easily. Which brings me to a warning for you the reader and potential user of the notebook
Step4: Now we need to plot the results so we can see what is going on. The catch is that we have several datapoints for each dataset size and ultimately we would like to try and fit a curve through all of it to get the general scaling trend. Fortunately seaborn comes to the rescue here by providing regplot which plots a regression through a dataset, supports higher order regression (we should probably use order two as most algorithms are effectively quadratic) and handles multiple datapoints for each x-value cleanly (using the x_estimator keyword to put a point at the mean and draw an error bar to cover the range of data).
Step5: A few features stand out. First of all there appear to be essentially two classes of implementation, with DeBaCl being an odd case that falls in the middle. The fast implementations tend to be implementations of single linkage agglomerative clustering, K-means, and DBSCAN. The slow cases are largely from sklearn and include agglomerative clustering (in this case using Ward instead of single linkage).
Step6: Again we can use seaborn to do curve fitting and plotting, exactly as before.
Step7: Clearly something has gone woefully wrong with the curve fitting for the scipy single linkage implementation, but what exactly? If we look at the raw data we can see.
Step8: It seems that at around 44000 points we hit a wall and the runtimes spiked. A hint is that I'm running this on a laptop with 8GB of RAM. Both single linkage algorithms use scipy.spatial.pdist to compute pairwise distances between points, which returns an array of shape (n(n-1)/2, 1) of doubles. A quick computation shows that that array of distances is quite large once we nave 44000 points
Step9: If we assume that my laptop is keeping much other than that distance array in RAM then clearly we are going to spend time paging out the distance array to disk and back and hence we will see the runtimes increase dramatically as we become disk IO bound. If we just leave off the last element we can get a better idea of the curve, but keep in mind that the scipy single linkage implementation does not scale past a limit set by your available RAM.
Step10: If we're looking for scaling we can write off the scipy single linkage implementation -- even if we hadn't hit the RAM limit, the $O(n^2)$ scaling is going to catch up with us quickly. Fastcluster has the same asymptotic scaling, but is heavily optimized to bring the constant down much lower -- at this point it is still keeping close to the faster algorithms. Its asymptotics will still catch up with it eventually, however.
Step11: Now the some differences become clear. The asymptotic complexity starts to kick in with fastcluster failing to keep up. In turn HDBSCAN and DBSCAN, while having sub-$O(n^2)$ complexity, can't achieve $O(n \log(n))$ at this dataset dimension, and start to curve upward precipitously. Finally it demonstrates again how much of a difference implementation can make
Step12: Now we run that for each of our pre-existing datasets to extrapolate out predicted performance on the relevant dataset sizes. A little pandas wrangling later and we've produced a table of roughly how large a dataset you can tackle in each time frame with each implementation. I had to leave out the scipy KMeans timings because the noise in timing results caused the model to be unrealistic at larger data sizes. Note how the $O(n\log n)$ algorithms utterly dominate here. In the meantime, for medium sizes data sets you can still get quite a lot done with HDBSCAN.
|
<ASSISTANT_TASK:>
Python Code:
import hdbscan
import debacl
import fastcluster
import sklearn.cluster
import scipy.cluster
import sklearn.datasets
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set_context('poster')
sns.set_palette('Paired', 10)
sns.set_color_codes()
def benchmark_algorithm(dataset_sizes, cluster_function, function_args, function_kwds,
                        dataset_dimension=10, dataset_n_clusters=10, max_time=45, sample_size=2):
    """Time cluster_function on random blob datasets of the given sizes.

    For each size, sample_size random datasets are generated and clustered.
    If a single run exceeds max_time seconds the benchmark aborts early,
    leaving the remaining entries NaN (NULL in the resulting dataframe).
    Returns a dataframe with columns x (dataset size) and y (seconds).
    """
    # Initialize with NaNs so unfilled entries are NULL in the dataframe.
    result = np.nan * np.ones((len(dataset_sizes), sample_size))

    def as_dataframe():
        # One row per (size, sample) pair; deduplicates the construction
        # that the original repeated in the abort branch and at the end.
        return pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size),
                                       result.flatten()]).T, columns=['x', 'y'])

    for index, size in enumerate(dataset_sizes):
        for s in range(sample_size):
            # Random dataset with the requested size, dimension and
            # number of clusters.
            data, labels = sklearn.datasets.make_blobs(n_samples=size,
                                                       n_features=dataset_dimension,
                                                       centers=dataset_n_clusters)
            # Time the clustering call only, not the data generation.
            start_time = time.time()
            cluster_function(data, *function_args, **function_kwds)
            time_taken = time.time() - start_time
            result[index, s] = time_taken
            if time_taken > max_time:
                # Abort -- don't spend excessive time on slow algorithms.
                return as_dataframe()
    return as_dataframe()
dataset_sizes = np.hstack([np.arange(1, 6) * 500, np.arange(3,7) * 1000, np.arange(4,17) * 2000])
k_means = sklearn.cluster.KMeans(10)
k_means_data = benchmark_algorithm(dataset_sizes, k_means.fit, (), {})
dbscan = sklearn.cluster.DBSCAN(eps=1.25)
dbscan_data = benchmark_algorithm(dataset_sizes, dbscan.fit, (), {})
scipy_k_means_data = benchmark_algorithm(dataset_sizes,
scipy.cluster.vq.kmeans, (10,), {})
scipy_single_data = benchmark_algorithm(dataset_sizes,
scipy.cluster.hierarchy.single, (), {})
fastclust_data = benchmark_algorithm(dataset_sizes,
fastcluster.linkage_vector, (), {})
hdbscan_ = hdbscan.HDBSCAN()
hdbscan_data = benchmark_algorithm(dataset_sizes, hdbscan_.fit, (), {})
debacl_data = benchmark_algorithm(dataset_sizes,
debacl.geom_tree.geomTree, (5, 5), {'verbose':False})
agglomerative = sklearn.cluster.AgglomerativeClustering(10)
agg_data = benchmark_algorithm(dataset_sizes,
agglomerative.fit, (), {}, sample_size=4)
spectral = sklearn.cluster.SpectralClustering(10)
spectral_data = benchmark_algorithm(dataset_sizes,
spectral.fit, (), {}, sample_size=6)
affinity_prop = sklearn.cluster.AffinityPropagation()
ap_data = benchmark_algorithm(dataset_sizes,
affinity_prop.fit, (), {}, sample_size=3)
sns.regplot(x='x', y='y', data=k_means_data, order=2,
label='Sklearn K-Means', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=dbscan_data, order=2,
label='Sklearn DBSCAN', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=scipy_k_means_data, order=2,
label='Scipy K-Means', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=hdbscan_data, order=2,
label='HDBSCAN', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=fastclust_data, order=2,
label='Fastcluster Single Linkage', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=scipy_single_data, order=2,
label='Scipy Single Linkage', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=debacl_data, order=2,
label='DeBaCl Geom Tree', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=spectral_data, order=2,
label='Sklearn Spectral', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=agg_data, order=2,
label='Sklearn Agglomerative', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=ap_data, order=2,
label='Sklearn Affinity Propagation', x_estimator=np.mean)
plt.gca().axis([0, 34000, 0, 120])
plt.gca().set_xlabel('Number of data points')
plt.gca().set_ylabel('Time taken to cluster (s)')
plt.title('Performance Comparison of Clustering Implementations')
plt.legend()
large_dataset_sizes = np.arange(1,16) * 4000
hdbscan_boruvka = hdbscan.HDBSCAN(algorithm='boruvka_kdtree')
large_hdbscan_boruvka_data = benchmark_algorithm(large_dataset_sizes,
hdbscan_boruvka.fit, (), {},
max_time=90, sample_size=1)
k_means = sklearn.cluster.KMeans(10)
large_k_means_data = benchmark_algorithm(large_dataset_sizes,
k_means.fit, (), {},
max_time=90, sample_size=1)
dbscan = sklearn.cluster.DBSCAN(eps=1.25, min_samples=5)
large_dbscan_data = benchmark_algorithm(large_dataset_sizes,
dbscan.fit, (), {},
max_time=90, sample_size=1)
large_fastclust_data = benchmark_algorithm(large_dataset_sizes,
fastcluster.linkage_vector, (), {},
max_time=90, sample_size=1)
large_scipy_k_means_data = benchmark_algorithm(large_dataset_sizes,
scipy.cluster.vq.kmeans, (10,), {},
max_time=90, sample_size=1)
large_scipy_single_data = benchmark_algorithm(large_dataset_sizes,
scipy.cluster.hierarchy.single, (), {},
max_time=90, sample_size=1)
sns.regplot(x='x', y='y', data=large_k_means_data, order=2,
label='Sklearn K-Means', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_dbscan_data, order=2,
label='Sklearn DBSCAN', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_scipy_k_means_data, order=2,
label='Scipy K-Means', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_hdbscan_boruvka_data, order=2,
label='HDBSCAN Boruvka', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_fastclust_data, order=2,
label='Fastcluster Single Linkage', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_scipy_single_data, order=2,
label='Scipy Single Linkage', x_estimator=np.mean)
plt.gca().axis([0, 64000, 0, 150])
plt.gca().set_xlabel('Number of data points')
plt.gca().set_ylabel('Time taken to cluster (s)')
plt.title('Performance Comparison of Fastest Clustering Implementations')
plt.legend()
large_scipy_single_data.tail(10)
size_of_array = 44000 * (44000 - 1) / 2 # from pdist documentation
bytes_in_array = size_of_array * 8 # Since doubles use 8 bytes
gigabytes_used = bytes_in_array / (1024.0 ** 3) # divide out to get the number of GB
gigabytes_used
sns.regplot(x='x', y='y', data=large_k_means_data, order=2,
label='Sklearn K-Means', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_dbscan_data, order=2,
label='Sklearn DBSCAN', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_scipy_k_means_data, order=2,
label='Scipy K-Means', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_hdbscan_boruvka_data, order=2,
label='HDBSCAN Boruvka', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_fastclust_data, order=2,
label='Fastcluster Single Linkage', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_scipy_single_data[:8], order=2,
label='Scipy Single Linkage', x_estimator=np.mean)
plt.gca().axis([0, 64000, 0, 150])
plt.gca().set_xlabel('Number of data points')
plt.gca().set_ylabel('Time taken to cluster (s)')
plt.title('Performance Comparison of Fastest Clustering Implementations')
plt.legend()
huge_dataset_sizes = np.arange(1,11) * 20000
k_means = sklearn.cluster.KMeans(10)
huge_k_means_data = benchmark_algorithm(huge_dataset_sizes,
k_means.fit, (), {},
max_time=120, sample_size=2, dataset_dimension=10)
dbscan = sklearn.cluster.DBSCAN(eps=1.5)
huge_dbscan_data = benchmark_algorithm(huge_dataset_sizes,
dbscan.fit, (), {},
max_time=120, sample_size=2, dataset_dimension=10)
huge_scipy_k_means_data = benchmark_algorithm(huge_dataset_sizes,
scipy.cluster.vq.kmeans, (10,), {},
max_time=120, sample_size=2, dataset_dimension=10)
hdbscan_boruvka = hdbscan.HDBSCAN(algorithm='boruvka_kdtree')
huge_hdbscan_data = benchmark_algorithm(huge_dataset_sizes,
hdbscan_boruvka.fit, (), {},
max_time=240, sample_size=4, dataset_dimension=10)
huge_fastcluster_data = benchmark_algorithm(huge_dataset_sizes,
fastcluster.linkage_vector, (), {},
max_time=240, sample_size=2, dataset_dimension=10)
sns.regplot(x='x', y='y', data=huge_k_means_data, order=2,
label='Sklearn K-Means', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=huge_dbscan_data, order=2,
label='Sklearn DBSCAN', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=huge_scipy_k_means_data, order=2,
label='Scipy K-Means', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=huge_hdbscan_data, order=2,
label='HDBSCAN', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=huge_fastcluster_data, order=2,
label='Fastcluster', x_estimator=np.mean)
plt.gca().axis([0, 200000, 0, 240])
plt.gca().set_xlabel('Number of data points')
plt.gca().set_ylabel('Time taken to cluster (s)')
plt.title('Performance Comparison of K-Means and DBSCAN')
plt.legend()
import statsmodels.formula.api as sm
time_samples = [1000, 2000, 5000, 10000, 25000, 50000, 75000, 100000, 250000, 500000, 750000,
1000000, 2500000, 5000000, 10000000, 50000000, 100000000, 500000000, 1000000000]
def get_timing_series(data, quadratic=True):
    """Fit a scaling model to benchmark timings and extrapolate to time_samples.

    Parameters
    ----------
    data : pandas.DataFrame
        Benchmark results with columns 'x' (dataset size) and 'y' (seconds).
    quadratic : bool, optional
        If True, fit y ~ x + x**2; otherwise fit y ~ x + x*log(x)
        (i.e. assume n log(n) scaling).

    Returns
    -------
    pandas.Series
        Predicted runtimes indexed by the module-level `time_samples` sizes.
    """
    # Work on a copy so the caller's DataFrame is not mutated by the derived
    # feature column added below (previously callers had to pass .copy()
    # defensively to avoid the in-place column insertion).
    data = data.copy()
    if quadratic:
        data['x_squared'] = data.x ** 2
        model = sm.ols('y ~ x + x_squared', data=data).fit()
        # model.params is (intercept, x-coef, x**2-coef); evaluate at each size.
        predictions = [model.params.dot([1.0, i, i**2]) for i in time_samples]
    else:  # assume n log(n) scaling
        data['xlogx'] = data.x * np.log(data.x)
        model = sm.ols('y ~ x + xlogx', data=data).fit()
        predictions = [model.params.dot([1.0, i, i * np.log(i)])
                       for i in time_samples]
    return pd.Series(predictions, index=pd.Index(time_samples))
ap_timings = get_timing_series(ap_data)
spectral_timings = get_timing_series(spectral_data)
agg_timings = get_timing_series(agg_data)
debacl_timings = get_timing_series(debacl_data)
fastclust_timings = get_timing_series(large_fastclust_data.ix[:10,:].copy())
scipy_single_timings = get_timing_series(large_scipy_single_data.ix[:10,:].copy())
hdbscan_boruvka = get_timing_series(huge_hdbscan_data, quadratic=True)
#scipy_k_means_timings = get_timing_series(huge_scipy_k_means_data, quadratic=False)
dbscan_timings = get_timing_series(huge_dbscan_data, quadratic=True)
k_means_timings = get_timing_series(huge_k_means_data, quadratic=False)
timing_data = pd.concat([ap_timings, spectral_timings, agg_timings, debacl_timings,
scipy_single_timings, fastclust_timings, hdbscan_boruvka,
dbscan_timings, k_means_timings
], axis=1)
timing_data.columns=['AffinityPropagation', 'Spectral', 'Agglomerative',
'DeBaCl', 'ScipySingleLinkage', 'Fastcluster',
'HDBSCAN', 'DBSCAN', 'SKLearn KMeans'
]
def get_size(series, max_time):
    """Return the largest dataset size whose predicted runtime stays under max_time.

    `series` maps dataset sizes (index) to predicted runtimes in seconds
    (values); sizes whose runtime meets or exceeds the budget are excluded.
    """
    under_budget = series < max_time
    return series.index[under_budget].max()
datasize_table = pd.concat([
timing_data.apply(get_size, max_time=30),
timing_data.apply(get_size, max_time=300),
timing_data.apply(get_size, max_time=3600),
timing_data.apply(get_size, max_time=8*3600)
], axis=1)
datasize_table.columns=('Interactive', 'Get Coffee', 'Over Lunch', 'Overnight')
datasize_table
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 特異値分解
Step2: 低ランク近似
Step3: 文書ベクトルの次元削減
Step4: 文書$d1$と$d2$の特徴空間上での類似度を計算してみよう.
Step5: このように,次元削減された文書ベクトル${{\bf d}_i^{(k)}}$間のコサイン類似度が,低ランク近似された単語-文書行列$M_k$における文書ベクトル間のコサイン類似度と一致することが分かる.
Step6: 単語ベクトルの次元削減
Step7: $T$の$i$行目と$j$行目がそれぞれ単語$i,j$の特徴空間上でのベクトル表現になる
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import scipy.linalg as lg
from scipy.spatial.distance import cosine
import matplotlib.pyplot as plt
import pandas as pd
np.set_printoptions(precision=2)
pd.set_option('precision', 2)
%matplotlib inline
%precision 2
def sim(v1, v2):
    """Cosine similarity between two vectors (1 minus SciPy's cosine distance)."""
    distance = cosine(v1, v2)
    return 1 - distance
# IIR exercise 18.4 の内容を用意
M = np.array(
[[1,0,1,0,0,0],
[0,1,0,0,0,0],
[1,1,0,0,0,0],
[1,0,0,1,1,0],
[0,0,0,1,0,1]]
)
print(M)
print(M.shape)
print("Rank(M) =", np.linalg.matrix_rank(M)) #行列Mのランクは5
doc_names = ["d1", "d2", "d3", "d4", "d5", "d6"]
term_names = ["ship", "boat", "ocean", "voyge", "trip"]
df = pd.DataFrame(M,
columns=doc_names, index=term_names)
df
U, sigma, Vt = lg.svd(M) #SVD.なお,sigmaは行列ではなく特異値が降順に並んだ配列
Sigma = lg.diagsvd(sigma, M.shape[0], M.shape[1]) #確認のため truncateしていないSigmaを作成する.特異値集合からMxN対角行列を作成する.
print(Sigma.shape)
print(Sigma)
M_r = np.dot(np.dot(U, Sigma), Vt) #分解した結果が本当にMと一致するのか確認 M = U x Sigma x V^T
np.linalg.norm(M - M_r) # フロベニウスノルム
k = 2  # number of latent dimensions to keep
U_k = U[:, :k]  # truncate to an m x k matrix
Vt_k = Vt[:k,:]  # truncate to a k x n matrix
Sigma_k = Sigma[:k,:k]  # keep only the top-k singular values
print("U_k ="),
print(U_k)
print("Sigma_k=")
print(Sigma_k)
print("V_k^T ="),
print(Vt_k)
M_k = np.dot(np.dot(U_k, Sigma_k), Vt_k)  # rank-k (low-rank) approximation of M
print("M_k=")
print(M_k)
doc_names = ["d1", "d2", "d3", "d4", "d5", "d6"]
term_names = ["ship", "boat", "ocean", "voyge", "trip"]
df = pd.DataFrame(M_k,
                  columns=doc_names, index=term_names)
df
print("|| M - M_k || =", lg.norm(M-M_k)) # Frobenius norm of the approximation error
print("Rank(M_k) =", np.linalg.matrix_rank(M_k)) # confirm the result is a rank-2 matrix
print(lg.norm(M-M_k)**2) # squared Frobenius norm should equal the sum of squared singular values from k+1 onward
print(sum(map(lambda x: x ** 2, sigma[k:]))) # sum of squared singular values from index k onward
D_k = np.dot(Sigma_k, Vt_k)
D_k
axis_names = ["z1", "z2"]
doc_names = ["d1", "d2", "d3", "d4", "d5", "d6"]
df = pd.DataFrame(D_k.T,
columns=axis_names, index=doc_names) # np.r_ は行列同士の連結
print("D_k=")
df.T
fig, ax = plt.subplots()
df.plot.scatter(x="z1", y="z2", ax=ax)
ax.axvline(x=0, lw=2, color='red') #x軸とy軸に線を引く
ax.axhline(y=0, lw=2, color='red')
ax.set_xlim(-0.5, 2.0)
ax.set_ylim(-1.0, 1.5)
ax.grid(True)
for k, v in df.iterrows():
ax.annotate(k, xy=(v[0]+0.05,v[1]+0.05),size=15) #データ点にラベル名を付与
# d1とd2の特徴空間上での類似度を計算する
print("特徴空間上でのコサイン類似度 =",sim(D_k[:,0], D_k[:,1]))
print("M_k上での文書ベクトルのコサイン類似度 =", sim(M_k[:,0], M_k[:,1]))
print("なお,元の文書ベクトル上でのコサイン類似度 =", sim(M[:,0],M[:,1]))
q = np.array([1,0,1,1,0]) #文書d1と同じものをクエリとして用いてみる
q_k = np.dot(U_k.T, q) #k次元特徴空間へ射影
print(q_k) # d_j^{k} と一致していることを確認
print("sim(q, d) =",sim(q_k, D_k[:,0])) #文書d1との特徴空間上での類似度
T_k = np.dot(U_k, Sigma_k)
axis_names = ["z1", "z2"]
term_names = ["ship", "boat", "ocean", "voyge", "trip"]
df = pd.DataFrame(T_k,
columns=axis_names, index=term_names) # np.r_ は行列同士の連結
df
# 特徴空間上の単語ベクトルをプロット
fig, ax = plt.subplots()
df.plot.scatter(x="z1", y="z2", ax=ax)
ax.axvline(x=0, lw=2, color='red') #x軸とy軸に線を引く
ax.axhline(y=0, lw=2, color='red')
ax.set_xlim(-0.5, 2.0)
ax.set_ylim(-1.0, 1.5)
ax.grid(True)
for k, v in df.iterrows():
ax.annotate(k, xy=(v[0]+0.05,v[1]+0.05),size=15) #データ点にラベル名を付与
t_1 = T_k[0,:] #ship
t_2 = T_k[1,:] #boat
print("k次元特徴空間での類似度")
print("sim(ship, boat) =", sim(t_1, t_2))
print("元の空間での類似度")
print("sim(ship, boat) =", sim(M[0,:], M[1,:]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There are actually two different approaches you can take to using TensorFlow or PyTorch models with DeepChem. It depends on whether you want to use TensorFlow/PyTorch APIs or DeepChem APIs for training and evaluating your model. For the former case, DeepChem's Dataset class has methods for easily adapting it to use with other frameworks. make_tf_dataset() returns a tensorflow.data.Dataset object that iterates over the data. make_pytorch_dataset() returns a torch.utils.data.IterableDataset that iterates over the data. This lets you use DeepChem's datasets, loaders, featurizers, transformers, splitters, etc. and easily integrate them into your existing TensorFlow or PyTorch code.
Step2: For this example, we used the Keras Sequential class. Our model consists of a dense layer with ReLU activation, 50% dropout to provide regularization, and a final layer that produces a scalar output. We also need to specify the loss function to use when training the model, in this case L<sub>2</sub> loss. We can now train and evaluate the model exactly as we would with any other DeepChem model. For example, let's load the Delaney solubility dataset. How does our model do at predicting the solubilities of molecules based on their extended-connectivity fingerprints (ECFPs)?
Step3: TorchModel
Step4: Computing Losses
Step5: We can train our model on the BACE dataset. This is a binary classification task that tries to predict whether a molecule will inhibit the enzyme BACE-1.
Step6: Other Features
|
<ASSISTANT_TASK:>
Python Code:
!pip install --pre deepchem
import deepchem as dc
import tensorflow as tf
keras_model = tf.keras.Sequential([
tf.keras.layers.Dense(1000, activation='relu'),
tf.keras.layers.Dropout(rate=0.5),
tf.keras.layers.Dense(1)
])
model = dc.models.KerasModel(keras_model, dc.models.losses.L2Loss())
tasks, datasets, transformers = dc.molnet.load_delaney(featurizer='ECFP', splitter='random')
train_dataset, valid_dataset, test_dataset = datasets
model.fit(train_dataset, nb_epoch=50)
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
print('training set score:', model.evaluate(train_dataset, [metric]))
print('test set score:', model.evaluate(test_dataset, [metric]))
import torch
pytorch_model = torch.nn.Sequential(
torch.nn.Linear(1024, 1000),
torch.nn.ReLU(),
torch.nn.Dropout(0.5),
torch.nn.Linear(1000, 1)
)
model = dc.models.TorchModel(pytorch_model, dc.models.losses.L2Loss())
model.fit(train_dataset, nb_epoch=50)
print('training set score:', model.evaluate(train_dataset, [metric]))
print('test set score:', model.evaluate(test_dataset, [metric]))
class ClassificationModel(tf.keras.Model):
    """Two-layer binary classifier returning both sigmoid output and raw logits."""

    def __init__(self):
        super().__init__()
        self.dense1 = tf.keras.layers.Dense(1000, activation='relu')
        self.dense2 = tf.keras.layers.Dense(1)

    def call(self, inputs, training=False):
        """Forward pass; dropout is applied only while training."""
        hidden = self.dense1(inputs)
        if training:
            hidden = tf.nn.dropout(hidden, 0.5)
        logits = self.dense2(hidden)
        probs = tf.nn.sigmoid(logits)
        return probs, logits
keras_model = ClassificationModel()
# The model emits (sigmoid output, logits); tell KerasModel which is the
# prediction and which feeds the loss.
output_types = ['prediction', 'loss']
model = dc.models.KerasModel(keras_model, dc.models.losses.SigmoidCrossEntropy(), output_types=output_types)
# FIX: the keyword arguments were misspelled as `feturizer=` and `split=`.
# MoleculeNet loaders swallow unknown **kwargs, so the typos were silently
# ignored and the loader defaults were used instead. Spelling now matches
# the load_delaney call earlier in this file (featurizer= / splitter=).
tasks, datasets, transformers = dc.molnet.load_bace_classification(featurizer='ECFP', splitter='scaffold')
train_dataset, valid_dataset, test_dataset = datasets
model.fit(train_dataset, nb_epoch=100)
metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
print('training set score:', model.evaluate(train_dataset, [metric]))
print('test set score:', model.evaluate(test_dataset, [metric]))
@manual{Intro1,
title={5},
organization={DeepChem},
author={Ramsundar, Bharath},
howpublished = {\url{https://github.com/deepchem/deepchem/blob/master/examples/tutorials/Creating_Models_with_TensorFlow_and_PyTorch.ipynb}},
year={2021},
}
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate Input Files
Step2: With our three materials, we can now create a materials file object that can be exported to an actual XML file.
Step3: Now let's move on to the geometry. Our problem will have three regions for the fuel, the clad, and the surrounding coolant. The first step is to create the bounding surfaces -- in this case two cylinders and six reflective planes.
Step4: With the surfaces defined, we can now create cells that are defined by intersections of half-spaces created by the surfaces.
Step5: OpenMC requires that there is a "root" universe. Let us create a root cell that is filled by the pin cell universe and then assign it to the root universe.
Step6: We now must create a geometry that is assigned a root universe, put the geometry into a geometry file, and export it to XML.
Step7: With the geometry and materials finished, we now just need to define simulation parameters. In this case, we will use 10 inactive batches and 90 active batches each with 5000 particles.
Step8: Let us also create a plot file that we can use to verify that our pin cell geometry was created successfully.
Step9: As we can see from the plot, we have a nice pin cell with fuel, cladding, and water! Before we run our simulation, we need to tell the code what we want to tally. The following code shows how to create a 2D mesh tally.
Step10: Now we a have a complete set of inputs, so we can go ahead and run our simulation.
Step11: Tally Data Processing
Step12: Next we need to get the tally, which can be done with the StatePoint.get_tally(...) method.
Step13: The statepoint file actually stores the sum and sum-of-squares for each tally bin from which the mean and variance can be calculated as described here. The sum and sum-of-squares can be accessed using the sum and sum_sq properties
Step14: However, the mean and standard deviation of the mean are usually what you are more interested in. The Tally class also has properties mean and std_dev which automatically calculate these statistics on-the-fly.
Step15: The tally data has three dimensions
Step16: To get the bins into a form that we can plot, we can simply change the shape of the array since it is a numpy array.
Step17: Now let's say we want to look at the distribution of relative errors of our tally bins for flux. First we create a new variable called relative_error and set it to the ratio of the standard deviation and the mean, being careful not to divide by zero in case some bins were never scored to.
Step18: Source Sites
Step19: If we want, say, only the energies from the source sites, we can simply index the source array with the name of the field
Step20: Now, we can look at things like the energy distribution of source sites. Note that we don't directly use the matplotlib.pyplot.hist method since our binning is logarithmic.
Step21: Let's also look at the spatial distribution of the sites. To make the plot a little more interesting, we can also include the direction of the particle emitted from the source and color each source by the logarithm of its energy.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from IPython.display import Image
import numpy as np
import matplotlib.pyplot as plt
import openmc
# 1.6 enriched fuel
fuel = openmc.Material(name='1.6% Fuel')
fuel.set_density('g/cm3', 10.31341)
fuel.add_nuclide('U235', 3.7503e-4)
fuel.add_nuclide('U238', 2.2625e-2)
fuel.add_nuclide('O16', 4.6007e-2)
# borated water
water = openmc.Material(name='Borated Water')
water.set_density('g/cm3', 0.740582)
water.add_nuclide('H1', 4.9457e-2)
water.add_nuclide('O16', 2.4732e-2)
water.add_nuclide('B10', 8.0042e-6)
# zircaloy
zircaloy = openmc.Material(name='Zircaloy')
zircaloy.set_density('g/cm3', 6.55)
zircaloy.add_nuclide('Zr90', 7.2758e-3)
# Instantiate a Materials collection
materials = openmc.Materials([fuel, water, zircaloy])
# Export to "materials.xml"
materials.export_to_xml()
# Create cylinders for the fuel and clad
fuel_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, r=0.39218)
clad_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, r=0.45720)
# Create boundary planes to surround the geometry
min_x = openmc.XPlane(x0=-0.63, boundary_type='reflective')
max_x = openmc.XPlane(x0=+0.63, boundary_type='reflective')
min_y = openmc.YPlane(y0=-0.63, boundary_type='reflective')
max_y = openmc.YPlane(y0=+0.63, boundary_type='reflective')
min_z = openmc.ZPlane(z0=-0.63, boundary_type='reflective')
max_z = openmc.ZPlane(z0=+0.63, boundary_type='reflective')
# Create a Universe to encapsulate a fuel pin
pin_cell_universe = openmc.Universe(name='1.6% Fuel Pin')
# Create fuel Cell
fuel_cell = openmc.Cell(name='1.6% Fuel')
fuel_cell.fill = fuel
fuel_cell.region = -fuel_outer_radius
pin_cell_universe.add_cell(fuel_cell)
# Create a clad Cell
clad_cell = openmc.Cell(name='1.6% Clad')
clad_cell.fill = zircaloy
clad_cell.region = +fuel_outer_radius & -clad_outer_radius
pin_cell_universe.add_cell(clad_cell)
# Create a moderator Cell
moderator_cell = openmc.Cell(name='1.6% Moderator')
moderator_cell.fill = water
moderator_cell.region = +clad_outer_radius
pin_cell_universe.add_cell(moderator_cell)
# Create root Cell
root_cell = openmc.Cell(name='root cell')
root_cell.fill = pin_cell_universe
# Add boundary planes
root_cell.region = +min_x & -max_x & +min_y & -max_y & +min_z & -max_z
# Create root Universe
root_universe = openmc.Universe(universe_id=0, name='root universe')
root_universe.add_cell(root_cell)
# Create Geometry and set root Universe
geometry = openmc.Geometry(root_universe)
# Export to "geometry.xml"
geometry.export_to_xml()
# OpenMC simulation parameters
settings = openmc.Settings()
settings.batches = 100
settings.inactive = 10
settings.particles = 5000
# Create an initial uniform spatial source distribution over fissionable zones
bounds = [-0.63, -0.63, -0.63, 0.63, 0.63, 0.63]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings.source = openmc.Source(space=uniform_dist)
# Export to "settings.xml"
settings.export_to_xml()
plot = openmc.Plot.from_geometry(geometry)
plot.pixels = (250, 250)
plot.to_ipython_image()
# Instantiate an empty Tallies object
tallies = openmc.Tallies()
# Create mesh which will be used for tally
mesh = openmc.RegularMesh()
mesh.dimension = [100, 100]
mesh.lower_left = [-0.63, -0.63]
mesh.upper_right = [0.63, 0.63]
# Create mesh filter for tally
mesh_filter = openmc.MeshFilter(mesh)
# Create mesh tally to score flux and fission rate
tally = openmc.Tally(name='flux')
tally.filters = [mesh_filter]
tally.scores = ['flux', 'fission']
tallies.append(tally)
# Export to "tallies.xml"
tallies.export_to_xml()
# Run OpenMC!
openmc.run()
# Load the statepoint file
sp = openmc.StatePoint('statepoint.100.h5')
tally = sp.get_tally(scores=['flux'])
print(tally)
tally.sum
print(tally.mean.shape)
(tally.mean, tally.std_dev)
flux = tally.get_slice(scores=['flux'])
fission = tally.get_slice(scores=['fission'])
print(flux)
flux.std_dev.shape = (100, 100)
flux.mean.shape = (100, 100)
fission.std_dev.shape = (100, 100)
fission.mean.shape = (100, 100)
fig = plt.subplot(121)
fig.imshow(flux.mean)
fig2 = plt.subplot(122)
fig2.imshow(fission.mean)
# Determine relative error
relative_error = np.zeros_like(flux.std_dev)
nonzero = flux.mean > 0
relative_error[nonzero] = flux.std_dev[nonzero] / flux.mean[nonzero]
# distribution of relative errors
ret = plt.hist(relative_error[nonzero], bins=50)
sp.source
sp.source['E']
# Create log-spaced energy bins from 1 keV to 10 MeV
energy_bins = np.logspace(3,7)
# Calculate pdf for source energies
probability, bin_edges = np.histogram(sp.source['E'], energy_bins, density=True)
# Make sure integrating the PDF gives us unity
print(sum(probability*np.diff(energy_bins)))
# Plot source energy PDF
plt.semilogx(energy_bins[:-1], probability*np.diff(energy_bins), drawstyle='steps')
plt.xlabel('Energy (eV)')
plt.ylabel('Probability/eV')
plt.quiver(sp.source['r']['x'], sp.source['r']['y'],
sp.source['u']['x'], sp.source['u']['y'],
np.log(sp.source['E']), cmap='jet', scale=20.0)
plt.colorbar()
plt.xlim((-0.5,0.5))
plt.ylim((-0.5,0.5))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Player ratings predict outcome
Step2: A note about standard errors
Step3: See how the change in player average rating changes the sharpness of the function - the interpretation is that absolute differences in rating between players become more predictive for more highly rated players. In other words, higher rated players take more advantage of small differences in ability (note also that the advantage to white at 0 rating difference also grows as the player average rating rises).
Step4: Note - and this will be relevant later - if we had players who were over-rated (i.e. performing at a level below their numerical rating) then this would look like a shift up to the top left of this graph and a shift down to the bottom right. This is a flattening of the non-baselined curve (further above).
Step5: This comparison shows no stereotype threat effect. When the female/white player has a lower rating than her male opponent her chances of victory are greater than for a comparable match between two male players with an equal rating difference. The overall effect is a 'flattening' of the rating-difference vs outcome curve, as happens with overall lower-rated players compared to higher-rated players.
Step6: And calculate the number of games we are analysing
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd #data munging
import numpy as np #number functions
import os #directory and file functions
import pylab as plt #graphing functions
import socket #machine id
import seaborn as sns
from matplotlib.font_manager import FontProperties
print "working directory = " + os.getcwd()
#----------------- load subset of data, which contains only variables used in this analysis"
local=False
gamedatloc='gamedat_tenpc.csv' #10% of the players in the full dataset
if local:
gamedatloc='../../../../Sonas92/gamedat2.csv' #full dataset not currently available, sorry
#games=pd.read_csv(gamedatloc,index_col=0,nrows=30000) #if you only want a trivial subset of the data
games=pd.read_csv(gamedatloc,index_col=0)
print "N games in data = " + str(len(games)) #not all of these can be used because of missing data
# Display the first 10 rows of the dataset
games.ix[:10,:]
#----------------- graph params"
%matplotlib inline
fsize=(12,10)
binwidth=125
bins=np.arange(-625,627,binwidth)
ylimit_diff=0.08
fmts=['-^','-s','-o','-d','-v'] #marker shapes
lss=['--','-.','-','.',':'] # line styles
colours=['darkred','orange','b','indigo','k'] #line colours
lweight=2
sns.set(font_scale=2)
#------------ graph difference in player rating (White - Black) against mean score
# ----------- (White Win = 1, Black Wins = 0, draw = 0.5)
#categorise games by the difference in rating of the two players (White-Black)
games['diff']=games['wElo']-games['bElo']
games['rdiff_cat']=pd.cut(games['diff'],bins)
#means
yvals=games.groupby(pd.cut(games['diff'],bins)).mean()['WhiteScore']
#standard errors
y_sem=games.groupby(pd.cut(games['diff'],bins)).sem()['WhiteScore']
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=fsize)
axes.errorbar(bins[:-1]+binwidth/2,yvals,yerr=y_sem*1.96,color='b',fmt='-o',lw=lweight)
axes.set_xlabel('Difference in rating \n(White - Black, using bin size = ' + str(binwidth) + ')')
axes.set_ylabel('Average score for White')
axes.set_ylim([0,1])
axes.set_xlim([-650,650])
#plot rating difference - outcome function for different player average rating quintiles
games['avElo']=(games.wElo+games.bElo)/2 #calculate average rating of the two players for each game
games=games[~games.avElo.isnull()] #remove any missing data
quintile_groups=["5th","4th","3rd","2nd","1st"] #labels for the top 20%, top 21-41%, etc
games['quintile']=pd.qcut(games.avElo,5,labels=quintile_groups) #categorise according to quintile
quintile_groups.reverse() #just so legend is easier to read
plot_these=['5th','3rd','1st'] #it's a tidier plot with just middle, top and bottom 20%
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=fsize)
for i,q in enumerate(quintile_groups):
qn_mean=games[games.quintile==q].groupby('rdiff_cat').mean()['WhiteScore'] #mean
qn_sem=games[games.quintile==q].groupby('rdiff_cat').sem()['WhiteScore'] #standard error
if q in plot_these:
axes.errorbar(bins[:-1]+binwidth/2,qn_mean,yerr=qn_sem*1.96,color=colours[i],fmt=fmts[i],ls=lss[i],lw=lweight,label=q)
titletext= 'quintile by player average rating'
axes.set_xlabel('Difference in rating \n(White - Black, using bin size = ' + str(binwidth) + ')')
axes.set_ylabel('Average score for White')
axes.set_xlim([-650,650])
fontP = FontProperties()
fontP.set_size(16)
legend = plt.legend(loc=0, ncol=3, bbox_to_anchor=(0, 0, 1, 1),prop = fontP,fancybox=True,shadow=False,title=titletext)
plt.setp(legend.get_title(),fontsize=16)
#calculate baseline
baseline = games[games.quintile=='3rd'].groupby('rdiff_cat').mean()['WhiteScore'] #mean
base_std = games[games.quintile=='3rd'].groupby('rdiff_cat').std()['WhiteScore'] #std
baseli_n = games[games.quintile=='3rd'].groupby('rdiff_cat').agg(len)['WhiteScore'] #count
#plot different groups
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=fsize)
for i,q in enumerate(quintile_groups):
qn_mean=games[games.quintile==q].groupby('rdiff_cat').mean()['WhiteScore'] #mean
qn_sd=games[games.quintile==q].groupby('rdiff_cat').std()['WhiteScore'] #standard deviation
qn_count=games[games.quintile==q].groupby('rdiff_cat').agg(len)['WhiteScore'] #count
if q in plot_these:
yvals=qn_mean-baseline #mean against baseline
y_std=np.sqrt( ((qn_sd**2)/qn_count) + ((base_std**2)/baseli_n) ) #standard dev
y_sem=y_std/np.sqrt(qn_count+baseli_n)
if i ==2:
y_sem=0 #all error is pushed into difference from baseline
plt.errorbar(bins[:-1]+binwidth/2,yvals,yerr=y_sem*1.96,color=colours[i],fmt=fmts[i],ls=lss[i],lw=lweight,label=q)
axes.set_xlabel('Difference in rating \n(White - Black, using bin size = ' + str(binwidth) + ')')
axes.set_ylabel('Average score for White \ncompared to 3rd quintile baseline')
axes.set_xlim([-650,650])
axes.set_ylim([-ylimit_diff,ylimit_diff])
fontP = FontProperties()
fontP.set_size(16)
legend = plt.legend(loc=0, ncol=3, bbox_to_anchor=(0, 0, 1, 1),prop = fontP,fancybox=True,shadow=False,title=titletext)
plt.setp(legend.get_title(),fontsize=16)
#our baseline: players are Male-Male
yvalsMM=games[games.pair=='MM'].groupby('rdiff_cat').mean()['WhiteScore']
y_semMM=games[games.pair=='MM'].groupby('rdiff_cat').sem()['WhiteScore']
y_stdMM=games[games.pair=='MM'].groupby('rdiff_cat').std()['WhiteScore']
y_cntMM=games[games.pair=='MM'].groupby('rdiff_cat').agg(len)['WhiteScore']
#players are female-female
yvalsFF=games[games.pair=='FF'].groupby('rdiff_cat').mean()['WhiteScore']
y_semFF=games[games.pair=='FF'].groupby('rdiff_cat').sem()['WhiteScore']
y_stdFF=games[games.pair=='FF'].groupby('rdiff_cat').std()['WhiteScore']
y_cntFF=games[games.pair=='FF'].groupby('rdiff_cat').agg(len)['WhiteScore']
#players are female-male
yvalsFM=games[games.pair=='FM'].groupby('rdiff_cat').mean()['WhiteScore']
y_stdFM=games[games.pair=='FM'].groupby('rdiff_cat').std()['WhiteScore']
y_cntFM=games[games.pair=='FM'].groupby('rdiff_cat').agg(len)['WhiteScore']
#players are female-male
yvalsMF=games[games.pair=='MF'].groupby('rdiff_cat').mean()['WhiteScore']
y_stdMF=games[games.pair=='MF'].groupby('rdiff_cat').std()['WhiteScore']
y_cntMF=games[games.pair=='MF'].groupby('rdiff_cat').agg(len)['WhiteScore']
#created weighted average of FM values with MF values by first comparing to appropriate baseline
#(p(win|white) for MM and p(win|black) for MM respectively)
yvalsFM_MF=(yvalsFM-yvalsMM)*(y_cntFM/(y_cntFM+y_cntMF))+(yvalsMF-yvalsMM)*(y_cntMF/(y_cntFM+y_cntMF))
y_cntFM_MF=y_cntFM+y_cntMF
y_stdFM_MF=np.sqrt( ((y_stdFM**2)/y_cntFM) + ((y_stdMF**2)/y_cntMF) )
#calculate standard deviations of differences from baseline
y_stdFFdiff=np.sqrt( ((y_stdFF**2)/y_cntFF) + ((y_stdMM**2)/y_cntMM) )
y_stdFMdiff=np.sqrt( ((y_stdFM**2)/y_cntFM) + ((y_stdMM**2)/y_cntMM) )
y_stdMFdiff=np.sqrt( ((y_stdMF**2)/y_cntMF) + ((y_stdMM**2)/y_cntMM) )
y_stdFM_MFdiff=np.sqrt( ((y_stdFM_MF**2)/y_cntFM_MF) + ((y_stdMM**2)/y_cntMM) )
#calculate standard errors
#sem = std / sqrt(n)
y_semFFdiff= y_stdFFdiff / np.sqrt(y_cntFF+y_cntMM)
y_semFMdiff= y_stdFMdiff / np.sqrt(y_cntFM+y_cntMM)
y_semMFdiff= y_stdMFdiff / np.sqrt(y_cntMF+y_cntMM)
y_semFM_MFdiff = y_stdFM_MFdiff / np.sqrt(y_cntFM_MF+y_cntMM)
#plot
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=fsize)
axes.errorbar(bins[:-1]+binwidth/2,yvalsMM-yvalsMM,yerr=0,fmt=fmts[2],ls=lss[2],lw=lweight,label='MM')
#axes.errorbar(bins[:-1]+binwidth/2,yvalsFM-yvalsMM,yerr=y_semFMdiff*1.96,fmt='-d',ls=lss[0],lw=lweight,label='FM')
axes.errorbar(bins[:-1]+binwidth/2,yvalsFM_MF,yerr=y_semFM_MFdiff*1.96,fmt=fmts[0],ls=lss[0],lw=lweight,label='FM+MF')
#axes.errorbar(bins[:-1]+binwidth/2,yvalsMF-yvalsMM,yerr=y_semMFdiff*1.96,fmt='-d',ls=lss[0],lw=lweight,label='MF')
#axes.errorbar(bins[:-1]+binwidth/2,yvalsFF-yvalsMM,yerr=y_semFFdiff*1.96,fmt=fmts[4],ls=lss[4],lw=lweight,label='FF')
axes.set_xlabel('Difference in rating \n(White - Black, using bin size = ' + str(binwidth) + ')')
axes.set_ylabel('Average score for White \ncompared to MM pairing')
axes.set_ylim([-ylimit_diff,ylimit_diff])
axes.set_xlim([-650,650])
titletext= 'game gender pairing'
fontP = FontProperties()
fontP.set_size(16)
legend = plt.legend(loc=0, ncol=1, bbox_to_anchor=(0, 0, 1, 1),prop = fontP,fancybox=True,shadow=False,title=titletext)
plt.setp(legend.get_title(),fontsize=16)
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=fsize)
axes.errorbar(bins[:-1]+binwidth/2,yvalsMM-yvalsMM,yerr=0,fmt=fmts[2],ls=lss[2],lw=lweight,label='MM')
#axes.errorbar(bins[:-1]+binwidth/2,yvalsFM-yvalsMM,yerr=y_semFMdiff*1.96,fmt='-d',ls=lss[0],lw=lweight,label='FM')
axes.errorbar(bins[:-1]+binwidth/2,yvalsFM_MF,yerr=y_semFM_MFdiff*1.96,fmt=fmts[0],ls=lss[0],lw=lweight,label='FM+MF')
#axes.errorbar(bins[:-1]+binwidth/2,yvalsMF-yvalsMM,yerr=y_semMFdiff*1.96,fmt='-d',ls=lss[0],lw=lweight,label='MF')
axes.errorbar(bins[:-1]+binwidth/2,yvalsFF-yvalsMM,yerr=y_semFFdiff*1.96,fmt=fmts[4],ls=lss[4],lw=lweight,label='FF')
axes.set_xlabel('Difference in rating \n(White - Black, using bin size = ' + str(binwidth) + ')')
axes.set_ylabel('Average score for White \ncompared to MM pairing')
axes.set_ylim([-ylimit_diff,ylimit_diff])
axes.set_xlim([-650,650])
fontP = FontProperties()
fontP.set_size(16)
legend = plt.legend(loc=0, ncol=1, bbox_to_anchor=(0, 0, 1, 1),prop = fontP,fancybox=True,shadow=False,title=titletext)
plt.setp(legend.get_title(),fontsize=16)
games.groupby('pair').count()['diff']
print "N of x-gender pairings = " + str(games.groupby('pair').count()['diff']['FM']+games.groupby('pair').count()['diff']['MF'])
print "Total N of games = " + str(sum(games.groupby('pair').count()['diff']))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 自定义训练
Step2: 鸢尾花分类问题
Step3: 检查数据
Step4: 我们可以从该数据集视图中注意到以下信息:
Step5: 每个标签都分别与一个字符串名称(例如 “setosa” )相关联,但机器学习通常依赖于数字值。标签编号会映射到一个指定的表示法,例如:
Step6: 创建一个 tf.data.Dataset
Step7: make_csv_dataset 返回一个(features, label) 对构建的 tf.data.Dataset ,其中 features 是一个字典
Step8: 注意到具有相似特征的样本会归为一组,即分为一批。更改 batch_size 可以设置存储在这些特征数组中的样本数。
Step10: 要简化模型构建步骤,请创建一个函数以将特征字典重新打包为形状为 (batch_size, num_features) 的单个数组。
Step11: 然后使用 tf.data.Dataset.map 方法将每个 (features,label) 对中的 features 打包到训练数据集中:
Step12: Dataset 的特征元素被构成了形如 (batch_size, num_features) 的数组。我们来看看前几个样本
Step13: 选择模型类型
Step14: 激活函数可决定层中每个节点的输出形式。 这些非线性关系很重要,如果没有它们,模型将等同于单个层。激活函数有很多种,但隐藏层通常使用 ReLU。
Step15: 在此示例中,每个样本针对每个类别返回一个 logit。
Step16: 对每个类别执行 tf.argmax 运算可得出预测的类别索引。不过,该模型尚未接受训练,因此这些预测并不理想。
Step17: 训练模型
Step18: 使用 tf.GradientTape 的前后关系来计算梯度以优化你的模型
Step19: 创建优化器
Step20: 我们将使用它来计算单个优化步骤:
Step21: 训练循环
Step22: 可视化损失函数随时间推移而变化的情况
Step23: 评估模型的效果
Step24: 根据测试数据集评估模型
Step25: 例如,我们可以看到对于最后一批数据,该模型通常预测正确:
Step26: 使用经过训练的模型进行预测
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import matplotlib.pyplot as plt
import tensorflow as tf
print("TensorFlow version: {}".format(tf.__version__))
print("Eager execution: {}".format(tf.executing_eagerly()))
train_dataset_url = "https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv"
train_dataset_fp = tf.keras.utils.get_file(fname=os.path.basename(train_dataset_url),
origin=train_dataset_url)
print("Local copy of the dataset file: {}".format(train_dataset_fp))
!head -n5 {train_dataset_fp}
# column order in CSV file
column_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']
feature_names = column_names[:-1]
label_name = column_names[-1]
print("Features: {}".format(feature_names))
print("Label: {}".format(label_name))
class_names = ['Iris setosa', 'Iris versicolor', 'Iris virginica']
batch_size = 32
train_dataset = tf.data.experimental.make_csv_dataset(
train_dataset_fp,
batch_size,
column_names=column_names,
label_name=label_name,
num_epochs=1)
features, labels = next(iter(train_dataset))
print(features)
plt.scatter(features['petal_length'],
features['sepal_length'],
c=labels,
cmap='viridis')
plt.xlabel("Petal length")
plt.ylabel("Sepal length")
plt.show()
def pack_features_vector(features, labels):
    """Pack per-column feature tensors into a single (batch, n_features) tensor.

    Fix: the notebook export stripped the docstring's quotes, leaving a bare
    ``Pack the features into a single array.`` line that is a syntax error.

    Args:
        features: mapping of column name -> tensor of shape (batch,), as
            produced by ``tf.data.experimental.make_csv_dataset``.
        labels: label tensor, passed through unchanged.

    Returns:
        Tuple of (stacked features tensor, labels).
    """
    features = tf.stack(list(features.values()), axis=1)
    return features, labels
train_dataset = train_dataset.map(pack_features_vector)
features, labels = next(iter(train_dataset))
print(features[:5])
model = tf.keras.Sequential([
tf.keras.layers.Dense(10, activation=tf.nn.relu, input_shape=(4,)), # input shape required
tf.keras.layers.Dense(10, activation=tf.nn.relu),
tf.keras.layers.Dense(3)
])
predictions = model(features)
predictions[:5]
tf.nn.softmax(predictions[:5])
print("Prediction: {}".format(tf.argmax(predictions, axis=1)))
print(" Labels: {}".format(labels))
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
def loss(model, x, y, training):
    """Return the sparse categorical cross-entropy of *model* on batch (x, y).

    The *training* flag only matters for layers that behave differently at
    train versus inference time (e.g. Dropout).
    """
    logits = model(x, training=training)
    return loss_object(y_true=y, y_pred=logits)
l = loss(model, features, labels, training=False)
print("Loss test: {}".format(l))
def grad(model, inputs, targets):
    """Run one forward pass under a gradient tape and return (loss, gradients).

    The gradients are taken with respect to the model's trainable variables,
    ready to be fed to an optimizer's ``apply_gradients``.
    """
    with tf.GradientTape() as tape:
        batch_loss = loss(model, inputs, targets, training=True)
    gradients = tape.gradient(batch_loss, model.trainable_variables)
    return batch_loss, gradients
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
loss_value, grads = grad(model, features, labels)
print("Step: {}, Initial Loss: {}".format(optimizer.iterations.numpy(),
loss_value.numpy()))
optimizer.apply_gradients(zip(grads, model.trainable_variables))
print("Step: {}, Loss: {}".format(optimizer.iterations.numpy(),
loss(model, features, labels, training=True).numpy()))
## Note: Rerunning this cell uses the same model variables
# Keep results for plotting
train_loss_results = []
train_accuracy_results = []
num_epochs = 201
for epoch in range(num_epochs):
epoch_loss_avg = tf.keras.metrics.Mean()
epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
# Training loop - using batches of 32
for x, y in train_dataset:
# Optimize the model
loss_value, grads = grad(model, x, y)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
# Track progress
epoch_loss_avg.update_state(loss_value) # Add current batch loss
# Compare predicted label to actual label
# training=True is needed only if there are layers with different
# behavior during training versus inference (e.g. Dropout).
epoch_accuracy.update_state(y, model(x, training=True))
# End epoch
train_loss_results.append(epoch_loss_avg.result())
train_accuracy_results.append(epoch_accuracy.result())
if epoch % 50 == 0:
print("Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}".format(epoch,
epoch_loss_avg.result(),
epoch_accuracy.result()))
fig, axes = plt.subplots(2, sharex=True, figsize=(12, 8))
fig.suptitle('Training Metrics')
axes[0].set_ylabel("Loss", fontsize=14)
axes[0].plot(train_loss_results)
axes[1].set_ylabel("Accuracy", fontsize=14)
axes[1].set_xlabel("Epoch", fontsize=14)
axes[1].plot(train_accuracy_results)
plt.show()
test_url = "https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv"
test_fp = tf.keras.utils.get_file(fname=os.path.basename(test_url),
origin=test_url)
test_dataset = tf.data.experimental.make_csv_dataset(
test_fp,
batch_size,
column_names=column_names,
label_name='species',
num_epochs=1,
shuffle=False)
test_dataset = test_dataset.map(pack_features_vector)
test_accuracy = tf.keras.metrics.Accuracy()
for (x, y) in test_dataset:
# training=False is needed only if there are layers with different
# behavior during training versus inference (e.g. Dropout).
logits = model(x, training=False)
prediction = tf.argmax(logits, axis=1, output_type=tf.int32)
test_accuracy(prediction, y)
print("Test set accuracy: {:.3%}".format(test_accuracy.result()))
tf.stack([y,prediction],axis=1)
predict_dataset = tf.convert_to_tensor([
[5.1, 3.3, 1.7, 0.5,],
[5.9, 3.0, 4.2, 1.5,],
[6.9, 3.1, 5.4, 2.1]
])
# training=False is needed only if there are layers with different
# behavior during training versus inference (e.g. Dropout).
predictions = model(predict_dataset, training=False)
for i, logits in enumerate(predictions):
class_idx = tf.argmax(logits).numpy()
p = tf.nn.softmax(logits)[class_idx]
name = class_names[class_idx]
print("Example {} prediction: {} ({:4.1f}%)".format(i, name, 100*p))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I want datetime printed as year-month-day-hour-min.
|
<ASSISTANT_TASK:>
Python Code:
import tarfile
import time
import os
import getpass
import paramiko
import arrow
curtime = time.strftime("%d-%b-%Y-%H", time.gmtime())
sshgetdrn = paramiko.SSHClient()
sshgetdrn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
usrg = getpass.getuser()
sshgetdrn.connect('128.199.60.12', username='wcmckee')
sup = sshgetdrn.exec_command('uptime')
ouput = sshgetdrn.invoke_shell()
ouput.recv(1000)
ouput.send('uptime')
ouput.send('chdir /getsdrawn')
opftp = sshgetdrn.open_sftp()
for i in opftp.listdir('/home/wcmckee/getsdrawndotcom/'):
print i
utc = arrow.utcnow()
print utc
utc.weekday()
utc.date()
utc.datetime
backdir = ('/home/' + usrg + '/backup-motion/')
if os.path.isdir(backdir) == True:
print 'its true'
else:
print 'its false'
os.mkdir(backdir)
tar = tarfile.open(backdir + curtime + ".tar.gz", "w:gz")
tar.add("/home/wcmckee/mot/", arcname="TarName")
tar.close()
#os.rmdir('/home/shared/cam')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define model
Step2: Let's run the model and plot the results for some values.
Step3: Setting up the model fitting
Step4: Fit tolerances to simulation
Step5: Run fitting
Step6: Explore results
Step7: Find point estimates of fit
Step8: As you can see we've not done a bad job at fitting. True parameters are 10,0.5 and 1.0 which all over-lap with the 95% confidence intervals. We may now save the results using the save method.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import ABCPRC as prc
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
def ibm(*ps):
    """Individual-based model of parasite burden in a host population.

    Parameters (positional): lbda (exposure rate), delta (per-worm death
    rate), gamma (gamma-distribution shape controlling host heterogeneity).
    Simulates 100 hosts over 100 time steps and returns a numpy array with
    each host's final worm count.
    """
    lbda, delta, gamma = ps[0], ps[1], ps[2]
    dt = 1.0    # time step
    n = 100     # population size
    T = 100     # number of simulated time steps

    class Host(object):
        """One individual: an exposure-risk draw plus a running worm count."""

        def __init__(self):
            # Per-host exposure heterogeneity, gamma-distributed.
            self.exposure = stats.gamma.rvs(gamma) if gamma > 0 else 0
            self.worms = 0  # parasites carried; starts at zero

        def step(self):
            arrival_rate = self.exposure * lbda * dt
            gained = stats.poisson.rvs(arrival_rate) if arrival_rate > 0 else 0
            lost = stats.poisson.rvs(self.worms * delta) if self.worms > 0 else 0
            # Clamp at zero: the Poisson death draw may exceed the count.
            self.worms = max(self.worms + gained - lost, 0)

    population = [Host() for _ in range(n)]
    for _ in range(T):
        for host in population:
            host.step()
    # ibm must return a numpy array of the final per-host burdens.
    return np.array([host.worms for host in population])
%time xs = ibm(10.0,0.5,1.0)
plt.hist(xs);
m = prc.ABC()
priors = [stats.expon(scale=10.0).rvs,stats.expon(scale=0.5).rvs,stats.expon(scale=1.0).rvs]
m.setup(modelFunc=ibm,xs=xs,priors=priors,method='Adaptive',toln=10)
m.fit(sample_size=30)
m.run(100)
res = m.trace()
plt.figure()
print('Initial Distribution')
m.trace(plot=True,tol=0)
plt.figure()
print('Middle Tolerance')
m.trace(plot=True,tol=5)
plt.figure()
print('Final Distribution')
m.trace(plot=True,tol=-1)
ps = np.round(m.paramMAP(),decimals=2)
print('MAP for infection rate is : {}, MAP for death rate is {} and MAP for heterogeneity is {}'.format(*ps))
res = m.fitSummary()
m.save('parasite_model_example')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How do we define direction of an earth magnetic field?
Step2: Magnetic applet
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from geoscilabs.mag import Mag, Simulator
%matplotlib inline
#Input parameters
fileName = 'https://github.com/geoscixyz/geosci-labs/raw/master/assets/mag/data/DO27_TMI.dat'
xyzd = np.genfromtxt(fileName, skip_header=3)
B = np.r_[60308, 83.8, 25.4]
survey, dobj = Mag.createMagSurvey(xyzd, B)
# View the data and chose a profile
param = Simulator.ViewMagSurvey2D(survey, dobj)
display(param)
# Define the parametric model interactively
model = Simulator.ViewPrism(param.result)
display(model)
plotwidget = Simulator.PFSimulator(model, param)
display(plotwidget)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will use the same list of all the Regional Associations as before,
Step2: The function below is similar to the one we used before.
Step3: Compare the results above with cell [6] from before. Note that now we got 192 records for PacIOOS and 74 for AOOS!
|
<ASSISTANT_TASK:>
Python Code:
from owslib.csw import CatalogueServiceWeb
endpoint = 'http://www.ngdc.noaa.gov/geoportal/csw'
csw = CatalogueServiceWeb(endpoint, timeout=30)
import pandas as pd
ioos_ras = ['AOOS', # Alaska
'CaRA', # Caribbean
'CeNCOOS', # Central and Northern California
'GCOOS', # Gulf of Mexico
'GLOS', # Great Lakes
'MARACOOS', # Mid-Atlantic
'NANOOS', # Pacific Northwest
'NERACOOS', # Northeast Atlantic
'PacIOOS', # Pacific Islands
'SCCOOS', # Southern California
'SECOORA'] # Southeast Atlantic
url = 'https://raw.githubusercontent.com/ioos/registry/master/uuid.csv'
df = pd.read_csv(url, index_col=0, header=0, names=['UUID'])
df['UUID'] = df['UUID'].str.strip()
from owslib.fes import PropertyIsEqualTo
def query_ra(csw, uuid):
    """Fetch all catalog records for the Regional Association with *uuid*.

    Issues a ``sys.siteuuid`` equality query against the CSW endpoint
    (up to 2000 full records) and returns the mutated *csw* object, whose
    ``results``/``records`` attributes now hold the response.
    """
    site_filter = PropertyIsEqualTo(propertyname='sys.siteuuid', literal='%s' % uuid)
    csw.getrecords2(constraints=[site_filter], maxrecords=2000, esn='full')
    return csw
for ra in ioos_ras:
try:
uuid = df.ix[ra]['UUID']
csw = query_ra(csw, uuid)
ret = csw.results['returned']
word = 'records' if ret > 1 else 'record'
print("{0:>8} has {1:>4} {2}".format(ra, ret, word))
csw.records.clear()
except KeyError:
pass
HTML(html)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preset based - using CoachInterface
Step2: Running each training or inference iteration manually
Step3: Sometimes we may want to track the agent's decisions, log or maybe even modify them.
Step4: Non-preset - using GraphManager directly
Step5: Advanced functionality - proprietary exploration policy, checkpoint evaluation
Step6: Next, we'll override the exploration policy with our own policy defined in Resources/exploration.py.
Step7: Last, we'll load the latest checkpoint from the checkpoint directory, and evaluate it.
|
<ASSISTANT_TASK:>
Python Code:
# Adding module path to sys path if not there, so rl_coach submodules can be imported
import os
import sys
import tensorflow as tf
module_path = os.path.abspath(os.path.join('..'))
resources_path = os.path.abspath(os.path.join('Resources'))
if module_path not in sys.path:
sys.path.append(module_path)
if resources_path not in sys.path:
sys.path.append(resources_path)
from rl_coach.coach import CoachInterface
coach = CoachInterface(preset='CartPole_ClippedPPO',
# The optional custom_parameter enables overriding preset settings
custom_parameter='heatup_steps=EnvironmentSteps(5);improve_steps=TrainingSteps(3)',
# Other optional parameters enable easy access to advanced functionalities
num_workers=1, checkpoint_save_secs=10)
coach.run()
from rl_coach.environments.gym_environment import GymEnvironment, GymVectorEnvironment
from rl_coach.base_parameters import VisualizationParameters
from rl_coach.core_types import EnvironmentSteps
tf.reset_default_graph()
coach = CoachInterface(preset='CartPole_ClippedPPO')
# registering an iteration signal before starting to run
coach.graph_manager.log_signal('iteration', -1)
coach.graph_manager.heatup(EnvironmentSteps(100))
# training
for it in range(10):
# logging the iteration signal during training
coach.graph_manager.log_signal('iteration', it)
# using the graph manager to train and act a given number of steps
coach.graph_manager.train_and_act(EnvironmentSteps(100))
# reading signals during training
training_reward = coach.graph_manager.get_signal_value('Training Reward')
# inference
env_params = GymVectorEnvironment(level='CartPole-v0')
env = GymEnvironment(**env_params.__dict__, visualization_parameters=VisualizationParameters())
response = env.reset_internal_state()
for _ in range(10):
action_info = coach.graph_manager.get_agent().choose_action(response.next_state)
print("State:{}, Action:{}".format(response.next_state,action_info.action))
response = env.step(action_info.action)
print("Reward:{}".format(response.reward))
from rl_coach.agents.clipped_ppo_agent import ClippedPPOAgentParameters
from rl_coach.environments.gym_environment import GymVectorEnvironment
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import SimpleSchedule
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
# Resetting tensorflow graph as the network has changed.
tf.reset_default_graph()
# define the environment parameters
bit_length = 10
env_params = GymVectorEnvironment(level='rl_coach.environments.toy_problems.bit_flip:BitFlip')
env_params.additional_simulator_parameters = {'bit_length': bit_length, 'mean_zero': True}
# Clipped PPO
agent_params = ClippedPPOAgentParameters()
agent_params.network_wrappers['main'].input_embedders_parameters = {
'state': InputEmbedderParameters(scheme=[]),
'desired_goal': InputEmbedderParameters(scheme=[])
}
graph_manager = BasicRLGraphManager(
agent_params=agent_params,
env_params=env_params,
schedule_params=SimpleSchedule()
)
graph_manager.improve()
from rl_coach.agents.dqn_agent import DQNAgentParameters
from rl_coach.base_parameters import VisualizationParameters, TaskParameters
from rl_coach.core_types import TrainingSteps, EnvironmentEpisodes, EnvironmentSteps
from rl_coach.environments.gym_environment import GymVectorEnvironment
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters
from rl_coach.memories.memory import MemoryGranularity
####################
# Graph Scheduling #
####################
# Resetting tensorflow graph as the network has changed.
tf.reset_default_graph()
schedule_params = ScheduleParameters()
schedule_params.improve_steps = TrainingSteps(4000)
schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(10)
schedule_params.evaluation_steps = EnvironmentEpisodes(1)
schedule_params.heatup_steps = EnvironmentSteps(1000)
#########
# Agent #
#########
agent_params = DQNAgentParameters()
# DQN params
agent_params.algorithm.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(100)
agent_params.algorithm.discount = 0.99
agent_params.algorithm.num_consecutive_playing_steps = EnvironmentSteps(1)
# NN configuration
agent_params.network_wrappers['main'].learning_rate = 0.00025
agent_params.network_wrappers['main'].replace_mse_with_huber_loss = False
# ER size
agent_params.memory.max_size = (MemoryGranularity.Transitions, 40000)
################
# Environment #
################
env_params = GymVectorEnvironment(level='CartPole-v0')
from exploration import MyExplorationParameters
# Overriding the default DQN Agent exploration policy with my exploration policy
agent_params.exploration = MyExplorationParameters()
# Creating a graph manager to train a DQN agent to solve CartPole
graph_manager = BasicRLGraphManager(agent_params=agent_params, env_params=env_params,
schedule_params=schedule_params, vis_params=VisualizationParameters())
# Resources path was defined at the top of this notebook
my_checkpoint_dir = resources_path + '/checkpoints'
# Checkpoints will be stored every 5 seconds to the given directory
task_parameters1 = TaskParameters()
task_parameters1.checkpoint_save_dir = my_checkpoint_dir
task_parameters1.checkpoint_save_secs = 5
graph_manager.create_graph(task_parameters1)
graph_manager.improve()
import tensorflow as tf
import shutil
# Clearing the previous graph before creating the new one to avoid name conflicts
tf.reset_default_graph()
# Updating the graph manager's task parameters to restore the latest stored checkpoint from the checkpoints directory
task_parameters2 = TaskParameters()
task_parameters2.checkpoint_restore_path = my_checkpoint_dir
graph_manager.create_graph(task_parameters2)
graph_manager.evaluate(EnvironmentSteps(5))
# Cleaning up: remove the checkpoint directory created earlier in this demo
shutil.rmtree(my_checkpoint_dir)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Objectives
Step2: Open Notebook
Step3: Basic Types
Step5: strings
Step6: slices
Step7: ANSWERS Hidden
Step8: Intro to numpy/scipy/matplotlib
Step9: Which dimension is 2 long? 3 long? 4 long? 5 long?
Step10: Which dimension is the input layer and which is the output layer?
Step11: ozone[0,
Step12: CHECK POINT
Step13: ANSWERS Hidden
Step14: XKCD, Jelly beans, and Type I Error?
|
<ASSISTANT_TASK:>
Python Code:
# Prepare my slides
%pylab inline
%cd working
# ipython
!curl -kLO http://github.com/barronh/GCandPython/archive/master.zip
import zipfile
zf = zipfile.ZipFile('master.zip')
zf.extractall()
%mv GCandPython-master GCandPython
def foo(bar):
    """Prefix *bar* with 'foo.' via string concatenation.

    Deliberately uses ``+`` so that non-string arguments raise TypeError,
    which the surrounding cells demonstrate.
    """
    prefixed = 'foo.' + bar
    return prefixed
print(foo('bar'))
try:
print(foo(1))
except Exception as e:
print(e)
1 + 1
1 + 1.
1 / 2.
1 // 2
.encode()
"a b c".split()
''.join(['h', 'ow', ' ', 'c', 'an', ' ', 'I ' 'su', 'bse', 't?'])
teststr = 'this is not fun.'
print(teststr[:8])
def foo(pieces):
    """Accumulate the elements of *pieces* with ``+=``.

    The accumulator starts as the zero value of the first element's type
    (``''`` for str, ``0`` for int, ...), so strings are joined and numbers
    summed.  For a dict, ``pieces[0]`` looks up key 0 and iteration yields
    the keys, so the keys are summed.

    Fix: the original looped with ``enumerate`` but never used the index.
    """
    out = type(pieces[0])()
    for piece in pieces:
        out += piece
    return out
a = foo('12345')
b = foo([1, 2, 3, 4, 5])
c = foo({0: 1, 1: 2, 2: 3, 3: 4, 4: 5})
print(a, b, c)
%pylab tk
np.random.seed(50)
ozone = (np.random.normal(size = 2*3*4*5) + 40).reshape(2,3,4,5)
print(ozone.ndim)
print(ozone.shape, ozone[0, :, 2, 3].shape, ozone[0, :, 2, 3])
print(ozone.mean())
print(np.percentile(ozone, [5, 95]))
averagekernel = np.array([[0.5, 0.35, 0.15],
[0.25, 0.5, 0.25],
[0.1, 0.4, 0.5]])
averagekernelm = np.matrix(averagekernel)
print(averagekernel.shape)
print(averagekernelm.shape)
print(averagekernelm.T.shape)
averagekernel = np.array([0.25, 0.5, 0.25])
averagekernelm = np.matrix(averagekernel)
(ozone[0, :, 2, 3] * averagekernel).sum()
ozone[0, :, 2, 3] * averagekernelm.T
out = np.zeros_like(ozone[:, 0])
for t in range(ozone.shape[0]):
for j in range(ozone.shape[2]):
for i in range(ozone.shape[3]):
out[t, j, i] = ozone[t, :, j, i] * averagekernelm.T
out
from scipy import constants
?constants
from scipy import stats
?stats
from scipy.stats import mstats
?mstats
%%timeit -n 20
from scipy.stats import ttest_ind, mannwhitneyu
mdifferent = []
tdifferent = []
n = 100
for i in range(n):
a = np.exp(np.random.normal(size = 20))
b = np.exp(np.random.normal(size = 20))
tresult = ttest_ind(a, b)
mresult = mannwhitneyu(a, b)
if tresult.pvalue < 0.05:
tdifferent.append(i);
if mresult.pvalue < 0.05:
mdifferent.append(i);
print(len(mdifferent)/n, end = '/')
print(len(tdifferent)/n, end = ', ')
np.array([0.06, 0.06, 0.01, 0.04, 0.05, 0.02, 0.06, 0.07, 0.02, 0.07, 0.04, 0.05, 0.03, 0.08, 0.04, 0.03, 0.03, 0.02, 0.04, 0.01, 0.06, 0.04, 0.05, 0.01, 0.03, 0.04, 0.02, 0.04, 0.03, 0.02, 0.04, 0.03, 0.06, 0.03, 0.02, 0.07, 0.05, 0.02, 0.01, 0.05, 0.03, 0.04, 0.04, 0.03, 0.03, 0.04, 0.04, 0.06, 0.03, 0.05, 0.05, 0.03, 0.02, 0.0,\
0.04, 0.05, 0.02, 0.07, 0.05, 0.05]).mean()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create forward bond future PV (Exposure) time profile
Step2: Data input for the CouponBond portfolio
Step3: Create Libor and portfolioScheduleOfCF. This datelist contains all dates
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from datetime import date
import time
import pandas as pd
import numpy as np
pd.options.display.max_colwidth = 60
from Curves.Corporates.CorporateDailyVasicek import CorporateRates
from Boostrappers.CDSBootstrapper.CDSVasicekBootstrapper import BootstrapperCDSLadder
from MonteCarloSimulators.Vasicek.vasicekMCSim import MC_Vasicek_Sim
from Products.Rates.CouponBond import CouponBond
from Products.Credit.CDS import CDS
from Scheduler.Scheduler import Scheduler
import quandl
import matplotlib.pyplot as plt
import pylab
from parameters import WORKING_DIR
import itertools
marker = itertools.cycle((',', '+', '.', 'o', '*'))
from IPython.core.pylabtools import figsize
figsize(15, 4)
from pandas import ExcelWriter
import numpy.random as nprnd
from pprint import pprint
t_step = 1.0 / 365.0
simNumber = 10
trim_start = date(2005,3,10)
trim_end = date(2010,12,31) # Last Date of the Portfolio
start = date(2005, 3, 10)
referenceDate = date(2005, 3, 10)
myScheduler = Scheduler()
ReferenceDateList = myScheduler.getSchedule(start=referenceDate,end=trim_end,freq="1M", referencedate=referenceDate)
# Create Simulator
xOIS = [ 3.0, 0.07536509, -0.208477, 0.07536509]
myVasicek = MC_Vasicek_Sim(ReferenceDateList,xOIS,simNumber,1/365.0)
myVasicek.setVasicek(x=xOIS,minDay=trim_start,maxDay=trim_end,simNumber=simNumber,t_step=1/365.0)
myVasicek.getLibor()
# Create Coupon Bond with several startDates.
SixMonthDelay = myScheduler.extractDelay("6M")
TwoYearsDelay = myScheduler.extractDelay("2Y")
startDates = [referenceDate + nprnd.randint(0,3)*SixMonthDelay for r in range(10)]
# For debugging uncomment this to choose a single date for the forward bond
# print(startDates)
startDates = [date(2005,3,10)+SixMonthDelay,date(2005,3,10)+TwoYearsDelay ]
maturities = [(x+TwoYearsDelay) for x in startDates]
myPortfolio = {}
coupon = 0.07536509
for i in range(len(startDates)):
notional=(-1.0)**i
myPortfolio[i] = CouponBond(fee=1.0,start=startDates[i],coupon=coupon,notional=notional,
maturity= maturities[i], freq="3M", referencedate=referenceDate,observationdate=trim_start )
# Create FullDateList
portfolioScheduleOfCF = set(ReferenceDateList)
for i in range(len(myPortfolio)):
portfolioScheduleOfCF=portfolioScheduleOfCF.union(myPortfolio[i].getScheduleComplete()[0]
)
portfolioScheduleOfCF = sorted(portfolioScheduleOfCF.union(ReferenceDateList))
OIS = myVasicek.getSmallLibor(datelist=portfolioScheduleOfCF)
#print(OIS)
# at this point OIS contains all dates for which the discount curve should be known.
# If the OIS doesn't contain that date, it would not be able to discount the cashflows and the calcualtion would faill.
pvs={}
for t in portfolioScheduleOfCF:
pvs[t] = np.zeros([1,simNumber])
#(pvs[t])
for i in range(len(myPortfolio)):
myPortfolio[i].setLibor(OIS)
pvs[t] = pvs[t] + myPortfolio[i].getExposure(referencedate=t).values
# print(myPortfolio[i].getExposure(referencedate=t).values)
#print(pvs)
#print(OIS)
#print(myPortfolio[i].getExposure(referencedate=t).value)
pvsPlot = pd.DataFrame.from_dict(list(pvs.items()))
pvsPlot.index= list(pvs.keys())
pvs1={}
for i,t in zip(pvsPlot.values,pvsPlot.index):
pvs1[t]=i[1][0]
pvs = pd.DataFrame.from_dict(data=pvs1,orient="index")
ax=pvs.plot(legend=False)
ax.set_xlabel("Year")
ax.set_ylabel("Coupon Bond Exposure")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: group the two print statements into a print_all function
Step2: Rewrite welcome in the style of greet. It should substitute the user name and return the result.
Step3: A function argument can also be optional. Therefore a default value has to be defined in the signature. You would call this attribute an optional attribute or more pythonic
Step4: locals / globals
Step5: Additionally one can declare and assign a variable, but it may only be valid in specific sections of your code. These sections of validity are called namespace. Any function defines its own namespace (the function body) and any variable declared within that scope is not valid outside this namespace. Nevertheless, namespaces can be nested. Then any outer namespace is also valid inside the inner namespaces.
Step6: The running Python session also defines a namespace. This is called the global namespace. The built-in function <span style='color
Step7: *args, **kwargs
Step8: lambda
Step9: Returning functions
Step10: Recursive functions
Step11: Now define the Fibonacci series into <span style="color
Step12: Nested functions
|
<ASSISTANT_TASK:>
Python Code:
# Fix: output said 'Wolrd'; the greet() cell below prints 'Hello, World!'.
print('Hello, World!')
print('This is Python.')
def greet():
return 'Hello, World!'
message = greet()
print(message)
def sum_a_and_b(a, b):
return a + b
print(sum_a_and_b(5, 3))
def print_arguments(arg1, arg2, arg3='Foo', arg4='Bar', g=9.81):
print('arg1:\t', arg1)
print('arg2:\t', arg2)
print('arg3:\t', arg3)
print('arg4:\t', arg4)
print('g:\t', g)
print('-' * 14)
x = 5
print('x = 5\t\tmemory address:', hex(id(x)))
x = 6
print('x = 6\t\tmemory address:', hex(id(x)))
a = 5
def f():
b = 3
print('a: ', a, 'b: ', b)
# call
f()
b
a = 5
print('Global a: ', globals()['a'], '\t\tmemory address: ', hex(id(a)))
def f(b):
print('Local b:', locals()['b'], '\t\tmemory address: ', hex(id(b)))
f(a)
def get_attributes(*args, **kwargs):
return args, kwargs
a, b = get_attributes('foo', 'bar', g=9.81, version=2.7, idiot_president='Donald Trump')
print('args:', a)
print('kwargs:', b)
list(map(lambda x:x**2, [1,2,3,4,5,6,7,8,9]))
list(map(lambda x:len(x), 'This is a sentence with rather with rather short and extraordinary long words.'.split()))
list(map(lambda x:(x,len(x)), 'This is a sentence with rather with rather short and extraordinary long words.'.split()))
def mean(data):
    """Return the arithmetic mean of *data* (a non-empty numeric sequence)."""
    return sum(data) / len(data)

def get_aggregator(variable_name):
    """Return the aggregation function appropriate for *variable_name*.

    Temperatures are averaged, rainfall amounts are summed.  Matching is
    case-insensitive.

    Raises:
        ValueError: if the variable name is not recognised (the original
            silently returned None, hiding typos until the aggregator was
            called).
    """
    name = variable_name.lower()
    if name == 'temperature':
        return mean
    if name == 'rainfall':
        return sum
    raise ValueError('Unknown variable name: %r' % variable_name)
data = [2,4,7,9,2,3,5]
# Temperature data
agg = get_aggregator('Temperature')
print('Temperature: ', agg(data), '°C')
agg = get_aggregator('Rainfall')
print('Rainfall: ', agg(data), 'mm')
def factorial(n):
    """Return n! computed iteratively; any n below 2 (including 0) yields 1."""
    product = 1
    while n > 1:
        product *= n
        n -= 1
    return product
def fibonacci_iterative(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1) in O(n) time."""
    previous, current = 0, 1
    for _ in range(n):
        previous, current = current, previous + current
    return previous
def fibonacci_recursive(n):
    """Return the n-th Fibonacci number recursively (F(0)=0, F(1)=1).

    The original stub only contained ``pass`` and returned None.  The inner
    recursion is memoised so the ``%time fibonacci_recursive(40)`` cell stays
    tractable (naive recursion is exponential in n).
    """
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def fib(k):
        return k if k < 2 else fib(k - 1) + fib(k - 2)

    return fib(n)
print('iterative:', fibonacci_iterative(10))
print('recursive:', fibonacci_recursive(10))
%time fibonacci_iterative(40)
%time fibonacci_recursive(40)
def get_model(arid=True):
    """Build a simple runoff model closure.

    Arid catchments use exponent 1.2, humid ones 1.4.  The returned callable
    maps (rain, evap) to (rain - evap)**a when rain exceeds evaporation and
    to 0 otherwise.
    """
    exponent = 1.2 if arid else 1.4

    def _model(rain, evap, a=exponent):
        if rain > evap:
            return (rain - evap) ** a
        return 0

    return _model
rain_values = [3, 0, 0, 16, 4]
evap_values = [1, 1, 5, 3, 2]
print(list(map(get_model(arid=True), rain_values, evap_values)))
print(list(map(get_model(arid=False), rain_values, evap_values)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Firstly let's check that the MCMC chains have converged and that the resulting posteriors are sensible.
Step2: Okay so what is the Bayes factor? Note that this returns $\log_{10}(BF)$ along with an estimate of the error and the so-called Occam factor.
Step3: In this instance the evidence is strongly in favour of bimodality, the errors are relatively small, and the Occam factor is small. The Occam factor provides information on how much the choice of prior (see the BayesBimodalTest implementation for details) effects the modal comparison. So in this case, we would need to change the prior by 60 orders of magnitude to change the conclusion of the model comparsion
Step4: We now plot the Bayes factor for this varying $\sigma$-separation. The measured values are shown as blue dots, with error bars. A red band indicates the Occam factor.
Step5: This shows that, only for means separated above $\approx 2\sigma$, can the Bayes factor unamiguously rule out the unimodal hypothesis. Below this it is difficult to rule out the Bimodal hypthesis, althrough in general the $\log_{10}(\textrm{BF})$ is generally negative (shown in a close-up below)
|
<ASSISTANT_TASK:>
Python Code:
def GenerateBimodalData(muA, muB, sigmaA, sigmaB, p, N=1000):
    """Draw N samples from a two-component Gaussian mixture.

    A fraction *p* of the samples come from N(muA, sigmaA) and the rest from
    N(muB, sigmaB).

    Fix: the second component previously drew ``int((1-p)*N)`` samples, so
    floating-point truncation could make the total fall short of N (e.g.
    p=1/3, N=99 returned 98 points).  The remainder is now computed as
    ``N - nA`` so exactly N samples are always returned.
    """
    nA = int(p * N)
    dataA = np.random.normal(muA, sigmaA, nA)
    dataB = np.random.normal(muB, sigmaB, N - nA)
    return np.concatenate([dataA, dataB])
data = GenerateBimodalData(-2, 2, 1, 1, 0.9)
test = BayesBimodalTest(data, ntemps=30, nburn0=50, nburn=50, nprod=100, betamin=-8)
test.diagnostic_plot()
test.BayesFactor(print_result=True)
sigma_seps = np.linspace(3, 0.01, 40)
sigma = 1
p = 0.5
log10BF = []
log10BF_err = []
log10Occam = []
for ss in sigma_seps:
muA = -0.5*ss
muB = 0.5*ss
data = GenerateBimodalData(muA, muB, sigma, sigma, p)
test = BayesBimodalTest(data, ntemps=50, nburn0=50, nburn=50, nprod=50, betamin=-8)
a, b, c, = test.BayesFactor(print_result=False)
log10BF.append(a)
log10BF_err.append(b)
log10Occam.append(c)
fig, ax = plt.subplots()
ax.errorbar(sigma_seps, log10BF, yerr=log10BF_err, fmt=".")
ax.set_xlabel("$\sigma$-separation of the means")
ax.set_ylabel(r"$\log_{10}(\mathrm{BF})$")
ax.fill_between(sigma_seps, log10Occam, -np.array(log10Occam), color="red", alpha=0.2)
plt.show()
fig, ax = plt.subplots()
ax.errorbar(sigma_seps, log10BF, yerr=log10BF_err, fmt=".")
ax.set_xlabel("$\sigma$-separation of the means")
ax.set_ylabel(r"$\log_{10}(\mathrm{BF})$")
ax.fill_between(sigma_seps, log10Occam, -np.array(log10Occam), color="red", alpha=0.2)
ax.set_ylim(-2, 2)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set subset filters
Step2: #########################################################################################################################
Step 2
Step3: #########################################################################################################################
Step 3
|
<ASSISTANT_TASK:>
Python Code:
root_directory = 'D:/github/w_vattenstatus/ekostat_calculator'#"../" #os.getcwd()
workspace_directory = root_directory + '/workspaces'
resource_directory = root_directory + '/resources'
user_id = 'test_user'
# ## Initiate EventHandler
print(root_directory)
paths = {'user_id': user_id,
'workspace_directory': root_directory + '/workspaces',
'resource_directory': root_directory + '/resources',
'log_directory': 'D:/github' + '/log',
'test_data_directory': 'D:/github' + '/test_data',
'cache_directory': 'D:/github/w_vattenstatus/cache'}
t0 = time.time()
ekos = EventHandler(**paths)
#request = ekos.test_requests['request_workspace_list']
#response = ekos.request_workspace_list(request)
#ekos.write_test_response('request_workspace_list', response)
print('-'*50)
print('Time for request: {}'.format(time.time()-t0))
###############################################################################################################################
# ### Make a new workspace
# ekos.copy_workspace(source_uuid='default_workspace', target_alias='kustzonsmodellen_3daydata')
# ### See existing workspaces and choose workspace name to load
ekos.print_workspaces()
#alias = 'lena'
# workspace_alias = 'lena_indicator' # kustzonsmodellen_3daydata
workspace_alias = 'waters_export'
# Resolve alias -> uuid, then uuid -> alias again as a round-trip sanity check.
workspace_uuid = ekos.get_unique_id_for_alias(workspace_alias = workspace_alias) #'kuszonsmodellen' lena_indicator
print(workspace_uuid)
workspace_alias = ekos.get_alias_for_unique_id(workspace_uuid = workspace_uuid)
###############################################################################
# ### Load existing workspace
ekos.load_workspace(unique_id = workspace_uuid)
###############################################################################
# ### import data (only needed the first time; commented out)
# ekos.import_default_data(workspace_alias = workspace_alias)
###############################################################################
# ### Load all data in workspace
# #### if there is old data that you want to remove
#ekos.get_workspace(workspace_uuid = workspace_uuid).delete_alldata_export()
#ekos.get_workspace(workspace_uuid = workspace_uuid).delete_all_export_data()
###############################################################################
# #### to just load existing data in workspace
ekos.load_data(workspace_uuid = workspace_uuid)
###############################################################################
# ### check workspace data length (row count of the combined column data)
w = ekos.get_workspace(workspace_uuid = workspace_uuid)
len(w.data_handler.get_all_column_data_df())
###############################################################################
# ### see subsets in data
for subset_uuid in w.get_subset_list():
    print('uuid {} alias {}'.format(subset_uuid, w.uuid_mapping.get_alias(unique_id=subset_uuid)))
###############################################################################
# # Step 0: inspect available columns before filtering
print(w.data_handler.all_data.columns)
###############################################################################
# ### Apply first data filter
w.apply_data_filter(step = 0) # This sets the first level of data filter in the IndexHandler
###############################################################################
# # Step 1: choose a subset and apply year / waterbody filters
# ### make new subset (only needed once; commented out)
# w.copy_subset(source_uuid='default_subset', target_alias='test_kustzon')
###############################################################################
# ### Choose subset name to load
subset_alias = 'waters_export'
# subset_alias = 'period_2007-2012_refvalues_2013'
# subset_alias = 'test_subset'
subset_uuid = ekos.get_unique_id_for_alias(workspace_alias = workspace_alias, subset_alias = subset_alias)
print('subset_alias', subset_alias, 'subset_uuid', subset_uuid)
# #### year filter: keep only measurement years 2007-2012
w.set_data_filter(subset = subset_uuid, step=1,
                  filter_type='include_list',
                  filter_name='MYEAR',
                  data=[2007,2008,2009,2010,2011,2012])#['2011', '2012', '2013']) #, 2014, 2015, 2016
###############################################################################
# #### waterbody filter: empty include-list means "keep all waterbodies"
w.set_data_filter(subset = subset_uuid, step=1,
                  filter_type='include_list',
                  filter_name='viss_eu_cd', data = []) #'SE584340-174401', 'SE581700-113000', 'SE654470-222700', 'SE633000-195000', 'SE625180-181655'
#                  data=['SE584340-174401', 'SE581700-113000', 'SE654470-222700', 'SE633000-195000', 'SE625180-181655'])
# wb with no data for din 'SE591400-182320'
f1 = w.get_data_filter_object(subset = subset_uuid, step=1)
print(f1.include_list_filter)
print('subset_alias:', subset_alias, '\nsubset uuid:', subset_uuid)
# NOTE(review): this re-fetch/print duplicates the two lines above.
f1 = w.get_data_filter_object(subset = subset_uuid, step=1)
print(f1.include_list_filter)
###############################################################################
# ## Apply step 1 datafilter to subset, then inspect the filtered result
w.apply_data_filter(subset = subset_uuid, step = 1)
filtered_data = w.get_filtered_data(step = 1, subset = subset_uuid)
print(filtered_data['VISS_EU_CD'].unique())
filtered_data[['AMON','NTRA','DIN','CPHL_INTEG_CALC','DEPH']].head()
### Load indicator settings filter (step 2 configuration)
w.get_step_object(step = 2, subset = subset_uuid).load_indicator_settings_filters()
###############################################################################
### set available indicators for this subset
w.get_available_indicators(subset= subset_uuid, step=2)
###############################################################################
# ### choose indicators -- here we simply take everything available
#list(zip(typeA_list, df_step1.WATER_TYPE_AREA.unique()))
# indicator_list = ['oxygen','din_winter','ntot_summer', 'ntot_winter', 'dip_winter', 'ptot_summer', 'ptot_winter','bqi', 'biov', 'chl', 'secchi']
# indicator_list = ['din_winter','ntot_summer', 'ntot_winter', 'dip_winter', 'ptot_summer', 'ptot_winter']
#indicator_list = ['biov', 'chl']
# indicator_list = ['bqi', 'biov', 'chl', 'secchi']
#indicator_list = ['bqi', 'secchi'] + ['biov', 'chl'] + ['din_winter']
# indicator_list = ['din_winter','ntot_summer']
# indicator_list = ['indicator_' + indicator for indicator in indicator_list]
indicator_list = w.available_indicators
###############################################################################
# ### Apply indicator data filter, one indicator at a time
print('apply indicator data filter to {}'.format(indicator_list))
for indicator in indicator_list:
    w.apply_indicator_data_filter(step = 2,
                                  subset = subset_uuid,
                                  indicator = indicator)#,
#                                  water_body_list = test_wb)
#print(w.mapping_objects['water_body'][wb])
#print('*************************************')
#df = w.get_filtered_data(subset = subset_uuid, step = 'step_2', water_body = 'SE625180-181655', indicator = 'indicator_din_winter').dropna(subset = ['DIN'])
# ### Set up indicator objects for status calculation
print('indicator set up to {}'.format(indicator_list))
w.get_step_object(step = 3, subset = subset_uuid).indicator_setup(indicator_list = indicator_list)
###############################################################################
# ### CALCULATE STATUS for every chosen indicator
print('CALCULATE STATUS to {}'.format(indicator_list))
w.get_step_object(step = 3, subset = subset_uuid).calculate_status(indicator_list = indicator_list)
###############################################################################
# ### CALCULATE QUALITY ELEMENTS (only nutrients enabled; others commented out)
w.get_step_object(step = 3, subset = subset_uuid).calculate_quality_element(quality_element = 'nutrients')
# w.get_step_object(step = 3, subset = subset_uuid).calculate_quality_element(quality_element = 'phytoplankton')
# w.get_step_object(step = 3, subset = subset_uuid).calculate_quality_element(quality_element = 'bottomfauna')
# w.get_step_object(step = 3, subset = subset_uuid).calculate_quality_element(quality_element = 'oxygen')
# w.get_step_object(step = 3, subset = subset_uuid).calculate_quality_element(quality_element = 'secchi')
# w.get_step_object(step = 3, subset = subset_uuid).calculate_quality_element(subset_unique_id = subset_uuid, quality_element = 'Phytoplankton')
# Export results in the format consumed by the "waters" tool.
w.get_data_for_waterstool(step = 3, subset = subset_uuid)
# NOTE(review): the remaining lines look like throwaway exploration cells.
['a','b']+['c','d']
p = paths['workspace_directory']+'/6f85f2fc-dcce-4bd4-9fc4-26fc14f9ad0c/subsets/19ac8dc5-80be-4304-9ed2-3267bf59425b/step_3/output/results/'
# NOTE(review): `col` is not defined in this file, and `re`/`os` are not
# imported in the visible code -- this cell likely fails as written; the
# regex also builds its pattern from an unescaped filename. Confirm intent.
[re.search(f + r'-by_date.pkl', col) for f in os.listdir(p) if '-by_date.pkl' in f]
paths['workspace_directory']+'/6f85f2fc-dcce-4bd4-9fc4-26fc14f9ad0c/subsets/19ac8dc5-80be-4304-9ed2-3267bf59425b/step_3/output/results/'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hard data
Step2: Soft/uncertain data
Step3: Example 1
Step4: Example 2
Step5: Example 3
|
<ASSISTANT_TASK:>
Python Code:
import mpslib as mps
import numpy as np
import matplotlib.pyplot as plt
# SNESIM (tree) simulation driver; the GENESIM alternative is kept commented.
O=mps.mpslib(method='mps_snesim_tree', parameter_filename='mps_snesim.txt')
#O=mps.mpslib(method='mps_genesim', parameter_filename='mps_genesim.txt')
# Strebelle channel training image (coarsened); two facies categories 0/1.
TI1, TI_filename1 = mps.trainingimages.strebelle(3, coarse3d=1)
O.par['soft_data_categories']=np.array([0,1])
O.ti=TI1
#%% Set parameters for MPSlib
O.par['rseed']=1
O.par['n_multiple_grids']=0;
O.par['n_cond']=16
O.par['n_cond_soft']=1
O.par['n_real']=100
O.par['debug_level']=-1
# 18 x 13 x 1 simulation grid.
O.par['simulation_grid_size'][0]=18
O.par['simulation_grid_size'][1]=13
O.par['simulation_grid_size'][2]=1
O.par['hard_data_fnam']='hard.dat'
O.par['soft_data_fnam']='soft.dat'
O.delete_local_files()
O.par['n_max_cpdf_count']=100
# Set hard data (rows presumably [x, y, z, value] -- confirm with MPSlib docs);
# note it is prepared but NOT attached (assignment commented out).
d_hard = np.array([[ 15, 4, 0, 1],
                   [ 15, 5, 0, 1]])
#O.d_hard = d_hard
# Set soft data (rows presumably [x, y, z, P(cat 0), P(cat 1)] -- confirm).
d_soft = np.array([[ 2, 2, 0, 0.7, 0.3],
                   [ 5, 5, 0, 0.001, 0.999],
                   [ 10, 8, 0, 0.999, 0.001]])
O.d_soft = d_soft
# Example 1: no soft conditioning at all (n_cond_soft = 0), so the soft data
# should have no influence on the etype statistics.
O.par['n_cond_soft']=0
gtxt=['unilateral','random','preferential']
shuffle_simulation_grid_arr = [0,1,2]
fig = plt.figure(figsize=(15, 8))
for i in range(len(shuffle_simulation_grid_arr)):
    # Select the simulation-path type for this panel (0=unilateral,
    # 1=random, 2=preferential -- matching gtxt above).
    O.par['shuffle_simulation_grid']=shuffle_simulation_grid_arr[i]
    O.delete_local_files()
    O.run_parallel()
    # Etype statistics over the 100 realizations.
    m_mean, m_std, m_mode=O.etype()
    plt.subplot(2,3,i+1)
    plt.imshow(m_mean.T, zorder=-1, vmin=0, vmax=1, cmap='hot')
    plt.colorbar(fraction=0.046, pad=0.04)
    plt.title('%s path' % gtxt[i])
    plt.subplot(2,3,3+i+1)
    plt.imshow(m_std.T, zorder=-1, vmin=0, vmax=0.4, cmap='gray')
    plt.title('std')
    plt.colorbar(fraction=0.046, pad=0.04)
# Example 2: only co-located soft conditioning (n_cond_soft = 1).
O.par['n_cond_soft']=1
shuffle_simulation_grid_arr = [0,1,2]
fig = plt.figure(figsize=(15, 8))
for i in range(len(shuffle_simulation_grid_arr)):
    # Select the simulation-path type for this panel (see gtxt).
    O.par['shuffle_simulation_grid']=shuffle_simulation_grid_arr[i]
    O.delete_local_files()
    O.run_parallel()
    m_mean, m_std, m_mode=O.etype()
    plt.subplot(2,3,i+1)
    plt.imshow(m_mean.T, zorder=-1, vmin=0, vmax=1, cmap='hot')
    plt.colorbar(fraction=0.046, pad=0.04)
    plt.title('%s path' % gtxt[i])
    plt.subplot(2,3,3+i+1)
    plt.imshow(m_std.T, zorder=-1, vmin=0, vmax=0.4, cmap='gray')
    plt.title('std')
    plt.colorbar(fraction=0.046, pad=0.04)
# Example 3: up to three soft-data points used as conditioning
# (n_cond_soft = 3) -- NOT only the co-located one, despite the
# original "Only co-locational" comment.
O.par['n_cond_soft']=3
shuffle_simulation_grid_arr = [0,1,2]
fig = plt.figure(figsize=(15, 8))
for i in range(len(shuffle_simulation_grid_arr)):
    # Select the simulation-path type for this panel (see gtxt).
    O.par['shuffle_simulation_grid']=shuffle_simulation_grid_arr[i]
    O.delete_local_files()
    O.run_parallel()
    m_mean, m_std, m_mode=O.etype()
    plt.subplot(2,3,i+1)
    plt.imshow(m_mean.T, zorder=-1, vmin=0, vmax=1, cmap='hot')
    plt.colorbar(fraction=0.046, pad=0.04)
    plt.title('%s path' % gtxt[i])
    plt.subplot(2,3,3+i+1)
    plt.imshow(m_std.T, zorder=-1, vmin=0, vmax=0.4, cmap='gray')
    plt.title('std')
    plt.colorbar(fraction=0.046, pad=0.04)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the dataset
Step2: Create the model specification
Step3: Estimate the various models to be used for prediction
Step4: Estimate the MNL model
Step5: Estimate the Asymmetric Logit Model
Step6: Estimate the Uneven Logit Model
Step7: Estimate the Scobit Model
Step8: Estimate the Clog-Log Model
Step9: Estimate a Nested Logit Model
Step10: Estimate a Mixed Logit Model
Step11: Predictions using 1 dataset and 1 set of parameters
Step12: Predictions using 1 dataset and multiple parameters.
Step13: In particular, we want to plot the log-likelihood in the range of -1 to 0 for the travel cost parameter for the car utility. To do so, we will generate 100 values of between -1 and 0, containing the true parameter value.
Step14: Next, we'll create a 2D parameter array that contains all the sets of parameters. On the rows will be the particular parameters, and on the columns will be the sets of parameters.
Step15: Finally, PyLogit requires users to be explicit about the sets of parameters being passed as nest, shape, outside intercept, and index parameters. Note that index parameters are also referred to as utility coefficients. This level of explicitness is intended to help prevent errors, and it facilitates internal testing of the passed values.
Step16: With the parameter list created, we can finally perform the prediction. To facilitate easy calculation of the log-likelihoods, given each set of parameter values, we will only return the chosen probabilities of each choice situation.
|
<ASSISTANT_TASK:>
Python Code:
# For recording the model specification
from collections import OrderedDict
# For making plots pretty
import seaborn
# For file input/output
import pandas as pd
# For vectorized math operations
import numpy as np
# For plotting
import matplotlib.pyplot as plt
# For model estimation and prediction
import pylogit as pl
# To display plots inline
%matplotlib inline
# Load the dataset (long-format Swissmetro data: one row per
# alternative per choice situation).
raw_data = pd.read_csv("../data/long_swiss_metro_data.csv")
# Get a list of the choice situation ids that will be used
# in estimation and in prediction
all_situation_ids = np.sort(raw_data["custom_id"].unique())
# Set a random seed for reproducibility
np.random.seed(61)
# Shuffle and split the ids: first 2000 held out for prediction,
# the rest used for estimation.
np.random.shuffle(all_situation_ids)
prediction_ids = all_situation_ids[:2000]
estimation_ids = all_situation_ids[2000:]
# Create the estimation and prediction datasets
estimation_df = raw_data.loc[raw_data["custom_id"].isin(estimation_ids)].copy()
prediction_df = raw_data.loc[raw_data["custom_id"].isin(prediction_ids)].copy()
# Look at the first 5 rows of the data
# Note that for the mode_id, 1-Train, 2-SwissMetro, 3-Car
estimation_df.head().T
# NOTE: - Specification and variable names must be ordered dictionaries.
#       - Keys should be variables within the long format dataframe.
#         The sole exception to this is the "intercept" key.
#       - For the specification dictionary, the values should be lists
#         of integers or lists of lists of integers. Within a list,
#         or within the inner-most list, the integers should be the
#         alternative ID's of the alternative whose utility specification
#         the explanatory variable is entering. Lists of lists denote
#         alternatives that will share a common coefficient for the variable
#         in question.
basic_specification = OrderedDict()
basic_names = OrderedDict()
# Alternative-specific constants for Train (1) and Swissmetro (2);
# Car (3) is the base alternative.
basic_specification["intercept"] = [1, 2]
basic_names["intercept"] = ['ASC Train',
                            'ASC Swissmetro']
# Travel time: one shared coefficient for rail modes, a separate one for Car.
basic_specification["travel_time_hrs"] = [[1, 2,], 3]
basic_names["travel_time_hrs"] = ['Travel Time, units:hrs (Train and Swissmetro)',
                                  'Travel Time, units:hrs (Car)']
basic_specification["travel_cost_hundreth"] = [1, 2, 3]
basic_names["travel_cost_hundreth"] = ['Travel Cost * (Annual Pass == 0), units: 0.01 CHF (Train)',
                                       'Travel Cost * (Annual Pass == 0), units: 0.01 CHF (Swissmetro)',
                                       'Travel Cost, units: 0.01 CHF (Car)']
basic_specification["headway_hrs"] = [1, 2]
basic_names["headway_hrs"] = ["Headway, units:hrs, (Train)",
                              "Headway, units:hrs, (Swissmetro)"]
basic_specification["seat_configuration"] = [2]
basic_names["seat_configuration"] = ['Airline Seat Configuration, base=No (Swissmetro)']
basic_specification["train_survey"] = [[1, 2]]
basic_names["train_survey"] = ["Surveyed on a Train, base=No, (Train and Swissmetro)"]
basic_specification["regular_class"] = [1]
basic_names["regular_class"] = ["First Class == False, (Swissmetro)"]
basic_specification["single_luggage_piece"] = [3]
basic_names["single_luggage_piece"] = ["Number of Luggage Pieces == 1, (Car)"]
basic_specification["multiple_luggage_pieces"] = [3]
basic_names["multiple_luggage_pieces"] = ["Number of Luggage Pieces > 1, (Car)"]
# The 'alt_id_column' is the name of a column to be created in the long-format data
# It will identify the alternative associated with each row.
alt_id_column = "mode_id"
# The "obs_id_column" is a custom id column that ignores the fact that this is a
# panel/repeated-observations dataset. This column denotes each individual choice
# situation.
obs_id_column = "custom_id"
# The "choice_column" records the name of the column that denotes whether or not each
# individual chose the alternative on a given row.
choice_column = "CHOICE"
# Estimate the multinomial logit model (MNL)
swissmetro_mnl = pl.create_choice_model(data=estimation_df,
                                        alt_id_col=alt_id_column,
                                        obs_id_col=obs_id_column,
                                        choice_col=choice_column,
                                        specification=basic_specification,
                                        model_type="MNL",
                                        names=basic_names)
# Specify the initial values and method for the optimization.
# 14 = total number of coefficients named in basic_names above.
swissmetro_mnl.fit_mle(np.zeros(14))
# --- Asymmetric logit --------------------------------------------------------
# Create the various specification and name dictionaries
# for the asymmetric logit model. It is the same as for the MNL
# except that it excludes the intercept from V_{i, alternative}.
asym_specification = OrderedDict()
asym_names = OrderedDict()
for col in basic_specification:
    if col != "intercept":
        asym_specification[col] = basic_specification[col]
        asym_names[col] = basic_names[col]
# Get the list of intercept names for the asymmetric logit model
# This is used to tell PyLogit that we will be using 'outside'
# intercept parameters.
# See equation 2 of http://arxiv.org/abs/1606.05900 for more
# details
asym_intercept_names = basic_names["intercept"]
# Specify what alternative is not having its intercept estimated
asym_intercept_ref_pos = 2
# Give names to the shape parameters of the asymmetric logit model
# Note that all of the shape parameters are not identifiable so we
# need to restrict one of them. This accounts for why we do not have
# a "shape_car" name.
asym_shape_names = ["shape_train", "shape_swiss_metro"]
# Note the index of the alternative whose shape parameter is constrained
# (i.e. the Car alternative)
asym_ref = 2
# Create the asymmetric logit's model object
swissmetro_asym = pl.create_choice_model(data=estimation_df,
                                         alt_id_col=alt_id_column,
                                         obs_id_col=obs_id_column,
                                         choice_col=choice_column,
                                         specification=asym_specification,
                                         model_type="Asym",
                                         names=asym_names,
                                         shape_names=asym_shape_names,
                                         intercept_names=asym_intercept_names,
                                         shape_ref_pos=asym_ref,
                                         intercept_ref_pos=asym_intercept_ref_pos)
# Note that, below, we use None for initial values and use kwargs to
# specify our initial values for the optimization. This is not
# necessary, but it is convenient and an option.
# Note that dividing the index coefficients by log(J), where J is
# the number of alternatives in the dataset, accounts for the fact
# that when each shape parameter is 1/J, the value of the asymmetric
# logit model's estimated index coefficients are equal to the logit
# model's estimates, divided by log(J).
# Again, see http://arxiv.org/abs/1606.05900 for more details.
swissmetro_asym.fit_mle(None,
                        init_shapes=np.zeros(2),
                        init_intercepts=swissmetro_mnl.params.values[:2],
                        init_coefs=swissmetro_mnl.params.values[2:] / np.log(3))
# --- Uneven logit ------------------------------------------------------------
# Create the various specification and name dictionaries
# for the uneven logit model. It is the same as for the MNL
# except that it excludes the intercept from V_{i, alternative}.
uneven_specification = OrderedDict()
uneven_names = OrderedDict()
for col in basic_specification:
    if col != "intercept":
        uneven_specification[col] = basic_specification[col]
        uneven_names[col] = basic_names[col]
# Get the list of intercept names for the uneven logit model
# This is used to tell PyLogit that we will be using 'outside'
# intercept parameters.
# See equation 2 of http://arxiv.org/abs/1606.05900 for more
# details
uneven_intercept_names = basic_names["intercept"]
# Specify what alternative is not having its intercept estimated
uneven_intercept_ref_pos = 2
# Specify the names of the uneven logit model's shape parameters
# Note that we include "shape_car" because all of the uneven logit
# model's shape parameters are identifiable.
uneven_shape_names = ["shape_train", "shape_swiss_metro", "shape_car"]
swissmetro_uneven = pl.create_choice_model(data=estimation_df,
                                           alt_id_col=alt_id_column,
                                           obs_id_col=obs_id_column,
                                           choice_col=choice_column,
                                           specification=uneven_specification,
                                           model_type="Uneven",
                                           names=uneven_names,
                                           shape_names=uneven_shape_names,
                                           intercept_names=uneven_intercept_names,
                                           intercept_ref_pos=uneven_intercept_ref_pos)
# Also, note that we use None for initial values and use kwargs to
# specify our initial values for the optimization. This is necessary
# to use 'outside' intercept parameters with the model.
swissmetro_uneven.fit_mle(None,
                          init_shapes=np.zeros(3),
                          init_intercepts=swissmetro_mnl.params.values[:2],
                          init_coefs=swissmetro_mnl.params.values[2:])
# --- Scobit ------------------------------------------------------------------
# Create the various specification and name dictionaries
# for the scobit model. It is the same as for the MNL
# except that it excludes the intercept from V_{i, alternative}.
scobit_specification = OrderedDict()
scobit_names = OrderedDict()
for col in basic_specification:
    if col != "intercept":
        scobit_specification[col] = basic_specification[col]
        scobit_names[col] = basic_names[col]
# Get the list of intercept names for the scobit model
# This is used to tell PyLogit that we will be using 'outside'
# intercept parameters.
# See equation 2 of http://arxiv.org/abs/1606.05900 for more
# details
scobit_intercept_names = basic_names["intercept"]
# Create the names of the shape parameters that are needed for the scobit model
scobit_shape_names = ["shape_train", "shape_swiss_metro", "shape_car"]
# Specify which intercept/ASC is not being estimated (namely, the Car intercept)
scobit_intercept_ref = 2
swissmetro_scobit = pl.create_choice_model(data=estimation_df,
                                           alt_id_col=alt_id_column,
                                           obs_id_col=obs_id_column,
                                           choice_col=choice_column,
                                           specification=scobit_specification,
                                           model_type="Scobit",
                                           names=scobit_names,
                                           shape_names=scobit_shape_names,
                                           intercept_ref_pos=scobit_intercept_ref,
                                           intercept_names=scobit_intercept_names)
# Note that we are using 'outside' intercept parameters for this model
swissmetro_scobit.fit_mle(None,
                          init_shapes=np.zeros(3),
                          init_intercepts=swissmetro_mnl.params.values[:2],
                          init_coefs=swissmetro_mnl.params.values[2:])
# --- Clog-log ----------------------------------------------------------------
# Create the specification and name dictionaries
# for the clog-log model. It is the same as for the MNL
# except that it excludes the intercept from V_{i, alternative}.
clog_specification = OrderedDict()
clog_names = OrderedDict()
# Copy the specification dictionary from the logit
# model, without the intercept parameters so we
# can place the intercept parameters outside the
# index
for col in basic_specification:
    if col != "intercept":
        clog_specification[col] = basic_specification[col]
        clog_names[col] = basic_names[col]
# Get the list of intercept names for the clog-log model
# This is used to tell PyLogit that we will be using 'outside'
# intercept parameters.
# See equation 2 of http://arxiv.org/abs/1606.05900 for more
# details
clog_intercept_names = basic_names["intercept"]
# Specify which intercept/ASC is not being estimated
# (i.e, the Car intercept)
clog_intercept_ref = 2
# Create the Clog-log model object
swissmetro_clog = pl.create_choice_model(data=estimation_df,
                                         alt_id_col=alt_id_column,
                                         obs_id_col=obs_id_column,
                                         choice_col=choice_column,
                                         specification=clog_specification,
                                         model_type="Cloglog",
                                         intercept_ref_pos=clog_intercept_ref,
                                         names=clog_names,
                                         intercept_names=clog_intercept_names)
# Estimate the clog log model. Note we don't pass one single array of
# initial values but instead pass keyword arguments
swissmetro_clog.fit_mle(None,
                        init_intercepts=swissmetro_mnl.params.values[:2],
                        init_coefs=swissmetro_mnl.params.values[2:])
# --- Nested logit ------------------------------------------------------------
# Specify the nesting specification.
# I.e. specify the name of the various nests,
# and which alternatives belong to which nests.
nest_membership = OrderedDict()
nest_membership["Future Modes"] = [2]
nest_membership["Existing Modes"] = [1, 3]
# Create the nested logit model object
swissmetro_nested = pl.create_choice_model(data=estimation_df,
                                           alt_id_col=alt_id_column,
                                           obs_id_col=obs_id_column,
                                           choice_col=choice_column,
                                           specification=basic_specification,
                                           model_type="Nested Logit",
                                           names=basic_names,
                                           nest_spec=nest_membership)
# Create the initial values for the nest parameters
# Note: This should be in terms of the reparameterized values
# (i.e. the logit of the inverse of the scale parameter) used
# by PyLogit.
# Also, note that 40 corresponds to a scale parameter that is
# essentially equal to 1, and 5 corresponds to a scale
# parameter just slightly greater than 1.
init_nests = np.array([40, 5])
# Create the initial values for the estimation
initial_values = np.concatenate([init_nests,
                                 swissmetro_mnl.params.values])
# Estimate the nested logit model
# Note we constrain the nesting parameter for nest 1 since
# the Future Modes nest only has 1 alternative in it.
swissmetro_nested.fit_mle(initial_values,
                          constrained_pos=[0])
swissmetro_nested.get_statsmodels_summary()
# --- Mixed logit -------------------------------------------------------------
# Specify the parameters, by name, that are being treated
# as random variables (all travel time and cost coefficients).
mixing_variables = (basic_names["travel_time_hrs"] +
                    basic_names["travel_cost_hundreth"])
# Create the mixed logit model object.
# mixing_id_col="ID" identifies the individual, so draws are shared
# across a person's repeated choice situations.
swissmetro_mixed = pl.create_choice_model(data=estimation_df,
                                          alt_id_col=alt_id_column,
                                          obs_id_col=obs_id_column,
                                          choice_col=choice_column,
                                          specification=basic_specification,
                                          model_type="Mixed Logit",
                                          names=basic_names,
                                          mixing_id_col="ID",
                                          mixing_vars=mixing_variables)
# Create the initial values for estimation.
# Note that we need one value for each beta
# coefficient and for each of the standard deviations
# that will now be estimated.
initial_values = np.concatenate([swissmetro_mnl.params.values,
                                 np.zeros(len(mixing_variables))])
# Estimate the mixed logit model (simulated MLE with 400 draws;
# seed fixed for reproducibility).
swissmetro_mixed.fit_mle(initial_values,
                         seed=26,
                         num_draws=400)
swissmetro_mixed.get_statsmodels_summary()
# --- Out-of-sample prediction ------------------------------------------------
# Create an ordered dictionary mapping shortened model
# names to estimated model objects.
model_name_to_obj = OrderedDict()
model_name_to_obj["Clog"] = swissmetro_clog
model_name_to_obj["MNL"] = swissmetro_mnl
model_name_to_obj["Nested"] = swissmetro_nested
model_name_to_obj["Asym"] = swissmetro_asym
model_name_to_obj["Uneven"] = swissmetro_uneven
model_name_to_obj["Scobit"] = swissmetro_scobit
model_name_to_obj["Mixed"] = swissmetro_mixed
# Initialize a dictionary to store the log-likelihoods
# on the prediction dataset.
prediction_log_likelihoods = OrderedDict()
for key in model_name_to_obj:
    prediction_log_likelihoods[key] = None
# Calculate and store the predictions for all models besides
# the mixed logit model. The predictions will be stored on
# prediction_df
for model_name in model_name_to_obj:
    if model_name == "Mixed":
        # Don't make predictions for the Mixed Logit Model
        # using the for-loop (it is handled separately below).
        continue
    else:
        # Get the model object to be used in making predictions
        model_obj = model_name_to_obj[model_name]
        # Note that by default, the predict method returns the
        # predicted probabilities for each available alternative
        # for each choice situation.
        prediction_array = model_obj.predict(prediction_df)
        # We can use the "return_long_probs" and "choice_col"
        # keyword arguments to ensure that we only return the
        # probability of the chosen alternative for each
        # choice situation.
        chosen_predictions = model_obj.predict(prediction_df,
                                               choice_col=choice_column,
                                               return_long_probs=False)
        # Calculate the predicted log-likelihood in two ways and,
        # for demonstration, ensure that they agree.
        log_likelihood_calc_1 = np.log(chosen_predictions).sum()
        log_predictions = np.log(prediction_array)
        choice_array = prediction_df[choice_column].values
        log_likelihood_calc_2 = choice_array.dot(log_predictions)
        assert np.allclose(log_likelihood_calc_1,
                           log_likelihood_calc_2)
        # Store the log-likelihood
        prediction_log_likelihoods[model_name] = log_likelihood_calc_1
        # Create a column name for the predictions
        prediction_col = model_name + "_Predictions"
        # Store the predicted probabilities
        prediction_df[prediction_col] = prediction_array
# Calculate and store the predictions for the mixed logit
# model using both the predict and panel_predict functions.
# Note that the panel_predict function will condition the
# predictions on the estimation data of each person
# (if there is any)
for prediction_type in ["Mixed", "Mixed_Panel"]:
    if "Panel" not in prediction_type:
        # We set the random seed simply for reproducibility
        predictions = swissmetro_mixed.predict(prediction_df,
                                               seed=2,
                                               num_draws=400)
    else:
        # The arguments are ['data', 'num_draws']
        args = [prediction_df, 400]
        kwargs = {"seed": 2}
        # Note that we can change the return type the same
        # way as with the predict function. Pass
        # return_long_probs = False and
        # choice_col = choice_column as keyword arguments
        predictions = swissmetro_mixed.panel_predict(*args,
                                                     **kwargs)
    # Create the column name for the predictions
    prediction_col = prediction_type + "_Predictions"
    # Store the predicted probabilities
    prediction_df[prediction_col] = predictions
    # Calculate the log-likelihood of the observed choices under
    # the predicted probabilities.
    actual_choices = prediction_df[choice_column].values
    log_predicted_probs = np.log(predictions)
    prediction_log_likelihoods[prediction_type] =\
        actual_choices.dot(log_predicted_probs)
# Look at the predicted log-likelihoods of the various models,
# sorted ascending (worst to best).
# NOTE: pandas removed Series.sort() (deprecated in 0.17, removed in 0.20);
# sort_values() is the supported replacement and likewise returns a new,
# sorted Series without mutating the original.
pd.Series(prediction_log_likelihoods,
          name="Log-likelihood of Predictions").sort_values()
# --- Log-likelihood profile over one parameter -------------------------------
swissmetro_uneven.get_statsmodels_summary()
# Create an array of 100 travel cost parameters between -1 and 0,
# containing the estimated value.
# Note that the travel cost parameter of interest is
# the tenth parameter, and python is zero indexed so we
# use 9 to access its value
actual_travel_cost_param = swissmetro_uneven.params.iat[9]
low_travel_cost_params = np.linspace(-1,
                                     actual_travel_cost_param,
                                     num=50,
                                     endpoint=False)
high_travel_cost_params = np.linspace(actual_travel_cost_param,
                                      0,
                                      num=50,
                                      endpoint=True)
travel_cost_params = np.concatenate((low_travel_cost_params,
                                     high_travel_cost_params))
# Initialize the 2D parameter array: one row per parameter, one
# column per parameter set (the outer product broadcasts the
# estimated vector across all 100 columns).
param_array_2d = (swissmetro_uneven.params.values[:, None] *
                  np.ones(travel_cost_params.size)[None, :])
# "Fill in" the row for the travel cost parameter for cars
param_array_2d[9, :] = travel_cost_params
# Let's look at the array to make sure we created it correctly
param_array_2d[:, :4]
# Since this is not a nested logit we have no nest parameters
nest_2d = None
# The uneven logit has 3 shape parameters so
# we select the first 3 rows.
shapes_2d = param_array_2d[:3, :]
# The uneven logit has 2 outside intercept parameters
# so we select the 4th and 5th rows
intercepts_2d = param_array_2d[3:5, :]
# Finally, the remaining parameters are the index coefficients
index_coefs_2d = param_array_2d[5:, :]
# Now, we pack all these arrays together in a list
# Note that the order is intentional and it is a requirement
# for the predict method: [index coefs, intercepts, shapes, nests].
parameter_list = [index_coefs_2d,
                  intercepts_2d,
                  shapes_2d,
                  nest_2d]
# Pack the keyword arguments into a dictionary, simply for
# line length considerations
kwargs = {"param_list": parameter_list,
          "return_long_probs": False,
          "choice_col": choice_column}
# Calculate the predictions for each set of parameter values
# Note that the returned array will have one column per
# set of parameter values and one row per choice situation
multiple_predictions = swissmetro_uneven.predict(estimation_df,
                                                 **kwargs)
# Calculate and plot the resulting log-likelihood
# Take the log of the probabilities of the chosen alternatives
# for each choice situation and sum them. log_likelihoods
# will be a 1D array of shape (travel_cost_params.size, )
log_likelihoods = np.log(multiple_predictions).sum(axis=0)
plt.plot(travel_cost_params,
         log_likelihoods,
         label="Log-Likelihood")
# Dashed vertical line marks the MLE of the car travel-cost parameter.
plt.vlines(actual_travel_cost_param,
           log_likelihoods.min(),
           swissmetro_uneven.log_likelihood,
           linestyle='dashed',
           label="Estimated Parameter Value")
plt.xlabel("Travel Cost, units: 0.01 CHF (Car)")
plt.ylabel("Log-Likelihood")
# Fixed typo in the displayed title ("Log-likelihod" -> "Log-likelihood").
plt.title("Log-likelihood vs. Travel Cost Parameter for Car Utility")
plt.legend(loc='best')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Motivation
Step2: The blocks below contain functions to parse and count all entries in each loaded database. Since CSV files have a table structure while XML files are tree-structured, we will have separate functions to count through each.
|
<ASSISTANT_TASK:>
Python Code:
#import packages
import pandas as pd
import glob
import csv
from xml.etree.ElementTree import ElementTree
import re
def file_csv(folderpath, addheader, headerlist):
    """Load every CSV under <folderpath>data/ into one concatenated DataFrame.

    Parameters
    ----------
    folderpath : str
        Path (ending in '/') whose 'data' subfolder holds the CSV files.
    addheader : bool
        When True the files are read headerless and ``headerlist`` supplies
        the column names (needed for the NVD dumps).
    headerlist : list of str or None
        Column names used when ``addheader`` is True.
    """
    csv_paths = glob.glob(folderpath + "data/*.csv")
    if addheader is True:
        frames = [pd.read_csv(path, names=headerlist) for path in csv_paths]
    else:
        frames = [pd.read_csv(path) for path in csv_paths]
    return pd.concat(frames)
def file_xml(folderpath):
    """Sum the CVE entry counts over every XML file under <folderpath>data/.

    Each file is parsed with ElementTree and counted via ``countrow_xml``
    using the CVRF vulnerability namespace.
    """
    namespace = '{http://www.icasi.org/CVRF/schema/vuln/1.1}'
    total = 0
    for xml_path in glob.glob(folderpath + "data/*.xml"):
        tree = ElementTree()
        tree.parse(xml_path)
        total += countrow_xml(tree.getroot(), namespace)
    return total
def countrows_csv(dataframe):
    """Count the distinct values in the 'cve_id' column.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        Must contain a 'cve_id' column.

    Returns
    -------
    int
        Number of unique CVE identifiers.
    """
    # A set gives O(1) membership tests; the original scanned a growing
    # list on every row, making the loop quadratic in the row count.
    seen = set()
    for element in dataframe['cve_id']:
        if element not in seen:
            seen.add(element)
    return len(seen)
def countrow_xml(CVE_root, root_string):
    """Count the CVE elements among the grandchildren of ``CVE_root``.

    Parameters
    ----------
    CVE_root : xml.etree.ElementTree.Element
        Root element of a parsed CVRF document.
    root_string : str
        Namespace prefix (e.g. '{http://...}') prepended to the CVE tag.

    Returns
    -------
    int
        Number of CVE-tagged elements found.
    """
    # The original kept several dead variables: `description` and `cell`
    # were never used in the result, and an `entries` list was consulted
    # for membership but never filled, so its check was always true.
    # Only the list of CVE texts is actually needed.
    cve_id = []
    for entry in CVE_root:
        for child in entry:
            if child.tag == root_string + 'CVE':
                cve_id.append(child.text)
    return len(cve_id)
#all entries added to a dictionary
data={}
#calling functions
nvd_dataframe=file_csv('NVD/',True,['cve_id', 'cwe_id','timestamp'])
details_dataframe=file_csv('CVE_Details/',False,None)
data["nvd"] = countrows_csv(nvd_dataframe)
data["cve_details"]= countrows_csv(details_dataframe)
data['cve_mitre']=file_xml('CVE_Mitre/')
#visualization of the count
from bokeh.plotting import figure, show
from bokeh.io import output_notebook
output_notebook()
plot_data={}
plot_data['Entries'] = data
#saving in dictionary for sorting and visualising
df_data = pd.DataFrame(plot_data).sort_values(by='Entries', ascending=True)
series = df_data.loc[:,'Entries']
# One horizontal bar per dataset, drawn as rectangles centered at half the
# count so each bar spans [0, count].
p = figure(width=800, y_range=series.index.tolist(), title="Number of Vulnerabilities in each dataset")
p.xaxis.axis_label = 'Number of vulnerabilities/rows'
p.xaxis.axis_label_text_font_size = '10pt'
p.xaxis.major_label_text_font_size = '10pt'
p.yaxis.axis_label = 'Name of the dataset'
p.yaxis.axis_label_text_font_size = '14pt'
p.yaxis.major_label_text_font_size = '12pt'
j = 1
# NOTE(review): Series.iteritems() was removed in pandas 2.0 (use .items());
# confirm the pinned pandas version before upgrading.
for k,v in series.iteritems():
    #Print fields, values, orders
    #print (k,v,j)
    p.rect(x=v/2, y=j, width=abs(v), height=0.4,
           width_units="data", height_units="data")
    j += 1
show(p)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Trend signal over the last 12 months
Step2: Compare EW and 1/vol
|
<ASSISTANT_TASK:>
Python Code:
# Simulate five synthetic assets with random Sharpe-like parameters, derive a
# 12-month trend signal, and compare equal-weight vs inverse-volatility
# time-series-momentum strategies using the `bt` backtesting library.
rf = 0.04
np.random.seed(1)
mus = np.random.normal(loc=0.05,scale=0.02,size=5) + rf
sigmas = (mus - rf)/0.3 + np.random.normal(loc=0.,scale=0.01,size=5)
num_years = 10
num_months_per_year = 12
num_days_per_month = 21
num_days_per_year = num_months_per_year*num_days_per_month
# Daily returns frame: business-day index, one column per fake asset.
rdf = pd.DataFrame(
    index = pd.date_range(
        start="2008-01-02",
        periods=num_years*num_months_per_year*num_days_per_month,
        freq="B"
    ),
    columns=['foo','bar','baz','fake1','fake2']
)
for i,mu in enumerate(mus):
    sigma = sigmas[i]
    rdf.iloc[:,i] = np.random.normal(
        loc=mu/num_days_per_year,
        scale=sigma/np.sqrt(num_days_per_year),
        size=rdf.shape[0]
    )
# Compound the daily returns into price levels starting at 100.
pdf = np.cumprod(1+rdf)*100
pdf.plot()
# 12-month rolling median, shifted one day so the signal only uses past data.
sma = pdf.rolling(window=num_days_per_month*12,center=False).median().shift(1)
plt.plot(pdf.index,pdf['foo'])
plt.plot(sma.index,sma['foo'])
plt.show()
#sma with 1 day lag
sma.tail()
#sma with 0 day lag
pdf.rolling(window=num_days_per_month*12,center=False).median().tail()
# target weights
# Boolean trend signal: long when price is above its lagged rolling median,
# flat otherwise (and flat wherever the median is still undefined).
trend = sma.copy()
trend[pdf > sma] = True
trend[pdf <= sma] = False
trend[sma.isnull()] = False
trend.tail()
tsmom_invvol_strat = bt.Strategy(
    'tsmom_invvol',
    [
        bt.algos.RunDaily(),
        bt.algos.SelectWhere(trend),
        bt.algos.WeighInvVol(),
        bt.algos.LimitWeights(limit=0.4),
        bt.algos.Rebalance()
    ]
)
tsmom_ew_strat = bt.Strategy(
    'tsmom_ew',
    [
        bt.algos.RunDaily(),
        bt.algos.SelectWhere(trend),
        bt.algos.WeighEqually(),
        bt.algos.LimitWeights(limit=0.4),
        bt.algos.Rebalance()
    ]
)
# create and run
tsmom_invvol_bt = bt.Backtest(
    tsmom_invvol_strat,
    pdf,
    initial_capital=50000000.0,
    commissions=lambda q, p: max(100, abs(q) * 0.0021),
    integer_positions=False,
    progress_bar=True
)
tsmom_invvol_res = bt.run(tsmom_invvol_bt)
tsmom_ew_bt = bt.Backtest(
    tsmom_ew_strat,
    pdf,
    initial_capital=50000000.0,
    commissions=lambda q, p: max(100, abs(q) * 0.0021),
    integer_positions=False,
    progress_bar=True
)
tsmom_ew_res = bt.run(tsmom_ew_bt)
# Overlay the equal-weight equity curve on the raw asset prices.
ax = plt.subplot()
ax.plot(tsmom_ew_res.prices.index,tsmom_ew_res.prices,label='EW')
pdf.plot(ax=ax)
ax.legend()
plt.legend()
plt.show()
tsmom_ew_res.stats
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loops
Step2: We showed above how to use this code to print the angles $(n \pi)/ 12$ for $n = 1, 2, \dots, 6$. We did this by calling the degrees_to_radians function on the angles $15 n$ degrees for $n = 1, 2, \dots, 6$. So this is the formula we want to reproduce in code. To do that we write a loop.
Step3: Let's examine this line by line. The first line defines the angle in degrees, theta_d. We start from $\theta_d=0$.
Step4: Let's examine this code line by line. It first defines a set of numbers, steps, which contains the integers from 1 to 6 (we will make this more precise later when we discuss lists and tuples). We then define the loop using the for command. This looks at the set of numbers steps and picks an entry out one at a time, setting the variable n to be the value of that member of the set. So, the first time through the loop n=1. The next, n=2. Once it has iterated through all members of the set steps, it stops.
Step5: The range function takes the input arguments <start> and <end>, and the optional input argument <step>, to produce the integers from the start up to, but not including, the end. If the <start> is not given it defaults to 0, and if the <step> is not given it defaults to 1.
Step6: In some programming languages this is where the discussion of a for loop would end
Step7: But we can get much more general than that. The different things in the set don't have to have the same type
Step8: This can be used to write very efficient code, but is a feature that isn't always available in other programming languages.
Step9: This could be done in a for loop, but not so straightforwardly.
Step10: We see that the answer is $229^{\circ}$.
Step11: The last example is particularly important, as this chained example (6 < 7 < 8) is equivalent to (6 < 7) and (7 < 8). This checks both inequalities - checking that $6 < x < 8$ when $x=7$, by checking that both $6 < x$ and $x < 8$ is true when $x=7$, which is true (and mathematically what you would expect). However, many programming languages would not interpret it this way, but would instead interpret it as (6 < 7) < 8, which is equivalent to True < 8, which is nonsense. Chaining operations in this way is useful in Python, but don't expect it to always work in other languages.
Step12: Lists are defined by square brackets, []. Objects in the list are separated by commas. A list can be empty (list5 above). A list can contain other lists (list4 above). The objects in the list don't have to have the same type (list3 and list4 above).
Step13: Note
Step14: Additional entries can be appended onto the end of a list
Step15: Entries can be removed (popped) from the end of a list
Step16: The length of a list can be found
Step17: Lists are probably the most used container, but there's a closely related container that we've already used
Step18: Tuples are defined by the commas separating the entries. The round brackets () surrounding the entries are conventional, useful for clarity, and for grouping. If you want to create an empty tuple (tuple5) the round brackets are necessary. A tuple containing a single entry (tuple6) must have a trailing comma.
Step19: However, if a member of a tuple can itself be modified (for example, it's a list, as tuple4[1] is), then that entry can be modified
Step20: Tuples appear a lot when using functions, either when passing in parameters, or when returning results. They can often be treated like lists, and there are functions that convert lists to tuples and vice versa
Step21: Slicing
Step22: The slicing notation [<start>
Step23: By using a negative step we can reverse the order (as shown in the final example), but then we need to be careful with the <start> and <end>.
Step24: Unpacking
Step25: This is related to a very useful Python feature
Step26: This can be used to directly swap two variables, for example
Step27: The number of entries on both sides must match.
Step28: Note that the order it prints out need not match the order we entered the values in. In fact, the order could change if we used a different machine, or entered the values again. This emphasizes the unordered nature of dictionaries.
Step29: To find all the keys or values we can use dictionary methods
Step30: Depending on the version of Python you are using, this might either give a list or an iterator.
Step31: To explain
Step32: So this does exactly the same thing as the previous loop, and most of the code is the same. However, rather than accessing the dictionary each time (using functions[name]), the value in the dictionary has been returned at the start. What is happening is that the items function is returning both the key and the value as a tuple on each iteration through the loop. The name, function notation then uses unpacking to appropriately set the variables. This form is "more pythonic" (ie, is shorter, clearer to many people, and faster).
Step34: But it might be that the input is just wrong
Step36: The control flow here uses the if statement. As with loops such as the for and while loops we have a condition which is checked which, if satisfied, leads to the indented code block after the colon being executed. The logical statements theta_d > 360.0 and theta_d < 0.0 are evaluated and return either True or False (which is how Python represents boolean values). If True, then the statement is executed.
Step37: The elif statement allows another condition to be checked - it is how Python represents "else if", or "all previous checks have been false; let's check this statement as well". Multiple elif blocks can be included to check more conditions. The else statement contains no logical check
Step38: We can nest statements as deep as we like, nesting loops and control flow statements as we go. We have to ensure that the indentation level is consistent. Here is a silly example.
Step39: Debugging
|
<ASSISTANT_TASK:>
Python Code:
from math import pi
def degrees_to_radians(theta_d):
    """Convert an angle from degrees to radians.

    Parameters
    ----------
    theta_d : float
        The angle in degrees.

    Returns
    -------
    theta_r : float
        The angle in radians.
    """
    # The extraction flattened this function and dropped the docstring's
    # triple quotes, leaving bare prose lines (a syntax error); restored here.
    theta_r = pi / 180.0 * theta_d
    return theta_r
# Tutorial script demonstrating loops, booleans, lists, tuples, slicing,
# unpacking, and dictionaries.  These lines were originally separate notebook
# cells, so some statements (marked below) deliberately raise exceptions.
# Print the angles 0, 15, ..., 90 degrees converted to radians.
theta_d = 0.0
while theta_d <= 90.0:
    print(degrees_to_radians(theta_d))
    theta_d = theta_d + 15.0
# Same angles via a for loop over an explicit tuple of multipliers.
steps = 1, 2, 3, 4, 5, 6
for n in steps:
    print(degrees_to_radians(15*n))
# Same again using range (end is exclusive).
for n in range(1,7):
    print(degrees_to_radians(15*n))
print(list(range(4)))
print(list(range(-1,3)))
print(list(range(1,10,2)))
angles = 15.0, 30.0, 45.0, 60.0, 75.0, 90.0
for angle in angles:
    print(degrees_to_radians(angle))
# For loops iterate over heterogeneous collections, including functions.
things = 1, 2.3, True, degrees_to_radians
for thing in things:
    print(thing)
# Find the first integer angle whose radian value reaches 4.0.
theta_d = 0.0
while degrees_to_radians(theta_d) < 4.0:
    theta_d = theta_d + 1.0
print(theta_d - 1.0)
print(theta_d)
print(degrees_to_radians(theta_d-1.0) / 4.0)
print(degrees_to_radians(theta_d) / 4.0)
# Boolean logic, including Python's chained comparison (6 < 7 < 8).
print(True)
print(6 < 7 and 10 > 9)
print(1 < 2 or 1 < 0)
print(not (6 < 7) and 10 > 9)
print(6 < 7 < 8)
# Lists: mutable, heterogeneous, nestable, possibly empty.
list1 = [1, 2, 3, 4, 5, 6]
list2 = [15.0, 30.0, 45.0, 60.0, 75.0, 90.0]
list3 = [1, 2.3, True, degrees_to_radians]
list4 = ["hello", list1, False]
list5 = []
list1[0]
list2[3]
list4[1] = "goodbye"
list4
list4.append('end')
list4
entry = list4.pop()
print(entry)
list4
len(list4)
# Tuples: defined by commas; parentheses optional except for the empty
# tuple, and a one-element tuple needs a trailing comma.
tuple1 = 1, 2, 3, 4, 5, 6
tuple2 = (15.0, 30.0, 45.0, 60.0, 75.0, 90.0)
tuple3 = (1, 2.3, True, degrees_to_radians)
tuple4 = ("hello", list1, False)
tuple5 = ()
tuple6 = (5,)
tuple1[0]
# Deliberate demo: assigning to a tuple element raises TypeError
# (tuples are immutable).  This was its own notebook cell.
tuple4[1] = "goodbye"
print(tuple4[1])
# A mutable object *inside* a tuple can still be modified in place.
tuple4[1][1] = 33
print(tuple4[1])
converted_list1 = list(tuple1)
converted_tuple1 = tuple(list1)
# Slicing: [start:end:step]; negative indices count from the end,
# negative step reverses.
list1 = [1, 2, 3, 4, 5, 6]
print(list1[0])
print(list1[1:3])
print(list1[2:])
print(list1[:4])
print(list1[0:6:2])
print(list1[1::3])
print(list1[4:1:-1])
print(list1[-1])
print(list1[-2])
print(list1[2:-2])
print(list1[-4:-2])
list_slice = [0, 0, 0, 0, 0, 0, 0, 0]
list_slice[1:4] = list1[3:]
print(list_slice)
# Unpacking: the number of names must match the number of values.
a, b, c = list1[3:]
print(a)
print(b)
print(c)
# Tuple unpacking swaps two variables without a temporary.
a, b = b, a
print(a)
print(b)
# Dictionaries: unordered mapping from names to (here) math functions.
from math import sin, cos, exp, log
functions = {"sine" : sin,
             "cosine" : cos,
             "exponential" : exp,
             "logarithm" : log}
print(functions)
print(functions["exponential"])
print(functions.keys())
print(functions.values())
# Iterating a dict yields its keys...
for name in functions:
    print("The result of {}(1) is {}.".format(name, functions[name](1.0)))
# ...while .items() yields (key, value) pairs, avoiding a second lookup.
for name, function in functions.items():
    print("The result of {}(1) is {}.".format(name, function(1.0)))
# The modulus operator normalizes an angle into [0, 360) degrees.
theta_d = 5134.6
theta_d_normalized = theta_d % 360.0
print(theta_d_normalized)
from math import pi
def check_angle_normalized(theta_d):
    """Check that an angle lies within [0, 360] degrees.

    Parameters
    ----------
    theta_d : float
        The angle in degrees.

    Returns
    -------
    normalized : Boolean
        Whether the angle lies within the range
    """
    # The extraction dropped this docstring's triple quotes, leaving bare
    # prose lines (a syntax error); restored here.
    normalized = True
    if theta_d > 360.0:
        normalized = False
        print("Input angle greater than 360 degrees. Did you mean this?")
    if theta_d < 0.0:
        normalized = False
        print("Input angle less than 0 degrees. Did you mean this?")
    return normalized
# Exercise the range check on an out-of-range positive and a negative angle.
theta_d = 5134.6
print(check_angle_normalized(theta_d))
theta_d = -52.3
print(check_angle_normalized(theta_d))
from math import pi
def check_angle_normalized(theta_d):
    """Check that an angle lies within [0, 360] degrees.

    Parameters
    ----------
    theta_d : float
        The angle in degrees.

    Returns
    -------
    normalized : Boolean
        Whether the angle lies within the range
    """
    # Variant of the previous definition using if/elif/else so the in-range
    # case also prints a confirmation.  The extraction dropped this
    # docstring's triple quotes (a syntax error); restored here.
    normalized = True
    if theta_d > 360.0:
        normalized = False
        print("Input angle greater than 360 degrees. Did you mean this?")
    elif theta_d < 0.0:
        normalized = False
        print("Input angle less than 0 degrees. Did you mean this?")
    else:
        print("Input angle in range [0, 360] degrees. Good.")
    return normalized
# Exercise the if/elif/else variant on one high, one negative, and one
# in-range angle.
theta_d = 543.2
print(check_angle_normalized(theta_d))
theta_d = -123.4
print(check_angle_normalized(theta_d))
theta_d = 89.12
print(check_angle_normalized(theta_d))
angles = [-123.4, 543.2, 89.12, 0.67, 5143.6, 30.0, 270.0]
# We run through all the angles, but only print those that are
# - in the range [0, 360], and
# - if sin^2(angle) < 0.5
from math import sin
for angle in angles:
    print("Input angle in degrees:", angle)
    if (check_angle_normalized(angle)):
        angle_r = degrees_to_radians(angle)
        if (sin(angle_r)**2 < 0.5):
            print("Valid angle in radians:", angle_r)
# `breakpoints` is a local companion module for the debugging exercise;
# NOTE(review): its contents are not visible in this file.
import breakpoints
print(breakpoints.test_sequence(10))
print(breakpoints.test_sequence(100))
print(breakpoints.test_sequence(1000))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup
Step3: Getting a set of molecules
Step4: What if we just want "everything"? Let's use an empty spec.
Step5: The above request might take some time, but hopefully not much more than a few seconds. Why do we allow this? Well, we don't return all the data for each molecule, and the total size of what we send right now is less than 10 MB.
|
<ASSISTANT_TASK:>
Python Code:
# URL templates for the Materials Project molecules API.  "results" expects a
# percent-encoded query document; the "mol_*" endpoints take a molecule id and
# differ only in the returned representation (JSON record, SVG drawing,
# XYZ geometry).
urlpattern = {
    "results": "https://materialsproject.org/molecules/results?query={spec}",
    "mol_json": "https://materialsproject.org/molecules/{mol_id}/json",
    "mol_svg": "https://materialsproject.org/molecules/{mol_id}/svg",
    "mol_xyz": "https://materialsproject.org/molecules/{mol_id}/xyz",
}
import json
import os
import sys
if sys.version_info[0] == 2:
from urllib import quote_plus
else:
from urllib.parse import quote_plus
import requests
# Ensure you have an API key, which is located on your dashboard
# (https://materialsproject.org/dashboard).
MAPI_KEY = "fAkEaP1K4y" # <-- replace with your api key
# Please do NOT share a notebook with others with your API key hard-coded in it.
# One alternative: Load API key from a set environment variable, e.g.
#
# MAPI_KEY = os.environ['PMG_MAPI_KEY']
#
# Best alternative: Store and load API key using pymatgen, e.g.
### Do once, on command line (without "!" in front) or in notebook
# !pmg config --add PMG_MAPI_KEY "your_api_key_goes_here"
### Then, in notebook/script:
# from pymatgen import SETTINGS
# MAPI_KEY = SETTINGS.get("PMG_MAPI_KEY")
# Here is a function we'll use to get results. We'll walk through some examples that use it.
def get_results(spec, fields=None):
    """Take a specification document (a `dict`), and return a list of matching molecules.

    The extraction dropped this docstring's triple quotes, leaving a bare
    prose line (a syntax error); restored here.  ``fields`` is accepted for
    API compatibility but is not currently used.
    """
    # Stringify `spec`, ensure the string uses double quotes, and percent-encode it...
    str_spec = quote_plus(str(spec).replace("'", '"'))
    # ...because the spec is the value of a "query" key in the final URL.
    url = urlpattern["results"].format(spec=str_spec)
    return (requests.get(url, headers={'X-API-KEY': MAPI_KEY})).json()
# Find molecules containing oxygen and phosphorous,
# and collect the ionization energies (relative to a lithium electrode) of the results.
# Separate elements with a "-"
spec = {"elements": "O-P"}
results = get_results(spec)
# Not all molecules have data for all available properties
ionization_energies = [molecule["IE"] for molecule in results if "IE" in molecule]
# Molecules with ionization energies ("IE") will have oxidation potentials relative to metallic electrodes,
# available as "oxidation_<ELECTRODE>" keys. "IE" itself is relative to lithium.
# There is an analogous relationship between the presence of electron affinity ("EA") values
# and corresponding "reduction_<ELECTRODE>" keys for reduction potentials using a reference metal.
# `task_id` is the molecule's identifier, which we'll use later in this notebook.
# `MW` is molecular weight
# `smiles`: https://en.wikipedia.org/wiki/Simplified_molecular-input_line-entry_system
for key in results[0]:
    print(key)
# A "silly" example specification that demonstrates many keys available to query, and
# the expected format of their value specifications.
#
# The "$"-prefixed keys are MongoDB syntax (https://docs.mongodb.org/manual/reference/operator/query/).
spec = {
    "elements": "C-H-O-F",
    "notelements": ["Al", "Br"], # a list (inconsistent for now with "elements" -- sorry)
    "charge": {"$in": [0, -1]}, # {0, 1, -1}
    "pointgroup": "C1",
    "functional_groups": {"$in": ["-COOH"]},
    "base_molecule": {"$in": ["s3"]},
    "nelements": 4,
    "EA": {"$gte": 0.4}, # >= 0.4
    "IE": {"$lt": 5}, # < 5
    "formula": "H11 C11 O4 F1", # "H11C11O4F" works too
}
results = get_results(spec)
# An empty spec matches everything; the endpoint returns a trimmed record per
# molecule, so the response stays small enough to fetch in one request.
results = get_results({})
print("{} molecules in total right now".format(len(results)))
def get_molecule(mol_id, fmt='json'):
    # Fetch one molecule by id. 'json' returns the parsed document; 'svg' and
    # 'xyz' return the raw response body (rendered structure / geometry).
    # NOTE(review): in Python 3 `response.content` is bytes -- writing it to a
    # text-mode file (as done below) would need `response.text` or mode 'wb';
    # confirm the intended Python version.
    url = urlpattern["mol_" + fmt].format(mol_id=mol_id)
    response = requests.get(url, headers={'X-API-KEY': MAPI_KEY})
    if fmt == 'json':
        return response.json()
    else:
        return response.content
# Use the id of the first query result for the per-molecule examples below.
first_result = results[0]
mol_id = first_result['task_id']
print("ID: {}".format(mol_id))
# Get all data by default
molecule = get_molecule(mol_id)
print("There are {} key/value pairs in molecule {}. Have a look around!".format(len(molecule), mol_id))
# The SVG format provides a two-dimensional "pretty picture" of the molecular structure.
svg_of_molecule = get_molecule(mol_id, fmt='svg')
with open('molecule.svg','w') as f:
    f.write(svg_of_molecule)
print("scalable vector graphic saved")
# The XYZ representation provided is the optimized geometry of the molecule in a charge-neutral state.
xyz_of_molecule = get_molecule(mol_id, fmt='xyz')
with open('molecule.xyz','w') as f:
    f.write(xyz_of_molecule)
print("XYZ file saved. Can load into molecule-viewer software.")
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load audio data
Step2: Create a receptive field
Step3: Simulate a neural response
Step4: Fit a model to recover this receptive field
Step5: Visualize the effects of regularization
Step6: Using different regularization types
Step7: Compare model performance
Step8: Plot the original STRF, and the one that we recovered with modeling.
|
<ASSISTANT_TASK:>
Python Code:
# MNE example: simulate a spectro-temporal receptive field (STRF), generate a
# synthetic neural response to real speech audio, then recover the STRF with
# ridge and Laplacian-regularized ReceptiveField models and compare them.
# Authors: Chris Holdgraf <choldgraf@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.decoding import ReceptiveField, TimeDelayingRidge
from scipy.stats import multivariate_normal
from scipy.io import loadmat
from sklearn.preprocessing import scale
rng = np.random.RandomState(1337) # To make this example reproducible
# Read in audio that's been recorded in epochs.
path_audio = mne.datasets.mtrf.data_path()
data = loadmat(path_audio + '/speech_data.mat')
audio = data['spectrogram'].T
sfreq = float(data['Fs'][0, 0])
n_decim = 2
audio = mne.filter.resample(audio, down=n_decim, npad='auto')
sfreq /= n_decim
n_freqs = 20
tmin, tmax = -0.1, 0.4
# To simulate the data we'll create explicit delays here
delays_samp = np.arange(np.round(tmin * sfreq),
                        np.round(tmax * sfreq) + 1).astype(int)
delays_sec = delays_samp / sfreq
freqs = np.linspace(50, 5000, n_freqs)
grid = np.array(np.meshgrid(delays_sec, freqs))
# We need data to be shaped as n_epochs, n_features, n_times, so swap axes here
grid = grid.swapaxes(0, -1).swapaxes(0, 1)
# Simulate a temporal receptive field with a Gabor filter
means_high = [.1, 500]
means_low = [.2, 2500]
cov = [[.001, 0], [0, 500000]]
gauss_high = multivariate_normal.pdf(grid, means_high, cov)
gauss_low = -1 * multivariate_normal.pdf(grid, means_low, cov)
weights = gauss_high + gauss_low # Combine to create the "true" STRF
kwargs = dict(vmax=np.abs(weights).max(), vmin=-np.abs(weights).max(),
              cmap='RdBu_r', shading='gouraud')
fig, ax = plt.subplots()
ax.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax.set(title='Simulated STRF', xlabel='Time Lags (s)', ylabel='Frequency (Hz)')
plt.setp(ax.get_xticklabels(), rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
# Reshape audio to split into epochs, then make epochs the first dimension.
n_epochs, n_seconds = 16, 5
audio = audio[:, :int(n_seconds * sfreq * n_epochs)]
X = audio.reshape([n_freqs, n_epochs, -1]).swapaxes(0, 1)
n_times = X.shape[-1]
# Delay the spectrogram according to delays so it can be combined w/ the STRF
# Lags will now be in axis 1, then we reshape to vectorize
delays = np.arange(np.round(tmin * sfreq),
                   np.round(tmax * sfreq) + 1).astype(int)
# Iterate through indices and append
X_del = np.zeros((len(delays),) + X.shape)
for ii, ix_delay in enumerate(delays):
    # These arrays will take/put particular indices in the data
    take = [slice(None)] * X.ndim
    put = [slice(None)] * X.ndim
    if ix_delay > 0:
        take[-1] = slice(None, -ix_delay)
        put[-1] = slice(ix_delay, None)
    elif ix_delay < 0:
        take[-1] = slice(-ix_delay, None)
        put[-1] = slice(None, ix_delay)
    X_del[ii][tuple(put)] = X[tuple(take)]
# Now set the delayed axis to the 2nd dimension
X_del = np.rollaxis(X_del, 0, 3)
X_del = X_del.reshape([n_epochs, -1, n_times])
n_features = X_del.shape[1]
weights_sim = weights.ravel()
# Simulate a neural response to the sound, given this STRF
y = np.zeros((n_epochs, n_times))
for ii, iep in enumerate(X_del):
    # Simulate this epoch and add random noise
    noise_amp = .002
    y[ii] = np.dot(weights_sim, iep) + noise_amp * rng.randn(n_times)
# Plot the first 2 trials of audio and the simulated electrode activity
X_plt = scale(np.hstack(X[:2]).T).T
y_plt = scale(np.hstack(y[:2]))
time = np.arange(X_plt.shape[-1]) / sfreq
_, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 6), sharex=True)
ax1.pcolormesh(time, freqs, X_plt, vmin=0, vmax=4, cmap='Reds',
               shading='gouraud')
ax1.set_title('Input auditory features')
ax1.set(ylim=[freqs.min(), freqs.max()], ylabel='Frequency (Hz)')
ax2.plot(time, y_plt)
ax2.set(xlim=[time.min(), time.max()], title='Simulated response',
        xlabel='Time (s)', ylabel='Activity (a.u.)')
mne.viz.tight_layout()
# Create training and testing data
train, test = np.arange(n_epochs - 1), n_epochs - 1
X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
X_train, X_test, y_train, y_test = [np.rollaxis(ii, -1, 0) for ii in
                                    (X_train, X_test, y_train, y_test)]
# Model the simulated data as a function of the spectrogram input
alphas = np.logspace(-3, 3, 7)
scores = np.zeros_like(alphas)
models = []
for ii, alpha in enumerate(alphas):
    rf = ReceptiveField(tmin, tmax, sfreq, freqs, estimator=alpha)
    rf.fit(X_train, y_train)
    # Now make predictions about the model output, given input stimuli.
    scores[ii] = rf.score(X_test, y_test)
    models.append(rf)
times = rf.delays_ / float(rf.sfreq)
# Choose the model that performed best on the held out data
ix_best_alpha = np.argmax(scores)
best_mod = models[ix_best_alpha]
coefs = best_mod.coef_[0]
best_pred = best_mod.predict(X_test)[:, 0]
# Plot the original STRF, and the one that we recovered with modeling.
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(6, 3), sharey=True, sharex=True)
ax1.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax2.pcolormesh(times, rf.feature_names, coefs, **kwargs)
ax1.set_title('Original STRF')
ax2.set_title('Best Reconstructed STRF')
plt.setp([iax.get_xticklabels() for iax in [ax1, ax2]], rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
# Plot the actual response and the predicted response on a held out stimulus
time_pred = np.arange(best_pred.shape[0]) / sfreq
fig, ax = plt.subplots()
ax.plot(time_pred, y_test, color='k', alpha=.2, lw=4)
ax.plot(time_pred, best_pred, color='r', lw=1)
ax.set(title='Original and predicted activity', xlabel='Time (s)')
ax.legend(['Original', 'Predicted'])
plt.autoscale(tight=True)
mne.viz.tight_layout()
# Plot model score for each ridge parameter
fig = plt.figure(figsize=(10, 4))
ax = plt.subplot2grid([2, len(alphas)], [1, 0], 1, len(alphas))
ax.plot(np.arange(len(alphas)), scores, marker='o', color='r')
ax.annotate('Best parameter', (ix_best_alpha, scores[ix_best_alpha]),
            (ix_best_alpha, scores[ix_best_alpha] - .1),
            arrowprops={'arrowstyle': '->'})
plt.xticks(np.arange(len(alphas)), ["%.0e" % ii for ii in alphas])
ax.set(xlabel="Ridge regularization value", ylabel="Score ($R^2$)",
       xlim=[-.4, len(alphas) - .6])
mne.viz.tight_layout()
# Plot the STRF of each ridge parameter
for ii, (rf, i_alpha) in enumerate(zip(models, alphas)):
    ax = plt.subplot2grid([2, len(alphas)], [0, ii], 1, 1)
    ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
    plt.xticks([], [])
    plt.yticks([], [])
plt.autoscale(tight=True)
fig.suptitle('Model coefficients / scores for many ridge parameters', y=1)
mne.viz.tight_layout()
# Repeat the sweep with Laplacian (smoothness) regularization instead of
# plain ridge, then compare the two best models.
scores_lap = np.zeros_like(alphas)
models_lap = []
for ii, alpha in enumerate(alphas):
    estimator = TimeDelayingRidge(tmin, tmax, sfreq, reg_type='laplacian',
                                  alpha=alpha)
    rf = ReceptiveField(tmin, tmax, sfreq, freqs, estimator=estimator)
    rf.fit(X_train, y_train)
    # Now make predictions about the model output, given input stimuli.
    scores_lap[ii] = rf.score(X_test, y_test)
    models_lap.append(rf)
ix_best_alpha_lap = np.argmax(scores_lap)
fig = plt.figure(figsize=(10, 6))
ax = plt.subplot2grid([3, len(alphas)], [2, 0], 1, len(alphas))
ax.plot(np.arange(len(alphas)), scores_lap, marker='o', color='r')
ax.plot(np.arange(len(alphas)), scores, marker='o', color='0.5', ls=':')
ax.annotate('Best Laplacian', (ix_best_alpha_lap,
                               scores_lap[ix_best_alpha_lap]),
            (ix_best_alpha_lap, scores_lap[ix_best_alpha_lap] - .1),
            arrowprops={'arrowstyle': '->'})
ax.annotate('Best Ridge', (ix_best_alpha, scores[ix_best_alpha]),
            (ix_best_alpha, scores[ix_best_alpha] - .1),
            arrowprops={'arrowstyle': '->'})
plt.xticks(np.arange(len(alphas)), ["%.0e" % ii for ii in alphas])
ax.set(xlabel="Laplacian regularization value", ylabel="Score ($R^2$)",
       xlim=[-.4, len(alphas) - .6])
mne.viz.tight_layout()
# Plot the STRF of each ridge parameter
xlim = times[[0, -1]]
for ii, (rf_lap, rf, i_alpha) in enumerate(zip(models_lap, models, alphas)):
    ax = plt.subplot2grid([3, len(alphas)], [0, ii], 1, 1)
    ax.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs)
    ax.set(xticks=[], yticks=[], xlim=xlim)
    if ii == 0:
        ax.set(ylabel='Laplacian')
    ax = plt.subplot2grid([3, len(alphas)], [1, ii], 1, 1)
    ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
    ax.set(xticks=[], yticks=[], xlim=xlim)
    if ii == 0:
        ax.set(ylabel='Ridge')
fig.suptitle('Model coefficients / scores for laplacian regularization', y=1)
mne.viz.tight_layout()
# Side-by-side comparison of the true STRF and both best reconstructions.
rf = models[ix_best_alpha]
rf_lap = models_lap[ix_best_alpha_lap]
_, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(9, 3),
                                  sharey=True, sharex=True)
ax1.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax2.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
ax3.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs)
ax1.set_title('Original STRF')
ax2.set_title('Best Ridge STRF')
ax3.set_title('Best Laplacian STRF')
plt.setp([iax.get_xticklabels() for iax in [ax1, ax2, ax3]], rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
# Build a reproducible 10x10 frame of random ints, then blank out roughly
# half the cells at random so rows contain NaN gaps.
np.random.seed(10)
df = pd.DataFrame(np.random.randint(0, 20, (10, 10)).astype(float), columns=["c%d"%d for d in range(10)])
df.where(np.random.randint(0,2, df.shape).astype(bool), np.nan, inplace=True)
def g(df):
    """Return the column labels where row 0 and row 8 agree, treating NaN as equal."""
    # Replacing NaN with a sentinel string makes missing values compare equal,
    # since NaN != NaN under normal float comparison.
    first_row = df.iloc[0, :].fillna('Nan')
    ninth_row = df.iloc[8, :].fillna('Nan')
    return df.columns[first_row == ninth_row]
# Evaluate on a copy so the comparison leaves `df` untouched.
result = g(df.copy())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Code to read the CSV files.
Step2: Read CSV data
Step3: Add columns for year and month
Step4: Add 5 day rolling mean to the timesereis.
Step5: Want date back in the columns
Step6: Use a groupby to compute the row locations that represent the minimum and
Step7: create dictionaries of max and min values for each hemisphere and for daily and rolling-mean
Step8: show that we have actually selected different data for daily and 5-average data
Step9: Add a rank column for each month and hemisphere's max and min
Step10: It is also desired that we have Annual min/max rank data so revisit the north and south
Step11: Write out the data frames in a nice format
Step12: clean up your csv files
|
<ASSISTANT_TASK:>
Python Code:
!mkdir -p ../data
!wget -P ../data -qN ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G02135/north/daily/data/NH_seaice_extent_final.csv
!wget -P ../data -qN ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G02135/north/daily/data/NH_seaice_extent_nrt.csv
!wget -P ../data -qN ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G02135/south/daily/data/SH_seaice_extent_final.csv
!wget -P ../data -qN ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G02135/south/daily/data/SH_seaice_extent_nrt.csv
import datetime as dt
import numpy as np
import os
import pandas as pd
from pandas import ExcelWriter
def parse_the_date(year, mm, dd):
    """Assemble a ``datetime.date`` from year/month/day values, coercing each to int."""
    y, m, d = int(year), int(mm), int(dd)
    return dt.date(y, m, d)
def slurp_csv(filename):
    """Read one NSIDC extent CSV into a DataFrame indexed by date.

    The year/mm/dd columns are merged into a single 'date' index through
    parse_the_date, and the unused 'missing' column is dropped.
    """
    column_names = ["year", "mm", "dd", "extent", "missing", "source"]
    frame = pd.read_csv(
        filename,
        header=None,
        skiprows=2,
        names=column_names,
        parse_dates={'date': ['year', 'mm', 'dd']},
        date_parser=parse_the_date,
        index_col='date',
    )
    return frame.drop('missing', axis=1)
def read_a_hemisphere(hemisphere):
    """Load and concatenate the final + near-real-time extent series for one pole.

    `hemisphere` is 'north' or 'south'; only its first letter (N/S) is used to
    select the files under ../data.
    """
    prefix = hemisphere[0:1].upper()
    the_dir = '../data'
    frames = []
    for product in ('final', 'nrt'):
        filename = '{hemi}H_seaice_extent_{prod}.csv'.format(hemi=prefix, prod=product)
        frames.append(slurp_csv(os.path.join(the_dir, filename)))
    return pd.concat(frames)
# Build the combined (final + near-real-time) daily series for each hemisphere.
north = read_a_hemisphere('north')
south = read_a_hemisphere('south')
south.head()
def add_year_month_columns(df):
    """Return a copy of df with integer 'year' and 'month' columns taken from the date index.

    The per-file 'source' column is dropped and the 'date' index is preserved.
    """
    a = df.copy()
    # keyword axis: the positional form drop('source', 1) was removed in pandas 2.0
    a = a.drop('source', axis=1)
    a = a.reset_index()
    a['year'] = pd.to_datetime(a.date).dt.year
    a['month'] = pd.to_datetime(a.date).dt.month
    a = a.set_index('date')
    return a
# Annotate both hemispheres with year/month columns and peek at the result.
north = add_year_month_columns(north)
south = add_year_month_columns(south)
north.head()
south.head()
def add_rolling_mean(df, window=5, min_periods=2):
    """Append a '5day-Avg' rolling-mean column to df (modified in place and returned).

    The extent series is first right-aligned onto a continuous daily calendar
    starting 1978-10-25, so gaps in the early every-other-day record become
    explicit NaNs instead of silently shortening the averaging window.
    """
    # Continuous daily calendar covering the whole record up to today.
    ts = pd.Series(np.nan, index=pd.date_range('1978-10-25', dt.date.today().strftime('%Y-%m-%d')))
    # Reindex df onto the daily calendar; days without data become NaN.
    aligned = df.align(ts, axis=0, join='right')[0]
    # NOTE: pd.rolling_mean is the legacy (pre-0.18) API used throughout this
    # notebook; modern pandas spells this
    # aligned['extent'].rolling(window, min_periods=min_periods).mean().
    # The window/min_periods parameters were previously ignored (hard-coded 5/2);
    # they are now honored, with defaults matching the old behavior.
    df['5day-Avg'] = pd.rolling_mean(aligned['extent'], window=window, min_periods=min_periods)
    return df
# Attach the 5-day rolling mean, then move 'date' back into the columns so the
# later groupby/idxmin row lookups can report it.
north = add_rolling_mean(north)
south = add_rolling_mean(south)
north.head(1)
north = north.reset_index()
south = south.reset_index()
north.head(1)
def select_min_and_max_variable_rows_by_year_and_month(df, variable):
    """For every (year, month) group, pull the full row holding the extreme of `variable`.

    Returns {'min': frame, 'max': frame}, each restricted to the
    date/variable/year/month columns.
    """
    keep = ['date', variable, 'year', 'month']
    grouped = df.groupby(['year', 'month'])[variable]
    extremes = {}
    for label, locator in (('min', grouped.idxmin), ('max', grouped.idxmax)):
        extremes[label] = df.loc[locator()][keep]
    return extremes
# Monthly extreme rows for both hemispheres, on the daily and 5-day-averaged series.
n = select_min_and_max_variable_rows_by_year_and_month(north, 'extent')
navg = select_min_and_max_variable_rows_by_year_and_month(north, '5day-Avg')
s = select_min_and_max_variable_rows_by_year_and_month(south, 'extent')
savg = select_min_and_max_variable_rows_by_year_and_month(south, '5day-Avg')
n['max'][3:5]
navg['max'][3:5]
def add_rank(df, rank_by, ascending):
    """Rank `rank_by` within each calendar month (ties broken by order of appearance).

    Adds the result as a 'rank' column in place and returns df.
    """
    monthly = df.groupby('month')[rank_by]
    df['rank'] = monthly.rank(ascending=ascending, method='first')
    return df
# Rank the monthly extremes: max tables rank largest-first, min tables smallest-first.
n['max'] = add_rank(n['max'], 'extent', ascending=False)
n['min'] = add_rank(n['min'], 'extent', ascending=True)
s['max'] = add_rank(s['max'], 'extent', ascending=False)
s['min'] = add_rank(s['min'], 'extent', ascending=True)
navg['max'] = add_rank(navg['max'], '5day-Avg', ascending=False)
navg['min'] = add_rank(navg['min'], '5day-Avg', ascending=True)
savg['max'] = add_rank(savg['max'], '5day-Avg', ascending=False)
savg['min'] = add_rank(savg['min'], '5day-Avg', ascending=True)
def do_annual_min_max_ranking(df, field):
    """Build a per-year table of the annual minimum and maximum rows of `field`.

    Each year's extreme row keeps its date and value plus a cross-year rank
    (smallest minimum ranks 1; largest maximum ranks 1).  Columns are prefixed
    'min_' / 'max_' and the result is indexed by calendar year.
    """
    yearly = df.groupby(['year'])[field]

    def extreme_table(locator, ascending, prefix):
        # Pull the extreme row per year, rank those extremes across years,
        # then index by calendar year and tag the columns.
        table = df.loc[locator()][['date', field]]
        table['rank'] = table[field].rank(ascending=ascending, method='first')
        table = table.set_index(pd.to_datetime(table.date).dt.year)
        return table.add_prefix(prefix)

    mindata = extreme_table(yearly.idxmin, True, 'min_')
    maxdata = extreme_table(yearly.idxmax, False, 'max_')
    return pd.concat([mindata, maxdata], axis=1)
# Annual min/max tables for each hemisphere, daily and 5-day-averaged.
north_annual_by_day = do_annual_min_max_ranking(north, 'extent')
north_annual_averaged = do_annual_min_max_ranking(north, '5day-Avg')
south_annual_by_day = do_annual_min_max_ranking(south, 'extent')
south_annual_averaged = do_annual_min_max_ranking(south, '5day-Avg')
south_annual_averaged.head(3)
# Preview the rank-by-month pivot used for the Excel report.
a = navg['min'].copy()
a.columns
a.set_index(['rank', 'month']).unstack('month').head(3)
# Month names (January..December) used as the outer column level.
import calendar
month_names = [calendar.month_name[x] for x in range(1,13)]
def swap_column_level_and_sort(df):
    """Swap the two column-index levels and sort columns by the new outer level.

    Mutates df.columns in place, then returns a new column-sorted frame.
    """
    df.columns = df.columns.swaplevel(1, 0)
    # sort_index replaces DataFrame.sortlevel, which was deprecated and removed
    # from pandas; behavior (sort by level 0, remaining levels too) is the same.
    df = df.sort_index(axis=1, level=0)
    return df
# set index to year and month and then broadcast month back across the columns.
# next swap and sort so that you have the data grouped under the month.
def prepare_for_csv(df):
    """Pivot a monthly extremes table into rank-by-month layout for report output.

    Rows become ranks, columns become (month name, field) pairs.
    """
    pivoted = df.set_index(['rank', 'month']).unstack('month')
    pivoted = swap_column_level_and_sort(pivoted)
    # Replace the numeric month level with the human-readable month names.
    pivoted.columns = pivoted.columns.set_levels(month_names, level=0)
    return pivoted
def write_to_xls(df_list, writer, is_monthly=True):
    """Write (frame, sheet-name) pairs into an open ExcelWriter.

    Monthly tables are first pivoted into rank-by-month layout via prepare_for_csv;
    annual tables are written as-is.
    """
    for frame, sheet in df_list:
        table = prepare_for_csv(frame) if is_monthly else frame
        table.to_excel(writer, '{sheet}'.format(sheet=sheet), float_format="%.3f")
# Write the monthly and annual tables into one multi-sheet Excel workbook.
writer = ExcelWriter('../output/Sea_Ice_MinMax_Statistics.xls')
monthly_dataframelist =[(navg['min'], 'Northern 5day Min'),
                        (navg['max'], 'Northern 5day Max'),
                        (savg['min'], 'Southern 5day Min'),
                        (savg['max'], 'Southern 5day Max'),
                        (n['min'], 'Northern Daily Min'),
                        (n['max'], 'Northern Daily Max'),
                        (s['min'], 'Southern Daily Min'),
                        (s['max'], 'Southern Daily Max')]
annual_dataframelist = [(north_annual_averaged, 'North Annual 5day-avg'),
                        (north_annual_by_day, 'North Annual Daily'),
                        (south_annual_averaged, 'South Annual 5day-avg'),
                        (south_annual_by_day, 'South Annual Daily')]
write_to_xls(monthly_dataframelist, writer, is_monthly=True)
write_to_xls(annual_dataframelist, writer, is_monthly=False)
writer.save()
b = prepare_for_csv(a)
b
# Remove the downloaded CSVs now that the report is written.
!cd ../data ; rm -f NH_seaice_extent_final.csv NH_seaice_extent_nrt.csv SH_seaice_extent_final.csv SH_seaice_extent_nrt.csv
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Step 2
Step3: Step 3
Step4: Step 4
|
<ASSISTANT_TASK:>
Python Code:
# NOTE(review): the fused-LSTM conversion path used below requires a recent
# (nightly, at the time of writing) TensorFlow build.
!pip install tf-nightly
import numpy as np
import tensorflow as tf
# Step 1: build a Keras classifier — one batch-major LSTM over the 28 rows of
# an MNIST digit, flattened into a 10-way softmax.
model = tf.keras.models.Sequential([
    tf.keras.layers.Input(shape=(28, 28), name='input'),
    tf.keras.layers.LSTM(20, time_major=False, return_sequences=True),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax, name='output')
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
# Load MNIST dataset and scale pixels to [0, 1] float32.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train = x_train.astype(np.float32)
x_test = x_test.astype(np.float32)
# Change this to True if you want to test the flow rapidly.
# Train with a small dataset and only 1 epoch. The model will work poorly
# but this provides a fast way to test if the conversion works end to end.
_FAST_TRAINING = False
_EPOCHS = 5
if _FAST_TRAINING:
  _EPOCHS = 1
  _TRAINING_DATA_COUNT = 1000
  x_train = x_train[:_TRAINING_DATA_COUNT]
  y_train = y_train[:_TRAINING_DATA_COUNT]
model.fit(x_train, y_train, epochs=_EPOCHS)
model.evaluate(x_test, y_test, verbose=0)
# Step 2: export a SavedModel with a fixed-shape concrete function, since the
# TFLite converter needs static input dimensions.
run_model = tf.function(lambda x: model(x))
# This is important, let's fix the input size.
BATCH_SIZE = 1
STEPS = 28
INPUT_SIZE = 28
concrete_func = run_model.get_concrete_function(
    tf.TensorSpec([BATCH_SIZE, STEPS, INPUT_SIZE], model.inputs[0].dtype))
# model directory.
MODEL_DIR = "keras_lstm"
model.save(MODEL_DIR, save_format="tf", signatures=concrete_func)
# Step 3: convert the SavedModel to a TFLite flatbuffer.
converter = tf.lite.TFLiteConverter.from_saved_model(MODEL_DIR)
tflite_model = converter.convert()
# Step 4: cross-check TF vs TFLite predictions on a few test digits.
# Run the model with TensorFlow to get expected results.
TEST_CASES = 10
# Run the model with TensorFlow Lite
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
for i in range(TEST_CASES):
  expected = model.predict(x_test[i:i+1])
  interpreter.set_tensor(input_details[0]["index"], x_test[i:i+1, :, :])
  interpreter.invoke()
  result = interpreter.get_tensor(output_details[0]["index"])
  # Assert if the result of TFLite model is consistent with the TF model.
  np.testing.assert_almost_equal(expected, result, decimal=5)
  print("Done. The result of TensorFlow matches the result of TensorFlow Lite.")
  # Please note: TfLite fused Lstm kernel is stateful, so we need to reset
  # the states.
  # Clean up internal states.
  interpreter.reset_all_variables()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Function that generates random trees and calculates SPR (and others) distances
Step2: the figure above shows that the replug distance (from USPR) has a similar performance to dSPR, but at a very high computational price, as shown by the timings in the figure below.
|
<ASSISTANT_TASK:>
Python Code:
%reload_ext autoreload
%autoreload 2
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import sys, subprocess, time, dendropy
import numpy as np
# Location of the external tree programs (uspr, gf_*) and the scratch dir
# where tree pairs are written for them to read.
bindir="/home/leo/local/bin/"
localdir="/tmp/"
def run_uspr(tree1, tree2, fast=False):
    """Time Whidden's `uspr` binary on a pair of trees and parse its distances.

    Returns (elapsed_seconds, [d_TBR, d_replug, d_USPR]).  With fast=True only
    the cheaper TBR and replug distances are requested, so the USPR slot keeps
    its -1 "not reported" sentinel.
    """
    pair_file = localdir + "pair.tre"
    dendropy.TreeList([tree1, tree2]).write(path=pair_file, schema="newick")
    if fast:
        command_line = bindir + "uspr --tbr --replug < " + pair_file
    else:
        command_line = bindir + "uspr < " + pair_file
    start_time = time.perf_counter()
    proc_run = subprocess.run(command_line, shell=True, stdout=subprocess.PIPE, universal_newlines=True)
    elapsed_time = time.perf_counter() - start_time
    # Map each stdout marker to its slot in the [TBR, replug, SPR] result list.
    markers = {"d_TBR =": 0, "d_R =": 1, "d_USPR =": 2}
    dists = [-1, -1, -1]
    for line in proc_run.stdout.splitlines():
        for marker, slot in markers.items():
            if marker in line:
                dists[slot] = float(line.split(" ")[2].strip())
    return elapsed_time, dists
def run_dspr(tree1, tree2):
    """Time the `gf_spr_distance` binary on a pair of trees.

    Returns (elapsed_seconds, [dSPR, RF, Hdist]) parsed from columns 2-4 of
    the program's second stdout line.
    """
    pair_file = localdir + "pair.tre"
    dendropy.TreeList([tree1, tree2]).write(path=pair_file, schema="nexus")
    start_time = time.perf_counter()
    proc_run = subprocess.run(bindir + "gf_spr_distance " + pair_file, shell=True, stdout=subprocess.PIPE,
                              universal_newlines=True)
    elapsed_time = time.perf_counter() - start_time
    fields = proc_run.stdout.splitlines()[1].split()[1:4]
    return elapsed_time, [float(value) for value in fields]
def generate_spr_trees(n_leaves, n_trees, n_spr):
    """Call `gf_generate_spr_trees` and parse its nexus output.

    Returns (elapsed_seconds, TreeList).  Note the C program emits one tree
    more than n_trees — see the caller in simulate_estimate_spr.
    """
    command = [bindir + "gf_generate_spr_trees", str(n_leaves), str(n_trees), str(n_spr)]
    start_time = time.perf_counter()
    proc_run = subprocess.run(command, stdout=subprocess.PIPE, universal_newlines=True)
    elapsed_time = time.perf_counter() - start_time
    return elapsed_time, dendropy.TreeList.get(data=proc_run.stdout, schema="nexus")
def simulate_estimate_spr(ntrees=10, min_leaves=6, max_leaves=9, min_spr=1, max_spr_f=0.5, fast=True):
    """Benchmark uspr vs gf_spr_distance over chains of random trees.

    For every leaf count in [min_leaves, max_leaves] and every applied SPR
    count in [min_spr, max_spr_f * nleaves], generate a chain of random trees
    and score each consecutive pair with both programs.

    Returns an array with one row per pair:
    [nleaves, nspr, uspr_secs, dspr_secs, tbr, replug, spr, spr, rf, hdist].
    """
    o_tbl = []
    for nleaves in range(min_leaves, max_leaves + 1):
        # bugfix: the lower bound was hard-coded to 1, silently ignoring min_spr
        # (defaults and all existing calls used min_spr=1, so behavior is unchanged).
        for nspr in range(min_spr, int(max_spr_f * nleaves) + 1):
            # remember that the C generator emits one extra tree, so `trees`
            # holds ntrees+1 entries and pairs (i-1, i) are always valid.
            _, trees = generate_spr_trees(nleaves, ntrees, nspr)
            for i in range(1, ntrees):
                secs1, dists1 = run_uspr(trees[i - 1], trees[i], fast)
                secs2, dists2 = run_dspr(trees[i - 1], trees[i])
                header = [nleaves, nspr, secs1, secs2]  # columns 0-3
                o_tbl.append(header + dists1 + dists2)  # 4-6 [tbr, replug, spr], 7-9 [spr, rf, hdist]
    return np.array(o_tbl)
# Run the full benchmark (20 pairs per setting, 10-30 leaves, up to 0.7*n SPRs)
# and cache the result table to disk.
X = simulate_estimate_spr (20, 10, 30, 1, 0.7, True)
np.save ("performance_table", X)
# Small gaussian jitter so overlapping integer-valued points stay visible.
jit1 = np.random.normal(scale=0.1, size=X.shape[0])
jit2 = np.random.normal(scale=0.1, size=X.shape[0])
print (X.shape)
# Estimated vs applied SPR distance for both programs.
# NOTE(review): plt.cm.spectral is the pre-2.2 matplotlib name; newer releases
# spell it plt.cm.Spectral — confirm against the matplotlib version in use.
fig, axes = plt.subplots(1); fig.set_size_inches(16, 8)
axes.scatter(X[:,1]+0.8*jit1-0.2,X[:,5]+1.2*jit2, alpha=.3, s=15, edgecolor="none", c=plt.cm.spectral(0.2), label="Whidden's replug")
axes.scatter(X[:,1]+0.8*jit2+0.2,X[:,7]+1.2*jit1, alpha=.3, s=15, edgecolor="none", c=plt.cm.spectral(0.9), label="Leo's dSPR")
axes.legend(loc=2)
axes.set_xlabel("applied (true) SPR distance",fontsize=12)
axes.set_ylabel("estimated SPR distance",fontsize=12)
axes.set_xlim(0,22); axes.set_ylim(0,18)
# Left panel: log runtime vs tree size; right panel: density of the speedup.
fig, axes = plt.subplots(1,2); fig.set_size_inches(15, 6)
axes[0].scatter(X[:,0]+0.5*jit1+0.1,np.log(X[:,2]), alpha=.4, s=20, edgecolor="none", c=plt.cm.spectral(0.4), label="USPR")
axes[0].scatter(X[:,0]+0.5*jit2-0.1,np.log(X[:,3]), alpha=.4, s=20, edgecolor="none", c=plt.cm.spectral(0.95), label="dSPR")
axes[0].legend(loc=2)
axes[0].set_ylim(-6,6); axes[0].set_xlim(9,31)
axes[0].set_xlabel("tree size",fontsize=12)
axes[0].set_ylabel("log time (secs)",fontsize=12)
from scipy.stats import gaussian_kde
# Per-pair log speedup of dSPR over USPR, smoothed with a KDE.
speedup = np.log(X[:,2]) - np.log(X[:,3])
density = gaussian_kde(speedup)
density.covariance_factor = lambda : .09 # set the covariance_factor, lower means more detail
density._compute_covariance()
dx = np.linspace(np.percentile(speedup,q=0.01), np.percentile(speedup,q=99.99),100)
dy = density(dx)
axes[1].plot(dx, dy, linewidth=1.5, c=plt.cm.spectral(0.1))
axes[1].fill_between(dx, dy, alpha=.5, zorder=5, antialiased=True, color=plt.cm.spectral(0.1))
axes[1].set_title("dSPR speedup compared to USPR fast computations, in log scale")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: SystemML Build information
Step2: Download model, proto files and convert them to SystemML format.
Step3: PrintTopK
Step4: Classify images
Step5: Sample code to retrain the model and use it to classify images in two different ways
|
<ASSISTANT_TASK:>
Python Code:
# Show the installed SystemML version and its Spark-side build information.
!pip show systemml
from systemml import MLContext
ml = MLContext(sc)
print ("SystemML Built-Time:"+ ml.buildTime())
print(ml.info())
# Workaround for Python 2.7.13 to avoid certificate validation issue while downloading any file.
import ssl
try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    # Legacy Python that doesn't verify HTTPS certificates by default
    pass
else:
    # Handle target environment that doesn't support HTTPS verification
    ssl._create_default_https_context = _create_unverified_https_context
# Create label.txt file
def createLabelFile(fileName):
    """Write the two-class (1=Cat, 2=Dog) label map consumed by Caffe2DML to fileName.

    Uses a context manager so the file is closed even if a write fails
    (the original open/close pair leaked the handle on error).
    """
    with open(fileName, 'w') as file:
        file.write('1,"Cat" \n')
        file.write('2,"Dog" \n')
# Download caffemodel and proto files
def downloadAndConvertModel(downloadDir='.', trained_vgg_weights='trained_vgg_weights'):
    """Download the VGG-19 proto/label files (and optionally the caffemodel) and
    convert the caffemodel into SystemML format under trained_vgg_weights.

    NOTE(review): uses the Python 2 urllib.urlretrieve API; on Python 3 this
    would be urllib.request.urlretrieve.
    """
    # Step 1: Download the VGG-19 model and other files.
    import errno
    import os
    import urllib
    # Create directory, if exists don't error out
    try:
        os.makedirs(os.path.join(downloadDir,trained_vgg_weights))
    except OSError as exc: # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(trained_vgg_weights):
            pass
        else:
            raise
    # Download deployer, network, solver proto and label files.
    urllib.urlretrieve('https://raw.githubusercontent.com/apache/systemml/master/scripts/nn/examples/caffe2dml/models/imagenet/vgg19/VGG_ILSVRC_19_layers_deploy.proto', os.path.join(downloadDir,'VGG_ILSVRC_19_layers_deploy.proto'))
    urllib.urlretrieve('https://raw.githubusercontent.com/apache/systemml/master/scripts/nn/examples/caffe2dml/models/imagenet/vgg19/VGG_ILSVRC_19_layers_network.proto',os.path.join(downloadDir,'VGG_ILSVRC_19_layers_network.proto'))
    #TODO: After downloading network file (VGG_ILSVRC_19_layers_network.proto) , change num_output from 1000 to 2
    urllib.urlretrieve('https://raw.githubusercontent.com/apache/systemml/master/scripts/nn/examples/caffe2dml/models/imagenet/vgg19/VGG_ILSVRC_19_layers_solver.proto',os.path.join(downloadDir,'VGG_ILSVRC_19_layers_solver.proto'))
    # TODO: set values as described below in VGG_ILSVRC_19_layers_solver.proto (Possibly through APIs whenever available)
    #  test_iter: 100
    #  stepsize: 40
    #  max_iter: 200
    # Create labels for data
    ### 1,"Cat"
    ### 2,"Dog"
    createLabelFile(os.path.join(downloadDir, trained_vgg_weights, 'labels.txt'))
    # TODO: Following line commented out as it is a ~500MB file; if you need to download it please uncomment it and run.
    # urllib.urlretrieve('http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_19_layers.caffemodel', os.path.join(downloadDir,'VGG_ILSVRC_19_layers.caffemodel'))
    # Step 2: Convert the caffemodel to trained_vgg_weights directory
    import systemml as sml
    sml.convert_caffemodel(sc, os.path.join(downloadDir,'VGG_ILSVRC_19_layers_deploy.proto'), os.path.join(downloadDir,'VGG_ILSVRC_19_layers.caffemodel'), os.path.join(downloadDir,trained_vgg_weights))
    return
# Print top K indices and probability
def printTopK(prob, label, k):
    """Print the indices and probabilities of the k highest-scoring classes in row 0 of prob."""
    top = np.argsort(-prob)[0, :k]  # negation gives descending order
    print(label, 'Top ', k, ' Index : ', top)
    print(label, 'Top ', k, ' Probability : ', prob[0, top])
import numpy as np
import urllib
from systemml.mllearn import Caffe2DML
import systemml as sml
def classifyImages(urls, img_shape=(3, 224, 224), printTopKData=False, topK=5, downloadDir='.', trained_vgg_weights='trained_vgg_weights'):
    """Download each image URL, display it, and classify it with the saved VGG-19 model.

    When printTopKData is True, also print the top-K class indices and
    probabilities.  Bugfix: the parameter was misspelled 'printTokKData' while
    the body read 'printTopKData', so reaching that branch raised a NameError;
    all visible callers pass the flag positionally, so the rename is safe.
    """
    vgg = Caffe2DML(sqlCtx, solver=os.path.join(downloadDir,'VGG_ILSVRC_19_layers_solver.proto'), input_shape=img_shape)
    vgg.load(trained_vgg_weights)
    for url in urls:
        outFile = 'inputTest.jpg'
        urllib.urlretrieve(url, outFile)  # Python 2 API (urllib.request.urlretrieve on Python 3)
        from IPython.display import Image, display
        display(Image(filename=outFile))
        print ("Prediction of above image to ImageNet Class using");
        ## Do image classification through SystemML processing
        from PIL import Image
        input_image = sml.convertImageToNumPyArr(Image.open(outFile), img_shape=img_shape
                                                 , color_mode='BGR', mean=sml.getDatasetMean('VGG_ILSVRC_19_2014'))
        print ("Image preprocessed through SystemML :: ", vgg.predict(input_image)[0])
        if printTopKData:
            sysml_proba = vgg.predict_proba(input_image)
            printTopK(sysml_proba, 'SystemML BGR', topK)
from pyspark.ml.linalg import Vectors
import os
import systemml as sml
def getLabelFeatures(filename, train_dir, img_shape):
    """Map one training image file to a (label, dense-feature-vector) pair.

    Files named cat* get label 1, dog* get label 2; any other name is an error.
    """
    from PIL import Image
    pixels = sml.convertImageToNumPyArr(Image.open(os.path.join(train_dir, filename)), img_shape=img_shape)[0, :]
    vec = Vectors.dense(pixels)
    lowered = filename.lower()
    if lowered.startswith('cat'):
        return (1, vec)
    if lowered.startswith('dog'):
        return (2, vec)
    raise ValueError('Expected the filename to start with either cat or dog')
from pyspark.sql.functions import rand
import os
def createTrainingDF(train_dir, train_data_file, img_shape):
    """Convert every image under train_dir into a shuffled Spark DataFrame of (label, features).

    train_data_file is only referenced by the optional (commented-out) parquet dump.
    """
    jpeg_files = os.listdir(train_dir)
    partitions = int(len(jpeg_files) / 10)  # ~10 files per partition
    pairs = sc.parallelize(jpeg_files, partitions).map(
        lambda filename: getLabelFeatures(filename, train_dir, img_shape))
    train_df = pairs.toDF(['label', 'features']).orderBy(rand())
    # Optional: persisting here separates the conversion step from training
    # train_df.write.parquet(train_data_file) # 'kaggle-cats-dogs.parquet'
    return train_df
def readTrainingDF(train_dir, train_data_file):
    """Load a previously converted training DataFrame from parquet (train_dir is unused)."""
    return sqlContext.read.parquet(train_data_file)
# downloadAndConvertModel(downloadDir, trained_vgg_weights)
# TODO: Take "TODO" actions mentioned in the downloadAndConvertModel() function after calling downloadAndConvertModel() function.
def retrainModel(img_shape, downloadDir, trained_vgg_weights, train_dir, train_data_file, vgg_new_model):
    """Transfer-learn the converted VGG-19 weights on the cats/dogs data.

    The final fc8 layer's weights are ignored on load (its output count differs
    from ImageNet's 1000 classes); the retrained model is saved to vgg_new_model
    and returned.
    """
    # Let downloadAndConvertModel() function be commented out, as it needs to be called separately (which is done in cell above) and manual action to be taken after calling it.
    # downloadAndConvertModel(downloadDir, trained_vgg_weights)
    # TODO: Take "TODO" actions mentioned in the downloadAndConvertModel() function after calling that function.
    train_df = createTrainingDF(train_dir, train_data_file, img_shape)
    ## Write from input files OR read if its already written/converted
    # train_df = readTrainingDF(train_dir, train_data_file)
    # Load the model
    vgg = Caffe2DML(sqlCtx, solver=os.path.join(downloadDir,'VGG_ILSVRC_19_layers_solver.proto'), input_shape=img_shape)
    vgg.load(weights=os.path.join(downloadDir,trained_vgg_weights), ignore_weights=['fc8'])
    vgg.set(debug=True).setExplain(True)
    # Train the model using new data
    vgg.fit(train_df)
    # Save the trained model
    vgg.save(vgg_new_model)
    return vgg
import numpy as np
import urllib
from systemml.mllearn import Caffe2DML
import systemml as sml
def classifyImagesWTransfLearning(urls, model, img_shape=(3, 224, 224), printTopKData=False, topK=5):
    """Download each image URL, display it, and classify it with an already-loaded model.

    When printTopKData is True, also print the top-K class indices and
    probabilities.  Bugfix: the parameter was misspelled 'printTokKData' while
    the body read 'printTopKData', so reaching that branch raised a NameError;
    the only visible caller passes the flag positionally, so the rename is safe.
    """
    # vgg.load(trained_vgg_weights)
    for url in urls:
        outFile = 'inputTest.jpg'
        urllib.urlretrieve(url, outFile)  # Python 2 API (urllib.request.urlretrieve on Python 3)
        from IPython.display import Image, display
        display(Image(filename=outFile))
        print ("Prediction of above image to ImageNet Class using");
        ## Do image classification through SystemML processing
        from PIL import Image
        input_image = sml.convertImageToNumPyArr(Image.open(outFile), img_shape=img_shape
                                                 , color_mode='BGR', mean=sml.getDatasetMean('VGG_ILSVRC_19_2014'))
        print ("Image preprocessed through SystemML :: ", model.predict(input_image)[0])
        if printTopKData:
            sysml_proba = model.predict_proba(input_image)
            printTopK(sysml_proba, 'SystemML BGR', topK)
# ImageNet specific parameters
img_shape = (3, 224, 224)
# Setting other than current directory causes "network file not found" issue, as network file
# location is defined in solver file which does not have a path, so it searches in current dir.
downloadDir = '.' # /home/asurve/caffe_models'
trained_vgg_weights = 'trained_vgg_weights'
train_dir = '/home/asurve/data/keggle/dogs_vs_cats_2/train'
train_data_file = 'kaggle-cats-dogs.parquet'
vgg_new_model = 'kaggle-cats-dogs-model_2'
printTopKData=True
topK=5
urls = ['http://cdn3-www.dogtime.com/assets/uploads/gallery/goldador-dog-breed-pictures/puppy-1.jpg','https://lh3.googleusercontent.com/-YdeAa1Ff4Ac/VkUnQ4vuZGI/AAAAAAAAAEg/nBiUn4pp6aE/w800-h800/images-6.jpeg','https://upload.wikimedia.org/wikipedia/commons/thumb/5/58/MountainLion.jpg/312px-MountainLion.jpg']
# Way 1: retrain in-process and classify with the returned model object.
vgg = retrainModel(img_shape, downloadDir, trained_vgg_weights, train_dir, train_data_file, vgg_new_model)
classifyImagesWTransfLearning(urls, vgg, img_shape, printTopKData, topK)
# Way 2: reload the saved retrained weights from disk and classify with them.
img_shape = (3, 224, 224)
printTopKData=True
topK=5
# Setting other than current directory causes "network file not found" issue, as network file
# location is defined in solver file which does not have a path, so it searches in current dir.
downloadDir = '.' # /home/asurve/caffe_models'
trained_vgg_weights = 'kaggle-cats-dogs-model_2'
urls = ['http://cdn3-www.dogtime.com/assets/uploads/gallery/goldador-dog-breed-pictures/puppy-1.jpg','https://lh3.googleusercontent.com/-YdeAa1Ff4Ac/VkUnQ4vuZGI/AAAAAAAAAEg/nBiUn4pp6aE/w800-h800/images-6.jpeg','https://upload.wikimedia.org/wikipedia/commons/thumb/5/58/MountainLion.jpg/312px-MountainLion.jpg']
classifyImages(urls,img_shape, printTopKData, topK, downloadDir, trained_vgg_weights)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the data
Step2: Quick full example
Step3: The results table (tree table)
Step4: Filter and examine the tree table
Step5: The tree inference command
Step6: Run tree inference jobs in parallel
Step7: The tree table
Step8: <h3><span style="color
Step9: Draw cloud tree
Step10: <h3><span style="color
|
<ASSISTANT_TASK:>
Python Code:
# conda install ipyrad -c bioconda
# conda install raxml -c bioconda
# conda install toytree -c eaton-lab
import ipyrad.analysis as ipa
import toytree
# the path to your HDF5 formatted seqs file
data = "/home/deren/Downloads/ref_pop2.seqs.hdf5"
# check scaffold idx (row) against scaffold names
ipa.treeslider(data).scaffold_table.head()
# Quick full example: raxml with 100 bootstraps on 250 kb non-overlapping
# windows of scaffold 2, collapsing samples to population consensus sequences.
# select a scaffold idx, start, and end positions
ts = ipa.treeslider(
    name="test2",
    data="/home/deren/Downloads/ref_pop2.seqs.hdf5",
    workdir="analysis-treeslider",
    scaffold_idxs=2,
    window_size=250000,
    slide_size=250000,
    inference_method="raxml",
    inference_args={"N": 100, "T": 4},
    minsnps=10,
    consensus_reduce=True,
    mincov=5,
    imap={
        "reference": ["reference"],
        "virg": ["TXWV2", "LALC2", "SCCU3", "FLSF33", "FLBA140"],
        "mini": ["FLSF47", "FLMO62", "FLSA185", "FLCK216"],
        "gemi": ["FLCK18", "FLSF54", "FLWO6", "FLAB109"],
        "bran": ["BJSL25", "BJSB3", "BJVL19"],
        "fusi-N": ["TXGR3", "TXMD3"],
        "fusi-S": ["MXED8", "MXGT4"],
        "sagr": ["CUVN10", "CUCA4", "CUSV6"],
        "oleo": ["CRL0030", "HNDA09", "BZBB1", "MXSA3017"],
    },
)
ts.show_inference_command()
ts.run(auto=True, force=True)
ts.tree_table.head()
# example: remove any rows where the tree is NaN
df = ts.tree_table.loc[ts.tree_table.tree.notna()]
mtre = toytree.mtree(df.tree)
mtre.treelist = [i.root("reference") for i in mtre.treelist]
mtre.draw_tree_grid(
    nrows=3, ncols=4, start=20,
    tip_labels_align=True,
    tip_labels_style={"font-size": "9px"},
);
# Same scaffold with mrbayes ("mb") on larger 1 Mb windows.
# select a scaffold idx, start, and end positions
ts = ipa.treeslider(
    name="test",
    data="/home/deren/Downloads/ref_pop2.seqs.hdf5",
    workdir="analysis-treeslider",
    scaffold_idxs=2,
    window_size=1000000,
    slide_size=1000000,
    inference_method="mb",
    inference_args={"N": 0, "T": 4},
    minsnps=10,
    mincov=9,
    consensus_reduce=True,
    imap={
        "reference": ["reference"],
        "virg": ["TXWV2", "LALC2", "SCCU3", "FLSF33", "FLBA140"],
        "mini": ["FLSF47", "FLMO62", "FLSA185", "FLCK216"],
        "gemi": ["FLCK18", "FLSF54", "FLWO6", "FLAB109"],
        "bran": ["BJSL25", "BJSB3", "BJVL19"],
        "fusi-N": ["TXGR3", "TXMD3"],
        "fusi-S": ["MXED8", "MXGT4"],
        "sagr": ["CUVN10", "CUCA4", "CUSV6"],
        "oleo": ["CRL0030", "HNDA09", "BZBB1", "MXSA3017"],
    },
)
# Raxml again on 2 Mb windows without consensus reduction.
# select a scaffold idx, start, and end positions
ts = ipa.treeslider(
    name="test",
    data="/home/deren/Downloads/ref_pop2.seqs.hdf5",
    workdir="analysis-treeslider",
    scaffold_idxs=2,
    window_size=2000000,
    slide_size=2000000,
    inference_method="raxml",
    inference_args={"N": 100, "T": 4},
    minsnps=10,
    mincov=9,
    imap={
        "reference": ["reference"],
        "virg": ["TXWV2", "LALC2", "SCCU3", "FLSF33", "FLBA140"],
        "mini": ["FLSF47", "FLMO62", "FLSA185", "FLCK216"],
        "gemi": ["FLCK18", "FLSF54", "FLWO6", "FLAB109"],
        "bran": ["BJSL25", "BJSB3", "BJVL19"],
        "fusi-N": ["TXGR3", "TXMD3"],
        "fusi-S": ["MXED8", "MXGT4"],
        "sagr": ["CUVN10", "CUCA4", "CUSV6"],
        "oleo": ["CRL0030", "HNDA09", "BZBB1", "MXSA3017"],
    },
)
# this is the tree inference command that will be used
ts.show_inference_command()
ts.run(auto=True, force=True)
# the tree table is automatically saved to disk as a CSV during .run()
ts.tree_table.head()
# filter to only windows with >50 SNPS
trees = ts.tree_table[ts.tree_table.snps > 50].tree.tolist()
# load all trees into a multitree object
mtre = toytree.mtree(trees)
# root trees and collapse nodes with <50 bootstrap support
mtre.treelist = [
    i.root("reference").collapse_nodes(min_support=50)
    for i in mtre.treelist
]
# draw the first 12 trees in a grid
mtre.draw_tree_grid(
    nrows=3, ncols=4, start=0,
    tip_labels_align=True,
    tip_labels_style={"font-size": "9px"},
);
# filter to only windows with >50 SNPS (this could have been done in run)
trees = ts.tree_table[ts.tree_table.snps > 50].tree.tolist()
# load all trees into a multitree object
mtre = toytree.mtree(trees)
# root trees
mtre.treelist = [i.root("reference") for i in mtre.treelist]
# infer a consensus tree to get best tip order
ctre = mtre.get_consensus_tree()
# draw the first 12 trees in a grid
mtre.draw_cloud_tree(
    width=400,
    height=400,
    fixed_order=ctre.get_tip_labels(),
    use_edge_lengths=False,
);
# Sliding (overlapping) 500 kb windows stepped by 100 kb over three scaffolds.
# select a scaffold idx, start, and end positions
ts = ipa.treeslider(
    name="chr1_w500K_s100K",
    data=data,
    workdir="analysis-treeslider",
    scaffold_idxs=[0, 1, 2],
    window_size=500000,
    slide_size=100000,
    minsnps=10,
    inference_method="raxml",
    inference_args={"m": "GTRCAT", "N": 10, "f": "d", 'x': None},
)
# this is the tree inference command that will be used
ts.show_inference_command()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Affine decomposition
Step2: 4. Main program
Step3: 4.2. Create Finite Element space (Taylor-Hood P2-P1)
Step4: 4.3. Allocate an object of the Stokes class
Step5: 4.4. Prepare reduction with a POD-Galerkin method
Step6: 4.5. Perform the offline phase
Step7: 4.6. Perform an online solve
Step8: 4.7. Perform an error analysis
Step9: 4.8. Perform a speedup analysis
|
<ASSISTANT_TASK:>
Python Code:
from dolfin import *
from rbnics import *
from sampling import LinearlyDependentUniformDistribution
@PullBackFormsToReferenceDomain()
@AffineShapeParametrization("data/t_bypass_vertices_mapping.vmp")
class Stokes(StokesProblem):
    """Parametrized Stokes problem on a T-bypass geometry.

    The two decorators handle the 6-parameter shape deformation by pulling the
    variational forms back to the reference domain, which is why every theta
    factor below is a constant.  Operators a/b/bt/f/g, the inlet Dirichlet
    data, and the velocity/pressure inner products are exposed through the
    standard RBniCS compute_theta/assemble_operator pair.
    """
    # Default initialization of members
    def __init__(self, V, **kwargs):
        # Call the standard initialization
        StokesProblem.__init__(self, V, **kwargs)
        # ... and also store FEniCS data structures for assembly
        assert "subdomains" in kwargs
        assert "boundaries" in kwargs
        self.subdomains, self.boundaries = kwargs["subdomains"], kwargs["boundaries"]
        up = TrialFunction(V)
        (self.u, self.p) = split(up)
        vq = TestFunction(V)
        (self.v, self.q) = split(vq)
        self.dx = Measure("dx")(subdomain_data=self.subdomains)
        self.ds = Measure("ds")(subdomain_data=self.boundaries)
        # ... as well as forcing terms and inlet velocity
        # Parabolic inlet profile on 1 <= x[1] <= 2, zero vertical component.
        self.inlet = Expression(("- 1./0.25*(x[1] - 1)*(2 - x[1])", "0."), degree=2)
        self.f = Constant((0.0, 0.0))
        self.g = Constant(0.0)
    # Return custom problem name
    def name(self):
        return "Stokes2POD"
    # Return theta multiplicative terms of the affine expansion of the problem.
    @compute_theta_for_supremizers
    def compute_theta(self, term):
        if term == "a":
            theta_a0 = 1.0
            return (theta_a0, )
        elif term in ("b", "bt"):
            theta_b0 = 1.0
            return (theta_b0, )
        elif term == "f":
            theta_f0 = 1.0
            return (theta_f0, )
        elif term == "g":
            theta_g0 = 1.0
            return (theta_g0, )
        elif term == "dirichlet_bc_u":
            theta_bc0 = 1.
            return (theta_bc0, )
        else:
            raise ValueError("Invalid term for compute_theta().")
    # Return forms resulting from the discretization of the affine expansion of the problem operators.
    @assemble_operator_for_supremizers
    def assemble_operator(self, term):
        dx = self.dx
        if term == "a":
            u = self.u
            v = self.v
            a0 = inner(grad(u), grad(v)) * dx
            return (a0, )
        elif term == "b":
            u = self.u
            q = self.q
            b0 = - q * div(u) * dx
            return (b0, )
        elif term == "bt":
            p = self.p
            v = self.v
            bt0 = - p * div(v) * dx
            return (bt0, )
        elif term == "f":
            v = self.v
            f0 = inner(self.f, v) * dx
            return (f0, )
        elif term == "g":
            q = self.q
            g0 = self.g * q * dx
            return (g0, )
        elif term == "dirichlet_bc_u":
            # Inlet profile on boundary id 1, no-slip walls on boundary id 3.
            bc0 = [DirichletBC(self.V.sub(0), self.inlet, self.boundaries, 1),
                   DirichletBC(self.V.sub(0), Constant((0.0, 0.0)), self.boundaries, 3)]
            return (bc0, )
        elif term == "inner_product_u":
            u = self.u
            v = self.v
            x0 = inner(grad(u), grad(v)) * dx
            return (x0, )
        elif term == "inner_product_p":
            p = self.p
            q = self.q
            x0 = inner(p, q) * dx
            return (x0, )
        else:
            raise ValueError("Invalid term for assemble_operator().")
# 4.1: load the T-bypass mesh with its subdomain and boundary markers.
mesh = Mesh("data/t_bypass.xml")
subdomains = MeshFunction("size_t", mesh, "data/t_bypass_physical_region.xml")
boundaries = MeshFunction("size_t", mesh, "data/t_bypass_facet_region.xml")
# 4.2: Taylor-Hood element (P2 velocity, P1 pressure); "s" names the supremizer
# component sharing the velocity space.
element_u = VectorElement("Lagrange", mesh.ufl_cell(), 2)
element_p = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
element = MixedElement(element_u, element_p)
V = FunctionSpace(mesh, element, components=[["u", "s"], "p"])
# 4.3: instantiate the problem over the 6-dimensional geometric parameter box.
problem = Stokes(V, subdomains=subdomains, boundaries=boundaries)
mu_range = [
    (0.5, 1.5),
    (0.5, 1.5),
    (0.5, 1.5),
    (0.5, 1.5),
    (0.5, 1.5),
    (0., pi / 6.)
]
problem.set_mu_range(mu_range)
# 4.4: POD-Galerkin reduction, at most 25 basis functions or 1e-6 POD tolerance.
reduction_method = PODGalerkin(problem)
reduction_method.set_Nmax(25)
reduction_method.set_tolerance(1e-6)
# 4.5: offline phase; the lifting function is computed at the reference shape.
lifting_mu = (1.0, 1.0, 1.0, 1.0, 1.0, 0.0)
problem.set_mu(lifting_mu)
reduction_method.initialize_training_set(100, sampling=LinearlyDependentUniformDistribution())
reduced_problem = reduction_method.offline()
# 4.6: one online solve at the maximum bypass angle.
online_mu = (1.0, 1.0, 1.0, 1.0, 1.0, pi / 6.)
reduced_problem.set_mu(online_mu)
reduced_solution = reduced_problem.solve()
plot(reduced_solution, reduced_problem=reduced_problem, component="u")
plot(reduced_solution, reduced_problem=reduced_problem, component="p")
# 4.7-4.8: error and speedup analyses over a fresh testing set.
reduction_method.initialize_testing_set(100, sampling=LinearlyDependentUniformDistribution())
reduction_method.error_analysis()
reduction_method.speedup_analysis()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Description
Step2: SOLUTION
Step3: The reluctances of the regions in core are
Step4: And the reluctances are hence
Step5: Then the total reluctance of the core is $\mathcal{R}_\text{TOT} = \mathcal{R}_1 + \frac{(\mathcal{R}_2 + \mathcal{R}_3)\mathcal{R}_4}{\mathcal{R}_2 + \mathcal{R}_3 + \mathcal{R}_4}$.
Step6: and the magnetomotive force is $\mathcal{F} = \mathcal{N} \mathcal{I}$
Step7: The total flux in the core $\phi_\text{TOT}$ is equal to the flux in the left leg $\phi_\text{left} = \frac{\mathcal{F}}{\mathcal{R}_\text{TOT}}$ , which is
Step8: The fluxes in the center and right legs can be found by the "flux divider rule", which is analogous to the current divider rule.
Step9: The flux in the right leg $\phi_\text{right} = \frac{\mathcal{R}2 + \mathcal{R}_3}{\mathcal{R}_2 + \mathcal{R}_3 + \mathcal{R}_4}\phi\text{TOT}$ is
Step10: The flux densities $B = \frac{\phi}{A}$ are
|
<ASSISTANT_TASK:>
Python Code:
%pylab notebook
%precision 4
# scipy.constants supplies mu_0, the vacuum permeability.
from scipy import constants as c # we like to use some constants
mu_r = 2000
mu = mu_r * c.mu_0
l1 = 1.08 # [m]
l2 = 0.34 # [m]
l3 = 0.0005 # [m]
l4 = 1.08 # [m]
A1 = 0.09 * 0.05 # [m²]
A2 = 0.15 * 0.05 # [m²]
A3 = 0.15 * 0.05 * 1.05 # [m²] 5% fringing
A4 = 0.09 * 0.05 # [m²]
R1 = l1 / (mu * A1) # At /Wb = At/Vs
R2 = l2 / (mu * A2) # At /Wb = At/Vs
R3 = l3 / (c.mu_0 * A3) # At /Wb = At/Vs
R4 = l4 / (mu * A4) # At /Wb = At/Vs
print('R1 = {:.1f} kAt/Wb'.format(R1/1000) )
print('R2 = {:.1f} kAt/Wb'.format(R2/1000) )
print('R3 = {:.1f} kAt/Wb'.format(R3/1000) )
print('R4 = {:.1f} kAt/Wb'.format(R4/1000) )
Rtot = R1 + ((R2 + R3) * R4) / (R2 + R3 + R4)
print('Rtot = {:.1f} kAt/Wb'.format(Rtot/1000))
N = 100 # t given in description
I = 2.0 # A given in description
F = N * I
phi_left = F / Rtot
print('phi_left = {:.3f} mWb'.format(phi_left*1000))
phi_center = R4 / (R2 + R3 + R4) * phi_left
print('phi_center = {:.3f} mWb'.format(phi_center*1000))
phi_right = (R2 + R3) / (R2 + R3 + R4) * phi_left
print('phi_right = {:.3f} mWb'.format(phi_right*1000))
B_left = phi_left / A1
B_center = phi_center / A2
B_right = phi_right / A4
print('B_left = {:.3f} T'.format(B_left))
print('B_center = {:.3f} T'.format(B_center))
print('B_right = {:.3f} T'.format(B_right))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Does the plot above look like a sine function? Why or why not?
Step2: Question 3
Step3: How did your plot of sin(x) change appearance as you decreased the spacing of your x-array from dx = 4.0 to dx = 0.1? What does this tell you?
|
<ASSISTANT_TASK:>
Python Code:
# put your code here.
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# Coarse sampling: dx = 4.0 gives only 5 points on [0, 20).
x1 = np.arange(0.0,20.0,4.0)
y1 = np.sin(x1)
plt.plot(x1,y1,'r-')
# put your code here.
# Medium sampling: dx = 1.0, overlaid on the coarse curve.
x2 = np.arange(0.0,20.0,1.0)
y2 = np.sin(x2)
plt.plot(x1,y1,'r-',x2,y2,'b-')
# put your code here
# Fine sampling: dx = 0.1 resolves the sine curve smoothly.
x3 = np.arange(0.0,20.0,0.1)
y3 = np.sin(x3)
plt.plot(x1,y1,'r-',x2,y2,'b-',x3,y3,'k--')
# put your code here!
# At each medium-resolution point print sin+cos (varies) and
# sin^2 + cos^2 (always 1, the Pythagorean identity).
import math
for val in np.nditer(x2):
    v1 = math.sin(val) + math.cos(val)
    v2 = math.sin(val)**2 + math.cos(val)**2
    print(val,v1,v2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's look at the Marvin Results object. We can see how many results were returned with r.count and r.totalcount
Step2: Queries returning more than 1000 results are paginated into chunks of 100. For anything less than 1000, the query will return everything. Totalcount shows the total result count, and count shows the returned count in just that page.
Step3: You can access specific values of the results through tuple indexing or via the named attribute, but this is not recommended in general.
Step4: But be careful: names using the full table.parameter syntax cannot be accessed via the named attribute. This syntax is returned when two parameters with non-unique names are returned, like ifu.name and bintype.name. Instead we recommend using the Marvin Results getListOf and getDictOf methods.
Step5: To see what columns are available, use r.columns and r.coltoparam
Step6: if you want to retrieve the results as a list of dictionaries or dictionary of lists, use getDictOf
Step7: you can change the format returned using the format_type keyword. format_type='dictlist' returns a dictionary of lists getDictOf returns a list of dictionaries
Step8: Retrieving More Results
Step9: Sorting results
Step10: Converting to Marvin Tool Objects
Step11: Save your Results and restore them
|
<ASSISTANT_TASK:>
Python Code:
from marvin import config
config.setRelease('MPL-4')
from marvin.tools.query import Query, Results, doQuery
# make a query
myquery = 'nsa.sersic_logmass > 10.3 AND nsa.z < 0.1'
q = Query(searchfilter=myquery)
# run a query
r = q.run()
print(r)
# totalcount is the full result count; count is the size of this page.
print('Total count', r.totalcount)
print('Page count', r.count)
r.results
# A single row behaves like a named tuple: index by position or attribute.
res = r.results[0]
print('single row', res)
print('mangaid', res[0])
print('mangaid', res.mangaid)
# what are the columns
print('columns', r.columns)
print(res.sersic_logmass)
# if you want a retrieve a list of a single parameter, use getListOf
mangaid = r.getListOf('mangaid')
print(mangaid)
# these are the column names in the results
print('columns', r.columns)
# by default, getDictOf returns a list of dictionaries, that you can iterate over
mylist = r.getDictOf()
print(mylist)
print('mangaid', mylist[0]['cube.mangaid'], mylist[1]['cube.mangaid'])
# format_type='dictlist' flips the layout to a dictionary of lists.
mydict = r.getDictOf(format_type='dictlist')
print(mydict)
print('keys', mydict.keys())
print('mangaid', mydict['cube.mangaid'])
# Pagination helpers: step through the paginated query results.
# get the next set of results
r.getNext()
# get only the next 10 results
r.getNext(chunk=10)
# get the previous 20 results
r.getPrevious(chunk=20)
# get a subset of results giving the starting index and number limit
# total results
print('total', r.totalcount)
# let's get a subset of 10 rows starting at 300
r.getSubset(300, limit=10)
# let's sort by redshift. Default is in ascending order
r.sort('z')
# or in descending order
r.sort('nsa.z', order='desc')
# See some results
r.results[0:3]
# Let's convert our results to Marvin Cube objects
r.columns
r.convertToTool('cube')
# Your new objects are stored as a list in your results called objects
r.objects
# We strongly recommend saving to a Marvin pickle file (.mpf), so that you can restore the Results object later
r.save('results.mpf')
restored = Results.restore('results.mpf')
# Saving to CSV, JSON, xlsx, txt, or FITS
df = r.toDataFrame()
df.to_csv('results.csv')
df.to_json('results.json')
df.to_excel('results.xlsx')
table = r.toTable()
table.write('results.txt')
r.toFits('results.fits')
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Data was collected using this method. It uses the Twitter API to go some days back in time, and then continuously streaming forward in real-time.
Step3: I had collected around 3 million metoo tweets, and they were in sqlite3 database format. We won't run the parsing process live here but the code below shows how it is done.
Step4: For the continued analysis, I start from a csv-file that was created in this way.
Step5: Now, let's clean the tweets, and add the clean version of the tweets as a column.
Step6: We want to group the tweets by day, so add a column that says which day they were posted.
Step7: Group the tweets (tweets) by day.
Step8: View the grouping.
Step9: Now groups is a list of tuples where element [0] is the group name, and element [1] is a pandas series of all text belonging to the group.
Step10: The series is iterable.
Step11: For topic modelling, we create one text document per group (day in this case).
Step12: Now we have the topicmodelling documents.
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import HTML
import os
# Embed a local video in the notebook. The markup must be passed to
# HTML() as a string; the triple quotes had been stripped from this
# snippet, which made it a syntax error.
HTML("""
<video width="320" height="240" controls>
  <source src="btf.m4v" type="video/mp4">
</video>
""")
import sqlite3
import pandas as pd
pd.set_option('display.max_colwidth', -1)
## Read sqlite query results into a pandas DataFrame
# conn = sqlite3.connect("tweets.db")
# tweets_df = pd.read_sql_query("SELECT * from tweets", conn)
## Do some cleanup
#tweets_df = tweets_df.replace({'\n': ' '}, regex=True) # remove linebreaks in the dataframe
#tweets_df = tweets_df.replace({'\t': ' '}, regex=True) # remove tabs in the dataframe
#tweets_df = tweets_df.replace({'\r': ' '}, regex=True) # remove carriage return in the dataframe
# Read the csv into pandas
# NOTE(review): DataFrame.from_csv is deprecated/removed in modern
# pandas; use pd.read_csv if this is ever re-run.
metoo_df = pd.DataFrame.from_csv("backup_metoo_171221.csv")
len(metoo_df)
metoo_df.head()
# Clean each tweet: drop hashtags, links, mentions, retweet markers and
# non-alphabetic tokens, then rejoin the surviving words.
newlist = []
for tweet in metoo_df['text']:
    #print(tweet)
    tweet = [word.lower() for word in tweet.split() if not word.startswith('#')]
    tweet = [word for word in tweet if not word.startswith('htt')]
    tweet = [word for word in tweet if not word.startswith('@')]
    tweet = [word for word in tweet if not '/' in word]
    tweet = [word for word in tweet if word.isalpha()]
    tweet = [word for word in tweet if not word == "rt"]
    tweet = [word.strip(".") for word in tweet]
    tweet = [word.strip() for word in tweet]
    tweet = [word for word in tweet if not word == "…"]
    tweet = " ".join(tweet)
    newlist.append(tweet)
tweets = pd.Series(newlist)
tweets
metoo_df = metoo_df.reset_index(drop=True)
metoo_df['tweets'] = tweets
metoo_df.head()
# Derive the posting day; assumes created_at begins with the date in its
# first 10 characters (e.g. YYYY-MM-DD) — TODO confirm the timestamp format.
daylist = [day[:10] for day in metoo_df.created_at]
daylist
days = pd.Series(daylist)
metoo_df['day'] = days
metoo_df.head()
# Group the cleaned tweets by posting day.
tweets_by_day = metoo_df['tweets'].groupby(metoo_df['day'])
groups = list(tweets_by_day)
groups[0][0]
groups[0][1]
for i in groups[41][1]:
    print(i)
os.getcwd()
# Write one text document per day (for topic modelling) into tmfiles/.
try:
    os.mkdir("tmfiles")
except:
    print("Directory already exists")
os.chdir("tmfiles") # run once
os.getcwd()
for c,i in enumerate(groups):
    o = open(str(groups[c][0]) + ".txt", "w")
    for line in i[1]:
        o.write(line + "\n")
os.chdir('..')
os.getcwd()
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In order to know the altitude and azimuth of a fixed target in the sky we will mainly need to know
Step2: You can also search by its name if it is in CDS...
Step3: Now we should specify where the observer will be on Earth
Step4: But maybe we are picky and want the exact location (or specify a different location that is not present in the database...)
Step5: Finally we need to set up the time, which by default is set in UTC.
Step6: Let's ask python if we can see the Nebula tonight from la palma
Step7: We assume that at 11.30 pm will be dark, but let's make sure...
Step8: You might get an IERS warning (International Earth Rotation and Reference Systems Service) to update the Earth Location.
Step9: Calculate rise/set/meridian transit times
Step10: By default it set's the nearest sunset but you can specify also next or previous.
Step11: Similarly, we can ask when the target will be raising or setting
Step12: Calculate alt/az positions for targets and Airmass
Step13: With the integrated sec function we can easily get the Airmass
Step14: We can now aim to make an altitude plot scanning the altitude of our target every hour
Step15: Fortunately, there is a function that does it (much faster) within the day around the date we provide
Step16: We can also give a range of dates to focus on a specific region of time (dark time)
Step17: Making sky charts
Step18: Finder Chart Image
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import math
import matplotlib.pyplot as plt
import seaborn
from astropy.io import fits
from astropy import units as u
from astropy.coordinates import SkyCoord
# Global matplotlib styling for all plots in this notebook.
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams['font.size'] = 14
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['xtick.labelsize'] = 13
plt.rcParams['ytick.labelsize'] = 13
plt.rcParams['axes.titlesize'] = 14
plt.rcParams['legend.fontsize'] = 13
from astropy.coordinates import SkyCoord
from astroplan import FixedTarget
# Define the target (Eagle Nebula, M16) from explicit ICRS coordinates.
# NOTE(review): the declination string contains a Unicode minus '−';
# confirm astropy parses it, otherwise replace with an ASCII '-'.
coordinates = SkyCoord('18h18m48.0s', '−13d49m0.0s', frame='icrs')
eagle_nebula = FixedTarget(name='M16', coord=coordinates)
print (eagle_nebula)
# Equivalent: resolve the target by name (CDS name lookup).
eagle_nebula = FixedTarget.from_name('M16')
print (eagle_nebula)
from astroplan import Observer
# Observer from astroplan's built-in site registry.
observer = Observer.at_site('lapalma')
print (observer)
import astropy.units as u
from astropy.coordinates import EarthLocation
#from pytz import timezone
from astroplan import Observer
# Or build the observer from an explicit geodetic location (WHT, La Palma).
longitude = '-17d52m54s'
latitude = '28d45m38s'
elevation = 2344 * u.m
location = EarthLocation.from_geodetic(longitude, latitude, elevation)
observer = Observer(name='WHT',
               location=location,
               pressure=0.615 * u.bar,
               relative_humidity=0.04,
               temperature=18 * u.deg_C,
               #timezone=timezone('US/Hawaii'),
               description="Our beloved William Herschel Telescope")
print (observer)
from astropy.time import Time
# Times are UTC by default.
time = Time('2017-09-15 23:30:00')
observer.target_is_up(time, eagle_nebula)
observer.is_night(time)
from astroplan import download_IERS_A
# Refresh the IERS Earth-rotation tables used for precise times.
download_IERS_A()
# Sunset and the civil/nautical/astronomical twilight times.
observer.sun_set_time(time, which='nearest').iso
observer.sun_set_time(time, which='next').iso
observer.twilight_evening_civil(time, which='nearest').iso
observer.twilight_evening_nautical(time, which='nearest').iso
observer.twilight_evening_astronomical(time, which='nearest').iso
observer.target_rise_time(time, eagle_nebula).iso
observer.target_set_time(time, eagle_nebula).iso
# Altitude/azimuth of the target and its airmass (sec z).
altaz_eagle = observer.altaz(time, eagle_nebula)
altaz_eagle.alt, altaz_eagle.az
altaz_eagle.secz
from astropy.time import TimeDelta
# Manual scan: sample the target once per hour between sunset and
# sunrise, clamping to zero when it is below the horizon.
# NOTE(review): despite the name, airmass_list stores the *altitude*
# value (alt.value), not the airmass.
time_list = []
airmass_list = []
current_time = observer.sun_set_time(time, which='nearest')
while current_time < observer.sun_rise_time(time, which='nearest'):
    current_altaz = observer.altaz(current_time, eagle_nebula)
    if current_altaz.alt > 0:
        airmass_list.append(current_altaz.alt.value)
    else:
        airmass_list.append(0)
    time_list.append(current_time.datetime)
    current_time += TimeDelta(3600, format='sec')
plt.plot(time_list, airmass_list)
from astroplan.plots import plot_airmass
# Built-in airmass plot for the day around a single time...
middle_of_the_night = Time('2017-09-16 01:00:00')
plot_airmass(targets=eagle_nebula,
             observer=observer,
             time=middle_of_the_night,
             #brightness_shading=True,
             #altitude_yaxis=True
            )
plt.legend()
from astroplan.plots import dark_style_sheet
# ...or over an explicit grid of 75 times spanning the night, for
# several targets at once.
start_time = observer.sun_set_time(time, which='nearest')
end_time = observer.sun_rise_time(time, which='nearest')
delta_t = end_time - start_time
observe_time = start_time + delta_t*np.linspace(0, 1, 75)
andromeda = FixedTarget.from_name('M31')
pleiades = FixedTarget.from_name('M45')
some_nice_stuff_to_look_tonight = [eagle_nebula, andromeda, pleiades]
plot_airmass(targets=some_nice_stuff_to_look_tonight,
             observer=observer,
             time=observe_time,
             brightness_shading=True,
             altitude_yaxis=True,
             #style_sheet=dark_style_sheet
            )
plt.legend()
from astroplan.plots import plot_sky
# Sky chart (alt/az) for the three targets at one instant.
plot_sky(eagle_nebula, observer, middle_of_the_night)
plot_sky(pleiades, observer, middle_of_the_night)
plot_sky(andromeda, observer, middle_of_the_night)
plt.legend()
# Track one target across a range of times.
observe_time = Time('2000-03-15 17:00:00')
observe_time = observe_time + np.linspace(-4, 5, 10)*u.hour
plot_sky(pleiades, observer, observe_time)
plt.legend(loc='center left', bbox_to_anchor=(1.25, 0.5))
plt.show()
from astroplan.plots import plot_finder_image
# Finder chart from the DSS survey around the target.
plot_finder_image(eagle_nebula, survey='DSS', log=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Batch Normalization
Step2: Batch normalization
Step3: Batch Normalization
Step4: Test on simple example - Remove the mean only
Step5: The direct implementation is a bit trickier.
Step6: Batch Normalization
Step7: Fully Connected Nets with Batch Normalization
Step8: Batchnorm for deep networks
Step9: Run the following to visualize the results from two networks trained above. You should find that using batch normalization helps the network to converge much faster.
Step10: Batch normalization and initialization
|
<ASSISTANT_TASK:>
Python Code:
# As usual, a bit of setup
# cs231n is the course-local helper package shipped with the assignment.
import time
import numpy as np
import matplotlib.pyplot as plt
# from cs231n.classifiers.fc_net import *
# from cs231n.classifiers.fc_net import *
from cs231n.classifiers.fc_net import FullyConnectedNet
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
    """Return the maximum relative error between arrays x and y.

    The denominator is floored at 1e-8 so near-zero entries do not
    cause division by zero. (The triple quotes of this docstring had
    been stripped, which made the function a syntax error.)
    """
    return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
import os
os.getcwd()
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in data.items():
    print('%s: ' % k, v.shape)
# Check the training-time forward pass by checking means and variances
# of features both before and after batch normalization
# Simulate the forward pass for a two-layer network
from cs231n.layers import *
N, D1, D2, D3 = 200, 50, 60, 3
X = np.random.randn(N, D1)
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
a = np.maximum(0, X.dot(W1)).dot(W2)
print('Before batch normalization:')
print(' means: ', a.mean(axis=0))
print(' stds: ', a.std(axis=0))
# Means should be close to zero and stds close to one
print('After batch normalization (gamma=1, beta=0)')
a_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'})
print(' mean: ', a_norm.mean(axis=0))
print(' std: ', a_norm.std(axis=0))
# Now means should be close to beta and stds close to gamma
gamma = np.asarray([1.0, 2.0, 3.0])
beta = np.asarray([11.0, 12.0, 13.0])
a_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})
print('After batch normalization (nontrivial gamma, beta)')
print(' means: ', a_norm.mean(axis=0))
print(' stds: ', a_norm.std(axis=0))
# Check the test-time forward pass by running the training-time
# forward pass many times to warm up the running averages, and then
# checking the means and variances of activations after a test-time
# forward pass.
N, D1, D2, D3 = 200, 50, 60, 3
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
bn_param = {'mode': 'train'}
gamma = np.ones(D3)
beta = np.zeros(D3)
# Warm up the running mean/var estimates with many training-mode passes.
# range() replaces the Python-2-only xrange().
for t in range(200):
    X = np.random.randn(N, D1)
    a = np.maximum(0, X.dot(W1)).dot(W2)
    batchnorm_forward(a, gamma, beta, bn_param)
bn_param['mode'] = 'test'
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
a_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)
# Means should be close to zero and stds close to one, but will be
# noisier than training-time forward passes.
# Python 3 print() for consistency with the rest of the notebook.
print('After batch normalization (test-time):')
print(' means: ', a_norm.mean(axis=0))
print(' stds: ', a_norm.std(axis=0))
# Gradient check batchnorm backward pass
N, D = 4, 5
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
# NOTE(review): fg and fb deliberately ignore their argument; this only
# works if the numerical-gradient helper perturbs gamma/beta in place —
# confirm against cs231n/gradient_check.py.
fx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]
fg = lambda a: batchnorm_forward(x, gamma, beta, bn_param)[0]
fb = lambda b: batchnorm_forward(x, gamma, beta, bn_param)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
da_num = eval_numerical_gradient_array(fg, gamma, dout)
db_num = eval_numerical_gradient_array(fb, beta, dout)
_, cache = batchnorm_forward(x, gamma, beta, bn_param)
dx, dgamma, dbeta = batchnorm_backward(dout, cache)
# Python 3 print() for consistency with the rest of the notebook
# (these lines used Python 2 print statements).
print('dx error: ', rel_error(dx_num, dx))
print('dgamma error: ', rel_error(da_num, dgamma))
print('dbeta error: ', rel_error(db_num, dbeta))
# Fixture for the hand-rolled mean-removal forward/backward check below.
N, D = 4, 5
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
# Forward pass
def forward(x):
    """Remove the per-column mean of x; return the result and a cache."""
    n_rows = x.shape[0]
    # Step 1: column means of the batch
    col_mean = 1 / float(n_rows) * np.sum(x, axis=0)
    # Step 2: subtract the mean from every row
    centered = x - col_mean
    # Step 3: the centered data is the output; cache what backward needs
    return centered, (col_mean, centered)
# Backward pass
def backward(dout, cache):
    """Backprop through the mean-removal forward pass.

    Fix: derive the batch size and shape from the cached activations
    instead of silently relying on the module-level globals ``x`` and
    ``N`` (the original read both globals inside the function).
    """
    mu, xcenter = cache
    N = xcenter.shape[0]
    # Backprop
    # Step 3: out = xcenter, so the gradient passes straight through
    dxcenter = dout
    # Step 2: xcenter = x - mu contributes dout directly to dx...
    dx = np.ones(xcenter.shape) * dxcenter
    # ...and -sum(dout) to the mean
    dmu = -np.sum(dxcenter, 0)
    # Step 1: mu = sum(x)/N spreads dmu/N over every row
    dx += 1 / float(N) * np.ones(xcenter.shape) * dmu
    return dx
# Test of the implementation with numerical gradient
fx = lambda x: forward(x)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
_, cache = forward(x)
dx = backward(dout, cache)
# Python 3 print() for consistency with the rest of the notebook
# (this line used a Python 2 print statement).
print('dx error: ', rel_error(dx_num, dx))
def backward_alt(dout, cache):
    """Simplified backward pass: dx = dout - column-mean(dout).

    Collapses the three staged steps of ``backward`` into one
    expression. Fix: take the batch size from ``dout`` instead of the
    module-level global ``N`` the original depended on.
    """
    mu, xcenter = cache
    N = dout.shape[0]
    # Backprop (all steps folded together)
    dx = dout - 1. / float(N) * np.sum(dout, axis=0)
    return dx
# The collapsed one-line backward pass should agree with the staged one.
dx2 = backward_alt(dout,cache)
rel_error(dx2,dx)
# Compare the staged and the simplified batchnorm backward passes on a
# larger problem: they must agree numerically, and the alternative
# version should be faster.
N, D = 100, 500
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
out, cache = batchnorm_forward(x, gamma, beta, bn_param)
t1 = time.time()
dx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)
t2 = time.time()
dx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)
t3 = time.time()
# Python 3 print() for consistency with the rest of the notebook
# (these lines used Python 2 print statements).
print('dx difference: ', rel_error(dx1, dx2))
print('dgamma difference: ', rel_error(dgamma1, dgamma2))
print('dbeta difference: ', rel_error(dbeta1, dbeta2))
print('speedup: %.2fx' % ((t2 - t1) / (t3 - t2)))
# Gradient-check a FullyConnectedNet with batch normalization, both
# without and with L2 regularization.
N, D, H1, H2, C = 2, 10, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for reg in [0, 3.14]:
    # Python 3 print() for consistency with the rest of the notebook
    # (this cell used Python 2 print statements).
    print('Running check with reg = ', reg)
    model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
                              reg=reg, weight_scale=5e-2, dtype=np.float64,
                              use_batchnorm=True)
    loss, grads = model.loss(X, y)
    print('Initial loss: ', loss)
    for name in sorted(grads):
        f = lambda _: model.loss(X, y)[0]
        grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
        print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))
    if reg == 0: print()
# Try training a very deep net with batchnorm
hidden_dims = [100, 100, 100, 100 ,100]
# Train on a small 1000-image subset so convergence differences between
# the two nets show up quickly.
num_train = 1000
small_data = {
    'X_train': data['X_train'][:num_train],
    'y_train': data['y_train'][:num_train],
    'X_val': data['X_val'],
    'y_val': data['y_val'],
}
weight_scale = 2e-2
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
# Identical training setup for both models; only use_batchnorm differs.
bn_solver = Solver(bn_model, small_data,
                num_epochs=30, batch_size=50,
                update_rule='adam',
                optim_config={
                  'learning_rate': 1e-3,
                },
                verbose=True, print_every=200)
bn_solver.train()
solver = Solver(model, small_data,
                num_epochs=30, batch_size=50,
                update_rule='adam',
                optim_config={
                  'learning_rate': 1e-3,
                },
                verbose=True, print_every=200)
solver.train()
# Three stacked panels comparing baseline vs batchnorm: training loss,
# training accuracy, validation accuracy.
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label='baseline')
plt.plot(bn_solver.loss_history, 'o', label='batchnorm')
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label='baseline')
plt.plot(bn_solver.train_acc_history, '-o', label='batchnorm')
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label='baseline')
plt.plot(bn_solver.val_acc_history, '-o', label='batchnorm')
# Add a legend to every panel.
for i in [1, 2, 3]:
    plt.subplot(3, 1, i)
    plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
# Sweep the weight-initialization scale and, at each scale, train a
# 7-layer net with and without batch normalization.
hidden_dims = [50, 50, 50, 50, 50, 50, 50]
num_train = 1000
small_data = {
    'X_train': data['X_train'][:num_train],
    'y_train': data['y_train'][:num_train],
    'X_val': data['X_val'],
    'y_val': data['y_val'],
}
bn_solvers = {}
solvers = {}
weight_scales = np.logspace(-4, 0, num=20)
for i, weight_scale in enumerate(weight_scales):
    # Python 3 print() for consistency with the rest of the notebook
    # (this line used a Python 2 print statement).
    print('Running weight scale %d / %d' % (i + 1, len(weight_scales)))
    bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
    model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
    bn_solver = Solver(bn_model, small_data,
                       num_epochs=10, batch_size=50,
                       update_rule='adam',
                       optim_config={
                           'learning_rate': 1e-3,
                       },
                       verbose=False, print_every=200)
    bn_solver.train()
    bn_solvers[weight_scale] = bn_solver
    solver = Solver(model, small_data,
                    num_epochs=10, batch_size=50,
                    update_rule='adam',
                    optim_config={
                        'learning_rate': 1e-3,
                    },
                    verbose=False, print_every=200)
    solver.train()
    solvers[weight_scale] = solver
# Stray diagnostic: mean training loss at the second-largest scale.
np.array(solvers[weight_scales[-2]].loss_history).mean()
# Plot results of weight scale experiment
# Summarise each run by its best accuracies and mean training loss.
best_train_accs, bn_best_train_accs = [], []
best_val_accs, bn_best_val_accs = [], []
final_train_loss, bn_final_train_loss = [], []
for ws in weight_scales:
    best_train_accs.append(max(solvers[ws].train_acc_history))
    bn_best_train_accs.append(max(bn_solvers[ws].train_acc_history))
    best_val_accs.append(max(solvers[ws].val_acc_history))
    bn_best_val_accs.append(max(bn_solvers[ws].val_acc_history))
    # NOTE(review): despite the name, this is the *mean* loss over the
    # whole training run, not the final loss.
    final_train_loss.append(np.mean(solvers[ws].loss_history))
    bn_final_train_loss.append(np.mean(bn_solvers[ws].loss_history))
plt.subplot(3, 1, 1)
plt.title('Best val accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best val accuracy')
plt.semilogx(weight_scales, best_val_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm')
plt.legend(ncol=2, loc='lower right')
plt.subplot(3, 1, 2)
plt.title('Best train accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best training accuracy')
plt.semilogx(weight_scales, best_train_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm')
plt.legend()
plt.subplot(3, 1, 3)
plt.title('Final training loss vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Final training loss')
plt.semilogx(weight_scales, final_train_loss, '-o', label='baseline')
plt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm')
plt.legend()
plt.gcf().set_size_inches(10, 15)
plt.show()
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Connect to Spark
Step2: Read a GeoTiff file
Step3: Visualization
Step4: Interactive visualization
Step5: Histogram
|
<ASSISTANT_TASK:>
Python Code:
#Add all dependencies to PYTHON_PATH
import sys
sys.path.append("/usr/lib/spark/python")
sys.path.append("/usr/lib/spark/python/lib/py4j-0.10.4-src.zip")
sys.path.append("/usr/lib/python3/dist-packages")
#Define environment variables
import os
os.environ["HADOOP_CONF_DIR"] = "/etc/hadoop/conf"
os.environ["PYSPARK_PYTHON"] = "python3"
os.environ["PYSPARK_DRIVER_PYTHON"] = "ipython"
#Load PySpark to connect to a Spark cluster
from pyspark import SparkConf, SparkContext
#from osgeo import gdal
#To read GeoTiffs as a ByteArray
from io import BytesIO
from rasterio.io import MemoryFile
appName = "plot_GeoTiff"
masterURL="spark://pheno0.phenovari-utwente.surf-hosted.nl:7077"
#A context needs to be created if it does not already exist
try:
    sc.stop()
except NameError:
    print("A new Spark Context will be created.")
sc = SparkContext(conf = SparkConf().setAppName(appName).setMaster(masterURL))
# Read the GeoTiff from HDFS as raw bytes via Spark's binaryFiles.
file_path = "hdfs:///user/pheno/modis/Onset_Greenness_Increase/clusters_10_35.tif"
data = sc.binaryFiles(file_path).take(1)
dataByteArray = bytearray(data[0][1])
#If it is needed to convert to a numpy array
#import numpy as np
#file_bytes = np.asarray(dataByteArray, dtype=np.uint8)
#Lets check if the files was read correctly by printing its metadata
with MemoryFile(dataByteArray) as memfile:
    with memfile.open() as dataset:
        print(dataset.profile)
import matplotlib.pyplot as plt
import rasterio
from rasterio import plot
%matplotlib notebook
# Plot band 1 straight from the in-memory GeoTiff.
with MemoryFile(dataByteArray) as memfile:
    with memfile.open() as dataset:
        plot.show((dataset,1))
#Define variables so they can be re-used.
memfile = MemoryFile(dataByteArray)
dataset = memfile.open()
%matplotlib notebook
fig, (axr, axg, axb) = plt.subplots(1,3, figsize=(12, 4), sharex=True, sharey=True)
plot.show((dataset, 1), title='band 1', ax=axr)
#plot.show((dataset, 2), title='band 2', ax=axg)
#plot.show((dataset, 3), title='band 3', ax=axb)
%matplotlib inline
# Histogram of the raster values.
plot.show_hist(dataset)#, bins=250)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Complete graph Laplacian
Step3: The Laplacian Matrix is a matrix that is extremely important in graph theory and numerical analysis. It is defined as $L=D-A$. Where $D$ is the degree matrix and $A$ is the adjecency matrix. For the purpose of this problem you don't need to understand the details of these matrices, although their definitions are relatively simple.
Step5: The adjacency matrix for $K_n$ is an $n \times n$ matrix with zeros along the diagonal and ones everywhere else. Write a function to compute the adjacency matrix for $K_n$ using NumPy.
Step6: Use NumPy to explore the eigenvalues or spectrum of the Laplacian L of $K_n$. What patterns do you notice as $n$ changes? Create a conjecture about the general Laplace spectrum of $K_n$.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
# Draw the complete graph K_5 as a quick visual sanity check.
K_5=nx.complete_graph(5)
nx.draw(K_5)
def complete_deg(n):
    """Return the integer valued degree matrix D for the complete graph K_n.

    Every vertex of K_n is adjacent to the other n - 1 vertices, so D is
    diagonal with n - 1 in each diagonal entry. (The triple quotes of
    this docstring had been stripped, which made the function a syntax
    error.)
    """
    return (n - 1) * np.eye(n, dtype=int)
# Sanity checks: D is 5x5, integer, diagonal entries 4, zeros elsewhere.
D = complete_deg(5)
assert D.shape==(5,5)
assert D.dtype==np.dtype(int)
assert np.all(D.diagonal()==4*np.ones(5))
assert np.all(D-np.diag(D.diagonal())==np.zeros((5,5),dtype=int))
def complete_adj(n):
    """Return the integer valued adjacency matrix A for the complete graph K_n.

    K_n has an edge between every pair of distinct vertices: ones
    everywhere except a zero diagonal (no self-loops). (The triple
    quotes of this docstring had been stripped, which made the function
    a syntax error.)
    """
    return np.ones((n, n), dtype=int) - np.eye(n, dtype=int)
# Sanity check: adding the identity to A gives the all-ones matrix.
A = complete_adj(5)
assert A.shape==(5,5)
assert A.dtype==np.dtype(int)
assert np.all(A+np.eye(5,dtype=int)==np.ones((5,5),dtype=int))
# Laplacian L = D - A of K_5. For K_n the Laplace spectrum is 0 (once)
# and n (with multiplicity n - 1) — the pattern the exercise asks for.
d=complete_deg(5) #calling function
a=complete_adj(5) #calling function
L=d-a #finding difference
np.linalg.eigvals(L) #eigenvalue answer
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Below is the outline of processing chain.
Step2: The color code
Step3: For any tables or relational expressions derived from them, you may see all the attributes using the heading attribute.
Step4: Note that the attributes above the dividing line --- comprise the primary key, i.e. identifying information used for matching data across different tables.
Step5: To fetch the contents of a table or a derived relational expression into a numpy record array, use the fetch method.
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import datajoint as dj
from pipeline import mice, experiment, vis, preprocess, tuning
pylab.rcParams['figure.figsize'] = (8, 8) # make figures bigger
erd = dj.ERD(preprocess.Sync) - 2
erd += dj.ERD(preprocess.Spikes) - 3
erd += experiment.Session
erd += dj.ERD(tuning.MonetRF.Map) - 1
erd.draw()
experiment.Session().show_definition();
experiment.Session().heading
experiment.Session()
data = experiment.Session().fetch()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Before getting started with
Step2: Adding events
Step3: Adding ~mne.Epochs
Step4: Adding ~mne.Evoked
Step5: Adding ~mne.Covariance
Step6: Adding ~mne.Projection vectors
Step7: Adding ~mne.preprocessing.ICA
Step8: Adding MRI with BEM
Step9: Adding coregistration
Step10: Adding a ~mne.Forward solution
Step11: Adding an ~mne.minimum_norm.InverseOperator
Step12: Adding a ~mne.SourceEstimate
Step13: Adding source code (e.g., a Python script)
Step14: Adding custom figures
Step15: The
Step16: Adding image files
Step17: Working with tags
Step18: Editing a saved report
Step19: The saved report can be read back and modified or amended. This allows the
Step20: To make this even easier,
Step21: With the context manager, the updated report is also automatically saved
Step22: By default, the power spectral density and SSP projectors of the
Step23: This time we'll pass a specific subject and subjects_dir (even though
Step24: Now let's look at how
Step25: If you want to actually view the noise covariance in the report, make sure
|
<ASSISTANT_TASK:>
Python Code:
from pathlib import Path
import tempfile
import numpy as np
import scipy.ndimage
import matplotlib.pyplot as plt
import mne
# Paths into the MNE sample dataset (downloaded on first use).
data_path = Path(mne.datasets.sample.data_path(verbose=False))
sample_dir = data_path / 'MEG' / 'sample'
subjects_dir = data_path / 'subjects'
# Add a Raw recording to a report; keep EEG+EOG only and crop to 60 s
# to keep the example fast.
raw_path = sample_dir / 'sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw(raw_path)
raw.pick_types(eeg=True, eog=True).crop(tmax=60).load_data()
report = mne.Report(title='Raw example')
# This method also accepts a path, e.g., raw=raw_path
report.add_raw(raw=raw, title='Raw', psd=False) # omit PSD plot
report.save('report_raw.html', overwrite=True)
# Add events: once from a file path, once from an in-memory array.
events_path = sample_dir / 'sample_audvis_filt-0-40_raw-eve.fif'
events = mne.read_events(events_path)
sfreq = raw.info['sfreq']
report = mne.Report(title='Events example')
report.add_events(events=events_path, title='Events from Path', sfreq=sfreq)
report.add_events(events=events, title='Events from "events"', sfreq=sfreq)
report.save('report_events.html', overwrite=True)
# Add Epochs built from the raw data and the events.
epochs = mne.Epochs(raw=raw, events=events)
report = mne.Report(title='Epochs example')
report.add_epochs(epochs=epochs, title='Epochs from "epochs"')
report.save('report_epochs.html', overwrite=True)
# Add the first two Evoked responses (EEG only for speed), whitened
# with the noise covariance.
evoked_path = sample_dir / 'sample_audvis-ave.fif'
cov_path = sample_dir / 'sample_audvis-cov.fif'
evokeds = mne.read_evokeds(evoked_path, baseline=(None, 0))
evokeds_subset = evokeds[:2] # The first two
for evoked in evokeds_subset:
    evoked.pick('eeg') # just for speed of plotting
report = mne.Report(title='Evoked example')
report.add_evokeds(
    evokeds=evokeds_subset,
    titles=['evoked 1', # Manually specify titles
            'evoked 2'],
    noise_cov=cov_path,
    n_time_points=5
)
report.save('report_evoked.html', overwrite=True)
# Add a noise covariance estimate.
cov_path = sample_dir / 'sample_audvis-cov.fif'
report = mne.Report(title='Covariance example')
report.add_covariance(cov=cov_path, info=raw_path, title='Covariance')
report.save('report_cov.html', overwrite=True)
# SSP projector file paths (the script continues beyond this excerpt).
ecg_proj_path = sample_dir / 'sample_audvis_ecg-proj.fif'
eog_proj_path = sample_dir / 'sample_audvis_eog-proj.fif'
report = mne.Report(title='Projectors example')
report.add_projs(info=raw_path, title='Projs from info')
report.add_projs(info=raw_path, projs=ecg_proj_path,
title='ECG projs from path')
report.add_projs(info=raw_path, projs=eog_proj_path,
title='EOG projs from path')
report.save('report_projs.html', overwrite=True)
ica = mne.preprocessing.ICA(
n_components=5, # fit 5 ICA components
fit_params=dict(tol=0.01) # assume very early on that ICA has converged
)
ica.fit(inst=raw)
# create epochs based on EOG events, find EOG artifacts in the data via pattern
# matching, and exclude the EOG-related ICA components
eog_epochs = mne.preprocessing.create_eog_epochs(raw=raw)
eog_components, eog_scores = ica.find_bads_eog(
inst=eog_epochs,
ch_name='EEG 001', # a channel close to the eye
threshold=1 # lower than the default threshold
)
ica.exclude = eog_components
report = mne.Report(title='ICA example')
report.add_ica(
ica=ica,
title='ICA cleaning',
picks=[0, 1], # only plot the first two components
inst=raw,
eog_evoked=eog_epochs.average(),
eog_scores=eog_scores,
n_jobs=1 # could be increased!
)
report.save('report_ica.html', overwrite=True)
report = mne.Report(title='BEM example')
report.add_bem(
subject='sample', subjects_dir=subjects_dir, title='MRI & BEM',
decim=20
)
report.save('report_mri_and_bem.html', overwrite=True)
trans_path = sample_dir / 'sample_audvis_raw-trans.fif'
report = mne.Report(title='Coregistration example')
report.add_trans(
trans=trans_path, info=raw_path, subject='sample',
subjects_dir=subjects_dir, title='Coregistration'
)
report.save('report_coregistration.html', overwrite=True)
fwd_path = sample_dir / 'sample_audvis-meg-oct-6-fwd.fif'
report = mne.Report(title='Forward solution example')
report.add_forward(forward=fwd_path, title='Forward solution')
report.save('report_forward_sol.html', overwrite=True)
inverse_op_path = sample_dir / 'sample_audvis-meg-oct-6-meg-inv.fif'
report = mne.Report(title='Inverse operator example')
report.add_inverse_operator(
inverse_operator=inverse_op_path, title='Inverse operator'
)
report.save('report_inverse_op.html', overwrite=True)
stc_path = sample_dir / 'sample_audvis-meg'
report = mne.Report(title='Source estimate example')
report.add_stc(
stc=stc_path, subject='sample', subjects_dir=subjects_dir,
title='Source estimate', n_time_points=2 # few for speed
)
report.save('report_inverse_sol.html', overwrite=True)
mne_init_py_path = Path(mne.__file__) # __init__.py in the MNE-Python root
mne_init_py_content = mne_init_py_path.read_text(encoding='utf-8')
report = mne.Report(title='Code example')
report.add_code(
code=mne_init_py_path,
title="Code from Path"
)
report.add_code(
code=mne_init_py_content,
title="Code from string"
)
report.save('report_code.html', overwrite=True)
x = np.linspace(start=0, stop=10, num=100)
y = x**2
fig, ax = plt.subplots()
ax.plot(x, y, ls='--', lw=2, color='blue', label='my function')
ax.set_xlabel('x')
ax.set_ylabel('f(x)')
ax.legend()
report = mne.Report(title='Figure example')
report.add_figure(
fig=fig, title='A custom figure',
caption='A blue dashed line reaches up into the sky …',
image_format='PNG'
)
report.save('report_custom_figure.html', overwrite=True)
mne_logo_path = Path(mne.__file__).parent / 'icons' / 'mne_icon-cropped.png'
fig_array = plt.imread(mne_logo_path)
rotation_angles = np.linspace(start=0, stop=360, num=17)
figs = []
captions = []
for angle in rotation_angles:
# Rotate and remove some rounding errors to avoid Matplotlib warnings
fig_array_rotated = scipy.ndimage.rotate(input=fig_array, angle=angle)
fig_array_rotated = fig_array_rotated.clip(min=0, max=1)
# Create the figure
fig, ax = plt.subplots()
ax.imshow(fig_array_rotated)
ax.set_axis_off()
# Store figure and caption
figs.append(fig)
captions.append(f'Rotation angle: {round(angle, 1)}°')
report = mne.Report(title='Multiple figures example')
report.add_figure(fig=figs, title='Fun with figures! 🥳', caption=captions)
report.save('report_custom_figures.html', overwrite=True)
report = mne.Report(title='Image example')
report.add_image(
image=mne_logo_path, title='MNE',
caption='Powered by 🧠 🧠 🧠 around the world!'
)
report.save('report_custom_image.html', overwrite=True)
report = mne.Report(title='Tags example')
report.add_image(
image=mne_logo_path,
title='MNE Logo',
tags=('image', 'mne', 'logo', 'open-source')
)
report.save('report_tags.html', overwrite=True)
report = mne.Report(title='Saved report example', verbose=True)
report.add_image(image=mne_logo_path, title='MNE 1')
report.save('report_partial.hdf5', overwrite=True)
report_from_disk = mne.open_report('report_partial.hdf5')
report_from_disk.add_image(image=mne_logo_path, title='MNE 2')
report_from_disk.save('report_partial.hdf5', overwrite=True)
with mne.open_report('report_partial.hdf5') as report:
report.add_image(image=mne_logo_path, title='MNE 3')
report.save('report_final.html', overwrite=True)
report = mne.Report(title='parse_folder example')
report.parse_folder(
data_path=data_path, pattern='*raw.fif', render_bem=False,
raw_butterfly=False
)
report.save('report_parse_folder_basic.html', overwrite=True)
pattern = 'sample_audvis_filt-0-40_raw.fif'
report = mne.Report(title='parse_folder example 2', raw_psd=True, projs=True)
report.parse_folder(
data_path=data_path, pattern=pattern, render_bem=False, raw_butterfly=False
)
report.save('report_parse_folder_raw_psd_projs.html', overwrite=True)
report = mne.Report(
title='parse_folder example 3', subject='sample', subjects_dir=subjects_dir
)
report.parse_folder(data_path=data_path, pattern='', mri_decim=25)
report.save('report_parse_folder_mri_bem.html', overwrite=True)
baseline = (None, 0)
cov_fname = sample_dir / 'sample_audvis-cov.fif'
pattern = 'sample_audvis-no-filter-ave.fif'
evoked = mne.read_evokeds(sample_dir / pattern)[0]
report = mne.Report(
title='parse_folder example 4', baseline=baseline, cov_fname=cov_fname
)
with tempfile.TemporaryDirectory() as path:
evoked.save(Path(path) / pattern)
report.parse_folder(
path, pattern=pattern, render_bem=False, n_time_points_evokeds=5
)
report.save('report_parse_folder_evoked.html', overwrite=True)
pattern = 'sample_audvis-cov.fif'
info_fname = sample_dir / 'sample_audvis-ave.fif'
report = mne.Report(title='parse_folder example 5', info_fname=info_fname)
report.parse_folder(
data_path, pattern=pattern, render_bem=False, n_time_points_evokeds=5
)
report.save('report_parse_folder_cov.html', overwrite=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h3> Install Py-ART </h3>
Step2: <h2> Plot into png </h2>
Step3: <h2> Create animating PNG </h2>
|
<ASSISTANT_TASK:>
Python Code:
%bash
rm -rf data
mkdir data
cd data
RADAR=KIWA
YEAR=2013
MONTH=07
DAY=23
HOUR=23
gsutil cp gs://gcp-public-data-nexrad-l2/$YEAR/$MONTH/$DAY/$RADAR/*_$RADAR_${YEAR}${MONTH}${DAY}${HOUR}0000_${YEAR}${MONTH}${DAY}${HOUR}5959.tar temp.tar
tar xvf temp.tar
rm *.tar
ls
# Based on
# http://arm-doe.github.io/pyart/dev/auto_examples/plotting/plot_nexrad_multiple_moments.html
# by Jonathan J. Helmus (jhelmus@anl.gov)
import matplotlib.pyplot as plt
import pyart
def plot_data(filename):
    """Render a 4-panel summary plot for a NEXRAD Level-II archive file.

    Panels: Doppler velocity (sweep 1), reflectivity (sweeps 0 and 1),
    and correlation coefficient (sweep 0), each limited to +/-300 km.

    Parameters
    ----------
    filename : str
        Path to the NEXRAD Level-II volume-scan file to plot.
    """
    # BUG FIX: the original read `infilename`, which is undefined in this
    # scope (the parameter is named `filename`) and raised a NameError.
    radar = pyart.io.read_nexrad_archive(filename)
    display = pyart.graph.RadarDisplay(radar)
    fig = plt.figure(figsize=(10, 10))

    # Doppler velocity, second sweep
    ax = fig.add_subplot(221)
    display.plot('velocity', 1, ax=ax, title='Doppler Velocity',
                 colorbar_label='',
                 axislabels=('', 'North South distance from radar (km)'))
    display.set_limits((-300, 300), (-300, 300), ax=ax)

    # Reflectivity, lowest sweep
    ax = fig.add_subplot(222)
    display.plot('reflectivity', 0, ax=ax,
                 title='Reflectivity lowest', colorbar_label='',
                 axislabels=('', ''))
    display.set_limits((-300, 300), (-300, 300), ax=ax)

    # Reflectivity, second sweep
    ax = fig.add_subplot(223)
    display.plot('reflectivity', 1, ax=ax,
                 title='Reflectivity second', colorbar_label='')
    display.set_limits((-300, 300), (-300, 300), ax=ax)

    # Correlation coefficient, lowest sweep
    ax = fig.add_subplot(224)
    display.plot('cross_correlation_ratio', 0, ax=ax,
                 title='Correlation Coefficient', colorbar_label='',
                 axislabels=('East West distance from radar (km)', ''))
    display.set_limits((-300, 300), (-300, 300), ax=ax)

    plt.show()
%writefile plot_pngs.py
import matplotlib.pyplot as plt
import pyart
def plot_data(infilename, outpng):
radar = pyart.io.read_nexrad_archive(infilename)
display = pyart.graph.RadarDisplay(radar)
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(221)
display.plot('velocity', 1, ax=ax, title='Doppler Velocity',
colorbar_label='',
axislabels=('', 'North South distance from radar (km)'))
display.set_limits((-300, 300), (-300, 300), ax=ax)
ax = fig.add_subplot(222)
display.plot('reflectivity', 0, ax=ax,
title='Reflectivity lowest', colorbar_label='',
axislabels=('', ''))
display.set_limits((-300, 300), (-300, 300), ax=ax)
ax = fig.add_subplot(223)
display.plot('reflectivity', 1, ax=ax,
title='Reflectivity second', colorbar_label='')
display.set_limits((-300, 300), (-300, 300), ax=ax)
ax = fig.add_subplot(224)
display.plot('cross_correlation_ratio', 0, ax=ax,
title='Correlation Coefficient', colorbar_label='',
axislabels=('East West distance from radar (km)', ''))
display.set_limits((-300, 300), (-300, 300), ax=ax)
fig.savefig(outpng)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='plot some radar data')
parser.add_argument('nexrad', help="volume scan filename")
parser.add_argument('png', help="output png filename")
args = parser.parse_args()
print "Plotting {} into {}".format(args.nexrad, args.png)
plot_data(args.nexrad, args.png)
%bash
python plot_pngs.py data/KIWA20130723_235451_V06.gz radarplot.png
%bash
rm -rf images
mkdir images
for volumefile in $(ls data); do
base=$(basename $volumefile)
python plot_pngs.py data/$volumefile images/$base.png
done
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a smoothing Node with IsotropicSmooth
Step2: Now, using iterables to smooth with several different FWHM values is as simple as this
Step3: And to wrap it up. We need to create a workflow, connect the nodes and finally, can run the workflow in parallel.
Step4: If we visualize the graph with exec, we can see where the parallelization actually takes place.
Step5: If you look at the structure in the workflow directory, you can also see, that for each smoothing, a specific folder was created, i.e. _fwhm_16.
Step6: Now, let's visualize the results!
Step7: IdentityInterface (a special use case of iterables)
Step8: Now, we can create the IdentityInterface Node
Step9: That's it. Now, we can connect the output fields of this infosource node like any other node to wherever we want.
Step10: For example, like
|
<ASSISTANT_TASK:>
Python Code:
from nipype import Node, Workflow
from nipype.interfaces.fsl import BET, IsotropicSmooth
# Initiate a skull stripping Node with BET
skullstrip = Node(BET(mask=True,
in_file='/data/ds102/sub-01/anat/sub-01_T1w.nii.gz'),
name="skullstrip")
isosmooth = Node(IsotropicSmooth(), name='iso_smooth')
isosmooth.iterables = ("fwhm", [4, 8, 16])
# Create the workflow
wf = Workflow(name="smoothflow")
wf.base_dir = "/data"
wf.connect(skullstrip, 'out_file', isosmooth, 'in_file')
# Run it in parallel (one core for each smoothing kernel)
wf.run('MultiProc', plugin_args={'n_procs': 3})
# Visualize the detailed graph
from IPython.display import Image
wf.write_graph(graph2use='exec', format='png', simple_form=True)
Image(filename='/data/smoothflow/graph_detailed.dot.png')
!tree /data/smoothflow -I '*txt|*pklz|report*|*.json|*js|*.dot|*.html'
%pylab inline
from nilearn import plotting
plotting.plot_anat(
'/data/ds102/sub-01/anat/sub-01_T1w.nii.gz', title='original',
display_mode='z', cut_coords=(-20, -10, 0, 10, 20), annotate=False)
plotting.plot_anat(
'/data/smoothflow/skullstrip/sub-01_T1w_brain.nii.gz', title='skullstripped',
display_mode='z', cut_coords=(-20, -10, 0, 10, 20), annotate=False)
plotting.plot_anat(
'/data/smoothflow/_fwhm_4/iso_smooth/sub-01_T1w_brain_smooth.nii.gz', title='FWHM=4',
display_mode='z', cut_coords=(-20, -10, 0, 10, 20), annotate=False)
plotting.plot_anat(
'/data/smoothflow/_fwhm_8/iso_smooth/sub-01_T1w_brain_smooth.nii.gz', title='FWHM=8',
display_mode='z', cut_coords=(-20, -10, 0, 10, 20), annotate=False)
plotting.plot_anat(
'/data/smoothflow/_fwhm_16/iso_smooth/sub-01_T1w_brain_smooth.nii.gz', title='FWHM=16',
display_mode='z', cut_coords=(-20, -10, 0, 10, 20), annotate=False)
# First, let's specify the list of input variables
subject_list = ['sub-01', 'sub-02', 'sub-03', 'sub-04', 'sub-05']
session_list = ['run-1', 'run-2']
fwhm_widths = [4, 8]
# IdentityInterface simply passes its named fields through unchanged;
# making it a Node with iterables fans the workflow out over every
# combination of the listed values.
from nipype import IdentityInterface
infosource = Node(IdentityInterface(fields=['subject_id', 'session_id', 'fwhm_id']),
                  name="infosource")
# Cartesian product: 5 subjects x 2 sessions x 2 FWHM widths = 20 branches.
infosource.iterables = [('subject_id', subject_list),
                        ('session_id', session_list),
                        ('fwhm_id', fwhm_widths)]
infosource.outputs
# NOTE(review): `workflow`, `selectfiles`, and `smooth` are assumed to be
# defined in earlier cells -- this connection example is illustrative.
workflow.connect([(infosource, selectfiles, [('subject_id', 'subject_id'),
                                             ('session_id', 'session_id')]),
                  (infosource, smooth, [('fwhm_id', 'fwhm')])
                  ])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Overview
Step2: This creates a new array of points that are the values of $\sin(x_i)$ at each point $x_i$
Step3: This plot shows that the points in this numerical array are an approximation to the actual function as they don't have the function's value at all possible points. In this case we know the actual function ($\sin(x)$). What if we only know the value of the function at a limited set of points, and don't know the analytical form of the function itself? This is common when the data points come from a set of measurements.
Step4: Let's create the numerical data we will use to build our interpolation.
Step5: To create our approximate function, we call interp1d as follows, with the numerical data. Options for the kind argument includes
Step6: The sin_approx variabl that interp1d returns is a callable object that can be used to compute the approximate function at other points. Compute the approximate function on a fine grid
Step7: Plot the original data points, along with the approximate interpolated values. It is quite amazing to see how the interpolation has done a good job of reconstructing the actual function with relatively few points.
Step8: Let's look at the absolute error between the actual function and the approximate interpolated function
Step9: 1d non-regular data
Step10: Notice how the absolute error is larger in the intervals where there are no points.
Step11: Here is the actual function we will use the generate our original dataset
Step12: Build 1d arrays to use as the structured grid
Step13: Build 2d arrays to use in computing the function on the grid points
Step14: Here is a scatter plot of the points overlayed with the value of the function at those points
Step15: You can see in this plot that the function is not smooth as we don't have its value on a fine grid.
Step16: Compute the interpolated function on a fine grid
Step17: Plot the original course grid of points, along with the interpolated function values on a fine grid
Step18: Notice how the interpolated values (green points) are now smooth and continuous. The amazing thing is that the interpolation algorithm doesn't know anything about the actual function. It creates this nice approximation using only the original course grid (blue points).
Step19: There is an important difference between griddata and the interp1d/interp2d
Step20: Notice how we pass these 1d arrays to our function and don't use meshgrid
Step21: It is clear that our grid is very unstructured
Step22: To use griddata we need to compute the final (structured) grid on which we want to evaluate the interpolated function
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Sample sin(x) at only 10 evenly spaced points on [0, 4*pi] -- deliberately
# coarse so the interpolation examples below have something to improve on.
x = np.linspace(0,4*np.pi,10)
x
f = np.sin(x)
print(f)
c = np.cos(x)
print (c)
# Plot the coarse samples; straight segments between markers make the
# under-sampling visible.
plt.plot(x, f, marker='o')
plt.xlabel('x')
plt.ylabel('f(x)');
from scipy.interpolate import interp1d
x = np.linspace(0,4*np.pi,10) # only use 10 points to emphasize this is an approx
f = np.sin(x)
# interp1d returns a callable that evaluates the cubic-spline interpolant
# at arbitrary points inside [x.min(), x.max()].
sin_approx = interp1d(x, f, kind='cubic')
# Evaluate the interpolant on a 10x finer grid.
newx = np.linspace(0,4*np.pi,100)
newf = sin_approx(newx)
plt.plot(x, f, marker='o', linestyle='', label='original data')
plt.plot(newx, newf, marker='.', label='interpolated');
plt.legend();
plt.xlabel('x')
plt.ylabel('f(x)');
plt.plot(newx, np.abs(np.sin(newx)-sin_approx(newx)))
plt.xlabel('x')
plt.ylabel('Absolute error');
x = 4*np.pi*np.random.rand(15)
f = np.sin(x)
sin_approx = interp1d(x, f, kind='cubic')
# We have to be careful about not interpolating outside the range
newx = np.linspace(np.min(x), np.max(x),100)
newf = sin_approx(newx)
plt.plot(x, f, marker='o', linestyle='', label='original data')
plt.plot(newx, newf, marker='.', label='interpolated');
plt.legend();
plt.xlabel('x')
plt.ylabel('f(x)');
plt.plot(newx, np.abs(np.sin(newx)-sin_approx(newx)))
plt.xlabel('x')
plt.ylabel('Absolute error');
from scipy.interpolate import interp2d
def wave2d(x, y):
    """Standing-wave test function: sin(2*pi*x) * sin(3*pi*y)."""
    x_component = np.sin(2 * np.pi * x)
    y_component = np.sin(3 * np.pi * y)
    return x_component * y_component
x = np.linspace(0.0, 1.0, 10)
y = np.linspace(0.0, 1.0, 10)
X, Y = np.meshgrid(x, y)
Z = wave2d(X, Y)
plt.pcolor(X, Y, Z)
plt.colorbar();
plt.scatter(X, Y);
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('x')
plt.ylabel('y');
wave2d_approx = interp2d(X, Y, Z, kind='cubic')
xnew = np.linspace(0.0, 1.0, 40)
ynew = np.linspace(0.0, 1.0, 40)
Xnew, Ynew = np.meshgrid(xnew, ynew) # We will use these in the scatter plot below
Fnew = wave2d_approx(xnew, ynew) # The interpolating function automatically creates the meshgrid!
Fnew.shape
plt.pcolor(xnew, ynew, Fnew);
plt.colorbar();
plt.scatter(X, Y, label='original points')
plt.scatter(Xnew, Ynew, marker='.', color='green', label='interpolated points')
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('x')
plt.ylabel('y');
plt.legend(bbox_to_anchor=(1.2, 1), loc=2, borderaxespad=0.);
from scipy.interpolate import griddata
x = np.random.rand(100)
y = np.random.rand(100)
f = wave2d(x, y)
plt.scatter(x, y);
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('x')
plt.ylabel('y');
xnew = np.linspace(x.min(), x.max(), 40)
ynew = np.linspace(y.min(), y.max(), 40)
Xnew, Ynew = np.meshgrid(xnew, ynew)
Xnew.shape, Ynew.shape
Fnew = griddata((x,y), f, (Xnew, Ynew), method='cubic', fill_value=0.0)
Fnew.shape
plt.pcolor(Xnew, Ynew, Fnew, label="points")
plt.colorbar()
plt.scatter(x, y, label='original points')
plt.scatter(Xnew, Ynew, marker='.', color='green', label='interpolated points')
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('x')
plt.ylabel('y');
plt.legend(bbox_to_anchor=(1.2, 1), loc=2, borderaxespad=0.);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
# Run in graph (TF1-style) mode so a Session can be used below.
tf.compat.v1.disable_eager_execution()
# NOTE(review): `input` shadows the Python builtin of the same name;
# kept as-is because the call below depends on it, but renaming
# (e.g. to `values`) would be preferable.
input = [10, 20, 30]
def g(input):
    """Expand each element x of `input` into (x, x+1, x+2).

    Builds a tf.data pipeline whose flat_map concatenates the three-element
    sub-datasets, then pulls the first 9 values through a one-shot iterator:
    [10, 11, 12, 20, 21, 22, 30, 31, 32].
    """
    ds = tf.data.Dataset.from_tensor_slices(input)
    ds = ds.flat_map(lambda x: tf.data.Dataset.from_tensor_slices([x, x + 1, x + 2]))
    element = tf.compat.v1.data.make_one_shot_iterator(ds).get_next()
    result = []
    with tf.compat.v1.Session() as sess:
        # 9 = len(input) * 3 elements produced by the pipeline above.
        for _ in range(9):
            result.append(sess.run(element))
    return result
result = g(input)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def modp(n: int, p: int):
    """Return 2**n modulo p.

    Uses Python's built-in three-argument pow(), which performs modular
    exponentiation in O(log n) multiplications instead of the original
    O(n) doubling loop, and also handles the p == 1 edge case correctly
    (any value mod 1 is 0, whereas the old loop returned 1 for n == 0).
    Assumes n >= 0.
    """
    return pow(2, n, p)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: And let´s load a data-set
Step2: If you have holoviews installed, you can get an interactive figure
Step3: Sometimes it is necessary to have a look at some statistics for each cycle and step. This can be done using the cycle_info_plot-method
|
<ASSISTANT_TASK:>
Python Code:
import cellpy
from cellpy.utils import plotutils
cell = cellpy.get("../testdata/hdf5/20160805_test001_45_cc.h5", mass=0.8)
plotutils.raw_plot(cell)
fig = plotutils.cycle_info_plot(
cell,
cycle=3,
use_bokeh=False,
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Network Architecture
Step2: Training
Step3: Denoising
Step4: Checking out the performance
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
learning_rate = 0.001
# Input and target placeholders
inputs_ = tf.placeholder(tf.float32, shape=(None, 28, 28, 1), name="inputs")
targets_ = tf.placeholder(tf.float32, shape=(None, 28, 28, 1), name="targets")
### Encoder
conv1 = tf.layers.conv2d(inputs=inputs_, filters=16, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 28x28x16
maxpool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=(2, 2), strides=(2, 2), padding='same')
# Now 14x14x16
conv2 = tf.layers.conv2d(inputs=maxpool1, filters=8, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 14x14x8
maxpool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=(2, 2), strides=(2, 2), padding='same')
# Now 7x7x8
conv3 = tf.layers.conv2d(inputs=maxpool2, filters=8, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 7x7x8
encoded = tf.layers.max_pooling2d(inputs=conv3, pool_size=(2, 2), strides=(2, 2), padding='same')
# Now 4x4x8
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(images=encoded, size=(7, 7))
# Now 7x7x8
conv4 = tf.layers.conv2d(inputs=upsample1, filters=8, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 7x7x8
upsample2 = tf.image.resize_nearest_neighbor(images=conv4, size=(14, 14))
# Now 14x14x8
conv5 = tf.layers.conv2d(inputs=upsample2, filters=8, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 14x14x8
upsample3 = tf.image.resize_nearest_neighbor(images=conv5, size=(28, 28))
# Now 28x28x8
conv6 = tf.layers.conv2d(inputs=upsample3, filters=16, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 28x28x16
logits = tf.layers.conv2d(inputs=conv6, filters=1, kernel_size=(3, 3), padding='same', activation=None)
#Now 28x28x1
# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits, name="outputs")
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
epochs = 10
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
imgs = batch[0].reshape((-1, 28, 28, 1))
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs,
targets_: imgs})
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([in_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
learning_rate = 0.001
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder
conv1 = tf.layers.conv2d(inputs=inputs_, filters=32, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 28x28x32
maxpool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=(2, 2), strides=(2, 2), padding='same')
# Now 14x14x32
conv2 = tf.layers.conv2d(inputs=maxpool1, filters=32, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
maxpool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=(2, 2), strides=(2, 2), padding='same')
# Now 7x7x32
conv3 = tf.layers.conv2d(inputs=maxpool2, filters=16, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
encoded = tf.layers.max_pooling2d(inputs=conv3, pool_size=(2, 2), strides=(2, 2), padding='same')
# Now 4x4x16
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(images=encoded, size=(7, 7))
# Now 7x7x16
conv4 = tf.layers.conv2d(inputs=upsample1, filters=16, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
upsample2 = tf.image.resize_nearest_neighbor(images=conv4, size=(14, 14))
# Now 14x14x16
conv5 = tf.layers.conv2d(inputs=upsample2, filters=32, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
upsample3 = tf.image.resize_nearest_neighbor(images=conv5, size=(28, 28))
# Now 28x28x32
conv6 = tf.layers.conv2d(inputs=upsample3, filters=32, kernel_size=(3, 3), padding='same', activation=tf.nn.relu)
# Now 28x28x32
logits = tf.layers.conv2d(inputs=conv6, filters=1, kernel_size=(3, 3), padding='same', activation=None)
#Now 28x28x1
# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits, name="outputs")
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
epochs = 10
batch_size = 500
# Set's how much noise we're adding to the MNIST images
noise_factor = 0.5
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images from the batch
imgs = batch[0].reshape((-1, 28, 28, 1))
# Add random noise to the input images
noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)
# Clip the images to be between 0 and 1
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
# Noisy images as inputs, original images as targets
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,
targets_: imgs})
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
noisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape)
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
reconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([noisy_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the data
Step2: Data exploration
Step3: Helper functions
Step4: Ridge regression model fitting
Step5: Ridge regression on subsets
Step6: Applying a higher L2 value
Step7: Selecting an L2 penalty via cross-validation
Step8: Minimize the l2 by using cross validation
Step9: Use the best l2 to train the model on all the data
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
import numpy as np
from math import ceil
dtype_dict = {'bathrooms':float, 'waterfront':int, 'sqft_above':int, 'sqft_living15':float, 'grade':int, 'yr_renovated':int, 'price':float, 'bedrooms':float, 'zipcode':str, 'long':float, 'sqft_lot15':float, 'sqft_living':float, 'floors':float, 'condition':int, 'lat':float, 'date':str, 'sqft_basement':int, 'yr_built':int, 'id':str, 'sqft_lot':int, 'view':int}
regressionDir = '/home/weenkus/workspace/Machine Learning - University of Washington/Regression/datasets/'
sales = pd.read_csv(regressionDir + 'kc_house_data.csv', dtype = dtype_dict)
sales = sales.sort(['sqft_living','price'])
# dtype_dict same as above
set_1 = pd.read_csv(regressionDir + 'wk3_kc_house_set_1_data.csv', dtype=dtype_dict)
set_2 = pd.read_csv(regressionDir + 'wk3_kc_house_set_2_data.csv', dtype=dtype_dict)
set_3 = pd.read_csv(regressionDir + 'wk3_kc_house_set_3_data.csv', dtype=dtype_dict)
set_4 = pd.read_csv(regressionDir + 'wk3_kc_house_set_4_data.csv', dtype=dtype_dict)
train_valid_shuffled = pd.read_csv(regressionDir + 'wk3_kc_house_train_valid_shuffled.csv', dtype=dtype_dict)
test = pd.read_csv(regressionDir + 'wk3_kc_house_test_data.csv', dtype=dtype_dict)
training = pd.read_csv(regressionDir + 'wk3_kc_house_train_data.csv', dtype=dtype_dict)
# Show plots in jupyter
%matplotlib inline
sales.head()
sales['price'].head()
def polynomial_dataframe(feature, degree): # feature is pandas.Series type
    """
    Build a DataFrame whose columns are successive powers of `feature`.

    : feature: pandas.Series of numeric values
    : degree: highest power to generate; assumed to be >= 1
    : return: DataFrame with columns 'power_1' ... 'power_<degree>',
              where 'power_i' holds feature**i
    """
    poly_dataframe = pd.DataFrame()
    # 'power_1' is just the raw feature itself.
    poly_dataframe['power_1'] = feature
    # Vectorized exponentiation replaces the original two-step
    # assign-then-apply(lambda x: x**power), which was slower and noisier.
    for power in range(2, degree + 1):
        poly_dataframe['power_' + str(power)] = feature ** power
    return poly_dataframe
poly15_data = polynomial_dataframe(sales['sqft_living'], 15) # use equivalent of `polynomial_sframe`
print(poly15_data)
l2_small_penalty = 1.5e-5
model = linear_model.Ridge(alpha=l2_small_penalty, normalize=True)
model.fit(poly15_data, sales['price'])
model.coef_
plt.plot(poly15_data, model.predict(poly15_data), poly15_data, sales['price'])
plt.show()
l2_small_penalty=1e-9
poly15_data_set1 = polynomial_dataframe(set_1['sqft_living'], 15) # use equivalent of `polynomial_sframe`
model1 = linear_model.Ridge(alpha=l2_small_penalty, normalize=True)
model1.fit(poly15_data_set1, set_1['price'])
poly15_data_set2 = polynomial_dataframe(set_2['sqft_living'], 15) # use equivalent of `polynomial_sframe`
model2 = linear_model.Ridge(alpha=l2_small_penalty, normalize=True)
model2.fit(poly15_data_set2, set_2['price'])
poly15_data_set3 = polynomial_dataframe(set_3['sqft_living'], 15) # use equivalent of `polynomial_sframe`
model3 = linear_model.Ridge(alpha=l2_small_penalty, normalize=True)
model3.fit(poly15_data_set3, set_3['price'])
poly15_data_set4 = polynomial_dataframe(set_4['sqft_living'], 15) # use equivalent of `polynomial_sframe`
model4 = linear_model.Ridge(alpha=l2_small_penalty, normalize=True)
model4.fit(poly15_data_set4, set_4['price'])
plt.plot(poly15_data_set1, model1.predict(poly15_data_set1), poly15_data_set1, set_1['price'])
plt.show()
plt.plot(poly15_data_set2, model2.predict(poly15_data_set2), poly15_data_set2, set_2['price'])
plt.show()
plt.plot(poly15_data_set3, model3.predict(poly15_data_set3), poly15_data_set3, set_3['price'])
plt.show()
plt.plot(poly15_data_set4, model4.predict(poly15_data_set4), poly15_data_set4, set_4['price'])
plt.show()
print('Model 1 coefficients: ', model1.coef_)
print('Model 2 coefficients: ', model2.coef_)
print('Model 3 coefficients: ', model3.coef_)
print('Model 4 coefficients: ', model4.coef_)
l2_large_penalty=1.23e2
poly15_data_set1 = polynomial_dataframe(set_1['sqft_living'], 15) # use equivalent of `polynomial_sframe`
model1 = linear_model.Ridge(alpha=l2_large_penalty, normalize=True)
model1.fit(poly15_data_set1, set_1['price'])
poly15_data_set2 = polynomial_dataframe(set_2['sqft_living'], 15) # use equivalent of `polynomial_sframe`
model2 = linear_model.Ridge(alpha=l2_large_penalty, normalize=True)
model2.fit(poly15_data_set2, set_2['price'])
poly15_data_set3 = polynomial_dataframe(set_3['sqft_living'], 15) # use equivalent of `polynomial_sframe`
model3 = linear_model.Ridge(alpha=l2_large_penalty, normalize=True)
model3.fit(poly15_data_set3, set_3['price'])
poly15_data_set4 = polynomial_dataframe(set_4['sqft_living'], 15) # use equivalent of `polynomial_sframe`
model4 = linear_model.Ridge(alpha=l2_large_penalty, normalize=True)
model4.fit(poly15_data_set4, set_4['price'])
plt.plot(poly15_data_set1, model1.predict(poly15_data_set1), poly15_data_set1, set_1['price'])
plt.show()
plt.plot(poly15_data_set2, model2.predict(poly15_data_set2), poly15_data_set2, set_2['price'])
plt.show()
plt.plot(poly15_data_set3, model3.predict(poly15_data_set3), poly15_data_set3, set_3['price'])
plt.show()
plt.plot(poly15_data_set4, model4.predict(poly15_data_set4), poly15_data_set4, set_4['price'])
plt.show()
print('Model 1 coefficients: ', model1.coef_)
print('Model 2 coefficients: ', model2.coef_)
print('Model 3 coefficients: ', model3.coef_)
print('Model 4 coefficients: ', model4.coef_)
def k_fold_cross_validation(k, l2_penalty, data, output):
    """
    Estimate the average validation RSS of a ridge model via k-fold CV.

    : k: number of folds
    : l2_penalty: L2 regularization strength (Ridge's alpha)
    : data: feature DataFrame
    : output: target Series aligned row-for-row with `data`
    : return: average residual sum of squares over the k validation folds

    Bug fix: the original computed fold boundaries but never used them --
    the result of the pandas .append() call was discarded and the model
    was trained AND scored on the full dataset on every iteration, so it
    just returned the training RSS of one model, k times over.
    """
    n = len(data)
    sum_rss = 0.0
    for i in range(k):
        # Validation fold is the half-open positional row range [start, end).
        start = (n * i) // k
        end = (n * (i + 1)) // k
        valid_data = data.iloc[start:end]
        valid_output = output.iloc[start:end]
        # Training set is everything outside the validation fold.
        train_data = data.iloc[0:start].append(data.iloc[end:n])
        train_output = output.iloc[0:start].append(output.iloc[end:n])
        model = linear_model.Ridge(alpha=l2_penalty, normalize=True)
        model.fit(train_data, train_output)
        # RSS on the held-out fold (squaring makes abs() unnecessary).
        sum_rss += ((valid_output - model.predict(valid_data)) ** 2).sum()
    return sum_rss / k
print (k_fold_cross_validation(10, 1e-9, poly15_data_set2, set_2['price']))
import sys
l2s = np.logspace(3, 9, num=13)
train_valid_shuffled_poly15 = polynomial_dataframe(train_valid_shuffled['sqft_living'], 15)
k = 10
minError = sys.maxsize
for l2 in l2s:
avgError = k_fold_cross_validation(k, l2, train_valid_shuffled_poly15, train_valid_shuffled['price'])
print ('For l2:', l2, ' the CV is ', avgError)
if avgError < minError:
minError = avgError
bestl2 = l2
print (minError)
print (bestl2)
model = linear_model.Ridge(alpha=1000, normalize=True)
model.fit(training[['sqft_living']], training['price'])
print("Residual sum of squares: %.2f"
% ((model.predict(test[['sqft_living']]) - test['price']) ** 2).sum())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Contents
Step2: <a id='section2'></a>
Step3: Here we have created a dictionary called state with a single item called Ts
Step4: This dictionary holds the state variables for our model -- which in this case is a single number! It is a temperature in degrees Celsius.
Step5: It is also possible to see this state dictionary as an xarray.Dataset object
Step6: The object called ebm here is the entire model -- including its current state (the temperature Ts) as well as all the methods needed to integrate it forward in time!
Step7: Here is some internal information about the timestep of the model
Step8: This says the timestep is 2592000 seconds (30 days!), and the model has taken 0 steps forward so far.
Step9: The model got colder!
Step10: This is another dictionary, now with two items. They should make sense to you.
Step11: So why did the model get colder in the first timestep?
Step12: The parameter tau is a property of the OutgoingLongwave subprocess
Step13: and the parameter albedo is a property of the AbsorbedShortwave subprocess
Step14: Let's make an exact clone of our model and then change these two parameters
Step15: Now our model is out of equilibrium and the climate will change!
Step16: Should the model warm up or cool down?
Step17: Automatic timestepping
Step18: <a id='section5'></a>
|
<ASSISTANT_TASK:>
Python Code:
# Ensure compatibility with Python 2 and 3
from __future__ import print_function, division
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import climlab
# create a zero-dimensional domain with a single surface temperature
state = climlab.surface_state(num_lat=1, # a single point
water_depth = 100., # 100 meters slab of water (sets the heat capacity)
)
state
state['Ts']
state.Ts
climlab.to_xarray(state)
# create the longwave radiation process
olr = climlab.radiation.Boltzmann(name='OutgoingLongwave',
state=state,
tau = 0.612,
eps = 1.,
timestep = 60*60*24*30.)
# Look at what we just created
print(olr)
# create the shortwave radiation process
asr = climlab.radiation.SimpleAbsorbedShortwave(name='AbsorbedShortwave',
state=state,
insolation=341.3,
albedo=0.299,
timestep = 60*60*24*30.)
# Look at what we just created
print(asr)
# couple them together into a single model
ebm = olr + asr
# Give the parent process name
ebm.name = 'EnergyBalanceModel'
# Examine the model object
print(ebm)
ebm.state
ebm.Ts
print(ebm.time['timestep'])
print(ebm.time['steps'])
ebm.step_forward()
ebm.Ts
ebm.diagnostics
ebm.OLR
ebm.ASR
for name, process in ebm.subprocess.items():
print(name)
print(process)
ebm.subprocess['OutgoingLongwave'].tau
ebm.subprocess['AbsorbedShortwave'].albedo
ebm2 = climlab.process_like(ebm)
print(ebm2)
ebm2.subprocess['OutgoingLongwave'].tau = 0.57
ebm2.subprocess['AbsorbedShortwave'].albedo = 0.32
# Computes diagnostics based on current state but does not change the state
ebm2.compute_diagnostics()
ebm2.ASR - ebm2.OLR
ebm2.Ts
ebm2.step_forward()
ebm2.Ts
ebm3 = climlab.process_like(ebm2)
ebm3.integrate_years(50)
# What is the current temperature?
ebm3.Ts
# How close are we to energy balance?
ebm3.ASR - ebm3.OLR
# We should be able to accomplish the exact same thing with explicit timestepping
for n in range(608):
ebm2.step_forward()
ebm2.Ts
ebm2.ASR - ebm2.OLR
%load_ext version_information
%version_information numpy, matplotlib, climlab
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Classification
Step2: Explore the Data
Step5: Implement Preprocess Functions
Step8: One-hot encode
Step10: Randomize Data
Step12: Check Point
Step17: Build the network
Step20: Convolution and Max Pooling Layer
Step23: Flatten Layer
Step26: Fully-Connected Layer
Step29: Output Layer
Step32: Create Convolutional Model
Step35: Train the Neural Network
Step37: Show Stats
Step38: Hyperparameters
Step40: Train on a Single CIFAR-10 Batch
Step42: Fully Train the Model
Step45: Checkpoint
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
class DLProgress(tqdm):
    """tqdm progress bar driven by urlretrieve's reporthook callback."""
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        """Advance the bar by the bytes downloaded since the last call."""
        self.total = total_size
        blocks_done = block_num - self.last_block
        self.update(blocks_done * block_size)
        self.last_block = block_num
if not isfile('cifar-10-python.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
'cifar-10-python.tar.gz',
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open('cifar-10-python.tar.gz') as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 2
sample_id = 1
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
def normalize(x):
    """
    Scale image data so every value lies in the range 0 to 1.
    : x: Image data with pixel values in 0-255; shape is (32, 32, 3)
    : return: Numpy array of normalized data
    """
    # Pixel intensities run 0-255, so dividing by 255 rescales to [0, 1].
    scaled = x / 255.0
    return scaled
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_normalize(normalize)
from sklearn import preprocessing
def one_hot_encode(x):
    """
    One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
    : x: List of sample labels (integers in 0..9)
    : return: Numpy array of one-hot encoded labels, shape (len(x), 10)
    """
    # Row i of the 10x10 identity matrix is exactly the one-hot vector for
    # label i, so fancy-indexing with the label array builds the whole batch
    # in one shot -- no need to re-fit a LabelBinarizer on range(10) for
    # every batch as the original did.
    return np.eye(10, dtype=int)[np.asarray(x)]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_one_hot_encode(one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
import tensorflow as tf
# There are tensorflow-gpu settings, but gpu can not work becourse of the net is too big.
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.3
set_session(tf.Session(config=config))
def neural_net_image_input(image_shape):
    """
    Return a Tensor for a batch of image input
    : image_shape: Shape of the images
    : return: Tensor for image input.
    """
    # Leading None leaves the batch dimension unspecified.
    batch_shape = (None, image_shape[0], image_shape[1], image_shape[2])
    return tf.placeholder(tf.float32, shape=batch_shape, name='x')
def neural_net_label_input(n_classes):
    """
    Return a Tensor for a batch of label input
    : n_classes: Number of classes
    : return: Tensor for label input.
    """
    # One-hot labels: free batch dimension by n_classes columns.
    return tf.placeholder(tf.float32, shape=(None, n_classes), name='y')
def neural_net_keep_prob_input():
    """
    Return a Tensor for keep probability
    : return: Tensor for keep probability.
    """
    # Scalar placeholder (no shape argument); fed with 1.0 at eval time.
    return tf.placeholder(tf.float32, name='keep_prob')
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
    """
    Apply convolution then max pooling to x_tensor
    :param x_tensor: TensorFlow Tensor
    :param conv_num_outputs: Number of outputs for the convolutional layer
    :param conv_ksize: kernal size 2-D Tuple for the convolutional layer
    :param conv_strides: Stride 2-D Tuple for convolution
    :param pool_ksize: kernal size 2-D Tuple for pool
    :param pool_strides: Stride 2-D Tuple for pool
    : return: A tensor that represents convolution and max pooling of x_tensor
    """
    #input = tf.placeholder(tf.float32, (None, 32, 32, 3))
    # Static shape as a Python list: (batch, height, width, depth).
    x_tensor_shape = x_tensor.get_shape().as_list()
    # Debug output of the layer configuration (printed at graph-build time).
    print('x_tensor_shape:\t{0}'.format(x_tensor_shape))
    print('conv_num_outputs:{0}'.format(conv_num_outputs))
    print('conv_ksize:\t{0}'.format(conv_ksize))
    print('conv_strides:\t{0}'.format(conv_strides))
    print('pool_ksize:\t{0}'.format(pool_ksize))
    print('pool_strides:\t{0}'.format(pool_strides))
    # Kernel initialized from a truncated normal with small stddev;
    # biases start at zero.
    filter_weights = tf.Variable(tf.truncated_normal((conv_ksize[0], conv_ksize[1], x_tensor_shape[3], conv_num_outputs), mean=0.0, stddev = 0.05)) # (height, width, input_depth, output_depth)
    filter_bias = tf.Variable(tf.zeros(conv_num_outputs))
    strides = [1, conv_strides[0], conv_strides[1], 1] # (batch, height, width, depth)
    # Convolution -> bias add -> ReLU; 'SAME' padding preserves spatial size
    # for unit strides.
    conv_layer = tf.nn.conv2d(x_tensor, filter_weights, strides=strides, padding='SAME')
    conv_layer = tf.nn.bias_add(conv_layer, filter_bias)
    # conv_layer = conv_layer + filter_bias
    conv_layer = tf.nn.relu(conv_layer)
    # Apply Max Pooling
    conv_layer = tf.nn.max_pool(
        conv_layer,
        ksize=[1, pool_ksize[0], pool_ksize[1], 1],
        strides=[1, pool_strides[0], pool_strides[1], 1],
        padding='SAME')
    return conv_layer
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_con_pool(conv2d_maxpool)
def flatten(x_tensor):
    """
    Flatten x_tensor to (Batch Size, Flattened Image Size)
    : x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
    : return: A tensor of size (Batch Size, Flattened Image Size).
    """
    # Delegate to the contrib helper, which keeps the batch dimension intact.
    flat = tf.contrib.layers.flatten(x_tensor)
    return flat
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_flatten(flatten)
def fully_conn(x_tensor, num_outputs):
    """
    Apply a fully connected layer to x_tensor using weight and bias
    : x_tensor: A 2-D tensor where the first dimension is batch size.
    : num_outputs: The number of output that the new tensor should be.
    : return: A 2-D tensor where the second dimension is num_outputs.
    """
    # contrib's fully_connected applies a ReLU activation by default.
    dense = tf.contrib.layers.fully_connected(x_tensor, num_outputs)
    return dense
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_fully_conn(fully_conn)
def output(x_tensor, num_outputs):
    """
    Apply a output layer to x_tensor using weight and bias
    : x_tensor: A 2-D tensor where the first dimension is batch size.
    : num_outputs: The number of output that the new tensor should be.
    : return: A 2-D tensor where the second dimension is num_outputs.
    """
    # activation_fn=None leaves the outputs as raw logits (no ReLU).
    logits_layer = tf.contrib.layers.fully_connected(x_tensor, num_outputs, activation_fn=None)
    return logits_layer
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_output(output)
def conv_net(x, keep_prob):
    """
    Create a convolutional neural network model
    : x: Placeholder tensor that holds image data.
    : keep_prob: Placeholder tensor that holds the dropout keep probability.
    : return: Tensor that represents logits

    Bug fix: the original passed the raw input `x` into all three
    conv2d_maxpool calls, so conv_layer1 and conv_layer2 were dead
    computation and the network was effectively a single conv layer.
    The three stages are now chained as the surrounding comments intended.
    """
    # Three convolution + max-pool stages (32 -> 64 -> 128 channels),
    # each followed by dropout for regularization.
    conv_num_outputs1 = 32
    conv_ksize1 = (4, 4)
    conv_strides1 = (1, 1)
    pool_ksize1 = (2, 2)
    pool_strides1 = (2, 2)
    conv_layer1 = conv2d_maxpool(x, conv_num_outputs1, conv_ksize1, conv_strides1, pool_ksize1, pool_strides1)
    conv_layer1 = tf.nn.dropout(conv_layer1, keep_prob)
    conv_num_outputs2 = 64
    conv_ksize2 = (4, 4)
    conv_strides2 = (1, 1)
    pool_ksize2 = (2, 2)
    pool_strides2 = (2, 2)
    # Chained: consumes conv_layer1 (the original mistakenly used x).
    conv_layer2 = conv2d_maxpool(conv_layer1, conv_num_outputs2, conv_ksize2, conv_strides2, pool_ksize2, pool_strides2)
    conv_layer2 = tf.nn.dropout(conv_layer2, keep_prob)
    conv_num_outputs3 = 128
    conv_ksize3 = (4, 4)
    conv_strides3 = (1, 1)
    pool_ksize3 = (2, 2)
    pool_strides3 = (2, 2)
    # Chained: consumes conv_layer2 (the original mistakenly used x).
    conv_layer3 = conv2d_maxpool(conv_layer2, conv_num_outputs3, conv_ksize3, conv_strides3, pool_ksize3, pool_strides3)
    conv_layer3 = tf.nn.dropout(conv_layer3, keep_prob)
    # Flatten to (batch, features) for the dense layers.
    conv_layer_flatten = flatten(conv_layer3)
    print('conv_layer_flatten.shape:%s' %conv_layer_flatten.shape)
    # Two fully connected (ReLU) layers with dropout.
    fc_num_outputs1 = 1024
    fc_layer1 = fully_conn(conv_layer_flatten, fc_num_outputs1)
    fc_layer1 = tf.nn.dropout(fc_layer1, keep_prob)
    fc_num_outputs2 = 512
    fc_layer2 = fully_conn(fc_layer1, fc_num_outputs2)
    fc_layer2 = tf.nn.dropout(fc_layer2, keep_prob)
    # Linear output layer: one logit per CIFAR-10 class.
    num_outputs = 10
    nn_output = output(fc_layer2, num_outputs)
    print('fc_num_outputs1:\t{0}'.format(fc_num_outputs1))
    print('fc_num_outputs2:\t{0}'.format(fc_num_outputs2))
    print('num_outputs:\t\t{0}'.format(num_outputs))
    print('')
    return nn_output
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that is can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
    """
    Optimize the session on a batch of images and labels
    : session: Current TensorFlow session
    : optimizer: TensorFlow optimizer function
    : keep_probability: keep probability
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    """
    # x, y and keep_prob are the module-level placeholders built earlier.
    feed = {x: feature_batch, y: label_batch, keep_prob: keep_probability}
    session.run(optimizer, feed_dict=feed)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_train_nn(train_neural_network)
def print_stats(session, feature_batch, label_batch, cost, accuracy):
    """
    Print information about loss and validation accuracy
    : session: Current TensorFlow session
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    : cost: TensorFlow cost function
    : accuracy: TensorFlow accuracy function
    """
    # Dropout is disabled (keep_prob=1.0) for both evaluations.
    train_feed = {x: feature_batch, y: label_batch, keep_prob: 1.0}
    valid_feed = {x: valid_features[0:400], y: valid_labels[0:400], keep_prob: 1.0}
    loss = session.run(cost, feed_dict=train_feed)
    valid_accuracy = session.run(accuracy, feed_dict=valid_feed)
    print('Loss: %.6f' %loss, end=' ')
    print('Validation Accuracy: %.6f' %valid_accuracy)
# TODO: Tune Parameters
epochs = 20
batch_size = 256
keep_probability = 0.5
DON'T MODIFY ANYTHING IN THIS CELL
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
DON'T MODIFY ANYTHING IN THIS CELL
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
DON'T MODIFY ANYTHING IN THIS CELL
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
    """
    Test the saved model against the test dataset
    """
    # NOTE(review): despite the docstring, this loads the *training* pickle
    # ('preprocess_training.p'); confirm whether the held-out test set file
    # was intended here.
    test_features, test_labels = pickle.load(open('preprocess_training.p', mode='rb'))
    loaded_graph = tf.Graph()
    with tf.Session(graph=loaded_graph) as sess:
        # Load model
        loader = tf.train.import_meta_graph(save_model_path + '.meta')
        loader.restore(sess, save_model_path)
        # Get Tensors from loaded model
        loaded_x = loaded_graph.get_tensor_by_name('x:0')
        loaded_y = loaded_graph.get_tensor_by_name('y:0')
        loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
        loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
        loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
        # Get accuracy in batches for memory limitations
        test_batch_acc_total = 0
        test_batch_count = 0
        # Dropout disabled (keep_prob=1.0) during evaluation.
        for train_feature_batch, train_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
            test_batch_acc_total += sess.run(
                loaded_acc,
                feed_dict={loaded_x: train_feature_batch, loaded_y: train_label_batch, loaded_keep_prob: 1.0})
            test_batch_count += 1
        print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
        # Print Random Samples
        # Sample n_samples (feature, label) pairs and show the top-k
        # softmax predictions for each.
        random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
        random_test_predictions = sess.run(
            tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
            feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
        helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: BQPlot
Step2: ipyvolume
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
from pandas.tools.plotting import scatter_matrix
from sklearn.datasets import load_boston
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('poster')
sns.set_style('whitegrid')
plt.rcParams['figure.figsize'] = 12, 8 # plotsize
import warnings
warnings.filterwarnings('ignore')
# mixed feelings about this import
import bqplot.pyplot as plt
import numpy as np
x = np.linspace(0, 2, 50)
y = x**2
fig = plt.figure()
scatter = plt.scatter(x, y)
plt.show()
fig.animation_duration = 500
scatter.y = 2 * x**.5
scatter.selected_style = {'stroke':'red', 'fill': 'orange'}
plt.brush_selector();
scatter.selected
scatter.selected = [1,2,10,40]
import ipyvolume as ipv
N = 1000
x, y, z = np.random.random((3, N))
fig = ipv.figure()
scatter = ipv.scatter(x, y, z, marker='box')
ipv.show()
scatter.x = scatter.x - 0.5
scatter.x = x
scatter.color = "green"
scatter.size = 5
scatter.color = np.random.random((N,3))
scatter.size = 2
ex = ipv.datasets.animated_stream.fetch().data
ex.shape
ex[:, ::, ::4].shape
ipv.figure()
ipv.style.use('dark')
quiver = ipv.quiver(*ipv.datasets.animated_stream.fetch().data[:,::,::4], size=5)
ipv.animation_control(quiver, interval=200)
ipv.show()
ipv.style.use('light')
ipv.style.use('light')
quiver.geo = "cat"
N = 1000*1000
x, y, z = np.random.random((3, N)).astype('f4')
ipv.figure()
s = ipv.scatter(x, y, z, size=0.2)
ipv.show()
ipv.save("3d-example-plot.html")
!open 3d-example-plot.html
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Transfer learning vs. nightlights
Step2: Panel B
Step3: Randomization tests
Step4: Panel D
|
<ASSISTANT_TASK:>
Python Code:
from fig_utils import *
import matplotlib.pyplot as plt
import time
%matplotlib inline
country_path = '../data/output/LSMS/pooled/'
percentiles = [0.05, 0.10, 0.15, 0.20, 0.25, 0.30, 0.35,
0.40, 0.45, 0.50, 0.55, 0.60, 0.65, 0.70,
0.75, 0.80, 0.85, 0.90, 0.95, 1.00]
survey = 'lsms'
dimension = 10
k = 10
k_inner = 5
trials = 5
poverty_line = 1.90
multiples = [1, 2, 3]
t0 = time.time()
compare_models(country_path, survey, percentiles, dimension, k, k_inner,
trials, poverty_line, multiples)
t1 = time.time()
print 'Finished in {} seconds'.format(t1-t0)
country_path = '../data/output/DHS/pooled/'
percentiles = [0.05, 0.10, 0.15, 0.20, 0.25, 0.30, 0.35,
0.40, 0.45, 0.50, 0.55, 0.60, 0.65, 0.70,
0.75, 0.80, 0.85, 0.90, 0.95, 1.00]
survey = 'dhs'
dimension = 10
k = 5
k_inner = 3
trials = 3
t0 = time.time()
compare_models(country_path, survey, percentiles, dimension, k, k_inner,
trials, poverty_line, multiples)
t1 = time.time()
print 'Finished in {} seconds'.format(t1-t0)
# Parameters
country_names = ['nigeria', 'tanzania', 'uganda', 'malawi', 'pooled']
country_paths = ['../data/output/LSMS/nigeria/',
'../data/output/LSMS/tanzania/',
'../data/output/LSMS/uganda/',
'../data/output/LSMS/malawi/',
'../data/output/LSMS/pooled/']
survey = 'lsms'
dimension = 100
k = 3
k_inner = 3
points = 10
alpha_low = 0
alpha_high = 3
trials = 100
run_randomization_test(country_names, country_paths, survey, dimension,
k, k_inner, points, alpha_low, alpha_high, trials)
# Parameters
country_names = ['nigeria', 'tanzania', 'uganda', 'malawi', 'rwanda', 'pooled']
country_paths = ['../data/output/DHS/nigeria/',
'../data/output/DHS/tanzania/',
'../data/output/DHS/uganda/',
'../data/output/DHS/malawi/',
'../data/output/DHS/rwanda/',
'../data/output/DHS/pooled/']
survey = 'dhs'
dimension = 100
k = 3
k_inner = 3
points = 10
alpha_low = 0
alpha_high = 3
trials = 100
run_randomization_test(country_names, country_paths, survey, dimension,
k, k_inner, points, alpha_low, alpha_high, trials)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initial set-up
Step2: Plot steady-state and tau functions
Step3: Combine model and experiments to produce
Step4: Set up prior ranges for each parameter in the model.
Step5: Run ABC calibration
Step6: Analysis of results
|
<ASSISTANT_TASK:>
Python Code:
import os, tempfile
import logging
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from ionchannelABC import theoretical_population_size
from ionchannelABC import IonChannelDistance, EfficientMultivariateNormalTransition, IonChannelAcceptor
from ionchannelABC.experiment import setup
from ionchannelABC.visualization import plot_sim_results, plot_kde_matrix_custom
import myokit
from pyabc import Distribution, RV, History, ABCSMC
from pyabc.epsilon import MedianEpsilon
from pyabc.sampler import MulticoreEvalParallelSampler, SingleCoreSampler
from pyabc.populationstrategy import ConstantPopulationSize
from experiments.isus_wang import wang_act_and_kin
from experiments.isus_courtemanche import courtemanche_deact
from experiments.isus_firek import (firek_inact)
from experiments.isus_nygren import (nygren_inact_kin,
nygren_rec)
modelfile = 'models/standardised_isus.mmt'
from ionchannelABC.visualization import plot_variables
sns.set_context('poster')
V = np.arange(-80, 40, 0.01)
sta_par_map = {'ri': 'isus.r_ss',
'si': 'isus.s_ss',
'rt': 'isus.tau_r',
'st': 'isus.tau_s'}
f, ax = plot_variables(V, sta_par_map, modelfile, figshape=(2,2))
observations, model, summary_statistics = setup(modelfile,
wang_act_and_kin,
courtemanche_deact,
firek_inact,
nygren_inact_kin,
nygren_rec)
assert len(observations)==len(summary_statistics(model({})))
g = plot_sim_results(modelfile,
wang_act_and_kin,
courtemanche_deact,
firek_inact,
nygren_inact_kin,
nygren_rec)
limits = {'log_isus.p_1': (-7, 3),
'isus.p_2': (1e-7, 0.4),
'log_isus.p_3': (-7, 3),
'isus.p_4': (1e-7, 0.4),
'log_isus.p_5': (-7, 3),
'isus.p_6': (1e-7, 0.4),
'log_isus.p_7': (-7, 3),
'isus.p_8': (1e-7, 0.4)}
prior = Distribution(**{key: RV("uniform", a, b - a)
for key, (a,b) in limits.items()})
# Test this works correctly with set-up functions
assert len(observations) == len(summary_statistics(model(prior.rvs())))
db_path = ("sqlite:///" + os.path.join(tempfile.gettempdir(), "standardised_isus.db"))
logging.basicConfig()
abc_logger = logging.getLogger('ABC')
abc_logger.setLevel(logging.DEBUG)
eps_logger = logging.getLogger('Epsilon')
eps_logger.setLevel(logging.DEBUG)
pop_size = theoretical_population_size(2, len(limits))
print("Theoretical minimum population size is {} particles".format(pop_size))
abc = ABCSMC(models=model,
parameter_priors=prior,
distance_function=IonChannelDistance(
exp_id=list(observations.exp_id),
variance=list(observations.variance),
delta=0.05),
population_size=ConstantPopulationSize(1000),
summary_statistics=summary_statistics,
transitions=EfficientMultivariateNormalTransition(),
eps=MedianEpsilon(initial_epsilon=100),
sampler=MulticoreEvalParallelSampler(n_procs=16),
acceptor=IonChannelAcceptor())
obs = observations.to_dict()['y']
obs = {str(k): v for k, v in obs.items()}
abc_id = abc.new(db_path, obs)
history = abc.run(minimum_epsilon=0., max_nr_populations=100, min_acceptance_rate=0.01)
history = abc.run(minimum_epsilon=0., max_nr_populations=100, min_acceptance_rate=0.01)
history = History('sqlite:///results/standardised/isus/standardised_isus.db')
df, w = history.get_distribution(m=0)
df.describe()
sns.set_context('poster')
mpl.rcParams['font.size'] = 14
mpl.rcParams['legend.fontsize'] = 14
g = plot_sim_results(modelfile,
wang_act_and_kin,
courtemanche_deact,
firek_inact,
nygren_inact_kin,
nygren_rec,
df=df, w=w)
plt.tight_layout()
import pandas as pd
N = 100
sta_par_samples = df.sample(n=N, weights=w, replace=True)
sta_par_samples = sta_par_samples.set_index([pd.Index(range(N))])
sta_par_samples = sta_par_samples.to_dict(orient='records')
sns.set_context('poster')
mpl.rcParams['font.size'] = 14
mpl.rcParams['legend.fontsize'] = 14
f, ax = plot_variables(V, sta_par_map,
modelfile,
[sta_par_samples],
figshape=(2,2))
plt.tight_layout()
m,_,_ = myokit.load(modelfile)
sns.set_context('paper')
g = plot_kde_matrix_custom(df, w, limits=limits)
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now that we have our Dask DataFrame, we can start to create the EntitySet. Inferring Woodwork logical types for the columns in a Dask dataframe can be computationally expensive. To avoid this expense, logical type inference can be skipped by supplying a dictionary of logical types using the logical_types parameter when calling es.add_dataframe(). Logical types can be specified as Woodwork LogicalType classes, or their equivalent string representation. For more information refer to the Woodwork Typing in Featuretools guide.
Step2: Notice that when we print our EntitySet, the number of rows for the DataFrame named dask_input_df is returned as a Dask Delayed object. This is because obtaining the length of a Dask DataFrame may require an expensive compute operation to sum up the lengths of all the individual partitions that make up the DataFrame and that operation is not performed by default.
Step3: This feature matrix can be saved to disk or computed and brought into memory, using the appropriate Dask DataFrame methods.
Step4: While this is a simple example to illustrate the process of using Dask DataFrames with Featuretools, this process will also work with an EntitySet containing multiple dataframes, as well as with aggregation primitives.
Step5: Primitive Limitations
|
<ASSISTANT_TASK:>
Python Code:
import featuretools as ft
import pandas as pd
import dask.dataframe as dd
id = [0, 1, 2, 3, 4]
values = [12, -35, 14, 103, -51]
df = pd.DataFrame({"id": id, "values": values})
dask_df = dd.from_pandas(df, npartitions=2)
dask_df
from woodwork.logical_types import Double, Integer
es = ft.EntitySet(id="dask_es")
es = es.add_dataframe(dataframe_name="dask_input_df",
dataframe=dask_df,
index="id",
logical_types={"id": Integer, "values": Double})
es
feature_matrix, features = ft.dfs(entityset=es,
target_dataframe_name="dask_input_df",
trans_primitives=["negate"],
max_depth=1)
feature_matrix
fm_computed = feature_matrix.compute()
fm_computed
primitives_df = ft.list_primitives()
dask_compatible_df = primitives_df[primitives_df["dask_compatible"] == True]
dask_compatible_df.head()
dask_compatible_df.tail()
values_dict = {'values': [12, 103]}
es.add_interesting_values(dataframe_name='dask_input_df', values=values_dict)
es['dask_input_df'].ww.columns['values'].metadata
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We typically need the following libraries
|
<ASSISTANT_TASK:>
Python Code:
model = LogisticRegression()
model.fit(dataset.data, dataset.target)
expected = dataset.target
predicted = model.predict(dataset.data)
# classification metrics report builds a text report showing the main classification metrics
# In pattern recognition and information retrieval with binary classification,
# precision (also called positive predictive value) is the fraction of retrieved instances that are relevant,
# while recall (also known as sensitivity) is the fraction of relevant instances that are retrieved.
# Both precision and recall are therefore based on an understanding and measure of relevance.
# Suppose a computer program for recognizing dogs in scenes from a video identifies 7 dogs in a scene containing 9 dogs
# and some cats. If 4 of the identifications are correct, but 3 are actually cats, the program's precision is 4/7
# while its recall is 4/9.
# In statistical analysis of binary classification, the F1 score (also F-score or F-measure) is a measure of a test's accuracy.
# It considers both the precision p and the recall r of the test to compute the score:
# p is the number of correct positive results divided by the number of all positive results,
# and r is the number of correct positive results divided by the number of positive results that should have been returned.
# The F1 score can be interpreted as a weighted average of the precision and recall
print(metrics.classification_report(expected, predicted))
# Confusion matrix
# https://en.wikipedia.org/wiki/Confusion_matrix
# In the field of machine learning, a confusion matrix is a table layout that allows visualization of the performance
# of an algorithm, typically a supervised learning one.
# Each column of the matrix represents the instances in a predicted class
# while each row represents the instances in an actual class (or vice-versa)
# If a classification system has been trained to distinguish between cats, dogs and rabbits,
# a confusion matrix will summarize the results of testing the algorithm for further inspection.
# Assuming a sample of 27 animals — 8 cats, 6 dogs, and 13 rabbits, the resulting confusion matrix
# could look like the table below:
Image("http://www.opengardensblog.futuretext.com/wp-content/uploads/2016/01/confusion-matrix.jpg")
# In this confusion matrix, of the 8 actual cats, the system predicted that three were dogs,
# and of the six dogs, it predicted that one was a rabbit and two were cats.
# We can see from the matrix that the system in question has trouble distinguishing between cats and dogs,
# but can make the distinction between rabbits and other types of animals pretty well.
# All correct guesses are located in the diagonal of the table, so it's easy to visually
# inspect the table for errors, as they will be represented by values outside the diagonal.
print (metrics.confusion_matrix(expected, predicted))
import pandas as pd
integers_list = [1,3,5,7,9] # lists are seperated by square brackets
print(integers_list)
tuple_integers = 1,3,5,7,9 #tuples are seperated by commas and are immutable
print(tuple_integers)
tuple_integers[0] = 11
#Python strings can be in single or double quotes
string_ds = "Data Science"
string_iot = "Internet of Things"
string_dsiot = string_ds + " for " + string_iot
print (string_dsiot)
len(string_dsiot)
# sets are unordered collections with no duplicate elements
prog_languages = set(['Python', 'Java', 'Scala'])
prog_languages
# Dictionaies are comma seperated key value pairs seperated by braces
dict_marks = {'John':95, 'Mark': 100, 'Anna': 99}
dict_marks['John']
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Connecting to National Data Service
Step2: Deterioration Curves of West United states
Step3: Filtering Null Values, Converting JSON format to Dataframes, and Calculating Mean Condition Ratings of Deck, Superstructure, and Substucture
Step4: The calculated Mean Condition Ratings of deck, superstructure, and substructure are now stored in separate dataframes for convenience.
Step5: Deterioration Curves - Deck
Step6: Deterioration Curve - Superstructure
Step7: Deterioration Curves - Substructure
Step8: The mean deterioration curve can be a measure to evaluate the rate of deterioration. If the condition rating of a bridge lies above the deterioration curve then the bridge is deteriorating at a slower pace than mean deterioration of the bridges, and if the condition rating of the bridge lies below the deterioration curve of the bridges then it is deteriorating at a faster pace than the mean deterioration of the bridges.
Step9: Classification of all the bridges in the West United States
|
<ASSISTANT_TASK:>
Python Code:
import pymongo
from pymongo import MongoClient
import time
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import csv
Client = MongoClient("mongodb://bridges:readonly@nbi-mongo.admin/bridge")
db = Client.bridge
collection = db["bridges"]
def getData(state):
    """Fetch 1992-2016 NBI condition-rating records for one state.

    Queries the module-level ``collection`` (NBI MongoDB) for surveys in
    1992-2016, keeps only the fields needed downstream, and adds a derived
    ``Age`` column (survey year minus construction year).
    """
    pipeline = [
        {"$match": {"$and": [{"year": {"$gt": 1991, "$lt": 2017}},
                             {"stateCode": state}]}},
        {"$project": {"_id": 0,
                      "structureNumber": 1,
                      "yearBuilt": 1,
                      "deck": 1,            # rating of deck
                      "year": 1,            # survey year
                      "substructure": 1,    # rating of substructure
                      "superstructure": 1,  # rating of superstructure
                      }},
    ]
    records = pd.DataFrame(list(collection.aggregate(pipeline)))
    records['Age'] = records['year'] - records['yearBuilt']
    return records
def getMeanRatings(state, startAge, endAge, startYear, endYear):
    """Compute per-age mean/std condition ratings for one state's bridges.

    For every bridge age in ``[startAge, endAge]``, the deck, substructure
    and superstructure ratings of all bridges surveyed in
    ``[startYear, endYear]`` are averaged.

    Returns
    -------
    list of six lists, in the order
        [MeanDeck, StdDeck, MeanSubstructure, StdSubstructure,
         MeanSuperstructure, StdSuperstructure],
    one entry per age.

    Notes
    -----
    * Ages with no surveyed bridges yield NaN (``np.mean`` of an empty
      list), matching the original behaviour.
    * Fixes over the original: removed the unused ``maxAge`` variable and
      the dead commented-out filters; collapsed the three copy-pasted
      mean/std computations into one loop.
    """
    conditionRatings = getData(state)
    conditionRatings = conditionRatings[['structureNumber', 'Age', 'superstructure',
                                         'deck', 'substructure', 'year']]
    # Drop records whose ratings are non-numeric codes ('N', 'NA').
    for component in ('superstructure', 'substructure', 'deck'):
        conditionRatings = conditionRatings.loc[~conditionRatings[component].isin(['N', 'NA'])]
    # Restrict to the requested survey-year window.
    surveyed = conditionRatings.loc[
        conditionRatings['year'].isin(list(range(startYear, endYear + 1)))]
    MeanDeck, StdDeck = [], []
    MeanSubstructure, StdSubstructure = [], []
    MeanSuperstructure, StdSuperstructure = [], []
    for age in range(startAge, endAge + 1):
        ageSlice = surveyed.loc[surveyed['Age'] == age]
        for column, means, stds in (('deck', MeanDeck, StdDeck),
                                    ('substructure', MeanSubstructure, StdSubstructure),
                                    ('superstructure', MeanSuperstructure, StdSuperstructure)):
            values = [int(rating) for rating in ageSlice[column]]
            means.append(np.mean(values))
            stds.append(np.std(values))
    return [MeanDeck, StdDeck, MeanSubstructure, StdSubstructure,
            MeanSuperstructure, StdSuperstructure]
states = ['08','56','30','16','53','41','49','32','06','02','15']
# state code to state abbreviation
stateNameDict = {'25':'MA',
'04':'AZ',
'08':'CO',
'38':'ND',
'09':'CT',
'19':'IA',
'26':'MI',
'48':'TX',
'35':'NM',
'17':'IL',
'51':'VA',
'23':'ME',
'16':'ID',
'36':'NY',
'56':'WY',
'29':'MO',
'39':'OH',
'28':'MS',
'11':'DC',
'21':'KY',
'18':'IN',
'06':'CA',
'47':'TN',
'12':'FL',
'24':'MD',
'34':'NJ',
'46':'SD',
'13':'GA',
'55':'WI',
'30':'MT',
'54':'WV',
'15':'HI',
'32':'NV',
'37':'NC',
'10':'DE',
'33':'NH',
'44':'RI',
'50':'VT',
'42':'PA',
'05':'AR',
'20':'KS',
'45':'SC',
'22':'LA',
'40':'OK',
'72':'PR',
'41':'OR',
'27':'MN',
'53':'WA',
'01':'AL',
'31':'NE',
'02':'AK',
'49':'UT'
}
def getBulkMeanRatings(states, stateNameDict):
    """Build six Age-indexed DataFrames — mean and std of deck,
    superstructure and substructure ratings — with one column per state,
    covering bridge ages 1-60.
    """
    ages = list(range(1, 61))
    df_mean_deck = pd.DataFrame({'Age': ages})
    df_mean_sup = pd.DataFrame({'Age': ages})
    df_mean_sub = pd.DataFrame({'Age': ages})
    df_std_deck = pd.DataFrame({'Age': ages})
    df_std_sup = pd.DataFrame({'Age': ages})
    df_std_sub = pd.DataFrame({'Age': ages})
    for state in states:
        stats = getMeanRatings(state, 1, 100, 1992, 2016)
        meanDeck, stdDeck, meanSub, stdSub, meanSup, stdSup = stats
        name = stateNameDict[state]
        # Curves are computed out to age 100 but only ages 1-60 are kept.
        df_mean_deck[name] = meanDeck[:60]
        df_mean_sup[name] = meanSup[:60]
        df_mean_sub[name] = meanSub[:60]
        df_std_deck[name] = stdDeck[:60]
        df_std_sup[name] = stdSup[:60]
        df_std_sub[name] = stdSub[:60]
    return df_mean_deck, df_mean_sup, df_mean_sub, df_std_deck, df_std_sup, df_std_sub
df_mean_deck, df_mean_sup, df_mean_sub, df_std_deck, df_std_sup, df_std_sub = getBulkMeanRatings(states, stateNameDict)
%matplotlib inline
palette = [ 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey',
'red','silver','purple', 'gold', 'black','olive' ]
plt.figure(figsize = (10,8))
index = 0
for state in states:
index = index + 1
stateName = stateNameDict[state]
plt.plot(df_mean_deck['Age'],df_mean_deck[stateName], color = palette[index])
plt.legend([stateNameDict[state] for state in states],loc='upper right', ncol = 2)
plt.xlim(1,60)
plt.ylim(1,9)
plt.title('Mean Deck Rating Vs Age')
plt.xlabel('Age')
plt.ylabel('Mean Deck Rating')
plt.figure(figsize = (16,12))
plt.xlabel('Age')
plt.ylabel('Mean')
# Initialize the figure
plt.style.use('seaborn-darkgrid')
# create a color palette
#palette = plt.get_cmap('gist_ncar')
palette = [
'blue', 'blue', 'green','magenta','cyan','brown','grey','red','silver','purple','gold','black','olive'
]
# multiple line plot
num=1
for column in df_mean_deck.drop('Age', axis=1):
# Find the right spot on the plot
plt.subplot(4,3, num)
# Plot the lineplot
plt.plot(df_mean_deck['Age'], df_mean_deck[column], marker='', color=palette[num], linewidth=4, alpha=0.9, label=column)
# Same limits for everybody!
plt.xlim(1,60)
plt.ylim(1,9)
# Not ticks everywhere
if num in range(10) :
plt.tick_params(labelbottom='off')
if num not in [1,4,7,10]:
plt.tick_params(labelleft='off')
# Add title
plt.title(column, loc='left', fontsize=12, fontweight=0, color=palette[num])
plt.text(30, -1, 'Age', ha='center', va='center')
plt.text(1, 4, 'Mean Deck Rating', ha='center', va='center', rotation='vertical')
num = num + 1
# general title
plt.suptitle("Mean Deck Rating vs Age \nIndividual State Deterioration Curves", fontsize=13, fontweight=0, color='black', style='italic', y=1.02)
palette = [ 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey',
'red','silver','purple', 'gold', 'black','olive' ]
plt.figure(figsize = (10,8))
index = 0
for state in states:
index = index + 1
stateName = stateNameDict[state]
plt.plot(df_mean_sup['Age'],df_mean_sup[stateName], color = palette[index])
plt.legend([stateNameDict[state] for state in states],loc='upper right', ncol = 2)
plt.xlim(1,60)
plt.ylim(1,9)
plt.title('Mean Superstructure Rating Vs Age')
plt.xlabel('Age')
plt.ylabel('Mean Superstructure Rating')
plt.figure(figsize = (16,12))
plt.xlabel('Age')
plt.ylabel('Mean')
# Initialize the figure
plt.style.use('seaborn-darkgrid')
# create a color palette
#palette = plt.get_cmap('gist_ncar')
palette = [
'blue',
'blue',
'green',
'magenta',
'cyan',
'brown',
'grey',
'red',
'silver',
'purple',
'gold',
'black',
'olive'
]
# multiple line plot
num=1
for column in df_mean_sup.drop('Age', axis=1):
# Find the right spot on the plot
plt.subplot(4,3, num)
# Plot the lineplot
plt.plot(df_mean_sup['Age'], df_mean_sup[column], marker='', color=palette[num], linewidth=4, alpha=0.9, label=column)
# Same limits for everybody!
plt.xlim(1,60)
plt.ylim(1,9)
# Not ticks everywhere
if num in range(10) :
plt.tick_params(labelbottom='off')
if num not in [1,4,7,10]:
plt.tick_params(labelleft='off')
# Add title
plt.title(column, loc='left', fontsize=12, fontweight=0, color=palette[num])
plt.text(30, -1, 'Age', ha='center', va='center')
plt.text(1, 4, 'Mean Superstructure Rating', ha='center', va='center', rotation='vertical')
num = num + 1
# general title
plt.suptitle("Mean Superstructure Rating vs Age \nIndividual State Deterioration Curves", fontsize=13, fontweight=0, color='black', style='italic', y=1.02)
palette = [ 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey',
'red','silver','purple', 'gold', 'black','olive' ]
plt.figure(figsize = (10,8))
index = 0
for state in states:
index = index + 1
stateName = stateNameDict[state]
plt.plot(df_mean_sup['Age'],df_mean_sup[stateName], color = palette[index])
plt.legend([stateNameDict[state] for state in states],loc='upper right', ncol = 2)
plt.xlim(1,60)
plt.ylim(1,9)
plt.title('Mean Superstructure Rating Vs Age')
plt.xlabel('Age')
plt.ylabel('Mean Superstructure Rating')
palette = [ 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey',
'red','silver','purple', 'gold', 'black','olive' ]
plt.figure(figsize = (10,8))
index = 0
for state in states:
index = index + 1
stateName = stateNameDict[state]
plt.plot(df_mean_sub['Age'],df_mean_sub[stateName], color = palette[index], linewidth=4)
plt.legend([stateNameDict[state] for state in states],loc='upper right', ncol = 2)
plt.xlim(1,60)
plt.ylim(1,9)
plt.title('Mean Substructure Rating Vs Age')
plt.xlabel('Age')
plt.ylabel('Mean Substructure Rating')
plt.figure(figsize = (16,12))
plt.xlabel('Age')
plt.ylabel('Mean')
# Initialize the figure
plt.style.use('seaborn-darkgrid')
# create a color palette
palette = [
'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey', 'red', 'silver', 'purple', 'gold', 'black','olive'
]
# multiple line plot
num=1
for column in df_mean_sub.drop('Age', axis=1):
# Find the right spot on the plot
plt.subplot(4,3, num)
# Plot the lineplot
plt.plot(df_mean_sub['Age'], df_mean_sub[column], marker='', color=palette[num], linewidth=4, alpha=0.9, label=column)
# Same limits for everybody!
plt.xlim(1,60)
plt.ylim(1,9)
# Not ticks everywhere
if num in range(7) :
plt.tick_params(labelbottom='off')
if num not in [1,4,7] :
plt.tick_params(labelleft='off')
# Add title
plt.title(column, loc='left', fontsize=12, fontweight=0, color=palette[num])
plt.text(30, -1, 'Age', ha='center', va='center')
plt.text(1, 4, 'Mean Substructure Rating', ha='center', va='center', rotation='vertical')
num = num + 1
# general title
plt.suptitle("Mean Substructure Rating vs Age \nIndividual State Deterioration Curves", fontsize=13, fontweight=0, color='black', style='italic', y=1.02)
def getDataOneYear(state):
    """Fetch the 2016 NBI survey records for one state.

    Same shape as :func:`getData`, but restricted to the single 2016
    survey year and with two extra projected fields (structure type and
    wearing surface). Records with non-numeric deck/substructure/
    superstructure ratings ('N', 'NA') are dropped before returning.
    """
    pipeline = [
        {"$match": {"$and": [{"year": {"$gt": 2015, "$lt": 2017}},
                             {"stateCode": state}]}},
        # BUG FIX: the original dict literal listed 'Structure Type' twice;
        # the later plain `'Structure Type': 1` silently overwrote the
        # `$structureTypeMain...` alias (later keys win in a Python dict
        # literal), so the field was never aliased. The duplicate is removed.
        {"$project": {"_id": 0,
                      "Structure Type": "$structureTypeMain.typeOfDesignConstruction",
                      "Type of Wearing Surface": "$wearingSurface/ProtectiveSystem.typeOfWearingSurface",
                      "structureNumber": 1,
                      "yearBuilt": 1,
                      "deck": 1,            # rating of deck
                      "year": 1,            # survey year
                      "substructure": 1,    # rating of substructure
                      "superstructure": 1,  # rating of superstructure
                      }},
    ]
    conditionRatings = pd.DataFrame(list(collection.aggregate(pipeline)))
    conditionRatings['Age'] = conditionRatings['year'] - conditionRatings['yearBuilt']
    for component in ('deck', 'substructure', 'superstructure'):
        conditionRatings = conditionRatings.loc[~conditionRatings[component].isin(['N', 'NA'])]
    return conditionRatings
df_mean_deck
## Condition ratings of all states concatenated into one single data frame ConditionRatings
frames = []
for state in states:
f = getDataOneYear(state)
frames.append(f)
df_nbi_w = pd.concat(frames)
df_nbi_w = df_nbi_w.loc[~df_nbi_w['deck'].isin(['N','NA'])]
df_nbi_w = df_nbi_w.loc[~df_nbi_w['substructure'].isin(['N','NA'])]
df_nbi_w = df_nbi_w.loc[~df_nbi_w['superstructure'].isin(['N','NA'])]
df_nbi_w = df_nbi_w.loc[~df_nbi_w['Type of Wearing Surface'].isin(['6'])]
D = {'slow':,'fast':,'average':}
def label_bridges(rating, mean_age_conditionRating, std_age_conditionRating):
    """Classify one bridge's rating against the age-matched mean/std curve.

    Returns one of:
      * 'Average Deterioration' -- within one std of the mean
        (mean - std < rating <= mean + std)
      * 'Slow Deterioration'    -- more than one std above the mean
      * 'Fast Deterioration'    -- more than one std below the mean
      * 'Null Value'            -- fallthrough: rating exactly equal to
        mean - std, or NaN mean/std (all comparisons False)
    """
    value = int(rating)
    lower = mean_age_conditionRating - std_age_conditionRating
    upper = mean_age_conditionRating + std_age_conditionRating
    if lower < value <= upper:
        return 'Average Deterioration'
    if value > upper:
        return 'Slow Deterioration'
    if value < lower:
        return 'Fast Deterioration'
    return 'Null Value'
stat = ['08','56','30','16','53','41','49','32','06','02','15']
num = 1
slow = []
fast = []
avg = []
for st in stat:
data = getDataOneYear(st)
print(st,len(data))
AgeList = list(data['Age'])
deckList = list(data['deck'])
stateName = stateNameDict[st]
labels = []
for deckRating, Age in zip (deckList,AgeList):
if Age < 60:
mean_age_conditionRating = df_mean_deck[stateName][Age]
std_age_conditionRating = df_std_deck[stateName][Age]
# deck Rating
#detScore = (int(deckRating) - mean_age_conditionRating) / std_age_conditionRating
# Substructure Rating
#Substructure_detScore = (int(deckRating) - mean_age_conditionRating) / std_age_conditionRating
# Superstructure Rating
#Superstructure_detScore = (int(deckRating) - mean_age_conditionRating) / std_age_conditionRating
labels.append(label_bridges(deckRating,mean_age_conditionRating,std_age_conditionRating)) # empty list to initialize a list
D = dict((x,labels.count(x)) for x in set(labels))
total = D['Fast Deterioration'] + D['Slow Deterioration'] + D['Average Deterioration']
slow_percent = (D['Slow Deterioration'] / total) * 100
fast_percent = (D['Fast Deterioration'] / total) * 100
avg_percent = (D['Average Deterioration'] / total) * 100
slow.append(slow_percent)
fast.append(fast_percent)
avg.append(avg_percent)
#stateName = stateNameDict[st]
plt.figure(figsize=(12,6))
plt.title(stateName)
plt.bar(range(len(D)), list(D.values()), align='center')
plt.xticks(range(len(D)), list(D.keys()))
plt.xlabel('Categories')
plt.ylabel('Number of Bridges')
plt.show()
num = num + 1
avg
def label_bridges(rating, labels):
    """Append a deterioration category for *rating* onto *labels* and return the list.

    Categories (relative to a mean/std condition-rating pair):
      * 'Average Deterioration' -- within one std of the mean
      * 'Slow Deterioration'    -- more than one std above the mean
      * 'Fast Deterioration'    -- more than one std below the mean
      * 'Null Value'            -- fallthrough (rating exactly mean - std,
        or NaN mean/std making every comparison False)

    NOTE(review): this redefines (shadows) the earlier three-argument
    label_bridges and instead reads mean_age_conditionRating /
    std_age_conditionRating as free (global) variables -- the caller must
    set both before each call. Confirm this is intentional; passing them
    as parameters would be safer.
    """
    if (mean_age_conditionRating - std_age_conditionRating) < int(rating) <= (mean_age_conditionRating + std_age_conditionRating):
        labels.append('Average Deterioration')
    elif int(rating) > (mean_age_conditionRating + std_age_conditionRating):
        labels.append('Slow Deterioration')
    elif int(rating) < (mean_age_conditionRating - std_age_conditionRating):
        labels.append('Fast Deterioration')
    else:
        labels.append('Null Value')
    return labels
# NOTE(review): in the original notebook cell, `for i in rating:` had no
# body (a SyntaxError) -- the intended loop logic was lost. A no-op body
# keeps the script runnable; restore the real work when known.
rating = [9, 2, 32, 32, 1, 3, 4, 5, 6, 7, 8, 9, 0, 32, 3]
for i in rating:
    pass  # TODO: original loop body is missing
# ---- Classification of all bridges in the West United States (deck) ----
# NOTE(review): the original cell's indentation was mangled; structure is
# reconstructed so the tally and single summary chart come after the loop.
stat = ['08', '56', '30', '16', '53', '41', '49', '32', '06', '02', '15']
AgeList = list(df_nbi_w['Age'])
deckList = list(df_nbi_w['deck'])
# BUG FIX: the original initialised `label = []` but appended to `labels`,
# so the tally silently reused whatever `labels` held from the earlier cell.
labels = []
for st in stat:
    stateName = stateNameDict[st]
    # NOTE(review): the original also fetched `deckR = getDataOneYear(st)`
    # here and never used it, and computed unused `detScore` and `num`;
    # those dead (and slow) statements have been removed.
    # NOTE(review): this inner loop walks the FULL West-US bridge list for
    # every state, so each bridge is labelled once per state curve (11x).
    # Confirm whether per-state bridge subsets were intended instead.
    for deckRating, Age in zip(deckList, AgeList):
        if Age < 60:
            mean_age_conditionRating = df_mean_deck[stateName][Age]
            std_age_conditionRating = df_std_deck[stateName][Age]
            if (mean_age_conditionRating - std_age_conditionRating) < int(deckRating) <= (mean_age_conditionRating + std_age_conditionRating):
                labels.append('Average Deterioration')
            elif int(deckRating) > (mean_age_conditionRating + std_age_conditionRating):
                labels.append('Slow Deterioration')
            elif int(deckRating) < (mean_age_conditionRating - std_age_conditionRating):
                labels.append('Fast Deterioration')
            else:
                labels.append('Null Value')
# Tally each category and plot the overall distribution.
D = dict((x, labels.count(x)) for x in set(labels))
plt.figure(figsize=(12, 6))
plt.title('Classification of Bridges in West United States')
plt.bar(range(len(D)), list(D.values()), align='center')
plt.xticks(range(len(D)), list(D.keys()))
plt.xlabel('Categories of Bridges')
plt.ylabel('Number of Bridges')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Scatter plots
Step2: Histogram
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
a=np.random.randn(2,10)
x=a[0,:]
x
y=a[1,:]
y
plt.scatter(x,y,color='red')
plt.grid(True)
plt.box(False)
plt.xlabel('random x values')
plt.ylabel('random y values')
plt.title('TITLE')
a=np.random.randn(1,10)
x=a[0,:]
plt.hist(x,bins=15,histtype='bar',color='green')
plt.title('MY HISTOGRAM')
plt.xlabel('RANDOM X VALUES')
plt.ylabel('FREQUENCY')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Python Summary
Step2: Variables
Step3: Types
Step4: Conditionals
Step5: Loops
Step6: Functions
Step7: Numpy
Step8: Exercises
Step9: Print the variable a in all uppercase
Step10: Print the items in people as comma seperated values
Step11: Write a function that returns the first n prime numbers
Step12: Print the standard deviation of each row in a numpy array
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
print("Numpy:", np.__version__)
dir(np)
location = 'Bethesda'
zip_code = 20892
elevation = 71.9
print("We're in", location, "zip code", zip_code, ", ", elevation, "m above sea level")
print("We're in " + location + " zip code " + str(zip_code) + ", " + str(elevation) + "m above sea level")
print("We're in {0} zip code {1}, {2}m above sea level".format(location, zip_code, elevation))
print("We're in {0} zip code {1}, {2:.2e}m above sea level".format(location, zip_code, elevation))
# Sequences
# Lists
l = [1,2,3,4,4]
print("List:", l, len(l), 1 in l)
# Tuples
t = (1,2,3,4,4)
print("Tuple:", t, len(t), 1 in t)
# Sets
s = set([1,2,3,4,4])
print("Set:", s, len(s), 1 in s)
# Dictionaries
# Dictionaries map hashable values to arbitrary objects
d = {'a': 1, 'b': 2, 3: 's', 2.5: 't'}
print("Dictionary:", d, len(d), 'a' in d)
import random
if random.random() < 0.5:
print("Should be printed 50% of the time")
elif random.random() < 0.5:
print("Should be primted 25% of the time")
else:
print("Should be printed 25% of the time")
for i in ['a', 'b', 'c', 'd']:
print(i)
else:
print('Else')
for i in ['a', 'b', 'c', 'd']:
if i == 'b':
continue
elif i == 'd':
break
print(i)
else:
print('Else')
def is_even(n):
    """Return True if the integer *n* is even, False otherwise."""
    return n % 2 == 0
def first_n_squared_numbers(n=5):
    """Return the squares of 1..n as a list (default: first five squares)."""
    squares = []
    for k in range(1, n + 1):
        squares.append(k * k)
    return squares
def next_fibonacci(status=[]):
    """Return the next Fibonacci number on each successive call.

    Deliberately uses a mutable default argument as hidden state: the
    same `status` list is shared across calls, so repeated calls walk the
    sequence 1, 1, 2, 3, 5, ... A caller may also pass its own list to
    keep an independent sequence.
    """
    if len(status) >= 2:
        nxt = status[-1] + status[-2]
    else:
        nxt = 1
    status.append(nxt)
    return nxt
def accepts_anything(*args, **kwargs):
    """Demo of variadic parameters: print each positional argument, the
    type of the collected args tuple, then each keyword name/value pair."""
    for positional in args:
        print(positional)
    print(type(args))
    for key, value in kwargs.items():
        print(key, value)
# For quick and simple functions a lambda expression can be a useful approach.
# Standard functions are always a valid alternative and often make code clearer.
f = lambda x: x**2
print(f(5))
people = [{'name': 'Alice', 'age': 30},
{'name': 'Bob', 'age': 35},
{'name': 'Charlie', 'age': 35},
{'name': 'Dennis', 'age': 25}]
print(people)
people.sort(key=lambda x: x['age'])
print(people)
a = np.array([[1,2,3], [4,5,6], [7,8,9]])
print(a)
print(a[1:,1:])
a = a + 2
print(a)
a = a + np.array([1,2,3])
print(a)
a = a + np.array([[10],[20],[30]])
print(a)
print(a.mean(), a.mean(axis=0), a.mean(axis=1))
import matplotlib.pyplot as plt
%matplotlib inline
x = np.linspace(0, 3*2*np.pi, 500)
plt.plot(x, np.sin(x))
plt.show()
a = "The quick brown fox jumps over the lazy dog"
b = 1234567890.0
print(a.upper())
upper = True
soln = ''
for i in a:
if upper == True:
soln += i.upper()
else:
soln += i
if i != ' ':
upper = not upper
print(soln)
def reverse_string(s):
    """Return *s* with its characters in reverse order."""
    return s[::-1]
words = a.split(' ')
print(' '.join([reverse_string(i) for i in words]))
print('{0:.4e}'.format(b))
people = [{'name': 'Charlie', 'age': 35},
{'name': 'Alice', 'age': 30},
{'name': 'Eve', 'age': 20},
{'name': 'Gail', 'age': 30},
{'name': 'Dennis', 'age': 25},
{'name': 'Bob', 'age': 35},
{'name': 'Fred', 'age': 25},]
for i in people:
print('{0},{1}'.format(i['name'], i['age']))
for i in people:
print('{0:<10},{1}'.format(i['name'], i['age']))
def key_age(x):
return x['age']
people.sort(key=key_age)
print(people)
people.sort(key=lambda x: x['name'])
people.sort(key=key_age)
print(people)
coords = [(0,0), (10,5), (10,10), (5,10), (3,3), (3,7), (12,3), (10,11)]
def first_n_primes(n):
    """Return a list of the first *n* prime numbers.

    Trial division: a candidate is prime iff no previously found prime
    divides it (equivalent to the original for/else scan).
    """
    primes = []
    candidate = 2
    while len(primes) < n:
        if all(candidate % p for p in primes):
            primes.append(candidate)
        candidate += 1
    return primes
def distance(coords):
    """Return the total Euclidean path length along the ordered points in
    *coords* (a sequence of (x, y) pairs); 0 for fewer than two points."""
    total = 0
    for idx in range(1, len(coords)):
        dx = coords[idx][0] - coords[idx - 1][0]
        dy = coords[idx][1] - coords[idx - 1][1]
        total += (dx * dx + dy * dy) ** 0.5
    return total
assert distance([(0,0), (10,0)]) == 10
assert distance([(0,0), (-10, 0)]) == 10
assert distance([(0,0), (3,4)]) == 5
import itertools
all_options = []
for option in itertools.permutations(coords, len(coords)):
all_options.append((option, distance(option)))
all_options.sort(key=lambda x: x[1])
print(all_options[0])
np.random.seed(0)
a = np.random.randint(0, 100, size=(10,20))
print(a.std())
print(a)
print(a.std(axis=1))
print(a[a>90])
import matplotlib.pyplot as plt
%matplotlib inline
fig, axes = plt.subplots(5, 2, figsize=(16,12))
for i, ax in zip(range(a.shape[0]), axes.flatten()):
ax.plot(a[i])
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code::
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
df['tweet'] = df['tweet'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stop_words)]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2> Erstes Neuronales Netz - Single Layer</h2>
Step2: <h2> Initialisierung eines zweiten Neuronalen Netzes - Multi Layer</h2>
Step3: <h2>Erzeugen von Varianten des Modells nn2</h2>
Step4: <h2>Erzeugen von Varianten des Modells nn2</h2>
|
<ASSISTANT_TASK:>
Python Code:
#importieren sie die Bibliothek pandas
import pandas as pd
#importieren sie matplotlib.pyplot as plt
import matplotlib.pyplot as plt
%matplotlib inline
#laden Sie die Datei "daten.csv" auf Ihren Hub
#laden Sie die Datei "daten.csv" in einen Datframe df
#Einlesen der Dateien mit pd.read_csv()
df=pd.read_csv("daten.csv")
#Betrachten Sie die ersten Daten des Dataframes df (default=5)
df.head()
#Betrachten Sie 20 Datensätze am Ende des DataFrames
df.tail(20)
#Erzeugen Sie eine Beschreibung der Statistischen Daten mit .describe()
df.describe()
#Erzeugen Sie einen einfachen Scatterplot
plt.scatter(df.x1,df.x2, c=df.label)
#importieren Sie tensorflow as tf
# Laden der Bibliotheken
import tensorflow as tf
#importieren Sie das keras.model Sequential()
from keras.models import Sequential
#importieren Sie die Keras Layer Dense und Activation
from keras.layers import Dense
#Auslesen der Daten und Labels aus dem Dataframe in ein array
x_input = df[['x1','x2']].values
y_input = df[['label']].values
#Ausgabe der Datenwerte
print(x_input)
#Ausgabe der labels
print(y_input)
#Initialisierung des Neuronalen Netzes nn
nn=Sequential()
#Hinzufügen eines Dense Layers mit einem Neuron (units = 1)
#Im ersten Layer geben eivel Features übergeben werden (input_dim=2)
#Als Aktivierungsfunktion verwenden wir die Sigmoid-Funktion
nn.add(Dense(units=1,input_dim=2,activation = 'sigmoid'))
# Compilieren des Modells.
# Wir verwenden den optimizer 'adam'.
# Es handelt sich um eine binäre Klassifikation, deshalb verwenden
# wir 'binary_crossentropy' als Loss-Funktion und
# betrachten als Qualitätsmaßstab die Accuracy
nn.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
# Überprüfen der Konfiguration
nn.summary()
# Trainieren des Modells mit .fit
# eventuell validation_split = 0.1 einfügen, d.h. 10% der Daten, werden zur Validierung verwendet.
epoch_num = 1000
# batch_num = 56 eventuell als Beispiel ergänzen
history = nn.fit(x_input, y_input, epochs=epoch_num,validation_split=0.1, verbose = 1)
#Evaluierung der Ergebnisse
nn.evaluate(x_input,y_input)
#Test mit Daten
x_test=[[2,3],[6,4],[5,5],[4.321,2.657],[2.6487,4.5192],[1.5438,2.4443],[1.8991,4.2409],[6.1676,3.6457],[7.567,3.6381],[6.4321,5.4453]]
ergebnis = nn.predict(x_test)
print(ergebnis)
#Ausgabe der Anzahl der Werte in der Variablen ergebnis
len(ergebnis)
# Schleife zur Ausgabe der entsprechenden Labels
i = 0
while i< len(ergebnis):
print("Das Wertepaar : " +str(x_test[i]) + "hat das Ergebnis " + str(ergebnis[i,0]))
if ergebnis[i,0]>0.5:
print("und damit das Label 1")
else:
print("und damit das Label 0")
i +=1
# Collect the per-epoch metrics that Keras recorded during training.
hist = history.history
hist.keys()
train_acc = hist['accuracy']
valid_acc = hist['val_accuracy']
train_loss = hist['loss']
valid_loss = hist['val_loss']
epoch_axis = range(1, len(train_acc) + 1)
# Training curves: loss in blue, accuracy in red.
plt.plot(epoch_axis, train_loss, 'b', label='Training loss')
plt.plot(epoch_axis, train_acc, 'r', label='Training Accuracy')
# Validation curves: loss in yellow, accuracy in green.
plt.plot(epoch_axis, valid_loss, 'y', label='Validation loss')
plt.plot(epoch_axis, valid_acc, 'g', label='Validation Accuracy')
plt.title('Training/Validation loss und accuracy')
plt.xlabel('Epochs')
plt.ylabel('Loss / Accuracy')
plt.legend()
plt.show()
#Initialisieren eine neuen Netzwerkes nn2
nn2=Sequential()
#Hinzufügen der layer
nn2.add(Dense(units=100,input_dim=2,activation='relu'))
nn2.add(Dense(units=50,activation='relu'))
nn2.add(Dense(units=1,activation='sigmoid'))
# Kompilieren des neuen Modells
nn2.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
#Ausgabe der Modellstruktur
nn2.summary()
#Trainieren des Modells
epoch_num = 50
history = nn2.fit(x_input,y_input, epochs=epoch_num,validation_split=0.1, verbose=1)
#Evaluierung der Ergebnisse
nn2.evaluate(x_input, y_input)
history_dict = history.history
history_dict.keys()
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" steht für "blue dot"
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, acc, 'r', label='Training Accuracy')
# b,r steht für "solid blue or red line"
plt.plot(epochs, val_loss, 'y', label='Validation loss')
plt.plot(epochs, val_acc, 'g', label='Validation Accuracy')
plt.title('Training/Validation loss und accuracy')
plt.xlabel('Epochs')
plt.ylabel('Loss / Accuracy')
plt.legend()
plt.show()
#Initialisieren eine neuen Netzwerkes nn3
nn3=Sequential()
#Hinzufügen der layer mit geäderter Aktivierungsfunktion
nn3.add(Dense(units=100,input_dim=2,activation='tanh'))
nn3.add(Dense(units=50,activation='tanh'))
nn3.add(Dense(units=1,activation='sigmoid'))
# Kompilieren des neuen Modells
nn3.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
#Ausgabe der Modellstruktur
nn3.summary()
# Train model nn3 for epoch_num epochs, holding out 10% of the data for
# validation (the resulting `history` is consumed by the plotting cell below).
epoch_num = 50
history = nn3.fit(x_input, y_input, epochs=epoch_num, validation_split=0.1, verbose=1)
# Evaluate the trained model on the full data set.
# Bug fix: this cell previously evaluated nn2 instead of nn3.
nn3.evaluate(x_input, y_input)
history_dict = history.history
history_dict.keys()
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" steht für "blue dot"
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, acc, 'r', label='Training Accuracy')
# b,r steht für "solid blue or red line"
plt.plot(epochs, val_loss, 'y', label='Validation loss')
plt.plot(epochs, val_acc, 'g', label='Validation Accuracy')
plt.title('Training/Validation loss und accuracy')
plt.xlabel('Epochs')
plt.ylabel('Loss / Accuracy')
plt.legend()
plt.show()
#Initialisieren eine neuen Netzwerkes nn4
nn4=Sequential()
#Hinzufügen der layer mit der Aktivierungsfunktion tanh
nn4.add(Dense(units=100,input_dim=2,activation='tanh'))
nn4.add(Dense(units=50,activation='tanh'))
nn4.add(Dense(units=1,activation='sigmoid'))
# Kompilieren des neuen Modells
nn4.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
#Ausgabe der Modellstruktur
nn4.summary()
# Train model nn4 for epoch_num epochs, holding out 10% of the data for
# validation (the resulting `history` is consumed by the plotting cell below).
epoch_num = 50
history = nn4.fit(x_input, y_input, epochs=epoch_num, validation_split=0.1, verbose=1)
# Evaluate the trained model on the full data set.
# Bug fix: this cell previously evaluated nn2 instead of nn4.
nn4.evaluate(x_input, y_input)
history_dict = history.history
history_dict.keys()
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" steht für "blue dot"
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, acc, 'r', label='Training Accuracy')
# b,r steht für "solid blue or red line"
plt.plot(epochs, val_loss, 'y', label='Validation loss')
plt.plot(epochs, val_acc, 'g', label='Validation Accuracy')
plt.title('Training/Validation loss und accuracy')
plt.xlabel('Epochs')
plt.ylabel('Loss / Accuracy')
plt.legend()
plt.show()
# Führen Sie weitere Veränderungen durch:
# 1.
# Verändern Sie in den Modellen nn2, nn3 und nn4 die Anzahl
# der epochs von 50 auf 100, epochs_num=100
# 2.
# Verändern Sie die Anzahl der Neuronen im ersten Layer auf 200
# und im zweiten Layer auf 100
#
# Initialisieren Sie das Modell in der jeweiligen Zelle einfach neu
# und führen dann im Modell die Veränderungen durch.
#
# Ergeben sich Ergebnisverbesserungen
#
# Wie sie sehen wurde mit der einfachen Anweisung validation_split=0.1
# ein Teil der Daten zur Validierung verwendet. Variieren Sie doch auch
# einmal diesen Anteil auf 0.2 oder 0.3.
#
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mpi-m', 'mpi-esm-1-2-hr', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# NOTE(review): duplicate "mesosphere" entry removed from the choice list;
# "troposhere" is kept as-is because it matches the schema's own spelling.
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
# Bug fix: this cell repeated 'prescribed_fields_mmr' (already set in the
# previous cell); the second concentrations property in the CMIP6 aerosol
# specialization is 'prescribed_fields_aod'.
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_aod')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image as 'window'
Step2: The sampler can be used as a numpy function, or a tensorflow operation.
Step3: Use the sampler as a tensorflow op
Step4: The location array ['MR_location'] represents the spatial coordinates of the window
Step5: Grid sampler
Step6: Visualisation of the window coordinates (change window_sizes and window_border to see different window allocations)
Step7: Weighted sampler
Step8: Balanced sampler
Step9: Visualisation of the window coordinates (change data_param see different window allocations)
|
<ASSISTANT_TASK:>
Python Code:
import sys
niftynet_path = '/Users/foo/Documents/Niftynet/'
sys.path.insert(0, niftynet_path)
from niftynet.utilities.download import download
download('mr_ct_regression_model_zoo_data')
!{sys.executable} -m pip install matplotlib
from niftynet.io.image_reader import ImageReader
from niftynet.engine.image_window_dataset import ImageWindowDataset
# creating an image reader.
data_param = \
{'CT': {'path_to_search': '~/niftynet/data/mr_ct_regression/CT_zero_mean',
'filename_contains': 'nii'}}
reader = ImageReader().initialise(data_param)
# creating a window sampler dataset from the reader
sampler = ImageWindowDataset(reader)
windows = sampler()
print(windows.keys(), windows['CT_location'], windows['CT'].shape)
import matplotlib.pyplot as plt
plt.imshow(windows['CT'][0,:,:,0,0,0])
plt.show()
import tensorflow as tf
# adding the tensorflow tensors
next_window = sampler.pop_batch_op()
# run the tensors
with tf.Session() as sess:
sampler.run_threads(sess) #initialise the iterator
windows = sess.run(next_window)
print(windows.keys(), windows['CT_location'], windows['CT'].shape)
from niftynet.io.image_reader import ImageReader
from niftynet.engine.sampler_uniform_v2 import UniformSampler
# Create an image reader over two input sections, each sampled as
# 80x80x1-voxel windows.
# NOTE(review): the 'MR' section points at the CT directory and 'CT' at the
# T2 (i.e. MR) directory -- this looks swapped; confirm against the demo data.
data_param = \
    {'MR': {'path_to_search': '~/niftynet/data/mr_ct_regression/CT_zero_mean',
            'filename_contains': 'nii',
            'spatial_window_size': (80, 80, 1)},
     'CT': {'path_to_search': '~/niftynet/data/mr_ct_regression/T2_corrected',
            'filename_contains': 'nii',
            'spatial_window_size': (80, 80, 1)},
    }
window_sizes = {'MR': (80, 80, 1), 'CT': (80, 80, 1)}
reader = ImageReader().initialise(data_param)
# The uniform sampler draws 80x80x1-voxel windows at random spatial
# locations: 5 windows per image, returned in batches of 2.
uniform_sampler = UniformSampler(
    reader, window_sizes, batch_size=2, windows_per_image=5)
import tensorflow as tf
# adding the tensorflow tensors
next_window = uniform_sampler.pop_batch_op()
# run the tensors
with tf.Session() as sess:
uniform_sampler.run_threads(sess) #initialise the iterator
windows = sess.run(next_window)
print(windows['MR_location'], windows['MR'].shape)
print(windows['CT_location'], windows['CT'].shape)
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(1,2,1)
plt.imshow(windows['MR'][0,:,:,0])
plt.subplot(1,2,2)
plt.imshow(windows['CT'][0,:,:,0])
plt.show()
from niftynet.io.image_reader import ImageReader
from niftynet.engine.sampler_grid_v2 import GridSampler
# Create an image reader over the zero-mean CT volumes.
data_param = \
    {'CT': {'path_to_search': '~/niftynet/data/mr_ct_regression/CT_zero_mean',
            'filename_contains': 'nii'}}
reader = ImageReader().initialise(data_param)
# The grid sampler tiles each volume with 42x42x1-voxel windows overlapping
# by an (8, 8, 1) border.  (Variable name kept as `uniform_sampler` because
# the evaluation cells below refer to it.)
uniform_sampler = GridSampler(reader,
                              window_sizes=(42, 42, 1),
                              window_border=(8,8,1), batch_size=1)
import tensorflow as tf
# adding the tensorflow tensors
next_window = uniform_sampler.pop_batch_op()
# run the tensors
with tf.Session() as sess:
uniform_sampler.run_threads(sess) #initialise the iterator
subject_id = 0
coords = []
while True:
windows = sess.run(next_window)
if not subject_id == windows['CT_location'][0,0]:
break;
#print(windows.keys(), windows['MR_location'], windows['MR'].shape)
coords.append(windows['CT_location'])
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.collections import PatchCollection
f, (ax1, ax) = plt.subplots(1,2)
# show image
_, img, _ = reader(idx=0)
print(img['CT'].shape)
plt.subplot(1,2,1)
plt.imshow(img['CT'][:,:,0,0,0])
# show sampled windows
all_patch = []
for win in np.concatenate(coords, axis=0):
patch = patches.Rectangle(
(win[1], win[2]),
win[4]-win[1], win[5]-win[2], linewidth=1)
all_patch.append(patch)
all_pc = PatchCollection(
all_patch, alpha=0.1, edgecolor='r', facecolor='r')
ax.add_collection(all_pc)
ax.set_xlim([0, np.max(coords, axis=0)[0,4]])
ax.set_ylim([0, np.max(coords, axis=0)[0,5]])
ax.set_aspect('equal', 'datalim')
plt.show()
from niftynet.io.image_reader import ImageReader
from niftynet.engine.sampler_weighted_v2 import WeightedSampler
# creating an image reader.
data_param = \
{'CT': {'path_to_search': '~/niftynet/data/mr_ct_regression/CT_zero_mean',
'filename_contains': 'PAR.nii.gz'},
'sampler': {'path_to_search': '~/niftynet/data/mr_ct_regression/T2_mask',
'filename_contains': 'nii'}}
reader = ImageReader().initialise(data_param)
weighted_sampler = WeightedSampler(
reader, window_sizes=(12, 12, 1), batch_size=1, windows_per_image=100)
import tensorflow as tf
# adding the tensorflow tensors
next_window = weighted_sampler.pop_batch_op()
# run the tensors
with tf.Session() as sess:
weighted_sampler.run_threads(sess) #initialise the iterator
coords = []
for _ in range(200):
windows = sess.run(next_window)
#print(windows.keys(), windows['CT_location'], windows['CT'].shape)
coords.append(windows['CT_location'])
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.collections import PatchCollection
f, (ax1, ax) = plt.subplots(1,2)
# show image
_, img, _ = reader(idx=0)
print(img['CT'].shape)
plt.subplot(1,2,1)
plt.imshow(img['CT'][:,:,0,0,0])
#plt.subplot(1,2,2)
ax.imshow(img['sampler'][:,:,0,0,0])
# show sampled windows
all_patch = []
for win in np.concatenate(coords, axis=0):
patch = patches.Rectangle(
(win[2], win[1]),
win[5]-win[2], win[4]-win[1], linewidth=1)
all_patch.append(patch)
all_pc = PatchCollection(
all_patch, alpha=0.5, edgecolor='r', facecolor='r')
ax.add_collection(all_pc)
plt.show()
from niftynet.io.image_reader import ImageReader
from niftynet.engine.sampler_balanced_v2 import BalancedSampler
# creating an image reader.
data_param = \
{'CT': {'path_to_search': '~/niftynet/data/mr_ct_regression/CT_zero_mean',
'filename_contains': 'PAR.nii.gz'},
'sampler': {'path_to_search': '~/niftynet/data/mr_ct_regression/T2_mask',
'filename_contains': 'nii'}}
reader = ImageReader().initialise(data_param)
balanced_sampler = BalancedSampler(
reader, window_sizes=(12, 12, 1), windows_per_image=100)
import tensorflow as tf
# adding the tensorflow tensors
next_window = balanced_sampler.pop_batch_op()
# run the tensors
with tf.Session() as sess:
balanced_sampler.run_threads(sess) #initialise the iterator
coords = []
for _ in range(200):
windows = sess.run(next_window)
#print(windows.keys(), windows['CT_location'], windows['CT'].shape)
coords.append(windows['CT_location'])
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.collections import PatchCollection
f, (ax1, ax) = plt.subplots(1,2)
# show image
_, img, _ = reader(idx=0)
print(img['CT'].shape)
plt.subplot(1,2,1)
plt.imshow(img['CT'][:,:,0,0,0])
#plt.subplot(1,2,2)
ax.imshow(img['sampler'][:,:,0,0,0])
# show sampled windows
all_patch = []
for win in np.concatenate(coords, axis=0):
patch = patches.Rectangle(
(win[2], win[1]),
win[5]-win[2], win[4]-win[1], linewidth=1)
all_patch.append(patch)
all_pc = PatchCollection(
all_patch, alpha=0.5, edgecolor='r', facecolor='r')
ax.add_collection(all_pc)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Сортировка в обратном порядке
Step2: Сортировка по ключу
Step3: В качестве параметра key можно указывать не только встроенные функции, но и самостоятельно определённые. Такая функция должна принимать один аргумент, элемент списка, и возращать значение, по которому надо сортировать.
|
<ASSISTANT_TASK:>
Python Code:
a = [5, 3, -2, 9, 1]
# Метод sort меняет существующий список
a.sort()
print(a)
a = [5, 3, -2, 9, 1]
a.sort(reverse=True)
print(a)
# Обычно строки сортируются в алфавитном порядке
a = ["bee", "all", "accessibility", "zen", "treasure"]
a.sort()
print(a)
# А используя сортировку по ключу можно сортировать, например, по длине
a = ["bee", "all", "accessibility", "zen", "treasure"]
a.sort(key=len)
print(a)
# Сортируем по остатку от деления на 10
def mod(x):
return x % 10
a = [1, 15, 143, 8, 0, 5, 17, 48]
a.sort(key=mod)
print(a)
# Обычно списки сортируются сначала по первому элементу, потом по второму и так далее
a = [[4, 3], [1, 5], [2, 15], [1, 6], [2, 9], [4, 1]]
a.sort()
print(a)
# А так можно отсортировать сначала по первому по возрастанию, а при равенсте — по втором
def my_key(x):
return x[0], -x[1]
a = [[4, 3], [1, 5], [2, 15], [1, 6], [2, 9], [4, 1]]
a.sort(key=my_key)
print(a)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Unit tests
Step9: Training the network
Step10: Check out your predictions
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
print(features.shape)
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
    """A two-layer (input -> hidden -> output) neural network trained with
    hand-written backpropagation.

    The hidden layer uses a sigmoid activation; the output layer is linear
    (identity), which suits the regression task (predicting ride counts).
    """

    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Set number of nodes in input, hidden and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Initialize weights with zero-mean normals scaled by 1/sqrt(fan_in),
        # which keeps the initial pre-activations in a reasonable range.
        self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
                                       (self.input_nodes, self.hidden_nodes))

        self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
                                       (self.hidden_nodes, self.output_nodes))
        self.lr = learning_rate

        # Sigmoid activation for the hidden layer.
        self.activation_function = lambda x : 1/(1+np.exp(-x))

    def train(self, features, targets):
        ''' Train the network on batch of features and targets.

            Arguments
            ---------
            features: 2D array, each row is one data record, each column is a feature
            targets: 1D array of target values
        '''
        n_records = features.shape[0]
        # Accumulate weight updates over the whole batch, then apply the
        # averaged gradient once at the end (batch gradient descent).
        delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
        delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
        for X, y in zip(features, targets):
            ### Forward pass ###
            # Hidden layer: linear combination followed by the sigmoid.
            hidden_inputs = np.dot(X, self.weights_input_to_hidden)
            hidden_outputs = self.activation_function(hidden_inputs)

            # Output layer is linear (identity activation) for regression.
            final_inputs = np.dot(hidden_outputs,self.weights_hidden_to_output)
            final_outputs = final_inputs

            ### Backward pass ###
            # Output layer error is the difference between desired target and actual output.
            error = y - final_outputs
            # Derivative of the identity activation is 1.
            output_error_term = error*1.0

            # The hidden layer's contribution to the error.
            hidden_error = np.dot(self.weights_hidden_to_output, output_error_term)

            # Backpropagated error term: sigmoid derivative is out * (1 - out).
            hidden_error_term = hidden_error*hidden_outputs*(1-hidden_outputs)

            # Weight step (input to hidden)
            delta_weights_i_h += hidden_error_term*X[:,None]
            # Weight step (hidden to output)
            delta_weights_h_o += output_error_term*hidden_outputs[:,None]

        # Apply the averaged gradient-descent step for the batch.
        self.weights_hidden_to_output += self.lr*delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step
        self.weights_input_to_hidden += self.lr*delta_weights_i_h / n_records # update input-to-hidden weights with gradient descent step

    def run(self, features):
        ''' Run a forward pass through the network with input features

            Arguments
            ---------
            features: 1D array of feature values
        '''
        # Hidden layer: linear combination followed by the sigmoid.
        hidden_inputs = np.dot(features, self.weights_input_to_hidden) # signals into hidden layer
        hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer

        # Output layer is linear (identity activation).
        final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
        final_outputs = final_inputs # signals from final output layer

        return final_outputs
def MSE(y, Y):
    """Return the mean squared error between predictions ``y`` and targets ``Y``."""
    squared_error = (y - Y) ** 2
    return np.mean(squared_error)
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
    """Unit tests for data loading and for the NeuralNetwork implementation.

    The network tests use the small fixed weight matrices ``test_w_i_h`` and
    ``test_w_h_o`` (module-level fixtures) so the post-training weights and
    the forward-pass output can be compared to hand-computed values.
    """

    ##########
    # Unit tests for data loading
    ##########

    def test_data_path(self):
        # Test that file path to dataset has been unaltered
        self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')

    def test_data_loaded(self):
        # Test that data frame loaded
        self.assertTrue(isinstance(rides, pd.DataFrame))

    ##########
    # Unit tests for network functionality
    ##########

    def test_activation(self):
        network = NeuralNetwork(3, 2, 1, 0.5)
        # Test that the activation function is a sigmoid
        self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))

    def test_train(self):
        # Test that weights are updated correctly on training
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        network.train(inputs, targets)
        # Expected values were computed by hand for a single gradient step.
        self.assertTrue(np.allclose(network.weights_hidden_to_output,
                                    np.array([[ 0.37275328],
                                              [-0.03172939]])))
        self.assertTrue(np.allclose(network.weights_input_to_hidden,
                                    np.array([[ 0.10562014, -0.20185996],
                                              [0.39775194, 0.50074398],
                                              [-0.29887597, 0.19962801]])))

    def test_run(self):
        # Test correctness of run method
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
import sys
### Set the hyperparameters here ###
iterations = 18000
learning_rate = 0.12
hidden_nodes = 14
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download PTB dataset
Step2: The dataset structure is numpy.ndarray.
Step3: Word ID and word correspondence
Step4: Convert to word sequences
Step5: Validation data text
Step6: Test data text
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import os
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import chainer
train, val, test = chainer.datasets.get_ptb_words()
print('train type: ', type(train), train.shape, train)
print('val type: ', type(val), val.shape, val)
print('test type: ', type(test), test.shape, test)
ptb_dict = chainer.datasets.get_ptb_words_vocabulary()
print('Number of vocabulary', len(ptb_dict))
print('ptb_dict', ptb_dict)
ptb_word_id_dict = ptb_dict
ptb_id_word_dict = dict((v, k) for k, v in ptb_word_id_dict.items())
print([ptb_id_word_dict[i] for i in train[:30]])
# ' '.join(text_list) will convert list representation more readable
' '.join([ptb_id_word_dict[i] for i in train[:300]])
print(' '.join([ptb_id_word_dict[i] for i in val[:300]]))
print(' '.join([ptb_id_word_dict[i] for i in test[:300]]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TV Script Generation
Step3: Explore the Data
Step6: Implement Preprocessing Functions
Step9: Tokenize Punctuation
Step11: Preprocess all the data and save it
Step13: Check Point
Step15: Build the Neural Network
Step18: Input
Step21: Build RNN Cell and Initialize
Step24: Word Embedding
Step27: Build RNN
Step30: Build the Neural Network
Step34: Batches
Step36: Neural Network Training
Step38: Build the Graph
Step40: Train
Step42: Save Parameters
Step44: Checkpoint
Step47: Implement Generate Functions
Step50: Choose Word
Step52: Generate TV Script
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
import numpy as np
import problem_unittests as tests
from collections import Counter
def create_lookup_tables(text):
    """Create lookup tables for vocabulary.

    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Most frequent word gets id 0, next most frequent id 1, and so on.
    word_counts = Counter(text)
    ordered_words = sorted(word_counts, key=word_counts.get, reverse=True)
    int_to_vocab = dict(enumerate(ordered_words))
    vocab_to_int = {word: idx for idx, word in int_to_vocab.items()}
    return vocab_to_int, int_to_vocab
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_create_lookup_tables(create_lookup_tables)
def token_lookup():
    """Generate a dict to turn punctuation into a token.

    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    symbols = ['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n']
    names = ["Period", "Comma", "Quotation_Mark", "Semicolon",
             "Exclamation_mark", "Question_mark", "Left_Parentheses",
             "Right_Parentheses", "Dash", "Return"]
    # Tokens are delimited with "||" so they can't collide with real words.
    return {sym: "||{}||".format(name) for sym, name in zip(symbols, names)}
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_tokenize(token_lookup)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
    """Create TF Placeholders for input, targets, and learning rate.

    :return: Tuple (input, targets, learning rate)
    """
    # Input and targets are both [batch_size, seq_length] word-id tensors.
    inputs = tf.placeholder(tf.int32, shape=[None, None], name='input')
    targets = tf.placeholder(tf.int32, shape=[None, None], name='targets')
    learning_rate = tf.placeholder(tf.float32)
    return inputs, targets, learning_rate
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_inputs(get_inputs)
def get_init_cell(batch_size, rnn_size):
    """Create an RNN Cell and initialize it.

    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    lstm_layers = 5
    # Build a *separate* BasicLSTMCell per layer. The previous version used
    # [lstm] * lstm_layers, which makes every layer share the same cell object
    # (shared weights) and raises a variable-reuse error on TensorFlow >= 1.1.
    cells = [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(lstm_layers)]
    cell = tf.contrib.rnn.MultiRNNCell(cells)

    # All-zero initial state, named so it can be fetched from a restored graph.
    initial_state = cell.zero_state(batch_size, tf.float32)
    return cell, tf.identity(initial_state, name='initial_state')
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_init_cell(get_init_cell)
def get_embed(input_data, vocab_size, embed_dim):
    """Create embedding for <input_data>.

    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Embedding matrix of shape (vocab_size, embed_dim), uniform in [-1, 1).
    embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
    # Look up the embedding vector for every word id in input_data.
    embed = tf.nn.embedding_lookup(embedding, input_data)
    return embed
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
    """Create a RNN using a RNN Cell.

    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # Unroll the cell dynamically over the time dimension of `inputs`.
    outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the final state so it can be retrieved from a restored graph.
    return outputs, tf.identity(final_state, name="final_state")
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_rnn(build_rnn)
def build_nn(cell, rnn_size, input_data, vocab_size):
    """Build part of the neural network.

    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :return: Tuple (Logits, FinalState)
    """
    embed_dim = 300;
    embed = get_embed(input_data,vocab_size,embed_dim)
    outputs, final_state = build_rnn(cell,embed)
    # outputs has shape (batch, seq_length, rnn_size); final_state is named
    # "final_state" by build_rnn.
    # NOTE: the weight initialisation really matters here — small truncated
    # normals with zero biases keep the initial logits well scaled.
    logits = tf.contrib.layers.fully_connected(outputs,vocab_size,activation_fn=None,
                             weights_initializer = tf.truncated_normal_initializer(stddev=0.1),
                             biases_initializer=tf.zeros_initializer())
    # logits has shape (batch, seq_length, vocab_size).
    return logits, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_nn(build_nn)
# Reviewer-suggested reshape/split implementation (fixed).
def get_batches(int_text, batch_size, seq_length):
    """Return batches of input and target sequences.

    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array of shape
             (n_batches, 2, batch_size, seq_length), where axis 1 holds
             (inputs, targets) and targets are inputs shifted by one word.
    """
    # Use len-1 so the shifted target slice never runs past the end of the
    # text (the previous version crashed when len(int_text) was an exact
    # multiple of batch_size * seq_length).
    n_batches = (len(int_text) - 1) // (batch_size * seq_length)
    keep = n_batches * batch_size * seq_length

    x_data = np.array(int_text[:keep])
    y_data = np.array(int_text[1:keep + 1])

    # Reshape to (batch_size, n_batches * seq_length), then split along the
    # time axis into n_batches chunks of seq_length each.
    # (The original used the undefined names `xdata`/`ydata` here: NameError.)
    x = np.split(x_data.reshape(batch_size, -1), n_batches, 1)
    y = np.split(y_data.reshape(batch_size, -1), n_batches, 1)

    return np.array(list(zip(x, y)))
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_batches(get_batches)
# def get_batches(int_text, batch_size, seq_length):
#
# Return batches of input and target
# :param int_text: Text with the words replaced by their ids
# :param batch_size: The size of batch
# :param seq_length: The length of sequence
# :return: Batches as a Numpy array
#
# # TODO: Implement Function
# batches = []
# n_batchs = (len(int_text)-1) // (batch_size * seq_length)
# # int_text = int_text[:n_batchs*batch_size * seq_length+1]
# for i in range(0,n_batchs*seq_length,seq_length):
# x = []
# y = []
# for j in range(i,i+batch_size * seq_length,seq_length):
# x.append(int_text[j:j+seq_length])
# y.append(int_text[j+1:j+1+seq_length])
# batches.append([x,y])
# return np.array(batches)
# #print(get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3))
#
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
#
# tests.test_get_batches(get_batches)
# 4257 line ,average 11 words
# Number of Epochs
num_epochs = 50
# Batch Size
batch_size = 200
# RNN Size
rnn_size = 512
# Sequence Length
seq_length = 10 # !!! when i increase the seq_length from 5 to 10,it really helps,如果继续增加会怎么样呢?
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 40
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
save_dir = './save'
DON'T MODIFY ANYTHING IN THIS CELL
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
# input_data_shape[0] batch size
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
def get_tensors(loaded_graph):
    """Get input, initial state, final state, and probabilities tensor from <loaded_graph>.

    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # These names match the tf.identity()/placeholder names used at build time.
    tensor_names = ("input:0", "initial_state:0", "final_state:0", "probs:0")
    return tuple(loaded_graph.get_tensor_by_name(name) for name in tensor_names)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_tensors(get_tensors)
import random
def pick_word(probabilities, int_to_vocab):
    """Pick the next word in the generated text by sampling.

    The word id is drawn proportionally to ``probabilities`` (roulette-wheel
    sampling), so generation stays varied instead of always taking argmax.
    The previous version carried several unreachable statements after its
    ``return``; they have been removed.

    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Draw the threshold from [0, total mass) so sampling still works even if
    # the probabilities are not perfectly normalised.
    threshold = random.uniform(0, float(np.sum(probabilities)))
    cumulative = np.cumsum(probabilities)
    # First index whose cumulative mass exceeds the threshold.
    idx = int(np.searchsorted(cumulative, threshold, side='right'))
    # Guard against floating-point round-off pushing the index past the end.
    idx = min(idx, len(probabilities) - 1)
    return int_to_vocab[idx]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_pick_word(pick_word)
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Section 1
Step2: 3. Répétez les questions 1.1 et 1.2 avec un bruit dit blanc, généré ci dessous.
Step3: 4. Bruit respiratoire.
Step4: 5. Ligne de base.
Step5: 6. Mélange de signaux.
Step6: Section 2
Step7: 2.2 Répétez la question 2.1 avec un noyau plus gros.
Step8: 2.3 Nous allons maintenant appliquer des filtres de Butterworth.
Step9: 2.4. Application du filtre de Butterworth.
Step10: 2.5. Optimisation du filtre de Butterworth.
Step11: 2.6 Optionnel
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from pymatbridge import Octave
octave = Octave()
octave.start()
%load_ext pymatbridge
%%matlab
%% Définition du signal d'intêret
% fréquence du signal
freq = 0.3;
% on crée des blocs off/on de 15 secondes
bloc = repmat([zeros(1,15*freq) ones(1,15*freq)],[1 10]);
% les temps d'acquisition
ech = (0:(1/freq):(length(bloc)/freq)-(1/freq));
% ce paramètre fixe le pic de la réponse hémodynamique
pic = 5;
% noyau de réponse hémodynamique
noyau = [linspace(0,1,(pic*freq)+1) linspace(1,-0.3,(pic*freq)/2) linspace(-0.3,0,(pic*freq)/2)];
noyau = [zeros(1,length(noyau)-1) noyau];
% normalisation du noyau
noyau = noyau/sum(abs(noyau));
% convolution du bloc avec le noyau
signal = conv(bloc,noyau,'same');
% on fixe la moyenne de la réponse à zéro
signal = signal - mean(signal);
%% représentation en temps
figure
plot(ech,signal)
xlim([-1 max(ech)+1])
ylim([-0.6 0.7])
xlabel('Temps (s)')
ylabel('a.u')
%% représentation en fréquences
figure
Analyse_Frequence_Puissance(signal,ech);
ylim([10^(-10) 1])
%%matlab
%% définition du bruit blanc
bruit = 0.05*randn(size(signal));
%% représentation en temps
figure
plot(ech,bruit)
ylim([-0.6 0.7])
xlabel('Temps (s)')
ylabel('a.u')
%% représentation en fréquences
figure
Analyse_Frequence_Puissance(bruit,ech);
ylim([10^(-10) 1])
%%matlab
%% définition du signal de respiration
% fréquence de la respiration
freq_resp = 0.3;
% un modéle simple (cosinus) des fluctuations liées à la respiration
resp = cos(2*pi*freq_resp*ech/freq);
% fréquence de modulation lente de l'amplitude respiratoire
freq_mod = 0.01;
% modulation de l'amplitude du signal lié à la respiration
resp = resp.*(ones(size(resp))-0.1*cos(2*pi*freq_mod*ech/freq));
% on force une moyenne nulle, et une amplitude max de 0.1
resp = 0.1*(resp-mean(resp));
%% représentation en temps
figure
plot(ech,resp)
xlim([-1 max(ech)/2+1])
xlabel('Temps (s)')
ylabel('a.u')
%% représentation en fréquences
figure
[ech_f,signal_f,signal_af,signal_pu] = Analyse_Frequence_Puissance(resp,ech);
set(gca,'yscale','log');
ylim([10^(-35) 1])
%%matlab
%% définition de la ligne de base
base = 0.1*(ech-mean(ech))/mean(ech);
%% représentation en temps
figure
plot(ech,base)
xlim([-1 max(ech)+1])
ylim([-0.6 0.7])
xlabel('Temps (s)')
ylabel('a.u')
%% représentation en fréquence
figure
[ech_f,base_f,base_af,base_pu] = Analyse_Frequence_Puissance(base,ech);
ylim([10^(-10) 1])
%%matlab
%% Mélanges de signaux
y_sr = signal + resp;
y_srb = signal + resp + bruit;
y_srbb = signal + resp + bruit + base;
%%matlab
y = y_sr;
% représentation en temps
figure
plot(ech,y)
hold on
plot(ech,signal,'r')
xlim([-1 301])
ylim([-0.8 0.8])
xlabel('Temps (s)')
ylabel('a.u')
% représentation en fréquence
figure
[freq_f,y_f,y_af,y_pu] = Analyse_Frequence_Puissance(y,ech);
ylim([10^(-10) 1])
%%matlab
y = y_srb;
% représentation en temps
figure
plot(ech,y)
hold on
plot(ech,signal,'r')
xlim([-1 301])
ylim([-0.8 0.8])
xlabel('Temps (s)')
ylabel('a.u')
% représentation en fréquence
figure
[freq_f,y_f,y_af,y_pu] = Analyse_Frequence_Puissance(y,ech);
ylim([10^(-10) 1])
%%matlab
y = y_srbb;
% représentation en temps
figure
plot(ech,y)
hold on
plot(ech,signal,'r')
xlim([-1 301])
ylim([-0.8 0.8])
xlabel('Temps (s)')
ylabel('a.u')
% représentation en fréquence
figure
[freq_f,y_f,y_af,y_pu] = Analyse_Frequence_Puissance(y,ech);
ylim([10^(-10) 1])
%%matlab
%%définition d'un noyau de moyenne mobile
% taille de la fenêtre pour la moyenne mobile, en nombre d'échantillons temporels
taille = ceil(3*freq);
% le noyau, défini sur une fenêtre identique aux signaux précédents
noyau = [zeros(1,(length(signal)-taille)/2) ones(1,taille) zeros(1,(length(signal)-taille)/2)];
% normalisation du moyau
noyau = noyau/sum(abs(noyau));
% convolution avec le noyau (filtrage)
y_f = conv(y_sr,noyau,'same');
%% Représentation fréquentielle du filtre
figure
Analyse_Frequence_Puissance(noyau,ech);
ylim([10^(-10) 1])
%% représentation du signal filtré
figure
plot(ech,y_f,'k')
hold on
plot(ech,signal,'r')
%% erreur résiduelle
err = sqrt(mean((signal-y_f).^2))
%%matlab
%%définition d'un noyau de moyenne mobile
% taille de la fenêtre pour la moyenne mobile, en nombre d'échantillons temporels
taille = ceil(6*freq);
% le noyau, défini sur une fenêtre identique aux signaux précédents
noyau = [zeros(1,(length(signal)-taille)/2) ones(1,taille) zeros(1,(length(signal)-taille)/2)];
% normalisation du moyau
noyau = noyau/sum(abs(noyau));
% convolution avec le noyau (filtrage)
y_f = conv(y_sr,noyau,'same');
%% Représentation fréquentielle du filtre
figure
Analyse_Frequence_Puissance(noyau,ech);
ylim([10^(-10) 1])
%% représentation du signal filtré
figure
plot(ech,y_f,'k')
hold on
plot(ech,signal,'r')
%% erreur résiduelle
err = sqrt(mean((signal-y_f).^2))
%%matlab
%% Définition d'une implusion finie unitaire
impulsion = zeros(size(signal));
impulsion(round(length(impulsion)/2))=1;
noyau = FiltrePasseHaut(impulsion,freq,0.1);
%% représentation temporelle
figure
plot(ech,noyau)
xlabel('Temps (s)')
ylabel('a.u')
%% représentation fréquentielle
figure
Analyse_Frequence_Puissance(noyau,ech);
set(gca,'yscale','log');
%%matlab
y = y_sr;
y_f = FiltrePasseBas(y,freq,0.1);
%%représentation du signal filtré
plot(ech,signal,'r')
hold on
plot(ech,y_f,'k')
err = sqrt(mean((signal-y_f).^2))
%%matlab
y = y_sr;
%%définition d'un noyau de moyenne mobile
% taille de la fenêtre pour la moyenne mobile, en nombre d'échantillons temporels
taille = ceil(6*freq);
% le noyau, défini sur une fenêtre identique aux signaux précédents
noyau = [zeros(1,(length(signal)-taille)/2) ones(1,taille) zeros(1,(length(signal)-taille)/2)];
% normalisation du moyau
noyau = noyau/sum(abs(noyau));
% convolution avec le noyau (filtrage)
y_f = conv(y,noyau,'same');
%% représentation du signal brut
figure
plot(ech,signal,'r')
hold on
plot(ech,y,'k')
%% représentation du signal filtré
figure
plot(ech,y_f,'k')
hold on
plot(ech,signal,'r')
err = sqrt(mean((signal-y_f).^2))
%% filtre de Butterworth
y_f = FiltrePasseHaut(y,freq,0.08);
y_f = y_f+FiltrePasseBas(y,freq,0.12);
%% représentation du signal filtré
figure
plot(ech,signal,'r')
hold on
plot(ech,y_f,'k')
err = sqrt(mean((signal-y_f).^2))
%%matlab
y = y_srb;
%%définition d'un noyau de moyenne mobile
% taille de la fenêtre pour la moyenne mobile, en nombre d'échantillons temporels
taille = ceil(6*freq);
% le noyau, défini sur une fenêtre identique aux signaux précédents
noyau = [zeros(1,(length(signal)-taille)/2) ones(1,taille) zeros(1,(length(signal)-taille)/2)];
% normalisation du moyau
noyau = noyau/sum(abs(noyau));
% convolution avec le noyau (filtrage)
y_f = conv(y,noyau,'same');
%% représentation du signal brut
figure
plot(ech,signal,'r')
hold on
plot(ech,y,'k')
%% représentation du signal filtré
figure
plot(ech,y_f,'k')
hold on
plot(ech,signal,'r')
%% erreur résiduelle
err = sqrt(mean((signal-y_f).^2))
%signal+base+resp+bruit
y_f = FiltrePasseBas(y,freq,0.08);
%% représentation du signal filtré
figure
plot(ech,signal,'r')
hold on
plot(ech,y_f,'k')
err = sqrt(mean((signal-y_f).^2))
%%matlab
y = y_srbb;
%%définition d'un noyau de moyenne mobile
% taille de la fenêtre pour la moyenne mobile, en nombre d'échantillons temporels
taille = ceil(6*freq);
% le noyau, défini sur une fenêtre identique aux signaux précédents
noyau = [zeros(1,(length(signal)-taille)/2) ones(1,taille) zeros(1,(length(signal)-taille)/2)];
% normalisation du moyau
noyau = noyau/sum(abs(noyau));
% convolution avec le noyau (filtrage)
y_f = conv(y,noyau,'same');
%% représentation du signal brut
figure
plot(ech,signal,'r')
hold on
plot(ech,y,'k')
%% représentation du signal filtré
figure
plot(ech,y_f,'k')
hold on
plot(ech,signal,'r')
%% erreur résiduelle
err = sqrt(mean((signal-y_f).^2))
%signal+base+resp+bruit
y_f = FiltrePasseHaut(y,freq,0.02);
y_f = FiltrePasseBas(y_f,freq,0.08);
%% représentation du signal filtré
figure
plot(ech,signal,'r')
hold on
plot(ech,y_f,'k')
err = sqrt(mean((signal-y_f).^2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Message Receive Time
Step2: IMU Noisy Acceleration
Step3: State Tracking
|
<ASSISTANT_TASK:>
Python Code:
import psas_packet
from psas_packet.io import BinFile
import csv
import matplotlib.pyplot as plt
from matplotlib import gridspec
%matplotlib inline
# Unit-conversion constants.
FPS2M = 0.3048     # feet per second -> metres per second
LBF2N = 4.44822    # pounds-force -> newtons
LBS2KG = 0.453592  # pounds (mass) -> kilograms

# Extend PSAS Packet to include our state message.
# Registers a 'STAT' record type so BinFile.read() below can decode the
# state-vector samples written by rust-fc. '!' is network (big-endian) byte
# order; 'Q' / 'd' are struct-style format codes (uint64 / float64).
psas_packet.messages.MESSAGES["STAT"] = psas_packet.messages.Message({
    'name': "State Vector",
    'fourcc': b'STAT',
    'size': "Fixed",
    'endianness': '!',
    'members': [
        {'key': "time", 'stype': "Q"},
        {'key': "accel", 'stype': "d"},
        {'key': "vel", 'stype': "d"},
        {'key': "alt", 'stype': "d"},
        {'key': "roll_rate", 'stype': "d"},
        {'key': "roll_angle", 'stype': "d"},
    ]
})
# Read data from rust-fc: iterate every record in the binary log, keeping the
# ADIS IMU accelerations and the STAT state-vector samples.
# (Indentation reconstructed: rust_t / sim_t are updated only while a new
# running peak is seen, so the two clocks get aligned at peak acceleration.)
logfile = BinFile('../logfile-000')
max_acc = 0
rust_time = []        # IMU sample timestamps [s]
rust_accel_x = []     # IMU accelerations [m/s^2]
rust_accel_y = []
rust_accel_z = []
rust_state_time = []  # state-vector timestamps [s]
rust_vel = []
rust_alt = []
for fourcc, data in logfile.read():
    if fourcc == 'ADIS':
        # Remember the time of peak acceleration; used below to line the
        # rust-fc clock up with the simulation clock.
        if data['Acc_X'] > max_acc:
            max_acc = data['Acc_X']
            rust_t = data['timestamp']/1.0e9  # ns -> s
        rust_time.append(data['timestamp']/1.0e9)
        rust_accel_x.append(data['Acc_X'])
        rust_accel_y.append(data['Acc_Y'])
        rust_accel_z.append(data['Acc_Z'])
    if fourcc == 'STAT':
        rust_state_time.append(data['timestamp']/1.0e9)
        rust_vel.append(data['vel'])
        rust_alt.append(data['alt'])
# Read data from JSBSim: the simulation's CSV output holds the "true" values.
max_accel = 0
sim_time = []
measured_accel_x = []
sim_vel_up = []
sim_alt = []
with open('../simulation/data.csv') as datafile:
    reader = csv.reader(datafile, delimiter=',')
    for row in reader:
        # ignore first line (header row; first cell starts with 'T')
        if row[0][0] == 'T':
            continue
        sim_time.append(float(row[0]))
        force_x = float(row[18]) * LBF2N  # lbf -> N (column meaning per JSBSim output config -- verify)
        weight = float(row[6]) * LBS2KG   # lb -> kg
        measured_accel_x.append(force_x/weight)
        # Track the time of peak simulated acceleration (clock alignment).
        if (force_x/weight) > max_accel:
            max_accel = force_x/weight
            sim_t = sim_time[-1]
        sim_vel_up.append(-float(row[10]) * FPS2M)  # sign-flip to "up", ft/s -> m/s
        sim_alt.append(float(row[2]))
# line up time: shift the sim clock so both acceleration peaks coincide
sim_offset = rust_t - sim_t
sim_time = [t + sim_offset for t in sim_time]
# Get the time difference between each ADIS message, in milliseconds.
diff = [(rust_time[i+1] - t)*1000 for i, t in enumerate(rust_time[:-1])]

# Scatter of the sample interval for every IMU message (log y scale).
fig, ax1 = plt.subplots(figsize=(18,7))
plt.title(r"rust-fc ADIS Message Interval")
plt.ylabel(r"Time Since Last Sample [ms]")
plt.xlabel(r"Sample Number [#]")
plt.plot(range(len(diff)), diff, 'r.', alpha=1.0, ms=0.3, label="rust-fc Sample Interval")
# 1.2207 ms is the expected nominal sample period -- TODO confirm IMU rate.
plt.plot((0, len(diff)), (1.2207, 1.2207), 'k-', lw=0.6, alpha=0.7, label="Expected Sample Interval")
# NOTE(review): `nonposy` was renamed `nonpositive` in matplotlib 3.3+.
ax1.set_yscale("log", nonposy='clip')
plt.ylim([0.1,100])
#plt.xlim()
ax1.legend(loc=1)
plt.show()

# Histogram of the same intervals.
fig, ax1 = plt.subplots(figsize=(18,7))
plt.title(r"rust-fc ADIS Message Interval")
plt.ylabel(r"Number of Samples [#]")
plt.xlabel(r"Time Since Last Sample [ms]")
# NOTE(review): `normed` was removed in matplotlib 3.x; use density=True there.
n, bins, patches = plt.hist(diff, 1000, histtype='step', normed=1, alpha=0.8, linewidth=1, fill=True)
plt.plot((1.2207, 1.2207), (0, 1000), 'k-', lw=0.6, alpha=0.7, label="Expected Sample Interval")
plt.ylim([0, 35])
#plt.xlim()
ax1.legend(loc=1)
plt.show()

# Recorded (noisy) IMU acceleration vs the simulation's true acceleration.
fig, ax1 = plt.subplots(figsize=(18,7))
plt.title(r"rust-fc Recorded IMU Acceleration")
plt.ylabel(r"Acceleration [m/s${}^2$]")
plt.xlabel(r"Run Time [s]")
plt.plot(rust_time, rust_accel_x, alpha=0.8, lw=0.5, label="rust-fc IMU 'Up'")
plt.plot(rust_time, rust_accel_y, alpha=0.8, lw=0.5, label="rust-fc IMU 'Y'")
plt.plot(rust_time, rust_accel_z, alpha=0.6, lw=0.5, label="rust-fc IMU 'Z'")
plt.plot(sim_time, measured_accel_x, 'k-', lw=1.3, alpha=0.6, label="JSBSim True Acceleration")
#plt.ylim()
#plt.xlim()
ax1.legend(loc=1)
plt.show()
# Compute the difference between the FC state vector and the simulation's
# "real" numbers. FC state samples arrive faster than the sim rows, so
# consecutive state samples are averaged until the sim clock catches up,
# then one (sim - averaged FC) residual is recorded per sim row.
sim_idx = 0
vel = 0
alt = 0
i_count = 0
sim_matched_vel = []
vel_diff = []
alt_diff = []
for i, t in enumerate(rust_state_time):
    vel += rust_vel[i]
    alt += rust_alt[i]
    i_count += 1
    if sim_time[sim_idx] < t:
        # Average of the FC samples accumulated since the last sim row.
        sim_matched_vel.append(vel/float(i_count))
        vel_diff.append(sim_vel_up[sim_idx] - (vel/float(i_count)))
        alt_diff.append(sim_alt[sim_idx] - (alt/float(i_count)))
        vel = 0
        alt = 0
        i_count = 0
        sim_idx += 1
        if sim_idx > len(sim_time)-1:
            break

# NOTE(review): vel_diff/alt_diff may be shorter than sim_time if the state
# samples run out first -- confirm the lengths match before plotting.
fig = plt.figure(figsize=(18,9))
plt.subplots_adjust(hspace=0.001) # no space between vertical charts
gs = gridspec.GridSpec(2, 1, height_ratios=[2, 1]) # stretch main chart to be most of the width
ax1 = plt.subplot(gs[0])
plt.title(r"rust-fc State Tracking: Velocity And Velocity Integration Error")
plt.ylabel(r"Velocity [m/s]")
plt.plot(rust_state_time, rust_vel, alpha=0.8, lw=1.5, label="rust-fc State Vector Velocity")
plt.plot(sim_time, sim_vel_up, 'k-', lw=1.3, alpha=0.6, label="JSBSim True Velocity")
plt.ylim([-60,400])
# Hide the upper chart's x tick labels; it shares the time axis below.
ticklabels = ax1.get_xticklabels()
plt.setp(ticklabels, visible=False)
ax2 = plt.subplot(gs[1])
plt.xlabel(r"Run Time [s]")
plt.ylabel(r"Integration Drift Error [m/s]")
plt.plot(sim_time, vel_diff)
ax1.legend(loc=1)
plt.show()
# Altitude tracking chart: FC state-vector altitude vs JSBSim truth, with the
# integration drift error below.
# Fixes: title typo "ALtitude" -> "Altitude"; the JSBSim trace was
# copy-paste-labelled "True Velocity" on this altitude chart.
fig = plt.figure(figsize=(18,9))
plt.subplots_adjust(hspace=0.001) # no space between vertical charts
gs = gridspec.GridSpec(2, 1, height_ratios=[2, 1]) # stretch main chart to be most of the width
ax1 = plt.subplot(gs[0])
plt.title(r"rust-fc State Tracking: Altitude And Altitude Integration Error")
plt.ylabel(r"Altitude MSL [m]")
plt.plot(rust_state_time, rust_alt, alpha=0.8, lw=1.5, label="rust-fc State Vector Altitude")
plt.plot(sim_time, sim_alt, 'k-', lw=1.3, alpha=0.6, label="JSBSim True Altitude")
plt.ylim([1390, 7500])
# Hide the upper chart's x tick labels; it shares the time axis below.
ticklabels = ax1.get_xticklabels()
plt.setp(ticklabels, visible=False)
ax2 = plt.subplot(gs[1])
plt.xlabel(r"Run Time [s]")
plt.ylabel(r"Integration Drift Error [m]")
plt.plot(sim_time, alt_diff)
#plt.xlim()
ax1.legend(loc=1)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: lang
Step2: lang
Step3: lang
Step4: lang
Step5: lang
Step6: lang
Step7: lang
Step9: 5. Exercise
Step11: 5.2. Returning the First Item that Matches a Predicate
Step13: 5.3. Filtering Items in a List
Step15: 5.4. map
Step16: Aside
Step17: lang
Step18: lang
Step19: lang
Step21: 6. Exercise
|
<ASSISTANT_TASK:>
Python Code:
# A program that calculates the final grade for each student.
# Scores for Assignment 1, Assignment 2, and Final Exam.
sam_scores = [90, 80, 90]
yuko_scores = [90, 100, 80]
# Weighted score: 20% per assignment, 60% for the final exam; > 60 passes.
sam_weighted_score = 0.2 * sam_scores[0] + 0.2 * sam_scores[1] + 0.6 * sam_scores[2]
sam_grade = 'PASS' if sam_weighted_score > 60 else 'FAIL'
print('Sam\'s final score: {} => {}'.format(sam_weighted_score, sam_grade))
yuko_weighted_score = 0.2 * yuko_scores[0] + 0.2 * yuko_scores[1] + 0.6 * yuko_scores[2]
yuko_grade = 'PASS' if yuko_weighted_score > 60 else 'FAIL'
print('Yuko\'s final score: {} => {}'.format(yuko_weighted_score, yuko_grade))
def calculate_grade(student_name, scores, weights=(0.2, 0.2, 0.6)):
    """Return a formatted pass/fail summary line for one student.

    Args:
        student_name: Name shown in the summary line.
        scores: Sequence of numeric scores, one per assessment.
        weights: Weight applied to each score. Defaults to the course
            weighting (20% / 20% / 60%), so existing callers are unaffected.

    Returns:
        A string of the form "<name>'s final score: <score> => <PASS|FAIL>".
    """
    # Same products and summation order as the original hard-coded formula.
    weighted_score = sum(w * s for w, s in zip(weights, scores))
    grade = 'PASS' if weighted_score > 60 else 'FAIL'
    return '{}\'s final score: {} => {}'.format(student_name, weighted_score, grade)
# calculate_grade is pure: it does not mutate its arguments, so repeated
# calls with the same inputs always print the same line.
print(calculate_grade('Sam', sam_scores))
print(calculate_grade('Yuko', yuko_scores))
devon_scores = [60, 50, 60]
print(calculate_grade('Devon', devon_scores))
print(calculate_grade('Sam', sam_scores))
print(calculate_grade('Sam', sam_scores))
# When we call the function again, the same message will be printed.
print(calculate_grade('Sam', sam_scores))
# Impure version: weights the caller's `scores` list IN PLACE, so calling it
# twice with the same list produces a different (wrong) result the second time.
def calculate_grade_impure(student_name, scores):
    """Weight `scores` in place, then return the formatted pass/fail summary."""
    for index, weight in enumerate((0.2, 0.2, 0.6)):
        scores[index] *= weight
    weighted_score = scores[0] + scores[1] + scores[2]
    outcome = 'PASS' if weighted_score > 60 else 'FAIL'
    return '{}\'s final score: {} => {}'.format(student_name, weighted_score, outcome)
# Each call mutates sam_scores in place, so the "same" call returns a
# different result every time -- that is the point of this demo.
print(calculate_grade_impure('Sam', sam_scores))
print(calculate_grade_impure('Sam', sam_scores))
# When we call the function again, we get a different result!
print(calculate_grade_impure('Sam', sam_scores))
# Functions are first-class values in Python: they can be assigned to
# variables, stored in containers, and passed as arguments.
def square(x):
    return x ** 2

# We can assign a function `square` to a variable `my_square`
my_square = square
# The variable can be called since it contains a function.
print(my_square(3))

def square(x):
    return x ** 2

def cube(x):
    return x ** 3

# Creating a list of functions.
power_functions = [ square, cube ]
for power_function in power_functions:
    print(power_function(3))

student_ages = [('Sam', 18), ('Yuko', 20), ('Devon', 19)]
# Sort by names in alphabetical order.
def get_name(student):
    return student[0]

sorted_by_name = sorted(student_ages, key=get_name)
print('Sorted by name: {}'.format(sorted_by_name))
# Sort by age. (ascending order)
## You can use lambda to avoid defining a new function
sorted_by_age = sorted(student_ages, key=lambda student:student[1])
print('Sorted by age (smallest to largest): {}'.format(sorted_by_age))
# You can use `reverse` to sort a list by descending order.
sorted_by_age_desc = sorted(student_ages, key=lambda student:student[1], reverse=True)
print('Sorted by age (largest to smallest): {}'.format(sorted_by_age_desc))
def create_adder(k):
    """Return a new function that adds the constant `k` to its argument."""
    def add_k(value):
        # `k` is captured from the enclosing call's scope (a closure).
        return value + k
    return add_k

adder_5 = create_adder(5)
print(adder_5(3))
%%solution
def apply_n_times(f, x, n):
    # BEGIN PROMPT
    pass
    # END PROMPT
    # BEGIN SOLUTION
    # Apply `f` repeatedly: result = f(f(...f(x)...)), n applications total.
    result = x
    for _ in range(n):
        result = f(result)
    return result
    # END SOLUTION
%%studenttest ApplyNTimesStudentTest
def add2(x):
return x + 2
assert apply_n_times(add2, 0, 5) == 10
# Note: the "lambda" syntax is a shorthand for creating a function with no name.
# For more information, see:
# https://docs.python.org/3/tutorial/controlflow.html#lambda-expressions
assert apply_n_times(lambda x: x * 2, 1, 10) == 1024
def fibonacci_step(x):
return (x[1], x[0] + x[1])
assert apply_n_times(fibonacci_step, (1, 1), 10) == (89, 144)
%%inlinetest ApplyNTimesInlineTest
assert apply_n_times(lambda x: True, None, 1) is not None, 'Did you forget to return a value?'
assert apply_n_times(lambda x: x + 1, 1, 2) == 3, 'Are you passing `x` and `y` to the function?'
assert apply_n_times(lambda x: x * x, 0, 100) == 0, '`x` must be returned as is when `n == 0`.'
%%solution
def find_first_match(predicate, items):
# BEGIN PROMPT
pass
# END PROMPT
# BEGIN SOLUTION
for x in items:
if predicate(x):
return x
# END SOLUTION
%%studenttest FindFirstMatchStudentTest
assert find_first_match(lambda x: x > 5, [1, 2, 3, 8, 9]) == 8
assert find_first_match(lambda x: x, [False, False, True]) is True
assert find_first_match(lambda x: x == 10, [11, 12, 8]) is None
%%inlinetest FindFirstMatchInlineTest
assert find_first_match(lambda x: True, []) is None, 'Do you have the correct return value when there are no items?'
assert find_first_match(lambda x: x > 1, [2]) == 2, 'Are you checking if calling the predicate returns True?'
assert find_first_match(lambda x: x > 1, [1, 2]) == 2, 'Are you checking all of the items?'
%%solution
def my_filter(predicate, items):
# BEGIN PROMPT
pass
# END PROMPT
# BEGIN SOLUTION
filtered_items = []
for item in items:
if predicate(item):
filtered_items.append(item)
return filtered_items
# END SOLUTION
%%studenttest MyFilterStudentTest
assert(my_filter(lambda x : x > 3, [4, 2, 8, -3, 10, 3]) ==
[4, 8, 10])
assert (my_filter(lambda x: len(x) > 3,
['dog', 'elephant', 'cat', 'hippopotamus']) ==
['elephant', 'hippopotamus'])
assert my_filter(lambda x: x > 2, [2, 4, 5]) == [4, 5]
assert my_filter(lambda x: x, [True, True, False]) == [True, True]
assert (my_filter(lambda x: x[0] * x[1] < 1, [(1, 0.5), (0.8, 0.9),
(2, 1)]) == [(1, 0.5),
(0.8, 0.9)])
%%inlinetest MyFilterInlineTest
assert len(my_filter(lambda x: True, [1, 2])) == 2, 'Are you returning all matching items?'
assert my_filter(lambda x: x > 1, [1, 2]) == [2], 'Are you calling the predicate on each item?'
assert my_filter(lambda x: True, [1, 2]) != [2, 1], 'Are you following the order of the provided items?'
%%solution
def my_map(function, items):
# BEGIN PROMPT
pass
# END PROMPT
# BEGIN SOLUTION
transformed_items = []
for item in items:
transformed_items.append(function(item))
return transformed_items
# END SOLUTION
%%studenttest MyMapStudentTest
assert my_map(lambda x: x, [1, 2, 3]) == [1, 2, 3]
assert my_map(lambda x: x[0], [(1, 2), (2, 3), (3, 4)]) == [1, 2, 3]
assert my_map(lambda x: x > 3, [8, 2, 1]) == [True, False, False]
%%inlinetest MyMapStudentTest
assert len(my_map(lambda x: x, [1, 2])) == 2, 'Are you returning all of the mapped items?'
assert my_map(lambda x: x, [1, 2]) != [2, 1], 'Are you following the order of the provided items?'
assert my_map(lambda x: x + 1, [1, 2]) == [2, 3], 'Are you calling the function on each of the items?'
filtered_list = list(filter(lambda x : x > 3, [4, 2, 8, -3, 10, 3]))
assert(filtered_list == [4, 8, 10])
mapped_list = list(map(lambda x: x**2, [1, 2, 3]))
assert(mapped_list == [1, 4, 9])
# Prints [1, 4, 9].
print([x ** 2 for x in [1, 2, 3]])
print([x for x in [4, 2, 8, -3, 10, 3] if x > 3])
# Prints [4, 9] since only x > 1 are transformed.
print([x ** 2 for x in [1, 2, 3] if x > 1])
# Creates a list of squares for each number in [1, 2, 3] that is larger than 1.
print([[x ** 2 for x in range(y)] for y in [1, 2, 3] if y > 1])
# Converts a list of number strings into numbers, then creates 3 x 3 matrices
# containing each number.
print([[[x] * 3] * 3 for x in [int(y) for y in ['1', '2', '3']]])
%%solution
# [(city name, population, area)]
test_data = [('Chiyoda', 64894, 11.66),
             ('Minato', 259042, 20.37),
             ('Shinjuku', 349844, 18.22),
             ('Bunkyo', 233926, 11.29),
             ('Taito', 207838, 10.11)]
# BEGIN PROMPT
class City:
    # Q 6.1.
    def __init__(self, data):
        pass
    # Q 6.2.
    def population_density(self):
        pass
# Q6.3.
def top_k_densest_city_names(city_list, k):
    pass
# END PROMPT
# BEGIN SOLUTION
class City:
    """A city record built from a (name, population, area) tuple."""
    # Q 6.1.
    def __init__(self, data):
        self.name = data[0]        # city name
        self.population = data[1]  # number of residents
        self.area = data[2]        # land area (units per the data source)
    # Q 6.2.
    def population_density(self):
        """Return the population per unit area."""
        return self.population / self.area
# Q6.3.
def top_k_densest_city_names(city_list, k):
    """Return the names of the `k` densest cities, densest first."""
    sorted_cities = list(sorted(city_list, key=lambda c: c.population_density(), reverse=True))
    return list(map(lambda c: c.name, sorted_cities[:k]))
# END SOLUTION
%%studenttest FPAndOOPStudentTest1
# Q 6.1.
## Create a `City` instance for ('Chiyoda', 64894, 11.66)
chiyoda_ku = City(test_data[0])
## Each data must be accessed
assert(chiyoda_ku.name == 'Chiyoda')
assert(chiyoda_ku.population == 64894)
assert(chiyoda_ku.area == 11.66)
%%studenttest FPAndOOPStudentTest2
# Q 6.2.
## Population density is (population / area)
assert(chiyoda_ku.population_density() == 64894 / 11.66)
%%studenttest FPAndOOPStudentTest3
# Q 6.3.
## Create a list of `City` instances by using `map` function.
city_list = list(map(City, test_data))
## Get Top 5 cities
top5_densest_cities = top_k_densest_city_names(city_list, 5)
# Expected: ['Bunkyo', 'Taito', 'Shinjuku', 'Minato', 'Chiyoda']
print('Top 5 cities: {}'.format(top5_densest_cities))
assert(len(top5_densest_cities) == 5)
expected = ['Bunkyo', 'Taito', 'Shinjuku', 'Minato', 'Chiyoda']
assert top5_densest_cities == expected, 'Expected: {}, Actual: {}'.format(expected, top5_densest_cities)
%%studenttest FPAndOOPStudentTest3_2
# More tests: Change the value of `k`
## Get Top 2 cities
top2_densest_cities = top_k_densest_city_names(city_list, 2)
print('Top 2 cities: {}'.format(top2_densest_cities))
assert len(top2_densest_cities) == 2
assert top2_densest_cities == [ 'Bunkyo', 'Taito' ]
## What if `k` is 0?
top0_densest_cities = top_k_densest_city_names(city_list, 0)
print('Top 0 cities: {}'.format(top0_densest_cities))
assert(top0_densest_cities == [])
%%inlinetest FPAndOOPStudentTest
# Q 6.1.
try:
City(('A', 1, 1))
except NameError:
assert False, 'class `City` is not implemented'
except Exception:
assert False, 'City((\'A\', 1, 1)) raised an exception'
try:
c = City(('A', 1, 1))
name = c.__class__.__name__
except Exception:
assert False, "City(('A', 1, 1)) raised an exception"
if name != 'City':
assert False, 'The class name is not `City` but {}'.format(name)
test_data = [('Chiyoda', 64894, 11.66),
('Minato', 259042, 20.37),
('Shinjuku', 349844, 18.22),
('Bunkyo', 233926, 11.29),
('Taito', 207838, 10.11)]
city_a = City(test_data[0])
try:
city_a.name
city_a.population
city_a.area
except AttributeError:
assert False, 'The class `City` must have fields `name`, `population` and `area`.'
assert city_a.name == 'Chiyoda', '`name` field is not implemeted properly'
if city_a.population == test_data[1] and city_a.area == test_data[0]:
assert False, 'You may swap `population` and `area`?'
assert city_a.population == test_data[0][1], '`population` field is not implemeted properly'
assert city_a.area == test_data[0][2], '`area` field is not implemeted properly'
# Q 6.2.
try:
city_a.population_density()
except AttributeError:
assert False, 'The class has no method like `city_a.population_density()`.'
assert city_a.population_density() == city_a.population / city_a.area, 'population_density() must return `population` / `area`'
# Q 6.3.
try:
top_k_densest_city_names
except NameError:
assert False, 'function \'top_k_densest_city_names\' is not defined'
city_list = list(map(lambda data: City(data), test_data))
try:
assert top_k_densest_city_names(city_list, 3) is not None, "You have not implemented top_k_densest_city_names"
except Exception as e:
assert False, 'Error when trying to run top_k_densest_city_names: %s' % e
ans3 = top_k_densest_city_names(city_list, 3)
assert len(ans3) == 3, 'top_k_densest_city_names(..., 3) must return list with 3 elements, but got %d' % len(ans3)
dense_cities = top_k_densest_city_names(city_list, 5)
assert dense_cities.__class__ == list, "top_k_densest_city_names() should return a list, but got %s" % dense_cities.__class__
assert len(dense_cities) == 5, "top_k_densest_city_names(city_list, 5) should return a list with 5 elements, but got %d" % len(dense_cities)
assert dense_cities[0].__class__ == str, "top_k_densest_city_names() should return a list of strings, but got %s" % dense_cities[0].__class__
ans = ['Bunkyo', 'Taito', 'Shinjuku', 'Minato', 'Chiyoda']
# Bug fix: the failure message previously quoted a wrong ranking that
# included 'Chuo', a city not present in test_data at all. Build the message
# from `ans` so it can never drift out of sync with the expected value.
assert dense_cities == ans, ('the population density ranking should be %s, but your code returned %s' % (ans, dense_cities))
result, logs = %autotest FPAndOOPStudentTest
assert result.results['passed']
report(FPAndOOPStudentTest, results=result.results)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Modes
Step2: This is a markdown cell, so even if something looks like code (as below), it won't get executed!
Step3: Common gotcha
Step4: Now we initialize Hail and set up Bokeh to display inline in the notebook.
Step5: Download public 1000 Genomes data
Step6: It is possible to call command-line utilities from Jupyter by prefixing a line with a !
Step7: Part 1
Step8: Read 1KG into Hail
Step9: What is a MatrixTable?
Step10: count
Step11: show
Step12: <font color="brightred"><strong>Exercise
Step13: Most of Hail's functionality is totally general-purpose!
Step14: Or if we had flight data
Step15: By sorting the result in Python, we can recover an interesting bit of biology...
Step16: <font color="brightred"><strong>Question
Step17: Using our equation above, the mean value indicates about 99.9% confidence. But it's not generally a good idea to draw conclusions just based on a mean and standard deviation...
Step18: To look at GQ at genotypes that are not heterozygous, we need add only one character (~)
Step19: There are often many ways to accomplish something in Hail. We could have done these both together (and more efficiently!) using hl.agg.group_by
Step20: Of course, the best way to understand a distribution is to look at it!
Step21: <font color="brightred"><strong>Exercise
Step22: We can import it as a Hail Table with hl.import_table.
Step23: While we can see the names and types of fields in the logging messages, we can also describe and show this table
Step24: Add sample metadata into our 1KG MatrixTable
Step25: What's going on here?
Step26: <font color="brightred"><strong>Exercise
Step27: <font color="brightred"><strong>Exercise
Step28: Entry QC
Step29: Variant QC
Step30: Remove rare sites
Step31: Remove sites far from Hardy-Weinberg equilbrium
Step32: Part 3
Step33: Two of the plots that analysts generally produce are a Manhattan plot and a Q-Q plot.
Step34: Confounded!
Step35: The eigenvalues reflect the amount of variance explained by each principal component
Step36: The scores are the principal components themselves, computed per sample.
Step37: The loadings are the contributions to each component for each variant.
Step38: We can annotate the principal components back onto mt
Step39: Principal components measure ancestry
Step40: <font color="brightred"><strong>Question
Step41: Part 4
Step42: Create an interval key
Step43: Annotate variants using these intervals
Step44: Aggregate genotypes per gene
Step45: What is burden_mt?
Step46: Sorry, no hl.plot.manhattan for genes!
|
<ASSISTANT_TASK:>
Python Code:
print('Hello, world')
# This is a code cell
my_variable = 5
print(my_variable)
import hail as hl
from bokeh.io import output_notebook, show
hl.init()
output_notebook()
hl.utils.get_1kg('data/')
! ls -1 data/
hl.import_vcf('data/1kg.vcf.bgz').write('data/1kg.mt', overwrite=True)
mt = hl.read_matrix_table('data/1kg.mt')
mt.describe()
mt.count()
mt.s.show(5)
mt.locus.show(5)
hl.summarize_variants(mt)
mt.aggregate_rows(hl.agg.count_where(mt.alleles == ['A', 'T']))
snp_counts = mt.aggregate_rows(
hl.array(hl.agg.counter(mt.alleles)))
snp_counts
sorted(snp_counts,
key=lambda x: x[1])
mt.aggregate_entries(hl.agg.stats(mt.GQ))
mt.aggregate_entries(hl.agg.filter(mt.GT.is_het(),
hl.agg.stats(mt.GQ)))
mt.aggregate_entries(hl.agg.filter(~mt.GT.is_het(),
hl.agg.stats(mt.GQ)))
mt.aggregate_entries(hl.agg.group_by(mt.GT,
hl.agg.stats(mt.GQ)))
p = hl.plot.histogram(
mt.GQ,
bins=100)
show(p)
! head data/1kg_annotations.txt
sa = hl.import_table('data/1kg_annotations.txt',
impute=True,
key='Sample')
sa.describe()
sa.show()
mt = mt.annotate_cols(pheno = sa[mt.s])
mt.describe()
mt = hl.sample_qc(mt)
mt.sample_qc.describe()
p = hl.plot.scatter(x=mt.sample_qc.r_het_hom_var,
y=mt.sample_qc.call_rate)
show(p)
mt = mt.filter_cols(mt.sample_qc.dp_stats.mean >= 4)
mt = mt.filter_cols(mt.sample_qc.call_rate >= 0.97)
# call rate before filtering
mt.aggregate_entries(hl.agg.fraction(hl.is_defined(mt.GT)))
ab = mt.AD[1] / hl.sum(mt.AD)
filter_condition_ab = (
hl.case()
.when(mt.GT.is_hom_ref(), ab <= 0.1)
.when(mt.GT.is_het(), (ab >= 0.25) & (ab <= 0.75))
.default(ab >= 0.9) # hom-var
)
mt = mt.filter_entries(filter_condition_ab)
# call rate after filtering
mt.aggregate_entries(hl.agg.fraction(hl.is_defined(mt.GT)))
mt = hl.variant_qc(mt)
mt.variant_qc.describe()
mt.variant_qc.AF.show()
mt = mt.filter_rows(hl.min(mt.variant_qc.AF) > 1e-6)
mt = mt.filter_rows(mt.variant_qc.p_value_hwe > 0.005)
# final variant and sample count
mt.count()
gwas = hl.linear_regression_rows(y=mt.pheno.CaffeineConsumption,
x=mt.GT.n_alt_alleles(),
covariates=[1.0])
gwas.describe()
p = hl.plot.manhattan(gwas.p_value)
show(p)
p = hl.plot.qq(gwas.p_value)
show(p)
pca_eigenvalues, pca_scores, pca_loadings = hl.hwe_normalized_pca(mt.GT, compute_loadings=True)
pca_eigenvalues
pca_scores.describe()
pca_scores.show()
pca_loadings.describe()
mt = mt.annotate_cols(pca = pca_scores[mt.s])
p = hl.plot.scatter(mt.pca.scores[0],
mt.pca.scores[1],
label=mt.pheno.SuperPopulation)
show(p)
gwas = hl.linear_regression_rows(
y=mt.pheno.CaffeineConsumption,
x=mt.GT.n_alt_alleles(),
covariates=[1.0, mt.pheno.isFemale, mt.pca.scores[0], mt.pca.scores[1], mt.pca.scores[2]])
p = hl.plot.qq(gwas.p_value)
show(p)
p = hl.plot.manhattan(gwas.p_value)
show(p)
! wget https://storage.googleapis.com/hail-tutorial/ensembl_gene_annotations.txt -O data/ensembl_gene_annotations.txt
gene_ht = hl.import_table('data/ensembl_gene_annotations.txt', impute=True)
gene_ht.show()
gene_ht.count()
gene_ht = gene_ht.transmute(interval = hl.locus_interval(gene_ht['Chromosome'],
gene_ht['Gene start'],
gene_ht['Gene end']))
gene_ht = gene_ht.key_by('interval')
mt = mt.annotate_rows(gene_info = gene_ht[mt.locus])
mt.gene_info.show()
burden_mt = (
mt
.group_rows_by(gene = mt.gene_info['Gene name'])
.aggregate(n_variants = hl.agg.count_where(mt.GT.n_alt_alleles() > 0))
)
burden_mt.describe()
burden_results = hl.linear_regression_rows(
y=burden_mt.pheno.CaffeineConsumption,
x=burden_mt.n_variants,
covariates=[1.0,
burden_mt.pheno.isFemale,
burden_mt.pca.scores[0],
burden_mt.pca.scores[1],
burden_mt.pca.scores[2]])
burden_results.order_by(burden_results.p_value).show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Create CFNCluster
Step2: After you verified the project information, you can execute the pipeline. When the job is done, you will see the log infomration returned from the cluster.
Step3: Use case 2.1
Step4: Use case 2.2
Step5: Use case 2.3
Step6: Use case 3
Step7: Use case 4
Step8: To check the processing status
Step9: To delete the cluster, you just need to set the cluster name and call the below function.
Step10: TODO
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
sys.path.append(os.getcwd().replace("notebooks/awsCluster", "src/awsCluster"))
from util import DesignFileLoader
## S3 input and output address.
s3_input_files_address = "s3://path/to/s3_input_files_address"
s3_output_files_address = "s3://path/to/s3_output_files_address"
## CFNCluster name
your_cluster_name = "cluster_name"
## The private key pair for accessing cluster.
private_key = "/path/to/aws_priate_key.pem"
## Project information
project_name = "project_name"
## The use case number
# use_case = 1: whole-genome sequencing pipeline
# use_case = 2.1: the workflow "star_htseq_workflow" of the RNA sequencing pipeline
# use_case = 2.2: the workflow "kallisto_deseq_workflow" of the RNA sequencing pipeline
# use_case = 2.3: the workflow "star_gatk_workflow" of the RNA sequencing pipeline
# use_case = 3: ChiP sequencing pipeline
# use_case = 4: miRNA sequencing pipeline
use_case = "1"
## If delete cfncluster after job is done.
delete_cfncluster = False
from cfnCluster import CFNClusterManager, ConnectionManager
## Create a new cluster
master_ip_address = CFNClusterManager.create_cfn_cluster(cluster_name=your_cluster_name)
ssh_client = ConnectionManager.connect_master(hostname=master_ip_address,
username="ec2-user",
private_key_file=private_key)
from dnaSeq import WGSPipelineManager
if use_case == "1":
## analysis steps include: "fastqc", "bwa-alignment", "post-alignment", "gatk-haplotype"
analysis_steps = ["fastqc", "bwa-alignment"]
## load design file by calling a function load_design_file in GroupFileMaker.
design_file = os.getcwd().replace("notebooks/awsCluster", "data/awsCluster/dnaseq_design_example.txt")
sample_list, group_list = DesignFileLoader.load_design_file(design_file)
WGSPipelineManager.execute(ssh_client, project_name, analysis_steps, s3_input_files_address,
sample_list, group_list, s3_output_files_address)
from rnaSeq import RNAPipelineManager
if use_case == "2.1":
## star_htseq_workflow
workflow = "star_htseq_workflow"
## "fastqc", "alignment", "counting"
analysis_steps = ["fastqc", "alignment", "counting"]
## load design file by calling a function load_design_file in GroupFileMaker.
design_file = os.getcwd().replace("notebooks/awsCluster", "data/awsCluster/rnaseq_design_example.txt")
sample_list, group_list = DesignFileLoader.load_design_file(design_file)
## run the RNA sequencing pipeline
RNAPipelineManager.run_analysis(ssh_client, workflow, project_name, analysis_steps,
s3_input_files_address, sample_list, group_list, s3_output_files_address)
from rnaSeq import RNAPipelineManager
if use_case == "2.2":
## kallisto_deseq_workflow
workflow = "kallisto_deseq_workflow"
## "fastqc", "alignment", "counting", "differential_calculation"
analysis_steps = ["fastqc"]
## load design file by calling a function load_design_file in GroupFileMaker.
design_file = os.getcwd().replace("notebooks/awsCluster", "data/awsCluster/rnaseq_design_example.txt")
sample_list, group_list = DesignFileLoader.load_design_file(design_file)
## run the RNA sequencing pipeline
RNAPipelineManager.run_analysis(ssh_client, workflow, project_name, analysis_steps,
s3_input_files_address, sample_list, group_list, s3_output_files_address)
from rnaSeq import RNAPipelineManager
if use_case == "2.3":
## star_gatk_workflow
workflow = "star_gatk_workflow"
## "fastqc", "alignment", "variant_calling"
analysis_steps = ["fastqc"]
## load design file by calling a function load_design_file in GroupFileMaker.
design_file = os.getcwd().replace("notebooks/awsCluster", "data/awsCluster/rnaseq_vc_design_example.txt")
sample_list, group_list = DesignFileLoader.load_design_file(design_file)
## run the RNA sequencing pipeline
RNAPipelineManager.run_analysis(ssh_client, workflow, project_name, analysis_steps,
s3_input_files_address, sample_list, group_list, s3_output_files_address)
from chipSeq import ChipPipelineManager
if use_case == "3":
## "homer_workflow"
workflow = "homer_workflow"
## "fastqc", "alignment", "make_tag_directory", "make_UCSC_file", "find_peaks", "annotate_peaks", "pos2bed", "find_motifs_genome"
analysis_steps = ["annotate_peaks", "pos2bed", "find_motifs_genome"]
## the style option can be either "factor", "histone".
style = "histone"
## current available genomes: hg18, hg19, mm9, mm10
genome = "hg19"
## load design file by calling a function load_design_file in GroupFileMaker.
design_file = os.getcwd().replace("notebooks/awsCluster", "data/awsCluster/chipSeq_design_example.txt")
sample_list, group_list = DesignFileLoader.load_chipseq_design_file(design_file)
## run the ChiP sequencing pipeline
ChipPipelineManager.run_analysis(ssh_client, workflow, project_name, analysis_steps,
s3_input_files_address, sample_list, group_list, style, genome, s3_output_files_address)
from miRNASeq import SmallRNAPipelineManager
if use_case == "4":
## "fastqc", "bowtie-alignment", "counting"
analysis_steps = ["fastqc", "bowtie-alignment", "counting"]
## load design file by calling a function load_design_file in GroupFileMaker.
design_file = os.getcwd().replace("notebooks/awsCluster", "data/awsCluster/smallrnaseq_design_example.txt")
sample_list, group_list = DesignFileLoader.load_design_file(design_file)
SmallRNAPipelineManager.execute(ssh_client, project_name, analysis_steps, s3_input_files_address,
sample_list, group_list, s3_output_files_address)
from miRNASeq import SmallRNAPipelineManager
from chipSeq import ChipPipelineManager
from dnaSeq import WGSPipelineManager
from rnaSeq import RNAPipelineManager
if use_case == "1":
WGSPipelineManager.check_processing_status(ssh_client)
if use_case.startswith("2"):
RNAPipelineManager.check_processing_status(ssh_client)
if use_case == "3":
ChipPipelineManager.check_processing_status(ssh_client)
if use_case == "4":
SmallRNAPipelineManager.check_processing_status(ssh_client)
from cfnCluster import CFNClusterManager
if delete_cfncluster == True:
CFNClusterManager.delete_cfn_cluster(cluster_name=your_cluster_name)
from rnaSeq import ConfigureBuilder
## creating a list of options for configuration.
## configuring Star options
star_option_list = [["runThreadN", "1"], ["clip3pNbases", "0"], ["outFileNamePrefix", "mysam"], ["outReadsUnmapped", "None"]]
ConfigureBuilder.configure_star(star_option_list)
## configuring Kallisto options
kallisto_option_list = [["fragment-length", "50"], ["bootstrap-samples", "0"], ["seed", "42"]]
ConfigureBuilder.configure_kallisto(kallisto_option_list)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Cat vs. Dog Image Classification
Step2: These are just a few of the options available (for more, see the Keras documentation. Let's quickly go over what we just wrote
Step3: Next, let's apply the datagen transformations to a cat image from the training set to produce five random variants. Rerun the cell a few times to see fresh batches of random variants.
Step4: Add Data Augmentation to the Preprocessing Step
Step5: If we train a new network using this data augmentation configuration, our network will never see the same input twice. However the inputs that it sees are still heavily intercorrelated, so this might not be quite enough to completely get rid of overfitting.
Step6: Retrain the Model
Step7: Solution
Step8: Note that with data augmentation in place, the 2,000 training images are randomly transformed each time a new training epoch runs, which means that the model will never see the same image twice during training.
Step9: Much better! We are no longer overfitting, and we have gained ~3 validation accuracy percentage points (see the green line in the top chart). In fact, judging by our training profile, we could keep fitting our model for 30+ more epochs and we could probably get to ~80%!
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Random-augmentation settings; each transform is sampled independently
# for every image the generator yields.
_augmentation_options = dict(
    rotation_range=40,       # degrees
    width_shift_range=0.2,   # fraction of total width
    height_shift_range=0.2,  # fraction of total height
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest',
)
datagen = ImageDataGenerator(**_augmentation_options)
# Shell escape (IPython): download the cats-vs-dogs archive to /tmp.
!wget --no-check-certificate \
https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip -O \
/tmp/cats_and_dogs_filtered.zip
import os
import zipfile

# Path of the archive downloaded by the wget cell above.
local_zip = '/tmp/cats_and_dogs_filtered.zip'

# Extract into /tmp. The context manager guarantees the archive handle is
# closed even if extraction raises; the original explicit close() would be
# skipped on error.
with zipfile.ZipFile(local_zip, 'r') as zip_ref:
    zip_ref.extractall('/tmp')
# Build the per-split, per-class directory paths of the extracted dataset.
base_dir = '/tmp/cats_and_dogs_filtered'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
# Directory with our training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
# Directory with our training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
# Directory with our validation cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
# Directory with our validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
# Filenames of the training images, used below to pick sample images.
train_cat_fnames = os.listdir(train_cats_dir)
train_dog_fnames = os.listdir(train_dogs_dir)
# Preview the augmentation: render five random variants of one training
# cat image using the `datagen` configured above.
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from tensorflow.keras.preprocessing.image import array_to_img, img_to_array, load_img
img_path = os.path.join(train_cats_dir, train_cat_fnames[2])
img = load_img(img_path, target_size=(150, 150)) # this is a PIL image
x = img_to_array(img) # Numpy array with shape (150, 150, 3)
x = x.reshape((1,) + x.shape) # Numpy array with shape (1, 150, 150, 3)
# The .flow() command below generates batches of randomly transformed images
# It will loop indefinitely, so we need to `break` the loop at some point!
i = 0
for batch in datagen.flow(x, batch_size=1):
plt.figure(i)
imgplot = plt.imshow(array_to_img(batch[0]))
i += 1
if i % 5 == 0:
break
# Adding rescale, rotation_range, width_shift_range, height_shift_range,
# shear_range, zoom_range, and horizontal flip to our ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,)
# Note that the validation data should not be augmented!
# (Only rescaling is applied, so validation metrics measure real images.)
val_datagen = ImageDataGenerator(rescale=1./255)
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
train_dir, # This is the source directory for training images
target_size=(150, 150), # All images will be resized to 150x150
batch_size=20,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
# Flow validation images in batches of 20 using val_datagen generator
validation_generator = val_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow.keras.optimizers import RMSprop

# Convnet for binary image classification on 150x150 RGB inputs.
# Our input feature map is 150x150x3: 150x150 for the image pixels, and 3 for
# the three color channels: R, G, and B
img_input = layers.Input(shape=(150, 150, 3))

# First convolution extracts 16 filters that are 3x3
# Convolution is followed by max-pooling layer with a 2x2 window
x = layers.Conv2D(16, 3, activation='relu')(img_input)
x = layers.MaxPooling2D(2)(x)

# Second convolution extracts 32 filters that are 3x3
# Convolution is followed by max-pooling layer with a 2x2 window
x = layers.Conv2D(32, 3, activation='relu')(x)
x = layers.MaxPooling2D(2)(x)

# Third convolution extracts 64 filters that are 3x3
# Convolution is followed by max-pooling layer with a 2x2 window
# (was layers.Convolution2D, an alias of Conv2D; renamed for consistency
# with the other convolution layers above — identical behavior)
x = layers.Conv2D(64, 3, activation='relu')(x)
x = layers.MaxPooling2D(2)(x)

# Flatten feature map to a 1-dim tensor so dense layers can consume it
x = layers.Flatten()(x)

# Create a fully connected layer with ReLU activation and 512 hidden units
x = layers.Dense(512, activation='relu')(x)

# Add a dropout rate of 0.5 to reduce overfitting
x = layers.Dropout(0.5)(x)

# Create output layer with a single node and sigmoid activation
output = layers.Dense(1, activation='sigmoid')(x)

# Configure and compile the model.
# NOTE(review): `lr` is the legacy optimizer argument name (newer Keras
# spells it `learning_rate`); kept for compatibility with the TF version
# this notebook targets — confirm before upgrading.
model = Model(img_input, output)
model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(lr=0.001),
              metrics=['acc'])
# WRITE CODE TO TRAIN THE MODEL ON ALL 2000 IMAGES FOR 30 EPOCHS, AND VALIDATE
# ON ALL 1,000 VALIDATION IMAGES
# 100 steps/epoch x batch_size 20 = 2,000 training images per epoch;
# 50 validation steps x batch_size 20 = 1,000 validation images.
# NOTE(review): fit_generator is deprecated in newer Keras in favor of
# model.fit, which accepts generators directly — confirm against the TF
# version in use.
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=30,
validation_data=validation_generator,
validation_steps=50,
verbose=2)
# Pull the per-epoch metric lists that Keras recorded during training.
metrics = history.history
acc, val_acc = metrics['acc'], metrics['val_acc']
loss, val_loss = metrics['loss'], metrics['val_loss']

# One x-value per completed training epoch.
epochs = range(len(acc))

# Accuracy curves: training vs. validation, per epoch.
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.title('Training and validation accuracy')

# Loss curves on a fresh figure.
plt.figure()
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title('Training and validation loss')
import os
import signal

# Terminate this process immediately — SIGKILL cannot be caught, so nothing
# after this line runs (in a hosted notebook this forces a kernel restart).
os.kill(os.getpid(), signal.SIGKILL)
<END_TASK>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.