text_prompt
stringlengths 168
30.3k
| code_prompt
stringlengths 67
124k
|
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Meet the data
Step2: What columns do we have?
Step3: A quick look at our data
Step4: Where do we begin?
Step5: What did that just do?
Step6: Get subset of columns (DataFrame)
Step7: Get a subset of rows using a boolean array
Step8: So now we know about DataFrames
Step9: What about relationships between variables?
Step10: Target is skewed --- let's transform it so it's more normal
Step11: I see lots of zeroes --- Where are these bad performances coming from?
Step12: Modeling
Step13: Today we will use simple train/test split
Step14: Missing values in numeric columns
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import pandas as pd
import seaborn as sns
# Load the raw football data and take a first look at the columns.
raw_df = pd.read_csv("/home/brianb/Downloads/odsc_football_modeling_data_2.csv")
df_no_week_1 = raw_df[raw_df.week > 1]
ff_cols = raw_df.columns
# `sort` previously resolved to numpy's sort via the %pylab star import;
# call it explicitly so the cell does not depend on the pylab namespace.
np.sort(ff_cols.values)
raw_df[ff_cols].head()
df_no_week_1[ff_cols].head()
raw_df = pd.read_csv("/home/brianb/Downloads/odsc_football_modeling_data_2.csv")
raw_df['full_name'].head()
raw_df[['full_name', 'position', 'team']].head()
raw_df[raw_df.week > 1].head()
df_no_week_1 = raw_df[raw_df.week > 1]
df_no_week_1.isnull().sum()
pylab.hist(raw_df['fanduel_points'],
normed=True,
bins=np.linspace(-1, 35, 12),
alpha=0.35,
label='fanduel_points')
pylab.legend()
# Target is skewed --- log-transform it so it's more normal.
# NOTE: the original built the frame from `fanduel_points > 1` but computed the
# log from a differently filtered subset (`> 0`); that only produced correct
# values through pandas index alignment.  Use one consistent mask instead.
# (A stray duplicate pylab.figure() call that created an empty figure was dropped.)
transformed_target = raw_df[raw_df.fanduel_points > 1].copy()
transformed_target['fanduel_points'] = np.log(transformed_target['fanduel_points'] - 1)
pylab.figure(figsize=(15, 15))
pylab.hist(transformed_target['fanduel_points'],
           normed=True,  # NOTE: `normed` was removed in matplotlib 3.x; use density=True there
           bins=np.linspace(-1, 4, 100),
           alpha=0.35,
           label='fanduel_points')
pylab.legend()
# I see lots of zeroes --- where are these bad performances coming from?
# (The original had a dead first assignment -- raw_df indexed by a notnull()
# DataFrame -- that was immediately overwritten on the next line; dropped.)
no_nans = raw_df[raw_df.position != 'UNK']  # remove Unknowns from this dataframe
no_nans.groupby('position').size()
bad_performances = pd.DataFrame({'count': no_nans.groupby('position').size()}).reset_index()
# DataFrame.sort() was removed in pandas 0.20+; sort_values is the replacement
bad_performances = bad_performances.sort_values(['count'], ascending=[False])
print(bad_performances)  # Python 3 print (original used Python 2 statement syntax)
# NOTE: factorplot/size/x_order are legacy seaborn arguments (catplot/height/order
# in seaborn >= 0.9); kept for compatibility with the notebook's environment.
g = sns.factorplot("position", "count",
                   data=bad_performances, kind="bar",
                   size=15, palette="pastel", dropna=True,
                   x_order=bad_performances.position.values)
#Let's look at an individual player
raw_df[(raw_df.full_name =='Tom Brady') & (raw_df.week == 2)]
#All positions are not created equal -- some score many more points than others
raw_df[raw_df[raw_df.fanduel_points> 1].notnull()].groupby('position')['fanduel_points'].sum()
#Since a few positions seem to score all the points, let's zoom in on those
plot_order = ['TE', 'WR', 'RB',
'K', 'QB']
top_positions_only = raw_df[raw_df.position.isin(plot_order)]
top_positions_only.groupby('position')['fanduel_points'].mean()
# Violin plots are a nice alternative to boxplots that also show interesting detail about
# the shape of the distribution
nonnull_subset = top_positions_only['fanduel_points'].notnull()
plt.figure(figsize=(12, 6))
sns.violinplot(top_positions_only['fanduel_points'][nonnull_subset],
top_positions_only['position'][nonnull_subset],
inner='box',
order=plot_order,
bw=1,
size=16)
# QB's and kickers score the most points. Let's look into those using a Histogram
qb_k = ['K', 'QB']
qb_k_data = raw_df[raw_df.position.isin(qb_k)]
groups = qb_k_data.groupby('position').groups  # {position -> row index labels}
pylab.figure(figsize=(15, 5))
# dict.iteritems() is Python 2 only; .items() works on both Python 2 and 3
for key, row_ids in groups.items():
    pylab.hist(qb_k_data['fanduel_points'][row_ids].values,
               normed=True,
               bins=np.linspace(-10, 50, 50),
               alpha=0.35,
               label=str(key))
pylab.legend()
# We can use a Facetgrid to analyze teams
def vertical_mean_line(x, **kwargs):
    """Draw a vertical marker line for use with FacetGrid.map.

    NOTE: despite the name, the line is drawn at the 95th percentile of
    *x*, not at its mean.  Extra keyword arguments are forwarded to
    plt.axvline (color, linestyle, ...).
    """
    p95 = np.percentile(x, 95)
    plt.axvline(p95, **kwargs)
teams = ['NE', 'CHI']
team_data = raw_df[raw_df.team.isin(teams)]
team_data = team_data[team_data.week < 4]
g = sns.FacetGrid(team_data, row="team", col="week",
margin_titles=True, dropna=True, size=4)
bins = np.linspace(-3, 30, 30)
g.map(plt.hist, "fanduel_points", color="black", bins=bins,
lw=0, normed=True)
g.map(vertical_mean_line, 'fanduel_points')
# We can use a Heatmap to analyze teams kicker and qb performance
teams = ['NE', 'CHI', 'NYG', 'DET', 'NYJ']
team_data = qb_k_data[qb_k_data.team.isin(teams)]
team_data = team_data[team_data.week < 10]
ptable = pd.pivot_table(
team_data,
values='fanduel_points',
index=["team"],
columns='week')
reorder_teams = ptable.reindex(teams).fillna(0)
pylab.figure(figsize=(15,5))
sns.heatmap(reorder_teams.astype(int), annot=True, fmt="d", cmap="YlGnBu")
# Zero values are bye weeks
# Are previous week's points a good predictor of current week's points?
# Let's consider only kicker and QB data for these teams
# We have to exclude week 1 here since there is no previous weeks' mean
team_data_no_week_1 = team_data[team_data.week > 1]
grid = sns.JointGrid(team_data_no_week_1['mean_fanduel_points'],
team_data_no_week_1['fanduel_points'],space=0, size=10, ratio=50)
grid.plot_joint(plt.scatter, color="g")
grid.plot_marginals(sns.rugplot, height=1, color="g")
# We can use jointplot (uses JointGrid internally) to get a quick regression line for this
sns.jointplot('mean_fanduel_points', 'fanduel_points', data=team_data_no_week_1,
kind="reg", color=sns.color_palette()[1], size=9)
# QB's are significantly more important than any other position. Let's dig in
qb_df = raw_df[raw_df.position == 'QB']
# Passing attempts from previous weeks --- is there a trend with next week's performance? No
sns.jointplot(qb_df['mean_passing_att'],
qb_df['fanduel_points'], kind="reg", size=9)
import sklearn
# Let's prep for modeling: drop week 1 (no previous-week features exist) and
# non-scoring rows, then separate the target from the features.
exclude_week_1 = top_positions_only[top_positions_only.week > 1]
model_data = pd.DataFrame.copy(exclude_week_1)
model_data = model_data[model_data.fanduel_points > 0]
# Sanity check: the target should contain no NaNs after filtering
print(np.isnan(model_data['fanduel_points']).sum())
# Let's cut our target out so we don't train on it
target = model_data.pop('fanduel_points')
# We don't need player id's --- let's throw this away
throw_away = model_data.pop('player_id')
# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20;
# modern code should use sklearn.model_selection.train_test_split instead.
import sklearn.cross_validation
(train_data,
 test_data,
 train_target,
 test_target) = sklearn.cross_validation.train_test_split(
    model_data, target, test_size=0.2, random_state=1337
)
#Handle categorical vars
import sklearn.preprocessing
import sklearn.feature_extraction
from sklearn.feature_extraction import DictVectorizer
encoder = DictVectorizer(sparse=False)
#Let's do one-hot encoding in sklearn using DictVectorizer
categorical_vars = ['full_name', 'position', 'team', 'week', 'opponent', 'home_team', 'away_team']
vardata = train_data[categorical_vars].fillna('MISSING')
encoder.fit(vardata.to_dict(orient='records'))
train_catdata = encoder.transform(vardata.to_dict(orient='records'))
test_vardata = test_data[categorical_vars].fillna('MISSING')
test_catdata = encoder.transform(
test_vardata[categorical_vars].to_dict(orient='records'))
pd.DataFrame(train_catdata).describe()
#Handle numeric vars
from sklearn.preprocessing import Imputer
imputer = Imputer(strategy='median')
numeric_vars = list(set(train_data.columns.tolist()) - set(categorical_vars))
numdata = train_data[numeric_vars]
imputer.fit(numdata)
train_numdata = imputer.transform(numdata)
test_numdata = imputer.transform(test_data[numeric_vars])
train_this = np.hstack([train_numdata, train_catdata])
test_this = np.hstack([test_numdata, test_catdata])
import sklearn
from sklearn.linear_model import LinearRegression
# Sanity checks before fitting; `isnan` previously came from the %pylab star
# import -- reference it through np explicitly (and use Python 3 print calls).
print(np.any(np.isnan(train_numdata)))
print(np.all(np.isfinite(train_numdata)))
lr = LinearRegression(fit_intercept=False)
lr.fit(train_numdata, train_target)
lr_predictions = pd.Series(lr.predict(test_numdata),
                           name='Linear Regression')
p_df = pd.DataFrame({'Prediction': lr_predictions,
                     'Actual': test_target.values})
pylab.figure(figsize=(10, 10))
sns.jointplot('Actual', 'Prediction', data=p_df,
              kind="hex", color=sns.color_palette()[1])
#Let's take a look at our residuals using using just the categorical vars
from sklearn import metrics
test_metrics = {
'Explained Variance': metrics.explained_variance_score,
'MAE': metrics.mean_absolute_error,
'MSE': metrics.mean_squared_error,
'MedAE': metrics.median_absolute_error,
'R2': metrics.r2_score
}
def metrics_report(*predictions):
    """Build a metrics table with one row per prediction set.

    Each element of *predictions* is a pd.Series of model predictions
    whose ``.name`` labels the row.  Relies on the module-level
    ``test_metrics`` dict (metric name -> scoring function) and the
    held-out ``test_target`` series.
    """
    rows = []
    for preds in predictions:
        row = {'name': preds.name}
        for metric_name in sorted(test_metrics):
            scorer = test_metrics[metric_name]
            row[metric_name] = scorer(test_target, preds)
        rows.append(row)
    return pd.DataFrame.from_records(rows).set_index('name')
metrics_report(lr_predictions)
# We need to add reference models to track a baseline performance that we can compare our other models to
mean_response = np.mean(train_target)
mean_predictions = pd.Series(np.ones_like(test_target) * mean_response,
name='Mean Response')
median_response = np.median(train_target)
median_predictions = pd.Series(np.ones_like(test_target) * median_response,
name='Median Response')
metrics_report(mean_predictions,
median_predictions,
lr_predictions)
#Time for ElasticNet
from sklearn.grid_search import GridSearchCV
from sklearn.linear_model import ElasticNet
estimator = ElasticNet()
parameters = {
'alpha': np.linspace(0.1, 2, 10, endpoint=True),
'l1_ratio': np.linspace(0, 1, 10, endpoint=True)
}
enet = GridSearchCV(estimator, parameters)
enet.fit(train_numdata, train_target)
print(enet.best_params_, enet.best_score_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in enet.grid_scores_:
print("%0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() * 2, params))
print()
estimator2 = ElasticNet()
parameters2 = {
'alpha': np.linspace(0.4, 0.6, 10, endpoint=True),
'l1_ratio': np.linspace(0.4, 0.6, 10, endpoint=True)
}
enet2 = GridSearchCV(estimator2, parameters2)
enet2.fit(train_numdata, train_target)
print(enet2.best_params_, enet2.best_score_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in enet2.grid_scores_:
print("%0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() * 2, params))
print()
enet_predictions = pd.Series(enet.predict(test_numdata),
name='Elastic Net')
p_df = pd.DataFrame({'Enet Prediction': enet_predictions,
'Actual': test_target.values})
pylab.figure(figsize=(10, 10))
sns.jointplot('Actual', 'Enet Prediction', data=p_df, kind="hex",
color=sns.color_palette()[2])
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
estimator = RandomForestRegressor()
parameters = {'n_estimators': (5, 10, 15, 20, 25, 30, 35),
'max_depth': (3, 5, 7, 9, 11),
}
rfr = GridSearchCV(estimator, parameters, n_jobs=3)
rfr.fit(train_this, train_target)
rfr_predictions = pd.Series(rfr.predict(test_this),
name='Random Forest')
p_df = pd.DataFrame({'RF Prediction': rfr_predictions,
'Actual': test_target.values})
pylab.figure(figsize=(10, 10))
sns.jointplot('Actual', 'RF Prediction', data=p_df, kind="hex",
color=sns.color_palette()[3])
metrics_report(mean_predictions,
median_predictions,
lr_predictions,
enet_predictions,
rfr_predictions)
lr_diffs = lr_predictions - test_target
lr_diffs.name = 'LinearRegression Error'
rfr_diffs = rfr_predictions - test_target
rfr_diffs.name = 'RandomForest Error'
sns.jointplot(lr_predictions, rfr_predictions, kind='resid', color=sns.color_palette()[4])
from sklearn.linear_model import Lasso
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
degree = 2
model = make_pipeline(PolynomialFeatures(degree), Lasso())
model.fit(train_numdata, train_target)
poly_preds = pd.Series(model.predict(test_numdata),
name='Polynomial Lasso',
index=test_target.index)
sns.jointplot(test_target,
poly_preds,
kind='resid',
color=sns.color_palette()[5])
metrics_report(mean_predictions,
median_predictions,
lr_predictions,
enet_predictions,
rfr_predictions,
poly_preds)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will use the Alpine1 function, that it is available in the benchmark of functions of the package. This function is defined for arbitrary dimension. In this example we will work in dimension 9. The functional form of the Alpine1 function is
Step2: We will consider that variables $x_1$, $x_2$ and $x_3$ are continuous and defined in the interval $[-5,5]$, variables $x_4$ and $x_5$ are discrete and take values in $\{3,8,10\}$, variable $x_6$ is categorical with three different categories, and finally variable $x_7$ takes continuous values in the interval $[-1,2]$. Next we define this domain to use it in GPyOpt.
Step3: Now, we run the optimization for 20 iterations or a maximum of 60 seconds and we show the convergence plots.
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import GPyOpt
from numpy.random import seed
import numpy as np
seed(123)
func = GPyOpt.objective_examples.experimentsNd.alpine1(input_dim=9)
mixed_domain =[{'name': 'var1', 'type': 'continuous', 'domain': (-5,5),'dimensionality': 3},
{'name': 'var3', 'type': 'discrete', 'domain': (3,8,10),'dimensionality': 2},
{'name': 'var4', 'type': 'categorical', 'domain': (0,1,2),'dimensionality': 1},
{'name': 'var5', 'type': 'continuous', 'domain': (-1,2)}]
myBopt = GPyOpt.methods.BayesianOptimization(f=func.f, # Objective function
domain=mixed_domain, # Box-constraints of the problem
initial_design_numdata = 5, # Number data initial design
acquisition_type='EI', # Expected Improvement
exact_feval = True) # True evaluations, no sample noise
max_iter = 20   # maximum number of iterations
max_time = 60   # maximum allowed time (seconds)
eps = 1e-6      # tolerance: min distance between consecutive evaluations
# Pass max_time and eps through to the optimizer; the original call hard-coded
# eps=0 and never used max_time, contradicting the stated "20 iterations or a
# maximum of 60 seconds" budget above.
myBopt.run_optimization(max_iter, max_time, eps=eps)
np.round(myBopt.X, 2)
myBopt.plot_convergence()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 4. Looking at the data
Step2: Cycles
Step3: Selecting specific cells and investigating them
Step4: Let's see how the smoothing (interpolation) method works
Step5: Using hvplot for plotting summaries
Step6: Looking more in-depth and utilising advanced features
Step7: Looking closer at some summary-plots
Step8: 5. Checking for more details per cycle
Step9: B. Get some voltage curves for some cycles and plot them
Step10: Looking at some dqdv data
Step11: Put it in a for-loop for plotting many ica plots
Step12: Get all the dqdv data in one go
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cellpy
from cellpy import log
from cellpy import cellreader
from cellpy import prms
from cellpy import prmreader
from cellpy.utils import batch
# import holoviews as hv
%matplotlib inline
# hv.extension('bokeh')
log.setup_logging(default_level="DEBUG")
## Uncomment this and run for checking your cellpy parameters.
# prmreader.info()
# NOTE: this first assignment is immediately overwritten by the list below;
# it is a leftover from an earlier notebook run.
filepath = r"C:\Scripting\MyFiles\development_cellpy\dev_data\arbin\2019_types.res"
# Consecutive raw Arbin files belonging to one experiment.
filepath = [
    r"C:\Scripting\MyFiles\development_cellpy\dev_data\arbin\20181126_cen41_02_cc_01.res",
    r"C:\Scripting\MyFiles\development_cellpy\dev_data\arbin\20181126_cen41_02_cc_02.res",
    r"C:\Scripting\MyFiles\development_cellpy\dev_data\arbin\20181126_cen41_02_cc_03.res",
    r"C:\Scripting\MyFiles\development_cellpy\dev_data\arbin\20181126_cen41_02_cc_04.res",
    r"C:\Scripting\MyFiles\development_cellpy\dev_data\arbin\20181126_cen41_02_cc_05.res",
    r"C:\Scripting\MyFiles\development_cellpy\dev_data\arbin\20181126_cen41_02_cc_06.res",
]
filepath2 = filepath[0]  # single-file variant for comparison
m = 0.374433  # electrode mass passed to set_mass() -- units per cellpy convention; TODO confirm
outfilepath = r"C:\Scripting\MyFiles\development_cellpy\dev_data\arbin\2019_types.h5"
prms.Paths.rawdatadir = r"C:\ExperimentalData\BatteryTestData\Arbin\RAW"
# Load all raw files into one cell object (presumably concatenated -- see cellpy docs)
cell = cellreader.CellpyData()
cell.from_raw(filepath)
# ...and only the first file into a second object for comparison
cell2 = cellreader.CellpyData()
cell2.from_raw(filepath2)
cell.set_mass(m)
cell2.set_mass(m)
cell.make_step_table()
cell2.make_step_table()
cell.make_summary()
cell2.make_summary()
dataset = cell.dataset
dataset2 = cell2.dataset
# Inspect the three main tables: summary, steps, raw
dataset.summary
dataset.steps
dataset.raw
dataset.raw.describe()
dataset.raw.dtypes
dataset2.raw.dtypes
dataset.raw.Step_Index.unique()
dataset2.raw.Step_Index.unique()
dataset.summary.dtypes
dataset.steps.dtypes
# Persist the processed (multi-file) cell to HDF5
cell.save(outfilepath)
# Plot the charge capacity and the C.E. (and resistance) vs. cycle number (standard plot)
b.plot_summaries()
# Show the journal pages
# b.experiment.journal.pages.head()
# Show the most important part of the journal pages
b.view
# b.experiment.status()
# b.summaries.head()
%%opts Curve (color=hv.Palette('Magma'))
voltage_curves = dict()
for label in b.experiment.cell_names:
d = b.experiment.data[label]
curves = d.get_cap(label_cycle_number=True, interpolated=True, number_of_points=100)
curve = hv.Curve(curves, kdims=["capacity", "cycle"], vdims="voltage").groupby("cycle").overlay().opts(show_legend=False)
voltage_curves[label] = curve
NdLayout = hv.NdLayout(voltage_curves, kdims='label').cols(3)
NdLayout
%%opts Curve (color=hv.Palette('Magma'))
ocv_curves = dict()
for label in b.experiment.cell_names:
d = b.experiment.data[label]
ocv_data = d.get_ocv(direction="up", number_of_points=40)
ocv_curve = hv.Curve(ocv_data, kdims=["Step_Time", "Cycle_Index"], vdims="Voltage").groupby("Cycle_Index").overlay().opts(show_legend=False)
ocv_curves[label] = ocv_curve
NdLayout = hv.NdLayout(ocv_curves, kdims='label').cols(3)
NdLayout
# This will show you all your cell names
cell_labels = b.experiment.cell_names
cell_labels
# This is how to select the data (CellpyData-objects)
data1 = b.experiment.data["20160805_test001_45_cc"]
data2 = b.experiment.data["20160805_test001_47_cc"]
# get voltage curves
df_cycles1 = data1.get_cap(
method="back-and-forth",
categorical_column=True,
label_cycle_number=True,
interpolated=False,
)
# get interpolated voltage curves
df_cycles2 = data1.get_cap(
method="back-and-forth",
categorical_column=True,
label_cycle_number=True,
interpolated=True,
dx=0.1,
number_of_points=100,
)
%%opts Scatter [width=600] (color="red", alpha=0.9, size=12)
single_curve = hv.Curve(df_cycles1, kdims=["capacity", "cycle"], vdims="voltage", label="not-smoothed").groupby("cycle")
single_scatter = hv.Scatter(df_cycles2, kdims=["capacity", "cycle"], vdims="voltage", label="smoothed").groupby("cycle")
single_scatter * single_curve
import hvplot.pandas
# hvplot does not like infinities
s = b.summaries.replace([np.inf, -np.inf], np.nan)
layout = (
s["coulombic_efficiency"].hvplot()
+ s["discharge_capacity"].hvplot() * s["charge_capacity"].hvplot()
)
layout.cols(1)
s["cumulated_coulombic_efficiency"].hvplot()
from cellpy.utils.batch_tools.batch_analyzers import OCVRelaxationAnalyzer
print(" analyzing ocv relaxation data ".center(80, "-"))
analyzer = OCVRelaxationAnalyzer()
analyzer.assign(b.experiment)
analyzer.direction = "down"
analyzer.do()
dfs = analyzer.last
df_file_one, _df_file_two = dfs
# keeping only the columns with voltages
ycols = [col for col in df_file_one.columns if col.find("point") >= 0]
# removing the first ocv rlx (relaxation before starting cycling)
df = df_file_one.iloc[1:, :]
# tidy format
df = df.melt(id_vars="cycle", var_name="point", value_vars=ycols, value_name="voltage")
curve = (
hv.Curve(df, kdims=["cycle", "point"], vdims="voltage")
.groupby("point")
.overlay()
.opts(xlim=(1, 10), width=800)
)
scatter = (
hv.Scatter(df, kdims=["cycle", "point"], vdims="voltage")
.groupby("point")
.overlay()
.opts(xlim=(1, 10), ylim=(0.7, 1))
)
layout = hv.Layout(curve * scatter)
layout.cols(1)
b.summary_columns
discharge_capacity = b.summaries.discharge_capacity
charge_capacity = b.summaries.charge_capacity
coulombic_efficiency = b.summaries.coulombic_efficiency
ir_charge = b.summaries.ir_charge
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.plot(discharge_capacity)
ax1.set_ylabel("capacity ")
ax2.plot(ir_charge)
ax2.set_xlabel("cycle")
ax2.set_ylabel("resistance")
# Lets check what cells we have
cell_labels = b.experiment.cell_names
cell_labels
# OK, then I choose one of them
data = b.experiment.data["20160805_test001_45_cc"]
cap = data.get_cap(categorical_column=True)
cap.head()
fig, ax = plt.subplots()
ax.plot(cap.capacity, cap.voltage)
ax.set_xlabel("capacity")
ax.set_ylabel("voltage")
cv = data.get_cap(method="forth")
fig, ax = plt.subplots()
ax.set_xlabel("capacity")
ax.set_ylabel("voltage")
ax.plot(cv.capacity, cv.voltage)
c4 = data.get_cap(cycle=4, method="forth-and-forth")
c10 = data.get_cap(cycle=10, method="forth-and-forth")
fig, ax = plt.subplots()
ax.set_xlabel("capacity")
ax.set_ylabel("voltage")
ax.plot(c4.capacity, c4.voltage, "ro", label="cycle 4")
ax.plot(c10.capacity, c10.voltage, "bs", label="cycle 22")
ax.legend();
from cellpy.utils import ica
v4, dqdv4 = ica.dqdv_cycle(
data.get_cap(4, categorical_column=True, method="forth-and-forth")
)
v10, dqdv10 = ica.dqdv_cycle(
data.get_cap(10, categorical_column=True, method="forth-and-forth")
)
plt.plot(v4, dqdv4, label="cycle 4")
plt.plot(v10, dqdv10, label="cycle 10")
plt.legend();
fig, ax = plt.subplots()
for cycle in data.get_cycle_numbers():
d = data.get_cap(cycle, categorical_column=True, method="forth-and-forth")
if not d.empty:
v, dqdv = ica.dqdv_cycle(d)
ax.plot(v, dqdv)
else:
print(f"cycle {cycle} seems to be missing or corrupted")
hv.extension("bokeh")
tidy_ica = ica.dqdv_frames(data)
cycles = list(range(1, 3)) + [10, 11, 12, 15]
tidy_ica = tidy_ica.loc[tidy_ica.cycle.isin(cycles), :]
%%opts Curve [xlim=(0,1)] (color=hv.Palette('Magma'), alpha=0.9) NdOverlay [legend_position='right', width=800, height=500]
curve4 = (hv.Curve(tidy_ica, kdims=['voltage'], vdims=['dq', 'cycle'], label="Incremental capacity plot")
.groupby("cycle")
.overlay()
)
curve4
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Классификатор C4.5 и три его модификации
Step2: Сколько статистически значимых на уровне 0.05 различий мы обнаружили?
Step3: Сравнивая 4 классификатора между собой, мы проверили 6 гипотез. Давайте сделаем поправку на множественную проверку. Начнём с метода Холма. Сколько гипотез можно отвергнуть на уровне значимости 0.05 после поправки этим методом?
Step4: Сколько гипотез можно отвергнуть на уровне значимости 0.05 после поправки методом Бенджамини-Хохберга?
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats
from statsmodels.sandbox.stats.multicomp import multipletests
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
from itertools import combinations
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
aucs = pd.read_csv('AUCs.txt', delimiter='\t')
aucs.info()
aucs.columns = [u'Dataset', u'C4.5', u'C4.5+m', u'C4.5+cf', u'C4.5+m+cf']
aucs
# Pairwise Wilcoxon signed-rank tests between every pair of the 4 classifiers
# (columns 1..4 of `aucs`); 4 choose 2 = 6 comparisons in total.
w_stat = pd.DataFrame(columns=['Model 1', 'Model 2', 'Wilcoxon stat', 'p-value'])
k = 0
for i, j in combinations([1, 2, 3, 4], 2):
    w_stat.loc[k, 'Model 1'], w_stat.loc[k, 'Model 2'] = aucs.columns[i], aucs.columns[j]
    w_stat.loc[k, 'Wilcoxon stat'], w_stat.loc[k, 'p-value'] = stats.wilcoxon(aucs.iloc[:, i], aucs.iloc[:, j])
    k += 1
w_stat
# The pair with the smallest p-value differs most significantly
top_diff_idx = w_stat.loc[:, 'p-value'].idxmin()
print('Two classifiers with the highest significance difference: %s & %s' % (w_stat.loc[top_diff_idx, 'Model 1'],
                                                                             w_stat.loc[top_diff_idx, 'Model 2']))
# Count the raw (uncorrected) significant differences at alpha = 0.05
diff_models_cnt = w_stat.loc[w_stat.loc[:, 'p-value'] <= 0.05, :].shape[0]
print('Number of p-value <= 0.05: %d' % diff_models_cnt)
reject, p_corrected, a1, a2 = multipletests(w_stat['p-value'], alpha = 0.05, method = 'holm')
w_stat['p_corrected'] = p_corrected
w_stat['reject'] = reject
w_stat
reject, p_corrected, a1, a2 = multipletests(w_stat['p-value'], alpha = 0.05, method = 'fdr_bh')
w_stat['p_corrected'] = p_corrected
w_stat['reject'] = reject
w_stat
diff_models_cnt = w_stat.loc[w_stat.loc[:, 'p_corrected'] <= 0.05, :].shape[0]
print('Number of p-value <= 0.05: %d' % diff_models_cnt)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn import preprocessing
# load_data() is defined elsewhere; assumed to return a numeric DataFrame -- TODO confirm
data = load_data()
# preprocessing.scale returns a bare ndarray; rewrap it so the original
# index and column labels are preserved in the scaled frame.
df_out = pd.DataFrame(preprocessing.scale(data), index=data.index, columns=data.columns)
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For the interest rate to be used, observing the charts of LIBOR rates, I will pick a linear model, starting from 2.5% at Jan-1-2005, going to 6% at Dec-31-2007, decaying to 0.5% until Dec-31-2008 and staying there.
Step2: That's all. Now I will measure volatility over the last two running weeks and decide whether to hedge that risk; otherwise I return to the initial position. Options are bought or sold at the Black-Scholes price using the current rate and the running volatility. This means that for the first month I will keep the original portfolio, until I have enough information about the market.
Step3: I will define two functions: one to open a hedge position, selling some stock if necessary in order to afford the position; the other to close the position, buying all possible stock with the earnings, if any. A transaction cost is included. It must account for both opening and closing the position (so, if your broker charges you \$50, the commission is \$100)
Step4: Now I will proceed to explain how this trading algorithm will work
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# NOTE: DataFrame.ix was removed in modern pandas; .loc is the replacement.
data = pd.read_csv("../data/GOOG.csv").ix[:,["Date", "Open"]]
data.sort_values(by="Date", inplace=True)
data.reset_index(inplace=True)
# Piecewise-linear interest-rate model (see the description above):
#   2.5% -> 6%   linearly from 2005-01-01 to 2007-12-31,
#   6%   -> 0.5% linearly through 2008, then flat at 0.5%.
rate = np.zeros_like(data.Date)
n = 0  # trading days since the start of the current rate regime
m = 0  # row index into `rate` (always equals the loop position)
for d in data.Date:
    if d < "2007-12-31":
        rate[m] = 2.5 + n*3.5/734.0  # 734: presumably trading days in 2005-2007 -- TODO confirm
    if d == "2007-12-31":
        rate[m] = 6.0
        n = 0  # restart the day counter for the decay phase
    if "2008-12-31" > d > "2007-12-31":
        rate[m] = 6.0 - 5.5*n/256  # 256: presumably trading days in 2008 -- TODO confirm
    if d >= "2008-12-31":
        rate[m] = 0.5
    m +=1
    n +=1
rate = rate/100  # convert percent to fraction
from scipy.stats import norm
def volatility(v):
    """Annualized volatility of a return series.

    Scales the (population) standard deviation of the daily returns *v*
    by sqrt(260), i.e. assuming 260 trading days per year.
    """
    daily_std = np.std(v)
    return daily_std * np.sqrt(260)
def eu_put_option(S, K, r, s, t):
    """Black-Scholes price of a European put option.

    S: spot price, K: strike, r: risk-free rate, s: annualized volatility,
    t: time to expiry in years.
    """
    vol_sqrt_t = s * np.sqrt(t)
    log_moneyness = np.log(S / K)
    d1 = (log_moneyness + (r + 0.5 * s * s) * t) / vol_sqrt_t
    d2 = d1 - vol_sqrt_t
    return K * np.exp(-r * t) * norm.cdf(-d2) - S * norm.cdf(-d1)
def eu_put_option_delta(S, K, r, s, t):
    """Black-Scholes delta of a European put: N(d1) - 1, always in (-1, 0)."""
    d1 = (np.log(S / K) + (r + 0.5 * s * s) * t) / (s * np.sqrt(t))
    return norm.cdf(d1) - 1.0
def repair_split(df, info):
    """Adjust the Open price series of *df* for stock splits.

    info: list of (date, ratio) tuples.  Every price on or after a split
    date is multiplied by that split's ratio, putting the whole series on
    a single pre-split scale.  Returns a new numpy array; *df* itself is
    left unmodified.
    """
    adjusted = df.Open.values.copy()
    for split_date, ratio in info:
        affected = np.array(df.Date >= split_date)
        adjusted[affected] = adjusted[affected] * ratio
    return adjusted
stock_price = repair_split(data, [("2014-03-27", 2)])
rets = np.diff(np.log(stock_price))
commission = 0
def rebalance(S, K, r, s, t, cap):
    """Split capital *cap* between stock and protective put options.

    Prices the put with Black-Scholes, then buys whole units where one
    unit is one put plus |delta| shares of stock (selling stock if needed
    to afford the puts).  Returns (stock, options, money, option_price)
    with *money* being the uninvested remainder.
    """
    option_price = eu_put_option(S, K, r, s, t)
    delta = eu_put_option_delta(S, K, r, s, t)
    # cost of one "unit": one option plus |delta| shares (delta is negative)
    unit_cost = option_price - delta * S
    options = np.floor(cap / unit_cost)
    stock = np.floor(-delta * cap / unit_cost)
    money = cap - (options * option_price + stock * S)
    return (stock, options, money, option_price)
def close_position(S, K, nstock, nopt, mon):
    """Settle *nopt* put options at expiry and reinvest the cash in stock.

    Adds the payoff max(K - S, 0) per option to the cash balance *mon*,
    buys as many whole extra shares as the balance allows, and deducts
    the module-level ``commission``.  Returns (profit, stock, options,
    money); the options count is always 0 after closing.
    """
    profit = nopt * max(0, K - S)
    cash = mon + profit
    extra_shares = np.floor(cash / S)
    cash -= extra_shares * S
    cash -= commission
    return (profit, nstock + extra_shares, 0, cash)
capital = 200000  # just enough to buy the stocks
stock = 1000
money = capital - 1000*stock_price[0]  # this money will not take any interest
options = 0
strike = 0
option_price = 0
profit = 0
net_worth = []   # daily mark-to-market portfolio value
vola = []        # running volatility estimates
n_options = []   # daily option position
n_stock = []     # daily stock position
n = 0            # day counter / row index into stock_price and rets
sell_options = 0 # countdown (trading days) until the current hedge is closed
print("Not invested money: {0}".format(money))
for d in data.Date:
    capital = money + stock*stock_price[n]
    net_worth.append(capital)
    # Warm-up: skip the first 60 days so there is history for volatility
    if n<60:
        n += 1
        continue
    # here begins the simulation
    # Running volatility over the last 15 trading days (~3 calendar weeks)
    vol = volatility(rets[n-15:n])
    vola.append(vol)
    # Hedge countdown expired: settle the puts and return to an all-stock position
    if sell_options == 0 and options > 0:
        (profit, stock, options, money) = close_position(stock_price[n], strike, stock, options, money)
        print("\nSell options: {0}".format(data.Date[n]))
        print(" Profit: {0}".format(profit))
        print(" Stock price at {0}, strike at {1}".format(stock_price[n], strike))
        print(" Current balance: {0}".format(money + stock*stock_price[n]))
    # High volatility and no hedge yet: buy puts struck 20 above spot,
    # priced on a 30-trading-day horizon (30/260 years)
    if vol > 0.5 and options == 0:
        strike = stock_price[n] + 20
        (stock, options, money, option_price) = rebalance(stock_price[n], strike, rate[n], vol, 30/260.0, capital);
        print("\nBuy options: {0}".format(data.Date[n]))
        print(" Put option price (stock price at {0}, strike at {1}): {2}".format(stock_price[n], strike, option_price))
        print(" Position: {0} stock, {1} options, money: {2}".format(stock, options, money))
        print(" Current balance: {0}".format(money + stock*stock_price[n]))
        sell_options = 90  # hold the hedge for 90 trading days
    if sell_options > 0:
        sell_options -= 1
    n_options.append(options)
    n_stock.append(stock)
    n += 1
plt.figure(figsize=(9,9))
plt.subplot(311)
plt.plot(1000*stock_price, label="GOOG")
plt.plot(net_worth, label="Portfolio")
plt.legend(loc=0)
plt.xlim(0,len(net_worth))
plt.subplot(312)
plt.plot(n_stock, label="Stock")
plt.legend(loc=0)
plt.subplot(313)
plt.plot(n_options, label="Put options")
plt.legend(loc=0)
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data loading, sanitization and massage
Step2: The available data columns are
Step3: A peek at the topmost data items.
Step4: Does the id field uniquely identify items on the search list? If so, using it as index could be a good idea.
Step5: Ok apparently it is used for deduplication. Who knows why are the items twice in the downloaded list.
Step6: Massage the keywords to be lists. Note that str.split(',') returns a [''], therefore the little if filter in there.
Step7: Are any items missing the year?
Step8: Complementary data
Step9: Data overview
Step10: With the above peek at the ID field, how many unique items did we receive in the download?
Step11: Ok capped at 1000 I guess, which brings the percentage of the website search results available to us down to
Step12: Data exploration
Step13: What about the ACM Digital Library total, what does it's profile look like over time?
Step14: Similar overall shape, which isn't a surprise. Overlay, with arbitrary scaling of 300.
Step15: Right, so they seem to have somewhat similar shape. Legacy as a concept was lagging overall ACM DL, until it caught by increasing growth during 1990s.
Step16: All the pre-1990 publications are
Step17: And over 1000 publications after 1990, until 2016. First 10 of which are
Step18: Did something happen around 1990s, as the fraction of publications related to legacy started increasing? Let's plot a global linear regression model, as well as separate linear regression models before and after 1990.
Step19: Statistical validation of the above would be good, of course, to check against randomness.
Step20: How many keywords do each item have?
Step21: Ok almost 400 items are without any keywords. There are some outliers, let's inspect the ones with more than 15 keywords. Sounds excessive...
Step22: And the keyword lists for the above
Step23: That is excessive, but seems legit to me.
Step24: Of which occur in 10 or more items in the subset
Step25: and further those that occur in 3-10 items
Step26: Of the remainder, number of keywords which appear on only two items
Step27: and only on one item
Step28: Keywords with 'legacy' in them
Step29: Network analysis of keywords
Step30: So there is one dominant component, and 150 small ones. It's best to explore them interactively with Gephi.
Step31: Degree distribution of the keyword graph, ie. are there a few nodes which have huge degree and then a large number of nodes with smaller number of connections, like a power network. Additionally, let's see where the keywords with the work legacy in them are placed, by indicating them with green vertical lines. In the left diagram below, hubs are towards the right.
Step32: Eyeballing the above, most of the legacy keywords are where the mass of the distribution is, ie. at low degrees. One of the legacy nodes is a top hub, and there are some in the mid-ranges.
Step34: Let's plot the top hub out.
Step36: Communities
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import networkx as nx
import community
import itertools
import matplotlib.pyplot as plt
import numpy as np
import re
%matplotlib inline
legacybib = pd.read_csv("ACMDL201612108240806.csv")
legacybib.columns
legacybib.head(3)
assert 0, sum(legacybib.id.duplicated())
legacybib[legacybib.id.duplicated(keep=False)].head(2 * 2)
legacybib.dtypes
legacybib.keywords.fillna('', inplace=True)
legacybib.keywords = legacybib.keywords.map(lambda l: [k.lower().strip() for k in l.split(',') if k])
legacybib[legacybib.year.isnull()].year
acmPerYearData = { 1951: 43, 1952: 77, 1953: 34, 1954: 71, 1955: 72, 1956: 162, 1957: 144, 1958: 234, 1959: 335,
1960: 302, 1961: 521, 1962: 519, 1963: 451, 1964: 537, 1965: 561, 1966: 633, 1967: 754, 1968: 669, 1969: 907,
1970: 800, 1971: 1103, 1972: 1304, 1973: 1704, 1974: 1698, 1975: 1707, 1976: 2086, 1977: 1943, 1978: 2235, 1979: 1687,
1980: 2152, 1981: 2241, 1982: 2578, 1983: 2485, 1984: 2531, 1985: 2608, 1986: 3143, 1987: 3059, 1988: 3827, 1989: 4155,
1990: 4313, 1991: 4551, 1992: 5019, 1993: 5107, 1994: 5939, 1995: 6179, 1996: 6858, 1997: 7181, 1998: 8003, 1999: 7628,
2000: 9348, 2001: 8691, 2002: 10965, 2003: 11624, 2004: 14493, 2005: 16715, 2006: 19222, 2007: 19865, 2008: 21631, 2009: 23827,
2010: 27039, 2011: 25985, 2012: 27737, 2013: 25832, 2014: 26928, 2015: 27131, 2016: 25557, 2017: 39}
acmPerYear = pd.Series(acmPerYearData)
round(len(legacybib) / 1541 * 100, 2)
len(legacybib.id.unique())
round(len(legacybib.id.unique()) / 1541 * 100, 2)
legacybib.year.hist(bins=legacybib.year.max() - legacybib.year.min(), figsize=(10,2))
acmPerYear.plot(figsize=(10, 2))
#plt.hist(legacybib.year.dropna(), label="Year histogram")
plt.plot(legacybib.year.groupby(legacybib.year).count(), label='legacy publication')
plt.plot(acmPerYear * 0.003, label="total publications * 0.003")
plt.legend()
plt.legend(loc='best')
plt.plot(pd.Series(legacybib.groupby(legacybib.year).year.count() / acmPerYear), 'o')
legacybib[legacybib.year <= 1990][["year", "title"]].sort_values("year")
legacybib[legacybib.year > 1990][["year", "title"]].sort_values("year").head(10)
pre1990range = np.arange(legacybib.year.min(), 1991)
post1990range = np.arange(1990, legacybib.year.max())
# Linear regression models
# note the use of np.polyfit
propLm = np.polyfit(pd.Series(legacybib.groupby(legacybib.year).year.count() / acmPerYear).dropna().index, pd.Series(legacybib.groupby(legacybib.year).year.count() / acmPerYear).dropna(), 1)
pre1990 = np.polyfit(pd.Series(legacybib.groupby(legacybib.year).year.count() / acmPerYear)[pre1990range].dropna().index, pd.Series(legacybib.groupby(legacybib.year).year.count() / acmPerYear)[pre1990range].dropna(), 1)
post1990 = np.polyfit(pd.Series(legacybib.groupby(legacybib.year).year.count() / acmPerYear)[post1990range].dropna().index, pd.Series(legacybib.groupby(legacybib.year).year.count() / acmPerYear)[post1990range].dropna(), 1)
# Plot the fractions of legacy vs. all publications, the models, and a legend
plt.plot(pd.Series(legacybib.groupby(legacybib.year).year.count() / acmPerYear), 'o')
plt.plot(np.arange(legacybib.year.min(), legacybib.year.max()), np.poly1d(propLm)(np.arange(legacybib.year.min(), legacybib.year.max())), label="global lm")
plt.plot(pre1990range, np.poly1d(pre1990)(pre1990range), linestyle="dashed", label="pre 1990 lm")
plt.plot(post1990range, np.poly1d(post1990)(post1990range), linestyle="dashed", label="post 1990 lm")
plt.title("Fraction of legacy related publications against ACM")
plt.legend(loc="best")
# this could be a pandas.Series instead of dict
keywordhist = {}
for kws in legacybib.keywords:
for k in kws:
if k in keywordhist:
keywordhist[k] = keywordhist[k] + 1
else:
keywordhist[k] = 1
legacybib.keywords.map(lambda kws: len(kws)).describe()
plt.title("Histogram of numbers of keywords per item")
plt.hist(legacybib.keywords.map(lambda kws: len(kws)), bins=max(legacybib.keywords.map(lambda kws: len(kws))) - 1)
legacybib[legacybib.keywords.map(lambda kws: len(kws)) > 15][["id", "title", "author", "keywords"]]
[keywordlist for keywordlist in legacybib[legacybib.keywords.map(lambda kws: len(kws)) > 15].keywords]
len(keywordhist)
[(k, keywordhist[k]) for k in sorted(keywordhist, key=keywordhist.get, reverse=True) if keywordhist[k] >= 10]
[(k, keywordhist[k]) for k in sorted(keywordhist, key=keywordhist.get, reverse=True) if keywordhist[k] < 10 and keywordhist[k] >= 3]
len([k for k in keywordhist if keywordhist[k] == 2])
len([k for k in keywordhist if keywordhist[k] == 1])
sorted([(k, keywordhist[k]) for k in keywordhist if re.match("legacy", k)], key=lambda k: k[1], reverse=True)
keywordg = nx.Graph()
legacybib.keywords.map(lambda item: keywordg.add_edges_from([p for p in itertools.permutations(item, 2)]), na_action='ignore')
print("Number of components", len([comp for comp in nx.connected_components(keywordg)]))
print("Largest ten components sizes", sorted([len(comp) for comp in nx.connected_components(keywordg)], reverse=True)[:10])
nx.write_gexf(keywordg, "keywordg.gexf")
fig, (ax1, ax2) = plt.subplots(1,2)
fig.set_size_inches(10, 2)
ax1.set_title("Keyword degree histogram")
ax1.plot(nx.degree_histogram(keywordg))
ax1.vlines([keywordg.degree(l) for l in keywordg if re.match('legacy', l)], ax1.get_ylim()[0], ax1.get_ylim()[1], colors='green')
ax2.set_title("Keyword degree diagram, log/log")
ax2.loglog(nx.degree_histogram(keywordg))
keywordgDegrees = pd.Series(keywordg.degree()).sort_values(ascending=False)
keywordgDegrees.head(3)
def plotNeighborhood(graph, ego, color = "green", includeEgo = False):
    """Plot neighbourhood of keyword in graph, after possibly removing the ego.

    graph : networkx.Graph-like graph
        The graph to get the neighbourhood from
    ego : node in graph
        The node whose neighbourhood to plot
    color : string
        Name of the color to use for plotting
    includeEgo : bool
        Include the ego node

    The function defaults to removing the ego node, because by definition
    it is connected to each of the nodes in the subgraph. With the ego
    removed, the result basically tells how the neighbours are connected
    with one another.
    """
    from math import sqrt
    plt.rcParams["figure.figsize"] = (10, 10)
    # list() makes this work on NetworkX 2.x too, where neighbors() returns
    # an iterator rather than a list.
    if includeEgo:
        nodes = list(graph.neighbors(ego)) + [ego]
    else:
        nodes = list(graph.neighbors(ego))
    subgraph = graph.subgraph(nodes)
    plt.title("Neighbourhood of " + ego + " (" + str(len(subgraph)) + ")")
    plt.axis('off')
    # Spread the spring layout out more for larger subgraphs (k ~ 1/sqrt(2n)).
    pos = nx.spring_layout(subgraph, k = 1/sqrt(len(subgraph) * 2))
    nx.draw_networkx(subgraph,
                     pos = pos,
                     font_size = 9,
                     node_color = color,
                     alpha = 0.8,
                     edge_color = "light" + color)
    plt.show()
plotNeighborhood(keywordg, "legacy systems")
plotNeighborhood(keywordg, "legacy software")
def plotCommunities(graph):
    """Plot community information from a graph.

    Basically just copied from http://perso.crans.org/aynaud/communities/index.html
    at this point, while in development
    """
    # zoom in on something, for dev. purposes
    # list() keeps this working on NetworkX 2.x, where neighbors() is an iterator.
    graph = graph.subgraph(list(graph.neighbors('legacy software')))
    # graph = [c for c in nx.connected_component_subgraphs(graph)][0]
    # NOTE(review): connected_component_subgraphs was removed in networkx >= 2.4;
    # this snippet targets the older API.
    graph = max(nx.connected_component_subgraphs(graph), key=len) # I love you Python
    partition = community.best_partition(graph)
    size = float(len(set(partition.values())))
    pos = nx.spring_layout(graph)
    count = 0
    # One draw call per community, shading nodes by community index.
    for com in set(partition.values()):
        count = count + 1
        list_nodes = [nodes for nodes in partition.keys() if partition[nodes] == com]
        plt.axis('off')
        nx.draw_networkx_nodes(graph, pos, list_nodes, node_size = 40, node_color = str(count/size), alpha=0.4)
    nx.draw_networkx_labels(graph, pos, font_size = 9)
    nx.draw_networkx_edges(graph, pos, alpha=0.1)
    plt.show()
plotCommunities(keywordg)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Motivating KDE
Step2: Let's plot this dataset.
Step3: Or, more properly
Step4: The normalization simply changes the scale on the y-axis
Step5: Do you see any issues with this kind of density estimation?
Step6: On the left, the histogram makes clear that this is a bimodal distribution.
Step7: The actual problem is that the height of the block stack often reflects not on the actual density of points nearby, but on coincidences of how the bins align with the data points.
Step8: The result is a much more robust reflection of the actual data characteristics than is the standard histogram.
Step9: This smooth plot gives a much more accurate idea of the shape of the data distribution.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
def make_data(N, f=0.3, rseed=1):
    # Draw N standard-normal samples, then shift the last (1 - f) fraction
    # by +5 to produce a bimodal sample; rseed makes the draw repeatable.
    rng = np.random.RandomState(rseed)
    sample = rng.randn(N)
    split = int(f * N)
    sample[split:] += 5
    return sample
x = make_data(1000)
hist = plt.hist(x, bins=30)
hist = plt.hist(x, bins=30, normed=True)
density, bins, _ = hist
widths = bins[1:] - bins[:-1]
(density * widths).sum()
x = make_data(20)
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(1, 2, figsize=(12, 4), sharex=True, sharey=True,
subplot_kw={'xlim':(-4, 9), 'ylim':(-0.02, 0.3)})
fig.subplots_adjust(wspace=0.05)
for i, offset in enumerate([0.0, 0.6]):
ax[i].hist(x, bins=bins + offset, normed=True)
ax[i].plot(x, np.full_like(x, -0.01), '|k', markeredgewidth=1)
fig, ax = plt.subplots()
bins = np.arange(-3, 8)
ax.plot(x, np.full_like(x, -0.1), '|k', markeredgewidth=1)
for count, edge in zip(*np.histogram(x, bins)):
for i in range(count):
ax.add_patch(plt.Rectangle((edge, i), 1, 1, alpha=0.5))
ax.set_xlim(-4, 8)
ax.set_ylim(-0.2, 8)
x_d = np.linspace(-4, 8, 2000)
density = sum((abs(xi - x_d) < 0.5) for xi in x)
plt.fill_between(x_d, density, alpha=0.5)
plt.plot(x, np.full_like(x, -0.1), '|k', markeredgewidth=1)
plt.axis([-4, 8, -0.2, 8]);
from scipy.stats import norm
x_d = np.linspace(-4, 8, 1000)
density = sum(norm(xi).pdf(x_d) for xi in x)
plt.fill_between(x_d, density, alpha=0.5)
plt.plot(x, np.full_like(x, -0.1), '|k', markeredgewidth=1)
plt.axis([-4, 8, -0.2, 5]);
from sklearn.neighbors import KernelDensity
# instantiate and fit the KDE model
kde = KernelDensity(bandwidth=1.0, kernel='gaussian')
kde.fit(x[:, None])
# score_samples returns the log of the probability density
logprob = kde.score_samples(x_d[:, None])
plt.fill_between(x_d, np.exp(logprob), alpha=0.5)
plt.plot(x, np.full_like(x, -0.01), '|k', markeredgewidth=1)
plt.ylim(-0.02, 0.22)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Using interact for animation with data
Step3: To create an animation of a soliton propagating in time, we are going to precompute the soliton data and store it in a 2d array. To set this up, we create the following variables and arrays
Step4: Compute a 2d NumPy array called phi
Step6: Write a plot_soliton_data(i) function that plots the soliton wave $\phi(x, t[i])$. Customize your plot to make it effective and beautiful.
Step7: Use interact to animate the plot_soliton_data function versus time.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
def soliton(x, t, c, a):
    """Return phi(x, t) for a soliton wave with constants c and a.

    phi(x, t) = (c / 2) * sech^2( (sqrt(c) / 2) * (x - c*t - a) )

    Accepts scalars or NumPy arrays for x and t; NumPy broadcasting replaces
    the original element-by-element loops, so scalar/array, array/scalar and
    even array/array inputs are all handled uniformly.
    """
    # sech(u) = 1 / cosh(u); square it and scale by c/2.
    return 0.5 * c / np.cosh((np.sqrt(c) / 2.0) * (x - c * t - a)) ** 2
assert np.allclose(soliton(np.array([0]),0.0,1.0,0.0), np.array([0.5]))
tmin = 0.0
tmax = 10.0
tpoints = 100
t = np.linspace(tmin, tmax, tpoints)
xmin = 0.0
xmax = 10.0
xpoints = 200
x = np.linspace(xmin, xmax, xpoints)
c = 1.0
a = 0.0
phi = np.zeros((len(x),len(t)))
for b in range(len(x)):
phi[b,:] = soliton(x[b], t, c, a)
print(phi)
assert phi.shape==(xpoints, tpoints)
assert phi.ndim==2
assert phi.dtype==np.dtype(float)
assert phi[0,0]==soliton(x[0],t[0],c,a)
def plot_soliton_data(i=0):
    """Plot the soliton wave phi(x, t[i]) versus x."""
    # interact sliders hand over floats, so coerce to a valid column index.
    idx = int(i)
    # x belongs on the horizontal axis (the original call had the axes swapped).
    plt.plot(x, phi[:, idx])
    plt.xlabel('x')
    plt.ylabel(r'$\phi(x, t)$')
plot_soliton_data(0)
assert True # leave this for grading the plot_soliton_data function
interact(plot_soliton_data, i=(0.0,10.0,0.1));
assert True # leave this for grading the interact with plot_soliton_data cell
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: V4 grade (범주형 데이터형)
Step2: V5 sub_grade (범주형 데이터형)
Step3: V6 emp_title (범주형 데이터형)
Step4: V7 emp_length (범주형 데이터형)
Step5: V8 home_ownership (범주형 데이터형)
Step6: V10 verification_status (범주형 데이터형)
Step7: V11 issue_d (범주형 데이터형)
Step8: V14 purpose (범주형 데이터형)
Step9: V23 initial_list_status (범주형 데이터형)
|
<ASSISTANT_TASK:>
Python Code:
lc_data = pd.DataFrame.from_csv('./lc_dataframe(cleaning).csv')
lc_data = lc_data.reset_index()
lc_data.tail()
x = lc_data['grade']
sns.distplot(x, color = 'r')
plt.show()
x = lc_data['sub_grade']
sns.distplot(x, color = 'g')
plt.show()
x = lc_data['emp_title']
plt.hist(x)
plt.show()
x = lc_data['emp_length']
sns.distplot(x, color = 'r')
plt.show()
x = lc_data['home_ownership']
sns.distplot(x, color = 'g')
plt.show()
x = lc_data['verification_status']
sns.distplot(x)
plt.show()
x = lc_data['issue_d']
sns.distplot(x, color = 'r')
plt.show()
x = lc_data['purpose']
sns.distplot(x, color = 'g')
plt.show()
x = lc_data['initial_list_status']
plt.hist(x)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Motivation 🎇
Step2: Strategies for Exact Nearest Neighbors 🧠
Step3: But It’s Not All Rainbows And Unicorns 🦄
Step4: Vector Encoding using Product Quantization
|
<ASSISTANT_TASK:>
Python Code:
# Additional Resources 📚
import pickle
import faiss
def load_data():
    # Deserialize the pre-computed movie data (vectors and names) from disk.
    with open('movies.pickle', 'rb') as fh:
        movies = pickle.load(fh)
    return movies
data = load_data()
vectors = data["vector"]
names = data["name"]
data
faiss.MatrixStats(vectors).comments.split("\n")
index = faiss.IndexFlatL2(vectors.shape[1])
index.add(vectors)
search_vector = vectors[90:91]
distances, indices = index.search(search_vector, 10)
print(f"The most similar movies to {names[90]} are:\n")
print([names[i] for i in indices[0]])
quantizer = faiss.IndexFlatL2(vectors.shape[1])
index = faiss.IndexIVFFlat(quantizer,
vectors.shape[1],
100, # number_of_partition,
8) # search_in_x_partitions
index.train(vectors)
index.add(vectors)
search_vector = vectors[90:91]
distances, indices = index.search(search_vector, 10)
print(f"The most similar movies to {names[90]} are:\n")
print([names[i] for i in indices[0]])
quantizer = faiss.IndexFlatL2(vectors.shape[1])
index = faiss.IndexIVFPQ(quantizer,
vectors.shape[1],
100, # number_of_partition,
8, # search_in_x_partitions,
8) # subvector_size
index.train(vectors)
index.add(vectors)
search_vector = vectors[90:91]
distances, indices = index.search(search_vector, 10)
print(f"The most similar movies to {names[90]} are:\n")
print([names[i] for i in indices[0]])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Вопрос
Step2: Заготовка для типичного скрипта на Python
Step3: Модули
Step4: Обработка ошибок
Step5: Генерация списков (списковые включения)
Step6: Еще немного встроенных функций
|
<ASSISTANT_TASK:>
Python Code:
%time "list(range(1000000)); print('ololo')"
def my_cool_function(a, b):
    # Demonstration function: sum (or concatenate) the two arguments.
    total = a + b
    return total
def my_cool_function2(a: int, b: int) -> int:
    # Annotated variant: the hints say int, but Python does not enforce them
    # at runtime, so any types supporting + still work.
    result = a + b
    return result
def my_cool_function(a, b):
    # Re-defining a function simply rebinds the name to the new object.
    return a + b
my_cool_function2("foo", "bar")
def main():
    # here be dragons
    # Placeholder entry point for the typical-script skeleton.
    return

# Only run main() when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
import random # встроенный модуль
import os.path as op # импорт с псевдонимом
random.randint(1, 10)
from os.path import *
from sample2 import fibonacci
sample2.fibonacci(5)
import traceback
d = {}
try:
1 / 0
except KeyError as exc:
print(traceback.format_exc())
except ZeroDivisionError as an_exc:
print("bad luck")
import random
a = [
random.randint(-10, 10)
for _ in range(10)
]
a
b = [item ** 2 for item in a if item > 0]
b
a = "asfdhasdfh"
dir(a)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: While this procedure is written in JavaScript for use by D3.js during the simulation, an implementation in python is shown below for simplicity.<br>
Step3: If we graph this function, it should look familiar.
Step5: Homogenizing Longitude and Latitude
Steps
|
<ASSISTANT_TASK:>
Python Code:
# Imports
import math
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
%matplotlib inline
def logistic_algo(x, max_value, min_value=1.5, c=0.85, k=0.1):
    """Scale a given point's radius according to a logistic function.

    The curve is shifted so that ``logistic_algo(0, max_value)`` equals
    ``min_value`` exactly, and it saturates as ``x`` grows toward
    ``max_value``.

    :param x: raw value to rescale.
    :param max_value: largest value in the data set; sets the curve's scale.
    :param min_value: radius returned when ``x`` is 0.
    :param c: coefficient applied to the order of magnitude of ``max_value``.
    :param k: steepness of the logistic curve.
    :return: the rescaled radius.
    """
    # phi rescales x by the order of magnitude of max_value.
    phi = c * (10**(int(math.log10(max_value)) - 1))
    L = max_value/phi
    denominator = 1 + math.e**(-1 * k * (x/phi))
    # Subtract L/2 so the curve passes through min_value at x == 0.
    return L/denominator - L/2 + min_value
x = list(range(50))
y = [logistic_algo(p, max(x)) for p in x]
# compose the plot
plt.plot(x, y)
# Imports
import numpy as np
from collections import Counter
from haversine import haversine
def most_central_point(geos_array, valid_medoid):
    """Find the point that is most central (i.e., the medoid) using the
    haversine formula. Distances are weighted by the number of observations.

    :param geos_array: a list of coordinates
    :type geos_array: list of tuples (must be immutable as this will be hashed)
    :param valid_medoid: max allowed value for (mean distance to all other
                         points) / (number of observations).
    :type valid_medoid: float or int
    :return: medoid coordinate, or ``np.NaN`` if no point passes the threshold
    """
    # Count the number times each coordinate appears in `geos_array`
    geos_array_count = dict(Counter(geos_array))
    # Define a list of unique coordinates
    unique_geos = list(set(geos_array))
    # Compute the distance from each point to all of the others
    # NOTE: with a single unique coordinate the distance list is empty and
    # np.mean([]) yields NaN, as in the original behaviour.
    coord_dict = {i: [haversine(i, j) for j in unique_geos if j != i] for i in unique_geos}
    # Mean distance for each coordinate, down-weighted by its occurrence count.
    # (`mean` was previously an undefined name; use np.mean explicitly.)
    coord_dict_mean = {k: np.mean(v) / float(geos_array_count[k]) for k, v in coord_dict.items()}
    # Use the coordinate with the smallest corresponding value
    medoid_mean_coord = min(coord_dict_mean, key=coord_dict_mean.get)
    # Check against threshold
    if coord_dict_mean[medoid_mean_coord] <= valid_medoid:
        return medoid_mean_coord
    else:
        return np.NaN
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Overview
Step2: We need to setup Accelerate to use all of our GPUs. We can do so quickly with write_basic_config ()
Step3: Next let's download some data to train on. You don't need to worry about using rank0_first, as since we're in our Jupyter Notebook it will only run on one process like normal
Step4: We wrap the creation of the DataLoaders, our vision_learner, and call to fine_tune inside of a train function.
Step5: The last addition to the train function needed is to use our context manager before calling fine_tune and setting in_notebook to True
Step6: if not rank_distrib()
Step7: Afterwards we can import our exported Learner, save, or anything else we may want to do in our Jupyter Notebook outside of a distributed process
|
<ASSISTANT_TASK:>
Python Code:
#|all_multicuda
#hide
from fastai.vision.all import *
from fastai.distributed import *
from fastai.vision.models.xresnet import *
from accelerate import notebook_launcher
from accelerate.utils import write_basic_config
#from accelerate.utils import write_basic_config
#write_basic_config()
path = untar_data(URLs.PETS)
def get_y(o): return o[0].isupper()
def train(path):
    # Build DataLoaders from the image files, holding out 20% for validation
    # and resizing every item to 224x224.
    loaders = ImageDataLoaders.from_name_func(
        path,
        get_image_files(path),
        valid_pct=0.2,
        label_func=get_y,
        item_tfms=Resize(224),
    )
    # Mixed-precision resnet34 learner, fine-tuned for one epoch.
    learner = vision_learner(loaders, resnet34, metrics=error_rate).to_fp16()
    learner.fine_tune(1)
def train(path):
    # Build DataLoaders from the image files, holding out 20% for validation
    # and resizing every item to 224x224.
    loaders = ImageDataLoaders.from_name_func(
        path,
        get_image_files(path),
        valid_pct=0.2,
        label_func=get_y,
        item_tfms=Resize(224),
    )
    learner = vision_learner(loaders, resnet34, metrics=error_rate).to_fp16()
    # Fine-tune under the distributed context manager (notebook mode), then
    # export the trained Learner for later loading.
    with learner.distrib_ctx(sync_bn=False, in_notebook=True):
        learner.fine_tune(1)
    learner.export("pets")
notebook_launcher(train, (path,), num_processes=2)
imgs = get_image_files(path)
learn = load_learner(path/'pets')
learn.predict(imgs[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fitting a quadratic curve
Step2: First, generate a dataset using this model using these parameters and the following characteristics
Step3: Now fit the model to the dataset to recover estimates for the model's parameters
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
a_true = 0.5
b_true = 2.0
c_true = -4.0
N = 30
xdata = np.linspace(-5, 5, N)
dy = 2
ydata = a_true*xdata**2 + b_true*xdata + c_true + np.random.normal(0.0, dy, size = N)
plt.figure(figsize=(8,6))
plt.errorbar(xdata, ydata, dy, fmt='.k', ecolor='lightgray')
plt.tick_params(axis='x', direction='out', top='off')
plt.tick_params(axis='y', direction='out', right='off')
plt.xlabel('x'), plt.ylabel('y'), plt.title('Random Quadratic Raw Data');
assert True # leave this cell for grading the raw data generation and plot
def model(x, a, b, c):
    # Quadratic model a*x^2 + b*x + c, evaluated in Horner form.
    return (a * x + b) * x + c
theta_best, theta_cov = opt.curve_fit(model, xdata, ydata, sigma=dy)
print('a = {0:.3f} +/- {1:.3f}'.format(theta_best[0], np.sqrt(theta_cov[0,0])))
print('b = {0:.3f} +/- {1:.3f}'.format(theta_best[1], np.sqrt(theta_cov[1,1])))
print('c = {0:.3f} +/- {1:.3f}'.format(theta_best[2], np.sqrt(theta_cov[2,2])))
xfit = np.linspace(-5.0,5.0)
yfit = theta_best[0]*xfit**2 + theta_best[1]*xfit + theta_best[2]
plt.figure(figsize=(8,6))
plt.plot(xfit, yfit)
plt.errorbar(xdata, ydata, dy, fmt='.k', ecolor='lightgray')
plt.xlabel('x'), plt.ylabel('y'), plt.title('Random Quadratic Curve Fitted Data')
plt.tick_params(axis='x', direction='out', top='off')
plt.tick_params(axis='y', direction='out', right='off')
assert True # leave this cell for grading the fit; should include a plot and printout of the parameters+errors
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step 1 - Creating a Checkpoint
Step1: Pre-Questions
Step2: PART 1
Step3: Use and modify the section of code below to answer questions 3-5.
Step4: PART 1
Step5: PART 2
Step6: PART 3
|
<ASSISTANT_TASK:>
Python Code:
# Import modules that contain functions we need
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
# Our data is the dichotomous key table and is defined as the word 'key'.
# key is set equal to the .csv file that is read by pandas.
# The .csv file must be in the same directory as the program.
#If the data is being pulled locally use the code that is commented out below
#key = pd.read_csv("Classification of Organisms- Jupyter Data.csv")
#key2 = pd.read_csv("Classification of Organisms- Jupyter Data KEY 2.csv")
key = pd.read_csv("https://gist.githubusercontent.com/GoodmanSciences/f4d51945a169ef3125234c57b878e058/raw/bebeaae8038f0b418ed37c2a98b82aa9d3cc38d1/Classification%2520of%2520Organisms-Jupyter%2520Data.csv")
key2 = pd.read_csv("https://gist.githubusercontent.com/GoodmanSciences/4060d993635e90cdcc46fe637c92ee37/raw/d9031747855b9762b239dea07a60254eaa6051f7/Classification%2520of%2520Organisms-%2520Jupyter%2520Data%2520KEY%25202.csv")
# This sets Organism as the index instead of numbers
#key = data.set_index("organism")
# Here is a helpful image of a sample Dichotomous Key!
from IPython.display import Image
from IPython.core.display import HTML
Image(url= 'http://biology-igcse.weebly.com/uploads/1/5/0/7/15070316/8196495_orig.gif')
# Animal options in Dichotomous Key
# Displays all row titles as an array
key.organism
# Conditions/Questions for finding the correct animal
# Displays all column titles as an array
key.columns
key[(key['fur'] == 'yes')]
# This conditional allows us to query a column and if the data within that cell matches it will display the animal(s).
#if you are unsure of what to put try making that column a comment by adding # in front of it.
key[
#physical characteristics
(key['fur'] == 'yes') & \
(key['feathers'] == 'no') & \
(key['poisonous'] == 'no') & \
(key['scales'] == 'no') & \
(key['multicellular'] == 'yes') & \
(key['fins'] == 'no') & \
(key['wings'] == 'no') & \
(key['vertebrate'] == 'yes') & \
#environmental characteristics
(key['marine'] == 'no') & \
(key['terrestrial'] == 'yes') & \
#feeding characteristics
#decomposers get their food by breaking down decaying organisms
(key['decomposer'] == 'no') & \
#carnivores get their food by eating animals
(key['carnivore'] == 'no') & \
#herbivores get their food by eating plants
(key['herbivore'] == 'yes') & \
#omnivores get their food by eating both plants and animals
(key['omnivore'] == 'no') & \
#photosynthesis is the process of making food using energy from sunlight
(key['photosynthesis'] == 'no') & \
#autotrophs are organisms that generate their own food inside themselves
(key['autotroph'] == 'no') & \
#possible kingdoms include: animalia, plantae, fungi
(key['kingdom'] == 'animalia') & \
#cell type
(key['eukaryotic'] == 'yes') & \
(key['prokaryotic'] == 'no')
]
#sort your organisms by their taxonomical classification
# This conditional allows us to query a column and if the data within that cell matches,
# it will display the corresponding animal(s)
key2[(key2['kingdom'] == 'animalia')]
#Done?? Insert a image for one of the organisms you found using the dichotomous key.
from IPython.display import Image
from IPython.core.display import HTML
Image(url= 'https://lms.mrc.ac.uk/wp-content/uploads/insert-pretty-picture-here1.jpg')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Timing individual functions
Step2: Testing pipeline as a whole
|
<ASSISTANT_TASK:>
Python Code:
import analysis3 as a3
reload(a3)
import time
def time_function(fun, *args, **kwargs):
    """Call ``fun`` with the given arguments, print its wall-clock run time
    in seconds and minutes, and return its result.

    Keyword arguments are now forwarded too (a backward-compatible extension
    of the positional-only original).
    """
    # perf_counter is monotonic and high-resolution, unlike time.time().
    start = time.perf_counter()
    result = fun(*args, **kwargs)
    run_time = time.perf_counter() - start
    minutes = run_time / 60
    print('RUN TIME: %f s (%f m)' % (run_time, minutes))
    return result
token = 's275_to_ara3'
cert_path = '../userToken.pem'
time_function(a3.get_registered, token, cert_path);
path = "img/" + token + "_regis.nii"
im = time_function(a3.apply_clahe, path);
output_ds = time_function(a3.downsample, im, 10000);
time_function(a3.save_points, output_ds, "points/" + token + ".csv");
points_path = "points/" + token + ".csv";
time_function(a3.generate_pointcloud, points_path, "output/" + token + "_pointcloud.html");
time_function(a3.get_atlas_annotate, cert_path, True, None);
time_function(a3.get_regions, points_path, "atlas/ara3_annotation.nii", "points/" + token + "_regions.csv");
points_region_path = "points/" + token + "_regions.csv";
g = time_function(a3.create_graph, points_region_path, 20, "graphml/" + token + "_graph.graphml");
time_function(a3.plot_graphml3d, g, False, "output/" + token + "_edgegraph.html");
time_function(a3.generate_region_graph, token, points_region_path, "output/" + token + "_regions.html");
time_function(a3.generate_density_graph, "graphml/" + token + "_graph.graphml",
"output/" + token + "_density.html", "False-Color Density of " + token);
print("Completed pipeline...!")
token = 's275_to_ara3'
cert_path = '../userToken.pem'
time_function(a3.run_pipeline, token, cert_path, 5);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Jupyter Notebooks
Step2: This includes the whole module and makes it available for use later in the program. For example, we can do
Step3: Alternatively, we can chose to import all symbols (functions and variables) in a module to the current namespace (so that we don't need to use the prefix "math." every time we use something from the math module
Step4: This pattern can be very convenient, but in large programs that include many modules it is often a good idea to keep the symbols from each module in their own namespaces, by using the import math pattern. This would elminate potentially confusing problems with name space collisions.
Step5: Looking at what a module contains, and its documentation
Step6: And using the function help we can get a description of each function (almost .. not all functions have docstrings, as they are technically called, but the vast majority of functions are documented this way).
Step7: We can also use the help function directly on modules
Step8: Variables and types
Step9: Although not explicitly specified, a variable does have a type associated with it. The type is derived from the value that was assigned to it.
Step10: If we assign a new value to a variable, its type can change.
Step11: If we try to use a variable that has not yet been defined we get an NameError
Step12: Fundamental types
Step13: Type utility functions
Step14: We can also use the isinstance method for testing types of variables
Step15: Type casting
Step16: Operators and comparisons
Step17: Note
Step18: Comparison operators >, <, >= (greater or equal), <= (less or equal), == equality, is identical.
Step19: Exercise
Step20: We can index a character in a string using []
Step21: Heads up MATLAB and R users
Step22: If we omit either (or both) of start or stop from [start
Step23: We can also define the step size using the syntax [start
Step24: This technique is called slicing. Read more about the syntax here
Step25: Exercise
Step26: We can use the same slicing techniques to manipulate lists as we could use on strings
Step27: Heads up MATLAB and R users
Step28: Elements in a list do not all have to be of the same type
Step29: Python lists can be heterogeneous and arbitrarily nested
Step30: Lists play a very important role in Python. For example they are used in loops and other flow control structures (discussed below). There are a number of convenient functions for generating lists of various types, for example the range function
Step31: Adding, inserting, modifying, and removing elements from lists
Step32: We can modify lists by assigning new values to elements in the list. In technical jargon, lists are mutable.
Step33: Insert an element at an specific index using insert
Step34: Remove first element with specific value using 'remove'
Step35: Remove an element at a specific location using del
Step36: See help(list) for more details, or read the online documentation
Step37: We can unpack a tuple by assigning it to a comma-separated list of variables
Step38: If we try to assign a new value to an element in a tuple we get an error
Step39: Dictionaries
Step40: Exercise
Step41: For the first time, here we encounted a peculiar and unusual aspect of the Python programming language
Step42: Loops
Step43: The for loop iterates over the elements of the supplied list, and executes the containing block once for each element. Any kind of list can be used in the for loop. For example
Step44: Note
Step45: To iterate over key-value pairs of a dictionary
Step46: Sometimes it is useful to have access to the indices of the values when iterating over a list. We can use the enumerate function for this
Step47: List comprehensions
Step48: while loops
Step49: Note that the print("done") statement is not part of the while loop body because of the difference in indentation.
Step51: Optionally, but highly recommended, we can define a so called "docstring", which is a description of the functions purpose and behaivor. The docstring should follow directly after the function definition, before the code in the function body.
Step53: Functions that returns a value use the return keyword
Step55: We can return multiple values from a function using tuples (see above)
Step56: Default argument and keyword arguments
Step57: If we don't provide a value of the debug argument when calling the the function myfunc it defaults to the value provided in the function definition
Step58: If we explicitly list the name of the arguments in the function calls, they do not need to come in the same order as in the function definition. This is called keyword arguments, and is often very useful in functions that takes a lot of optional arguments.
Step59: Unnamed functions (lambda function)
Step60: This technique is useful for example when we want to pass a simple function as an argument to another function, like this
Step64: Exercise
Step65: To create a new instance of a class
Step66: To invoke a class method in the class instance p
Step71: Note that calling class methods can modifiy the state of that particular class instance, but does not effect other class instances or any global variables.
Step72: We can import the module mymodule into our Python program using import
Step73: Use help(module) to get a summary of what the module provides
Step74: If we make changes to the code in mymodule.py, we need to reload it using reload
Step75: Exceptions
Step76: A typical use of exceptions is to abort functions when some error condition occurs, for example
Step77: To get information about the error, we can access the Exception class instance that describes the exception by using for example
Step78: Excercise
|
<ASSISTANT_TASK:>
Python Code:
!ls ../scripts/hello-world*.py
!cat ../scripts/hello-world.py
!python scripts/hello-world.py
import math
import math
x = math.cos(2 * math.pi)
print(x)
from math import *
x = cos(2 * pi)
print(x)
from math import cos, pi
x = cos(2 * pi)
print(x)
import math
print(dir(math))
help(math.log)
math.log(10)
math.log(10, 2)
import os
os.listdir()
# variable assignments
x = 1.0
my_variable = 12.2
type(x)
x = 1
type(x)
import traceback
try:
print(y)
except NameError as e:
print(traceback.format_exc())
# integers
x = 1
type(x)
# float
x = 1.0
type(x)
# boolean
b1 = True
b2 = False
type(b1)
# complex numbers: note the use of `j` to specify the imaginary part
x = 1.0 - 1.0j
type(x)
print(x)
print(x.real, x.imag)
x = 1.0
# check if the variable x is a float
type(x) is float
# check if the variable x is an int
type(x) is int
isinstance(x, float)
x = 1.5
print(x, type(x))
x = int(x)
print(x, type(x))
z = complex(x)
print(z, type(z))
import traceback
try:
x = float(z)
except TypeError as e:
print(traceback.format_exc())
1 + 2, 1 - 2, 1 * 2, 1 / 2
1.0 + 2.0, 1.0 - 2.0, 1.0 * 2.0, 1.0 / 2.0
# Integer division of float numbers
3.0 // 2.0
# Note! The power operators in python isn't ^, but **
2 ** 2
True and False
not False
True or False
2 > 1, 2 < 1
2 > 2, 2 < 2
2 >= 2, 2 <= 2
# equality
[1,2] == [1,2]
# objects identical?
list1 = list2 = [1,2]
list1 is list2
s = "Hello world"
type(s)
# length of the string: the number of characters
len(s)
# replace a substring in a string with somethign else
s2 = s.replace("world", "test")
print(s2)
s[0]
s[0:5]
s[4:5]
s[:5]
s[6:]
s[:]
s[::1]
s[::2]
print("str1", "str2", "str3") # The print statement concatenates strings with a space
print("str1", 1.0, False, -1j) # The print statements converts all arguments to strings
print("str1" + "str2" + "str3") # strings added with + are concatenated without space
print("value = %f" % 1.0) # we can use C-style string formatting
# this formatting creates a string
s2 = "value1 = %.2f. value2 = %d" % (3.1415, 1.5)
print(s2)
# alternative, more intuitive way of formatting a string
s3 = 'value1 = {0}, value2 = {1}'.format(3.1415, 1.5)
print(s3)
l = [1,2,3,4]
print(type(l))
print(l)
print(l)
print(l[1:3])
print(l[::2])
l[0]
l = [1, 'a', 1.0, 1-1j]
print(l)
nested_list = [1, [2, [3, [4, [5]]]]]
nested_list
start = 10
stop = 30
step = 2
range(start, stop, step)
# in python 3 range generates an iterator, which can be converted to a list using 'list(...)'.
# It has no effect in python 2
list(range(start, stop, step))
list(range(-10, 10))
s
# convert a string to a list by type casting:
s2 = list(s)
s2
# sorting lists (by creating a new variable)
s3 = sorted(s2)
print(s2)
print(s3)
# sorting lists in place
s2.sort()
print(s2)
# create a new empty list
l = []
# add an elements using `append`
l.append("A")
l.append("d")
l.append("d")
print(l)
l[1] = "p"
l[2] = "p"
print(l)
l[1:3] = ["d", "d"]
print(l)
l.insert(0, "i")
l.insert(1, "n")
l.insert(2, "s")
l.insert(3, "e")
l.insert(4, "r")
l.insert(5, "t")
print(l)
l.remove("A")
print(l)
del l[7]
del l[6]
print(l)
point = (10, 20)
print(point, type(point))
point = 10, 20
print(point, type(point))
x, y = point
print("x =", x)
print("y =", y)
try:
point[0] = 20
except TypeError as e:
print(traceback.format_exc())
params = {"parameter1" : 1.0,
"parameter2" : 2.0,
"parameter3" : 3.0,}
print(type(params))
print(params)
print("parameter1 = " + str(params["parameter1"]))
print("parameter2 = " + str(params["parameter2"]))
print("parameter3 = " + str(params["parameter3"]))
params["parameter1"] = "A"
params["parameter2"] = "B"
# add a new entry
params["parameter4"] = "D"
print("parameter1 = " + str(params["parameter1"]))
print("parameter2 = " + str(params["parameter2"]))
print("parameter3 = " + str(params["parameter3"]))
print("parameter4 = " + str(params["parameter4"]))
statement1 = False
statement2 = False
if statement1:
print("statement1 is True")
elif statement2:
print("statement2 is True")
else:
print("statement1 and statement2 are False")
statement1 = statement2 = True
if statement1:
if statement2:
print("both statement1 and statement2 are True")
# # Bad indentation!
# if statement1:
# if statement2:
# print("both statement1 and statement2 are True") # this line is not properly indented
statement1 = False
if statement1:
print("printed if statement1 is True")
print("still inside the if block")
if statement1:
print("printed if statement1 is True")
print("now outside the if block")
for x in [1,2,3]:
print(x)
for x in range(4): # by default range start at 0
print(x)
for x in range(-3,3):
print(x)
for word in ["scientific", "computing", "with", "python"]:
print(word)
for key, value in params.items():
print(key + " = " + str(value))
for idx, x in enumerate(range(-3,3)):
print(idx, x)
l1 = [x**2 for x in range(0,5)]
print(l1)
i = 0
while i < 5:
print(i)
i = i + 1
print("done")
def func0():
print("test")
func0()
def func1(s):
Print a string 's' and tell how many characters it has
print(s + " has " + str(len(s)) + " characters")
help(func1)
func1("test")
def square(x):
Return the square of x.
return x ** 2
square(4)
def powers(x):
Return a few powers of x.
return x ** 2, x ** 3, x ** 4
powers(3)
x2, x3, x4 = powers(3)
print(x3)
def myfunc(x, p=2, debug=False):
if debug:
print("evaluating myfunc for x = " + str(x) + " using exponent p = " + str(p))
return x**p
myfunc(5)
myfunc(5, debug=True)
myfunc(p=3, debug=True, x=7)
f1 = lambda x: x**2
# is equivalent to
def f2(x):
return x**2
f1(2), f2(2)
# map is a built-in python function
map(lambda x: x**2, range(-3,4))
# in python 3 we can use `list(...)` to convert the iterator to an explicit list
list(map(lambda x: x**2, range(-3,4)))
class Point:
Simple class for representing a point in a Cartesian coordinate system.
def __init__(self, x, y):
Create a new Point at x, y.
self.x = x
self.y = y
def translate(self, dx, dy):
Translate the point by dx and dy in the x and y direction.
self.x += dx
self.y += dy
def __str__(self):
return("Point at [%f, %f]" % (self.x, self.y))
p1 = Point(0, 0) # this will invoke the __init__ method in the Point class
print(p1) # this will invoke the __str__ method
p2 = Point(1, 1)
p1.translate(0.25, 1.5)
print(p1)
print(p2)
%%file mymodule.py
Example of a python module. Contains a variable called my_variable,
a function called my_function, and a class called MyClass.
my_variable = 0
def my_function():
Example function
return my_variable
class MyClass:
Example class.
def __init__(self):
self.variable = my_variable
def set_variable(self, new_value):
Set self.variable to a new value
self.variable = new_value
def get_variable(self):
return self.variable
import mymodule
help(mymodule)
mymodule.my_variable
mymodule.my_function()
my_class = mymodule.MyClass()
my_class.set_variable(10)
my_class.get_variable()
import importlib
importlib.reload(mymodule) # Python 3 only
# For Python 2 use reload(mymodule)
try:
raise Exception("description of the error")
except Exception as e:
print(traceback.format_exc())
try:
print("test")
# generate an error: the variable test is not defined
print(test)
except Exception:
print("Caught an exception")
try:
print("test")
# generate an error: the variable test is not defined
print(test)
except Exception as e:
print("Caught an exception:", e)
%reload_ext version_information
%version_information
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Annotating bad spans of data
Step2: .. sidebar
Step3: Now we can confirm that the annotations are centered on the EOG events. Since
Step4: See the section tut-section-programmatic-annotations for more details
Step5: The values that are appropriate are dataset- and hardware-dependent, so some
Step6: Notice that we've passed reject_by_annotation=False above, in order to
Step7: More importantly, note that many more epochs are rejected (~20% instead of
Step8: Finally, it should be noted that "dropped" epochs are not necessarily deleted
Step9: Alternatively, if rejection thresholds were not originally given to the
|
<ASSISTANT_TASK:>
Python Code:
import os
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
events_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw-eve.fif')
events = mne.read_events(events_file)
fig = raw.plot()
fig.canvas.key_press_event('a')
eog_events = mne.preprocessing.find_eog_events(raw)
onsets = eog_events[:, 0] / raw.info['sfreq'] - 0.25
durations = [0.5] * len(eog_events)
descriptions = ['bad blink'] * len(eog_events)
blink_annot = mne.Annotations(onsets, durations, descriptions,
orig_time=raw.info['meas_date'])
raw.set_annotations(blink_annot)
eeg_picks = mne.pick_types(raw.info, meg=False, eeg=True)
raw.plot(events=eog_events, order=eeg_picks)
reject_criteria = dict(mag=3000e-15, # 3000 fT
grad=3000e-13, # 3000 fT/cm
eeg=100e-6, # 100 µV
eog=200e-6) # 200 µV
flat_criteria = dict(mag=1e-15, # 1 fT
grad=1e-13, # 1 fT/cm
eeg=1e-6) # 1 µV
epochs = mne.Epochs(raw, events, tmin=-0.2, tmax=0.5, reject_tmax=0,
reject=reject_criteria, flat=flat_criteria,
reject_by_annotation=False, preload=True)
epochs.plot_drop_log()
epochs = mne.Epochs(raw, events, tmin=-0.2, tmax=0.5, reject_tmax=0,
reject=reject_criteria, flat=flat_criteria, preload=True)
epochs.plot_drop_log()
print(epochs.drop_log)
epochs.drop_bad()
stronger_reject_criteria = dict(mag=2000e-15, # 2000 fT
grad=2000e-13, # 2000 fT/cm
eeg=100e-6, # 100 µV
eog=100e-6) # 100 µV
epochs.drop_bad(reject=stronger_reject_criteria)
print(epochs.drop_log)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Source Data
Step2: Procedure
Step3: Then, we group the dataset by valueh_group and collect all of the income values for each group. These groups have different distributions, with the lower numbered group shewing to the left and the higher numbered group skewing to the right.
Step4: A scatter matrix show similar structure for VALUEH and t_income.
Step5: The simulated incomes also have similar statistics to the original incomes. However, the median income is high. In San Diego county, the median household income for householders 65 and older in the 2015 5 year ACS about \$51K, versus \$56K here. For home values, the mean home value for 65+ old homeowners is \$468K in the 5 year ACS, vs \$510K here.
Step6: Bibliography
Step7: Create a new KDE distribution, based on the home values, including only home values ( actually KDE supports ) between $130,000 and $1.5M.
Step8: Overlay the prior plot with the histogram of the original values. We're using np.histogram to make the histograph, so it appears as a line chart.
Step9: Show an a home value curve, interpolated to the same values as the distribution. The two curves should be co-incident.
Step10: Now, interpolate to the values for the county, which shifts the curve right.
Step11: Here is an example of creating an interpolated distribution, then generating a synthetic distribution from it.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%load_ext metatab
%load_ext autoreload
%autoreload 2
%mt_lib_dir lib
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import metatab as mt
import seaborn as sns; sns.set(color_codes=True)
import sqlite3
import statsmodels as sm
from statsmodels.nonparametric.kde import KDEUnivariate
from scipy import integrate, stats
from incomedist import *
from multikde import MultiKde
plt.rcParams['figure.figsize']=(6,6)
%%metatab
Origin: ipums.org
Dataset: income_homevalue
Identifier: b407e5af-cc23-431d-a431-15c202ec0c3b
Name: ipums.org-income_homevalue-4
Version: 4
Section: Contacts
Wrangler: Eric Busboom
Wrangler.Email: eric@civicknowledge.com
Section: Bibliography
Citation: ipums
Citation.Type: dataset
Citation.Author: Steven Ruggles; Katie Genadek; Ronald Goeken; Josiah Grover; Matthew Sobek
Citation.Title: Integrated Public Use Microdata Series
Citation.Year: 2017
Citation.Publisher: University of Minnesota
Citation.Version: 7.0
Citation.AccessDate: 20170718
Citation.Url: https://usa.ipums.org/usa/index.shtml
Citation.Doi: https://doi.org/10.18128/D010.V7.0
Citation: bordley
Citation.Type: article
Citation.Author: Robert F. Bordley; James B. McDonald; Anand Mantrala
Citation.Title: Something New, Something Old: Parametric Models for the Size of Distribution of Income
Citation.Year: 1997
Citation.Month: June
Citation.Journal: Journal of Income Distribution
Citation.Volume: 6
Citation.Number: 1
Citation.Pages: 5-5
Citation.Url: https://ideas.repec.org/a/jid/journl/y1997v06i1p5-5.html
Citation: mcdonald
Citation.Type: article
Citation.Author: McDonald, James B.; Mantrala, Anand
Citation.Title: The distribution of personal income: Revisited
Citation.Journal: Journal of Applied Econometrics
Citation.Volume: 10
Citation.Number: 2
Citation.Publisher: Wiley Subscription Services, Inc., A Wiley Company
Citation.Issn: 1099-1255
Citation.Doi: 10.1002/jae.3950100208
Citation.Pages: 201--204,
Citation.Year: 1995
Citation: majumder
Citation.Type: article
Citation.Author: Majumder, Amita; Chakravarty, Satya Ranjan
Citation.Title: Distribution of personal income: Development of a new model and its application to U.S. income data
Citation.Journal: Journal of Applied Econometrics
Citation.Volume: 5
Citation.Number: 2
Citation.Publisher: Wiley Subscription Services, Inc., A Wiley Company
Citation.Issn: 1099-1255
Citation.Doi: 10.1002/jae.3950050206
Citation.Pages: 189--196
Citation.Year: 1990
%%bash
# Create a sample of a SQL database, so we can edit the schema.
# Run the cell once to create the schema, then edit the schema and run it
# again to build the database.
fn='/Volumes/Storage/Downloads/usa_00005.csv'
if [ ! -e schema.sql ]
then
head -100 $fn > sample.csv
sqlite3 --csv ipums.sqlite '.import sample.csv ipums'
sqlite3 ipums.sqlite .schema > schema-orig.sql
sqlite3 -header ipums.sqlite "select * from ipums limit 2" > sample.sql # Show a sample of data
rm ipums.sqlite
fi
if [ -e schema.sql -a \( ! -e ipums.sqlite \) ]
then
sqlite3 ipums.sqlite < schema.sql
sqlite3 --csv ipums.sqlite ".import $fn ipums"
# Create some indexes to speed up queries
sqlite3 ipums.sqlite "CREATE INDEX IF NOT EXISTS state_idx ON ipums (STATEFIP)"
sqlite3 ipums.sqlite "CREATE INDEX IF NOT EXISTS county_idx ON ipums (COUNTYFIPS)"
sqlite3 ipums.sqlite "CREATE INDEX IF NOT EXISTS state_county_idx ON ipums (STATEFIP, COUNTYFIPS)"
fi
# Check the weights for the whole file to see if they sum to the number
# of households and people in the county. They don't, but the sum of the weights for households is close,
# 126,279,060 vs about 116M housholds
con = sqlite3.connect("ipums.sqlite")
wt = pd.read_sql_query("SELECT YEAR, DATANUM, SERIAL, HHWT, PERNUM, PERWT FROM ipums "
"WHERE PERNUM = 1 AND YEAR = 2015", con)
wt.drop(0, inplace=True)
nd_s = wt.drop_duplicates(['YEAR', 'DATANUM','SERIAL'])
country_hhwt_sum = nd_s[nd_s.PERNUM == 1]['HHWT'].sum()
len(wt), len(nd_s), country_hhwt_sum
import sqlite3
# PERNUM = 1 ensures only record for each household
con = sqlite3.connect("ipums.sqlite")
senior_hh = pd.read_sql_query(
"SELECT DISTINCT SERIAL, HHWT, PERWT, HHINCOME, VALUEH "
"FROM ipums "
"WHERE AGE >= 65 "
"AND HHINCOME < 9999999 AND VALUEH < 9999999 "
"AND STATEFIP = 6 AND COUNTYFIPS=73 ", con)
# Since we're doing a probabilistic simulation, the easiest way to deal with the weight is just to repeat rows.
# However, adding the weights doesn't change the statistics much, so they are turned off now, for speed.
def generate_data():
for index, row in senior_hh.drop_duplicates('SERIAL').iterrows():
#for i in range(row.HHWT):
yield (row.HHINCOME, row.VALUEH)
incv = pd.DataFrame(list(generate_data()), columns=['HHINCOME', 'VALUEH'])
sns.jointplot(x="HHINCOME", y="VALUEH", marker='.', scatter_kws={'alpha': 0.1}, data=incv, kind='reg');
incv['valueh_rank'] = incv.rank()['VALUEH']
incv['valueh_group'] = pd.qcut(incv.valueh_rank, 10, labels=False )
incv['hhincome_rank'] = incv.rank()['HHINCOME']
incv['hhincome_group'] = pd.qcut(incv.hhincome_rank, 10, labels=False )
incv[['HHINCOME', 'VALUEH', 'hhincome_group', 'valueh_group']] .corr()
from metatab.pands import MetatabDataFrame
odf = MetatabDataFrame(incv)
odf.name = 'income_homeval'
odf.title = 'Income and Home Value Records for San Diego County'
odf.HHINCOME.description = 'Household income'
odf.VALUEH.description = 'Home value'
odf.valueh_rank.description = 'Rank of the VALUEH value'
odf.valueh_group.description = 'The valueh_rank value quantized into 10 bins, from 0 to 9'
odf.hhincome_rank.description = 'Rank of the HHINCOME value'
odf.hhincome_group.description = 'The hhincome_rank value quantized into 10 bins, from 0 to 9'
%mt_add_dataframe odf --materialize
import matplotlib.pyplot as plt
import numpy as np
mk = MultiKde(odf, 'valueh_group', 'HHINCOME')
fig,AX = plt.subplots(3, 3, sharex=True, sharey=True, figsize=(15,15))
incomes = [30000,
40000,
50000,
60000,
70000,
80000,
90000,
100000,
110000]
for mi, ax in zip(incomes, AX.flatten()):
s, d, icdf, g = mk.make_kde(mi)
syn_d = mk.syn_dist(mi, 10000)
syn_d.plot.hist(ax=ax, bins=40, title='Median Income ${:0,.0f}'.format(mi), normed=True, label='Generated')
ax.plot(s,d, lw=2, label='KDE')
fig.suptitle('Income Distributions By Median Income\nKDE and Generated Distribution')
plt.legend(loc='upper left')
plt.show()
t = incv.copy()
t['t_income'] = mk.syn_dist(t.HHINCOME.median(), len(t))
t[['HHINCOME','VALUEH','t_income']].corr()
sns.pairplot(t[['VALUEH','HHINCOME','t_income']]);
from IPython.display import display_html, HTML
display(HTML("<h3>Descriptive Stats</h3>"))
t[['VALUEH','HHINCOME','t_income']].describe()
display(HTML("<h3>Correlations</h3>"))
t[['VALUEH','HHINCOME','t_income']].corr()
%mt_bibliography
# Tests
s,d = make_prototype(incv.VALUEH.astype(float), 130_000, 1_500_000)
plt.plot(s,d)
v = incv.VALUEH.astype(float).sort_values()
#v = v[ ( v > 60000 ) & ( v < 1500000 )]
hist, bin_edges = np.histogram(v, bins=100, density=True)
bin_middles = 0.5*(bin_edges[1:] + bin_edges[:-1])
bin_width = bin_middles[1] - bin_middles[0]
assert np.isclose(sum(hist*bin_width),1) # == 1 b/c density==True
hist, bin_edges = np.histogram(v, bins=100) # Now, without 'density'
# And, get back to the counts, but now on the KDE
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(s,d * sum(hist*bin_width));
ax.plot(bin_middles, hist);
def plot_compare_curves(p25, p50, p75):
fig = plt.figure(figsize = (8,3))
ax = fig.add_subplot(111)
sp, dp = interpolate_curve(s, d, p25, p50, p75)
ax.plot(pd.Series(s), d, color='black');
ax.plot(pd.Series(sp), dp, color='red');
# Re-input the quantiles for the KDE
# Curves should be co-incident
plot_compare_curves(2.800000e+05,4.060000e+05,5.800000e+05)
# Values for SD County home values
plot_compare_curves(349100.0,485900.0,703200.0)
sp, dp = interpolate_curve(s, d, 349100.0,485900.0,703200.0)
v = syn_dist(sp, dp, 10000)
plt.hist(v, bins=100);
pd.Series(v).describe()
%mt_materialize /Volumes/Storage/proj/virt/data-projects/sdrdl-data-projects/ipums.org/income_home_value/ipums.org-income_homevalue
%mt_show_metatab
%mt_show_libdirs
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Making proper sets
Step2: Boolean operations
Step3: Q3. Find the unique intersection of x and y.
Step4: Q4. Find the unique elements of x that are not present in y.
Step5: Q5. Find the xor elements of x and y.
Step6: Q6. Find the union of x and y.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.__version__
author = 'kyubyong. longinglove@nate.com'
x = np.array([1, 2, 6, 4, 2, 3, 2])
out, indices = np.unique(x, return_inverse=True)
print "unique elements =", out
print "reconstruction indices =", indices
print "reconstructed =", out[indices]
x = np.array([0, 1, 2, 5, 0])
y = np.array([0, 1])
print np.in1d(x, y)
x = np.array([0, 1, 2, 5, 0])
y = np.array([0, 1, 4])
print np.intersect1d(x, y)
x = np.array([0, 1, 2, 5, 0])
y = np.array([0, 1, 4])
print np.setdiff1d(x, y)
x = np.array([0, 1, 2, 5, 0])
y = np.array([0, 1, 4])
out1 = np.setxor1d(x, y)
out2 = np.sort(np.concatenate((np.setdiff1d(x, y), np.setdiff1d(y, x))))
assert np.allclose(out1, out2)
print out1
x = np.array([0, 1, 2, 5, 0])
y = np.array([0, 1, 4])
out1 = np.union1d(x, y)
out2 = np.sort(np.unique(np.concatenate((x, y))))
assert np.allclose(out1, out2)
print np.union1d(x, y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we need to get some data into our session.
Step2: The datastep.runcode Action
Step3: We can pull the output table DataFrame out using the following line of code. The ix property is a DataFrame property that allows you to extract elements from a DataFrame at indexes or labels. In this case, we want the element in row zero, column name casTable.
Step4: As you can see, we have a new CAS table that now includes the BMI column.
Step5: The casds IPython Magic Command
Step6: Now we can use the %%casds magic to enter an entire cell of data step code. The %casds magic requires at least one argument which contains the CAS connection object where the action should run. In most cases, you'll want to add the --output option as well which specifies the name of an output variable that will be surfaced to the Python environment which contains the output of the datastep.runcode action.
Step7: Just as before, we can extract the output CASTable object from the returned DataFrames.
Step8: Conclusion
|
<ASSISTANT_TASK:>
Python Code:
import swat
conn = swat.CAS(host, port, username, password)
cls = conn.read_csv('https://raw.githubusercontent.com/sassoftware/sas-viya-programming/master/data/class.csv',
casout=dict(name='class', caslib='casuser'))
cls
out = conn.datastep.runcode('''
data bmi(caslib='casuser');
set class(caslib='casuser');
BMI = weight / (height**2) * 703;
run;
''')
out
bmi = out.OutputCasTables.ix[0, 'casTable']
bmi.to_frame()
bmi2 = cls.datastep('''BMI = weight / (height**2) * 703''')
bmi2.to_frame()
%load_ext swat.cas.magics
%%casds --output out2 conn
data bmi3(caslib='casuser');
set class(caslib='casuser');
BMI = weight / (height**2) * 703;
run;
bmi3 = out2.OutputCasTables.ix[0, 'casTable']
bmi3.to_frame()
conn.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Extract Features
Step3: Train SVM on features
Step4: Neural Network on image features
|
<ASSISTANT_TASK:>
Python Code:
# Run some setup code
import numpy as np
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the notebook
# rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
# bool var. to let program show debug info.
debug = True
show_img = True
import cifar10
# Load the raw CIFAR-10 data
m, m_val, m_dev, m_test = 49000, 1000, 500, 1000
m_spec = (m, m_val, m_dev, m_test)
data = cifar10.load_raw('../cifar-10-batches-py', m_spec, debug = debug)
X, y, X_test, y_test, X_val, y_val, X_dev, y_dev = data
from features import *
num_color_bins = 10 # Number of bins in the color histogram
feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats = extract_features(X, feature_fns, verbose=True)
X_val_feats = extract_features(X_val, feature_fns)
X_test_feats = extract_features(X_test, feature_fns)
# Preprocessing: Subtract the mean feature
mean_feat = np.mean(X_train_feats, axis=0, keepdims=True)
X_train_feats -= mean_feat
X_val_feats -= mean_feat
X_test_feats -= mean_feat
# Preprocessing: Divide by standard deviation. This ensures that each feature
# has roughly the same scale.
std_feat = np.std(X_train_feats, axis=0, keepdims=True)
X_train_feats /= std_feat
X_val_feats /= std_feat
X_test_feats /= std_feat
# Preprocessing: Add a bias dimension
# X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])
# X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])
# X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])
from svm import SVM
n = X_train_feats.shape[1]
K = 10
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save #
# the best trained classifer in best_svm. You might also want to play #
# with different numbers of bins in the color histogram. If you are careful #
# you should be able to get accuracy of near 0.44 on the validation set. #
best_model = None
best_val = -1
alpha, lamda, T, B = 1e-7, 3e4, 1000, 200
for lamda in [3e4]:
hpara = (alpha, lamda, T, B)
print hpara
model = SVM(n, K)
model.train(X_train_feats, y, hpara, show_img = False, debug = False)
train_acc = np.mean(model.predict(X_val_feats) == y_val)
val_acc = np.mean(model.predict(X_val_feats) == y_val)
print 'train acc.:', train_acc, 'val. acc.:', val_acc
if val_acc > best_val:
best_model = model
best_val = val_acc
# Evaluate your trained SVM on the test set
print 'test acc.', np.mean(best_model.predict(X_test_feats) == y_test)
from nnet import NNet
best_model = None
best_acc = -1
# TODO: Tune hyperparameters using the validation set. Store your best trained
# model in best_net.
#
# To help debug your network, it may help to use visualizations similar to the
# ones we used above; these visualizations will have significant qualitative
# differences from the ones we saw above for the poorly tuned network.
#
# Tweaking hyperparameters by hand can be fun, but you might find it useful to
# write code to sweep through possible combinations of hyperparameters
# automatically like we did on the previous exercises.
n0 = X_train_feats.shape[1]
n1 = 500
n2 = 10
alpha, lamda, T, B, rho = 2e-3, 1e-3, 1000, 200, 0.95
for alpha in [1e-2, 1e-1, 1e0]:
hpara = (alpha, lamda, T, B, rho)
print hpara
model = NNet(n0, n1, n2, std = 1e-1)
model.train(X_train_feats, y, X_val_feats, y_val, hpara, debug, show_img)
# Predict on the val. set
val_acc = np.mean(model.predict(X_val_feats) == y_val)
print 'val. acc.:', val_acc
print '\n'
if val_acc > best_acc:
best_acc = val_acc
best_model = model
# Run your neural net classifier on the test set. You should be able to
# get more than 55% accuracy.
print 'test acc.', np.mean(best_model.predict(X_test_feats) == y_test)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'inpe', 'sandbox-2', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# (fixed: the "mesosphere" choice was listed twice; "troposhere" spelling
#  follows the CMIP6 controlled vocabulary and is intentional)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
# FIX: this cell previously registered 'prescribed_fields_mmr' a second
# time; per the CMIP6 aerosol specialization, section 9.5 documents the
# prescribed AOD (not MMR) fields.
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_aod')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Old tagging
Step2: MC
Step3: Taggers combination
Step4: Additional information
Step5: Check calibration of mistag
Step6: Check calibration of mistag
Step7: (PIDNNp < 0.6) & (PIDNNpi < 0.6) & (ghostProb < 0.4)
Step8: (PIDNNpi < 0.6) & (ghostProb < 0.4)
Step9: Preliminary estimation (track
Step10: Preliminary estimation (track
Step11: Checks on track
Step12: This ROC AUC score is similar to the current tagging implementation
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
import pandas
pandas.set_option('display.precision', 4)
pandas.read_csv('img/old-tagging-parts.csv').drop(['AUC, with untag', '$\Delta$ AUC, with untag'], axis=1)
pandas.set_option('display.precision', 4)
pandas.read_csv('img/old-tagging-parts-MC.csv').drop(['AUC, with untag', '$\Delta$ AUC, with untag'], axis=1)
pandas.set_option('display.precision', 4)
pandas.read_csv('img/old-tagging.csv').drop(['$\Delta$ AUC, with untag'], axis=1)
pandas.set_option('display.precision', 4)
pandas.read_csv('img/old-tagging-MC.csv').drop(['$\Delta$ AUC, with untag'], axis=1)
pandas.set_option('display.precision', 5)
pandas.read_csv('img/eff_OS.csv').drop(['$\Delta$ AUC, with untag'], axis=1)
pandas.set_option('display.precision', 5)
pandas.read_csv('img/eff_tracking_SS.csv').drop(['$\Delta$ AUC, with untag'], axis=1)
pandas.set_option('display.precision', 5)
pandas.read_csv('img/new-tagging.csv').drop(['$\Delta$ AUC, with untag'], axis=1)
pandas.set_option('display.precision', 5)
pandas.read_csv('img/new-tagging_relax1.csv')
pandas.set_option('display.precision', 5)
pandas.read_csv('img/new-tagging_relax2.csv')
pandas.set_option('display.precision', 5)
pandas.read_csv('img/new-tagging-PID-less.csv').drop(['$\Delta$ AUC, with untag'], axis=1)
pandas.set_option('display.precision', 5)
pandas.read_csv('img/new-tagging_full_tracks.csv')
pandas.set_option('display.precision', 5)
pandas.read_csv('img/track_signs_assymetry_means.csv', index_col='name')
pandas.set_option('display.precision', 5)
pandas.concat([pandas.read_csv('img/track_signs_assymetry_means.csv', index_col='name'),
pandas.read_csv('img/track_signs_assymetry_means_mc.csv', index_col='name')])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Unfortunately Bokeh can't solve this as Bokeh can't control the CSS of the parent element, which belongs to Jupyter. This can be solved in two ways
|
<ASSISTANT_TASK:>
Python Code:
# Show the problem: a screenshot of a hover tooltip being clipped by the
# Jupyter output cell.
from IPython.display import Image  # FIX: Image was used without an import
Image(url="https://raw.githubusercontent.com/birdsarah/bokeh-miscellany/master/cut-off-tooltip.png", width=400, height=400)
from IPython.core.display import HTML
# Workaround: let notebook output areas overflow horizontally so Bokeh
# tooltips are not clipped by Jupyter's cell boundary.
# FIX: the CSS payload must be a (triple-quoted) string; the quotes had been
# stripped, leaving invalid Python.
HTML("""
<style>
div.output_subarea {
    overflow-x: visible;
}
</style>
""")
from bokeh.plotting import figure, show, ColumnDataSource  # FIX: import show
from bokeh.models import HoverTool
# Data for five points, each with a description and a thumbnail URL that the
# custom tooltip template references via @desc / @imgs.
source = ColumnDataSource(
    data=dict(
        x=[1, 2, 3, 4, 5],
        y=[2, 5, 8, 2, 7],
        desc=['A', 'b', 'C', 'd', 'E'],
        imgs=[
            'http://bokeh.pydata.org/static/snake.jpg',
            'http://bokeh.pydata.org/static/snake2.png',
            'http://bokeh.pydata.org/static/snake3D.png',
            'http://bokeh.pydata.org/static/snake4_TheRevenge.png',
            'http://bokeh.pydata.org/static/snakebite.jpg'
        ]
    )
)
# Custom HTML tooltip: thumbnail, description + point index, and location.
# FIX: the tooltip template is an HTML *string* passed to HoverTool; the
# triple quotes had been stripped here as well.
hover = HoverTool(
    tooltips="""
    <div>
        <div>
            <img
                src="@imgs" height="42" alt="@imgs" width="42"
                style="float: left; margin: 0px 15px 15px 0px;"
                border="2"
            ></img>
        </div>
        <div>
            <span style="font-size: 17px; font-weight: bold;">@desc</span>
            <span style="font-size: 15px; color: #966;">[$index]</span>
        </div>
        <div>
            <span style="font-size: 15px;">Location</span>
            <span style="font-size: 10px; color: #696;">($x, $y)</span>
        </div>
    </div>
    """
)
p = figure(plot_width=200, plot_height=200, tools=[hover], title='Hover')
p.circle('x', 'y', size=20, source=source)
show(p)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Get domains (without protocols) (including extension, e.g. .com) for URLs with both http and https protocols.
Step2: 3. Below is a list of language codes. Determine how many are some form of English
Step3: 4. Fix all langauge codes so that they are formatted as such
|
<ASSISTANT_TASK:>
Python Code:
urls = ['http://www.domain.com',
        'https://somedomain.com',
        'http://my-domain-123.net',
        'https://google.com',
        'http://www.foo.com',
        'https://bar-baz3.com',
        'ftp://domain2.com']
import re
# 1. URLs served over TLS: any URL containing 'https://'.
https_urls = [m.string for u in urls for m in [re.search(r'https://', u)] if m]
https_urls
# 2. Domains (incl. extension) for http and https URLs. Hyphens are matched
#    explicitly with [\w-] and the separator dot is escaped; the original
#    pattern's unescaped '.' only matched hyphenated domains by accident.
#    Extensions (.com/.net/...) are not validated -- too many exist.
domains = [m.group(1) for u in urls
           for m in [re.search(r'https?://([\w-]+(\.[\w-]+)+)', u)] if m]
domains
languages = ['Ar','It','it','En','En_gb','jp','en_GB','EN_IE','en-NZ','en','es','ES-es']
# 3. Count codes that are some form of English (case-insensitive 'en' prefix).
n_english = sum(1 for c in languages if re.search(r'^en', c, re.IGNORECASE))
n_english
# 4. Normalize to ll-CC: lowercase language, uppercase country, '-' separator.
#    Group 3 ('.') matches whichever separator ('_' or '-') was used.
normalized = [m.group(1).lower() + ('-' + m.group(4).upper() if m.group(2) else '')
              for c in languages
              for m in [re.search(r'(^[A-Za-z]{2})((.)([A-Za-z]{2}$))?', c)] if m]
normalized
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: General, Analysis-unrelated Setup
Step2: Task
Step3: Analysis Setup
Step4: <hr />
Step5: Task
Step6: <hr />
Step7: Task
Step8: <hr />
Step9: Task
Step10: <hr />
Step11: Task
Step12: <hr />
Step13: Task
|
<ASSISTANT_TASK:>
Python Code:
import order as od
import scinum as sn
# campaign
c_2017 = od.Campaign("2017_13Tev_25ns", 1, ecm=13, bx=25)
# processes
p_data = od.Process("data", 1,
is_data=True,
label="data",
)
p_ttH = od.Process("ttH", 2,
label=r"$t\bar{t}H$",
xsecs={
13: sn.Number(0.5071, {"scale": (sn.Number.REL, 0.058, 0.092)}),
},
)
p_tt = od.Process("tt", 3,
label=r"$t\bar{t}$",
xsecs={
13: sn.Number(831.76, {"scale": (19.77, 29.20)}),
},
)
# datasets
d_data = od.Dataset("data", 1,
campaign=c_2017,
is_data=True,
n_files=100,
n_events=200000,
keys=["/data/2017.../AOD"],
)
d_ttH = od.Dataset("ttH", 2,
campaign=c_2017,
n_files=50,
n_events=100000,
keys=["/ttH_powheg.../.../AOD"],
)
d_tt = od.Dataset("tt", 3,
campaign=c_2017,
n_files=500,
n_events=87654321,
keys=["/tt_powheg.../.../AOD"],
)
d_WW = od.Dataset("WW", 4,
campaign=c_2017,
n_files=100,
n_events=54321,
keys=["/WW_madgraph.../.../AOD"],
)
# link processes to datasets
d_data.add_process(p_data)
d_ttH.add_process(p_ttH)
d_tt.add_process(p_tt)
print([len(d.processes) for d in [d_data, d_ttH, d_tt]])
d_ttH.get_process("ttH").get_xsec(d_ttH.campaign.ecm)
ana = od.Analysis("ttH", 1)
# create a config by passing the campaign, so id and name will be identical
cfg = ana.add_config(c_2017)
# add processes manually
cfg.add_process(p_data)
cfg.add_process(p_ttH)
cfg.add_process(p_tt)
# add datasets in a loop
for name in ["data", "ttH", "tt"]:
cfg.add_dataset(c_2017.get_dataset(name))
cfg.get_dataset("ttH").n_events / float(cfg.get_dataset("ttH").n_files)
ch_bb = cfg.add_channel("ttH_bb", 1)
cat_5j = ch_bb.add_category("eq5j",
label="5 jets",
selection="n_jets == 5",
)
cat_6j = ch_bb.add_category("ge6j",
label=r"$\geq$ 6 jets",
selection="n_jets >= 6",
)
# divide the 6j category further
cat_6j_3b = cat_6j.add_category("ge6j_eq3b",
label=r"$\geq$ 6 jets, 3 b-tags",
selection="n_jets >= 6 && n_btags == 3",
)
cat_6j_4b = cat_6j.add_category("ge6j_ge4b",
label=r"$\geq$ 6 jets, $\geq$ 4 b-tags",
selection="n_jets >= 6 && n_btags >= 4",
)
cfg.get_channel("ttH_bb").get_category("ge6j_ge4b", deep=True).label_root
cfg.add_shift("nominal", 1)
cfg.add_shift("lumi_up", 2, type="rate")
cfg.add_shift("lumi_down", 3, type="rate")
cfg.add_shift("scale_up", 4, type="shape")
cfg.add_shift("scale_down", 5, type="shape")
print(len(cfg.shifts))
shifts = [s for s in cfg.shifts if s.source == "scale"]
print(shifts)
cfg.add_variable("jet1_pt",
expression="Reco__jet1__pt",
binning=(25, 0., 500,),
unit="GeV",
x_title=r"Leading jet $p_{T}$",
)
cfg.add_variable("jet1_px",
expression="Reco__jet1__pt * cos(Reco__jet1__phi)",
binning=(25, 0., 500,),
unit="GeV",
x_title=r"Leading jet $p_{x}$",
)
print(len(cfg.variables))
cfg.get_variable("jet1_px").get_full_title(root=True)
cfg.set_aux("lumi", 40.)
cfg.set_aux(("globalTag", "data"), "80X_dataRun2...")
cfg.set_aux(("globalTag", "mc"), "80X_mcRun2...")
print(len(cfg.aux))
print(cfg.get_aux(("globalTag", "mc")))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Redo the model with a 75% - 25% training/test split and compare the results. Are they better or worse than before? Discuss why this may be.
Step2: Load the breast cancer dataset (datasets.load_breast_cancer()) and perform basic exploratory analysis. What attributes to we have? What are we trying to predict?
Step3: Using the breast cancer data, create a classifier to predict the diagnosis (malignant vs. benign). Perform the above hold-out evaluation (50-50 and 75-25) and discuss the results.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
%matplotlib inline
import numpy as np
import pydotplus
from pandas.tools.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import tree
from sklearn.externals.six import StringIO
from sklearn.cross_validation import train_test_split
from sklearn import tree
from sklearn import metrics
iris_info = datasets.load_iris()
iris_info
x = iris.data[:,2:]
y = iris.target
x
y
iris['feature_names']
dt = tree.DecisionTreeClassifier()
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.5,train_size=0.5)
dt = dt.fit(x_train,y_train)
def output(X,y,clf, show_accuracy=True, show_classification_report=True, show_confussion_matrix=True):
y_pred=clf.predict(X)
if show_accuracy:
print("Accuracy:{0:.3f}".format(metrics.accuracy_score(y, y_pred)),"\n")
if show_classification_report:
print("Classification report")
print(metrics.classification_report(y,y_pred),"\n")
if show_confussion_matrix:
print("Confusion matrix")
print(metrics.confusion_matrix(y,y_pred),"\n")
output(x_train,y_train,dt)
output(x_test,y_test,dt)
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.25,train_size=0.75)
output(x_train,y_train,dt)
output(x_test,y_test,dt)
breast_cancer_data = datasets.load_breast_cancer()
type(breast_cancer_data)
breast_cancer_data
breast_cancer_data['feature_names']
print(type(breast_cancer_data.data))
print(breast_cancer_data.target_names)
print(breast_cancer_data.DESCR)
breast_cancer_df = pd.DataFrame(breast_cancer_data.data, columns=breast_cancer_data['feature_names'])
breast_cancer_df
breast_cancer_df['diagnosis']= breast_cancer_data.target
breast_cancer_df.corr()
x_seed = breast_cancer_data.data[:,:2]
y_seed = breast_cancer_data.target
x_seed
y_seed
x_train, x_test, y_train, y_test = split_test(x,y,test_size=0.5,train_size=0.5)
dt = dt.fit(x_train,y_train)
output(x_train,y_train,dt)
output(x_test,y_test,dt)
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.25,train_size=0.75)
output(x_train,y_train,dt)
output(x_test,y_test,dt)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set two locations for which we want to compute the measurements; in this example $P_1$
Step2: Note
Step3: Geodetic curve
Step4: Extract Latitude and Longitude from and add the destination point which is missed in the previous list
Step5: We can plot the resulting geodesic with
Step6: Surface's area on the ellipsoid
|
<ASSISTANT_TASK:>
Python Code:
# Geodesic computations on the WGS84 ellipsoid with pyproj and geographiclib.
from pyproj import Geod
g = Geod(ellps='WGS84')
lat1,lon1 = (40.7143528, -74.0059731) # New York, NY
lat2,lon2 = (1.359, 103.989) # Singapore (Changi) -- NOTE(review): original comment said "Delhi, India", but 1.36N / 103.99E is Singapore
# inverse problem: forward azimuth, back azimuth and distance (meters)
az12,az21,dist = g.inv(lon1,lat1,lon2,lat2)
az12,az21,dist
# using geographiclib: solve the same inverse problem (path from 1 to 2)
from geographiclib.geodesic import Geodesic
g = Geodesic.WGS84.Inverse(lat1, lon1, lat2, lon2)   # NOTE: rebinds g to the result dict
g
# direct problem: midpoint of the geodesic (half of s12 along azimuth azi1)
h = Geodesic.WGS84.Direct(lat1, lon1, g['azi1'], g['s12']/2)
print(h['lat2'], h['lon2']);
# sample the geodesic every 100 km ...
gc = [Geodesic.WGS84.Direct(lat1, lon1, g['azi1'], i) for i in range(0,int(g['s12']),100000)]
lat = [i['lat2'] for i in gc]
lat.append(lat2)    # ... and append the exact destination, which the
lon = [i['lon2'] for i in gc]
lon.append(lon2)    # range() sampling misses
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(lon,lat,'-');
# Computing the area of a geodesic polygon
def p(lat,lon): return {'lat': lat, 'lon': lon}
Geodesic.WGS84.Area([p(0, 0), p(0, 90), p(90, 0)])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: file_path
Step2: LSTM - Hyper Params
Step3: vgg16
Step4: load_vgg16
Step5: File Info
Step6: Text Reader
Step7: LSTM First Layer
Step8: matrix_concat
Step9: LSTM Second Layer
Step10: Test
Step11: Result Check
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
from PIL import Image
import numpy as np
from scipy.misc import imread, imresize
from imagenet_classes import class_names
import os
#File Path
# filepath_input = "./data/run/" #input csv file path
filepath_ckpt = "./ckpt/model_weight.ckpt" #weight saver check point file path
filepath_pred = "./output/predicted.csv" #predicted value file path
filename_queue_description = tf.train.string_input_producer(['./data/description/raw_data.csv'])
num_record = 50
label_vec_size = 5
input_vec_size = 27
batch_size = 50
state_size_1 = 100
state_size_2 = 4096 + state_size_1
hidden = 15
learning_rate = 0.01
class vgg16:
    """VGG-16 graph built from raw TF1 ops (Simonyan & Zisserman, 2014).

    Exposes the intermediate tensors as attributes (``conv1_1`` ... ``conv5_3``,
    ``pool1`` ... ``pool5``, ``fc1``, ``fc2``, ``fc3l``, ``probs``).
    ``parameters`` lists every kernel/bias Variable in construction order; that
    order must match the sorted key order of the ``vgg16_weights.npz``
    checkpoint consumed by ``load_weights``.
    """

    def __init__(self, imgs, weights=None, sess=None):
        """Build the graph on top of the ``imgs`` placeholder.

        If both ``weights`` (path to an .npz checkpoint) and ``sess`` are
        given, pretrained weights are assigned immediately.
        """
        self.imgs = imgs
        self.convlayers()
        self.fc_layers()
        self.probs = tf.nn.softmax(self.fc3l)
        if weights is not None and sess is not None:
            self.load_weights(weights, sess)

    def _conv(self, name, bottom, in_channels, out_channels):
        """3x3/stride-1 SAME convolution + bias + ReLU; registers its variables."""
        with tf.name_scope(name) as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, in_channels, out_channels],
                                                     dtype=tf.float32, stddev=1e-1),
                                 name='weights')
            conv = tf.nn.conv2d(bottom, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[out_channels], dtype=tf.float32),
                                 trainable=True, name='biases')
            # Registration order defines the weight-loading order in load_weights.
            self.parameters += [kernel, biases]
            return tf.nn.relu(tf.nn.bias_add(conv, biases), name=scope)

    @staticmethod
    def _pool(name, bottom):
        """2x2/stride-2 SAME max pooling."""
        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                              padding='SAME', name=name)

    def convlayers(self):
        """Build the five conv stages (2-2-3-3-3 conv layers, each followed by a pool)."""
        self.parameters = []

        with tf.name_scope('preprocess'):
            # Subtract the ImageNet channel means the pretrained weights expect.
            mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32,
                               shape=[1, 1, 1, 3], name='img_mean')
            images = self.imgs - mean

        self.conv1_1 = self._conv('conv1_1', images, 3, 64)
        self.conv1_2 = self._conv('conv1_2', self.conv1_1, 64, 64)
        self.pool1 = self._pool('pool1', self.conv1_2)

        self.conv2_1 = self._conv('conv2_1', self.pool1, 64, 128)
        self.conv2_2 = self._conv('conv2_2', self.conv2_1, 128, 128)
        self.pool2 = self._pool('pool2', self.conv2_2)

        self.conv3_1 = self._conv('conv3_1', self.pool2, 128, 256)
        self.conv3_2 = self._conv('conv3_2', self.conv3_1, 256, 256)
        self.conv3_3 = self._conv('conv3_3', self.conv3_2, 256, 256)
        self.pool3 = self._pool('pool3', self.conv3_3)

        self.conv4_1 = self._conv('conv4_1', self.pool3, 256, 512)
        self.conv4_2 = self._conv('conv4_2', self.conv4_1, 512, 512)
        self.conv4_3 = self._conv('conv4_3', self.conv4_2, 512, 512)
        self.pool4 = self._pool('pool4', self.conv4_3)

        self.conv5_1 = self._conv('conv5_1', self.pool4, 512, 512)
        self.conv5_2 = self._conv('conv5_2', self.conv5_1, 512, 512)
        self.conv5_3 = self._conv('conv5_3', self.conv5_2, 512, 512)
        # BUG FIX: the original named this op 'pool4', colliding with the
        # fourth pooling layer's op name in the graph.
        self.pool5 = self._pool('pool5', self.conv5_3)

    def _fc(self, name, bottom, n_in, n_out, relu=True):
        """Fully connected layer (weights init N(0, 0.1), bias init 1.0)."""
        with tf.name_scope(name):
            w = tf.Variable(tf.truncated_normal([n_in, n_out], dtype=tf.float32,
                                                stddev=1e-1), name='weights')
            b = tf.Variable(tf.constant(1.0, shape=[n_out], dtype=tf.float32),
                            trainable=True, name='biases')
            out = tf.nn.bias_add(tf.matmul(bottom, w), b)
            self.parameters += [w, b]
            return tf.nn.relu(out) if relu else out

    def fc_layers(self):
        """Build fc6/fc7 (ReLU) and the linear 1000-way fc8 logits (``fc3l``)."""
        # Flatten pool5; its static spatial shape must be known at graph time.
        shape = int(np.prod(self.pool5.get_shape()[1:]))
        pool5_flat = tf.reshape(self.pool5, [-1, shape])
        self.fc1 = self._fc('fc1', pool5_flat, shape, 4096)
        self.fc2 = self._fc('fc2', self.fc1, 4096, 4096)
        # Logits stay linear; softmax is applied separately in __init__.
        self.fc3l = self._fc('fc3', self.fc2, 4096, 1000, relu=False)

    def load_weights(self, weight_file, sess):
        """Assign pretrained weights from an .npz checkpoint.

        The sorted key order of the archive must match the order in which
        variables were appended to ``self.parameters`` during construction.
        """
        weights = np.load(weight_file)
        keys = sorted(weights.keys())
        for i, k in enumerate(keys):
            print(i, k, np.shape(weights[k]))
            sess.run(self.parameters[i].assign(weights[k]))
with tf.Session() as sess_vgg:
imgs = tf.placeholder(tf.float32, [None, 200, 200, 3])
vgg = vgg16(imgs, 'vgg16_weights.npz', sess_vgg)
img_files = ['./data/img/cropped/' + i for i in os.listdir('./data/img/cropped')]
imgs = [imread(file, mode='RGB') for file in img_files]
temps = [sess_vgg.run(vgg.fc1, feed_dict={vgg.imgs: [imgs[i]]})[0] for i in range(50)]
reimgs= np.reshape(a=temps, newshape=[50,-1])
sess_vgg.close()
reader = tf.TextLineReader()
key,value = reader.read(filename_queue_description)
record_defaults =[[-1], [-1], [-1], [-1], [-1], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2], [-2]]
lab1, lab2, lab3, lab4, lab5, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15 = tf.decode_csv(value, record_defaults)
feature_label = tf.stack([lab1, lab2, lab3, lab4, lab5])
feature_word = tf.stack([w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15])
with tf.Session() as sess_data:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
img_queue = []
for i in range(num_record):
# image = sess.run(images)
label, raw_word = sess_data.run([feature_label, feature_word])
onehot = tf.one_hot(indices=raw_word, depth=27)
if i == 0:
full_input = onehot
full_label = label
else:
full_input = tf.concat([full_input, onehot], 0)
full_label = tf.concat([full_label, label], 0)
# print(sess.run(tf.shape(image)))
# batch = tf.train.batch([image, label], 1)
# print(sess.run(batch))
coord.request_stop()
coord.join(threads)
sess_data.close()
with tf.name_scope('batch') as scope:
# full_label = tf.reshape(full_label, [batch_size, hidden, label_vec_size])
full_input = tf.reshape(full_input, [batch_size, hidden, input_vec_size])
input_batch, label_batch = tf.train.batch([full_input, full_input], batch_size=1)
with tf.name_scope('lstm_layer_1') as scope:
with tf.variable_scope('lstm_layer_1'):
rnn_cell_1 = tf.contrib.rnn.BasicLSTMCell(state_size_1, reuse=None)
output_1, _ = tf.contrib.rnn.static_rnn(rnn_cell_1, tf.unstack(full_input, axis=1), dtype=tf.float32)
# output_w_1 = tf.Variable(tf.truncated_normal([hidden, state_size_1, input_vec_size]))
# output_b_1 = tf.Variable(tf.zeros([input_vec_size]))
# pred_temp = tf.matmul(output_1, output_w_1) + output_b_1
with tf.Session() as sess_temp:
print(sess_temp.run(tf.shape(output_1)))
input_2 = [tf.concat([out, reimgs], axis=1) for out in output_1]
with tf.name_scope('lstm_layer_2') as scope:
with tf.variable_scope('lstm_layer_2'):
rnn_cell_2 = tf.contrib.rnn.BasicLSTMCell(state_size_2, reuse=None)
output_2, _ = tf.contrib.rnn.static_rnn(rnn_cell_2, tf.unstack(input_2, axis=0), dtype=tf.float32)
output_w_2 = tf.Variable(tf.truncated_normal([hidden, state_size_2, input_vec_size]))
output_b_2 = tf.Variable(tf.zeros([input_vec_size]))
pred = tf.nn.softmax(tf.matmul(output_2, output_w_2) + output_b_2)
# Loss: sum of per-timestep softmax cross-entropies between the network's
# next-character predictions and the (shifted) one-hot input characters.
with tf.name_scope('loss') as scope:
    loss = tf.constant(0, tf.float32)
    for i in range(hidden):
        # Compare timestep i of the target sequence with timestep i of pred.
        loss += tf.losses.softmax_cross_entropy(tf.unstack(full_input, axis=1)[i], tf.unstack(pred, axis=0)[i])
train = tf.train.AdamOptimizer(learning_rate).minimize(loss)

with tf.Session() as sess_train:
    sess_train.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    # Save once before training so a checkpoint exists even if training aborts.
    save_path = saver.save(sess_train, filepath_ckpt)
    for i in range(31):
        sess_train.run(train)
        if i % 5 == 0:
            # NOTE(review): this re-runs the graph just to report the loss.
            print("loss : ", sess_train.run(loss))
    # print("pred : ", sess.run(pred))
    # Overwrite the checkpoint with the trained weights.
    save_path = saver.save(sess_train, filepath_ckpt)
    print("= Weigths are saved in " + filepath_ckpt)
    sess_train.close()
with tf.Session() as sess_vgg_test:
imgs = tf.placeholder(tf.float32, [None, 200, 200, 3])
vgg = vgg16(imgs, 'vgg16_weights.npz', sess_vgg_test)
test_img_files = ['./data/img/cropped/001.png']
test_imgs = [imread(file, mode='RGB') for file in test_img_files]
# bilinear_test_imgs = [imresize(arr=img,interp='bilinear') for img in test_imgs]
temps = [sess_vgg_test.run(vgg.fc1, feed_dict={vgg.imgs: [img]})[0] for img in test_imgs]
test_reimgs= np.reshape(a=temps, newshape=[1,-1])
sess_vgg_test.close()
start_input = tf.zeros([1,15,27])
with tf.Session() as sess_init_generator:
input_init = sess_init_generator.run(start_input)
sos = [0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
input_init[0][0] = sos
with tf.name_scope('lstm_layer_1') as scope:
with tf.variable_scope('lstm_layer_1'):
rnn_cell_1 = tf.contrib.rnn.BasicLSTMCell(state_size_1, reuse=True)
output_test_1, _ = tf.contrib.rnn.static_rnn(rnn_cell_1, tf.unstack(input_init, axis=1), dtype=tf.float32)
# output_t_1 = tf.contrib.rnn.static_rnn(rnn_cell, tf.unstack(full_input, axis=1), dtype=tf.float32)
# pred = tf.nn.softmax(tf.matmul(output1, output_w[0]) + output_b[0])
input_2 = [tf.concat([out, test_reimgs], axis=1) for out in output_test_1]
with tf.name_scope('lstm_layer_2') as scope:
with tf.variable_scope('lstm_layer_2'):
rnn_cell_2 = tf.contrib.rnn.BasicLSTMCell(state_size_2, reuse=None)
output_2, _ = tf.contrib.rnn.static_rnn(rnn_cell_2, tf.unstack(input_2, axis=0), dtype=tf.float32)
output_w_2 = tf.Variable(tf.truncated_normal([hidden, state_size_2, input_vec_size]))
output_b_2 = tf.Variable(tf.zeros([input_vec_size]))
pred = tf.nn.softmax(tf.matmul(output_2, output_w_2) + output_b_2)
sess_model = tf.Session()
saver = tf.train.Saver(allow_empty=True)
saver.restore(sess_model, filepath_ckpt)
for i in range(hidden):
result = sess_model.run(pred)
result_temp = result[i]
if i == hidden -1:
pass
else:
input_init[0][i+1] = result_temp
print(result.shape)
decoded_result = np.argmax(a=result, axis=2)
print(result)
print(decoded_result)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualizing MNIST
Step2: Now it's your turn to plot some random representatives from each of 10 (obviously) available classes
Step3: The whole dataset is somewhat large so we restrict ourselves to the random subset of 5,000 images (corresponding indices are held in train_indices)
Step4: PCA, Multidimensional scaling (MDS) and Locally Linear Embedding (LLE)
Step5: Isomap
Step6: After it is done we can track how the appearance of the digit changes along the line. One can take the two most distant points as the endpoints of the interpolation segment. The following code should extract the closest points to the line. Use scipy.spatial.KDTree for fast nearest-neighbour computation.
Step8: Now we define a bunch of helper functions for interpolation visualization. Note the diplay_manifold_flythrough function. First two arguments are array of images and their coordinates on the 2D plane.
Step9: Now use diplay_manifold_flythrough to display the manifold fly-through over the scatter plot of the subset
Step11: Additionally create an animation of the interpolation. Specifically, you should obtain an animation similar to the one presented below
Step12: t-Distributed Stochastic Neighbor Embedding (t-SNE)
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
import os
from sklearn.datasets import fetch_mldata
# Fetch MNIST dataset and create a local copy.
if os.path.exists('mnist.npz'):
with np.load('mnist.npz', 'r') as data:
X = data['X']
y = data['y']
else:
mnist = fetch_mldata("MNIST original")
X, y = mnist.data / 255.0, mnist.target
np.savez('mnist.npz', X=X, y=y)
# Preferably arrange images as a 10x10 matrix.
n_train_samples = 5000
indices = np.arange(X.shape[0])
np.random.shuffle(indices)
train_indices = indices[: n_train_samples]
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import MDS, LocallyLinearEmbedding
# Your code goes here.
from sklearn.manifold import Isomap
indices_of_2 = np.arange(X.shape[0])[y == 2]
np.random.shuffle(indices_of_2)
train_indices_of_2 = indices_of_2[: n_train_samples]
# Your code goes here.
from scipy.spatial import KDTree
from scipy.spatial.distance import pdist, cdist, squareform
from scipy import linspace
def find_representatives(kdtree, from_point, to_point, n_points):
    """Return dataset indices encountered while walking from_point -> to_point.

    Samples ``n_points`` evenly spaced positions on the 2D segment, maps each
    to its nearest neighbour in ``kdtree`` (built over the embedded dataset),
    and drops consecutive duplicates so the fly-through does not linger on the
    same image.

    The original body was a placeholder that returned an undefined name.
    """
    from_point = np.asarray(from_point, dtype=float)
    to_point = np.asarray(to_point, dtype=float)
    # Parametrize the segment: t=0 -> from_point, t=1 -> to_point.
    ts = np.linspace(0.0, 1.0, n_points)
    waypoints = np.outer(1.0 - ts, from_point) + np.outer(ts, to_point)
    _, neighbours = kdtree.query(waypoints)
    representatives = []
    for idx in neighbours:
        if not representatives or representatives[-1] != idx:
            representatives.append(int(idx))
    return representatives
n_points = 100
# Your code starts here and shoudld end with:
# representatives = find_representatives(kdtree, X_embedded[from_idx, :], X_embedded[to_idx, :], n_points)
from IPython.display import HTML
from matplotlib import animation
# HTML5 template for an inline base64-encoded video; {0} receives the payload.
# (The triple-quote delimiters were lost in extraction, leaving a syntax error.)
VIDEO_TAG = """<video controls>
 <source src="data:video/x-m4v;base64,{0}" type="video/mp4">
 Your browser does not support the video tag.
</video>"""
def anim_to_html(anim):
    """Render a matplotlib animation to an inline base64 HTML5 video tag.

    The encoded video is cached on the animation object so repeated display
    does not re-run the ffmpeg encoder.
    """
    import base64

    if not hasattr(anim, '_encoded_video'):
        # NOTE(review): requires ffmpeg on PATH.
        anim.save('temp.mp4', fps=20, extra_args=['-vcodec', 'libx264'],
                  writer='ffmpeg')
        with open('temp.mp4', 'rb') as f:
            video = f.read()
        # BUG FIX: bytes.encode("base64") was Python 2 only; use base64 module.
        anim._encoded_video = base64.b64encode(video).decode('ascii')
    return VIDEO_TAG.format(anim._encoded_video)
def display_animation(anim):
    """Close the backing figure and return the animation as inline HTML video."""
    # Closing prevents the static final frame from rendering next to the video.
    plt.close(anim._fig)
    html_video = anim_to_html(anim)
    return HTML(html_video)
def diplay_manifold_flythrough(X, coords, fig, ax):
    # Animate image X[i] (a flattened 28x28 MNIST digit) sliding across the
    # 2D embedding: frame i shows X[i] positioned at coords[i] on top of ax.
    imagebox = OffsetImage(X[0].reshape(28, 28), cmap=plt.cm.gray_r)
    annbox = AnnotationBbox(imagebox, coords[0])
    ax.add_artist(annbox)
    def init():
        return imagebox, annbox
    def animate(i):
        # Swap in the i-th image and move the annotation box to its position.
        imagebox.set_data(X[i].reshape(28, 28))
        annbox.xyann = coords[i]
        return imagebox, annbox
    anim = animation.FuncAnimation(fig, animate, init_func=init, frames=X.shape[0], interval=20, blit=True)
    return display_animation(anim)
fig = plt.figure(figsize=(10, 10))
ax = plt.axes(frameon=False)
plt.setp(ax, xticks=(), yticks=())
# Your code starts here and should end with:
# diplay_manifold_flythrough(..., ..., fig, ax)
# Display the pre-rendered interpolation video inline.
# (The triple-quote delimiters were lost in extraction, leaving a syntax error.)
HTML("""<video controls>
 <source src='./interpolation.mp4' type='video/mp4'>
 Your browser does not support the video tag.
</video>""")
def display_interpolation(X, steps, fig):
    """Animate a smooth linear interpolation through the image sequence X.

    NOTE: First argument corresponds to the sequence of (flattened 28x28)
    images. Each of the ``steps`` animation frames blends the two images of
    the sequence surrounding its fractional position, morphing from X[0] to
    X[-1]. (The original body was an unfinished placeholder.)
    """
    n_images = X.shape[0]
    im = plt.imshow(X[0].reshape(28, 28), cmap=plt.cm.gray_r)
    def init():
        return im,
    def animate(i):
        # Map frame i in [0, steps) to a fractional position on the sequence
        # and linearly blend the two surrounding images.
        t = i * (n_images - 1) / max(steps - 1, 1)
        lo = int(np.floor(t))
        hi = min(lo + 1, n_images - 1)
        frac = t - lo
        img = ((1.0 - frac) * X[lo] + frac * X[hi]).reshape(28, 28)
        im.set_array(img)
        return im,
    anim = animation.FuncAnimation(fig, animate, init_func=init, frames=steps, interval=20, blit=True)
    return display_animation(anim)
fig = plt.figure(figsize=(3, 3))
ax = plt.axes(frameon=False)
plt.setp(ax, xticks=(), yticks=())
# Your code starts here and should end with:
# display_interpolation(.., 500, fig)
from sklearn.manifold import TSNE
# Your code goes here.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Constructing a Text Generation Model
Step2: Get the Dataset
Step3: First 10 Songs
Step4: Create Sequences and Labels
Step5: Train a Text Generation Model
Step6: View the Training Graph
Step7: Generate new lyrics!
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Other imports for processing data
import string
import numpy as np
import pandas as pd
!wget --no-check-certificate \
https://drive.google.com/uc?id=1LiJFZd41ofrWoBtW-pMYsfz1w8Ny0Bj8 \
-O /tmp/songdata.csv
def tokenize_corpus(corpus, num_words=-1):
    """Fit a Keras Tokenizer on the corpus.

    A ``num_words`` of -1 (the default) keeps the full vocabulary; any value
    above -1 caps the vocabulary at the ``num_words`` most frequent tokens.
    """
    tokenizer = Tokenizer(num_words=num_words) if num_words > -1 else Tokenizer()
    tokenizer.fit_on_texts(corpus)
    return tokenizer
def create_lyrics_corpus(dataset, field):
    """Lowercase the lyrics column, strip punctuation, and split into lines.

    Mutates ``dataset[field]`` in place (as callers rely on) and returns the
    list of non-empty, right-stripped lyric lines across all songs.
    """
    # str.translate removes every punctuation character in one pass and,
    # unlike the old regex character class built from string.punctuation,
    # cannot be broken by regex metacharacters such as ']' or '\\'.
    dataset[field] = dataset[field].str.translate(
        str.maketrans('', '', string.punctuation))
    # Make it lowercase
    dataset[field] = dataset[field].str.lower()
    # Make it one long string to split by line
    lyrics = dataset[field].str.cat()
    # Remove trailing whitespace and drop empty lines.
    corpus = [line.rstrip() for line in lyrics.split('\n')]
    return [line for line in corpus if line != '']
# Read the dataset from csv - just first 10 songs for now
dataset = pd.read_csv('/tmp/songdata.csv', dtype=str)[:10]
# Create the corpus using the 'text' column containing lyrics
corpus = create_lyrics_corpus(dataset, 'text')
# Tokenize the corpus
tokenizer = tokenize_corpus(corpus)
total_words = len(tokenizer.word_index) + 1
print(tokenizer.word_index)
print(total_words)
sequences = []
for line in corpus:
token_list = tokenizer.texts_to_sequences([line])[0]
for i in range(1, len(token_list)):
n_gram_sequence = token_list[:i+1]
sequences.append(n_gram_sequence)
# Pad sequences for equal input length
max_sequence_len = max([len(seq) for seq in sequences])
sequences = np.array(pad_sequences(sequences, maxlen=max_sequence_len, padding='pre'))
# Split sequences between the "input" sequence and "output" predicted word
input_sequences, labels = sequences[:,:-1], sequences[:,-1]
# One-hot encode the labels
one_hot_labels = tf.keras.utils.to_categorical(labels, num_classes=total_words)
# Check out how some of our data is being stored
# The Tokenizer has just a single index per word
print(tokenizer.word_index['know'])
print(tokenizer.word_index['feeling'])
# Input sequences will have multiple indexes
print(input_sequences[5])
print(input_sequences[6])
# And the one hot labels will be as long as the full spread of tokenized words
print(one_hot_labels[5])
print(one_hot_labels[6])
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional
model = Sequential()
model.add(Embedding(total_words, 64, input_length=max_sequence_len-1))
model.add(Bidirectional(LSTM(20)))
model.add(Dense(total_words, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(input_sequences, one_hot_labels, epochs=200, verbose=1)
import matplotlib.pyplot as plt
def plot_graphs(history, string):
    """Plot one training metric (e.g. 'accuracy') from a Keras History object."""
    metric_values = history.history[string]
    plt.plot(metric_values)
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.show()
plot_graphs(history, 'accuracy')
seed_text = "im feeling chills"
next_words = 100

# Invert the word index once instead of linearly scanning the whole
# vocabulary for every generated word (the original inner loop was O(V)).
index_to_word = {index: word for word, index in tokenizer.word_index.items()}

for _ in range(next_words):
    token_list = tokenizer.texts_to_sequences([seed_text])[0]
    token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
    predicted = int(np.argmax(model.predict(token_list), axis=-1)[0])
    # Index 0 is reserved for padding and never maps to a word, hence "".
    output_word = index_to_word.get(predicted, "")
    seed_text += " " + output_word
print(seed_text)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
<ASSISTANT_TASK:>
Python Code:
# Initialise the ES-DOC notebook output document for the CMIP6 "land" realm
# of the CNRM-CERFACS "sandbox-1" model.  Subsequent cells each select one
# specialization property via DOC.set_id(...) and record its value(s).
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cnrm-cerfacs', 'sandbox-1', 'land')
# Document author -- set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Document contributor -- set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
# Kept at 0 (do not publish) until the document is completed and reviewed.
DOC.set_publication_status(0)
# Each property cell follows the same pattern: set_id selects a property
# from the CMIP6 land specialization by its dotted path (do not edit the
# id), then the author records the answer with DOC.set_value(...).
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Static connectivity
Step2: Define the frequency band we are interested in examining, in Hz
Step3: Define the sampling frequency, in Hz
Step5: We will invoke the estimator using fully named (keyword) arguments. The last argument, pairs, is None by default, which means full connectivity (all channel pairs); otherwise, consult the documentation for the expected structure of its value.
Step6: Make the connectivity matrix symmetric
Step7: Plot
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import scipy
from scipy import io
eeg = np.load("data/eeg_eyes_opened.npy")
num_trials, num_channels, num_samples = np.shape(eeg)
eeg_ts = np.squeeze(eeg[0, :, :])
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from dyconnmap.fc import iplv
band = [1.0, 4.0]
sampling_frequency = 160.0
ts, avg = iplv(eeg_ts, fb=band, fs=sampling_frequency, pairs=None)
# The f-string below lost its quotation marks during extraction
# (`print(fTime series ...` is a SyntaxError); restored as a
# triple-quoted multi-line f-string with the same two report lines.
print(f"""Time series array shape: {np.shape(ts)}
Average time series array shape: {np.shape(avg)}""")
avg_symm = avg + avg.T
np.fill_diagonal(avg_symm, 1.0)
import matplotlib.pyplot as plt
mtx_min = 0.0 # we know it's 0.0 because of the estimator's properties
mtx_max = np.max(avg)
plt.figure(figsize=(6, 6))
cax = plt.imshow(avg_symm, vmin=mtx_min, vmax=mtx_max, cmap=plt.cm.Spectral)
cb = plt.colorbar(fraction=0.046, pad=0.04)
cb.ax.set_ylabel('Imaginary PLV', fontdict={'fontsize': 20})
plt.title('Connectivity Matrix', fontdict={'fontsize': 20})
plt.xlabel('ROI', fontdict={'fontsize': 20})
plt.ylabel('ROI', fontdict={'fontsize': 20})
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download forecast data from SPF
Step2: Manage forecast data
Step3: Download and manage data from FRED
Step4: Prepare dataset and export
Step5: Plots
Step6: Delete SPF data files
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import fredpy as fp
import requests
import os
import matplotlib.pyplot as plt
plt.style.use('classic')
%matplotlib inline
url = 'https://www.philadelphiafed.org/-/media/frbp/assets/surveys-and-data/survey-of-professional-forecasters/historical-data/inflation.xlsx?la=en&hash=F9C3E76769B4586C3E36E403DFA54BDC'
r = requests.get(url,verify=True)
with open("../xlsx/inflation.xlsx", "wb") as code:
code.write(r.content)
# Load data from Excel file
inflation_forecasts = pd.read_excel('../xlsx/inflation.xlsx',sheet_name='INFLATION')
# Construct DatatimeIndex
inflation_forecasts.index = pd.DatetimeIndex(inflation_forecasts['YEAR'].astype(int).astype(str)+'-Q'+inflation_forecasts['QUARTER'].astype(int).astype(str),freq='QS')
# Shift dates forward one year
inflation_forecasts.index = inflation_forecasts.index.shift(4)
# Drop unnecessary columns
inflation_forecasts = inflation_forecasts.drop(['YEAR','QUARTER','INFCPI10YR'],axis=1)
# Fill missing value for July 1974
inflation_forecasts = inflation_forecasts.interpolate(limit_area='inside')
# Rename remaining columns
inflation_forecasts.columns = ['DEFLATOR','CPI']
fig,ax = plt.subplots()
inflation_forecasts.plot(ax=ax,lw=2,grid=True)
ax.set_title('Inflation forecasts from SPF')
ax.set_ylabel('Percent');
# Download GDP deflator data
deflator = fp.series('GDPDEF').data
# Construct inflation rate over previous 4 quarters
deflator = 100*(deflator/deflator.shift(4)-1)
# Download CPI data
cpi = fp.series('CPIAUCSL').data
# Construct inflation rate over previous 12 months
cpi = 100*(cpi/cpi.shift(12)-1)
# Take only CPI inflation values for the last month of quarter, but date as first month of quarter
cpi = cpi.resample('Q').ffill()
cpi = cpi.resample('QS').bfill()
# Download 1 year T-bill rate
nominal_rate = fp.series('GS1').data
# Convert to quarterly frequency
nominal_rate = nominal_rate.asfreq('QS')
df = pd.concat([inflation_forecasts,deflator,cpi,nominal_rate],axis=1)
df = df[df['DEFLATOR'].notna()]
df.columns = [
'deflator inflation - forecast',
'cpi inflation - forecast',
'deflator inflation - actual',
'cpi inflation - actual',
'nominal interest'
]
df.to_csv('../csv/inflation_forecast_data.csv')
# Actual and forecasted CPI inflation
fig,ax = plt.subplots()
cpi = df[df['cpi inflation - forecast'].notna()][['cpi inflation - actual','cpi inflation - forecast']]
cpi.plot(legend=False,ax=ax,lw=2)
ax.legend(['Actual','Expected'],loc='center left', bbox_to_anchor=(1, 0.5))
ax.set_title('CPI Inflation')
ax.set_ylabel('Percent')
ax.grid()
# Actual and forecasted GDP deflator inflation
fig,ax = plt.subplots()
deflator = df[df['deflator inflation - forecast'].notna()][['deflator inflation - actual','deflator inflation - forecast']]
deflator.plot(legend=False,ax=ax,lw=2)
ax.legend(['Actual','Expected'],loc='center left', bbox_to_anchor=(1, 0.5))
ax.set_title('GDP Deflator Inflation')
ax.set_ylabel('Percent')
ax.grid()
# Figure for website
fig,ax = plt.subplots()
ax.plot(deflator['deflator inflation - actual'],'-b',lw=2,label='$\\pi$')
ax.plot(deflator['deflator inflation - forecast'],'--r',lw=2,label='$E\\pi$')
ax.fill_between(deflator.index,deflator['deflator inflation - forecast'],np.max([deflator['deflator inflation - actual'],deflator['deflator inflation - forecast']],axis=0),color='g',alpha=0.25,label='$\\pi>E\\pi$')
ax.fill_between(deflator.index,np.max([deflator['deflator inflation - actual'],deflator['deflator inflation - forecast']],axis=0),deflator['deflator inflation - actual'],color='r',alpha=0.25,label='$\\pi<E\\pi$')
fp.recessions(start=deflator.index[0],end=deflator.index[-1])
ax.grid()
ax.set_ylabel('Percent')
# ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=4,borderaxespad=0., mode="expand",fontsize=13)
plt.savefig('../png/fig_US_Inflation_Forecast_site.png',bbox_inches='tight')
os.remove('../xlsx/inflation.xlsx')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: plot() is a versatile command, and will take an arbitrary number of arguments. For example, to plot x versus y, you can issue the command
Step2: For every x, y pair of arguments, there is an optional third argument which is the format string that indicates the color and line type of the plot. The letters and symbols of the format string are from MATLAB, and you concatenate a color string with a line style string. The default format string is b-, which is a solid blue line. For example, to plot the above with red circles, you would chose ro.
Step3: matplotlib has a few methods in the pyplot module that make creating common types of plots faster and more convenient because they automatically create a Figure and an Axes object. The most widely used are
Step4: Exercise 4.1
Step5: We can use functions from Bio.SeqUtils to get idea about a sequence
Step6: One letter code protein sequences can be converted into three letter codes using seq3 utility
Step7: Alphabets defines how the strings are going to be treated as sequence object. Bio.Alphabet module defines the available alphabets for Biopython. Bio.Alphabet.IUPAC provides basic definition for DNA, RNA and proteins.
Step8: Parsing sequence file format
Step9: Biopython provides specific functions to allow parsing/reading sequence files.
Step10: Sequence objects can be written into files using file handles with the function SeqIO.write(). We need to provide the name of the output sequence file and the sequence file format.
Step11: Connecting with biological databases
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as mpyplot
mpyplot.plot([1,2,3,4])
mpyplot.ylabel('some numbers')
mpyplot.show()
mpyplot.plot([1,2,3,4], [1,4,9,16])
import matplotlib.pyplot as mpyplot
mpyplot.plot([1,2,3,4], [1,4,9,16], 'ro')
mpyplot.axis([0, 6, 0, 20])
mpyplot.show()
seq = 'ATGGTGCATCTGACTCCTGAGGAGAAGTCTGCCGTTACTGCCCTGTGGGGCAAGGTG'
gc = [40.0, 60.0, 80.0, 60.0, 40.0, 60.0, 40.0, 40.0, 40.0, 60.0,
40.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0,
60.0, 40.0, 40.0, 40.0, 40.0, 40.0, 60.0, 60.0, 80.0, 80.0,
80.0, 60.0, 40.0, 40.0, 20.0, 40.0, 60.0, 80.0, 80.0, 80.0,
80.0, 60.0, 60.0, 60.0, 80.0, 80.0, 100.0, 80.0, 60.0, 60.0,
60.0, 40.0, 60.0]
window_ids = range(len(gc))
import matplotlib.pyplot as mpyplot
mpyplot.plot(window_ids, gc, '--' )
mpyplot.xlabel('5 bases window id along the sequence')
mpyplot.ylabel('%GC')
mpyplot.title('GC plot for sequence\n' + seq)
mpyplot.show()
# Creating sequence
from Bio.Seq import Seq
my_seq = Seq("AGTACACTGGT")
print(my_seq)
print(my_seq[10])
print(my_seq[1:5])
print(len(my_seq))
print(my_seq.count("A"))
# Calculate the molecular weight
from Bio.SeqUtils import GC, molecular_weight
print(GC(my_seq))
print(molecular_weight(my_seq))
from Bio.SeqUtils import seq3
print(seq3(my_seq))
from Bio.Alphabet import IUPAC
my_dna = Seq("AGTACATGACTGGTTTAG", IUPAC.unambiguous_dna)
print(my_dna)
print(my_dna.alphabet)
my_dna.complement()
my_dna.reverse_complement()
my_dna.translate()
with open( "data/glpa.fa" ) as f:
print(f.read())
# Reading FASTA files
from Bio import SeqIO
with open("data/glpa.fa") as f:
for protein in SeqIO.parse(f, 'fasta'):
print(protein.id)
print(protein.seq)
# Writing FASTA files
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
sequence = 'MYGKIIFVLLLSEIVSISASSTTGVAMHTSTSSSVTKSYISSQTNDTHKRDTYAATPRAHEVSEISVRTVYPPEEETGERVQLAHHFSEPEITLIIFG'
seq = Seq(sequence, IUPAC.protein)
protein = [SeqRecord(seq, id="THEID", description='a description'),]
with open( "biopython.fa", "w") as f:
SeqIO.write(protein, f, 'fasta')
with open( "biopython.fa" ) as f:
print(f.read())
# Read FASTA file from NCBI GenBank
from Bio import Entrez
Entrez.email = 'A.N.Other@example.com' # Always tell NCBI who you are
handle = Entrez.efetch(db="nucleotide", id="71066805", rettype="gb")
seq_record = SeqIO.read(handle, "gb")
handle.close()
print(seq_record.id, 'with', len(seq_record.features), 'features')
print(seq_record.seq)
print(seq_record.format("fasta"))
# Read SWISSPROT record
from Bio import ExPASy
handle = ExPASy.get_sprot_raw('HBB_HUMAN')
prot_record = SeqIO.read(handle, "swiss")
handle.close()
print(prot_record.description)
print(prot_record.seq)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Numerical Data Types
Step2: Sequence Data Types
Step3: Tuple
Step4: Boolean
Step5: <a id='flowcontrol'/>
Step9: you see
Step10: Creating your own class
|
<ASSISTANT_TASK:>
Python Code:
# print something
print(c)
# what is hello_str
# reverse indexing
# stepwise indexing (start:stop:step)
my_dict = {'one':1, 'two':2, 'three':3}
print(my_dict['one'])
# how to test if certain key is in dict
print('one' in my_dict)
print('four' in my_dict)
### example immutable
x = 10
y = x
x = x + 1
print('y: ',y)
### example mutable
x_l = ['10']
y_l = x_l
x_l.append('11')
print('y_l: ', y_l)
y = 5
dir(y)
print(y.bit_length())
import time
class MinimalCake:
    """A bare-bones cake: ingredient amounts plus a stirring step.

    Parameter names are German (eier=eggs, mehl=flour, milch=milk) and
    are kept unchanged for callers; attributes use the English names.
    """

    def __init__(self, eier=2, mehl=500, milch=250):
        self.eggs, self.flour, self.milk = eier, mehl, milch
        # Tracks whether stir() has been called yet.
        self.stirred = False

    def stir(self):
        """Mix the ingredients into dough (simulated with a 2 s pause)."""
        print('stirring...')
        time.sleep(2)
        print('...dough')
        self.stirred = True
one_minimal_cake = MinimalCake()
print(one_minimal_cake.eggs, one_minimal_cake.stirred)
one_minimal_cake.stir()
print(one_minimal_cake.stirred)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Title
Step2: Resources
Step3: Run the model on a single batch of data, and inspect the output
Step4: Compile the model for training
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install -U tensorflow-addons
import tensorflow as tf
import tensorflow_addons as tfa
# Build the model
model = tf.keras.Sequential([
tf.keras.layers.Dense(10, activation='relu', input_shape=(None, 5)),
tf.keras.layers.Dense(3)
])
import numpy as np
result = model(tf.constant(np.random.randn(10,5), dtype = tf.float32)).numpy()
print("min:", result.min())
print("max:", result.max())
print("mean:", result.mean())
print("shape:", result.shape)
model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.categorical_crossentropy)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Trace-Them-All as a Profiler
Step2: The ASCII Format
Step3: Annotated metadata (like perftrace)
Step4: Call counts (like traceprof)
Step5: The Tree Spans Format
Step6: The Trace Event Format
|
<ASSISTANT_TASK:>
Python Code:
export CHGDISABLE=1
~/hg/hg debugpython << 'EOS'
from timeit import timeit
from bindings import tracing
def nop(): pass
@tracing.wrapfunc
def wrap(): pass
@tracing.wrapfunc
@tracing.meta(lambda: [("color", "blue")])
def wrap_meta(): pass
def bindings(tracer=tracing.singleton):
id = tracer.span([("name", "bindings")]); tracer.enter(id); tracer.exit(id)
base = timeit(nop, number=10000)
for f in [nop, wrap, wrap_meta, bindings]:
duration = timeit(f, number=10000)
print("{:>6.1f}x {:>6.0f}ns # {}".format(duration / base - 1, (duration - base) * 1e5, f.__name__))
EOS
cat > ~/print << EOF
#!$HOME/hg/hg debugpython
import sys
from bindings import tracing
t = tracing.tracingdata()
with t:
exec(sys.stdin.read())
if sys.argv[1] == 'ascii':
min_micros = int(sys.argv[2])
print(t.ascii(min_micros).strip())
elif sys.argv[1] == 'traceevent':
import json
print(json.dumps(t.traceevent(), sort_keys=True, indent=4, separators=(',', ': ')))
else:
import pprint
pprint.pprint(t.treespans())
EOF
chmod +x ~/print
~/print ascii 0 << 'EOS'
@tracing.wrapfunc
def fib(x):
return x if x <= 2 else fib(x - 1) + fib(x - 2)
fib(5)
EOS
~/print ascii 0 << 'EOS'
@tracing.wrapfunc
@tracing.meta(lambda x: [("x", str(x))])
def fib(x):
return x if x <= 2 else fib(x - 1) + fib(x - 2)
fib(4)
EOS
~/print ascii 50000 << 'EOS'
import time
sleep = tracing.wrapfunc(time.sleep)
for i in range(10):
sleep(0.01)
EOS
~/print treespans 0 << 'EOS'
@tracing.wrapfunc
def fact(x):
return x if x <= 1 else x * fact(x - 1)
fact(2)
EOS
echo 'tracing.wrapfunc(id)(1)' | ~/print traceevent
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Include an exploratory visualization of the dataset
Step3: Step 2
Step4: Model Architecture
Step5: Train, Validate and Test the Model
Step6: Step 3
Step7: Predict the Sign Type for Each Image
Step8: Analyze Performance
Step9: Output Top 5 Softmax Probabilities For Each Image Found on the Web
Step10: Note
|
<ASSISTANT_TASK:>
Python Code:
# Load pickled data
import pickle
from keras.datasets import cifar10
from sklearn.model_selection import train_test_split
# TODO: Fill this in based on where you saved the training and testing data
#training_file = "traffic-signs-data/train.p"
#validation_file = "traffic-signs-data/valid.p"
#testing_file = "traffic-signs-data/test.p"
#with open(training_file, mode='rb') as f:
# train = pickle.load(f)
#with open(validation_file, mode='rb') as f:
# valid = pickle.load(f)
#with open(testing_file, mode='rb') as f:
# test = pickle.load(f)
#X_train, y_train = train['features'], train['labels']
#X_valid, y_valid = valid['features'], valid['labels']
#X_test, y_test = test['features'], test['labels']
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.2, random_state=0)
# y_train.shape is 2d, (50000, 1). While Keras is smart enough to handle this
# it's a good idea to flatten the array.
y_train = y_train.reshape(-1)
y_test = y_test.reshape(-1)
y_valid = y_valid.reshape(-1)
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
# TODO: Number of training examples
n_train = len(X_train)
# TODO: Number of testing examples.
n_test = len(X_test)
# TODO: What's the shape of an traffic sign image?
image_shape = X_train[0].shape
# TODO: How many unique classes/labels there are in the dataset.
n_classes = len(set(y_valid))
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
# Visualizations will be shown in the notebook.
%matplotlib inline
label_name = ["" for x in range(43)]
label_name[0] = "Speed limit (20km/h)"
label_name[1] = "Speed limit (30km/h)"
label_name[2] = "Speed limit (50km/h)"
label_name[3] = "Speed limit (60km/h)"
label_name[4] = "Speed limit (70km/h)"
label_name[5] = "Speed limit (80km/h)"
label_name[6] = "End of speed limit (80km/h)"
label_name[7] = "Speed limit (100km/h)"
label_name[8] = "Speed limit (120km/h)"
label_name[9] = "No passing"
label_name[10] = "No passing for vehicles over 3.5 metric tons"
label_name[11] = "Right-of-way at the next intersection"
label_name[12] = "Priority road"
label_name[13] = "Yield"
label_name[14] = "Stop"
label_name[15] = "No vehicles"
label_name[16] = "Vehicles over 3.5 metric tons prohibited"
label_name[17] = "No entry"
label_name[18] = "General caution"
label_name[19] = "Dangerous curve to the left"
label_name[20] = "Dangerous curve to the right"
label_name[21] = "Double curve"
label_name[22] = "Bumpy road"
label_name[23] = "Slippery road"
label_name[24] = "Road narrows on the right"
label_name[25] = "Road work"
label_name[26] = "Traffic signals"
label_name[27] = "Pedestrians"
label_name[28] = "Children crossing"
label_name[29] = "Bicycles crossing"
label_name[30] = "Beware of ice/snow"
label_name[31] = "Wild animals crossing"
label_name[32] = "End of all speed and passing limits"
label_name[33] = "Turn right ahead"
label_name[34] = "Turn left ahead"
label_name[35] = "Ahead only"
label_name[36] = "Go straight or right"
label_name[37] = "Go straight or left"
label_name[38] = "Keep right"
label_name[39] = "Keep left"
label_name[40] = "Roundabout mandatory"
label_name[41] = "End of no passing"
label_name[42] = "End of no passing by vehicles over 3.5 metric tons"
unique_label = set(y_valid)
maximum_traffic_signs_to_print = 20
def print_overite(title):
    """Print *title* ending with a carriage return (no newline), so
    successive progress messages overwrite each other on one console line.

    NOTE(review): the name looks like a typo for "print_overwrite";
    kept as-is because the drawing functions below call it by this name.
    """
    print(title, end='\r')
def fast_draw_all_traffic_signs_in_different_images():
    """Draw up to `maximum_traffic_signs_to_print` validation examples of
    each class, one small one-row figure per class, printing the class
    name (from `label_name`) before each figure.

    Reads module-level globals: X_valid, y_valid, unique_label,
    label_name, maximum_traffic_signs_to_print.
    """
    size_dataset = len(y_valid)
    for label in unique_label:
        plt.figure(figsize=(16,0.8))
        traffic_sign_index = 0
        number_of_signs_printed = 0
        # Linear scan over the whole validation set; stops early once
        # enough examples of the current class have been drawn.
        for i in range(size_dataset):
            #title = "Label: " + str(label) + "/" + str(len(unique_label) - 1) + " processing " + str(i) + "/" + str(size_dataset - 1)
            #print_overite(title)
            if (y_valid[i] == label):
                traffic_signs = plt.subplot(1, maximum_traffic_signs_to_print, traffic_sign_index+1)
                traffic_signs.imshow(X_valid[i], interpolation='nearest')
                traffic_signs.axis('off')
                traffic_sign_index += 1
                number_of_signs_printed += 1
                if (number_of_signs_printed == maximum_traffic_signs_to_print):
                    break
        print(str(label+1) + "/" + str(len(unique_label)) + " - " + label_name[label])
        plt.show()
def slow_draw_all_traffic_signs_in_one_image():
    """Draw all classes into a single grid figure: one row per class,
    up to `maximum_traffic_signs_to_print` validation examples per row.

    Much slower than the per-class variant because one huge subplot
    grid is allocated up front.  Reads module-level globals: X_valid,
    y_valid, unique_label, maximum_traffic_signs_to_print.
    """
    number_of_labels = len(unique_label)
    size = 0.8  # inches per cell
    width = size * maximum_traffic_signs_to_print
    height = size * number_of_labels
    size_dataset = len(y_valid)
    print("Number of labels: " + str(number_of_labels))
    print("Total size of the image w:" + str(width) + " h:" + str(height))
    print("Total size of the dataset: " + str(size_dataset))
    print("Creating subplots, this might take a long time...")
    fig, traffic_signs = plt.subplots(number_of_labels, maximum_traffic_signs_to_print, figsize=(width,height))
    for label in range(number_of_labels):
        traffic_sign_index = 0
        number_of_signs_printed = 0
        for i in range(size_dataset):
            # Progress indicator overwrites itself on one console line.
            title = "Processing label: " + str(label) + "/" + str(number_of_labels - 1)
            print_overite(title)
            if (y_valid[i] == label):
                traffic_signs[label][traffic_sign_index].imshow(X_valid[i], interpolation='nearest')
                traffic_signs[label][traffic_sign_index].axis('off')
                traffic_sign_index += 1
                number_of_signs_printed += 1
                if (number_of_signs_printed == maximum_traffic_signs_to_print):
                    break
    print()
    print("Painting...")
print("Painting...")
fast_draw_all_traffic_signs_in_different_images()
#slow_draw_all_traffic_signs_in_one_image()
### Preprocess the data here. Preprocessing steps could include normalization, converting to grayscale, etc.
### Feel free to use as many code cells as needed.
#X_train, y_train = train['features'], train['labels']
#X_valid, y_valid = valid['features'], valid['labels']
#X_test, y_test = test['features'], test['labels']
import cv2
import tensorflow as tf
import numpy as np
def preprocess(x):
    """Convert a batch of RGB images to grayscale with a channel axis.

    Returns an array shaped (N, 32, 32, 1), matching the network input.
    """
    grayscale = []
    for image in x:
        grayscale.append(cv2.cvtColor(image, cv2.COLOR_RGB2GRAY))
    return np.reshape(grayscale, (-1, 32, 32, 1))
X_train = preprocess(X_train)
X_valid = preprocess(X_valid)
X_test = preprocess(X_test)
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
### Define your architecture here.
### Feel free to use as many code cells as needed.
from tensorflow.contrib.layers import flatten
def LeNet(x):
    """Build the LeNet-5 graph in TF1 graph mode.

    x: placeholder of shape (batch, 32, 32, 1) grayscale images.
    Returns the logits tensor of shape (batch, n_classes); reads the
    module-level `n_classes`.  Weights use truncated-normal init with
    the mu/sigma hyperparameters below.
    """
    # Hyperparameters
    mu = 0
    sigma = 0.1
    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean = mu, stddev = sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
    # Activation.
    conv1 = tf.nn.relu(conv1)
    # Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
    # Activation.
    conv2 = tf.nn.relu(conv2)
    # Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)
    # Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b
    # Activation.
    fc1 = tf.nn.relu(fc1)
    # Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b
    # Activation.
    fc2 = tf.nn.relu(fc2)
    # Layer 5: Fully Connected. Input = 84. Output = n_classes.
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, n_classes), mean = mu, stddev = sigma))
    fc3_b = tf.Variable(tf.zeros(n_classes))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
    return logits
### Train your model here.
### Calculate and report the accuracy on the training and validation set.
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.
#Constants
EPOCHS = 10
BATCH_SIZE = 128
# Features and Labels
x = tf.placeholder(tf.float32, (None, 32, 32, 1))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, n_classes)
#Training Pipeline
rate = 0.001
logits = LeNet(x)
# tf.nn.softmax_cross_entropy_with_logits requires named arguments in
# TensorFlow >= 1.x; the original positional call (logits, one_hot_y)
# is rejected there.  Keywords also make the labels/logits roles explicit.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
#Model Evaluation
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
    """Return mean accuracy over (X_data, y_data), evaluated in
    BATCH_SIZE chunks against the default TF session.

    Relies on module-level globals: BATCH_SIZE, accuracy_operation and
    the placeholders x, y.  Per-batch accuracies are weighted by batch
    length so the trailing partial batch is handled correctly.
    """
    num_examples = len(X_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples
#Train the Model
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
num_examples = len(X_train)
print("Training...")
print()
for i in range(EPOCHS):
X_train, y_train = shuffle(X_train, y_train)
for offset in range(0, num_examples, BATCH_SIZE):
end = offset + BATCH_SIZE
batch_x, batch_y = X_train[offset:end], y_train[offset:end]
sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
print("EPOCH {}".format(i+1) + ", accuracy: {:.3f}".format(evaluate(X_valid, y_valid)))
saver.save(sess, './lenet')
print("Model saved")
#Evaluate accuracy of the system
def evaluate(X_data, y_data):
    """Return mean accuracy over (X_data, y_data) in BATCH_SIZE chunks.

    NOTE(review): this is a byte-for-byte redefinition of the evaluate()
    defined in the training cell above (a notebook-cell duplication);
    it shadows that definition and could be removed.
    """
    num_examples = len(X_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples
#Model Evaluation
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def evaluate_accuracy(kind, x, y):
    """Restore the latest checkpoint and print "<kind> Accuracy = ..."
    for the dataset (x, y).

    NOTE(review): the parameters x and y shadow the module-level
    placeholder tensors of the same names; evaluate() still feeds the
    global placeholders, so this works, but the shadowing is confusing.
    """
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint('.'))
        accuracy = evaluate(x, y)
        print(kind + " Accuracy = {:.3f}".format(accuracy))
evaluate_accuracy("Validation", X_valid, y_valid)
evaluate_accuracy("Training", X_train, y_train)
evaluate_accuracy("Test", X_test, y_test)
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
import matplotlib.image as mpimg
#Images from http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset#Downloads
#newImageFileNames = [
# "new-images/00000.ppm",
# "new-images/00001.ppm",
# "new-images/00002.ppm",
# "new-images/00003.ppm",
# "new-images/00004.ppm"
#]
#y_newImages = [16, 1, 38, 33, 11]
#Images from Google search https://www.google.co.uk/search?q=german+road+signs
newImageFileNames = [
"random-web-images/label_1.jpg",
"random-web-images/label_17.jpg",
"random-web-images/label_18.jpg",
"random-web-images/label_25.jpg",
"random-web-images/label_28.jpg"
]
y_newImages = [1, 17, 18, 25, 28]
newImages = [mpimg.imread(newImageFileName) for newImageFileName in newImageFileNames]
def displayImages(images):
    """Show every image in *images* side by side on one row of subplots."""
    total = len(images)
    for position, img in enumerate(images, start=1):
        axes = plt.subplot(1, total, position)
        axes.imshow(img)
displayImages(newImages)
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
#Pre-Process images
newImages = [cv2.resize(newImage, (32, 32)) for newImage in newImages]
X_newImages = preprocess(newImages).astype(np.float32)
def predict_signs(X):
    """Restore the latest checkpoint and return the predicted class id
    (argmax over softmax probabilities) for each image in batch *X*.

    X must already be preprocessed to shape (N, 32, 32, 1).  Relies on
    module-level `logits`, the `x` placeholder and `saver`.
    """
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint('.'))
        softmax = tf.nn.softmax(logits)
        label_predictions = sess.run(softmax, feed_dict={x: X})
        return [np.argmax(label_prediction) for label_prediction in label_predictions]
predicted_labels = predict_signs(X_newImages)
print("Predicted labels: " + str(predicted_labels))
print("Correct labels: " + str(y_newImages))
### Calculate the accuracy for these 5 new images.
### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.
def ratio_correct_matches(predicted_labels, correct_labels):
    """Return the percentage (0-100) of positions where the predicted
    label equals the correct label.

    The two sequences are compared element-wise over the length of
    `predicted_labels`.  Returns 0.0 for empty input instead of raising
    ZeroDivisionError (the original crashed on an empty list).
    """
    if not predicted_labels:
        return 0.0
    matches = sum(p == c for p, c in zip(predicted_labels, correct_labels))
    return matches * 100 / len(predicted_labels)
ratio_success = ratio_correct_matches(predicted_labels, y_newImages)
print("Accuracy is " + str(ratio_success) + "%")
### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.
### Feel free to use as many code cells as needed.
def softmax_top_probabilities(X, number_of_probabilities):
    """Restore the latest checkpoint and return the top-k softmax result
    (a TopKV2 with .values and .indices arrays, one row per image).

    X: preprocessed batch of shape (N, 32, 32, 1);
    number_of_probabilities: k, how many top classes to keep per image.
    Relies on module-level `logits`, the `x` placeholder and `saver`.
    Note: tf.nn.top_k adds a new op to the graph on every call; fine
    for a one-off analysis cell.
    """
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint('.'))
        softmax = tf.nn.softmax(logits)
        softmax_probabilities = sess.run(softmax, feed_dict={x: X})
        top_probabilities = tf.nn.top_k(softmax_probabilities, number_of_probabilities)
        return sess.run(top_probabilities)
top_probabilities = softmax_top_probabilities(X_newImages, 5)
for i in range(len(X_newImages)):
print("Top five softmax probabilities for image number " + str(i+1))
print(top_probabilities.values[i])
print("-> top five labels: " + str(top_probabilities.indices[i]))
print()
#Traffic sign General Caution (label 18) analysis
problematic_label = 18
general_caution = []
general_caution_label = []
for i in range(len(y_test)):
if (y_test[i] == problematic_label):
general_caution.append(X_test[i])
general_caution_label.append(problematic_label)
evaluate_accuracy("General Caution", general_caution, general_caution_label)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Basic Linear Model
Step2: 2. 1-Layer Neural Network
|
<ASSISTANT_TASK:>
Python Code:
import keras
# Constants
Num_Classes = Num_Classes
batch_size = 4
lr = 0.01
# Helper Functions
# get_batches(..) copied from utils.py
# gen.flow_from_directory() is an iterator that yields batches of images
# from a directory indefinitely.
from keras.preprocessing import image
def get_batches(dirname, gen=image.ImageDataGenerator(), shuffle=True, batch_size=4, class_mode='categorical',
                target_size=(224,224)):
    """Return a Keras directory iterator yielding (images, labels) batches
    from *dirname* indefinitely.

    Fixes: the parameter was misspelled ``batche_size`` while the body
    passed the module-level ``batch_size`` global, so the argument was
    silently ignored -- and the caller get_data(..., batch_size=1) below
    raised TypeError.  Renamed to ``batch_size`` to match that caller.
    NOTE(review): the ImageDataGenerator default is evaluated once at
    definition time and shared across calls.
    """
    return gen.flow_from_directory(dirname, target_size=target_size,
                        class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)
# fast array saving/loading
import bcolz
# Persist *arr* to disk as a bcolz carray rooted at directory *fname* (overwrites).
def save_array(fname, arr): c=bcolz.carray(arr, rootdir=fname, mode='w'); c.flush()
# Load the bcolz carray rooted at *fname* and materialise it as an in-memory array.
def load_array(fname): return bcolz.open(fname)[:]
# One-Hot Encoding for Keras
from sklearn.preprocessing import OneHotEncoder
def onehot(x): return np.array(OneHotEncoder().fit_transform(x.reshape(-1, 1))).todense()
# should I use that or from Keras?
# def onehot(x): return keras.utils.np_utils.to_categorical(x)
# from utils.py -- retrieving data saved by bcolz
def get_data(path, target_size=(224,224)):
    """Load every image under *path* (unshuffled, one image per batch)
    and stack them into a single array of shape (N, *target_size, 3).

    NOTE(review): `batches.nb_sample` is the pre-Keras-2 attribute name
    (Keras 2 calls it `samples`) -- confirm against the installed version.
    """
    batches = get_batches(path, shuffle=False, batch_size=1, class_mode=None, target_size=target_size)
    # next(batches) instead of batches.next(): the latter is Python-2 only.
    return np.concatenate([next(batches) for i in range(batches.nb_sample)])
# Basic linear model: a single dense layer mapping 784 inputs to class scores.
# Fixes: `keras.model` is not a module (it is `keras.models`), and Dense/SGD
# were never imported -- qualify them through the imported `keras` package.
LM = keras.models.Sequential([keras.layers.Dense(Num_Classes, input_shape=(784,))])
LM.compile(optimizer=keras.optimizers.SGD(lr=0.01), loss='mse')
# LM.compile(optimizer=keras.optimizers.RMSprop(lr=0.01), loss='mse')
import os, sys
# Fix: the original swapped the arguments -- os.path.join(1, '../utils/')
# passes an int to path-join (TypeError) and leaves sys.path.insert with a
# single argument; insert takes (index, path).
sys.path.insert(1, os.path.join('..', 'utils'))
# NOTE(review): module name capitalisation -- confirm the file really is
# Vgg16.py (not vgg16.py) in ../utils.
import Vgg16
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ec-earth-consortium', 'sandbox-2', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Persistent random walk model
Step2: Online study
Step3: <div style="background-color
Step4: Before any data points are passed to the study instance, we further provide prior probabilities for the two scenarios. We expect about one news announcement containing unexpected information per day and set a prior probability of $1/390$ for the chaotic scenario (one normal trading day consists of 390 trading minutes).
Step5: Finally, we can supply log-return values to the study instance, data point by data point. We use the step method to infer new parameter estimates and the updated probabilities of the two scenarios. Note that in this example, we use a for loop to feed all data points to the algorithm because all data points are already available. In a real application of the OnlineStudy class, one can supply each new data point as it becomes available and analyze it in real-time.
Step6: Volatility spikes
Step7: Note that the volatility estimates of the first few trading minutes are not as accurate as later ones, as we initialize the algorithm with a non-informative prior distribution. One could of course provide a custom prior distribution as a more realistic starting point. Despite this fade-in period, the period of increased volatility after market open is captured nicely, as well as the (more subtle) increase in volatility during the last 45 minutes of the trading day. Large individual log-return values also result in volatility spikes (around 10
Step8: As a correlation coefficient that deviates significantly from zero would be immediately exploitable to predict future price movements, we mostly find correlation values near zero (in accordance with the efficient market hypothesis). However, between 1
Step9: Automatic tuning
Step10: Real-time model selection
Step11: As expected, the probability that the chaotic scenario can explain all past log-return values at a given point in time quickly falls off to practically zero. Indeed, a correlated random walk with slowly changing volatility and correlation of subsequent returns is better suited to describe the price fluctuations of SPY in the majority of time steps.
Step12: Here, we find clear peaks indicating an increased probability for the chaotic scenario, i.e. that previously gained information about the market dynamics has become useless. Let's assume that we are concerned about market behavior as soon as there is at least a 1% risk that normal market dynamics can not describe the current price movement. This leaves us with three distinct events in the following time steps
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import bayesloop as bl
import sympy.stats as stats
from tqdm import tqdm_notebook
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_color_codes() # use seaborn colors
# minute-scale pricing data
prices = np.array(
[ 221.14 , 221.09 , 221.17 , 221.3 , 221.3 , 221.26 ,
221.32 , 221.17 , 221.2 , 221.27 , 221.19 , 221.12 ,
221.08 , 221.1 , 221.075, 221.03 , 221.04 , 221.03 ,
221.11 , 221.14 , 221.135, 221.13 , 221.04 , 221.15 ,
221.21 , 221.25 , 221.21 , 221.17 , 221.21 , 221.2 ,
221.21 , 221.17 , 221.1 , 221.13 , 221.18 , 221.15 ,
221.2 , 221.2 , 221.23 , 221.25 , 221.25 , 221.25 ,
221.25 , 221.22 , 221.2 , 221.15 , 221.18 , 221.13 ,
221.1 , 221.08 , 221.13 , 221.09 , 221.08 , 221.07 ,
221.09 , 221.1 , 221.06 , 221.1 , 221.11 , 221.18 ,
221.26 , 221.46 , 221.38 , 221.35 , 221.3 , 221.18 ,
221.18 , 221.18 , 221.17 , 221.175, 221.13 , 221.03 ,
220.99 , 220.97 , 220.9 , 220.885, 220.9 , 220.91 ,
220.94 , 220.935, 220.84 , 220.86 , 220.89 , 220.91 ,
220.89 , 220.84 , 220.83 , 220.74 , 220.755, 220.72 ,
220.69 , 220.72 , 220.79 , 220.79 , 220.81 , 220.82 ,
220.8 , 220.74 , 220.75 , 220.73 , 220.69 , 220.72 ,
220.73 , 220.69 , 220.71 , 220.72 , 220.8 , 220.81 ,
220.79 , 220.8 , 220.79 , 220.74 , 220.77 , 220.79 ,
220.87 , 220.86 , 220.92 , 220.92 , 220.88 , 220.87 ,
220.88 , 220.87 , 220.94 , 220.93 , 220.92 , 220.94 ,
220.94 , 220.9 , 220.94 , 220.9 , 220.91 , 220.85 ,
220.85 , 220.83 , 220.85 , 220.84 , 220.87 , 220.91 ,
220.85 , 220.77 , 220.83 , 220.79 , 220.78 , 220.78 ,
220.79 , 220.83 , 220.87 , 220.88 , 220.9 , 220.97 ,
221.05 , 221.02 , 221.01 , 220.99 , 221.04 , 221.05 ,
221.06 , 221.07 , 221.12 , 221.06 , 221.07 , 221.03 ,
221.01 , 221.03 , 221.03 , 221.01 , 221.02 , 221.04 ,
221.04 , 221.07 , 221.105, 221.1 , 221.09 , 221.08 ,
221.07 , 221.08 , 221.03 , 221.06 , 221.1 , 221.11 ,
221.11 , 221.18 , 221.2 , 221.34 , 221.29 , 221.235,
221.22 , 221.2 , 221.21 , 221.22 , 221.19 , 221.17 ,
221.19 , 221.13 , 221.13 , 221.12 , 221.14 , 221.11 ,
221.165, 221.19 , 221.18 , 221.19 , 221.18 , 221.15 ,
221.16 , 221.155, 221.185, 221.19 , 221.2 , 221.2 ,
221.16 , 221.18 , 221.16 , 221.11 , 221.07 , 221.095,
221.08 , 221.08 , 221.09 , 221.11 , 221.08 , 221.08 ,
221.1 , 221.08 , 221.11 , 221.07 , 221.11 , 221.1 ,
221.09 , 221.07 , 221.14 , 221.12 , 221.08 , 221.09 ,
221.05 , 221.08 , 221.065, 221.05 , 221.06 , 221.085,
221.095, 221.07 , 221.05 , 221.09 , 221.1 , 221.145,
221.12 , 221.14 , 221.12 , 221.12 , 221.12 , 221.11 ,
221.14 , 221.15 , 221.13 , 221.12 , 221.11 , 221.105,
221.105, 221.13 , 221.14 , 221.1 , 221.105, 221.105,
221.11 , 221.13 , 221.15 , 221.11 , 221.13 , 221.08 ,
221.11 , 221.12 , 221.12 , 221.12 , 221.13 , 221.15 ,
221.18 , 221.21 , 221.18 , 221.15 , 221.15 , 221.15 ,
221.15 , 221.15 , 221.13 , 221.13 , 221.16 , 221.13 ,
221.11 , 221.12 , 221.09 , 221.07 , 221.06 , 221.04 ,
221.06 , 221.09 , 221.07 , 221.045, 221. , 220.99 ,
220.985, 220.95 , 221. , 221.01 , 221.005, 220.99 ,
221.03 , 221.055, 221.06 , 221.03 , 221.03 , 221.03 ,
221. , 220.95 , 220.96 , 220.97 , 220.965, 220.97 ,
220.94 , 220.93 , 220.9 , 220.9 , 220.9 , 220.91 ,
220.94 , 220.92 , 220.94 , 220.91 , 220.92 , 220.935,
220.875, 220.89 , 220.91 , 220.92 , 220.93 , 220.93 ,
220.91 , 220.9 , 220.89 , 220.9 , 220.9 , 220.93 ,
220.94 , 220.92 , 220.93 , 220.88 , 220.88 , 220.86 ,
220.9 , 220.92 , 220.85 , 220.83 , 220.83 , 220.795,
220.81 , 220.78 , 220.7 , 220.69 , 220.6 , 220.58 ,
220.61 , 220.63 , 220.68 , 220.63 , 220.63 , 220.595,
220.66 , 220.645, 220.64 , 220.6 , 220.579, 220.53 ,
220.53 , 220.5 , 220.42 , 220.49 , 220.49 , 220.5 ,
220.475, 220.405, 220.4 , 220.425, 220.385, 220.37 ,
220.49 , 220.46 , 220.45 , 220.48 , 220.51 , 220.48 ]
)
plt.figure(figsize=(8,2))
plt.plot(prices)
plt.ylabel('price [USD]')
plt.xlabel('Nov 28, 2016')
plt.xticks([30, 90, 150, 210, 270, 330, 390],
['10am', '11am', '12pm', '1pm', '2pm', '3pm', '4pm'])
plt.xlim([0, 390]);
# Work with log-returns: differences of consecutive log-prices, the
# standard stationary-ish quantity for modeling price fluctuations.
logPrices = np.log(prices)
logReturns = np.diff(logPrices)  # 389 returns from 390 minute prices
plt.figure(figsize=(8,2))
plt.plot(np.arange(1, 390), logReturns, c='r')
plt.ylabel('log-returns')
plt.xlabel('Nov 28, 2016')
plt.xticks([30, 90, 150, 210, 270, 330, 390],
['10am', '11am', '12pm', '1pm', '2pm', '3pm', '4pm'])
plt.yticks([-0.001, -0.0005, 0, 0.0005, 0.001])
plt.xlim([0, 390]);
# Online study: parameters are inferred sequentially, data point by data
# point, so each new log-return could be analyzed in real time.
S = bl.OnlineStudy(storeHistory=True)
# Observation model: scaled AR(1) process with correlation coefficient
# 'rho' on (-1, 1) and volatility 'sigma' on (0, 0.006).
L = bl.om.ScaledAR1('rho', bl.oint(-1, 1, 100),
                    'sigma', bl.oint(0, 0.006, 400))
S.set(L)
# "Normal" scenario: rho and sigma drift slowly via Gaussian random walks
# with hyper-parameters s1 and s2 (magnitude of each random walk step).
T1 = bl.tm.CombinedTransitionModel(
    bl.tm.GaussianRandomWalk('s1', bl.cint(0, 1.5e-01, 15), target='rho'),
    bl.tm.GaussianRandomWalk('s2', bl.cint(0, 1.5e-04, 50), target='sigma')
)
# "Chaotic" scenario: parameters become independent of the past, modeling
# an abrupt break in market dynamics (e.g. unexpected news).
T2 = bl.tm.Independent()
S.add('normal', T1)
S.add('chaotic', T2)
# Prior scenario probabilities: expect about one chaotic event per trading
# day; one normal trading day has 390 trading minutes.
S.setTransitionModelPrior([389/390., 1/390.])
# Feed the log-returns to the study one value at a time (in a real
# application each S.step call would happen as a new data point arrives).
for r in tqdm_notebook(logReturns):
    S.step(r)
plt.figure(figsize=(8, 4.5))
# data plot
plt.subplot(211)
plt.plot(np.arange(1, 390), logReturns, c='r')
plt.ylabel('log-returns')
plt.xticks([30, 90, 150, 210, 270, 330, 390],
['10am', '11am', '12pm', '1pm', '2pm', '3pm', '4pm'])
plt.yticks([-0.001, -0.0005, 0, 0.0005, 0.001])
plt.xlim([0, 390])
# parameter plot
plt.subplot(212)
S.plot('sigma', color='r')
plt.xticks([28, 88, 148, 208, 268, 328, 388],
['10am', '11am', '12pm', '1pm', '2pm', '3pm', '4pm'])
plt.xlabel('Nov 28, 2016')
plt.ylim([0, 0.00075])
plt.xlim([-2, 388]);
plt.figure(figsize=(8, 4.5))
# data plot
plt.subplot(211)
plt.plot(prices)
plt.ylabel('price [USD]')
plt.xticks([30, 90, 150, 210, 270, 330, 390],
['10am', '11am', '12pm', '1pm', '2pm', '3pm', '4pm'])
plt.xlim([0, 390])
# parameter plot
plt.subplot(212)
S.plot('rho', color='#0000FF')
plt.xticks([28, 88, 148, 208, 268, 328, 388],
['10am', '11am', '12pm', '1pm', '2pm', '3pm', '4pm'])
plt.xlabel('Nov 28, 2016')
plt.ylim([-0.4, 0.4])
plt.xlim([-2, 388]);
# extract parameter grid values (rho) and corresponding prob. values (p)
rho, p = S.getParameterDistributions('rho')
# evaluate Prob.(rho < 0) for all time steps
P = bl.Parser(S)
p_neg_rho = np.array([P('rho < 0.', t=t, silent=True) for t in range(1, 389)])
# plotting
plt.figure(figsize=(8, 4.5))
plt.subplot(211)
plt.axhline(y=0.8, lw=1.5, c='g')
plt.plot(p_neg_rho, lw=1.5, c='k')
plt.fill_between(np.arange(len(p_neg_rho)), 0, p_neg_rho > 0.8, lw=0, facecolor='g', alpha=0.5)
plt.xticks([28, 88, 148, 208, 268, 328, 388],
['10am', '11am', '12pm', '1pm', '2pm', '3pm', '4pm'])
plt.ylabel('prob. of neg. corr.')
plt.xlim([-2, 388])
plt.subplot(212)
plt.plot(prices)
plt.fill_between(np.arange(2, len(p_neg_rho)+2), 220.2, 220.2 + (p_neg_rho > 0.8)*1.4, lw=0, facecolor='g', alpha=0.5)
plt.ylabel('price [USD]')
plt.xticks([30, 90, 150, 210, 270, 330, 390],
['10am', '11am', '12pm', '1pm', '2pm', '3pm', '4pm'])
plt.xlim([0, 390])
plt.ylim([220.2, 221.6])
plt.xlabel('Nov 28, 2016');
plt.figure(figsize=(8, 4.5))
plt.subplot(221)
S.plot('s1', color='green')
plt.xticks([28, 88, 148, 208, 268, 328, 388],
['10am', '11am', '12pm', '1pm', '2pm', '3pm', '4pm'])
plt.xlabel('Nov 28, 2016')
plt.xlim([-2, 388])
plt.ylim([0, 0.06])
plt.subplot(222)
S.plot('s1', t=388, facecolor='green', alpha=0.7)
plt.yticks([])
plt.xticks([0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08], ['0', '1', '2', '3', '4', '5', '6', '7', '8'])
plt.xlabel('s1 ($\cdot 10^{-2}$)')
plt.xlim([-0.005, 0.08])
plt.subplot(223)
S.plot('s2', color='green')
plt.xticks([28, 88, 148, 208, 268, 328, 388],
['10am', '11am', '12pm', '1pm', '2pm', '3pm', '4pm'])
plt.xlabel('Nov 28, 2016')
plt.xlim([-2, 388])
plt.ylim([0, 0.0001])
plt.subplot(224)
S.plot('s2', t=388, facecolor='green', alpha=0.7)
plt.yticks([])
plt.xticks([0, 0.00001, 0.00002, 0.00003], ['0', '1', '2', '3'])
plt.xlabel('s2 ($\cdot 10^{-5}$)')
plt.xlim([0, 0.00003])
plt.tight_layout()
plt.figure(figsize=(8, 2))
S.plot('chaotic', lw=2, c='k')
plt.xticks([28, 88, 148, 208, 268, 328, 388],
['10am', '11am', '12pm', '1pm', '2pm', '3pm', '4pm'])
plt.xlabel('Nov 28, 2016')
plt.xlim([0, 388])
plt.ylabel('p("chaotic")')
plt.figure(figsize=(8, 2))
S.plot('chaotic', local=True, c='k', lw=2)
plt.xticks([28, 88, 148, 208, 268, 328, 388],
['10am', '11am', '12pm', '1pm', '2pm', '3pm', '4pm'])
plt.xlabel('Nov 28, 2016')
plt.xlim([0, 388])
plt.ylabel('p("chaotic")')
plt.axvline(58, 0, 1, zorder=1, c='r', lw=1.5, ls='dashed', alpha=0.7)
plt.axvline(178, 0, 1, zorder=1, c='r', lw=1.5, ls='dashed', alpha=0.7);
# Time steps at which the local probability of the "chaotic" scenario
# exceeds 1%, i.e. moments where normal market dynamics look insufficient
# to explain the current price movement.
p = S.getTransitionModelProbabilities('chaotic', local=True)
np.argwhere(p > 0.01)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: We can load different Dependency Grammar parsers from NLTK
Step3: Dependency Version of the Penn Treebank
Step5: "Using the output of zpar (like Malt-TAB but with zero-based indexing)"
Step7: Projective Dependency Parsing
Step9: Non-Projective Dependency Parsing
|
<ASSISTANT_TASK:>
Python Code:
from nltk.grammar import DependencyGrammar
from nltk.parse import (
DependencyGraph,
ProjectiveDependencyParser,
NonprojectiveDependencyParser,
)
# Dependency-annotated sentence (word, POS tag, head index, relation) in
# Malt-TAB style with 1-based head indices; head 0 marks the root.
# NOTE(review): Malt-TAB data is normally tab-separated -- confirm that
# DependencyGraph accepts the space-separated columns used here.
treebank_data = """Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
"""

# Build a dependency graph from the annotated text and pretty-print its tree.
dg = DependencyGraph(treebank_data)
dg.tree().pprint()

# Print every (head, relation, dependent) triple of the graph.
for head, rel, dep in dg.triples():
    print(
        '({h[0]}, {h[1]}), {r}, ({d[0]}, {d[1]})'
        .format(h=head, r=rel, d=dep)
    )

# Dependency version of the Penn Treebank, exported in 3-column CoNLL form.
from nltk.corpus import dependency_treebank
t = dependency_treebank.parsed_sents()[0]
print(t.to_conll(3))  # doctest: +NORMALIZE_WHITESPACE

# Same sentence annotated with zero-based head indices (zpar output);
# with zero-based indexing, head -1 marks the root.
zpar_data = """
Pierre NNP 1 NMOD
Vinken NNP 7 SUB
, , 1 P
61 CD 4 NMOD
years NNS 5 AMOD
old JJ 1 NMOD
, , 1 P
will MD -1 ROOT
join VB 7 VC
the DT 10 NMOD
board NN 8 OBJ
as IN 8 VMOD
a DT 14 NMOD
nonexecutive JJ 14 NMOD
director NN 11 PMOD
Nov. NNP 8 VMOD
29 CD 15 NMOD
. . 7 P
"""

zdg = DependencyGraph(zpar_data, zero_based=True)
print(zdg.tree())

# Projective dependency parsing with a hand-written dependency grammar.
grammar = DependencyGrammar.fromstring("""
'fell' -> 'price' | 'stock'
'price' -> 'of' 'the'
'of' -> 'stock'
'stock' -> 'the'
""")
print(grammar)

dp = ProjectiveDependencyParser(grammar)
for t in sorted(dp.parse(['the', 'price', 'of', 'the', 'stock', 'fell'])):
    print(t)

# Non-projective dependency parsing allows crossing dependency arcs.
grammar = DependencyGrammar.fromstring("""
'taught' -> 'play' | 'man'
'man' -> 'the'
'play' -> 'golf' | 'dog' | 'to'
'dog' -> 'his'
""")
print(grammar)

dp = NonprojectiveDependencyParser(grammar)
g, = dp.parse(['the', 'man', 'taught', 'his', 'dog', 'to', 'play', 'golf'])
print(g.root['word'])
print(g)

x = dp.parse(['the', 'man', 'taught', 'his', 'dog', 'to', 'play', 'golf'])
for i in x:
    print(i)

# For each node of the parse (in address order), print its address, word
# and the addresses of its dependents.
for _, node in sorted(g.nodes.items()):
    if node['word'] is not None:
        print('{address} {word}: {d}'.format(d=node['deps'][''], **node))

print(g.tree())
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Classification
Step2: Explore the Data
Step5: Implement Preprocess Functions
Step8: One-hot encode
Step10: Randomize Data
Step12: Check Point
Step17: Build the network
Step20: Convolution and Max Pooling Layer
Step23: Flatten Layer
Step26: Fully-Connected Layer
Step29: Output Layer
Step32: Create Convolutional Model
Step35: Train the Neural Network
Step37: Show Stats
Step38: Hyperparameters
Step40: Train on a Single CIFAR-10 Batch
Step42: Fully Train the Model
Step45: Checkpoint
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('cifar-10-python.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
'cifar-10-python.tar.gz',
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open('cifar-10-python.tar.gz') as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 1
sample_id = 5
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
def normalize(x):
Normalize a list of sample image data in the range of 0 to 1
: x: List of image data. The image shape is (32, 32, 3)
: return: Numpy array of normalize data
# TODO: Implement Function
arrays = []
for x_ in x:
array = np.array(x_)
arrays.append(array)
return np.stack(arrays, axis=0) / 256.
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_normalize(normalize)
def one_hot_encode(x):
One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
: x: List of sample Labels
: return: Numpy array of one-hot encoded labels
# TODO: Implement Function
# class_num = np.array(x).max()
class_num = 10
num = len(x)
out = np.zeros((num, class_num))
for i in range(num):
out[i, x[i]-1] = 1
return out
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_one_hot_encode(one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
import tensorflow as tf
def neural_net_image_input(image_shape):
Return a Tensor for a batch of image input
: image_shape: Shape of the images
: return: Tensor for image input.
# TODO: Implement Function
# print ('image_shape')
# print (image_shape)
shape = (None, )
shape = shape + image_shape
# print ('shape')
# print (shape)
inputs = tf.placeholder(tf.float32, shape=shape, name='x')
# print ('inputs')
# print (inputs)
return inputs
def neural_net_label_input(n_classes):
Return a Tensor for a batch of label input
: n_classes: Number of classes
: return: Tensor for label input.
# TODO: Implement Function
shape = (None, )
shape = shape + (n_classes, )
return tf.placeholder(tf.float32, shape=shape, name='y')
def neural_net_keep_prob_input():
Return a Tensor for keep probability
: return: Tensor for keep probability.
# TODO: Implement Function
return tf.placeholder(tf.float32, name='keep_prob')
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides, maxpool=True):
Apply convolution then max pooling to x_tensor
:param x_tensor: TensorFlow Tensor
:param conv_num_outputs: Number of outputs for the convolutional layer
:param conv_ksize: kernal size 2-D Tuple for the convolutional layer
:param conv_strides: Stride 2-D Tuple for convolution
:param pool_ksize: kernal size 2-D Tuple for pool
:param pool_strides: Stride 2-D Tuple for pool
: return: A tensor that represents convolution and max pooling of x_tensor
# TODO: Implement Function
input_channel = x_tensor.get_shape().as_list()[-1]
weights_size = conv_ksize + (input_channel,) + (conv_num_outputs,)
conv_strides = (1,) + conv_strides + (1,)
pool_ksize = (1,) + pool_ksize + (1,)
pool_strides = (1,) + pool_strides + (1,)
weights = tf.Variable(tf.random_normal(weights_size, stddev=0.01))
biases = tf.Variable(tf.zeros(conv_num_outputs))
out = tf.nn.conv2d(x_tensor, weights, conv_strides, padding='SAME')
out = out + biases
out = tf.nn.relu(out)
if maxpool:
out = tf.nn.max_pool(out, pool_ksize, pool_strides, padding='SAME')
return out
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_con_pool(conv2d_maxpool)
def flatten(x_tensor):
Flatten x_tensor to (Batch Size, Flattened Image Size)
: x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
: return: A tensor of size (Batch Size, Flattened Image Size).
# TODO: Implement Function
num, hight, width, channel = tuple(x_tensor.get_shape().as_list())
new_shape = (-1, hight * width * channel)
# print ('new_shape')
# print (new_shape)
return tf.reshape(x_tensor, new_shape)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_flatten(flatten)
def fully_conn(x_tensor, num_outputs):
Apply a fully connected layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
# TODO: Implement Function
num, dim = x_tensor.get_shape().as_list()
weights = tf.Variable(tf.random_normal((dim, num_outputs), stddev=np.sqrt(2 / num_outputs)))
biases = tf.Variable(tf.zeros(num_outputs))
return tf.nn.relu(tf.matmul(x_tensor, weights) + biases)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_fully_conn(fully_conn)
def output(x_tensor, num_outputs):
Apply a output layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
# TODO: Implement Function
num, dim = x_tensor.get_shape().as_list()
weights = tf.Variable(tf.random_normal((dim, num_outputs), np.sqrt(2 / num_outputs)))
biases = tf.Variable(tf.zeros(num_outputs))
return tf.matmul(x_tensor, weights) + biases
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_output(output)
def conv_net(x, keep_prob):
Create a convolutional neural network model
: x: Placeholder tensor that holds image data.
: keep_prob: Placeholder tensor that hold dropout keep probability.
: return: Tensor that represents logits
# TODO: Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
# conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
conv_ksize3 = (3, 3)
conv_ksize1 = (1, 1)
conv_ksize5 = (5, 5)
conv_ksize7 = (7, 7)
conv_strides1 = (1, 1)
conv_strides2 = (2, 2)
pool_ksize = (2, 2)
pool_strides = (2, 2)
channels = [32,128,512,512]
# L = 4
out = x
# 6 layers
# for i in range(int(L / 4)):
out = conv2d_maxpool(out, channels[0], conv_ksize7, conv_strides1, pool_ksize, pool_strides, maxpool=True)
out = conv2d_maxpool(out, channels[1], conv_ksize5, conv_strides1, pool_ksize, pool_strides, maxpool=True)
out = conv2d_maxpool(out, channels[2], conv_ksize3, conv_strides1, pool_ksize, pool_strides, maxpool=True)
# out = conv2d_maxpool(out, channels[3], conv_ksize5, conv_strides2, pool_ksize, pool_strides, maxpool=True)
# TODO: Apply a Flatten Layer
# Function Definition from Above:
# flatten(x_tensor)
out = flatten(out)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
# fully_conn(x_tensor, num_outputs)
# by remove this fully connected layer can improve performance
out = fully_conn(out, 256)
# TODO: Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
# output(x_tensor, num_outputs)
out = tf.nn.dropout(out, keep_prob)
out = output(out, 10)
# TODO: return output
return out
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that is can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
Optimize the session on a batch of images and labels
: session: Current TensorFlow session
: optimizer: TensorFlow optimizer function
: keep_probability: keep probability
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
# TODO: Implement Function
feed_dict = {keep_prob: keep_probability, x: feature_batch, y: label_batch}
session.run(optimizer, feed_dict=feed_dict)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_train_nn(train_neural_network)
def print_stats(session, feature_batch, label_batch, cost, accuracy):
Print information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
# TODO: Implement Function
# here will print loss, train_accuracy, and val_accuracy
# I implemented the val_accuracy, please read them all, thanks
# print train_accuracy to see overfit
loss = session.run(cost, feed_dict={x: feature_batch, y: label_batch, keep_prob: 1.0})
train_accuracy = session.run(accuracy, feed_dict={x: feature_batch, y: label_batch, keep_prob: 1.0})
batch = feature_batch.shape[0]
num_valid = valid_features.shape[0]
val_accuracy = 0
for i in range(0, num_valid, batch):
end_i = i + batch
if end_i > num_valid:
end_i = num_valid
batch_accuracy = session.run(accuracy, feed_dict={
x: valid_features[i:end_i], y: valid_labels[i:end_i], keep_prob: 1.0})
batch_accuracy *= (end_i - i)
val_accuracy += batch_accuracy
val_accuracy /= num_valid
print ('loss is {}, train_accuracy is {}, val_accuracy is {}'.format(loss, train_accuracy, val_accuracy))
# TODO: Tune Parameters
epochs = 10
batch_size = 128
keep_probability = 0.8
DON'T MODIFY ANYTHING IN THIS CELL
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
DON'T MODIFY ANYTHING IN THIS CELL
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
DON'T MODIFY ANYTHING IN THIS CELL
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
Test the saved model against the test dataset
test_features, test_labels = pickle.load(open('preprocess_training.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for train_feature_batch, train_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: train_feature_batch, loaded_y: train_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Extract Features
Step3: Train SVM on features
Step4: Inline question 1
|
<ASSISTANT_TASK:>
Python Code:
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
from __future__ import print_function
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading extenrnal modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
from cs231n.features import color_histogram_hsv, hog_feature
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = list(range(num_training, num_training + num_validation))
X_val = X_train[mask]
y_val = y_train[mask]
mask = list(range(num_training))
X_train = X_train[mask]
y_train = y_train[mask]
mask = list(range(num_test))
X_test = X_test[mask]
y_test = y_test[mask]
return X_train, y_train, X_val, y_val, X_test, y_test
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
from cs231n.features import *
num_color_bins = 10 # Number of bins in the color histogram
feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats = extract_features(X_train, feature_fns, verbose=True)
X_val_feats = extract_features(X_val, feature_fns)
X_test_feats = extract_features(X_test, feature_fns)
# Preprocessing: Subtract the mean feature
mean_feat = np.mean(X_train_feats, axis=0, keepdims=True)
X_train_feats -= mean_feat
X_val_feats -= mean_feat
X_test_feats -= mean_feat
# Preprocessing: Divide by standard deviation. This ensures that each feature
# has roughly the same scale.
std_feat = np.std(X_train_feats, axis=0, keepdims=True)
X_train_feats /= std_feat
X_val_feats /= std_feat
X_test_feats /= std_feat
# Preprocessing: Add a bias dimension
X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])
X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])
X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])
# Use the validation set to tune the learning rate and regularization strength
from cs231n.classifiers.linear_classifier import LinearSVM
learning_rates = [1e-9, 1e-8, 1e-7]
regularization_strengths = [5e4, 5e5, 5e6]
results = {}
best_val = -1
best_svm = None
pass
################################################################################
# TODO: #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save #
# the best trained classifer in best_svm. You might also want to play #
# with different numbers of bins in the color histogram. If you are careful #
# you should be able to get accuracy of near 0.44 on the validation set. #
################################################################################
for i in range(len(learning_rates)):
for j in range(len(regularization_strengths)):
svm = LinearSVM()
svm.train(X_train_feats, y_train, learning_rates[i], regularization_strengths[j], num_iters=1500)
train_acc = np.mean(y_train == svm.predict(X_train_feats))
val_acc = np.mean(y_val == svm.predict(X_val_feats))
results[(learning_rates[i], regularization_strengths[j])] = (train_acc, val_acc)
if val_acc > best_val:
best_val = val_acc
best_svm = svm
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# Evaluate your trained SVM on the test set
y_test_pred = best_svm.predict(X_test_feats)
test_accuracy = np.mean(y_test == y_test_pred)
print(test_accuracy)
# An important way to gain intuition about how an algorithm works is to
# visualize the mistakes that it makes. In this visualization, we show examples
# of images that are misclassified by our current system. The first column
# shows images that our system labeled as "plane" but whose true label is
# something other than "plane".
examples_per_class = 8
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for cls, cls_name in enumerate(classes):
idxs = np.where((y_test != cls) & (y_test_pred == cls))[0]
idxs = np.random.choice(idxs, examples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)
plt.imshow(X_test[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls_name)
plt.show()
print(X_train_feats.shape)
from cs231n.classifiers.neural_net import TwoLayerNet
input_dim = X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10
net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None
################################################################################
# TODO: Train a two-layer neural network on image features. You may want to #
# cross-validate various parameters as in previous sections. Store your best #
# model in the best_net variable. #
################################################################################
stats = net.train(X_train_feats, y_train, X_val_feats, y_val,
num_iters=3000, batch_size=200,
learning_rate=5e-1, learning_rate_decay=0.95,
reg=0., verbose=True)
# Predict on the validation set
val_acc = (net.predict(X_val_feats) == y_val).mean()
print('Validation accuracy: ', val_acc)
################################################################################
# END OF YOUR CODE #
################################################################################
# Run your neural net classifier on the test set. You should be able to
# get more than 55% accuracy.
test_acc = (net.predict(X_test_feats) == y_test).mean()
print(test_acc)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Visualize the First Six Training Images
Step2: 3. View an Image in More Detail
Step3: 4. Rescale the Images by Dividing Every Pixel in Every Image by 255
Step4: 5. Encode Categorical Integer Labels Using a One-Hot Scheme
Step5: 6. Define the Model Architecture
Step6: 7. Compile the Model
Step7: 8. Calculate the Classification Accuracy on the Test Set (Before Training)
Step8: 9. Train the Model
Step9: 10. Load the Model with the Best Classification Accuracy on the Validation Set
Step10: 11. Calculate the Classification Accuracy on the Test Set
|
<ASSISTANT_TASK:>
Python Code:
from keras.datasets import mnist
# use Keras to import pre-shuffled MNIST database
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print("The MNIST database has a training set of %d examples." % len(X_train))
print("The MNIST database has a test set of %d examples." % len(X_test))
import matplotlib.pyplot as plt
%matplotlib inline
import matplotlib.cm as cm
import numpy as np
# plot first six training images
fig = plt.figure(figsize=(20,20))
for i in range(6):
ax = fig.add_subplot(1, 6, i+1, xticks=[], yticks=[])
ax.imshow(X_train[i], cmap='gray')
ax.set_title(str(y_train[i]))
def visualize_input(img, ax):
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
ax.annotate(str(round(img[x][y],2)), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
visualize_input(X_train[0], ax)
# rescale [0,255] --> [0,1]
X_train = X_train.astype('float32')/255
X_test = X_test.astype('float32')/255
from keras.utils import np_utils
# print first ten (integer-valued) training labels
print('Integer-valued labels:')
print(y_train[:10])
# one-hot encode the labels
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# print first ten (one-hot) training labels
print('One-hot labels:')
print(y_train[:10])
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
# define the model
model = Sequential()
model.add(Flatten(input_shape=X_train.shape[1:]))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
# summarize the model
model.summary()
# compile the model
model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
metrics=['accuracy'])
# evaluate test accuracy
score = model.evaluate(X_test, y_test, verbose=0)
accuracy = 100*score[1]
# print test accuracy
print('Test accuracy: %.4f%%' % accuracy)
from keras.callbacks import ModelCheckpoint
# train the model
checkpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5',
verbose=1, save_best_only=True)
hist = model.fit(X_train, y_train, batch_size=128, epochs=10,
validation_split=0.2, callbacks=[checkpointer],
verbose=1, shuffle=True)
# load the weights that yielded the best validation accuracy
model.load_weights('mnist.model.best.hdf5')
# evaluate test accuracy
score = model.evaluate(X_test, y_test, verbose=0)
accuracy = 100*score[1]
# print test accuracy
print('Test accuracy: %.4f%%' % accuracy)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Square brackets surround lists, and commas separate the elements in the list
Step2: Please note that there are two ways of creating an empty list.
Step3: 2. How to add items to a list
Step4: We learn that append has one positional parameter object. Remember that you should ignore 'self' (it just means that append is a list method). append returns None. It might be a bit confusing at first that a list method returns None. Please carefully look at the difference between the two following examples. Please predict what will be printed in each code snippet below
Step5: The reason why the first example is the correct one is that lists are mutable, which means that you can change the contents of a list. You can hence change the items in a list without assigning it to a new variable.
Step6: 3. How to extract/inspect items in a list
Step7: We can hence use this index number to extract items from a list (just as with strings).
Step8: Obviously, we can also use negative indices
Step9: And we can extract one part of a list using slicing
Step10: If you insert an index that is higher than what is present in the list, you will get an IndexError
Step11: Two additional methods are useful for inspecting lists
Step12: The count method has one positional parameter value and returns an integer. As the name already indicates, the method returns how often the value occurs in the list.
Step13: The index method has one positional parameter value, and two keyword parameters, and returns the first index of the value. It is hence similar to the count method, but now the first index is returned of the value instead of the count.
Step14: We get a ValueError when the value is not in the list.
Step15: 4. Basic List Operations
Step16: The '*' sign makes it possible to repeat a list
Step17: Of course, you can use lists in membership boolean expressions
Step18: And you can use lists in comparison boolean expressions
Step19: 5. Use built-in functions on lists
Step20: 6. An overview of list methods
Step21: In order to have a complete overview of all list methods, you can use the dir built-in function
Step22: Exercises
Step23: Exercise 2
Step24: Exercise 3
Step25: Exercise 4
Step26: Exercise 4
|
<ASSISTANT_TASK:>
Python Code:
friends = ['John', 'Bob', 'Mary']
stuff_to_pack = ['socks','shirt','toothbrush']
print(friends)
print(stuff_to_pack)
#list of integers
print([1, 24, 76])
#list of strings
print(['red', 'yellow', 'blue'])
#mixed list
print(['red', 24, 98.6])
#list with a list included
print([1, [5, 6], 7])
#empty list
print([])
one_way = []
print(one_way)
another_way = list()
print(another_way)
help(list.append)
a_list = [1, 3, 4]
a_list.append(5)
print(a_list)
a_list = [1, 3, 4]
a_list = a_list.append(5)
print(a_list)
help(str.replace)
a_string = 'hello'
a_string.replace('l', 'b')
print(a_string) # a_string has not been changed
a_string = 'hello'
a_new_string = a_string.replace('l', 'b')
print(a_string)
print(a_new_string)
fruit_basket = ['apple', 'banana', 'cherry']
fruit = 'banana'
print(fruit_basket[1])
print(fruit[1])
friend_list = ['John', 'Bob', 'Marry']
print(friend_list[0])
print(friend_list[1])
print(friend_list[2])
friend_list = ['John', 'Bob', 'Marry']
print(friend_list[-2])
friend_list = ['John', 'Bob', 'Marry']
list_with_fewer_friends = friend_list[:2]
print(list_with_fewer_friends)
print(friend_list[5])
help(list.count)
friend_list = ['John', 'Bob', 'John', 'Marry', 'Bob']
number_of_bobs = friend_list.count('Bob')
print(number_of_bobs)
friend_list = ['John', 'Bob', 'John', 'Marry', 'Bob']
number_of_franks = friend_list.count('Frank')
print(number_of_franks)
help(list.index)
friend_list = ['John', 'Bob', 'John', 'Marry', 'Bob']
first_index_with_john = friend_list.index('Bob')
print(first_index_with_john)
friend_list = ['John', 'Bob', 'John', 'Marry', 'Bob']
friend_list.index('Frank')
one_list = ['where', 'is']
another_list = ['the', 'rest', '?']
print(one_list + another_list)
a_list = ['Hello', 'world']
print(a_list * 3)
life = ['a', 'lot', 'of', 'stuff']
print('meaning' in life)
print([3, 2] == [2, 3])
print([2,3] == [2,3])
nums = [3, 41, 12, 9, 74, 15]
print(len(nums)) # number of items in a list
print(max(nums)) # highest value in a list
print(min(nums)) # lowest value in a list
print(sum(nums)) # sum of all values in a list
#define some lists and variables
a = [1,2,3]
b = 4
c = [5,6,7]
x = 1
i = 2
#do some operations
a.append(b) # Add item b to the end of a
a.extend(c) # Add the elements of list c at the end of a
a.insert(i,b) # Insert item b at position i
a.pop(i) # Remove from a the i'th element and return it. If i is not specified, remove the last element
a.index(x) # Return the index of the first element of a with value x. Error if it does not exist
a.count(x) # Return how often value x is found in a
a.remove(x) # Remove from a the first element with value x. Error if it does not exist
a.sort() # Sort the elements of list a
a.reverse() # Reverses list a (no return value!)
print(a)
dir(list)
# your code here
friend_list = ['John', 'Bob', 'John', 'Marry', 'Bob']
# your code here
friend_list = ['John', 'Bob', 'John', 'Marry', 'Bob']
# your code here
friend_list = ['John', 'Bob', 'John', 'Marry', 'Bob']
# your code here
a_string = 'hello'
a_list = ['hello']
# your code here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Utility method for display
Step3: Fetch the data
Step4: Derive a reference
Step5: Evaluate segmentations using the reference
Step6: Improved output
Step7: You can also export the data as a table for your LaTeX manuscript using the to_latex function.
Step9: Segmentation Representation and the Hausdorff Distance
|
<ASSISTANT_TASK:>
Python Code:
import SimpleITK as sitk
import numpy as np
%run update_path_to_download_script
from downloaddata import fetch_data as fdata
%matplotlib inline
import matplotlib.pyplot as plt
import gui
from ipywidgets import interact, fixed
def display_with_overlay(
segmentation_number, slice_number, image, segs, window_min, window_max
):
Display a CT slice with segmented contours overlaid onto it. The contours are the edges of
the labeled regions.
img = image[:, :, slice_number]
msk = segs[segmentation_number][:, :, slice_number]
overlay_img = sitk.LabelMapContourOverlay(
sitk.Cast(msk, sitk.sitkLabelUInt8),
sitk.Cast(
sitk.IntensityWindowing(
img, windowMinimum=window_min, windowMaximum=window_max
),
sitk.sitkUInt8,
),
opacity=1,
contourThickness=[2, 2],
)
# We assume the original slice is isotropic, otherwise the display would be distorted
plt.imshow(sitk.GetArrayViewFromImage(overlay_img))
plt.axis("off")
plt.show()
image = sitk.ReadImage(fdata("liverTumorSegmentations/Patient01Homo.mha"))
segmentation_file_names = [
"liverTumorSegmentations/Patient01Homo_Rad01.mha",
"liverTumorSegmentations/Patient01Homo_Rad02.mha",
"liverTumorSegmentations/Patient01Homo_Rad03.mha",
]
segmentations = [
sitk.ReadImage(fdata(file_name), sitk.sitkUInt8)
for file_name in segmentation_file_names
]
interact(
display_with_overlay,
segmentation_number=(0, len(segmentations) - 1),
slice_number=(0, image.GetSize()[2] - 1),
image=fixed(image),
segs=fixed(segmentations),
window_min=fixed(-1024),
window_max=fixed(976),
);
# Use majority voting to obtain the reference segmentation. Note that this filter does not resolve ties. In case of
# ties, it will assign max_label_value+1 or a user specified label value (labelForUndecidedPixels) to the result.
# Before using the results of this filter you will have to check whether there were ties and modify the results to
# resolve the ties in a manner that makes sense for your task. The filter implicitly accommodates multiple labels.
labelForUndecidedPixels = 10
reference_segmentation_majority_vote = sitk.LabelVoting(
segmentations, labelForUndecidedPixels
)
manual_plus_majority_vote = list(segmentations)
# Append the reference segmentation to the list of manual segmentations
manual_plus_majority_vote.append(reference_segmentation_majority_vote)
interact(
display_with_overlay,
segmentation_number=(0, len(manual_plus_majority_vote) - 1),
slice_number=(0, image.GetSize()[1] - 1),
image=fixed(image),
segs=fixed(manual_plus_majority_vote),
window_min=fixed(-1024),
window_max=fixed(976),
);
# Use the STAPLE algorithm to obtain the reference segmentation. This implementation of the original algorithm
# combines a single label from multiple segmentations, the label is user specified. The result of the
# filter is the voxel's probability of belonging to the foreground. We then have to threshold the result to obtain
# a reference binary segmentation.
foregroundValue = 1
threshold = 0.95
reference_segmentation_STAPLE_probabilities = sitk.STAPLE(
segmentations, foregroundValue
)
# We use the overloaded operator to perform thresholding, another option is to use the BinaryThreshold function.
reference_segmentation_STAPLE = reference_segmentation_STAPLE_probabilities > threshold
manual_plus_staple = list(segmentations)
# Append the reference segmentation to the list of manual segmentations
manual_plus_staple.append(reference_segmentation_STAPLE)
interact(
display_with_overlay,
segmentation_number=(0, len(manual_plus_staple) - 1),
slice_number=(0, image.GetSize()[1] - 1),
image=fixed(image),
segs=fixed(manual_plus_staple),
window_min=fixed(-1024),
window_max=fixed(976),
);
from enum import Enum
# Use enumerations to represent the various evaluation measures
class OverlapMeasures(Enum):
    """Column indices of the overlap-based measures in the results matrix."""
    jaccard = 0
    dice = 1
    volume_similarity = 2
    false_negative = 3
    false_positive = 4
class SurfaceDistanceMeasures(Enum):
    """Column indices of the surface-distance measures in the results matrix."""
    hausdorff_distance = 0
    mean_surface_distance = 1
    median_surface_distance = 2
    std_surface_distance = 3
    max_surface_distance = 4
# Select which reference we want to use (majority vote or STAPLE)
reference_segmentation = reference_segmentation_STAPLE
# Empty numpy arrays to hold the results
overlap_results = np.zeros(
(len(segmentations), len(OverlapMeasures.__members__.items()))
)
surface_distance_results = np.zeros(
(len(segmentations), len(SurfaceDistanceMeasures.__members__.items()))
)
# Compute the evaluation criteria
# Note that for the overlap measures filter, because we are dealing with a single label we
# use the combined, all labels, evaluation measures without passing a specific label to the methods.
overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()
hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
reference_surface = sitk.LabelContour(reference_segmentation)
# Use the absolute values of the distance map to compute the surface distances (distance map sign, outside or inside
# relationship, is irrelevant)
reference_distance_map = sitk.Abs(
sitk.SignedMaurerDistanceMap(
reference_surface, squaredDistance=False, useImageSpacing=True
)
)
statistics_image_filter = sitk.StatisticsImageFilter()
# Get the number of pixels in the reference surface by counting all pixels that are 1.
statistics_image_filter.Execute(reference_surface)
num_reference_surface_pixels = int(statistics_image_filter.GetSum())
for i, seg in enumerate(segmentations):
# Overlap measures
overlap_measures_filter.Execute(seg, reference_segmentation)
overlap_results[
i, OverlapMeasures.jaccard.value
] = overlap_measures_filter.GetJaccardCoefficient()
overlap_results[
i, OverlapMeasures.dice.value
] = overlap_measures_filter.GetDiceCoefficient()
overlap_results[
i, OverlapMeasures.volume_similarity.value
] = overlap_measures_filter.GetVolumeSimilarity()
overlap_results[
i, OverlapMeasures.false_negative.value
] = overlap_measures_filter.GetFalseNegativeError()
overlap_results[
i, OverlapMeasures.false_positive.value
] = overlap_measures_filter.GetFalsePositiveError()
# Hausdorff distance
hausdorff_distance_filter.Execute(reference_segmentation, seg)
surface_distance_results[
i, SurfaceDistanceMeasures.hausdorff_distance.value
] = hausdorff_distance_filter.GetHausdorffDistance()
segmented_surface = sitk.LabelContour(seg)
# Symmetric surface distance measures
segmented_distance_map = sitk.Abs(
sitk.SignedMaurerDistanceMap(
segmented_surface, squaredDistance=False, useImageSpacing=True
)
)
# Multiply the binary surface segmentations with the distance maps. The resulting distance
# maps contain non-zero values only on the surface (they can also contain zero on the surface)
seg2ref_distance_map = reference_distance_map * sitk.Cast(
segmented_surface, sitk.sitkFloat32
)
ref2seg_distance_map = segmented_distance_map * sitk.Cast(
reference_surface, sitk.sitkFloat32
)
# Get the number of pixels in the reference surface by counting all pixels that are 1.
statistics_image_filter.Execute(segmented_surface)
num_segmented_surface_pixels = int(statistics_image_filter.GetSum())
# Get all non-zero distances and then add zero distances if required.
seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map)
seg2ref_distances = list(seg2ref_distance_map_arr[seg2ref_distance_map_arr != 0])
seg2ref_distances = seg2ref_distances + list(
np.zeros(num_segmented_surface_pixels - len(seg2ref_distances))
)
ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map)
ref2seg_distances = list(ref2seg_distance_map_arr[ref2seg_distance_map_arr != 0])
ref2seg_distances = ref2seg_distances + list(
np.zeros(num_reference_surface_pixels - len(ref2seg_distances))
)
all_surface_distances = seg2ref_distances + ref2seg_distances
# The maximum of the symmetric surface distances is the Hausdorff distance between the surfaces. In
# general, it is not equal to the Hausdorff distance between all voxel/pixel points of the two
# segmentations, though in our case it is. More on this below.
surface_distance_results[
i, SurfaceDistanceMeasures.mean_surface_distance.value
] = np.mean(all_surface_distances)
surface_distance_results[
i, SurfaceDistanceMeasures.median_surface_distance.value
] = np.median(all_surface_distances)
surface_distance_results[
i, SurfaceDistanceMeasures.std_surface_distance.value
] = np.std(all_surface_distances)
surface_distance_results[
i, SurfaceDistanceMeasures.max_surface_distance.value
] = np.max(all_surface_distances)
# Print the matrices
np.set_printoptions(precision=3)
print(overlap_results)
print(surface_distance_results)
import pandas as pd
from IPython.display import display, HTML
# Graft our results matrix into pandas data frames
overlap_results_df = pd.DataFrame(
data=overlap_results,
index=list(range(len(segmentations))),
columns=[name for name, _ in OverlapMeasures.__members__.items()],
)
surface_distance_results_df = pd.DataFrame(
data=surface_distance_results,
index=list(range(len(segmentations))),
columns=[name for name, _ in SurfaceDistanceMeasures.__members__.items()],
)
# Display the data as HTML tables and graphs
display(HTML(overlap_results_df.to_html(float_format=lambda x: "%.3f" % x)))
display(HTML(surface_distance_results_df.to_html(float_format=lambda x: "%.3f" % x)))
overlap_results_df.plot(kind="bar").legend(bbox_to_anchor=(1.6, 0.9))
surface_distance_results_df.plot(kind="bar").legend(bbox_to_anchor=(1.6, 0.9))
# The formatting of the table using the default settings is less than ideal
print(overlap_results_df.to_latex())
# We can improve on this by specifying the table's column format and the float format
print(
overlap_results_df.to_latex(
column_format="ccccccc", float_format=lambda x: "%.3f" % x
)
)
# Create our segmentations and display
image_size = [64, 64]
circle_center = [30, 30]
circle_radius = [20, 20]
# A filled circle with radius R
seg = (
sitk.GaussianSource(sitk.sitkUInt8, image_size, circle_radius, circle_center) > 200
)
# A torus with inner radius r
reference_segmentation1 = seg - (
sitk.GaussianSource(sitk.sitkUInt8, image_size, circle_radius, circle_center) > 240
)
# A torus with inner radius r_2<r
reference_segmentation2 = seg - (
sitk.GaussianSource(sitk.sitkUInt8, image_size, circle_radius, circle_center) > 250
)
gui.multi_image_display2D(
[reference_segmentation1, reference_segmentation2, seg],
["reference 1", "reference 2", "segmentation"],
figure_size=(12, 4),
);
def surface_hausdorff_distance(reference_segmentation, seg):
    """Compute symmetric surface distances and take the maximum.

    This is the Hausdorff distance restricted to the two label surfaces
    (contours), which can differ from the Hausdorff distance between all
    voxels of the two segmentations.

    Args:
        reference_segmentation: SimpleITK binary image, the reference labeling.
        seg: SimpleITK binary image, the segmentation to evaluate.

    Returns:
        float: maximum over all symmetric surface distances, in physical units.
    """
    reference_surface = sitk.LabelContour(reference_segmentation)
    # Use the absolute distance map: the inside/outside sign is irrelevant
    # for surface-to-surface distances.
    reference_distance_map = sitk.Abs(
        sitk.SignedMaurerDistanceMap(
            reference_surface, squaredDistance=False, useImageSpacing=True
        )
    )
    statistics_image_filter = sitk.StatisticsImageFilter()
    # Get the number of pixels in the reference surface by counting all pixels that are 1.
    statistics_image_filter.Execute(reference_surface)
    num_reference_surface_pixels = int(statistics_image_filter.GetSum())
    segmented_surface = sitk.LabelContour(seg)
    segmented_distance_map = sitk.Abs(
        sitk.SignedMaurerDistanceMap(
            segmented_surface, squaredDistance=False, useImageSpacing=True
        )
    )
    # Multiply the binary surface segmentations with the distance maps. The resulting distance
    # maps contain non-zero values only on the surface (they can also contain zero on the surface)
    seg2ref_distance_map = reference_distance_map * sitk.Cast(
        segmented_surface, sitk.sitkFloat32
    )
    ref2seg_distance_map = segmented_distance_map * sitk.Cast(
        reference_surface, sitk.sitkFloat32
    )
    # Get the number of pixels in the segmented surface by counting all pixels that are 1.
    statistics_image_filter.Execute(segmented_surface)
    num_segmented_surface_pixels = int(statistics_image_filter.GetSum())
    # Get all non-zero distances, then pad with zeros for the surface pixels
    # whose distance is exactly zero (masked out by the multiplication above).
    seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map)
    seg2ref_distances = list(seg2ref_distance_map_arr[seg2ref_distance_map_arr != 0])
    seg2ref_distances = seg2ref_distances + list(
        np.zeros(num_segmented_surface_pixels - len(seg2ref_distances))
    )
    ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map)
    ref2seg_distances = list(ref2seg_distance_map_arr[ref2seg_distance_map_arr != 0])
    ref2seg_distances = ref2seg_distances + list(
        np.zeros(num_reference_surface_pixels - len(ref2seg_distances))
    )
    all_surface_distances = seg2ref_distances + ref2seg_distances
    return np.max(all_surface_distances)
hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
# Use reference1, larger inner annulus radius, the surface based computation
# has a smaller difference.
hausdorff_distance_filter.Execute(reference_segmentation1, seg)
print(
"HausdorffDistanceImageFilter result (reference1-segmentation): "
+ str(hausdorff_distance_filter.GetHausdorffDistance())
)
print(
"Surface Hausdorff result (reference1-segmentation): "
+ str(surface_hausdorff_distance(reference_segmentation1, seg))
)
# Use reference2, smaller inner annulus radius, the surface based computation
# has a larger difference.
hausdorff_distance_filter.Execute(reference_segmentation2, seg)
print(
"HausdorffDistanceImageFilter result (reference2-segmentation): "
+ str(hausdorff_distance_filter.GetHausdorffDistance())
)
print(
"Surface Hausdorff result (reference2-segmentation): "
+ str(surface_hausdorff_distance(reference_segmentation2, seg))
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Secondly, prepare and import BERT modules.
Step2: Thirdly, prepare for training
Step3: Now, let's play!
|
<ASSISTANT_TASK:>
Python Code:
import datetime
import json
import os
import pprint
import random
import string
import sys
import tensorflow as tf
assert 'COLAB_TPU_ADDR' in os.environ, 'ERROR: Not connected to a TPU runtime; please see the first cell in this notebook for instructions!'
TPU_ADDRESS = 'grpc://' + os.environ['COLAB_TPU_ADDR']
print('TPU address is', TPU_ADDRESS)
from google.colab import auth
auth.authenticate_user()
with tf.Session(TPU_ADDRESS) as session:
print('TPU devices:')
pprint.pprint(session.list_devices())
# Upload credentials to TPU.
with open('/content/adc.json', 'r') as f:
auth_info = json.load(f)
tf.contrib.cloud.configure_gcs(session, credentials=auth_info)
# Now credentials are set for all future sessions on this TPU.
import sys
!test -d bert_repo || git clone https://github.com/google-research/bert bert_repo
if not 'bert_repo' in sys.path:
sys.path += ['bert_repo']
TASK = 'MRPC' #@param {type:"string"}
assert TASK in ('MRPC', 'CoLA'), 'Only (MRPC, CoLA) are demonstrated here.'
# Download glue data.
! test -d download_glue_repo || git clone https://gist.github.com/60c2bdb54d156a41194446737ce03e2e.git download_glue_repo
!python download_glue_repo/download_glue_data.py --data_dir='glue_data' --tasks=$TASK
TASK_DATA_DIR = 'glue_data/' + TASK
print('***** Task data directory: {} *****'.format(TASK_DATA_DIR))
!ls $TASK_DATA_DIR
# Available pretrained model checkpoints:
# uncased_L-12_H-768_A-12: uncased BERT base model
# uncased_L-24_H-1024_A-16: uncased BERT large model
# cased_L-12_H-768_A-12: cased BERT large model
BERT_MODEL = 'uncased_L-12_H-768_A-12' #@param {type:"string"}
BERT_PRETRAINED_DIR = 'gs://cloud-tpu-checkpoints/bert/' + BERT_MODEL
print('***** BERT pretrained directory: {} *****'.format(BERT_PRETRAINED_DIR))
!gsutil ls $BERT_PRETRAINED_DIR
BUCKET = 'YOUR_BUCKET' #@param {type:"string"}
assert BUCKET, 'Must specify an existing GCS bucket name'
OUTPUT_DIR = 'gs://{}/bert/models/{}'.format(BUCKET, TASK)
tf.gfile.MakeDirs(OUTPUT_DIR)
print('***** Model output directory: {} *****'.format(OUTPUT_DIR))
# Setup task specific model and TPU running config.
import modeling
import optimization
import run_classifier
import tokenization
# Model Hyper Parameters
TRAIN_BATCH_SIZE = 32
EVAL_BATCH_SIZE = 8
LEARNING_RATE = 2e-5
NUM_TRAIN_EPOCHS = 3.0
WARMUP_PROPORTION = 0.1
MAX_SEQ_LENGTH = 128
# Model configs
SAVE_CHECKPOINTS_STEPS = 1000
ITERATIONS_PER_LOOP = 1000
NUM_TPU_CORES = 8
VOCAB_FILE = os.path.join(BERT_PRETRAINED_DIR, 'vocab.txt')
CONFIG_FILE = os.path.join(BERT_PRETRAINED_DIR, 'bert_config.json')
INIT_CHECKPOINT = os.path.join(BERT_PRETRAINED_DIR, 'bert_model.ckpt')
DO_LOWER_CASE = BERT_MODEL.startswith('uncased')
processors = {
"cola": run_classifier.ColaProcessor,
"mnli": run_classifier.MnliProcessor,
"mrpc": run_classifier.MrpcProcessor,
}
processor = processors[TASK.lower()]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(vocab_file=VOCAB_FILE, do_lower_case=DO_LOWER_CASE)
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(TPU_ADDRESS)
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=OUTPUT_DIR,
save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=ITERATIONS_PER_LOOP,
num_shards=NUM_TPU_CORES,
per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2))
train_examples = processor.get_train_examples(TASK_DATA_DIR)
num_train_steps = int(
len(train_examples) / TRAIN_BATCH_SIZE * NUM_TRAIN_EPOCHS)
num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)
model_fn = run_classifier.model_fn_builder(
bert_config=modeling.BertConfig.from_json_file(CONFIG_FILE),
num_labels=len(label_list),
init_checkpoint=INIT_CHECKPOINT,
learning_rate=LEARNING_RATE,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=True,
use_one_hot_embeddings=True)
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=True,
model_fn=model_fn,
config=run_config,
train_batch_size=TRAIN_BATCH_SIZE,
eval_batch_size=EVAL_BATCH_SIZE)
# Train the model.
print('MRPC/CoLA on BERT base model normally takes about 2-3 minutes. Please wait...')
train_features = run_classifier.convert_examples_to_features(
train_examples, label_list, MAX_SEQ_LENGTH, tokenizer)
print('***** Started training at {} *****'.format(datetime.datetime.now()))
print(' Num examples = {}'.format(len(train_examples)))
print(' Batch size = {}'.format(TRAIN_BATCH_SIZE))
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = run_classifier.input_fn_builder(
features=train_features,
seq_length=MAX_SEQ_LENGTH,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
print('***** Finished training at {} *****'.format(datetime.datetime.now()))
# Eval the model.
eval_examples = processor.get_dev_examples(TASK_DATA_DIR)
eval_features = run_classifier.convert_examples_to_features(
eval_examples, label_list, MAX_SEQ_LENGTH, tokenizer)
print('***** Started evaluation at {} *****'.format(datetime.datetime.now()))
print(' Num examples = {}'.format(len(eval_examples)))
print(' Batch size = {}'.format(EVAL_BATCH_SIZE))
# Eval will be slightly WRONG on the TPU because it will truncate
# the last batch.
eval_steps = int(len(eval_examples) / EVAL_BATCH_SIZE)
eval_input_fn = run_classifier.input_fn_builder(
features=eval_features,
seq_length=MAX_SEQ_LENGTH,
is_training=False,
drop_remainder=True)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
print('***** Finished evaluation at {} *****'.format(datetime.datetime.now()))
output_eval_file = os.path.join(OUTPUT_DIR, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
print("***** Eval results *****")
for key in sorted(result.keys()):
print(' {} = {}'.format(key, str(result[key])))
writer.write("%s = %s\n" % (key, str(result[key])))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using A For Loop
Step2: Using Map()
Step3: Map the capitalizer function to regimentNames, convert the map into a list, and view the variable
Step4: Using List Comprehension
|
<ASSISTANT_TASK:>
Python Code:
# Source list of regiment names to transform.
regimentNames = ['Night Riflemen', 'Jungle Scouts', 'The Dragoons', 'Midnight Revengence', 'Wily Warriors']
# --- Approach 1: plain for loop ---
# create a variable for the for loop results
regimentNamesCapitalized_f = []
# for every item in regimentNames
for i in regimentNames:
    # upper-case the item and add it to regimentNamesCapitalized_f
    regimentNamesCapitalized_f.append(i.upper())
# View the outcome
regimentNamesCapitalized_f
# --- Approach 2: map() with a lambda; list() materializes the lazy map object ---
capitalizer = lambda x: x.upper()
regimentNamesCapitalized_m = list(map(capitalizer, regimentNames)); regimentNamesCapitalized_m
# --- Approach 3: list comprehension (the most Pythonic of the three) ---
regimentNamesCapitalized_l = [x.upper() for x in regimentNames]; regimentNamesCapitalized_l
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: Downloading Dataset
Step3: Batch Generator
Step4: Model Architecture
Step5: When we have our model and the data, we use trax.supervised.training to define training and eval tasks and create a training loop. The Trax training loop optimizes training and will create TensorBoard logs and model checkpoints for you.
|
<ASSISTANT_TASK:>
Python Code:
%%capture
!pip install --upgrade trax
import trax
from trax import layers as tl
from trax.supervised import training
# Trax offers the WideResnet architecture in it's models module
from trax.models.resnet import WideResnet
trax.fastmath.set_backend('tensorflow-numpy')
%%capture
train_stream = trax.data.TFDS('cifar10', keys=('image', 'label'), train=True)()
eval_stream = trax.data.TFDS('cifar10', keys=('image', 'label'), train=False)()
train_data_pipeline = trax.data.Serial(
trax.data.Shuffle(),
trax.data.Batch(64),
trax.data.AddLossWeights(),
)
train_batches_stream = train_data_pipeline(train_stream)
eval_data_pipeline = trax.data.Serial(
trax.data.Batch(64),
trax.data.AddLossWeights(),
)
eval_batches_stream = eval_data_pipeline(eval_stream)
thin_model = tl.Serial(
WideResnet(widen_factor = 1),
tl.LogSoftmax()
)
wide_model = tl.Serial(
WideResnet(widen_factor = 2),
tl.LogSoftmax()
)
train_task = training.TrainTask(
labeled_data=train_batches_stream,
loss_layer=tl.CrossEntropyLoss(),
optimizer=trax.optimizers.Adam(0.01),
n_steps_per_checkpoint=1000,
)
eval_task = training.EvalTask(
labeled_data=eval_batches_stream,
metrics=[tl.CrossEntropyLoss(), tl.Accuracy()],
n_eval_batches=20,
)
training_loop = training.Loop(thin_model,
train_task,
eval_tasks=[eval_task],
output_dir='./thin_model')
training_loop.run(5000)
training_loop = training.Loop(wide_model,
train_task,
eval_tasks=[eval_task],
output_dir='./wide_model')
training_loop.run(5000)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Approximate solvers for the Euler equations of gas dynamics
Step2: An implementation of this solver for use in Clawpack can be found here. Recall that an exact Riemann solver for the Euler equations appears in exact_solvers/euler.py.
Step3: Recall that in the true solution the middle wave is a contact discontinuity and carries only a jump in the density. For that reason the three-dimensional phase space plot is generally shown projected onto the pressure-velocity plane as shown above
Step4: Single-shock solution
Step5: It is evident that the solution consists of a single right-going shock. The exact solution cannot be seen because it coincides exactly with the Roe solution. The path of the shock in the first plot also cannot be seen since it is plotted under the path of the rightmost Roe solution wave. The two solutions differ only in the wave speeds predicted for the other two waves, but since these waves have zero strength this makes no difference.
Step7: Notice that in the exact solution, the right edge of the rarefaction travels to the right. In the Roe solution, all waves travel to the left. As in the case of the shallow water equations, here too this behavior can lead to unphysical solutions when this approximate solver is used in a numerical discretization. In order to correct this, we can split the single wave into two when a transonic rarefaction is present, in a way similar to what is done in the shallow water equations. We do not go into details here.
Step8: Examples
Step9: Preservation of positivity
Step10: As we can see, in this example each Roe solver wave moves much more slowly than the leading edge of the corresponding true rarefaction. In order to maintain conservation, this implies that the middle Roe state must have lower density than the true middle state. This leads to a negative density. Note that the velocity and pressure take huge values in the intermediate state.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
import numpy as np
from exact_solvers import euler
from utils import riemann_tools as rt
from ipywidgets import interact
from ipywidgets import widgets
State = euler.Primitive_State
def roe_averages(q_l, q_r, gamma=1.4):
    """Return the Roe-averaged velocity, sound speed, and total enthalpy.

    The averages are density-square-root weighted means of the left and
    right states, as required by the Roe linearization.

    Args:
        q_l: left conserved state (rho, rho*u, E).
        q_r: right conserved state (rho, rho*u, E).
        gamma: ratio of specific heats (default 1.4).

    Returns:
        Tuple (u_hat, c_hat, H_hat).
    """
    # Square roots of the densities serve as the averaging weights.
    w_l, w_r = np.sqrt(q_l[0]), np.sqrt(q_r[0])
    w_sum = w_l + w_r
    # Pressures from the ideal-gas equation of state.
    p_l = (gamma - 1.) * (q_l[2] - 0.5 * (q_l[1] ** 2) / q_l[0])
    p_r = (gamma - 1.) * (q_r[2] - 0.5 * (q_r[1] ** 2) / q_r[0])
    # Weighted averages of velocity and specific total enthalpy.
    u_hat = (q_l[1] / w_l + q_r[1] / w_r) / w_sum
    H_hat = ((q_l[2] + p_l) / w_l + (q_r[2] + p_r) / w_r) / w_sum
    # Sound speed consistent with the averaged enthalpy and velocity.
    c_hat = np.sqrt((gamma - 1) * (H_hat - 0.5 * u_hat ** 2))
    return u_hat, c_hat, H_hat
def Euler_roe(q_l, q_r, gamma=1.4):
    """Approximate Roe solver for the Euler equations.

    Linearizes about the Roe-averaged state and solves the resulting
    constant-coefficient Riemann problem, so the solution consists of
    three discontinuities (no rarefaction fans).

    Args:
        q_l: left conserved state (rho, rho*u, E).
        q_r: right conserved state (rho, rho*u, E).
        gamma: ratio of specific heats.

    Returns:
        Tuple (states, speeds, reval, wave_types): the 3x4 array of constant
        states, the three wave speeds, a function evaluating the solution at
        similarity coordinate xi = x/t, and the wave-type labels (all
        'contact' since every Roe wave is a discontinuity).
    """
    rho_l = q_l[0]
    rhou_l = q_l[1]
    u_l = rhou_l/rho_l
    rho_r = q_r[0]
    rhou_r = q_r[1]
    u_r = rhou_r/rho_r
    u_hat, c_hat, H_hat = roe_averages(q_l, q_r, gamma)
    dq = q_r - q_l
    # Wave speeds are the eigenvalues of the Roe-averaged Jacobian.
    s1 = u_hat - c_hat
    s2 = u_hat
    s3 = u_hat + c_hat
    # Wave strengths: projection of the jump dq onto the eigenvectors.
    alpha2 = (gamma-1.)/c_hat**2 *((H_hat-u_hat**2)*dq[0]+u_hat*dq[1]-dq[2])
    alpha3 = (dq[1] + (c_hat - u_hat)*dq[0] - c_hat*alpha2) / (2.*c_hat)
    alpha1 = dq[0] - alpha2 - alpha3
    r1 = np.array([1., u_hat-c_hat, H_hat - u_hat*c_hat])
    r2 = np.array([1., u_hat, 0.5*u_hat**2])
    q_l_star = q_l + alpha1*r1
    q_r_star = q_l_star + alpha2*r2
    states = np.column_stack([q_l,q_l_star,q_r_star,q_r])
    speeds = [s1, s2, s3]
    wave_types = ['contact','contact', 'contact']
    def reval(xi):
        # Piecewise-constant solution sampled at xi = x/t.
        rho = (xi<s1)*states[0,0] + (s1<=xi)*(xi<s2)*states[0,1] + \
            (s2<=xi)*(xi<s3)*states[0,2] + (s3<=xi)*states[0,3]
        mom = (xi<s1)*states[1,0] + (s1<=xi)*(xi<s2)*states[1,1] + \
            (s2<=xi)*(xi<s3)*states[1,2] + (s3<=xi)*states[1,3]
        E = (xi<s1)*states[2,0] + (s1<=xi)*(xi<s2)*states[2,1] + \
            (s2<=xi)*(xi<s3)*states[2,2] + (s3<=xi)*states[2,3]
        return rho, mom, E
    return states, speeds, reval, wave_types
def compare_solutions(left_state, right_state, solvers=('Exact', 'HLLE')):
    """Plot and compare Riemann solutions from several solvers interactively.

    Args:
        left_state: primitive left state (Density, Velocity, Pressure).
        right_state: primitive right state (Density, Velocity, Pressure).
        solvers: iterable of solver names among 'Exact', 'HLLE', 'Roe'.
                 (Tuple default avoids the mutable-default-argument pitfall.)

    Returns:
        dict: approximate-solver states keyed by solver name ('hlle', 'roe').
    """
    q_l = np.array(euler.primitive_to_conservative(*left_state))
    # BUG FIX: previously used the module-level global `right` instead of
    # the `right_state` parameter, silently ignoring the caller's argument.
    q_r = np.array(euler.primitive_to_conservative(*right_state))
    outputs = []
    states = {}
    for solver in solvers:
        if solver.lower() == 'exact':
            outputs.append(euler.exact_riemann_solution(q_l,q_r))
        if solver.lower() == 'hlle':
            outputs.append(Euler_hlle(q_l, q_r))
            states['hlle'] = outputs[-1][0]
        if solver.lower() == 'roe':
            outputs.append(Euler_roe(q_l, q_r))
            states['roe'] = outputs[-1][0]
    plot_function = \
        rt.make_plot_function([val[0] for val in outputs],
                              [val[1] for val in outputs],
                              [val[2] for val in outputs],
                              [val[3] for val in outputs],
                              solvers, layout='vertical',
                              variable_names=euler.primitive_variables,
                              derived_variables=euler.cons_to_prim,
                              vertical_spacing=0.15,
                              show_time_legend=True)
    interact(plot_function,
             t=widgets.FloatSlider(min=0,max=0.9,step=0.1,value=0.4));
    return states
left = State(Density = 3.,
Velocity = 0.,
Pressure = 3.)
right = State(Density = 1.,
Velocity = 0.,
Pressure = 1.)
states = compare_solutions(left, right, solvers=['Exact','Roe'])
euler.phase_plane_plot(left, right, approx_states=states['roe'])
left = State(Density = 0.1,
Velocity = 0.,
Pressure = 0.1)
right = State(Density = 1.,
Velocity = 1.,
Pressure = 1.)
states = compare_solutions(left, right, solvers=['Exact','Roe'])
euler.phase_plane_plot(left, right, approx_states=states['roe'])
M = 2. # Mach number of the shock wave
gamma = 1.4
mu = 2*(M**2-1)/(M*(gamma+1.))
right = State(Density = 1.,
Velocity = 0.,
Pressure = 1.)
c_r = np.sqrt(gamma*right.Pressure/right.Density)
rho_l = right.Density * M/(M-mu)
p_l = right.Pressure * ((2*M**2-1)*gamma+1)/(gamma+1)
u_l = mu*c_r
left = State(Density = rho_l,
Velocity = u_l,
Pressure = p_l)
states = compare_solutions(left, right, solvers=['Exact','Roe'])
euler.phase_plane_plot(left, right, approx_states=states['roe'])
left = State(Density = 0.1,
Velocity = -2.,
Pressure = 0.1)
right = State(Density = 1.,
Velocity = -1.,
Pressure = 1.)
states = compare_solutions(left, right, solvers=['Exact','Roe'])
def Euler_hlle(q_l, q_r, gamma=1.4):
    """HLLE approximate solver for the Euler equations.

    Uses only two waves with Einfeldt speed estimates; the middle state is
    determined by conservation. Guarantees positivity of the middle state.

    Args:
        q_l: left conserved state (rho, rho*u, E).
        q_r: right conserved state (rho, rho*u, E).
        gamma: ratio of specific heats.

    Returns:
        Tuple (states, speeds, reval, wave_types): the 3x3 array of constant
        states, the two wave speeds, a function evaluating the solution at
        xi = x/t, and the wave-type labels.
    """
    rho_l = q_l[0]
    rhou_l = q_l[1]
    u_l = rhou_l/rho_l
    rho_r = q_r[0]
    rhou_r = q_r[1]
    u_r = rhou_r/rho_r
    E_r = q_r[2]
    E_l = q_l[2]
    u_hat, c_hat, H_hat = roe_averages(q_l, q_r, gamma)
    p_r = (gamma-1.) * (E_r - rho_r*u_r**2/2.)
    p_l = (gamma-1.) * (E_l - rho_l*u_l**2/2.)
    H_r = (E_r+p_r) / rho_r
    H_l = (E_l+p_l) / rho_l
    c_r = np.sqrt((gamma-1.)*(H_r-u_r**2/2.))
    c_l = np.sqrt((gamma-1.)*(H_l-u_l**2/2.))
    # Einfeldt speed estimates: extreme of physical and Roe-averaged speeds.
    s1 = min(u_l-c_l,u_hat-c_hat)
    s2 = max(u_r+c_r,u_hat+c_hat)
    # Middle state chosen so the two-wave solution is exactly conservative.
    rho_m = (rhou_r - rhou_l - s2*rho_r + s1*rho_l)/(s1-s2)
    rhou_m = (rho_r*u_r**2 - rho_l*u_l**2 \
              + p_r - p_l - s2*rhou_r + s1*rhou_l)/(s1-s2)
    E_m = ( u_r*(E_r+p_r) - u_l*(E_l+p_l) - s2*E_r + s1*E_l)/(s1-s2)
    q_m = np.array([rho_m, rhou_m, E_m])
    states = np.column_stack([q_l,q_m,q_r])
    speeds = [s1, s2]
    wave_types = ['contact','contact']
    def reval(xi):
        # Piecewise-constant solution sampled at xi = x/t.
        rho = (xi<s1)*rho_l + (s1<=xi)*(xi<=s2)*rho_m + (s2<xi)*rho_r
        mom = (xi<s1)*rhou_l + (s1<=xi)*(xi<=s2)*rhou_m + (s2<xi)*rhou_r
        E = (xi<s1)*E_l + (s1<=xi)*(xi<=s2)*E_m + (s2<xi)*E_r
        return rho, mom, E
    return states, speeds, reval, wave_types
left = State(Density = 3.,
Velocity = 0.,
Pressure = 3.)
right = State(Density = 1.,
Velocity = 0.,
Pressure = 1.)
states = compare_solutions(left, right, solvers=['Exact','HLLE'])
euler.phase_plane_plot(left, right, approx_states=states['hlle'])
left = State(Density = 1.,
Velocity = -5.,
Pressure = 1.)
right = State(Density = 1.,
Velocity = 1.,
Pressure = 1.)
states = compare_solutions(left, right, solvers=['Exact', 'Roe'])
left = State(Density = 1.,
Velocity = -10.,
Pressure = 1.)
right = State(Density = 1.,
Velocity = 1.,
Pressure = 1.)
states = compare_solutions(left, right, solvers=['Exact', 'HLLE']);
euler.phase_plane_plot(left,right,approx_states=states['hlle'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For examples, let's say we have an array of values and their mean.
Step3: How stable is this measure? To answer this, we'll sample with replacement from data a large number of times and calculate the mean of each sample. Note that each bootstrap sample should have the same number of values as the original sample (but it may not include all of the original data because of repetitions). The reason the sizes must be the same is that the variances will be different otherwise.
Step4: So the errors of the bootstrap sample means are normally distributed around the sample mean. This is due to the Law of Large Numbers.
Step5: Note that it is incorrect to calculate percentiles on the sample means directly. This is because it depends on a particular bootstrap distribution being a good approximation to the true distribution of x.
Step9: Now we'll use the bootstrap method to construct a confidence interval around this score.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('ggplot')
data = pd.Series(np.random.rand(100))
actual_mean = data.mean()
print('{:.3f}'.format(actual_mean))
def calc_sample_means(data, n):
    """Make n bootstrap samples from data and calculate their means.

    Each bootstrap sample is drawn with replacement and has the same size
    as the original series, so its sampling variance matches the original.

    Args:
        data: pd.Series of observations.
        n: number of bootstrap samples to draw.

    Returns:
        pd.Series of the n bootstrap sample means.
    """
    sample_means = []
    for _ in range(n):
        sample_data = data.sample(n=data.shape[0], replace=True)
        sample_means.append(sample_data.mean())
    return pd.Series(sample_means)
means = calc_sample_means(data, 10000)
errors = means - actual_mean
alpha = 0.95
delta = (1 - alpha) / 2.0
error_low, error_high = errors.quantile([alpha - delta, alpha + delta]).values.tolist()
ax = errors.plot(kind='hist')
plt.axvline(x=error_low, color='black', linewidth=2.0)
plt.axvline(x=error_high, color='black', linewidth=2.0)
mean_low, mean_high = actual_mean - error_low, actual_mean + error_high
print('95% confidence interval: [{:.3f}, {:.3f}]'.format(mean_low, mean_high))
ax = means.plot(kind='hist')
plt.axvline(x=actual_mean, color='green', linewidth=2.0)
plt.axvline(x=mean_low, color='black', linewidth=2.0)
plt.axvline(x=mean_high, color='black', linewidth=2.0)
import seaborn as sns
import os
data_df = sns.load_dataset('iris')
data_df.head()
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import f1_score
from sklearn.model_selection import StratifiedKFold, cross_val_score
estimator = GaussianNB()
cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
scores = cross_val_score(
estimator,
data_df.drop('species', axis=1).values,
data_df.species.values,
cv = cv,
scoring='f1_macro'
)
scores = pd.Series(data=scores, name='cv_f1_scores')
ax = scores.plot(kind='bar', rot=0, title='F1 scores from 10_fold cross_validation.')
mean_score = scores.mean()
print('{:.3f}'.format(mean_score))
from sklearn.metrics import f1_score
def gen_bootstrap_samples(data_df, n):
    """Make n bootstrap samples from data_df assuming 10-fold CV.

    Yields (train_df, test_df) pairs: the train set is a 90% sample drawn
    with replacement; the test set is a 10% sample (with replacement) of
    the rows not used for training, mirroring a 10-fold CV split.

    Args:
        data_df: pd.DataFrame with the full dataset.
        n: number of bootstrap replicates (must be > 1).

    Yields:
        (train_df, test_df) DataFrame pairs.
    """
    assert n > 1
    for _ in range(n):
        # Since we used 10-fold cross-validation
        train_df = data_df.sample(frac=0.9, replace=True)
        test_df_candidates = data_df.drop(train_df.index, axis=0)
        test_df = test_df_candidates.sample(frac=0.1, replace=True)
        yield train_df, test_df
def get_score(train_df, test_df):
    """Calculate the macro-averaged F1 score for one bootstrap replicate.

    Trains a Gaussian naive Bayes model on train_df and evaluates its
    predictions against the true labels of test_df.

    Args:
        train_df: pd.DataFrame with feature columns plus a 'species' column.
        test_df: pd.DataFrame with the same layout, used for evaluation.

    Returns:
        float: macro-averaged F1 score on test_df.
    """
    estimator = GaussianNB().fit(train_df.drop('species', axis=1).values, train_df.species.values)
    predictions = estimator.predict(test_df.drop('species', axis=1).values)
    return f1_score(test_df.species.values, predictions, average='macro')
def bootstrap_f1_scores(data_df, n):
    """Calculate f1 scores for n bootstrap samples, assuming 10-fold CV.

    Args:
        data_df: pd.DataFrame with feature columns plus a 'species' column.
        n: number of bootstrap replicates (must be > 1).

    Returns:
        pd.Series of the n macro-F1 scores.
    """
    scores = []
    for train_df, test_df in gen_bootstrap_samples(data_df, n):
        score = get_score(train_df, test_df)
        scores.append(score)
    return pd.Series(scores)
bootstrap_scores = bootstrap_f1_scores(data_df, n=1000)
bootstrap_scores.plot(kind='hist')
score_errors = bootstrap_scores - mean_score
alpha = 0.95
delta = (1 - alpha) / 2.0
error_low, error_high = score_errors.quantile([alpha - delta, alpha + delta]).values.tolist()
score_low, score_high = mean_score - error_low, mean_score + error_high
print('95% Confidence interval for mean score {:.3f} is [{:.3f}, {:.3f}]'.format(mean_score, score_low, score_high))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Most likely, the ImageHash library will be missing in a typical setup. The following cell, installs the library.
Step2: Feature Extraction
Step3: To check whether the file search has succeeded, display some found images to get a feeling of the data we are going to deal with.
Step4: Detecting Similar Documents
Step5: Histogram Intersection
Step6: Comparison of Different Similarity Measures and Metrics in a QBE Scenario
Step7: The next cell computes different measures and metrics and saves them in a dataframe.
Step8: If we inspect the dataframe, we will see that each measure/metric yields different results which is not very surprising...
Step9: To facilitate the assessment of the effectiveness of the different measures and metrics, the next cell creates an HTML overview document with the first found documents.
Step10: A Local Feature - ORB
Step11: To give an example, we will extract ORB features from the first two images and match them. The discovered matches will be illustrated below.
Step12: ATTENTION! Depending on your computer setup, the next cell will take some time to finish. See the log below to get an estimate. The experiment has been run on a MacBook Pro (13-inch, 2018, 2,7 GHz Intel Core i7, 16 GB, and macOS Mojave).
Step13: In a little more sophisticated approach, we will compute the average distance for each query-image pair for all matches. This value yields another similarity score.
Step14: Histogram-based Clustering
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
import tarfile as TAR
import sys
from datetime import datetime
from PIL import Image
import warnings
import json
import pickle
import zipfile
from math import *
import numpy as np
import pandas as pd
from sklearn.cluster import MiniBatchKMeans
import matplotlib.pyplot as plt
import matplotlib
# enlarge plots
plt.rcParams['figure.figsize'] = [7, 5]
import imagehash
from sklearn.preprocessing import normalize
from scipy.spatial.distance import minkowski
from scipy.spatial.distance import hamming
from scipy.stats import wasserstein_distance
from scipy.stats import spearmanr
from skimage.feature import (match_descriptors, corner_harris,
corner_peaks, ORB, plot_matches, BRIEF, corner_peaks, corner_harris)
from skimage.color import rgb2gray
from skimage.io import imread,imshow
def printLog(text):
    """Print *text* prefixed with the current timestamp and flush stdout."""
    stamp = datetime.now()
    print("[" + str(stamp) + "]\t" + text)
    # Flush so that log lines appear immediately even when output is buffered,
    # see: http://stackoverflow.com/questions/230751/how-to-flush-output-of-python-print
    sys.stdout.flush()
def findJPEGfiles(path):
    """Recursively collect the paths of all files ending in '.jpg' below *path*."""
    jpg_paths = []
    for dirpath, _dirnames, filenames in os.walk(path):
        # debug
        # print(os.path.join(dirpath, name))
        jpg_paths.extend(
            os.path.join(dirpath, name)
            for name in filenames
            if name.endswith(".jpg")
        )
    return jpg_paths
outputDir= "./analysis/"
verbose=True
#general preparations, e.g., create missing output directories
if not os.path.exists(outputDir):
if verbose:
print("Creating " + outputDir)
os.mkdir(outputDir)
!pip install ImageHash
#baseDir="/Users/david/src/__datasets/orbis_pictus/sbbget_downloads_comenius/download_temp/"
baseDir="/Users/david/src/__datasets/orbis_pictus/jpegs/download_temp/"
jpgFiles=findJPEGfiles(baseDir)
# extract all features
printLog("Extracting features of %i documents..."%len(jpgFiles))
histograms=[]
# "data science" utility structures
ppnList=[]
nameList=[]
combinedHistograms=[]
combinedNormalizedHistograms=[]
jpegFilePaths=[]
pHashes=[]
for jpg in jpgFiles:
tokens=jpg.split("/")
# load an image
image = Image.open(jpg)
# bug: images are not of the same size, has to be fixed to obtain normalized histograms!!!
# q'n'd fix - brute force resizing
image=image.resize((512,512),Image.LANCZOS)
histogram = image.histogram()
histogramDict=dict()
# save its unique ID and name
histogramDict['ppn']=tokens[-3]+"/"+tokens[-2]
histogramDict['extractName']=tokens[-1]
# save the histogram data in various forms
histogramDict['redHistogram'] = histogram[0:256]
histogramDict['blueHistogram'] = histogram[256:512]
histogramDict['greenHistogram'] = histogram[512:768]
hist=np.array(histogram)
normalizedRGB = (hist)/(max(hist))
# create a perceptual hash for the image
pHashes.append(imagehash.phash(image))
image.close()
# fill the DS data structures
ppnList.append(histogramDict['ppn'])
nameList.append(histogramDict['extractName'])
combinedHistograms.append(histogramDict['redHistogram']+histogramDict['blueHistogram']+histogramDict['greenHistogram'])
combinedNormalizedHistograms.append(normalizedRGB)
jpegFilePaths.append(jpg)
printLog("Done.")
img1=imread(jpegFilePaths[0])
img2=imread(jpegFilePaths[1388])
img3=imread(jpegFilePaths[1389])
#Creates two subplots and unpacks the output array immediately
f, (ax1, ax2,ax3) = plt.subplots(1, 3, sharex='all', sharey='all')
ax1.axis('off')
ax2.axis('off')
ax3.axis('off')
ax1.set_title("Image #0")
ax1.imshow(img1)
ax2.set_title("Image #1388")
ax2.imshow(img2)
ax3.set_title("Image #1389")
ax3.imshow(img3)
plt.plot(combinedNormalizedHistograms[0],"r")
plt.plot(combinedNormalizedHistograms[1388],"g")
histCut=np.absolute((np.subtract(combinedNormalizedHistograms[0],combinedNormalizedHistograms[1388])))
print(np.sum(histCut))
plt.plot(histCut,"k--")
plt.title("Histogramm Difference (black) of Two Histograms")
plt.show()
plt.plot(combinedNormalizedHistograms[0],"r")
plt.plot(combinedNormalizedHistograms[1388],"g")
histCut=(np.minimum(combinedNormalizedHistograms[0],combinedNormalizedHistograms[1388]))
print(np.sum(histCut))
plt.plot(histCut,"k--")
plt.title("Histogramm Intersection (black) of Two Histograms")
plt.show()
qbeIndex=1392#1685 # beim 1685 sind p1 und p2 am Anfang gleich
img1=imread(jpegFilePaths[qbeIndex])
#plt.title(jpegFilePaths[qbeIndex])
plt.axis('off')
imshow(img1)
def squareRooted(x):
    """Return the Euclidean (L2) norm of vector *x*, rounded to 3 decimals."""
    total = sum(a * a for a in x)
    return round(sqrt(total), 3)
def cosSimilarity(x, y):
    """Return the cosine similarity of vectors *x* and *y* at 3-decimal precision."""
    dot_product = sum(a * b for a, b in zip(x, y))
    norm_product = squareRooted(x) * squareRooted(y)
    return round(dot_product / float(norm_product), 3)
printLog("Calculating QBE scenarios...")
qbeHist=combinedNormalizedHistograms[qbeIndex]
dataDict={"index":[],"p1":[],"p2":[],"histdiff":[],"histcut":[],"emd":[],"cosine":[],"phash":[]}
for i,hist in enumerate(combinedNormalizedHistograms):
dataDict["index"].append(i)
# Manhattan distance
dataDict["p1"].append(minkowski(qbeHist,hist,p=1))
# Euclidean distance
dataDict["p2"].append(minkowski(qbeHist,hist,p=2))
# histogram difference
histDiff=np.absolute((np.subtract(qbeHist,combinedNormalizedHistograms[i])))
dataDict["histdiff"].append(np.sum(histDiff))
# histogram cut
histCut=np.minimum(qbeHist,combinedNormalizedHistograms[i])
dataDict["histcut"].append(np.sum(histCut))
# earth mover's distance aka Wasserstein
dataDict["emd"].append(wasserstein_distance(qbeHist,hist))
# cosine similarity
dataDict["cosine"].append(cosSimilarity(qbeHist,combinedNormalizedHistograms[i]))
# pHash with Hamming distance
dataDict["phash"].append(hamming(pHashes[qbeIndex],pHashes[i]))
df=pd.DataFrame(dataDict)
printLog("Done.")
df.sort_values(by=['p1']).head(20).describe()
measures=["p1","p2","histdiff","histcut","emd","cosine","phash"]
ranks=dict()
printLog("Creating QBE report files...")
htmlFile=open(outputDir+"_qbe.html", "w")
printLog("HTML output will be saved to: %s"%outputDir+"_qbe.html")
htmlFile.write("<html><head>\n")
htmlFile.write("<link href='../css/helvetica.css' rel='stylesheet' type='text/css'>\n")
#htmlFile.write("<style>body {color: black;text-align: center; font-family: helvetica;} h1 {font-size:15px;position: fixed; padding-top:5px; top: 0;width: 100%;background: rgba(255,255,255,0.5);} h2 {font-size:15px;position: fixed; right: 0;width: 150px; padding-top:25px; padding-right:15px; background: rgba(255,255,255,0.5);} p {font-size:10px;} .score{font-size:6px; text-align: right;}")
htmlFile.write("</style></head>\n")
htmlFile.write("<h2>mir comparison.</h2>")
htmlFile.write("<table><tr>\n")
for measureName in measures:
typeOfMeasure="distance"
# check whether the measure is a similarity or a distance measure
# (assuming identity (i.e., identity of indiscernibles) of the measure)
if df[df.index==qbeIndex][measureName].tolist()[0]>0:
df2=df.sort_values(by=[measureName],ascending=False).head(20)
typeOfMeasure="similarity"
else:
df2=df.sort_values(by=[measureName],ascending=True).head(20)
typeOfMeasure="distance"
htmlFile.write("<td>\n")
measureTitle=measureName
if typeOfMeasure=="similarity":
measureTitle=measureName.replace("dist_","sim_")
htmlFile.write("<h1>"+measureTitle+"</h1>\n")
htmlFile.write("<p>"+typeOfMeasure+"</p>\n")
ranks[measureName]=df2.index.tolist()
jpegFilePathsReport=[]
# image directory must be relative to the directory of the html files
imgBaseDir="./extracted_images/"
for row in df2.itertuples(index=False):
i=row.index
score=getattr(row, measureName)
# create JPEG copies if not available already
tiffImage=imgBaseDir+ppnList[i]+"/"+nameList[i]
jpgPath=tiffImage.replace(".tif",".jpg")
if not os.path.exists(outputDir+jpgPath):
image = Image.open(outputDir+tiffImage)
image.thumbnail((512,512))
image.save(outputDir+jpgPath)
image.close()
os.remove(outputDir+tiffImage)
jpegFilePathsReport.append(outputDir+jpgPath)
if i==qbeIndex:
htmlFile.write("<img height=150 src='"+jpgPath+"' alt='"+str(i)+"'/>\n")
else:
htmlFile.write("<img height=150 src='"+jpgPath+"' alt='"+str(i)+"'/>\n")
#htmlFile.write("<p class='score'>"+str(score)+"</p>")
htmlFile.write("<p class='score'> </p>\n")
htmlFile.write("</td>\n")
# close the HTML file
htmlFile.write("</tr></table>\n")
htmlFile.write("</body></html>\n")
htmlFile.close()
printLog("Done.")
qbeIndexLocalFeat=17#qbeIndex#17 #17=Welt
img1=imread(jpegFilePaths[qbeIndexLocalFeat],as_gray=True)
img2=imread(jpegFilePaths[1301],as_gray=True)
img3=imread(jpegFilePaths[1671],as_gray=True)
#Creates two subplots and unpacks the output array immediately
f, (ax1, ax2,ax3) = plt.subplots(1, 3, sharex='all', sharey='all')
ax1.axis('off')
ax2.axis('off')
ax3.axis('off')
ax1.set_title("Query #%i"%qbeIndexLocalFeat)
ax1.imshow(img1)
ax2.set_title("Index #1301")
ax2.imshow(img2)
ax3.set_title("Index #1671")
ax3.imshow(img3)
# extract features
descriptor_extractor = ORB(n_keypoints=200)
descriptor_extractor.detect_and_extract(img1)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors
descriptor_extractor.detect_and_extract(img2)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors
# match features
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
# visualize the results
fig, ax = plt.subplots(nrows=1, ncols=1)
plt.gray()
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12)
ax.axis('off')
ax.set_title("Image 1 vs. Image 2")
printLog("Calculating ORB QBE scenarios...")
#qbeIndexLocalFeat
# prepare QBE image
descriptor_extractor = ORB(n_keypoints=200)
# prepare QBE image
qbeImage=imread(jpegFilePaths[qbeIndexLocalFeat],as_gray=True)
descriptor_extractor.detect_and_extract(qbeImage)
qbeKeypoints = descriptor_extractor.keypoints
qbeDescriptors = descriptor_extractor.descriptors
orbDescriptors=[]
orbMatches=[]
# match QBE image against the corpus
dataDict={"index":[],"matches_orb":[]}
for i,jpeg in enumerate(jpegFilePaths):
dataDict["index"].append(i)
compImage=imread(jpeg,as_gray=True)
descriptor_extractor.detect_and_extract(compImage)
keypoints = descriptor_extractor.keypoints
descriptors = descriptor_extractor.descriptors
orbDescriptors.append(descriptors)
matches = match_descriptors(qbeDescriptors, descriptors, cross_check=True)#,max_distance=0.5)
orbMatches.append(matches)
# naive approach: count the number of matched descriptors
dataDict["matches_orb"].append(matches.shape[0])
if i%100==0:
printLog("Processed %i documents of %i."%(i,len(jpegFilePaths)))
df=pd.DataFrame(dataDict)
printLog("Done.")
df2=df.sort_values(by=['matches_orb'],ascending=False).head(20)
df2.describe()
printLog("Calculating Hamming distances for ORB features and calculating average distance...")
averageDistancePerImage=[]
for i,matches in enumerate(orbMatches):
# matches qbe
# matches[:, 0]
# matches document
# matches[:, 1]
qbeMatchIndices=matches[:, 0]
queryMatchIndices=matches[:, 1]
sumDistances=0.0
noMatches=len(qbeMatchIndices)
for j,qbeMatchIndex in enumerate(qbeMatchIndices):
sumDistances+=hamming(qbeDescriptors[qbeMatchIndex],orbDescriptors[i][queryMatchIndices[j]])
avgDistance=sumDistances/noMatches
averageDistancePerImage.append((avgDistance,i))
if i%100==0:
printLog("Processed %i documents of %i."%(i,len(orbMatches)))
averageDistancePerImage.sort(key=lambda tup: tup[0])
printLog("Done.\n")
# create the report files
measures=["matches_orb"]
ranks=dict()
printLog("Creating QBE ORB report files...")
htmlFile=open(outputDir+"_orb.html", "w")
printLog("HTML output will be saved to: %s"%outputDir+"_orb.html")
htmlFile.write("<html><head>\n")
htmlFile.write("<link href='../css/helvetica.css' rel='stylesheet' type='text/css'>\n")
#htmlFile.write("<style>body {color: black;text-align: center; font-family: helvetica;} h1 {font-size:15px;position: fixed; padding-top:5px; top: 0;width: 100%;background: rgba(255,255,255,0.5);} h2 {font-size:15px;position: fixed; right: 0;width: 150px; padding-top:25px; padding-right:15px; background: rgba(255,255,255,0.5);} p {font-size:10px;} .score{font-size:6px; text-align: right;}")
htmlFile.write("</style></head>\n")
htmlFile.write("<h2>orb comparison.</h2>")
htmlFile.write("<table><tr>\n")
for measureName in measures:
typeOfMeasure="similarity"
htmlFile.write("<td>\n")
htmlFile.write("<h1>"+measureName+"</h1>\n")
htmlFile.write("<p>"+typeOfMeasure+"</p>\n")
ranks[measureName]=df2.index.tolist()
jpegFilePathsReport=[]
# image directory must be relative to the directory of the html files
imgBaseDir="./extracted_images/"
for row in df2.itertuples(index=False):
i=row.index
score=getattr(row, measureName)
# create JPEG copies if not available already
tiffImage=imgBaseDir+ppnList[i]+"/"+nameList[i]
jpgPath=tiffImage.replace(".tif",".jpg")
if not os.path.exists(outputDir+jpgPath):
image = Image.open(outputDir+tiffImage)
image.thumbnail((512,512))
image.save(outputDir+jpgPath)
image.close()
os.remove(outputDir+tiffImage)
jpegFilePathsReport.append(outputDir+jpgPath)
if i==qbeIndex:
htmlFile.write("<img height=150 src='"+jpgPath+"' alt='"+str(i)+"'/>\n")
else:
htmlFile.write("<img height=150 src='"+jpgPath+"' alt='"+str(i)+"'/>\n")
#htmlFile.write("<p class='score'>"+str(score)+"</p>")
htmlFile.write("<p class='score'> </p>\n")
htmlFile.write("</td>\n")
# the non-naive approach using the average distance
htmlFile.write("<td>\n")
htmlFile.write("<h1>dist_avg_orb</h1>\n")
htmlFile.write("<p>"+typeOfMeasure+"</p>\n")
for (dist,index) in averageDistancePerImage[:20]:
typeOfMeasure="similarity"
jpegFilePathsReport=[]
# image directory must be relative to the directory of the html files
imgBaseDir="./extracted_images/"
i=index
score=dist
# create JPEG copies if not available already
tiffImage=imgBaseDir+ppnList[i]+"/"+nameList[i]
jpgPath=tiffImage.replace(".tif",".jpg")
if not os.path.exists(outputDir+jpgPath):
image = Image.open(outputDir+tiffImage)
image.thumbnail((512,512))
image.save(outputDir+jpgPath)
image.close()
os.remove(outputDir+tiffImage)
jpegFilePathsReport.append(outputDir+jpgPath)
if i==qbeIndex:
htmlFile.write("<img height=150 src='"+jpgPath+"' alt='"+str(i)+"'/>\n")
else:
htmlFile.write("<img height=150 src='"+jpgPath+"' alt='"+str(i)+"'/>\n")
htmlFile.write("<p class='score'>"+str(score)+"</p>")
htmlFile.write("<p class='score'> </p>\n")
htmlFile.write("</td>\n")
#eof
# close the HTML file
htmlFile.write("</tr></table>\n")
htmlFile.write("</body></html>\n")
htmlFile.close()
printLog("Done.")
printLog("Clustering...")
X=np.array(combinedHistograms)
numberOfClusters=20
kmeans = MiniBatchKMeans(n_clusters=numberOfClusters, random_state = 0, batch_size = 6)
kmeans=kmeans.fit(X)
printLog("Done.")
printLog("Creating report files...")
htmlFiles=[]
jpegFilePaths=[]
for i in range(0,numberOfClusters):
htmlFile=open(outputDir+str(i)+".html", "w")
htmlFile.write("<html><head><link href='../css/helvetica.css' rel='stylesheet' type='text/css'></head>\n<body>\n")
#htmlFile.write("<h1>Cluster "+str(i)+"</h1>\n")
htmlFile.write("<img src='"+str(i)+".png' width=200 />") # cluster center histogram will created below
htmlFiles.append(htmlFile)
# image directory must be relative to the directory of the html files
imgBaseDir="./extracted_images/"
for i, label in enumerate(kmeans.labels_):
# create JPEG copies if not available already
tiffImage=imgBaseDir+ppnList[i]+"/"+nameList[i]
jpgPath=tiffImage.replace(".tif",".jpg")
if not os.path.exists(outputDir+jpgPath):
image = Image.open(outputDir+tiffImage)
image.thumbnail((512,512))
image.save(outputDir+jpgPath)
image.close()
os.remove(outputDir+tiffImage)
jpegFilePaths.append(outputDir+jpgPath)
htmlFiles[label].write("<img height=200 src='"+jpgPath+"' alt='"+str(len(jpegFilePaths)-1)+"'/>\n")
# close the HTML files
for h in htmlFiles:
h.write("</body></html>\n")
h.close()
# create the summarization main HTML page
htmlFile = open(outputDir+"_main.html", "w")
printLog("HTML output will be saved to: %s"%outputDir+"_main.html")
htmlFile.write("<html><head><link href='../css/helvetica.css' rel='stylesheet' type='text/css'></head><body>\n")
htmlFile.write("<h2>cluster results.</h2>\n")
for i in range(0, numberOfClusters):
htmlFile.write("<iframe src='./"+str(i)+".html"+"' height=400 ><p>Long live Netscape!</p></iframe>")
htmlFile.write("</body></html>\n")
htmlFile.close()
printLog("Done.")
# save the cluster center histograms as images to assist the visualization
printLog("Rendering %i cluster center histograms..."%len(kmeans.cluster_centers_))
for j, histogram in enumerate(kmeans.cluster_centers_):
plt.figure(0)
# clean previous plots
plt.clf()
plt.title("Cluster %i"%j)
#red
for i in range(0, 256):
plt.bar(i, histogram[i],color='red', alpha=0.3)
# blue
for i in range(256, 512):
plt.bar(i-256, histogram[i], color='blue', alpha=0.3)
# green
for i in range(512, 768):
plt.bar(i-512, histogram[i], color='green', alpha=0.3)
#debug
#plt.show()
plt.savefig(outputDir+str(j)+".png")
printLog("Done.")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: The standard bootstrap method
Step5: The Bayesian bootstrap (with a Dirichlet prior)
Step6: Test both the weighted statistic method and the weighted sampling methods
Step11: Define a function to compute confidence intervals and use it
Step12: Below we apply the -Bayesian, but could just as well be classical- bootstrap method to a linear regression by bootstrapping the data.
Step13: From this plot and the confidence interval on the slope we can confidently say that there is no evidence for a correlation between the two variables.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
%matplotlib inline
data = pd.read_csv('american_presidents.csv', header=0, index_col=None)
data
data.describe()
data.plot(x='order',y='height_cm', color='blue')
data.plot('order', kind='hist', color='blue')
import random
import numpy.random as npr
def bootstrap(data, num_samples, statistic, alpha):
    """Classical bootstrap of *statistic* over *data*.

    Parameters
    ----------
    data : 1D numpy array of observations.
    num_samples : number of bootstrap resamplings with replacement.
    statistic : callable accepting ``(array, axis)`` (e.g. ``np.mean``); it is
        applied along axis 1 of the matrix of resampled data sets.
    alpha : significance level; the returned interval is the
        100*(1-alpha)% confidence interval.

    Returns
    -------
    (stat, sd, low_ci, high_ci) : mean of the bootstrap statistics, their
        (unbiased) standard deviation, and the lower/upper CI bounds.
    """
    # Generate the indices for the required number of permutations/(resamplings
    # with replacement) required
    idx = npr.randint(0, len(data), (num_samples, len(data)))
    # Generate the multiple resampled data sets from the original one
    samples = data[idx]
    # Apply the 'statistic' function to each of the resampled data sets and
    # order the resulting statistics by increasing size.
    stats = np.sort(statistic(samples, 1))
    stat = stats.mean()
    # Return the value of the computed statistic at the upper and lower
    # percentiles specified by the alpha parameter given. These are, by
    # definition, the boundaries of the Confidence Interval for that value of
    # alpha. E.g. alpha=0.05 ==> CI 95%
    low_ci = stats[int((alpha / 2.0) * num_samples)]
    high_ci = stats[int((1 - alpha / 2.0) * num_samples)]
    # Standard deviation of the bootstrap distribution (not of the scalar mean,
    # which was the original bug), with Bessel's correction for an unbiased
    # estimate:
    sd = np.std(stats, ddof=1)
    return stat, sd, low_ci, high_ci
def bayes_bstrp(data, statistic, nbstrp, samplesize):
    """Implements the Bayesian bootstrap method (Dirichlet prior).

    Parameters
    ----------
    data : 1D numpy array of observations.
    statistic : callable applied to each (weighted) resample.
    nbstrp : number of bootstrap replications (rows of Dirichlet weights).
    samplesize : size of each weighted resample; only used when *statistic*
        cannot be computed directly from the weights.

    Returns
    -------
    1D numpy array of length *nbstrp*, one statistic value per replication
    (the posterior sample of the statistic).
    """
    def Dirichlet_sample(m, n):
        """Return an (m, n) matrix of Dirichlet(1,...,1) draws, one distribution per row."""
        # Draw from Gamma distribution
        Dirichlet_params = np.ones(m * n)  # Set Dirichlet distribution parameters
        # https://en.wikipedia.org/wiki/Dirichlet_distribution#Gamma_distribution
        Dirichlet_weights = np.asarray([random.gammavariate(a, 1) for a in Dirichlet_params])
        Dirichlet_weights = Dirichlet_weights.reshape(m, n)  # Fold them (row by row) into a matrix
        row_sums = Dirichlet_weights.sum(axis=1)
        Dirichlet_weights = Dirichlet_weights / row_sums[:, np.newaxis]  # Reweight each row to sum to 1
        return Dirichlet_weights

    Dirich_wgts_matrix = Dirichlet_sample(nbstrp, data.shape[0])  # Generate sample of Dirichlet weights
    # If the statistic can be directly computed using the weights (such as the
    # mean), do this since it will be faster.
    if statistic == np.mean or statistic == np.average:
        # NOTE: range() replaces py2-only xrange() for Python 3 compatibility.
        results = np.asarray([np.average(data, weights=Dirich_wgts_matrix[i]) for i in range(nbstrp)])
        return results
    # Otherwise resort to sampling according to the Dirichlet weights and
    # computing the statistic on each resample.
    else:
        results = np.zeros(nbstrp)
        for i in range(nbstrp):  # Sample from data according to Dirichlet weights
            weighted_sample = np.random.choice(data, samplesize, replace=True, p=Dirich_wgts_matrix[i])
            results[i] = statistic(weighted_sample)  # Compute the statistic for each sample
        return results
height_data = data['height_cm'].values
posterior_mean = bayes_bstrp(height_data, np.mean, nbstrp=10000, samplesize=1000)
print posterior_mean
posterior_median = bayes_bstrp(height_data, np.median, nbstrp=10000, samplesize=1000)
print posterior_median
def CI(sample, alpha=0.05):
    """Return the 100*(1-alpha)% confidence interval for a test statistic
    computed on a bootstrap sample.

    Parameters
    ----------
    sample : 1D array of bootstrap statistic values (left unmodified).
    alpha : significance level (default 0.05 -> 95% CI).

    Returns
    -------
    [low_ci, high_ci] : the interval boundaries.
    """
    # Sort a copy: the original sorted in place, silently mutating the
    # caller's array as a side effect.
    ordered = np.sort(sample)
    num_samples = ordered.shape[0]
    low_ci = ordered[int((alpha / 2.0) * num_samples)]
    high_ci = ordered[int((1 - alpha / 2.0) * num_samples)]
    return [low_ci, high_ci]
meanCI = CI(posterior_mean, alpha=0.05)
print "The mean of the posterior is:\t{0:.4g}".format(posterior_mean.mean())
print "With confidence interval:\t[{0:.4g}, {1:.4g}]".format(meanCI[0],meanCI[1])
#print posterior_median.mean(), CI(posterior_median)
fig,ax =plt.subplots(2,1, sharex=True)
ax[0].hist(height_data, color='blue')
ax[0].set_xlabel('Heights of American Presidents (in cm)')
ax[0].set_ylabel('Frequency')
ax[1].hist(posterior_mean, color='blue')
ax[1].set_xlabel('Bayesian Bootstrap posterior of the mean (95% CI in red)')
ax[1].set_ylabel('Frequency')
ax[1].plot([meanCI[0], meanCI[1]], [0, 0], 'r', linewidth=8)
plt.show()
from scipy import stats
x = data['order'].values
y = data['height_cm'].values
slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
print slope
print intercept
def bayes_bstrp1(data, statistic, nbstrp, samplesize):
    """Implements the Bayesian bootstrap method for 1- and 2-variable statistics.

    Input can be a 1D numpy array, or - for test statistics of two variables -
    a pandas DataFrame with two columns (x, y in that order); the statistic is
    then called as ``statistic(x_sample, y_sample)``.

    Parameters
    ----------
    data : 1D numpy array, or two-column pandas DataFrame.
    statistic : callable; one-argument for 1D data, two-argument for 2D data.
    nbstrp : number of bootstrap replications.
    samplesize : size of each weighted resample (ignored for the mean, which
        is computed directly from the Dirichlet weights).

    Returns
    -------
    numpy array with one statistic result per replication.
    """
    def Dirichlet_sample(m, n):
        """Return an (m, n) matrix of Dirichlet(1,...,1) draws, one distribution per row."""
        Dirichlet_params = np.ones(m * n)  # Set Dirichlet distribution parameters
        # https://en.wikipedia.org/wiki/Dirichlet_distribution#Gamma_distribution
        Dirichlet_weights = np.asarray([random.gammavariate(a, 1) for a in Dirichlet_params])  # Draw from Gamma distrib
        Dirichlet_weights = Dirichlet_weights.reshape(m, n)  # Fold them (row by row) into a matrix
        row_sums = Dirichlet_weights.sum(axis=1)
        Dirichlet_weights = Dirichlet_weights / row_sums[:, np.newaxis]  # Reweight each row to sum to 1
        return Dirichlet_weights

    Dirich_wgts_matrix = Dirichlet_sample(nbstrp, data.shape[0])  # Generate sample of Dirichlet weights
    if data.ndim == 1:
        # If the statistic can be directly computed using the weights (such as
        # the mean), do this since it will be faster.
        if statistic == np.mean or statistic == np.average:
            results = np.asarray([np.average(data, weights=Dirich_wgts_matrix[i]) for i in range(nbstrp)])
            return results
        # Otherwise resort to sampling according to the Dirichlet weights and
        # computing the statistic
        else:
            results = np.zeros(nbstrp)
            for i in range(nbstrp):  # Sample from data according to Dirichlet weights
                weighted_sample = np.random.choice(data, samplesize, replace=True, p=Dirich_wgts_matrix[i])
                results[i] = statistic(weighted_sample)  # Compute the statistic for each sample
            return results
    elif data.ndim >= 2:
        # If the statistic can be directly computed using the weights (such as
        # the mean), do this since it will be faster. (The mean is taken over
        # the second column, i.e. the y values.)
        if statistic == np.mean or statistic == np.average:
            results = np.asarray([np.average(data[data.columns[1]].values, weights=Dirich_wgts_matrix[i])
                                  for i in range(nbstrp)])
            return results
        # Otherwise resort to sampling according to the Dirichlet weights and
        # computing the statistic
        else:
            # Pre-allocate an integer index buffer so rows can be used for
            # indexing directly (the original used a float buffer and cast
            # with astype(int) on every access).
            index_sample = np.zeros((nbstrp, samplesize), dtype=int)
            results = []
            for i in range(nbstrp):  # Sample from data according to Dirichlet weights
                # Instead of sampling data points directly, we sample over
                # their index (i.e. by row number), which is exactly
                # equivalent but preserves the x,y pairings during sampling.
                index_sample[i, :] = np.random.choice(np.arange(data.shape[0]), samplesize, replace=True,
                                                      p=Dirich_wgts_matrix[i])
                # We index from the DataFrame this way because Pandas does not
                # support slicing like this:
                # http://stackoverflow.com/questions/23686561/slice-a-pandas-dataframe-by-an-array-of-indices-and-column-names
                results.append(statistic(data.values[index_sample[i], 0],
                                         data.values[index_sample[i], 1]))
            return np.array(results)
posterior_mean1 = bayes_bstrp1(height_data, np.mean, nbstrp=10000, samplesize=1000)
print posterior_mean1
posterior_median1 = bayes_bstrp(height_data, np.median, nbstrp=10000, samplesize=1000)
print posterior_median1
# Copy the columns containing x and y (in that order) into a new Pandas DataFrame, to be used for Bayesian bootstrap
test_df = data[['order','height_cm']]
linregres_posterior = bayes_bstrp1(test_df, stats.linregress, nbstrp=100, samplesize=60)
print linregres_posterior
# These 5 values are: slope, intercept, R, p_value, std_err
slopes = linregres_posterior[:,0]
slopemean = slopes.mean()
slopeCI = CI(slopes)
print "The mean slope and its 95% CI are:\t{0:.4g}\t\t[{1:.4g}, {2:.4g}]".format(slopemean,slopeCI[0],slopeCI[1])
intercepts = linregres_posterior[:,1]
interceptmean = intercept.mean()
interceptCI = CI(intercepts)
print "The mean intercept and its 95% CI are:\t{0:.4g}\t\t[{1:.4g}, {2:.4g}]".format(interceptmean,interceptCI[0],
interceptCI[1])
# Plot the data points
plt.scatter(data['order'].values, data['height_cm'].values)
# The linear function we will use to plot fit coefficients
def linfit(x, slope, intercept):
    """Evaluate the line ``slope * x + intercept`` at *x* (scalar or array)."""
    return intercept + slope * x
x = data['order'].values
y = data['height_cm'].values
# Choose linear regressions for 10 of the bootstrap samples at random and plot them
ids = npr.randint(0, linregres_posterior.shape[0], 10)
otherfits = [linfit(x, linregres_posterior[i,0], linregres_posterior[i,1]) for i in ids]
for i in otherfits:
plt.plot(x, i, color='#BBBBBB')
# The fit to the original data
slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
plt.plot(x, linfit(x, slope, intercept), color='black', linewidth=2)
plt.xlim(0,x.max()+1)
plt.show()
from statsmodels.nonparametric.smoothers_lowess import lowess
# for some odd reason this loess function takes the y values as the first argument and x as second
test_df = data[['height_cm', 'order']]
posterior_loess = bayes_bstrp1(test_df, lowess, nbstrp=100, samplesize=60)
print posterior_loess
x = data['order'].values
y = data['height_cm'].values
# To see all the loess curves found:
#for i in posterior_loess:
# plt.plot(i[:,0], i[:,1], color='#BBBBBB')
ids = npr.randint(0, posterior_loess.shape[0], 20)
for i in ids:
plt.plot(posterior_loess[i,:,0], posterior_loess[i,:,1], color='#BBBBBB')
plt.scatter(x, y)
original_loess = lowess(y, x)
plt.plot(original_loess[:,0], original_loess[:,1], color='black', linewidth=2)
plt.xlim(0,x.max()+1)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction to tensor slicing
Step2: Extract tensor slices
Step3: Alternatively, you can use a more Pythonic syntax. Note that tensor slices are evenly spaced over a start-stop range.
Step4: <img src="images/tf_slicing/slice_1d_1.png">
Step5: <img src="images/tf_slicing/slice_1d_2.png">
Step6: <img src="images/tf_slicing/slice_2d_1.png">
Step7: You can also use tf.strided_slice to extract slices of tensors by 'striding' over the tensor dimensions.
Step8: <img src="images/tf_slicing/slice_1d_3.png">
Step9: <img src="images/tf_slicing/gather_1.png">
Step10: <img src="images/tf_slicing/gather_2.png">
Step11: Insert data into tensors
Step12: Methods like tf.scatter_nd which require zero-initialized tensors are similar to sparse tensor initializers. You can use tf.gather_nd and tf.scatter_nd to mimic the behavior of sparse tensor ops.
Step13: <img src="images/tf_slicing/gather_nd_sparse.png">
Step14: This is similar to
Step15: To insert data into a tensor with pre-existing values, use tf.tensor_scatter_nd_add.
Step16: Similarly, use tf.tensor_scatter_nd_sub to subtract values from a tensor with pre-existing values.
Step17: Use tf.tensor_scatter_nd_min to copy element-wise minimum values from one tensor to another.
Step18: Similarly, use tf.tensor_scatter_nd_max to copy element-wise maximum values from one tensor to another.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
t1 = tf.constant([0, 1, 2, 3, 4, 5, 6, 7])
print(tf.slice(t1,
begin=[1],
size=[3]))
print(t1[1:4])
print(t1[-3:])
t2 = tf.constant([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]])
print(t2[:-1, 1:3])
t3 = tf.constant([[[1, 3, 5, 7],
[9, 11, 13, 15]],
[[17, 19, 21, 23],
[25, 27, 29, 31]]
])
print(tf.slice(t3,
begin=[1, 1, 0],
size=[1, 1, 2]))
print(tf.gather(t1,
indices=[0, 3, 6]))
# This is similar to doing
t1[::3]
alphabet = tf.constant(list('abcdefghijklmnopqrstuvwxyz'))
print(tf.gather(alphabet,
indices=[2, 0, 19, 18]))
t4 = tf.constant([[0, 5],
[1, 6],
[2, 7],
[3, 8],
[4, 9]])
print(tf.gather_nd(t4,
indices=[[2], [3], [0]]))
t5 = np.reshape(np.arange(18), [2, 3, 3])
print(tf.gather_nd(t5,
indices=[[0, 0, 0], [1, 2, 1]]))
# Return a list of two matrices
print(tf.gather_nd(t5,
indices=[[[0, 0], [0, 2]], [[1, 0], [1, 2]]]))
# Return one matrix
print(tf.gather_nd(t5,
indices=[[0, 0], [0, 2], [1, 0], [1, 2]]))
t6 = tf.constant([10])
indices = tf.constant([[1], [3], [5], [7], [9]])
data = tf.constant([2, 4, 6, 8, 10])
print(tf.scatter_nd(indices=indices,
updates=data,
shape=t6))
# Gather values from one tensor by specifying indices
new_indices = tf.constant([[0, 2], [2, 1], [3, 3]])
t7 = tf.gather_nd(t2, indices=new_indices)
# Add these values into a new tensor
t8 = tf.scatter_nd(indices=new_indices, updates=t7, shape=tf.constant([4, 5]))
print(t8)
t9 = tf.SparseTensor(indices=[[0, 2], [2, 1], [3, 3]],
values=[2, 11, 18],
dense_shape=[4, 5])
print(t9)
# Convert the sparse tensor into a dense tensor
t10 = tf.sparse.to_dense(t9)
print(t10)
t11 = tf.constant([[2, 7, 0],
[9, 0, 1],
[0, 3, 8]])
# Convert the tensor into a magic square by inserting numbers at appropriate indices
t12 = tf.tensor_scatter_nd_add(t11,
indices=[[0, 2], [1, 1], [2, 0]],
updates=[6, 5, 4])
print(t12)
# Convert the tensor into an identity matrix
t13 = tf.tensor_scatter_nd_sub(t11,
indices=[[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [2, 1], [2, 2]],
updates=[1, 7, 9, -1, 1, 3, 7])
print(t13)
t14 = tf.constant([[-2, -7, 0],
[-9, 0, 1],
[0, -3, -8]])
t15 = tf.tensor_scatter_nd_min(t14,
indices=[[0, 2], [1, 1], [2, 0]],
updates=[-6, -5, -4])
print(t15)
t16 = tf.tensor_scatter_nd_max(t14,
indices=[[0, 2], [1, 1], [2, 0]],
updates=[6, 5, 4])
print(t16)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Point estimate of model parameters
Step2: Posterior estimate of model parameters
|
<ASSISTANT_TASK:>
Python Code:
# Generative model
mu_x = 10.0
sigma_x = 2.0
x_s = edm.Normal(mu_x, sigma_x)
# Sample data produced by model
n_samples = 100
samples = np.zeros(n_samples)
with tf.Session() as sess:
for i in range(n_samples):
samples[i] = sess.run(x_s)
# Descriptive statistics
print('Mean: {}'.format(np.mean(samples)))
print('StDev: {}'.format(np.std(samples)))
# Tear down model and work off observations only
tf.reset_default_graph()
# Model for data
N = 100
theta_mu = tf.Variable(0.0)
theta_sigma = tf.Variable(1.0)
x = edm.Normal(loc=tf.ones(N)*theta_mu, scale=tf.ones(N)*theta_sigma)
x_train = samples[:N]
# Descriptive statistics for observed data
print('Mean: {}'.format(np.mean(x_train)))
print('StDev: {}'.format(np.std(x_train)))
mle = edi.MAP({}, {x: x_train})
mle.run()
sess = ed.get_session()
sess.run([theta_mu, theta_sigma])
tf.reset_default_graph()
theta_mu_d = edm.Normal(0.0, 1.0)
theta_sigma_d = edm.InverseGamma(0.01, 0.01)
x_d = edm.Normal(loc=tf.ones(N)*theta_mu_d, scale=tf.ones(N)*theta_sigma_d)
q_mu = edm.Normal(tf.Variable(0.0), 1.0)
q_sigma = edm.InverseGamma(tf.nn.softplus(tf.Variable(0.01)), tf.nn.softplus(tf.Variable(0.01)))
infer = edi.KLqp({theta_mu_d: q_mu, theta_sigma_d: q_sigma}, {x_d: x_train})
infer.run()
sess = ed.get_session()
sess.run([q_mu, q_sigma])
_ = plt.hist([sess.run(q_mu) for _ in range(10000)], bins=20)
_ = plt.hist([sess.run(q_sigma) for _ in range(10000)], bins=20)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's localize the N100m (using MEG only)
Step2: We can also plot the result using outlines of the head and brain.
Step3: Plot the result in 3D brain with the MRI image using Nilearn
Step4: Calculate and visualise magnetic field predicted by dipole with maximum GOF
Step5: Estimate the time course of a single dipole with fixed position and
|
<ASSISTANT_TASK:>
Python Code:
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.forward import make_forward_dipole
from mne.evoked import combine_evoked
from mne.simulation import simulate_evoked
from nilearn.plotting import plot_anat
from nilearn.datasets import load_mni152_template
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_ave = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_bem = op.join(subjects_dir, 'sample', 'bem', 'sample-5120-bem-sol.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw-trans.fif')
fname_surf_lh = op.join(subjects_dir, 'sample', 'surf', 'lh.white')
evoked = mne.read_evokeds(fname_ave, condition='Right Auditory',
baseline=(None, 0))
evoked.pick_types(meg=True, eeg=False)
evoked_full = evoked.copy()
evoked.crop(0.07, 0.08)
# Fit a dipole
dip = mne.fit_dipole(evoked, fname_cov, fname_bem, fname_trans)[0]
# Plot the result in 3D brain with the MRI image.
dip.plot_locations(fname_trans, 'sample', subjects_dir, mode='orthoview')
color = ['k'] * len(dip)
color[np.argmax(dip.gof)] = 'r'
dip.plot_locations(fname_trans, 'sample', subjects_dir, mode='outlines',
color=color)
subject = 'sample'
mni_pos = dip.to_mni(subject=subject, trans=fname_trans,
subjects_dir=subjects_dir)
mri_pos = dip.to_mri(subject=subject, trans=fname_trans,
subjects_dir=subjects_dir)
# Find an anatomical label for the best fitted dipole
best_dip_idx = dip.gof.argmax()
label = dip.to_volume_labels(fname_trans, subject=subject,
subjects_dir=subjects_dir,
aseg='aparc.a2009s+aseg')[best_dip_idx]
# Draw dipole position on MRI scan and add anatomical label from parcellation
t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
fig_T1 = plot_anat(t1_fname, cut_coords=mri_pos[0],
title=f'Dipole location: {label}')
try:
template = load_mni152_template(resolution=1)
except TypeError: # in nilearn < 0.8.1 this did not exist
template = load_mni152_template()
fig_template = plot_anat(template, cut_coords=mni_pos[0],
title='Dipole loc. (MNI Space)')
fwd, stc = make_forward_dipole(dip, fname_bem, evoked.info, fname_trans)
pred_evoked = simulate_evoked(fwd, stc, evoked.info, cov=None, nave=np.inf)
# find time point with highest GOF to plot
best_idx = np.argmax(dip.gof)
best_time = dip.times[best_idx]
print('Highest GOF %0.1f%% at t=%0.1f ms with confidence volume %0.1f cm^3'
% (dip.gof[best_idx], best_time * 1000,
dip.conf['vol'][best_idx] * 100 ** 3))
# remember to create a subplot for the colorbar
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=[10., 3.4],
gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1],
top=0.85))
vmin, vmax = -400, 400 # make sure each plot has same colour range
# first plot the topography at the time of the best fitting (single) dipole
plot_params = dict(times=best_time, ch_type='mag', outlines='skirt',
colorbar=False, time_unit='s')
evoked.plot_topomap(time_format='Measured field', axes=axes[0], **plot_params)
# compare this to the predicted field
pred_evoked.plot_topomap(time_format='Predicted field', axes=axes[1],
**plot_params)
# Subtract predicted from measured data (apply equal weights)
diff = combine_evoked([evoked, pred_evoked], weights=[1, -1])
plot_params['colorbar'] = True
diff.plot_topomap(time_format='Difference', axes=axes[2:], **plot_params)
fig.suptitle('Comparison of measured and predicted fields '
'at {:.0f} ms'.format(best_time * 1000.), fontsize=16)
fig.tight_layout()
dip_fixed = mne.fit_dipole(evoked_full, fname_cov, fname_bem, fname_trans,
pos=dip.pos[best_idx], ori=dip.ori[best_idx])[0]
dip_fixed.plot(time_unit='s')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup paths and prepare raw data.
Step2: 1) Fit ICA model using the FastICA algorithm.
Step3: 2) identify bad components by analyzing latent sources.
Step4: 3) Assess component selection and unmixing quality.
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.preprocessing import ICA
from mne.preprocessing import create_ecg_epochs, create_eog_epochs
from mne.datasets import sample
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, None, fir_design='firwin') # already lowpassed @ 40
raw.annotations = mne.Annotations([1], [10], 'BAD')
raw.plot(block=True)
# For the sake of example we annotate first 10 seconds of the recording as
# 'BAD'. This part of data is excluded from the ICA decomposition by default.
# To turn this behavior off, pass ``reject_by_annotation=False`` to
# :meth:`mne.preprocessing.ICA.fit`.
raw.annotations = mne.Annotations([0], [10], 'BAD')
ica = ICA(n_components=0.95, method='fastica', random_state=0, max_iter=100)
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
stim=False, exclude='bads')
ica.fit(raw, picks=picks, decim=3, reject=dict(mag=4e-12, grad=4000e-13),
verbose='warning') # low iterations -> does not fully converge
# maximum number of components to reject
n_max_ecg, n_max_eog = 3, 1 # here we don't expect horizontal EOG components
title = 'Sources related to %s artifacts (red)'
# generate ECG epochs use detection via phase statistics
ecg_epochs = create_ecg_epochs(raw, tmin=-.5, tmax=.5, picks=picks)
ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, method='ctps')
ica.plot_scores(scores, exclude=ecg_inds, title=title % 'ecg', labels='ecg')
show_picks = np.abs(scores).argsort()[::-1][:5]
ica.plot_sources(raw, show_picks, exclude=ecg_inds, title=title % 'ecg')
ica.plot_components(ecg_inds, title=title % 'ecg', colorbar=True)
ecg_inds = ecg_inds[:n_max_ecg]
ica.exclude += ecg_inds
# detect EOG by correlation
eog_inds, scores = ica.find_bads_eog(raw)
ica.plot_scores(scores, exclude=eog_inds, title=title % 'eog', labels='eog')
show_picks = np.abs(scores).argsort()[::-1][:5]
ica.plot_sources(raw, show_picks, exclude=eog_inds, title=title % 'eog')
ica.plot_components(eog_inds, title=title % 'eog', colorbar=True)
eog_inds = eog_inds[:n_max_eog]
ica.exclude += eog_inds
# estimate average artifact
ecg_evoked = ecg_epochs.average()
ica.plot_sources(ecg_evoked, exclude=ecg_inds) # plot ECG sources + selection
ica.plot_overlay(ecg_evoked, exclude=ecg_inds) # plot ECG cleaning
eog_evoked = create_eog_epochs(raw, tmin=-.5, tmax=.5, picks=picks).average()
ica.plot_sources(eog_evoked, exclude=eog_inds) # plot EOG sources + selection
ica.plot_overlay(eog_evoked, exclude=eog_inds) # plot EOG cleaning
# check the amplitudes do not change
ica.plot_overlay(raw) # EOG artifacts remain
# To save an ICA solution you can say:
# ica.save('my_ica.fif')
# You can later load the solution by saying:
# from mne.preprocessing import read_ica
# read_ica('my_ica.fif')
# Apply the solution to Raw, Epochs or Evoked like this:
# ica.apply(epochs)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading all the input data
Step2: Plotting the inputs
Step3: Plotting the depth
Step4: The red points indicate the position of the baseline turbines, the contour plot illustrate the water depth in meters and the red line illustrates the position of the borders limiting the domain of exploration of the optimization.
Step5: Setting up TOPFARM
Step6: Exercise make a loop to plot the convergence of AEP vs the number of wind direction points
Step7: Optimization using the AEP
Step8: Copying the new positions of the turbines
Step9: Plotting the new positions
Step10: Calculating the new AEP with a higher wind rose accuracy
|
<ASSISTANT_TASK:>
Python Code:
# FUSEDWind imports
from fusedwind.plant_flow.vt import GenericWindFarmTurbineLayout, WTPC, WeibullWindRoseVT, GenericWindRoseVT
# Topfarm lib imports
from topfarm.aep import AEP
from topfarm.layout_distribution import spiral, DistributeSpiral, DistributeXY, DistributeFilledPolygon
from topfarm.plot import OffshorePlot, PrintOutputs, plot_wt_layout, plot_wind_rose
from topfarm.tlib import DistFromTurbines, PolyFill, document, DistFromBorders #,ConverHullArea
from topfarm.foundation import FoundationLength
from topfarm.elnet import ElNetLength, elnet
from topfarm.optimizers import *
from topfarm.topfarm import Topfarm
#GCL imports
from gclarsen.fusedwasp import PlantFromWWH, WTDescFromWTG
from gclarsen.fused import FGCLarsen
from numpy import *
import numpy as np
# For plotting
import pylab as plt
datadir = './examples/notebook/'
dat = loadtxt(datadir+'WaterDepth1.dat')
X, Y = meshgrid(linspace(0., 1000., 50), linspace(0., 1000., 50))
depth = array(zip(X.flatten(), Y.flatten(), dat.flatten()))
borders = array([[200, 200], [150, 500], [200, 800], [600, 900], [700, 700], [900, 500], [800, 200], [500, 100], [200, 200]])
baseline = array([[587.5, 223.07692308], [525., 346.15384615], [837.5, 530.76923077], [525., 530.76923077], [525., 838.46153846], [837.5, 469.23076923]])
wt_desc = WTDescFromWTG(datadir+'V80-2MW-offshore.wtg').wt_desc
wt_layout = GenericWindFarmTurbineLayout([WTPC(wt_desc=wt_desc, position=pos) for pos in baseline])
# The wind rose
weibull_array = np.array([[ 0.00000000e+00, 3.59673400e-02, 9.22422800e+00, 2.38867200e+00],
[ 3.00000000e+01, 3.94977300e-02, 9.86435600e+00, 2.44726600e+00],
[ 6.00000000e+01, 5.17838000e-02, 9.65220200e+00, 2.41992200e+00],
[ 9.00000000e+01, 6.99794900e-02, 9.98217800e+00, 2.58789100e+00],
[ 1.20000000e+02, 8.36383000e-02, 1.00946000e+01, 2.74804700e+00],
[ 1.50000000e+02, 6.43412500e-02, 9.64369000e+00, 2.59179700e+00],
[ 1.80000000e+02, 8.64220000e-02, 9.63377500e+00, 2.58007800e+00],
[ 2.10000000e+02, 1.17690000e-01, 1.05678600e+01, 2.54492200e+00],
[ 2.40000000e+02, 1.51555100e-01, 1.14525200e+01, 2.46679700e+00],
[ 2.70000000e+02, 1.47361100e-01, 1.17423700e+01, 2.60351600e+00],
[ 3.00000000e+02, 1.00109800e-01, 1.16923200e+01, 2.62304700e+00],
[ 3.30000000e+02, 5.16542400e-02, 1.01385800e+01, 2.32226600e+00]])
wind_rose = WeibullWindRoseVT()
wind_rose.wind_directions = weibull_array[:,0]
wind_rose.frequency = weibull_array[:,1]
wind_rose.k = weibull_array[:,3]
wind_rose.A = weibull_array[:,2]
# Minimum distance between turbines
dist_WT_D = 3.0
%matplotlib inline
import pylab as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['legend.fontsize'] = 14
plt.rcParams['axes.titleweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 14
# To see all the options:
#plt.rcParams.keys()
plot_wt_layout(wt_layout, borders, depth)
plot_wind_rose(wind_rose)
ws = linspace(4, 25, 21)
wd = linspace(0, 360, 36)[:-1]
aep = AEP(wt_layout=wt_layout,
wind_rose=wind_rose,
wf=FGCLarsen(),
wind_speeds=ws,
wind_directions=wd,
scaling=1.0,
wt_positions=baseline)
aep.run()
print 'Net AEP=',aep.net_aep/1e6, 'MWh'
document(CONMINOpt)
components = {
'foundation': FoundationLength(borders=borders, scaling=0.0, depth=depth),
'distribute': DistributeXY(wt_layout=wt_layout, borders=borders),
'elnet': ElNetLength(scaling=0.0),
'wt_dist': DistFromTurbines(scaling=wt_desc.rotor_diameter * dist_WT_D),
'dist_from_borders': DistFromBorders(wt_layout=wt_layout, borders=borders, scaling=0.0),
'plotting': OffshorePlot(baseline=baseline, borders=borders, depth=depth, distribution='xy',
add_inputs=['elnet_length', 'foundation_length', 'min_dist' ],
title='foundation_length'),
'driver': COBYLAOpt(rhobeg=1)}
workflows = {'driver': ['distribute', 'foundation','wt_dist', 'elnet', 'dist_from_borders', 'plotting']}
#objectives = {'driver': 'foundation.foundation_length'}
objectives = {'driver': '0.5 * foundation.foundation_length + 0.5*elnet.elnet_length'}
constraints = {'driver': ['wt_dist.min_dist>0.8',
'elnet.elnet_length<1.1',
'dist_from_borders'
]}
design_variables = {'driver': 'distribute'}
connections = {'distribute.wt_positions': ['foundation.wt_positions',
'wt_dist.wt_positions',
'plotting.wt_positions',
'elnet.wt_positions',
'dist_from_borders.wt_positions'],
'foundation.foundation_length': 'plotting.foundation_length',
'foundation.foundations': 'plotting.foundations',
'elnet.elnet_layout': 'plotting.elnet_layout',
'elnet.elnet_length': 'plotting.elnet_length',
'wt_dist.min_dist': 'plotting.min_dist'}
input_parameters = {}
top = Topfarm(components, workflows, objectives, constraints, design_variables, connections, input_parameters)
top.run()
baseline = top.distribute.wt_positions
components = {
'elnet': ElNetLength(scaling=0.0),
'foundation': FoundationLength(borders=borders, scaling=0.0, depth=depth),
'aep': AEP(wt_layout=wt_layout,
wind_rose=wind_rose,
wf=FGCLarsen(),
wind_speeds=[4, 8, 12],
wind_directions=linspace(0, 360, 12)[:-1],
scaling=0.0),
#'area': ConverHullArea(wt_layout=wt_layout, scaling=0.0),
'dist_from_borders': DistFromBorders(wt_layout=wt_layout, borders=borders, scaling=0.0),
'wt_dist': DistFromTurbines(scaling=wt_desc.rotor_diameter * dist_WT_D),
'distribute': DistributeXY(wt_layout=wt_layout, borders=borders),
'plotting': OffshorePlot(baseline=baseline, borders=borders, depth=depth, distribution='xy',
add_inputs=['area', 'capacity_factor', 'elnet_length', 'net_aep', 'foundation_length', 'min_dist' ],
title='capacity_factor'),
'driver': COBYLAOpt(rhobeg=1)}
workflows = {'driver': ['distribute', 'foundation', 'elnet', 'aep', 'dist_from_borders', 'wt_dist', 'plotting']}
objectives = {'driver': '-aep.net_aep'}
# objectives = {'driver': '-aep.net_aep + 0.4*elnet.elnet_length'}
#objectives = {'driver': '-aep.capacity_factor/area.area'}
constraints = {'driver': ['wt_dist.min_dist>0.8',
'foundation.foundation_length<1.02',
'dist_from_borders'
#'foundation.foundation_length<1.02',
#'elnet.elnet_length<1.02',
]}
design_variables = {'driver': 'distribute'}
connections = {'distribute.wt_positions': ['foundation.wt_positions',
'elnet.wt_positions',
'wt_dist.wt_positions',
'aep.wt_positions',
'plotting.wt_positions',
'dist_from_borders.wt_positions',
#'area.wt_positions'
],
'foundation.foundation_length': 'plotting.foundation_length',
'foundation.foundations': 'plotting.foundations',
'elnet.elnet_layout': 'plotting.elnet_layout',
'elnet.elnet_length': 'plotting.elnet_length',
'wt_dist.min_dist': 'plotting.min_dist',
'aep.capacity_factor': 'plotting.capacity_factor',
'aep.net_aep': 'plotting.net_aep',
#'area.area': 'plotting.area'
}
input_parameters = {}
top2 = Topfarm(components, workflows, objectives, constraints, design_variables, connections, input_parameters)
top2.run()
baseline = top2.distribute.wt_positions
plot_wt_layout(wt_layout, borders, depth)
ws = linspace(4, 25, 21)
wd = linspace(0, 360, 181)[:-1]
aep = AEP(wt_layout=wt_layout,
wind_rose=wind_rose,
wf=FGCLarsen(),
wind_speeds=ws,
wind_directions=wd,
scaling=1.0,
wt_positions=baseline)
aep.run()
print 'Net AEP=',aep.net_aep/1e6, 'MWh'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a meshed screen with a central hole
Step2: The z-position of all mesh points is computed to lie on a toroid with a 1.625 m focal length.<br>
Step3: The screen is placed at z=3.625 m from the origin. A beam is assumed to propagate in the z direction
Step4: define the timing
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from scipy import constants
import pygmsh
from MeshedFields import *
with pygmsh.geo.Geometry() as geom:
Lx = 0.215
Ly = 0.150
Ri = 0.002
lca = 0.005
lci = 0.001
stretch = 50.0
p1 = geom.add_point([Lx/2.0*stretch, Ly/2.0], lca)
p2 = geom.add_point([-Lx/2.0*stretch, Ly/2.0], lca)
p3 = geom.add_point([-Lx/2.0*stretch, -Ly/2.0], lca)
p4 = geom.add_point([Lx/2.0*stretch, -Ly/2.0], lca)
p1i = geom.add_point([Ri*stretch, 0.0], lci)
p2i = geom.add_point([0.0, Ri], lci)
p3i = geom.add_point([-Ri*stretch, 0.0], lci)
p4i = geom.add_point([0.0, -Ri], lci)
pc = geom.add_point([0.0, 0.0])
pa = geom.add_point([0.0, 0.01])
# the mesh is circumscribed with a polygon
l1 = geom.add_line(p1, p2)
l2 = geom.add_line(p2, p3)
l3 = geom.add_line(p3, p4)
l4 = geom.add_line(p4, p1)
outline = geom.add_curve_loop([l1, l2, l3, l4])
# the hole is circumscribed with four elliptic arcs
e1i = geom.add_ellipse_arc(start=p1i, center=pc, point_on_major_axis=pa, end=p2i)
e2i = geom.add_ellipse_arc(start=p2i, center=pc, point_on_major_axis=pa, end=p3i)
e3i = geom.add_ellipse_arc(start=p3i, center=pc, point_on_major_axis=pa, end=p4i)
e4i = geom.add_ellipse_arc(start=p4i, center=pc, point_on_major_axis=pa, end=p1i)
hole = geom.add_curve_loop([e1i,e2i,e3i,e4i])
pl = geom.add_plane_surface(outline, holes=[hole])
mesh = geom.generate_mesh()
mesh
# un-stretch
pts = np.array([np.array([p[0]/stretch,p[1],0.0]) for p in mesh.points])
tris = mesh.cells_dict['triangle']
def ToroidZ(x, y, f):
    """z-coordinate of a toroidal mirror surface at transverse point (x, y).

    The surface combines a circle of radius ``f`` in the y-z plane with a
    circle of radius ``2*f`` in the x-z plane (at y == 0), shifted so that
    ToroidZ(0, 0, f) == 0.  Values are negative away from the vertex.

    Parameters
    ----------
    x, y : float
        Transverse coordinates on the mirror, in the same units as ``f``.
    f : float
        Toroid scale parameter; the caller uses it as the focal length.
    """
    # ``**`` instead of math.pow keeps this a pure-float expression;
    # the outer sqrt raises ValueError for points outside the torus.
    return math.sqrt((math.sqrt(f * f - y * y) + f) ** 2 - x * x) - 2 * f
pts = np.array([np.array([p[0],p[1],ToroidZ(p[0],p[1],1.625)]) for p in pts])
screen = MeshedField(pts,tris)
print("%d points" % len(screen.points))
print("%d triangles" % len(screen.triangles))
area = screen.MeshArea()
normals = screen.MeshNormals()
average = np.sum(normals, axis=0)/screen.Np
print("total mesh area = %7.3f cm²" % (1.0e4*np.sum(area)))
print("screen normal = %s" % average)
screen.ShowMeshedField(showAxes=True)
def RotXZ(φ):
    """Return the 3x3 rotation matrix by angle φ (radians) in the x-z plane."""
    c, s = np.cos(φ), np.sin(φ)
    return np.array([[c, 0.0, -s],
                     [0.0, 1.0, 0.0],
                     [s, 0.0, c]])
RR = RotXZ(45.0/180.0*math.pi)
pts = np.array([np.dot(RR,p) for p in pts])
screen = MeshedField(pts,tris)
print("%d points" % len(screen.points))
print("%d triangles" % len(screen.triangles))
area = screen.MeshArea()
normals = screen.MeshNormals()
average = np.sum(normals, axis=0)/screen.Np
print("total mesh area = %7.3f cm²" % (1.0e4*np.sum(area)))
print("screen normal = %s" % average)
screen.ShowMeshedField(showAxes=True)
pts = np.array([p+np.array([0.0,0.0,3.625]) for p in pts])
screen = MeshedField(pts,tris)
screen.ShowMeshedField(showAxes=True)
# time step
screen.dt = 0.5e-13
# some time shift of the waveform start
delay = 15.0e-12
# all points use the same timing grid
screen.Nt = 800
screen.t0 = np.array([p[2]/constants.c-screen.Nt/2*screen.dt+delay for p in screen.pos])
filename="OL8_ToroidalMirrorWithHole.h5"
screen.WriteMeshedField(filename)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.
Step2: Preprocessing
Step3: And here I'm creating dictionaries to convert words to integers and back again, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.
Step4: Subsampling
Step5: Making batches
Step6: Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, <b>so I make one row per input-target pair</b>. This is a generator function by the way, helps save memory.
Step7: Building the graph
Step8: Embedding
Step9: Negative sampling
Step10: Validation
Step11: Training
Step12: Restore the trained network if you need to
Step13: Visualizing the word vectors
|
<ASSISTANT_TASK:>
Python Code:
import time
import numpy as np
import tensorflow as tf
import utils
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
    """tqdm progress bar wired up as a ``urlretrieve`` reporthook."""

    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        """Advance the bar; urlretrieve calls this after every downloaded block."""
        self.total = total_size
        newly_downloaded = block_size * (block_num - self.last_block)
        self.update(newly_downloaded)
        self.last_block = block_num
if not isfile(dataset_filename):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
urlretrieve(
'http://mattmahoney.net/dc/text8.zip',
dataset_filename,
pbar.hook)
if not isdir(dataset_folder_path):
with zipfile.ZipFile(dataset_filename) as zip_ref:
zip_ref.extractall(dataset_folder_path)
with open('data/text8') as f:
text = f.read()
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
np.random.rand()
## Your code here
from collections import Counter
word_counter = Counter(int_words)
#Absolute frequency
WORD_THRESHOLD = 300
prob_dict = {}
for word in word_counter:
prob = 1 - np.sqrt(WORD_THRESHOLD / word_counter[word])
prob_dict[word] = prob
'''
np.random.rand restituisce valore tra 0 e 1
prod_dict[word] rappresenta la probabilità che la data parola debba essere scartata
1 - prod_dict[word] invece la prob che la parola debba essere tenuta
Se la prob_dict[word] è alta (molto prob da scartare), 1 - prob_dict[word] sarà molto basso
di conseguenza sarà improbabile che np.random.rand() restituirà un valore che superi la soglia per essere ammesso nella lista
'''
train_words = []
for word in int_words:
if np.random.rand() < (1 - prob_dict[word]):
train_words.append(word)
len(train_words)
from random import randint
def get_target(words, idx, window_size=5):
    """Return the set of context words in a random-sized window around idx.

    The half-width R is drawn uniformly from [1, window_size], so words
    closer to the target are sampled more often than distant ones.  The
    target word ``words[idx]`` itself is never included.
    """
    R = randint(1, window_size)
    # Clamp the left edge at 0; Python slicing already clamps the right edge.
    start = max(idx - R, 0)
    # BUGFIX: the "before" slice must end at idx (the original used the
    # window size R as the slice *end index*, returning the wrong words).
    before = words[start:idx]
    after = words[idx + 1:idx + 1 + R]
    # A set avoids duplicate context words.
    return set(before + after)
def get_batches(words, batch_size, window_size=5):
    """Generate (inputs, targets) batches of skip-gram training pairs.

    Each word in a batch is paired with every word of its (random-sized)
    context window, producing one input-target pair per row.  Only full
    batches are yielded; any trailing remainder of ``words`` is dropped.
    """
    n_full = len(words) // batch_size
    words = words[:n_full * batch_size]
    for start in range(0, len(words), batch_size):
        batch = words[start:start + batch_size]
        inputs, targets = [], []
        for pos in range(len(batch)):
            center = batch[pos]
            context = get_target(batch, pos, window_size)
            targets.extend(context)
            # Repeat the center word once per context word.
            inputs.extend([center] * len(context))
        yield inputs, targets
text_x, test_y = next(get_batches(int_words, batch_size=128, window_size=5))
print(np.array(text_x).shape)
print(np.array(test_y).shape)
# Build the skip-gram training graph: placeholders, embedding lookup,
# and a sampled-softmax (negative-sampling) loss.
train_graph = tf.Graph()
with train_graph.as_default():
    # Batch size may vary
    inputs = tf.placeholder(tf.int32, [None], name='inputs')
    # Batch size and window may vary
    labels = tf.placeholder(tf.int32, [None, None], name='labels')
n_vocab = len(int_to_vocab)
n_embedding = 300 # Number of embedding features
with train_graph.as_default():
    embedding = tf.Variable(tf.truncated_normal((n_vocab, n_embedding),stddev=0.1))# create embedding weight matrix here
    # Look the input word ids up in the embedding matrix (hidden-layer output)
    embed = tf.nn.embedding_lookup(params=embedding, ids=inputs)# use tf.nn.embedding_lookup to get the hidden layer output
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
    # Per the sampled_softmax_loss docs, weights must have shape [num_classes, dim]
    softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding),stddev=0.01))# create softmax weight matrix here
    # The labels are always the surrounding context words
    softmax_b = tf.Variable(tf.zeros(n_vocab))# create softmax biases here
    # Calculate the loss using negative sampling
    loss = tf.nn.sampled_softmax_loss(weights=softmax_w,
                                      biases=softmax_b,
                                      labels=labels,
                                      inputs=embed,
                                      num_sampled=n_sampled,
                                      num_classes=n_vocab)
    cost = tf.reduce_mean(loss)
    optimizer = tf.train.AdamOptimizer().minimize(cost)
import random
with train_graph.as_default():
## From Thushan Ganegedara's implementation
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100
# pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(valid_examples,
random.sample(range(1000,1000+valid_window), valid_size//2))
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embedding = embedding / norm
valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
# If the checkpoints directory doesn't exist:
!mkdir checkpoints
for i in range(1000):
sys.stdout.write("\r" + str(i))
sys.stdout.flush()
import sys
epochs = 10
batch_size = 1000
window_size = 10
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
iteration = 1
loss = 0
sess.run(tf.global_variables_initializer())
for e in range(1, epochs+1):
batches = get_batches(train_words, batch_size, window_size)
start = time.time()
for x, y in batches:
feed = {inputs: x,
labels: np.array(y)[:, None]}
train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
loss += train_loss
if iteration % 100 == 0:
end = time.time()
sys.stdout.write("\rEpoch {}/{} ".format(e, epochs) +
"Iteration: {} ".format(iteration) +
"Avg. Training loss: {:.4f} ".format(loss/100) +
"{:.4f} sec/batch".format((end-start)/100))
sys.stdout.flush()
loss = 0
start = time.time()
if iteration % 1000 == 0:
## From Thushan Ganegedara's implementation
# note that this is expensive (~20% slowdown if computed every 500 steps)
sim = similarity.eval()
for i in range(valid_size):
valid_word = int_to_vocab[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = int_to_vocab[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
iteration += 1
save_path = saver.save(sess, "checkpoints/text8.ckpt")
embed_mat = sess.run(normalized_embedding)
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
embed_mat = sess.run(embedding)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
plt.scatter(*embed_tsne[idx, :], color='steelblue')
plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Rather than putting True or False directly in our code, we usually get boolean values from boolean operators. These are operators that answer yes/no questions. We'll go through some of these operators below.
Step3: Comparisons frequently work like you'd hope
Step4: But sometimes they can be tricky
Step5: Comparison operators can be combined with the arithmetic operators we've already seen to express a virtually limitless range of mathematical tests. For example, we can check if a number is odd by checking that the modulus with 2 returns 1
Step7: Remember to use == instead of = when making comparisons. If you write n == 2 you are asking about the value of n. When you write n = 2 you are changing the value of n.
Step8: Quick, can you guess the value of this expression?
Step9: (Click the "output" button to see the answer)
Step10: The if and else keywords are often used in other languages; its more unique keyword is elif, a contraction of "else if".
Step11: Boolean conversion
Step12: We can use non-boolean objects in if conditions and other places where a boolean would be expected. Python will implicitly treat them as their corresponding boolean value
|
<ASSISTANT_TASK:>
Python Code:
x = True
print(x)
print(type(x))
def can_run_for_president(age):
    """Can someone of the given age run for president in the US?"""
    # The US Constitution says you must be at least 35 years old
    return age >= 35
print("Can a 19-year-old run for president?", can_run_for_president(19))
print("Can a 45-year-old run for president?", can_run_for_president(45))
3.0 == 3
'3' == 3
def is_odd(n):
    """Return True exactly when n leaves remainder 1 on division by 2."""
    remainder = n % 2
    return remainder == 1
print("Is 100 odd?", is_odd(100))
print("Is -1 odd?", is_odd(-1))
def can_run_for_president(age, is_natural_born_citizen):
    """Can someone of the given age and citizenship status run for president in the US?"""
    # The US Constitution says you must be a natural born citizen *and* at least 35 years old
    return is_natural_born_citizen and (age >= 35)
print(can_run_for_president(19, True))
print(can_run_for_president(55, False))
print(can_run_for_president(55, True))
True or True and False
def inspect(x):
    """Print a one-line classification of x as zero, positive, or negative.

    The final branch is reachable for values that compare False to every
    ordering test, e.g. float('nan').
    """
    if x == 0:
        description = "is zero"
    elif x > 0:
        description = "is positive"
    elif x < 0:
        description = "is negative"
    else:
        description = "is unlike anything I've ever seen..."
    print(x, description)
inspect(0)
inspect(-15)
def f(x):
    """Demonstrate conditional vs. unconditional execution of print calls."""
    is_positive = x > 0
    if is_positive:
        print("Only printed when x is positive; x =", x)
        print("Also only printed when x is positive; x =", x)
    print("Always printed, regardless of x's value; x =", x)
f(1)
f(0)
print(bool(1)) # all numbers are treated as true, except 0
print(bool(0))
print(bool("asf")) # all strings are treated as true, except the empty string ""
print(bool(""))
# Generally empty sequences (strings, lists, and other types we've yet to see like lists and tuples)
# are "falsey" and the rest are "truthy"
if 0:
print(0)
elif "spam":
print("spam")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up default parameters. We use 28 subjects, which is the median sample size of the set of fMRI studies published in 2015 that were estimated from Neurosynth in the paper. We use a heuristic correction for multiple comparisons of p<0.001 and 10 voxels, like that show by Eklund et al. (2016, PNAS) to result in Type I error rates of 0.6-0.9.
Step2: In order to recreate the figure from the paper exactly, we need to fix the random seed so that it will generate exactly the same random data. If you wish to generate new data, then set the recreate_paper_figure variable to False and rerun the notebook.
Step3: Use the standard MNI152 2mm brain mask as the mask for the generated data
Step4: Generate a dataset for each subject. fMRI data within the mask are generated using a Gaussian distribution (mean=1000, standard deviation=100). Behavioral data are generated using a Gaussian distribution (mean=100, standard deviation=1).
Step5: Spatially smooth data using a 6 mm FWHM Gaussian kernel
Step6: Use FSL's GLM tool to run a regression at each voxel
Step7: Use FSL's cluster tool to identify clusters of activation that exceed the specified cluster-forming threshold
Step8: Generate a plot showing the brain-behavior relation from the top cluster
Step9: Generate a thresholded statistics image for display
Step10: Generate a figure showing the location of the selected activation focus.
|
<ASSISTANT_TASK:>
Python Code:
import numpy
import nibabel
import os
import nilearn.plotting
import matplotlib.pyplot as plt
from statsmodels.regression.linear_model import OLS
import nipype.interfaces.fsl as fsl
import scipy.stats
if not 'FSLDIR' in os.environ.keys():
raise Exception('This notebook requires that FSL is installed and the FSLDIR environment variable is set')
%matplotlib inline
pthresh=0.001 # cluster forming threshold
cthresh=10 # cluster extent threshold
nsubs=28 # number of subjects
recreate_paper_figure=False
if recreate_paper_figure:
seed=6636
else:
seed=numpy.ceil(numpy.random.rand()*100000).astype('int')
print(seed)
numpy.random.seed(seed)
maskimg=os.path.join(os.getenv('FSLDIR'),'data/standard/MNI152_T1_2mm_brain_mask.nii.gz')
mask=nibabel.load(maskimg)
maskdata=mask.get_data()
maskvox=numpy.where(maskdata>0)
print('Mask includes %d voxels'%len(maskvox[0]))
imgmean=1000 # mean activation within mask
imgstd=100 # standard deviation of noise within mask
behavmean=100 # mean of behavioral regressor
behavstd=1 # standard deviation of behavioral regressor
data=numpy.zeros((maskdata.shape + (nsubs,)))
for i in range(nsubs):
tmp=numpy.zeros(maskdata.shape)
tmp[maskvox]=numpy.random.randn(len(maskvox[0]))*imgstd+imgmean
data[:,:,:,i]=tmp
newimg=nibabel.Nifti1Image(data,mask.get_affine(),mask.get_header())
newimg.to_filename('fakedata.nii.gz')
regressor=numpy.random.randn(nsubs,1)*behavstd+behavmean
numpy.savetxt('regressor.txt',regressor)
smoothing_fwhm=6 # FWHM in millimeters
smooth=fsl.IsotropicSmooth(fwhm=smoothing_fwhm,
in_file='fakedata.nii.gz',
out_file='fakedata_smooth.nii.gz')
smooth.run()
glm = fsl.GLM(in_file='fakedata_smooth.nii.gz',
design='regressor.txt',
out_t_name='regressor_tstat.nii.gz',
demean=True)
glm.run()
tcut=scipy.stats.t.ppf(1-pthresh,nsubs-1)
cl = fsl.Cluster()
cl.inputs.threshold = tcut
cl.inputs.in_file = 'regressor_tstat.nii.gz'
cl.inputs.out_index_file='tstat_cluster_index.nii.gz'
results=cl.run()
clusterimg=nibabel.load(cl.inputs.out_index_file)
clusterdata=clusterimg.get_data()
indices=numpy.unique(clusterdata)
clustersize=numpy.zeros(len(indices))
clustermean=numpy.zeros((len(indices),nsubs))
indvox={}
for c in range(1,len(indices)):
indvox[c]=numpy.where(clusterdata==c)
clustersize[c]=len(indvox[c][0])
for i in range(nsubs):
tmp=data[:,:,:,i]
clustermean[c,i]=numpy.mean(tmp[indvox[c]])
corr=numpy.corrcoef(regressor.T,clustermean[-1])
print('Found %d clusters exceeding p<%0.3f and %d voxel extent threshold'%(c,pthresh,cthresh))
print('Largest cluster: correlation=%0.3f, extent = %d voxels'%(corr[0,1],len(indvox[c][0])))
# set cluster to show - 0 is the largest, 1 the second largest, and so on
cluster_to_show=0
# translate this variable into the index of indvox
cluster_to_show_idx=len(indices)-cluster_to_show-1
# plot the (circular) relation between fMRI signal and
# behavioral regressor in the chosen cluster
plt.scatter(regressor.T,clustermean[cluster_to_show_idx])
plt.title('Correlation = %0.3f'%corr[0,1],fontsize=14)
plt.xlabel('Fake behavioral regressor',fontsize=18)
plt.ylabel('Fake fMRI data',fontsize=18)
m, b = numpy.polyfit(regressor[:,0], clustermean[cluster_to_show_idx], 1)
axes = plt.gca()
X_plot = numpy.linspace(axes.get_xlim()[0],axes.get_xlim()[1],100)
plt.plot(X_plot, m*X_plot + b, '-')
plt.savefig('scatter.png',dpi=600)
tstat=nibabel.load('regressor_tstat.nii.gz').get_data()
thresh_t=clusterdata.copy()
cutoff=numpy.min(numpy.where(clustersize>cthresh))
thresh_t[thresh_t<cutoff]=0
thresh_t=thresh_t*tstat
thresh_t_img=nibabel.Nifti1Image(thresh_t,mask.get_affine(),mask.get_header())
mid=len(indvox[cluster_to_show_idx][0])/2
coords=numpy.array([indvox[cluster_to_show_idx][0][mid],
indvox[cluster_to_show_idx][1][mid],
indvox[cluster_to_show_idx][2][mid],1]).T
mni=mask.get_qform().dot(coords)
nilearn.plotting.plot_stat_map(thresh_t_img,
os.path.join(os.getenv('FSLDIR'),'data/standard/MNI152_T1_2mm_brain.nii.gz'),
threshold=cl.inputs.threshold,
cut_coords=mni[:3])
plt.savefig('slices.png',dpi=600)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Word counting
Step5: Write a function count_words that takes a list of words and returns a dictionary where the keys in the dictionary are the unique words in the list and the values are the word counts.
Step7: Write a function sort_word_counts that return a list of sorted word counts
Step8: Perform a word count analysis on Chapter 1 of Moby Dick, whose text can be found in the file mobydick_chapter1.txt
Step9: Create a "Cleveland Style" dotplot of the counts of the top 50 words using Matplotlib. If you don't know what a dotplot is, you will have to do some research...
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
def tokenize(s, stop_words=None, punctuation='`~!@#$%^&*()_-+={[}]|\:;"<,>.?/}\t'):
Split a string into a list of words, removing punctuation and stop words.
all_words= []
for line in s.splitlines():
words = line.split(" ")
all_words.extend(words)
for words in all_words:
filter(all_words, punctuation)
return all_words
tokenize("There is no cow level \nWow, sally that was great.")
assert tokenize("This, is the way; that things will end", stop_words=['the', 'is']) == \
['this', 'way', 'that', 'things', 'will', 'end']
wasteland =
APRIL is the cruellest month, breeding
Lilacs out of the dead land, mixing
Memory and desire, stirring
Dull roots with spring rain.
assert tokenize(wasteland, stop_words='is the of and') == \
['april','cruellest','month','breeding','lilacs','out','dead','land',
'mixing','memory','desire','stirring','dull','roots','with','spring',
'rain']
def count_words(data):
Return a word count dictionary from the list of words in data.
# YOUR CODE HERE
raise NotImplementedError()
assert count_words(tokenize('this and the this from and a a a')) == \
{'a': 3, 'and': 2, 'from': 1, 'the': 1, 'this': 2}
def sort_word_counts(wc):
Return a list of 2-tuples of (word, count), sorted by count descending.
# YOUR CODE HERE
raise NotImplementedError()
assert sort_word_counts(count_words(tokenize('this and a the this this and a a a'))) == \
[('a', 4), ('this', 3), ('and', 2), ('the', 1)]
# YOUR CODE HERE
raise NotImplementedError()
assert swc[0]==('i',43)
assert len(swc)==848
# YOUR CODE HERE
raise NotImplementedError()
assert True # use this for grading the dotplot
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code::
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, max_error, explained_variance_score, mean_absolute_percentage_error
# initialise & fit Decision Tree Regressor
model = DecisionTreeRegressor(criterion='squared_error',
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
random_state=101)
model.fit(X_train, y_train)
# create dictionary that contains feature importance
feature_importance= dict(zip(X_train.columns, model.feature_importances_))
print('Feature Importance',feature_importance)
# make prediction for test data & evaluate performance
y_pred = model.predict(X_test)
print('RMSE:',mean_squared_error(y_test, y_pred, squared = False))
print('MAE:',mean_absolute_error(y_test, y_pred))
print('MAPE:',mean_absolute_percentage_error(y_test, y_pred))
print('Max Error:',max_error(y_test, y_pred))
print('Explained Variance Score:',explained_variance_score(y_test, y_pred))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualizing the data
|
<ASSISTANT_TASK:>
Python Code:
a = pk.load(open("slide14.pkl","rb"), encoding='latin1')
print("DATA = ", a)
print("ONE ROW = ", a[0])
plt.rcParams['figure.figsize'] = [6,4]
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
plt.rcParams['legend.fontsize'] = 12
plt.figure()
skip=20
plt.scatter( a[::skip,4], a[::skip,5], marker='x', alpha=1,edgecolor="none", label="$m^*$ - ground truth")
plt.scatter( a[::skip,4], a[::skip,6], marker='.', color='b',alpha=1,edgecolor="none", label="$m_L^*$ - Lambert's approximation")
plt.scatter( a[::skip,4], a[::skip,7], marker='.', color='r',alpha=1,edgecolor="none", label="$m_D^*$ - MIMA")
#plt.hexbin( a[:,5]/a[:,4], a[:,7] / a[:,6], bins='log')
plt.ylabel("Kg")
plt.xlabel("$ \Delta V_L$, [m/s]")
plt.ylim(200,2000)
plt.xlim(1000,6000)
plt.legend()
plt.tight_layout(1)
plt.savefig(open("slide14.png", "w"))
b = a[a[:,5] < 2000]
b = b[b[:,5] > 500]
RMSE = np.sqrt(np.mean((b[:,5] - b[:,7])**2))
RMSE_LAMBERT = np.sqrt(np.mean((b[:,5] - b[:,6])**2))
MAE = np.mean(np.abs((b[:,5] - b[:,7])))
MAE_LAMBERT = np.mean(np.abs((b[:,5] - b[:,6])))
print(RMSE)
print(RMSE_LAMBERT)
print(MAE)
print(MAE_LAMBERT)
b[0]
len(b)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plot with sliders for $\omega_0$ and $\omega_1$ from 0 to 2 with steps of 0.25
Step2: Exercise 08.2 (multiple function plotting)
Step3: Plot of $\sin(x) / x$ and $1/\left| x \right|$ between $-6\pi$ and $6\pi$
Step4: Exercise 08.3 (demographics and interactive plotting)
Step5: Exercise 08.4 (crime reports by location)
Step6: The service https
Step7: The following converts the fetched data into a list of dictionaries
Step8: To get an idea of how the data is arranged, we can look at the first report in the list. To make the displayed data easier to read, we use the 'pretty print' module pprint.
Step9: Task
Step10: This function retrieves data from the UK police URL and returns it in a json; the default parameters for the coordinates are that of Cambridge used above
Step11: Run your program for different parts of Cambridge, starting with the area around your college, and for different months and years.
Step12: Iterate over all reports in the list, and extract the category string from each report. If the category string (the 'key') is already in the dictionary increment the associated counter. Otherwise add the key to the dictionary, and associate the value 1.
Step13: When adding the tick labels (crime categories), it may be necessary to rotate the labels, e.g.
Step14: Create a plot to represent crimes by year-month and category (not sure about order of the data in the dictionary)
|
<ASSISTANT_TASK:>
Python Code:
def function_plot(ω0=1, ω1=1):
# Define x axis range
x = np.linspace(-4*np.pi, 4*np.pi, 100)
# Add labels to x and y axis
plt.xlabel('$x$')
plt.ylabel('$\exp(x/10) \cdot \sin(\omega_{1}x) \cdot \cos(\omega_{0}x)$')
# Limit x axis between start and end point of the range
plt.xlim(x[0], x[-1])
# Add a title
plt.title('Plot of $f$ for $ω_0 = {}$ and $ω_1 = {}$'.format(ω0, ω1))
# Plot the function
plt.plot(x, np.exp(x/10) * np.sin(ω1*x) * np.cos(ω0*x))
plot()
# Add sliders for the two parameters
interact(function_plot, ω0=(0, 2, 0.25), ω1=(0, 2, 0.25));
# Define x axis range using an even number of points to avoid division by 0
x = np.linspace(-6*np.pi, 6*np.pi, 100)
# Add labels to x and y axis
plt.xlabel('$x$')
plt.ylabel('$\sin(x)/x$')
# Limit x axis between start and end point of the range
plt.xlim(x[0], x[-1])
# Plot the function
plt.plot(x, np.sin(x)/x);
# Define x axis range using an even number of points to avoid division by 0
x = np.linspace(-6*np.pi, 6*np.pi, 100)
# Add label to x axis
plt.xlabel('$x$')
# Limit x axis between start and end point of the range
plt.xlim(x[0], x[-1])
# Limit y axis between -0.3 and 1
plt.ylim(-0.3, 1)
# Plot the first function
plt.plot(x, np.sin(x)/x, label='$\sin(x)/x$')
# Plot the second function on the same plot
plt.plot(x, 1/np.abs(x), label='$1/|x|$')
# Add a legend
plt.legend();
def demographics_plot(year=2011, grCC=0, grEX=0, grFL=0, grHS=0, grSC=0):
# Initialize district tuple, population and annual growth arrays
district = ('Cambridge City', 'East Cambridgeshire', 'Fenland', 'Huntingdonshire',
'South Cambridgeshire')
population = np.array((123900, 83800, 95300, 169500, 148800))
annual_growth = np.array((grCC, grEX, grFL, grHS, grSC))
# Specify slice colours
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral', 'red']
# Explode the 1st slice (Cambridge City)
explode = (0.1, 0.0, 0, 0, 0)
# Set figure size
plt.figure(figsize=(10,10))
# Plot pie chart using a linear annual growth in population
plt.pie(population * (1 + (year-2011) * annual_growth / 100), explode=explode, labels=district, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90)
# Add title
plt.title('{} population distribution in Cambridgeshire'.format(year))
# Add sliders for the annual growth of each district
interact(demographics_plot,
year=(2011, 2021, 1),
grCC=(0, 10, 0.1),
grEX=(0, 10, 0.1),
grFL=(0, 10, 0.1),
grHS=(0, 10, 0.1),
grSC=(0, 10, 0.1));
import json
import requests
# A triangle that includes most of the Cambridge city centre
# (long, lat) for three vertices of a triangle (no spaces!)
p0 = '52.211546,0.116465'
p1 = '52.203510,0.145500'
p2 = '52.189730,0.113050'
# year-month of interest
year_month = '2016-05'
# Construct request URL string using the above data
url = 'https://data.police.uk/api/crimes-street/all-crime?poly=' + p0 + ':' + p1 + ':' + p2 + '&date=' + year_month
# Fetch data from https://data.police.uk
r = requests.get(url)
crime_data = r.json()
import pprint
if crime_data:
pprint.pprint(crime_data[0])
categories = ('anti-social-behaviour', 'bicycle-theft', 'burglary', 'criminal-damage-arson', \
'drugs', 'other-crime', 'other-theft', 'public-order', 'shoplifting', \
'theft-from-the-person', 'vehicle-crime', 'violent-crime')
def get_crime_data(year_month, p0='52.211546,0.116465', p1='52.203510,0.145500', p2='52.189730,0.113050'):
"Get the crime data for a given year and month (in the format YYYY-MM) and coordinates"
# Construct request URL string using the above data
url = 'https://data.police.uk/api/crimes-street/all-crime?poly=' + p0 + ':' + p1 + ':' + p2 + '&date=' + year_month
# Fetch data from https://data.police.uk
r = requests.get(url)
return r.json()
def crime_plot(year_month, p0='52.211546,0.116465', p1='52.203510,0.145500', p2='52.189730,0.113050'):
"Plot the crime data on a barplot for a given year and month (in the format YYYY-MM) and coordinates"
# Get the crime data
crime_data = get_crime_data(year_month, p0, p1, p2)
# Initialize a dict for crime category frequencies
categories_freq = {}
# Count the frequencies
for crime in crime_data:
curr_category = crime['category']
if curr_category in categories_freq:
categories_freq[curr_category] += 1
else:
categories_freq[curr_category] = 1
# Define values for x axis ticks
x_values = np.arange(len(categories_freq))
# Create barplot
plt.bar(x_values, categories_freq.values(), align='center')
# Add labels to x axis ticks
plt.xticks(x_values, categories_freq.keys(), rotation=90)
# Add axis labels
#plt.xlabel('Crime Category')
plt.ylabel('Number of Crimes')
# Add title
plt.title('Crimes in {}'.format(year_month))
# Test for a month
crime_plot('2017-01')
categories_freq = {}
# Iterate over all reports
for report in crime_data:
# Get category type from the report
category = report['category']
if category in categories_freq:
# Increment counter here
pass # This can be removed once this 'if' block has a body
else:
# Add category to dictionary here
pass # This can be removed once this 'else' block has a body
# Initialize the starting year and month and the number of months to retrieve
start_year, start_month, num_months = 2016, 1, 6
# Initialize an empty list for all crimes
all_crimes = []
#crime_freq = {}
for unused in range(num_months):
# For every month in range get crime data
crime_data = get_crime_data(str(start_year) + '-' + str(start_month))
# Append every crime retrieved to the list of all crimes
for crime in crime_data:
all_crimes.append([crime['id'], crime['month'], crime['category']])
# Update month and year
start_month += 1
if start_month % 13 == 0:
start_month = 1
start_year += 1
# Initialize figure setting size
plt.figure(figsize=(20,20))
# for each category in the above list create a dict of frequencies and plot it on a line
for category in categories:
category_freq = {}
for crime in all_crimes:
if crime[2] == category:
if crime[1] in category_freq:
category_freq[crime[1]] += 1
else:
category_freq[crime[1]] = 1
# Define values for x axis ticks
x_values = np.arange(len(category_freq))
# Create a plot using dict values (not sure if they are in the right order here...)
plt.plot(x_values, list(category_freq.values()), '-o', label=category)
# Add x axis values labels using keys
plt.xticks(x_values, category_freq.keys(), rotation=90)
# Add legend, title and labels
plt.legend()
plt.title('Crime categories in time')
plt.xlabel('Month')
plt.ylabel('Number of Crimes');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2.1 Importing the data
Step2: 2.2 Looking at the data
Step3: Export part of the dataset as HTML files for inspection ByCity, ByCountry,
Step4: 2.3.2 Working with the missing data
Step5: Or, if we chose to ignore the global temperature measurements for which we don't have all of the 8 fields
Step6: If we chose to fill in the missing values with the values of the previous corresponding measurement
Step7: The method we will use will depend on the problem we will try to solve with our data.
Step8: As it can be observed the uncertainty of the measurements in the 18th and 19th century was very high. Early data was collected by technicians using mercury thermometers, where any variation in the visit time impacted measurements. In the 1940s, the construction of airports caused many weather stations to be moved. In the 1980s, there was a move to electronic thermometers that are said to have a cooling bias. One can chose to ignore or give smaller weights to older, less reliable measurements. For the data exploitation part we will consider data from 1900 onward.
Step9: 3.2 Which cities have experienced the biggest change of temperature the last 50 years?
Step10: 4. Data Expoitation
Step11: 4.1 Making the process a stationary process
Step12: Now we can check if in fact the process after differencing is stationary with the Dickey-Fuller Test. Here the null hypothesis is that the time series is non-stationary. The test results comprise of a Test Statistic and some Critical Values for different confidence levels. If the ‘Test Statistic’ is less than the ‘Critical Value’, we can reject the null hypothesis and say that the series is stationary.
Step13: 4.1.2 Detrend by Model Fitting
Step14: Looking at the results we get from the Dickey-Fuller Test for the two methods of making the time series stationary, we can see that we got better results here through the method of differencing. Therefore, in what follows we will use $X_{diff}$. We could have gotten better results for the method based on modeling the trend if we had allowed a more complex model than the linear one.
Step15: We now take it back to the original scale (no differencing).
Step16: 5. Evaluation
Step17: 5.2 Out-of-sample performance
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
# Show matplotlib graphs inside the notebook.
%matplotlib inline
import os.path
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import plotly
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
from sklearn import linear_model
from statsmodels.tsa.arima_model import ARIMA
from myutils import makeTimeSeries
from myutils import differenciate
from myutils import test_stationarity
import warnings
warnings.filterwarnings("ignore")
folder = os.path.join('data', 'temperatures','GlobalLandTemperatures')
filename_ByCity = os.path.join(folder, 'GlobalLandTemperaturesByCity.csv')
filename_ByCountry = os.path.join(folder, 'GlobalLandTemperaturesByCountry.csv')
filename_ByMajorCity = os.path.join(folder, 'GlobalLandTemperaturesByMajorCity.csv')
filename_ByState = os.path.join(folder, 'GlobalLandTemperaturesByState.csv')
filename_Global = os.path.join(folder, 'GlobalTemperatures.csv')
ByCity=pd.read_csv(filename_ByCity)
ByCountry=pd.read_csv(filename_ByCountry)
ByMajorCity=pd.read_csv(filename_ByMajorCity)
ByState=pd.read_csv(filename_ByState)
Global=pd.read_csv(filename_Global)
ByCity[:10000].to_html('ByCity.html')
ByCountry[:10000].to_html('ByCountry.html')
ByMajorCity[:10000].to_html('ByMajorCity.html')
ByState[:10000].to_html('ByState.html')
Global.to_html('Global.html')
#Removing duplicates from ByCountry
ByCountry_clear = ByCountry[~ByCountry['Country'].isin(
['Denmark', 'France', 'Europe', 'Netherlands',
'United Kingdom'])]
#ByCountry_clear.loc[ByCountry_clear['Country'] == 'Denmark (Europe)']
ByCountry_clear = ByCountry_clear.replace(
['Denmark (Europe)', 'France (Europe)', 'Netherlands (Europe)', 'United Kingdom (Europe)'],
['Denmark', 'France', 'Netherlands', 'United Kingdom'])
#countries = np.unique(ByCountry_clear['Country'])
#np.set_printoptions(threshold=np.inf)
#print(countries)
#Removing duplicates from ByCity
ByCity_clear = ByCity[~ByCity['City'].isin(
['Guatemala'])]
ByCity_clear = ByCity_clear.replace(['Guatemala City'],['Guatemala'])
#cities = np.unique(ByCity_clear['City'])
#print(cities)
Global.dropna(subset=['LandAverageTemperature']).head()
Global.dropna(axis=0).head()
Global.fillna(method='pad').head()
mean_Global= []
mean_Global_uncertainty = []
years = np.unique(Global['dt'].apply(lambda x: x[:4]))
for year in years:
mean_Global.append(Global[Global['dt'].apply(
lambda x: x[:4]) == year]['LandAverageTemperature'].mean())
mean_Global_uncertainty.append(Global[Global['dt'].apply(
lambda x: x[:4]) == year]['LandAverageTemperatureUncertainty'].mean())
#print(years.dtype)
x=years.astype(int)
minimum=np.array(mean_Global) + np.array(mean_Global_uncertainty)
y=np.array(mean_Global)
maximum=np.array(mean_Global) - np.array(mean_Global_uncertainty)
plt.figure(figsize=(16,8))
plt.plot(x,minimum,'b')
plt.hold
plt.plot(x,y,'r')
plt.hold
plt.plot(x,maximum,'b')
plt.hold
plt.fill_between(x,y1=minimum,y2=maximum)
plt.xlabel('years',fontsize=16)
plt.xlim(1748,2017)
plt.ylabel('Temperature, °C',fontsize=16)
plt.title('Yearly Global Temperature',fontsize=24)
countries = np.unique(ByCountry_clear['Country'])
mean_temp = []
for country in countries:
mean_temp.append(ByCountry_clear[ByCountry_clear['Country'] == country]['AverageTemperature'].mean())
#when taking the mean the missing data are automatically ignored=>see data cleaning section
#use choropleth map provided by pyplot
data = [ dict(
type = 'choropleth',
locations = countries,
z = mean_temp,
locationmode = 'country names',
text = countries,
colorbar = dict(autotick = True, tickprefix = '',
title = '\n °C')
)
]
layout = dict(
title = 'Average Temperature in Countries',
geo = dict(
showframe = False,
showocean = True,
oceancolor = 'rgb(0,255,255)',
),
)
fig = dict(data=data, layout=layout)
py.iplot(fig,validate=False)
years_in_MajorCities=np.unique(ByMajorCity['dt'].apply(lambda x: x[:4]))
cities = np.unique(ByMajorCity['City'])
dt=[years_in_MajorCities[-51],years_in_MajorCities[-1]]
T1=[]
T2=[]
lon=[]
lat=[]
for city in cities:
T1.append(ByMajorCity[(ByMajorCity['City'] == city) & (ByMajorCity['dt'].apply(lambda x: x[:4]) == dt[0])]['AverageTemperature'].mean())
T2.append(ByMajorCity[(ByMajorCity['City'] == city) & (ByMajorCity['dt'].apply(lambda x: x[:4]) == dt[1])]['AverageTemperature'].mean())
lon.append(ByMajorCity[ByMajorCity['City'] == city]['Longitude'].iloc[1])
lat.append(ByMajorCity[ByMajorCity['City'] == city]['Latitude'].iloc[1])
lon=np.array(lon)
lat=np.array(lat)
for i in range(0,lon.size):
if lon[i].endswith('W'):
west=lon[i]
west=float(west[:-1])
east=str(360-west)
lon[i]=east+'E'
for i in range(0,lat.size):
if lat[i].endswith('S'):
south=lat[i]
south=float(south[:-1])
north=str(1-south)
lat[i]=north+'N'
lon=pd.DataFrame(lon)
lat=pd.DataFrame(lat)
long=lon[0].apply(lambda x: x[:-1])
lati=lat[0].apply(lambda x: x[:-1])
dT=np.array(T2)-np.array(T1)
data = [ dict(
type = 'scattergeo',
lon = long,
lat = lati,
text=cities,
mode = 'markers',
marker = dict(
size = 8,
opacity = 0.8,
reversescale = True,
autocolorscale = False,
symbol = 'square',
line = dict(
width=1,
color='rgba(102, 102, 102)'
),
color = dT,
colorbar=dict(
title="\n °C"
)
))]
layout = dict(
title = 'Change in the temperature the last 50 years',
colorbar = True,
geo = dict(
showland = True,
landcolor = "rgb(250, 250, 250)",
subunitcolor = "rgb(217, 217, 217)",
countrycolor = "rgb(217, 217, 217)",
showocean = True,
oceancolor = 'rgb(0,255,255)',
),
)
fig = dict( data=data, layout=layout )
py.iplot( fig, validate=False)
mean_Global=pd.DataFrame(mean_Global)
mean_Global['dt']=years
ts=makeTimeSeries(mean_Global)
#print(ts)
plt.figure(figsize=(16,8))
plt.plot(ts)
plt.xlabel('time',fontsize=16)
plt.ylabel('Temperature, °C',fontsize=16)
plt.title('Yearly Global Temperature',fontsize=24)
X = ts[0]['1900':'2000'] #training set, temporal split
#print(X)
X_diff=differenciate(X)
#print(X_diff)
plt.figure(figsize=(16,8))
plt.plot(X_diff)
plt.xlabel('years',fontsize=16)
plt.ylabel('Temperature, °C',fontsize=16)
plt.title('Yearly Global Temperature (after differencing)',fontsize=24)
test_stationarity(X_diff)
regresor = linear_model.LinearRegression()
y=np.array(X.dropna())
t=np.arange(y.size)
y=y.reshape(-1,1)
t=t.reshape(-1,1)
regresor.fit(t,y)
trend=regresor.predict(t)
# detrend
detrended = [y[i]-trend[i] for i in range(0, y.size)]
y=pd.DataFrame(y)
y.index=X.index
trend=pd.DataFrame(trend)
trend.index=X.index
detrended=pd.DataFrame(detrended)
detrended.index=X.index
print('Coefficients: \n', regresor.coef_)
print("Mean of error: %.2f" % np.mean((trend - y) ** 2))
# plot trend
plt.figure(figsize=(16,8))
plt.plot(y,color='blue',label='time series')
plt.plot(trend,color='green',label='trend')
plt.xlabel('years',fontsize=16)
plt.ylabel('Temperature, °C',fontsize=16)
plt.title('Trend of Yearly Global Temperature',fontsize=24)
plt.legend()
plt.show()
# plot detrended
plt.figure(figsize=(16,8))
plt.plot(detrended)
plt.xlabel('years',fontsize=16)
plt.ylabel('Temperature, °C',fontsize=16)
plt.title('Detrended Yearly Global Temperature',fontsize=24)
plt.show()
test_stationarity(detrended[0])
model = ARIMA(ts[0]['1900':'2000'], order=(1, 1, 2))
results_ARIMA = model.fit(disp=-1)
plt.figure(figsize=(16,8))
plt.plot(X_diff,color='blue',label='original')
plt.plot(results_ARIMA.fittedvalues, color='red',label='predicted')
plt.title('RSS: %.4f'% sum((results_ARIMA.fittedvalues-X_diff)**2),fontsize=20)
plt.legend(loc='best')
plt.xlabel('years',fontsize=16)
plt.ylabel('Temperature, °C',fontsize=16)
predictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True)
#print(predictions_ARIMA_diff.head())
predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()
#print (predictions_ARIMA_diff_cumsum.head())
predictions_ARIMA = pd.Series(X.ix[0], index=X.index)
predictions_ARIMA = predictions_ARIMA.add(predictions_ARIMA_diff_cumsum,fill_value=0)
#predictions_ARIMA.head()
plt.figure(figsize=(16,8))
plt.plot(X,color='blue',label='original')
plt.plot(predictions_ARIMA,color='green',label='predicted')
plt.title('RMSE= %.4f'% np.sqrt(sum((predictions_ARIMA-X)**2)/len(X)),fontsize=24)
plt.legend(loc='best')
plt.xlabel('years',fontsize=16)
plt.ylabel('Temperature, °C',fontsize=16)
X_test = ts[0]['2001':] #test set, temporal split
#print(X_test)
preds=results_ARIMA.predict('2001-01-01','2015-01-01')
#preds.head
preds_cumsum = preds.cumsum()
preds=preds_cumsum+X[-1]
#print (preds)
#print(X_test)
plt.figure(figsize=(16,8))
plt.plot(X_test,color='blue',label='original')
plt.plot(preds, color='red',label='predicted')
plt.title('RMSE= %.4f'% np.sqrt(sum((preds-X_test)**2)/len(X_test)),fontsize=24)
plt.legend(loc='best')
plt.xlabel('years',fontsize=16)
plt.ylabel('Temperature, °C',fontsize=16)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we're going to build a larger network that can solve a (formerly) difficult problem, identifying text in an image. Here we'll use the MNIST dataset which consists of greyscale handwritten digits. Each image is 28x28 pixels, you can see a sample below
Step2: We have the training data loaded into trainloader and we make that an iterator with iter(trainloader). Later, we'll use this to loop through the dataset for training, like
Step3: This is what one of the images looks like.
Step4: First, let's try to build a simple network for this dataset using weight matrices and matrix multiplications. Then, we'll see how to do it using PyTorch's nn module which provides a much more convenient and powerful method for defining network architectures.
Step5: Now we have 10 outputs for our network. We want to pass in an image to our network and get out a probability distribution over the classes that tells us the likely class(es) the image belongs to. Something that looks like this
Step6: Building networks with PyTorch
Step7: Let's go through this bit by bit.
Step8: You can define the network somewhat more concisely and clearly using the torch.nn.functional module. This is the most common way you'll see networks defined as many operations are simple element-wise functions. We normally import this module as F, import torch.nn.functional as F.
Step9: Activation functions
Step10: Initializing weights and biases
Step11: For custom initialization, we want to modify these tensors in place. These are actually autograd Variables, so we need to get back the actual tensors with model.fc1.weight.data. Once we have the tensors, we can fill them with zeros (for biases) or random normal values.
Step12: Forward pass
Step13: As you can see above, our network has basically no idea what this digit is. It's because we haven't trained it yet, all the weights are random!
Step14: The operations are availble by passing in the appropriate index. For example, if you want to get first Linear operation and look at the weights, you'd use model[0].
Step15: You can also pass in an OrderedDict to name the individual layers and operations, instead of using incremental integers. Note that dictionary keys must be unique, so each operation must have a different name.
Step16: Now you can access layers either by integer or the name
|
<ASSISTANT_TASK:>
Python Code:
# Import necessary packages
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import torch
import helper
import matplotlib.pyplot as plt
### Run this cell
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
# Download and load the training data
trainset = datasets.MNIST('MNIST_data/',
download=True,
train=True,
transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=64,
shuffle=True)
dataiter = iter(trainloader)
images, labels = dataiter.next()
print(type(images))
print(images.shape)
print(labels.shape)
plt.imshow(images[1].numpy().squeeze(),
cmap='Greys_r');
def sigmoid_activation(x: torch.tensor):
return 1 / (1 + torch.exp(-x))
# Flattening the input image to have size 784 x 1
inputs = images.view(images.shape[0], -1)
inputs.shape
# Network parameters
num_hidden_units = 256
W1 = torch.randn(784, num_hidden_units)
b1 = torch.randn(num_hidden_units)
# 10 output units
W2 = torch.randn(num_hidden_units, 10)
b2 = torch.randn(10)
hidden = sigmoid_activation(torch.mm(inputs, W1) + b1)
out = torch.mm(hidden, W2) + b2
out.shape
def softmax(x):
    """Row-wise softmax over the class dimension.

    Args:
        x: Tensor of shape (batch, num_classes) holding raw scores.

    Returns:
        Tensor of the same shape whose rows are probability
        distributions (non-negative, summing to 1).
    """
    exps = torch.exp(x)
    # keepdim=True keeps the sum as a (batch, 1) column so division
    # broadcasts across each row.
    return exps / exps.sum(dim=1, keepdim=True)
# Here, out should be the output of the network in the previous excercise with shape (64,10)
probabilities = softmax(out)
# Does it have the right shape? Should be (64, 10)
print(probabilities.shape)
# Does it sum to 1?
print(probabilities.sum(dim=1))
from torch import nn
class Network(nn.Module):
    """Minimal 784-256-10 MNIST classifier.

    One sigmoid hidden layer followed by a softmax output; activations
    are stored as submodules so the architecture prints nicely.
    """

    def __init__(self):
        # nn.Module bookkeeping must be set up before any submodule is
        # assigned, otherwise parameter registration fails.
        super().__init__()
        self.hidden = nn.Linear(784, 256)   # flattened image -> hidden units
        self.output = nn.Linear(256, 10)    # hidden units -> one score per digit
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Map a (batch, 784) tensor to (batch, 10) class probabilities."""
        hidden_act = self.sigmoid(self.hidden(x))
        return self.softmax(self.output(hidden_act))
# Create the network and look at it's text representation
model = Network()
model
import torch.nn.functional as F
class Network(nn.Module):
    """784-256-10 classifier applying activations functionally in forward().

    Same architecture as the submodule-based variant, but only the
    learnable layers are stored as attributes; the sigmoid/softmax
    non-linearities are applied as plain function calls.
    """

    def __init__(self):
        super().__init__()
        # Inputs to hidden layer linear transformation.
        self.hidden = nn.Linear(784, 256)
        # Output layer, 10 units - one for each digit.
        self.output = nn.Linear(256, 10)

    def forward(self, x):
        """Map a (batch, 784) tensor to (batch, 10) class probabilities."""
        # torch.sigmoid replaces the deprecated F.sigmoid.
        x = torch.sigmoid(self.hidden(x))
        # Explicit dim so the softmax normalises across classes, not batch.
        return torch.softmax(self.output(x), dim=1)
## Your solution here
class MyNetwork(nn.Module):
    """Three-layer MLP for MNIST: 784 -> 128 -> 64 -> 10.

    ReLU activations on the two hidden layers and a softmax over the
    10 digit classes on the output.
    """

    def __init__(self):
        super().__init__()
        # NOTE: attribute names h1/h2/out are part of the public surface —
        # later notebook cells access model.h1.weight and model.h1.bias
        # directly — so they must not be renamed.
        self.h1 = nn.Linear(in_features=784, out_features=128)
        self.h2 = nn.Linear(in_features=128, out_features=64)
        self.out = nn.Linear(in_features=64, out_features=10)
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map a (batch, 784) tensor to (batch, 10) class probabilities."""
        # Annotation fixed: torch.Tensor is the type; torch.tensor is a factory.
        x = self.relu(self.h1(x))
        x = self.relu(self.h2(x))
        return self.softmax(self.out(x))
model = MyNetwork()
model
print(model.h1.weight)
print(model.h1.bias)
# Set biases to all zeros
model.h1.bias.data.fill_(0)
# sample from random normal with standard dev = 0.01
model.h1.weight.data.normal_(std=0.01)
# Grab some data
dataiter = iter(trainloader)
images, labels = dataiter.next()
# Resize images into a 1D vector, new shape is (batch size, color channels, image pixels)
images.resize_(64, 1, 784)
# or images.resize_(images.shape[0], 1, 784) to automatically get batch size
# Forward pass through the network
img_idx = 0
ps = model.forward(images[img_idx,:])
img = images[img_idx]
helper.view_classify(img.view(1, 28, 28), ps)
# Hyperparameters for our network
input_size = 784
hidden_sizes = [128, 64]
output_size = 10
# Build a feed-forward network
model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),
nn.ReLU(),
nn.Linear(hidden_sizes[0], hidden_sizes[1]),
nn.ReLU(),
nn.Linear(hidden_sizes[1], output_size),
nn.Softmax(dim=1))
print(model)
# Forward pass through the network and display output
images, labels = next(iter(trainloader))
images.resize_(images.shape[0], 1, 784)
ps = model.forward(images[0,:])
helper.view_classify(images[0].view(1, 28, 28), ps)
print(model[0])
model[0].weight
from collections import OrderedDict
model = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_size, hidden_sizes[0])),
('relu1', nn.ReLU()),
('fc2', nn.Linear(hidden_sizes[0], hidden_sizes[1])),
('relu2', nn.ReLU()),
('output', nn.Linear(hidden_sizes[1], output_size)),
('softmax', nn.Softmax(dim=1))]))
model
print(model[0])
print(model.fc1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: It is pretty clear that there is a linear trend here. If I wanted to predict what would happen if we tried the input of x=0.6, it would be a good guess to pick something like y=1.6 or so. Training the computer to do this is what we mean by Machine Learning.
Step2: You can see that, with a 20% split, our small fake dataset doesn't have very many points. Really we shouldn't be working with less than 100 points for anything we do. Any fewer than that and the statistics just start breaking. Ideally we'd have tens of millions of data points. We'll talk later about how to get that much data, but we'll start small for now. We'll load in the Class02_fakedata2.csv file and split it 80/20 training/testing datasets.
Step3: Linear Regression
Step4: We now want to see what this looks like! We start by looking at the fit coefficient and intercept. When we have more than one input variable, there will be a coefficient corresponding to each feature.
Step5: That doesn't really tell us much. It would be better if we could compare the model to the test data. We will use the inputs from the test data and run them through the model. It will predict what the outputs should be. We can then compare them to the actual outputs. We'll plot the predictions as a line (since they will all lie on the same line due to our model being a linear regression).
Step6: This looks pretty good. We can go one step futher and define a quantitative measure of the quality of the fit. We will subtract the difference between the prediction and the actual value for each point. We then square all of those and average them. Finally we take the square root of all of that. This is known as the RMS error (for Root Mean Squared).
Step7: Using Multiple Inputs
Step8: I've put all the steps together in one cell and commented on each step.
Step9: Not too surprising that the RMS error isn't very good. This is the real world after all. However, we saw in Class 01 that there may be some dependence on some of the other variables like the LDL. We can try a linear regression with both of them as inputs. I have to change the code a little to do this. Compare this with the previous cell to see what needs to change.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
fakedata1 = pd.DataFrame(
[[ 0.862, 2.264],
[ 0.694, 1.847],
[ 0.184, 0.705],
[ 0.41 , 1.246]], columns=['input','output'])
fakedata1.plot(x='input',y='output',kind='scatter')
from sklearn.model_selection import train_test_split
faketrain1, faketest1 = train_test_split(fakedata1, test_size=0.2, random_state=23)
faketrain1.plot(x='input',y='output',kind='scatter')
faketest1.plot(x='input',y='output',kind='scatter')
fakedata2 = pd.read_csv('Class02_fakedata2.csv')
faketrain2, faketest2 = train_test_split(fakedata2, test_size=0.2, random_state=23)
faketrain2.plot(x='input',y='output',kind='scatter')
faketest2.plot(x='input',y='output',kind='scatter')
faketrain2.head()
from sklearn.linear_model import LinearRegression
# Step 1: Create linear regression object
regr = LinearRegression()
# Step 2: Train the model using the training sets
features = faketrain2[['input']].values
labels = faketrain2['output'].values
regr.fit(features,labels)
print('Coefficients: \n', regr.coef_)
print('Intercept: \n', regr.intercept_)
testinputs = faketest2[['input']].values
predictions = regr.predict(testinputs)
actuals = faketest2['output'].values
import matplotlib.pyplot as plt
plt.scatter(testinputs, actuals, color='black', label='Actual')
plt.plot(testinputs, predictions, color='blue', linewidth=1, label='Prediction')
# We also add a legend to our plot. Note that we've added the 'label' option above. This will put those labels together in a single legend.
plt.legend(loc='upper left', shadow=False, scatterpoints=1)
plt.xlabel('input')
plt.ylabel('output')
plt.scatter(testinputs, (actuals-predictions), color='green', label='Residuals just because $\lambda$')
plt.xlabel('input')
plt.ylabel('residuals')
plt.legend(loc='upper left', shadow=False, scatterpoints=1)
import numpy as np
print("RMS Error: {0:.3f}".format( np.sqrt(np.mean((predictions - actuals) ** 2))))
diabetes = pd.read_csv('../Class01/Class01_diabetes_data.csv')
diabetes.head()
# Step 1: Split off the test data
dia_train, dia_test = train_test_split(diabetes, test_size=0.2, random_state=23)
# Step 2: Create linear regression object
dia_model = LinearRegression()
# Step 3: Train the model using the training sets
features = dia_train[['BMI']].values
labels = dia_train['Target'].values
# Step 4: Fit the model
dia_model.fit(features,labels)
# Step 5: Get the predictions
testinputs = dia_test[['BMI']].values
predictions = dia_model.predict(testinputs)
actuals = dia_test['Target'].values
# Step 6: Plot the results
plt.scatter(testinputs, actuals, color='black', label='Actual')
plt.plot(testinputs, predictions, color='blue', linewidth=1, label='Prediction')
plt.xlabel('BMI') # Label the x axis
plt.ylabel('Target') # Label the y axis
plt.legend(loc='upper left', shadow=False, scatterpoints=1)
# Step 7: Get the RMS value
print("RMS Error: {0:.3f}".format( np.sqrt(np.mean((predictions - actuals) ** 2))))
# Step 2: Create linear regression object
dia_model2 = LinearRegression()
# Possible columns:
# 'Age', 'Sex', 'BMI', 'BP', 'TC', 'LDL', 'HDL', 'TCH', 'LTG', 'GLU'
#
inputcolumns = [ 'BMI', 'HDL']
# Step 3: Train the model using the training sets
features = dia_train[inputcolumns].values
labels = dia_train['Target'].values
# Step 4: Fit the model
dia_model2.fit(features,labels)
# Step 5: Get the predictions
testinputs = dia_test[inputcolumns].values
predictions = dia_model2.predict(testinputs)
actuals = dia_test['Target'].values
# Step 6: Plot the results
#
# Note the change here in how we plot the test inputs. We can only plot one variable, so we choose the first.
# Also, it no longer makes sense to plot the fit points as lines. They have more than one input, so we only visualize them as points.
#
plt.scatter(testinputs[:,0], actuals, color='black', label='Actual')
plt.scatter(testinputs[:,0], predictions, color='blue', label='Prediction')
plt.legend(loc='upper left', shadow=False, scatterpoints=1)
# Step 7: Get the RMS value
print("RMS Error: {0:.3f}".format( np.sqrt(np.mean((predictions - actuals) ** 2))))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: dm_control
Step4: Machine-locked MuJoCo license.
Step5: Imports
Step6: Data
Step7: Dataset and environment
Step8: D4PG learner
Step9: Training loop
Step10: Evaluation
|
<ASSISTANT_TASK:>
Python Code:
!pip install dm-acme
!pip install dm-acme[reverb]
!pip install dm-acme[tf]
!pip install dm-sonnet
!git clone https://github.com/deepmind/deepmind-research.git
%cd deepmind-research
#@title Edit and run
mjkey =
REPLACE THIS LINE WITH YOUR MUJOCO LICENSE KEY
.strip()
mujoco_dir = "$HOME/.mujoco"
# Install OpenGL deps
!apt-get update && apt-get install -y --no-install-recommends \
libgl1-mesa-glx libosmesa6 libglew2.0
# Fetch MuJoCo binaries from Roboti
!wget -q https://www.roboti.us/download/mujoco200_linux.zip -O mujoco.zip
!unzip -o -q mujoco.zip -d "$mujoco_dir"
# Copy over MuJoCo license
!echo "$mjkey" > "$mujoco_dir/mjkey.txt"
# Configure dm_control to use the OSMesa rendering backend
%env MUJOCO_GL=osmesa
# Install dm_control
!pip install dm_control
#@title Add your MuJoCo License and run
mjkey =
.strip()
mujoco_dir = "$HOME/.mujoco"
# Install OpenGL dependencies
!apt-get update && apt-get install -y --no-install-recommends \
libgl1-mesa-glx libosmesa6 libglew2.0
# Get MuJoCo binaries
!wget -q https://www.roboti.us/download/mujoco200_linux.zip -O mujoco.zip
!unzip -o -q mujoco.zip -d "$mujoco_dir"
# Copy over MuJoCo license
!echo "$mjkey" > "$mujoco_dir/mjkey.txt"
# Install dm_control
!pip install dm_control[locomotion_mazes]
# Configure dm_control to use the OSMesa rendering backend
%env MUJOCO_GL=osmesa
import collections
import copy
from typing import Mapping, Sequence
import acme
from acme import specs
from acme.agents.tf import actors
from acme.agents.tf import d4pg
from acme.tf import networks
from acme.tf import utils as tf2_utils
from acme.utils import loggers
from acme.wrappers import single_precision
from acme.tf import utils as tf2_utils
import numpy as np
from rl_unplugged import dm_control_suite
import sonnet as snt
import tensorflow as tf
task_name = 'cartpole_swingup' #@param
tmp_path = '/tmp/dm_control_suite'
gs_path = 'gs://rl_unplugged/dm_control_suite'
!mkdir -p {tmp_path}/{task_name}
!gsutil cp {gs_path}/{task_name}/* {tmp_path}/{task_name}
num_shards_str, = !ls {tmp_path}/{task_name}/* | wc -l
num_shards = int(num_shards_str)
batch_size = 10 #@param
task = dm_control_suite.ControlSuite(task_name)
environment = task.environment
environment_spec = specs.make_environment_spec(environment)
dataset = dm_control_suite.dataset(
'/tmp',
data_path=task.data_path,
shapes=task.shapes,
uint8_features=task.uint8_features,
num_threads=1,
batch_size=batch_size,
num_shards=num_shards)
def discard_extras(sample):
    # Keep only the first 5 entries of the sample's `data` field, dropping
    # any trailing "extras" the RL-Unplugged dataset attaches — presumably
    # the core transition tuple; TODO confirm field order against the
    # rl_unplugged sample spec.
    # Assumes `sample` is namedtuple-like (supports `_replace`) with a
    # sliceable `.data` sequence.
    return sample._replace(data=sample.data[:5])
dataset = dataset.map(discard_extras).batch(batch_size)
# Create the networks to optimize.
action_spec = environment_spec.actions
action_size = np.prod(action_spec.shape, dtype=int)
policy_network = snt.Sequential([
tf2_utils.batch_concat,
networks.LayerNormMLP(layer_sizes=(300, 200, action_size)),
networks.TanhToSpec(spec=environment_spec.actions)])
critic_network = snt.Sequential([
networks.CriticMultiplexer(
observation_network=tf2_utils.batch_concat,
action_network=tf.identity,
critic_network=networks.LayerNormMLP(
layer_sizes=(400, 300),
activate_final=True)),
# Value-head gives a 51-atomed delta distribution over state-action values.
networks.DiscreteValuedHead(vmin=-150., vmax=150., num_atoms=51)])
# Create the target networks
target_policy_network = copy.deepcopy(policy_network)
target_critic_network = copy.deepcopy(critic_network)
# Create variables.
tf2_utils.create_variables(network=policy_network,
input_spec=[environment_spec.observations])
tf2_utils.create_variables(network=critic_network,
input_spec=[environment_spec.observations,
environment_spec.actions])
tf2_utils.create_variables(network=target_policy_network,
input_spec=[environment_spec.observations])
tf2_utils.create_variables(network=target_critic_network,
input_spec=[environment_spec.observations,
environment_spec.actions])
# The learner updates the parameters (and initializes them).
learner = d4pg.D4PGLearner(
policy_network=policy_network,
critic_network=critic_network,
target_policy_network=target_policy_network,
target_critic_network=target_critic_network,
dataset=dataset,
discount=0.99,
target_update_period=100)
for _ in range(100):
learner.step()
# Create a logger.
logger = loggers.TerminalLogger(label='evaluation', time_delta=1.)
# Create an environment loop.
loop = acme.EnvironmentLoop(
environment=environment,
actor=actors.DeprecatedFeedForwardActor(policy_network),
logger=logger)
loop.run(5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The translatePDF method does the following
|
<ASSISTANT_TASK:>
Python Code:
# Load our dependencies
import pybrl as brl
filename = "lorem_ipsum.pdf" # of course :P
pdf_password = None
language = 'english'
# Let's translate the PDF file.
translated = brl.translatePDF(filename, password = pdf_password, language = language) # Easy, right?
# Let's explore what this object looks like:
print(len(translated)) # = 2 (One for each page)
print(len(translated[0])) # = 1 group of text in the page.
# There might be more if (i.e.) a box of text is in a corner.
print(translated[0][0].keys()) # type, text, layout
print(translated[0][0]['type']) # 'text'
print(translated[0][0]['layout']) # The bounding box of this group
print(translated[0][0]['text'][0]) # The first word: ['000001', '111000', '101010', '111010', '100010', '101100']
tex = "" # Template contents and what will be edited.
output = "output.tex" # Output path to the tex file
TEMPLATE_PATH = "template.tex" # Path to the Template tex file
# Load the Template
with open(TEMPLATE_PATH, "r") as f:
tex = f.read()
# Concatenate all the text.
content = ""
for page in translated:
for group in page:
grouptxt = group['text']
# Convert to Unicode characters:
unicode_brl = brl.toUnicodeSymbols(grouptxt, flatten=True)
content += "\n\n" + unicode_brl
# Create the new TeX
output_tex = tex.replace("%%% Content will go here %%%", content)
# Save it
with open(output, "w") as f:
f.write(output_tex)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <br>
Step3: <br>
Step5: <br>
Step6: <br>
Step7: <br>
Step8: <br>
Step10: <br>
Step11: <br>
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.random.seed(123456)
# Generate 100 random patterns for class1
mu_vec1 = np.array([[0],[0]])
cov_mat1 = np.array([[3,0],[0,3]])
x1_samples = np.random.multivariate_normal(mu_vec1.ravel(), cov_mat1, 100)
# Generate 100 random patterns for class2
mu_vec2 = np.array([[9],[0]])
cov_mat2 = np.array([[3,0],[0,3]])
x2_samples = np.random.multivariate_normal(mu_vec2.ravel(), cov_mat2, 100)
# Generate 100 random patterns for class3
mu_vec3 = np.array([[6],[6]])
cov_mat3 = np.array([[4,0],[0,4]])
x3_samples = np.random.multivariate_normal(mu_vec3.ravel(), cov_mat3, 100)
%pylab inline
import numpy as np
from matplotlib import pyplot as plt
f, ax = plt.subplots(figsize=(7, 7))
ax.scatter(x1_samples[:,0], x1_samples[:,1], marker='o', color='green', s=40, alpha=0.5, label='$\omega_1$')
ax.scatter(x2_samples[:,0], x2_samples[:,1], marker='s', color='blue', s=40, alpha=0.5, label='$\omega_2$')
ax.scatter(x3_samples[:,0], x3_samples[:,1], marker='^', color='red', s=40, alpha=0.5, label='$\omega_2$')
plt.legend(loc='upper right')
plt.title('Training Dataset', size=20)
plt.ylabel('$x_2$', size=20)
plt.xlabel('$x_1$', size=20)
plt.show()
def discriminant_function(x_vec, cov_mat, mu_vec):
    """Quadratic discriminant for a multivariate normal class (equal priors).

    Evaluates g_i(x) = x^T W_i x + w_i^T x + omega_i, where
        W_i     = -1/2 * Sigma_i^{-1}
        w_i     = Sigma_i^{-1} mu_i
        omega_i = -1/2 * mu_i^T Sigma_i^{-1} mu_i - 1/2 * ln|Sigma_i|
    (the prior term ln P(omega_i) is omitted since priors are equal).

    Keyword arguments:
        x_vec: d-dimensional numpy array representing the sample
            (1-D of length d, or a d x 1 column vector).
        cov_mat: d x d covariance matrix.
        mu_vec: d x 1 mean column vector.

    Returns the scalar value of the discriminant function as a float.
    """
    cov_inv = np.linalg.inv(cov_mat)
    W_i = -0.5 * cov_inv
    w_i = cov_inv.dot(mu_vec)
    # BUGFIX: the log-determinant term must be *subtracted*; the original
    # computed omega_i_p1 - (-1/2 * log det) which flipped its sign and
    # biased classification between classes with different covariances.
    omega_i = (-0.5 * (mu_vec.T.dot(cov_inv)).dot(mu_vec)
               - 0.5 * np.log(np.linalg.det(cov_mat)))
    g = (x_vec.T.dot(W_i)).dot(x_vec) + (w_i.T).dot(x_vec) + omega_i
    return float(g)
import operator
def classify_data(x_vec, g, mu_vecs, cov_mats):
    """Assign a sample to one of len(mu_vecs) classes by maximising g_i().

    Keyword arguments:
        x_vec: d x 1 dimensional numpy array representing the sample.
        g: discriminant function with signature g(x_vec, mu_vec=, cov_mat=).
        mu_vecs: list of per-class mean vectors.
        cov_mats: list of per-class covariance matrices.

    Returns a tuple (maximal g_i() value, class label), labels start at 1.
    """
    assert len(mu_vecs) == len(cov_mats), 'Number of mu_vecs and cov_mats must be equal.'
    scores = [g(x_vec, mu_vec=mean, cov_mat=cov)
              for mean, cov in zip(mu_vecs, cov_mats)]
    # max() returns the first maximal index, matching argmax-with-ties
    # semantics; class labels are 1-based.
    best = max(range(len(scores)), key=scores.__getitem__)
    return (scores[best], best + 1)
class1_as_1 = 0
class1_as_2 = 0
class1_as_3 = 0
for row in x1_samples:
g = classify_data(
row,
discriminant_function,
[mu_vec1, mu_vec2, mu_vec3],
[cov_mat1, cov_mat2, cov_mat3]
)
if g[1] == 2:
class1_as_2 += 1
elif g[1] == 3:
class1_as_3 += 1
else:
class1_as_1 += 1
class2_as_1 = 0
class2_as_2 = 0
class2_as_3 = 0
for row in x2_samples:
g = classify_data(
row,
discriminant_function,
[mu_vec1, mu_vec2, mu_vec3],
[cov_mat1, cov_mat2, cov_mat3]
)
if g[1] == 2:
class2_as_2 += 1
elif g[1] == 3:
class2_as_3 += 1
else:
class2_as_1 += 1
class3_as_1 = 0
class3_as_2 = 0
class3_as_3 = 0
for row in x3_samples:
g = classify_data(
row,
discriminant_function,
[mu_vec1, mu_vec2, mu_vec3],
[cov_mat1, cov_mat2, cov_mat3]
)
if g[1] == 2:
class3_as_2 += 1
elif g[1] == 3:
class3_as_3 += 1
else:
class3_as_1 += 1
import prettytable
confusion_mat = prettytable.PrettyTable(["sample dataset", "w1 (predicted)", "w2 (predicted)", "w3 (predicted)"])
confusion_mat.add_row(["w1 (actual)",class1_as_1, class1_as_2, class1_as_3])
confusion_mat.add_row(["w2 (actual)",class2_as_1, class2_as_2, class2_as_3])
confusion_mat.add_row(["w3 (actual)",class3_as_1, class3_as_2, class3_as_3])
print(confusion_mat)
misclass = x1_samples.shape[0]*3 - class1_as_1 - class2_as_2 - class3_as_3
bayes_err = misclass / (len(x1_samples)*3)
print('Empirical Error: {:.2f} ({:.2f}%)'.format(bayes_err, bayes_err * 100))
import prettytable
mu_est1 = np.array([[sum(x1_samples[:,0])/len(x1_samples[:,0])],[sum(x1_samples[:,1])/len(x1_samples[:,1])]])
mu_est2 = np.array([[sum(x2_samples[:,0])/len(x2_samples[:,0])],[sum(x2_samples[:,1])/len(x2_samples[:,1])]])
mu_est3 = np.array([[sum(x3_samples[:,0])/len(x3_samples[:,0])],[sum(x3_samples[:,1])/len(x3_samples[:,1])]])
mu_mle = prettytable.PrettyTable(["", "mu_1", "mu_2", "mu_3"])
mu_mle.add_row(["MLE",mu_est1, mu_est2, mu_est3])
mu_mle.add_row(["actual",mu_vec1, mu_vec2, mu_vec3])
print(mu_mle)
import prettytable
def mle_est_cov(x_samples, mu_est):
    """Maximum Likelihood Estimate of the covariance matrix.

    Uses the biased (1/N) normalisation of the MLE, i.e.
    Sigma_hat = (1/N) * sum_k (x_k - mu)(x_k - mu)^T.

    Keyword Arguments:
        x_samples: np.array of the samples for 1 class, n x d dimensional.
        mu_est: np.array of the mean MLE, d x 1 dimensional.

    Returns the MLE for the covariance matrix as a d x d numpy array.
    """
    # Generalised from the original hard-coded d=2: the dimensionality is
    # now taken from mu_est, and the sum of outer products is computed as
    # a single matrix product instead of a Python loop.
    mu_row = np.asarray(mu_est).reshape(1, -1)      # (1, d) for broadcasting
    centered = np.asarray(x_samples) - mu_row       # (n, d)
    return centered.T.dot(centered) / len(x_samples)
cov_est1 = mle_est_cov(x1_samples, mu_est1)
cov_est2 = mle_est_cov(x2_samples, mu_est2)
cov_est3 = mle_est_cov(x3_samples, mu_est3)
cov_mle = prettytable.PrettyTable(["", "covariance_matrix_1", "covariance_matrix_2", "covariance_matrix_3"])
cov_mle.add_row(["MLE", cov_est1, cov_est2, cov_est3])
cov_mle.add_row(['','','',''])
cov_mle.add_row(["actual", cov_mat1, cov_mat2, cov_mat3])
print(cov_mle)
class1_as_1 = 0
class1_as_2 = 0
class1_as_3 = 0
for row in x1_samples:
g = classify_data(
row,
discriminant_function,
[mu_est1, mu_est2, mu_est3],
[cov_est1, cov_est2, cov_est3]
)
if g[1] == 2:
class1_as_2 += 1
elif g[1] == 3:
class1_as_3 += 1
else:
class1_as_1 += 1
class2_as_1 = 0
class2_as_2 = 0
class2_as_3 = 0
for row in x2_samples:
g = classify_data(
row,
discriminant_function,
[mu_est1, mu_est2, mu_est3],
[cov_est1, cov_est2, cov_est3]
)
if g[1] == 2:
class2_as_2 += 1
elif g[1] == 3:
class2_as_3 += 1
else:
class2_as_1 += 1
class3_as_1 = 0
class3_as_2 = 0
class3_as_3 = 0
for row in x3_samples:
g = classify_data(
row,
discriminant_function,
[mu_est1, mu_est2, mu_est3],
[cov_est1, cov_est2, cov_est3]
)
if g[1] == 2:
class3_as_2 += 1
elif g[1] == 3:
class3_as_3 += 1
else:
class3_as_1 += 1
import prettytable
confusion_mat = prettytable.PrettyTable(["sample dataset", "w1 (predicted)", "w2 (predicted)", "w3 (predicted)"])
confusion_mat.add_row(["w1 (actual)",class1_as_1, class1_as_2, class1_as_3])
confusion_mat.add_row(["w2 (actual)",class2_as_1, class2_as_2, class2_as_3])
confusion_mat.add_row(["w3 (actual)",class3_as_1, class3_as_2, class3_as_3])
print(confusion_mat)
misclass = x1_samples.shape[0]*3 - class1_as_1 - class2_as_2 - class3_as_3
bayes_err = misclass / (len(x1_samples)*3)
print('Empirical Error: {:.2f} ({:.2f}%)'.format(bayes_err, bayes_err * 100))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Then we define two util functions.
Step4: The second one is for writing a string to a file.
Step5: Than we define some constants for future use.
Step6: Compose bash command for running detached docker container from tooldog-analyser-sandbox image, that will be destroyed after stopping.
Step7: Save container ID for future use.
Step9: Yay! Now we can do stuff inside the container.
Step10: We install argparse2tool
Step11: As well as the tool for analysing
Step12: Now we are ready to generate CWL
Step13: Write CWL to file
Step14: Finally, let's stop the container. It will be removed automatically.
|
<ASSISTANT_TASK:>
Python Code:
import shlex
import subprocess

def run_command(command):
    """Run a shell command and stream its combined stdout/stderr.

    :param str command: String representation of the command to run.
        Quoted arguments (e.g. paths with spaces) are honoured.
    :return: Generator yielding output lines as bytes, terminating when
        the process closes its stdout.
    :rtype: iter
    """
    # shlex.split respects shell-style quoting, unlike str.split which
    # would break arguments containing spaces.
    args = shlex.split(command)
    p = subprocess.Popen(args,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    # iter(..., b'') stops at EOF (readline returns b'' when the pipe closes).
    return iter(p.stdout.readline, b'')
def write_to_file(filename, string=''):
    """Write a string to a file, replacing any existing content.

    :param str filename: Path of the file to (over)write.
    :param str string: Text to write; defaults to the empty string,
        which truncates the file.
    """
    # The context manager guarantees the handle is closed even if
    # write() raises; the explicit encoding keeps output portable.
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(string)
PYTHON_VERSION = 2 # version of python tool
TOOL_NAME = 'alignment' # name of the tool, assuming it is available via PyPi
OUTPUT_FORMAT = 'cwl' # output: cwl / xml
OUTPUT_FILENAME = TOOL_NAME + "." + OUTPUT_FORMAT # output filename
PYTHON_PATH = "/usr/local/lib/python3.5/dist-packages/" if PYTHON_VERSION == 3 else \
"/usr/local/lib/python2.7/dist-packages/" # PYTHONPATH, required for argparse2tool
cmd = 'docker run -d -e PYTHONPATH=' + PYTHON_PATH + ' --rm inkuzm/tooldog-analyser-sandbox tail -f /dev/null'
CONTAINER_ID = ""
for line in run_command(cmd):
CONTAINER_ID = line.decode("utf-8")[:-1]
print(CONTAINER_ID)
def exec_command_in_container(command, cid):
    """Run a bash command inside the Docker container identified by cid.

    :param str command: String representation of the command to run.
    :param str cid: ID of a running container (as printed by `docker run -d`).
    :return: Generator of output lines (bytes) from the docker exec call.
    :rtype: iter
    """
    # Delegate to run_command, which handles process spawning and
    # line-by-line streaming of the combined stdout/stderr.
    return run_command(f'docker exec {cid} {command}')
for line in exec_command_in_container('pip3 install argparse2tool', CONTAINER_ID):
print(line.decode("utf-8")[:-1])
for line in exec_command_in_container('pip3 install ' + TOOL_NAME, CONTAINER_ID):
print(line.decode("utf-8")[:-1])
cwl_tool = '' # string representation of cwl tool
for line in exec_command_in_container('tooldog --generate_cwl_tool', CONTAINER_ID):
cwl_tool += line.decode("utf-8")[:-1]
write_to_file(OUTPUT_FILENAME, cwl_tool)
run_command('docker stop ' + CONTAINER_ID)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What are datasets in federated learning?
Step2: fedjax.FederatedData provides methods for accessing metadata about the federated dataset, like the total number of clients, client ids, and number of examples for each client.
Step3: As we notice, the client ids start with a random set of bits. This is to ensure that one can easily slice a FederatedData to obtain a random subset. While the client ids returned above look sorted, there is no such guarantee in general.
Step4: Client datasets
Step5: FedJAX assumes that an individual client dataset is small and can easily fit in memory. This assumption is also reflected in many of FedJAX's design decisions. The examples in a client dataset can be viewed as a table, where the rows are
Step6: For Shakespeare, we are training a character-level language model, where the task is next character prediction, so the features are
Step7: However, you probably noticed that x and y are arrays of integers not text. This is because fedjax.datasets.shakespeare.load_data() does some minimal preprocessing, such as a simple character look up that mapped characters to integer IDs. Later, we'll go over how this preprocessing was applied and how to add your own custom preprocessing.
Step8: Accessing client datasets from fedjax.FederatedData
Step9: They are generators, so we iterate over them to get the individual client datasets as tuples of (client_id, client_dataset).
Step10: shuffled_clients() provides a stream of infinitely repeating shuffled client datasets, using buffered shuffling. It is suitable for training rounds where a nearly random shuffling is good enough.
Step11: get_clients()
Step12: get_client()
Step13: Batching client datasets
Step14: batch() for illustrations
Step15: padded_batch() for evaluation
Step16: All batches contain an extra bool feature keyed by '__mask__'.
Step17: shuffle_repeat_batch() for training
Step18: The number of batches produced from the iteration can be controlled by the (num_epochs, num_steps,
Step19: If num_epochs is set and num_steps is None, as few batches as needed to go
Step20: If num_steps is set and num_steps is None, exactly this many batches are
Step21: If both num_epochs and num_steps are set, the fewer number of batches
Step22: If reproducible iteration order is desired, a fixed seed can be used. When
Step23: Preprocessing
Step24: Preprocessing at the client level
Step25: The actual client level preprocessing in the Shakespeare dataset is a bit involved, so let's do something simpler
Step26: Now, we can add another batch level preprocessor to produce x and y labels.
Step27: In memory federated datasets
|
<ASSISTANT_TASK:>
Python Code:
# Uncomment these to install fedjax.
# !pip install fedjax
# !pip install --upgrade git+https://github.com/google/fedjax.git
# !pip install tensorflow_datasets
import functools
import itertools
import fedjax
import numpy as np
# We cap max sentence length to 8.
train_fd, test_fd = fedjax.datasets.shakespeare.load_data(sequence_length=8)
print('num_clients =', train_fd.num_clients())
# train_fd.client_ids() is a generator of client ids.
# itertools has efficient and convenient functions for working with generators.
for client_id in itertools.islice(train_fd.client_ids(), 3):
print('client_id =', client_id)
print('# examples =', train_fd.client_size(client_id))
# Slicing are based on the lexicographic order of client ids.
train_fd_0 = train_fd.slice(start=b'0', stop=b'1')
print('num_clients whose id starts with 0 =', train_fd_0.num_clients())
client_id = b'105f96df763d4ddf:ALL_S_WELL_THAT_ENDS_WELL_GUIDERIUS_AND_ARVIRAGUS'
client_dataset = train_fd.get_client(client_id)
print(client_dataset)
client_dataset.all_examples()
examples = client_dataset.all_examples()
print('x', examples['x'][0])
print('y', examples['y'][0])
# Unlike load_data(), load_split() always loads a single unprocessed split.
raw_fd = fedjax.datasets.shakespeare.load_split('train')
raw_fd.get_client(client_id).all_examples()
# clients() and shuffled_clients() are sequential read friendly.
clients = train_fd.clients()
shuffled_clients = train_fd.shuffled_clients(buffer_size=100, seed=0)
print('clients =', clients)
print('shuffled_clients =', shuffled_clients)
# We use itertools.islice to select first three clients.
for client_id, client_dataset in itertools.islice(clients, 3):
print('client_id =', client_id)
print('# examples =', len(client_dataset))
print('shuffled_clients()')
for client_id, client_dataset in itertools.islice(shuffled_clients, 3):
print('client_id =', client_id)
print('# examples =', len(client_dataset))
client_ids = [
b'1db830204507458e:THE_TAMING_OF_THE_SHREW_SEBASTIAN',
b'140784b36d08efbc:PERICLES__PRINCE_OF_TYRE_GHOST_OF_VAUGHAN',
b'105f96df763d4ddf:ALL_S_WELL_THAT_ENDS_WELL_GUIDERIUS_AND_ARVIRAGUS'
]
for client_id, client_dataset in train_fd.get_clients(client_ids):
print('client_id =', client_id)
print('# examples =', len(client_dataset))
client_id = b'1db830204507458e:THE_TAMING_OF_THE_SHREW_SEBASTIAN'
print('client_id =', client_id)
print('# examples =', len(train_fd.get_client(client_id)))
client_id = b'105f96df763d4ddf:ALL_S_WELL_THAT_ENDS_WELL_GUIDERIUS_AND_ARVIRAGUS'
client_dataset = train_fd.get_client(client_id)
batches = list(client_dataset.batch(batch_size=8))
batches[-1]
# use list() to consume generator and store in memory.
padded_batches = list(client_dataset.padded_batch(batch_size=8, num_batch_size_buckets=3))
print('# batches =', len(padded_batches))
padded_batches[0]
padded_batches[-1]
print('# batches')
len(list(client_dataset.shuffle_repeat_batch(batch_size=8)))
infinite_bs = client_dataset.shuffle_repeat_batch(
batch_size=8, num_epochs=None, num_steps=None)
for i, b in zip(range(6), infinite_bs):
print(i)
print('# batches w/ drop_remainder=False')
print(len(list(client_dataset.shuffle_repeat_batch(batch_size=8, num_epochs=1, num_steps=None))))
print('# batches w/ drop_remainder=True')
print(len(list(client_dataset.shuffle_repeat_batch(batch_size=8, num_epochs=1, num_steps=None, drop_remainder=True))))
print('# batches w/ num_steps set and drop_remainder=True')
print(len(list(client_dataset.shuffle_repeat_batch(batch_size=8, num_epochs=None, num_steps=3, drop_remainder=True))))
print('# batches w/ num_epochs and num_steps set')
print(len(list(client_dataset.shuffle_repeat_batch(batch_size=8, num_epochs=1, num_steps=6))))
# Random shuffling.
print(list(client_dataset.shuffle_repeat_batch(batch_size=2, seed=None))[0])
# Fixed shuffling.
print(list(client_dataset.shuffle_repeat_batch(batch_size=2, seed=0))[0])
# A preprocessing function should return a new dict of examples instead of
# modifying its input.
def parity_feature(examples):
    """Return a new example dict with a 'z' feature holding the parity of 'y'.

    The input dict is not modified.  On a key collision the entries of
    ``examples`` win, so a pre-existing 'z' feature is preserved rather
    than overwritten.
    """
    return {'z': examples['y'] % 2, **examples}
# preprocess_batch returns a new FederatedData object that has one more
# preprocessing step at the very end than the original.
train_fd_z = train_fd.preprocess_batch(parity_feature)
client_id = b'105f96df763d4ddf:ALL_S_WELL_THAT_ENDS_WELL_GUIDERIUS_AND_ARVIRAGUS'
next(iter(train_fd_z.get_client(client_id).padded_batch(batch_size=4)))
# Load unpreprocessed data.
raw_fd = fedjax.datasets.shakespeare.load_split('train')
raw_fd.get_client(client_id).all_examples()
# We don't actually need client_id, but `FederatedData` supplies it so that
# different processing based on clients can be done.
def truncate_and_cast(client_id, examples, max_length=10):
    """Chop the concatenated snippet bytes into fixed-length int32 rows.

    All snippets are joined into a single byte stream, then split into
    ``max_length``-byte sequences; the last row is zero-padded on the
    right.  ``client_id`` is unused here, but `FederatedData` supplies it
    so that per-client processing is possible.

    Returns a dict with key 'snippets' mapped to an int32 array of shape
    (num_sequences, max_length).
    """
    labels = list(b''.join(examples['snippets']))
    # Ceiling division: number of max_length-sized rows needed.
    num_sequences = (len(labels) + max_length - 1) // max_length
    padded = np.zeros((num_sequences, max_length), dtype=np.int32)
    for i in range(num_sequences):
        chars = labels[i * max_length:(i + 1) * max_length]
        padded[i, :len(chars)] = chars
    return {'snippets': padded}
partial_fd = raw_fd.preprocess_client(truncate_and_cast)
partial_fd.get_client(client_id).all_examples()
def snippets_to_xy(examples):
    """Build next-character prediction pairs from padded snippet rows.

    'x' is each row with its last column dropped and 'y' is the row
    shifted left by one, so y[t] is the character that follows x[t].
    """
    snippets = examples['snippets']
    return {'x': snippets[:, :-1], 'y': snippets[:, 1:]}
partial_fd.preprocess_batch(snippets_to_xy).get_client(client_id).all_examples()
# Obtain MNIST dataset from tensorflow and convert to numpy format.
import tensorflow_datasets as tfds
(ds_train, ds_test) = tfds.load('mnist',
split=['train', 'test'],
shuffle_files=True,
as_supervised=True,
with_info=False)
features, labels = list(ds_train.batch(60000).as_numpy_iterator())[0]
print('features shape', features.shape)
print('labels shape', labels.shape)
# Randomly split dataset into 100 clients and load them to a dictionary.
indices = np.random.randint(100, size=60000)
client_id_to_dataset_mapping = {}
for i in range(100):
client_id_to_dataset_mapping[i] = {'x': features[indices==i, :, : , :],
'y': labels[indices==i]}
# Create fedjax.InMemoryDataset.
iid_mnist_federated_data = fedjax.InMemoryFederatedData(
client_id_to_dataset_mapping)
print('number of clients in iid_mnist_data',
iid_mnist_federated_data.num_clients())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Convolutional Networks
Step2: Convolution
Step4: Aside
Step5: Convolution
Step6: Max pooling
Step7: Max pooling
Step8: Fast layers
Step9: Convolutional "sandwich" layers
Step10: Three-layer ConvNet
Step11: Gradient check
Step12: Overfit small data
Step13: Plotting the loss, training accuracy, and validation accuracy should show clear overfitting
Step14: Train the net
Step15: Visualize Filters
Step16: Spatial Batch Normalization
Step17: Spatial batch normalization
|
<ASSISTANT_TASK:>
Python Code:
# As usual, a bit of setup
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.cnn import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient_array, eval_numerical_gradient
from cs231n.layers import *
from cs231n.fast_layers import *
from cs231n.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
    """Return the maximum relative error between arrays x and y.

    The denominator is clamped at 1e-8 so comparing two near-zero
    arrays does not divide by zero.
    """
    return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in data.items():
print('%s: ' % k, v.shape)
x_shape = (2, 3, 4, 4)
w_shape = (3, 3, 4, 4)
x = np.linspace(-0.1, 0.5, num=np.prod(x_shape)).reshape(x_shape)
w = np.linspace(-0.2, 0.3, num=np.prod(w_shape)).reshape(w_shape)
b = np.linspace(-0.1, 0.2, num=3)
conv_param = {'stride': 2, 'pad': 1}
out, _ = conv_forward_naive(x, w, b, conv_param)
correct_out = np.array([[[[-0.08759809, -0.10987781],
[-0.18387192, -0.2109216 ]],
[[ 0.21027089, 0.21661097],
[ 0.22847626, 0.23004637]],
[[ 0.50813986, 0.54309974],
[ 0.64082444, 0.67101435]]],
[[[-0.98053589, -1.03143541],
[-1.19128892, -1.24695841]],
[[ 0.69108355, 0.66880383],
[ 0.59480972, 0.56776003]],
[[ 2.36270298, 2.36904306],
[ 2.38090835, 2.38247847]]]])
# Compare your output to ours; difference should be around 2e-8
print('Testing conv_forward_naive')
print('difference: ', rel_error(out, correct_out))
from scipy.misc import imread, imresize
kitten, puppy = imread('kitten.jpg'), imread('puppy.jpg')
# kitten is wide, and puppy is already square
d = kitten.shape[1] - kitten.shape[0]
kitten_cropped = kitten[:, d//2:-d//2, :]
img_size = 200 # Make this smaller if it runs too slow
x = np.zeros((2, 3, img_size, img_size))
x[0, :, :, :] = imresize(puppy, (img_size, img_size)).transpose((2, 0, 1))
x[1, :, :, :] = imresize(kitten_cropped, (img_size, img_size)).transpose((2, 0, 1))
# Set up a convolutional weights holding 2 filters, each 3x3
w = np.zeros((2, 3, 3, 3))
# The first filter converts the image to grayscale.
# Set up the red, green, and blue channels of the filter.
w[0, 0, :, :] = [[0, 0, 0], [0, 0.3, 0], [0, 0, 0]]
w[0, 1, :, :] = [[0, 0, 0], [0, 0.6, 0], [0, 0, 0]]
w[0, 2, :, :] = [[0, 0, 0], [0, 0.1, 0], [0, 0, 0]]
# Second filter detects horizontal edges in the blue channel.
w[1, 2, :, :] = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]]
# Vector of biases. We don't need any bias for the grayscale
# filter, but for the edge detection filter we want to add 128
# to each output so that nothing is negative.
b = np.array([0, 128])
# Compute the result of convolving each input in x with each filter in w,
# offsetting by b, and storing the results in out.
out, _ = conv_forward_naive(x, w, b, {'stride': 1, 'pad': 1})
def imshow_noax(img, normalize=True):
    """Show *img* as uint8 on the current axes, with axis labels removed.

    When ``normalize`` is True the image is linearly rescaled to the full
    0-255 range before display, which makes low-contrast feature maps
    visible.
    """
    if normalize:
        img_max, img_min = np.max(img), np.min(img)
        img = 255.0 * (img - img_min) / (img_max - img_min)
    plt.imshow(img.astype('uint8'))
    plt.gca().axis('off')
# Show the original images and the results of the conv operation
plt.subplot(2, 3, 1)
imshow_noax(puppy, normalize=False)
plt.title('Original image')
plt.subplot(2, 3, 2)
imshow_noax(out[0, 0])
plt.title('Grayscale')
plt.subplot(2, 3, 3)
imshow_noax(out[0, 1])
plt.title('Edges')
plt.subplot(2, 3, 4)
imshow_noax(kitten_cropped, normalize=False)
plt.subplot(2, 3, 5)
imshow_noax(out[1, 0])
plt.subplot(2, 3, 6)
imshow_noax(out[1, 1])
plt.show()
np.random.seed(231)
# x = np.random.randn(2, 2, 5, 5)
# w = np.random.randn(2, 2, 3, 3)
# b = np.random.randn(2,)
# dout = np.random.randn(2, 2, 5, 5)
# conv_param = {'stride': 1, 'pad': 1}
x = np.random.randn(4, 3, 5, 5)
w = np.random.randn(2, 3, 3, 3)
b = np.random.randn(2,)
dout = np.random.randn(4, 2, 5, 5)
conv_param = {'stride': 1, 'pad': 1}
dx_num = eval_numerical_gradient_array(lambda x: conv_forward_naive(x, w, b, conv_param)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: conv_forward_naive(x, w, b, conv_param)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: conv_forward_naive(x, w, b, conv_param)[0], b, dout)
out, cache = conv_forward_naive(x, w, b, conv_param)
dx, dw, db = conv_backward_naive(dout, cache)
# Your errors should be around 1e-8'
print('Testing conv_backward_naive function')
print('dx error: ', rel_error(dx, dx_num))
print('dw error: ', rel_error(dw, dw_num))
print('db error: ', rel_error(db, db_num))
x_shape = (2, 3, 4, 4)
x = np.linspace(-0.3, 0.4, num=np.prod(x_shape)).reshape(x_shape)
pool_param = {'pool_width': 2, 'pool_height': 2, 'stride': 2}
out, _ = max_pool_forward_naive(x, pool_param)
correct_out = np.array([[[[-0.26315789, -0.24842105],
[-0.20421053, -0.18947368]],
[[-0.14526316, -0.13052632],
[-0.08631579, -0.07157895]],
[[-0.02736842, -0.01263158],
[ 0.03157895, 0.04631579]]],
[[[ 0.09052632, 0.10526316],
[ 0.14947368, 0.16421053]],
[[ 0.20842105, 0.22315789],
[ 0.26736842, 0.28210526]],
[[ 0.32631579, 0.34105263],
[ 0.38526316, 0.4 ]]]])
# Compare your output with ours. Difference should be around 1e-8.
print('Testing max_pool_forward_naive function:')
print('difference: ', rel_error(out, correct_out))
np.random.seed(231)
x = np.random.randn(3, 2, 8, 8)
dout = np.random.randn(3, 2, 4, 4)
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
dx_num = eval_numerical_gradient_array(lambda x: max_pool_forward_naive(x, pool_param)[0], x, dout)
out, cache = max_pool_forward_naive(x, pool_param)
dx = max_pool_backward_naive(dout, cache)
# Your error should be around 1e-12
print('Testing max_pool_backward_naive function:')
print('dx error: ', rel_error(dx, dx_num))
from cs231n.fast_layers import conv_forward_fast, conv_backward_fast
from time import time
np.random.seed(231)
x = np.random.randn(100, 3, 31, 31)
w = np.random.randn(25, 3, 3, 3)
b = np.random.randn(25,)
dout = np.random.randn(100, 25, 16, 16)
conv_param = {'stride': 2, 'pad': 1}
t0 = time()
out_naive, cache_naive = conv_forward_naive(x, w, b, conv_param)
t1 = time()
out_fast, cache_fast = conv_forward_fast(x, w, b, conv_param)
t2 = time()
print('Testing conv_forward_fast:')
print('Naive: %fs' % (t1 - t0))
print('Fast: %fs' % (t2 - t1))
print('Speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('Difference: ', rel_error(out_naive, out_fast))
# TODO: speed naive bp, because it can't run to the end.
# t0 = time()
# dx_naive, dw_naive, db_naive = conv_backward_naive(dout, cache_naive)
# t1 = time()
# dx_fast, dw_fast, db_fast = conv_backward_fast(dout, cache_fast)
# t2 = time()
# print('\nTesting conv_backward_fast:')
# print('Naive: %fs' % (t1 - t0))
# print('Fast: %fs' % (t2 - t1))
# print('Speedup: %fx' % ((t1 - t0) / (t2 - t1)))
# print('dx difference: ', rel_error(dx_naive, dx_fast))
# print('dw difference: ', rel_error(dw_naive, dw_fast))
# print('db difference: ', rel_error(db_naive, db_fast))
from cs231n.fast_layers import max_pool_forward_fast, max_pool_backward_fast
np.random.seed(231)
x = np.random.randn(100, 3, 32, 32)
dout = np.random.randn(100, 3, 16, 16)
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
t0 = time()
out_naive, cache_naive = max_pool_forward_naive(x, pool_param)
t1 = time()
out_fast, cache_fast = max_pool_forward_fast(x, pool_param)
t2 = time()
print('Testing pool_forward_fast:')
print('Naive: %fs' % (t1 - t0))
print('fast: %fs' % (t2 - t1))
print('speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('difference: ', rel_error(out_naive, out_fast))
t0 = time()
dx_naive = max_pool_backward_naive(dout, cache_naive)
t1 = time()
dx_fast = max_pool_backward_fast(dout, cache_fast)
t2 = time()
print('\nTesting pool_backward_fast:')
print('Naive: %fs' % (t1 - t0))
print('speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('dx difference: ', rel_error(dx_naive, dx_fast))
from cs231n.layer_utils import conv_relu_pool_forward, conv_relu_pool_backward
np.random.seed(231)
x = np.random.randn(2, 3, 16, 16)
w = np.random.randn(3, 3, 3, 3)
b = np.random.randn(3,)
dout = np.random.randn(2, 3, 8, 8)
conv_param = {'stride': 1, 'pad': 1}
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
out, cache = conv_relu_pool_forward(x, w, b, conv_param, pool_param)
dx, dw, db = conv_relu_pool_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], b, dout)
print('Testing conv_relu_pool')
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
from cs231n.layer_utils import conv_relu_forward, conv_relu_backward
np.random.seed(231)
x = np.random.randn(2, 3, 8, 8)
w = np.random.randn(3, 3, 3, 3)
b = np.random.randn(3,)
dout = np.random.randn(2, 3, 8, 8)
conv_param = {'stride': 1, 'pad': 1}
out, cache = conv_relu_forward(x, w, b, conv_param)
dx, dw, db = conv_relu_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: conv_relu_forward(x, w, b, conv_param)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: conv_relu_forward(x, w, b, conv_param)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: conv_relu_forward(x, w, b, conv_param)[0], b, dout)
print('Testing conv_relu:')
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
model = ThreeLayerConvNet()
N = 50
X = np.random.randn(N, 3, 32, 32)
y = np.random.randint(10, size=N)
loss, grads = model.loss(X, y)
print('Initial loss (no regularization): ', loss)
model.reg = 0.5
loss, grads = model.loss(X, y)
print('Initial loss (with regularization): ', loss)
num_inputs = 2
input_dim = (3, 16, 16)
reg = 0.0
num_classes = 10
np.random.seed(231)
X = np.random.randn(num_inputs, *input_dim)
y = np.random.randint(num_classes, size=num_inputs)
model = ThreeLayerConvNet(num_filters=3, filter_size=3,
input_dim=input_dim, hidden_dim=7,
dtype=np.float64)
loss, grads = model.loss(X, y)
for param_name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
param_grad_num = eval_numerical_gradient(f, model.params[param_name], verbose=False, h=1e-6)
e = rel_error(param_grad_num, grads[param_name])
print('%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name])))
np.random.seed(231)
num_train = 100
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
model = ThreeLayerConvNet(weight_scale=1e-2)
solver = Solver(model, small_data,
num_epochs=30, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 6e-5, #1e-3
},
verbose=True, print_every=1)
solver.train()
plt.subplot(2, 1, 1)
plt.plot(solver.loss_history, 'o')
plt.xlabel('iteration')
plt.ylabel('loss')
plt.subplot(2, 1, 2)
plt.plot(solver.train_acc_history, '-o')
plt.plot(solver.val_acc_history, '-o')
plt.legend(['train', 'val'], loc='upper left')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.show()
model = ThreeLayerConvNet(weight_scale=0.001, hidden_dim=500, reg=0.001)
solver = Solver(model, data,
num_epochs=1, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=20)
solver.train()
from cs231n.vis_utils import visualize_grid
grid = visualize_grid(model.params['W1'].transpose(0, 2, 3, 1))
plt.imshow(grid.astype('uint8'))
plt.axis('off')
plt.gcf().set_size_inches(5, 5)
plt.show()
np.random.seed(231)
# Check the training-time forward pass by checking means and variances
# of features both before and after spatial batch normalization
N, C, H, W = 2, 3, 4, 5
x = 4 * np.random.randn(N, C, H, W) + 10
print('Before spatial batch normalization:')
print(' Shape: ', x.shape)
print(' Means: ', x.mean(axis=(0, 2, 3)))
print(' Stds: ', x.std(axis=(0, 2, 3)))
# Means should be close to zero and stds close to one
gamma, beta = np.ones(C), np.zeros(C)
bn_param = {'mode': 'train'}
out, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param)
print('After spatial batch normalization:')
print(' Shape: ', out.shape)
print(' Means: ', out.mean(axis=(0, 2, 3)))
print(' Stds: ', out.std(axis=(0, 2, 3)))
# Means should be close to beta and stds close to gamma
gamma, beta = np.asarray([3, 4, 5]), np.asarray([6, 7, 8])
out, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param)
print('After spatial batch normalization (nontrivial gamma, beta):')
print(' Shape: ', out.shape)
print(' Means: ', out.mean(axis=(0, 2, 3)))
print(' Stds: ', out.std(axis=(0, 2, 3)))
np.random.seed(231)
# Check the test-time forward pass by running the training-time
# forward pass many times to warm up the running averages, and then
# checking the means and variances of activations after a test-time
# forward pass.
N, C, H, W = 10, 4, 11, 12
bn_param = {'mode': 'train'}
gamma = np.ones(C)
beta = np.zeros(C)
for t in range(50):
x = 2.3 * np.random.randn(N, C, H, W) + 13
spatial_batchnorm_forward(x, gamma, beta, bn_param)
bn_param['mode'] = 'test'
x = 2.3 * np.random.randn(N, C, H, W) + 13
a_norm, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param)
# Means should be close to zero and stds close to one, but will be
# noisier than training-time forward passes.
print('After spatial batch normalization (test-time):')
print(' means: ', a_norm.mean(axis=(0, 2, 3)))
print(' stds: ', a_norm.std(axis=(0, 2, 3)))
np.random.seed(231)
N, C, H, W = 2, 3, 4, 5
x = 5 * np.random.randn(N, C, H, W) + 12
gamma = np.random.randn(C)
beta = np.random.randn(C)
dout = np.random.randn(N, C, H, W)
bn_param = {'mode': 'train'}
fx = lambda x: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0]
fg = lambda a: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0]
fb = lambda b: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
da_num = eval_numerical_gradient_array(fg, gamma, dout)
db_num = eval_numerical_gradient_array(fb, beta, dout)
_, cache = spatial_batchnorm_forward(x, gamma, beta, bn_param)
dx, dgamma, dbeta = spatial_batchnorm_backward(dout, cache)
print('dx error: ', rel_error(dx_num, dx))
print('dgamma error: ', rel_error(da_num, dgamma))
print('dbeta error: ', rel_error(db_num, dbeta))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Daophot object creates temporary working directory (runner directory), which is passed to Allstar constructor to share.
Step2: Daophot was given the FITS file at construction, so it will be automatically ATTACHed.
Step3: Check some results returned by FIND, every method for daophot command returns results object.
Step4: Also, take a look into runner directory
Step5: We see symlinks to input image and opt files, and i.coo - result of FIND
Step6: Star lists generated by daophot commands can easily be obtained as an astwro.starlist.Starlist, which is essentially a pandas.DataFrame
Step7: Let's check 10 stars with least A1 error (mag_err column). (pandas style)
Step8: (3) SORT the output from PHOTOMETRY
Step9: Here we write sorted list back info photometry file at default name (overwriting existing one), because it's convenient to use default files in next commands.
Step10: (4) PICK to generate a set of likely PSF stars
Step11: If no error reported, symlink to image file (renamed to i.fits), and all daophot output files (i.*) are in the working directory of runner
Step12: One may examine and improve i.lst list of PSF stars. Or use astwro.tools.gapick.py to obtain list of PSF stars optimised by genetic algorithm.
Step13: (6) Run GROUP and NSTAR or ALLSTAR on your NEI file
Step14: All result objects, has get_buffer() method, useful to lookup unparsed daophot or allstar output
Step15: *(8) EXIT from DAOPHOT and send this new picture to the image display *
Step16: We can also generate region file for psf stars
Step17: (9) Back in DAOPHOT II ATTACH the original picture and run SUBSTAR
Step18: You have now created a new picture which has the PSF stars still in it but from which the known neighbors of these PSF stars have been mostly removed
Step19: Check last image with subtracted PSF stars neighbours.
Step20: Once you have produced a frame in which the PSF stars and their neighbors all subtract out cleanly, one more time through PSF should produce a point-spread function you can be proud of.
|
<ASSISTANT_TASK:>
Python Code:
from astwro.sampledata import fits_image
frame = fits_image()
from astwro.pydaophot import Daophot, Allstar
dp = Daophot(image=frame)
al = Allstar(dir=dp.dir)
res = dp.FInd(frames_av=1, frames_sum=1)
print ("{} pixels analysed, sky estimate {}, {} stars found.".format(res.pixels, res.sky, res.stars))
!ls -lt $dp.dir
res = dp.PHotometry(apertures=[8], IS=35, OS=50)
stars = res.photometry_starlist
stars.sort_values('mag_err').iloc[:10]
sorted_stars = stars.sort_values('mag')
sorted_stars.renumber()
dp.write_starlist(sorted_stars, 'i.ap')
!head -n20 $dp.PHotometry_result.photometry_file
dp.PHotometry_result.photometry_file
pick_res = dp.PIck(faintest_mag=20, number_of_stars_to_pick=40)
ls $dp.dir
dp.set_options('VARIABLE PSF', 2)
psf_res = dp.PSf()
alls_res = al.ALlstar(image_file=frame, stars=psf_res.nei_file, subtracted_image_file='is.fits')
print (alls_res.get_buffer())
sub_img = alls_res.subtracted_image_file
from astwro.starlist.ds9 import write_ds9_regions
reg_file_path = dp.file_from_runner_dir('lst.reg')
write_ds9_regions(pick_res.picked_starlist, reg_file_path)
# One can run ds9 directly from notebook:
!ds9 $sub_img -regions $reg_file_path
ls $al.dir
sub_res = dp.SUbstar(subtract=alls_res.profile_photometry_file, leave_in=pick_res.picked_stars_file)
for i in range(3):
print ("Iteration {}: Allstar chi: {}".format(i, alls_res.als_stars.chi.mean()))
dp.image = 'is.fits'
respsf = dp.PSf()
print ("Iteration {}: PSF chi: {}".format(i, respsf.chi))
alls_res = al.ALlstar(image_file=frame, stars='i.nei')
dp.image = frame
dp.SUbstar(subtract='i.als', leave_in='i.lst')
print ("Final: Allstar chi: {}".format(alls_res.als_stars.chi.mean()))
alls_res.als_stars
!ds9 $dp.SUbstar_result.subtracted_image_file -regions $reg_file_path
dp.image = 'is.fits'
psf_res = dp.PSf()
print ("PSF file: {}".format(psf_res.psf_file))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2016-10-07
Step2: 1. L1-Regularized Logistic Regression
Step3: Question Compute the cross-validated predictions of the l1-regularized logistic regression with default parameters on our data.
Step4: Question What criterion is used to choose the optimal C? See the documentation at http
Step5: Question Plot the corresponding ROC curve, and compare to that obtained for
Step6: Question Compare the regression weights obtained with and without l1-regularization, in two side-by-side plots.
Step7: 2. L2-regularized logistic regression
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
%pylab inline
# Load the data as usual (here the code for Python 2.7)
X = np.loadtxt('data/small_Endometrium_Uterus.csv', delimiter=',', skiprows=1, usecols=range(1, 3001))
y = np.loadtxt('data/small_Endometrium_Uterus.csv', delimiter=',', skiprows=1, usecols=[3001],
converters={3001: lambda s: 0 if s=='Endometrium' else 1}, dtype='int')
# Set up a stratified 10-fold cross-validation
from sklearn import cross_validation
folds = cross_validation.StratifiedKFold(y, 10, shuffle=True)
# Create a function that does cross-validation and scales the features on each training set.
from sklearn import preprocessing
def cross_validate_with_scaling(design_matrix, labels, classifier, cv_folds):
    """Perform a cross-validation and return the predictions.

    A StandardScaler (mean 0, standard deviation 1) is fitted on each
    training fold only and then applied to the matching test fold, so no
    information leaks from test to train.

    Parameters:
    -----------
    design_matrix: (n_samples, n_features) np.array
        Design matrix for the experiment.
    labels: (n_samples, ) np.array
        Vector of labels.
    classifier: sklearn classifier object
        Classifier instance; must have the following methods:
        - fit(X, y) to train the classifier on the data X, y
        - predict_proba(X) to apply the trained classifier to the data X
          and return probability estimates
    cv_folds: sklearn cross-validation object
        Cross-validation iterator.

    Return:
    -------
    pred: (n_samples, ) np.array
        Vector of predicted probabilities for the positive class
        (same order as labels).
    """
    pred = np.zeros(labels.shape)  # vector of 0 in which to store the predictions
    for tr, te in cv_folds:
        # Restrict data to train/test folds
        Xtr = design_matrix[tr, :]
        ytr = labels[tr]
        Xte = design_matrix[te, :]
        # Scale data: fit the scaler on the training fold, apply to both.
        scaler = preprocessing.StandardScaler()
        Xtr = scaler.fit_transform(Xtr)
        Xte = scaler.transform(Xte)
        # Fit classifier
        classifier.fit(Xtr, ytr)
        # Predict probabilities (of belonging to +1 class) on test data
        yte_pred = classifier.predict_proba(Xte)  # two-dimensional array
        # Column of yte_pred that holds the positive class (y=1), looked up
        # in classifier.classes_.  The arithmetic shortcut
        # (1 - classes_[0]) / 2 only works for {-1, +1} labels and yields a
        # fractional index for the 0/1 labels used here.
        index_of_class_1 = np.nonzero(classifier.classes_ == 1)[0][0]
        pred[te] = yte_pred[:, index_of_class_1]
    return pred
from sklearn import linear_model
clf = linear_model.LogisticRegression(penalty='l1')
from sklearn import grid_search
param_grid = {'C':[1e-3, 1e-2, 1e-1, 1., 1e2, 1e3]}
clf = grid_search.GridSearchCV(linear_model.LogisticRegression(penalty='l1'), param_grid)
print clf.best_estimator_
# This code plots the regression weights of the classifier 'clf'
plt.plot(range(len(clf.best_estimator_.coef_[0])), clf.best_estimator_.coef_[0],
color='blue', marker='+', linestyle='')
plt.xlabel('Genes', fontsize=16)
plt.ylabel('Weights', fontsize=16)
plt.title('Logistic regression weights', fontsize=16)
plt.xlim([0, X.shape[1]])
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(121) # use a 1x2 subplot grid; ax will refer to the 1st subplot
number_of_weights = #TODO
logreg_weights = #TODO
ax.plot(range(number_of_weights), logreg_weights,
color='blue', marker='+', linestyle='')
ax.set_xlabel('Genes', fontsize=16)
ax.set_ylabel('Weights', fontsize=16)
ax.set_title('Logistic regression weights', fontsize=16)
ax.set_xlim([0, X.shape[1]])
ax = fig.add_subplot(122) # use a 1x2 subplot grid; ax will refer to the 2nd subplot
l1_logreg_weights = #TODO
ax.plot(ange(number_of_weights), l1_logreg_weights,
color='blue', marker='+', linestyle='')
ax.set_xlabel('Genes', fontsize=16)
ax.set_ylabel('Weights', fontsize=16)
ax.set_title('Regularized Logistic regression weights', fontsize=16)
ax.set_xlim([0, X.shape[1]])
plt.tight_layout()
clf = grid_search.GridSearchCV(linear_model.LogisticRegression(penalty='l2'), param_grid)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Question 0 (Example)
Step2: Question 1
Step3: Question 2
Step4: Question 3
Step5: Question 4
Step6: Part 2
Step7: Question 6
Step8: Question 7
Step9: Question 8
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.read_csv('olympics.csv', index_col=0, skiprows=1)
for col in df.columns:
if col[:2]=='01':
df.rename(columns={col:'Gold'+col[4:]}, inplace=True)
if col[:2]=='02':
df.rename(columns={col:'Silver'+col[4:]}, inplace=True)
if col[:2]=='03':
df.rename(columns={col:'Bronze'+col[4:]}, inplace=True)
if col[:1]=='№':
df.rename(columns={col:'#'+col[1:]}, inplace=True)
names_ids = df.index.str.split('\s\(') # split the index by '('
df.index = names_ids.str[0] # the [0] element is the country name (new index)
df['ID'] = names_ids.str[1].str[:3] # the [1] element is the abbreviation or ID (take first 3 characters from that)
df = df.drop('Totals')
df.head()
# You should write your whole answer within the function provided. The autograder will call
# this function and compare the return value against the correct solution value
def answer_zero():
    """Return the row for Afghanistan (the first row of df) as a Series.

    The autograder calls this function and compares the return value
    against the correct solution; the assignment question description
    tells you the general format it expects.
    """
    return df.iloc[0]
# You can examine what your function returns by calling it in the cell. If you have questions
# about the assignment formats, check out the discussion forums for any FAQs
answer_zero()
def answer_one():
    """Answer to Question 1 (placeholder -- replace with the computed answer)."""
    return "YOUR ANSWER HERE"


def answer_two():
    """Answer to Question 2 (placeholder -- replace with the computed answer)."""
    return "YOUR ANSWER HERE"


def answer_three():
    """Answer to Question 3 (placeholder -- replace with the computed answer)."""
    return "YOUR ANSWER HERE"


def answer_four():
    """Answer to Question 4 (placeholder -- replace with the computed answer)."""
    return "YOUR ANSWER HERE"
census_df = pd.read_csv('census.csv')
census_df.head()
def answer_five():
    """Answer to Question 5 (placeholder -- replace with the computed answer)."""
    return "YOUR ANSWER HERE"


def answer_six():
    """Answer to Question 6 (placeholder -- replace with the computed answer)."""
    return "YOUR ANSWER HERE"


def answer_seven():
    """Answer to Question 7 (placeholder -- replace with the computed answer)."""
    return "YOUR ANSWER HERE"


def answer_eight():
    """Answer to Question 8 (placeholder -- replace with the computed answer)."""
    return "YOUR ANSWER HERE"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Replacing with quantized layers
Step2: Converting a Model Automatically
Step3: Quantizing a Model With AutoQKeras
|
<ASSISTANT_TASK:>
Python Code:
units = 64
embedding_dim = 64
loss = 'binary_crossentropy'
def create_model(batch_size=None):
    """Build the float (unquantized) sentiment model.

    Architecture: Embedding -> linear Activation (named 'embedding_act',
    a hook for later quantization) -> Bidirectional LSTM -> Dense(1) ->
    sigmoid.  Relies on the module-level maxlen, max_features,
    embedding_dim and units.  When given, ``batch_size`` is fixed into
    the Input layer (presumably required by the distribution strategy --
    TODO confirm).
    """
    x = x_in = Input(shape=(maxlen,), batch_size=batch_size, dtype=tf.int32)
    x = Embedding(input_dim=max_features, output_dim=embedding_dim)(x)
    x = Activation('linear', name='embedding_act')(x)
    x = Bidirectional(LSTM(units))(x)
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)
    model = tf.keras.Model(inputs=[x_in], outputs=[x])
    return model
tf.keras.backend.clear_session()
with strategy.scope():
model = create_model(BATCH_SIZE)
custom_objects = {}
model.compile(
optimizer=Adam(learning_rate=0.01),
loss=loss,
metrics=['acc'])
model.summary()
print('Train...')
model.fit(
train_dataset,
epochs=10,
batch_size=BATCH_SIZE,
validation_data=test_dataset,
verbose=2)
def create_qmodel(batch_size=None):
    """Build the hand-quantized counterpart of create_model.

    Same topology as create_model, but with QKeras layers: a binary
    QActivation in place of the linear 'embedding_act', a QLSTM with
    4-bit activations and ternary/2-bit weight quantizers, and a 4-bit
    QDense output followed by a sigmoid QActivation.
    """
    x = x_in = Input(shape=(maxlen,), batch_size=batch_size, dtype=tf.int32)
    x = Embedding(input_dim=max_features, output_dim=embedding_dim)(x)
    x = QActivation('binary', name='embedding_act')(x)
    x = QLSTM(
        units,
        activation='quantized_tanh(4)',
        recurrent_activation='quantized_relu(4,0,1)',
        kernel_quantizer='stochastic_ternary("auto")',
        recurrent_quantizer='quantized_bits(2,1,1,alpha=1.0)',
        bias_quantizer='quantized_bits(4,0,1)')(x)
    x = QDense(
        1,
        kernel_quantizer="quantized_bits(4,0,1)",
        bias_quantizer='quantized_bits(4,0,1)')(x)
    x = QActivation('sigmoid')(x)
    model = tf.keras.Model(inputs=[x_in], outputs=[x])
    return model
tf.keras.backend.clear_session()
with strategy.scope():
qmodel = create_qmodel(BATCH_SIZE)
custom_objects = {}
qmodel.compile(
optimizer=Adam(learning_rate=0.01),
loss=loss,
metrics=['acc'])
qmodel.summary()
print('Train...')
qmodel.fit(train_dataset,
batch_size=BATCH_SIZE,
epochs=10,
verbose=2,
validation_data=test_dataset)
bits = 4
# Per-layer quantizer specs for model_quantize.  Every spec string must be a
# complete quantizer call; the original 'dense' and 'embedding_act' entries
# placed alpha='auto' OUTSIDE the parentheses ("quantized_bits(4), alpha='auto'"),
# producing an unparseable quantizer string -- fixed to match the
# 'bidirectional' entries.
quantizer_config = {
    "bidirectional": {
        'activation': f"quantized_tanh({bits})",
        'recurrent_activation': f"quantized_relu(4,0,1)",
        'kernel_quantizer': f"quantized_bits({bits}, alpha='auto')",
        'recurrent_quantizer': f"quantized_bits({bits}, alpha='auto')",
        'bias_quantizer': f"quantized_bits({bits}, alpha='auto')",
    },
    "dense": {
        'kernel_quantizer': f"quantized_bits({bits}, alpha='auto')",
        'bias_quantizer': f"quantized_bits({bits}, alpha='auto')"
    },
    "embedding_act": f"quantized_bits({bits}, alpha='auto')",
}
tf.keras.backend.clear_session()
with strategy.scope():
model = create_model(BATCH_SIZE)
custom_objects = {}
qmodel = model_quantize(model, quantizer_config, bits, custom_objects)
qmodel.compile(
optimizer=Adam(learning_rate=0.01),
loss=loss,
metrics=['acc'])
qmodel.summary()
print('Train...')
qmodel.fit(train_dataset,
batch_size=BATCH_SIZE,
epochs=10,
verbose=2,
validation_data=test_dataset)
tf.keras.backend.clear_session()
with strategy.scope():
model = create_model(BATCH_SIZE)
custom_objects = {}
model.compile(
optimizer=Adam(learning_rate=0.01),
loss=loss,
metrics=['acc'])
quantization_config = {
"kernel": {
"stochastic_binary": 1,
"stochastic_ternary": 2,
"quantized_bits(4,0,1,alpha=1.0)": 4,
"quantized_po2(4,1)": 4
},
"recurrent_kernel": {
"stochastic_binary": 1,
"stochastic_ternary": 2,
"quantized_bits(4,0,1,alpha=1.0)": 4,
"quantized_po2(4,1)": 4
},
"recurrent_activation": {
"quantized_relu(4,0,1)": 4
},
"bias": {
"quantized_bits(4,0,1)": 4,
"quantized_po2(4,1)": 4
},
"activation" : {
"stochastic_ternary('auto')": 2,
"quantized_tanh(4)" : 4,
"quantized_relu_po2(4,1)": 4,
"quantized_relu(4,2)": 4,
},
"linear": {
"stochastic_ternary('auto')" : 2,
"quantized_tanh(4)" : 4,
"quantized_relu_po2(4,1)": 4,
"quantized_relu(3,1)": 3,
"quantized_relu(4,2)": 4,
}
}
limit = {
"Dense": [4],
"Bidirectional": [4],
"Activation": [4],
"default" : [4]*4
}
goal = {
"type": "bits",
"params": {
"delta_p": 8.0,
"delta_n": 8.0,
"rate": 2.0,
"stress": 1.0,
"input_bits": 4,
"output_bits": 4,
"ref_bits": 4,
"config": {
"default": ["parameters", "activations"]
}
}
}
run_config = {
"output_dir": tempfile.mkdtemp(),
"goal": goal,
"quantization_config": quantization_config,
"learning_rate_optimizer": False,
"transfer_weights": False,
"mode": "random",
"seed": 42,
"limit": limit,
"tune_filters": "layer",
"tune_filters_exceptions": "^dense",
"distribution_strategy": strategy,
"layer_indexes": range(2, len(model.layers) - 1),
"max_trials": 1000
}
print("quantizing layers:", [model.layers[i].name for i in run_config["layer_indexes"]])
autoqk = AutoQKeras(model, metrics=["acc"], custom_objects={}, **run_config)
autoqk.fit(
train_dataset,
validation_data=test_dataset,
batch_size=BATCH_SIZE,
epochs=10,
verbose=2)
qmodel = autoqk.get_best_model()
qmodel.save_weights("qmodel.h5")
print_qmodel_summary(qmodel)
print(get_quantization_dictionary(qmodel))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Low frequency drifts and line noise
Step2: we see high amplitude undulations in low frequencies, spanning across tens of
Step3: On MEG sensors we see narrow frequency peaks at 60, 120, 180, 240 Hz,
Step4: we can see typical time courses and non dipolar topographies
|
<ASSISTANT_TASK:>
Python Code:
import mne
from mne.datasets import sample
from mne.preprocessing import create_ecg_epochs, create_eog_epochs
# getting some data ready
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
(raw.copy().pick_types(meg='mag')
.del_proj(0)
.plot(duration=60, n_channels=100, remove_dc=False))
raw.plot_psd(fmax=250)
average_ecg = create_ecg_epochs(raw).average()
print('We found %i ECG events' % average_ecg.nave)
average_ecg.plot_joint()
average_eog = create_eog_epochs(raw).average()
print('We found %i EOG events' % average_eog.nave)
average_eog.plot_joint()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading FFT routines
Step2: Initializing Data
Step3: $W$ TRANSFORM FROM AXES-0
Step4: Forward Transform
Step5: Inverse Transform
Step6: $W$ TRANSFORM FROM AXES-1
Step7: Forward Transform
Step8: Inverse Transform
Step9: $W$ TRANSFORM FROM AXES-2
Step10: Forward Transform
Step11: Inverse Transform
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import ctypes
from ctypes import *
import pycuda.gpuarray as gpuarray
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import math
import time
%matplotlib inline
gridDIM = 64
size = gridDIM*gridDIM*gridDIM
axes0 = 0
axes1 = 1
axes2 = 2
makeC2C = 0
makeR2C = 1
makeC2R = 1
axesSplit_0 = 0
axesSplit_1 = 1
axesSplit_2 = 2
segment_axes0 = 0
segment_axes1 = 0
segment_axes2 = 0
DIR_BASE = "/home/robert/Documents/new1/FFT/code/"
# FAFT
_faft128_3D = ctypes.cdll.LoadLibrary( DIR_BASE+'FAFT128_3D_R2C.so' )
_faft128_3D.FAFT128_3D_R2C.restype = int
_faft128_3D.FAFT128_3D_R2C.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_float, ctypes.c_float, ctypes.c_int,
ctypes.c_int, ctypes.c_int, ctypes.c_int]
cuda_faft = _faft128_3D.FAFT128_3D_R2C
# Inv FAFT
_ifaft128_3D = ctypes.cdll.LoadLibrary(DIR_BASE+'IFAFT128_3D_C2R.so')
_ifaft128_3D.IFAFT128_3D_C2R.restype = int
_ifaft128_3D.IFAFT128_3D_C2R.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_float, ctypes.c_float, ctypes.c_int,
ctypes.c_int, ctypes.c_int, ctypes.c_int]
cuda_ifaft = _ifaft128_3D.IFAFT128_3D_C2R
def Gaussian(x,mu,sigma):
    """Normalized Gaussian density with mean ``mu`` and standard deviation ``sigma``."""
    norm_const = 1.0 / (sigma * np.sqrt(2.0 * np.pi))
    exponent = -((x - mu) ** 2) / (2.0 * sigma ** 2)
    return norm_const * np.exp(exponent)
def fftGaussian(p,mu,sigma):
    """Analytic Fourier transform of the Gaussian: a phase factor from the mean
    times a Gaussian envelope in momentum space."""
    phase = np.exp(-1j * mu * p)
    envelope = np.exp(-0.5 * (p * sigma) ** 2)
    return phase * envelope
# Gaussian parameters
mu_x = 1.5
sigma_x = 1.
mu_y = 1.5
sigma_y = 1.
mu_z = 1.5
sigma_z = 1.
# Grid parameters
x_amplitude = 5.
p_amplitude = 6. # With the traditional method p amplitude is fixed to: 2 * np.pi /( 2*x_amplitude )
dx = 2*x_amplitude/float(gridDIM) # This is dx in Bailey's paper
dp = 2*p_amplitude/float(gridDIM) # This is gamma in Bailey's paper
delta = dx*dp/(2*np.pi)
x_range = np.linspace( -x_amplitude, x_amplitude-dx, gridDIM)
p = np.linspace( -p_amplitude, p_amplitude-dp, gridDIM)
x = x_range[ np.newaxis, np.newaxis, : ]
y = x_range[ np.newaxis, :, np.newaxis ]
z = x_range[ :, np.newaxis, np.newaxis ]
f = Gaussian(x,mu_x,sigma_x)*Gaussian(y,mu_y,sigma_y)*Gaussian(z,mu_z,sigma_z)
plt.imshow( f[:, :, 0], extent=[-x_amplitude , x_amplitude-dx, -x_amplitude , x_amplitude-dx] )
axis_font = {'size':'24'}
plt.text( 0., 5.1, '$W$' , **axis_font)
plt.colorbar()
#plt.ylim(0,0.44)
print ' Amplitude x = ',x_amplitude
print ' Amplitude p = ',p_amplitude
print ' '
print 'mu_x = ', mu_x
print 'mu_y = ', mu_y
print 'mu_z = ', mu_z
print 'sigma_x = ', sigma_x
print 'sigma_y = ', sigma_y
print 'sigma_z = ', sigma_z
print ' '
print 'n = ', x.size
print 'dx = ', dx
print 'dp = ', dp
print ' standard fft dp = ',2 * np.pi /( 2*x_amplitude ) , ' '
print ' '
print 'delta = ', delta
print ' '
print 'The Gaussian extends to the numerical error in single precision:'
print ' min = ', np.min(f)
# Matrix for the 33th. complex values
f33 = np.zeros( [64, 1 ,64], dtype = np.complex64 )
# Copy to GPU
if 'f_gpu' in globals():
f_gpu.gpudata.free()
if 'f33_gpu' in globals():
f33_gpu.gpudata.free()
f_gpu = gpuarray.to_gpu( np.ascontiguousarray( f , dtype = np.float32 ) )
f33_gpu = gpuarray.to_gpu( np.ascontiguousarray( f33 , dtype = np.complex64 ) )
# Executing FFT
t_init = time.time()
cuda_faft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, delta, segment_axes0, axes0, makeR2C, axesSplit_0 )
cuda_faft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, delta, segment_axes1, axes1, makeC2C, axesSplit_0 )
cuda_faft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, delta, segment_axes2, axes2, makeC2C, axesSplit_0 )
t_end = time.time()
print 'computation time = ', t_end - t_init
plt.imshow( np.append( f_gpu.get()[:, :32, :], f33_gpu.get().real, axis=1 )[32,:,:]
/float(np.sqrt(size)),
extent=[-p_amplitude , p_amplitude-dp, 0, p_amplitude-dp] )
plt.colorbar()
axis_font = {'size':'24'}
plt.text( 0., 5.2, '$Re \\mathcal{F}(W)$', **axis_font )
plt.xlim(-x_amplitude , x_amplitude-dx)
plt.ylim(0 , x_amplitude)
plt.imshow( np.append( f_gpu.get()[:, 32:, :], f33_gpu.get().imag, axis=1 )[32,:,:]
/float(np.sqrt(size)),
extent=[-p_amplitude , p_amplitude-dp, 0, p_amplitude-dp] )
plt.colorbar()
axis_font = {'size':'24'}
plt.text( 0., 5.2, '$Im \\mathcal{F}(W)$', **axis_font )
plt.xlim(-x_amplitude , x_amplitude-dx)
plt.ylim(0 , x_amplitude)
# Executing iFFT
cuda_ifaft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, -delta, segment_axes2, axes2, makeC2C, axesSplit_0 )
cuda_ifaft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, -delta, segment_axes1, axes1, makeC2C, axesSplit_0 )
cuda_ifaft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, -delta, segment_axes0, axes0, makeC2R, axesSplit_0 )
plt.imshow( f_gpu.get()[32,:,:]/float(size) ,
extent=[-x_amplitude , x_amplitude-dx, -x_amplitude , x_amplitude-dx] )
plt.colorbar()
axis_font = {'size':'24'}
plt.text( -1, 5.2, '$W_{xy}$', **axis_font )
plt.xlim(-x_amplitude , x_amplitude-dx)
plt.ylim(-x_amplitude , x_amplitude-dx)
plt.imshow( f_gpu.get()[:,32,:]/float(size) ,
extent=[-x_amplitude , x_amplitude-dx, -x_amplitude , x_amplitude-dx] )
plt.colorbar()
axis_font = {'size':'24'}
plt.text( -1, 5.2, '$W_{xz}$', **axis_font )
plt.xlim(-x_amplitude , x_amplitude-dx)
plt.ylim(-x_amplitude , x_amplitude-dx)
plt.imshow( f_gpu.get()[:,:,32]/float(size) ,
extent=[-x_amplitude , x_amplitude-dx, -x_amplitude , x_amplitude-dx] )
plt.colorbar()
axis_font = {'size':'24'}
plt.text( -1, 5.2, '$W_{yz}$', **axis_font )
plt.xlim(-x_amplitude , x_amplitude-dx)
plt.ylim(-x_amplitude , x_amplitude-dx)
# Matrix for the 33th. complex values
f33 = np.zeros( [64, 64, 1], dtype = np.complex64 )
# One gpu array.
if 'f_gpu' in globals():
f_gpu.gpudata.free()
if 'f33_gpu' in globals():
f33_gpu.gpudata.free()
f_gpu = gpuarray.to_gpu( np.ascontiguousarray( f , dtype = np.float32 ) )
f33_gpu = gpuarray.to_gpu( np.ascontiguousarray( f33 , dtype = np.complex64 ) )
# Executing FFT
t_init = time.time()
cuda_faft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, delta, segment_axes1, axes1, makeR2C, axesSplit_1 )
cuda_faft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, delta, segment_axes0, axes0, makeC2C, axesSplit_1 )
cuda_faft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, delta, segment_axes2, axes2, makeC2C, axesSplit_1 )
t_end = time.time()
print 'computation time = ', t_end - t_init
plt.imshow( np.append( f_gpu.get()[:, :, :32], f33_gpu.get().real, axis=2 )[32,:,:]
/float(np.sqrt(size)),
extent=[-p_amplitude , 0, -p_amplitude , p_amplitude-dp] )
plt.colorbar()
axis_font = {'size':'24'}
plt.text( 0., 5.2, '$Re \\mathcal{F}(W)$', **axis_font )
plt.xlim(-x_amplitude , 0)
plt.ylim(-x_amplitude , x_amplitude-dx)
plt.imshow( np.append( f_gpu.get()[:, :, 32:], f33_gpu.get().imag, axis=2 )[32,:,:]
/float(np.sqrt(size)),
extent=[-p_amplitude , 0, -p_amplitude , p_amplitude-dp] )
plt.colorbar()
axis_font = {'size':'24'}
plt.text( 0., 5.2, '$Im \\mathcal{F}(W)$', **axis_font )
plt.xlim(-x_amplitude , 0)
plt.ylim(-x_amplitude , x_amplitude-dx)
# Executing iFFT
cuda_ifaft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, -delta, segment_axes2, axes2, makeC2C, axesSplit_1 )
cuda_ifaft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, -delta, segment_axes0, axes0, makeC2C, axesSplit_1 )
cuda_ifaft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, -delta, segment_axes1, axes1, makeC2R, axesSplit_1 )
plt.imshow( f_gpu.get()[32,:,:]/float(size) ,
extent=[-x_amplitude , x_amplitude-dx, -x_amplitude , x_amplitude-dx] )
plt.colorbar()
axis_font = {'size':'24'}
plt.text( -1, 5.2, '$W_{xy}$', **axis_font )
plt.xlim(-x_amplitude , x_amplitude-dx)
plt.ylim(-x_amplitude , x_amplitude-dx)
plt.imshow( f_gpu.get()[:,32,:]/float(size) ,
extent=[-x_amplitude , x_amplitude-dx, -x_amplitude , x_amplitude-dx] )
plt.colorbar()
axis_font = {'size':'24'}
plt.text( -1, 5.2, '$W_{xz}$', **axis_font )
plt.xlim(-x_amplitude , x_amplitude-dx)
plt.ylim(-x_amplitude , x_amplitude-dx)
plt.imshow( f_gpu.get()[:,:,32]/float(size) ,
extent=[-x_amplitude , x_amplitude-dx, -x_amplitude , x_amplitude-dx] )
plt.colorbar()
axis_font = {'size':'24'}
plt.text( -1, 5.2, '$W_{yz}$', **axis_font )
plt.xlim(-x_amplitude , x_amplitude-dx)
plt.ylim(-x_amplitude , x_amplitude-dx)
# Matrix for the 33th. complex values
f33 = np.zeros( [1, 64, 64], dtype = np.complex64 )
# One gpu array.
if 'f_gpu' in globals():
f_gpu.gpudata.free()
if 'f33_gpu' in globals():
f33_gpu.gpudata.free()
f_gpu = gpuarray.to_gpu( np.ascontiguousarray( f , dtype = np.float32 ) )
f33_gpu = gpuarray.to_gpu( np.ascontiguousarray( f33 , dtype = np.complex64 ) )
# Executing FFT
t_init = time.time()
cuda_faft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, delta, segment_axes2, axes2, makeR2C, axesSplit_2 )
cuda_faft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, delta, segment_axes1, axes1, makeC2C, axesSplit_2 )
cuda_faft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, delta, segment_axes0, axes0, makeC2C, axesSplit_2 )
t_end = time.time()
print 'computation time = ', t_end - t_init
plt.imshow( np.append( f_gpu.get()[:32, :, :], f33_gpu.get().real, axis=0 )[:,:,32]
/float(np.sqrt(size)),
extent=[-p_amplitude , p_amplitude-dp, 0, p_amplitude-dp] )
plt.colorbar()
axis_font = {'size':'24'}
plt.text( 0., 5.2, '$Re \\mathcal{F}(W)$', **axis_font )
plt.xlim(-x_amplitude , x_amplitude-dx)
plt.ylim(0 , x_amplitude-dx)
plt.imshow( np.append( f_gpu.get()[32:, :, :], f33_gpu.get().imag, axis=0 )[:,:,32]
/float(np.sqrt(size)),
extent=[-p_amplitude , p_amplitude-dp, 0, p_amplitude-dp] )
plt.colorbar()
axis_font = {'size':'24'}
plt.text( 0., 5.2, '$Im \\mathcal{F}(W)$', **axis_font )
plt.xlim(-x_amplitude , x_amplitude-dx)
plt.ylim(0 , x_amplitude-dx)
# Executing iFFT
cuda_ifaft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, -delta, segment_axes0, axes0, makeC2C, axesSplit_2 )
cuda_ifaft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, -delta, segment_axes1, axes1, makeC2C, axesSplit_2 )
cuda_ifaft( int(f_gpu.gpudata), int(f33_gpu.gpudata), dx, -delta, segment_axes2, axes2, makeC2R, axesSplit_2 )
plt.imshow( f_gpu.get()[32,:,:]/float(size) ,
extent=[-x_amplitude , x_amplitude-dx, -x_amplitude , x_amplitude-dx] )
plt.colorbar()
axis_font = {'size':'24'}
plt.text( -1, 5.2, '$W_{xy}$', **axis_font )
plt.xlim(-x_amplitude , x_amplitude-dx)
plt.ylim(-x_amplitude , x_amplitude-dx)
plt.imshow( f_gpu.get()[:,32,:]/float(size) ,
extent=[-x_amplitude , x_amplitude-dx, -x_amplitude , x_amplitude-dx] )
plt.colorbar()
axis_font = {'size':'24'}
plt.text( -1, 5.2, '$W_{xz}$', **axis_font )
plt.xlim(-x_amplitude , x_amplitude-dx)
plt.ylim(-x_amplitude , x_amplitude-dx)
plt.imshow( f_gpu.get()[:,:,32]/float(size) ,
extent=[-x_amplitude , x_amplitude-dx, -x_amplitude , x_amplitude-dx] )
plt.colorbar()
axis_font = {'size':'24'}
plt.text( -1, 5.2, '$W_{yz}$', **axis_font )
plt.xlim(-x_amplitude , x_amplitude-dx)
plt.ylim(-x_amplitude , x_amplitude-dx)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Step 2
Step3: This time we add a non-Poissonian template correlated with the Galactic Center Excess and also one spatially distributed as a thin disk. The latter is designed to account for the unresolved point sources attributed to the disk of the Milky Way (known sources in the 3FGL are masked).
Step4: Step 3
Step5: Step 4
Step6: This can take up to an hour to run. The output of this run will be analyzed in detail in the next example.
|
<ASSISTANT_TASK:>
Python Code:
# Import relevant modules
%matplotlib inline
%load_ext autoreload
%autoreload 2
import numpy as np
from NPTFit import nptfit # module for performing scan
from NPTFit import create_mask as cm # module for creating the mask
from NPTFit import dnds_analysis # module for analysing the output
from NPTFit import psf_correction as pc # module for determining the PSF correction
n = nptfit.NPTF(tag='GCE_Example')
fermi_data = np.load('fermi_data/fermidata_counts.npy').astype(np.int32)
fermi_exposure = np.load('fermi_data/fermidata_exposure.npy')
n.load_data(fermi_data, fermi_exposure)
pscmask=np.array(np.load('fermi_data/fermidata_pscmask.npy'), dtype=bool)
analysis_mask = cm.make_mask_total(band_mask = True, band_mask_range = 2,
mask_ring = True, inner = 0, outer = 30,
custom_mask = pscmask)
n.load_mask(analysis_mask)
dif = np.load('fermi_data/template_dif.npy')
iso = np.load('fermi_data/template_iso.npy')
bub = np.load('fermi_data/template_bub.npy')
gce = np.load('fermi_data/template_gce.npy')
dsk = np.load('fermi_data/template_dsk.npy')
n.add_template(dif, 'dif')
n.add_template(iso, 'iso')
n.add_template(bub, 'bub')
n.add_template(gce, 'gce')
n.add_template(dsk, 'dsk')
# Remove the exposure correction for PS templates
rescale = fermi_exposure/np.mean(fermi_exposure)
n.add_template(gce/rescale, 'gce_np', units='PS')
n.add_template(dsk/rescale, 'dsk_np', units='PS')
n.add_poiss_model('dif', '$A_\mathrm{dif}$', fixed=True, fixed_norm=12.85)
n.add_poiss_model('iso', '$A_\mathrm{iso}$', [0,2], False)
n.add_poiss_model('gce', '$A_\mathrm{gce}$', [0,2], False)
n.add_poiss_model('bub', '$A_\mathrm{bub}$', [0,2], False)
n.add_non_poiss_model('gce_np',
['$A_\mathrm{gce}^\mathrm{ps}$','$n_1^\mathrm{gce}$','$n_2^\mathrm{gce}$','$S_b^{(1), \mathrm{gce}}$'],
[[-6,1],[2.05,30],[-2,1.95],[0.05,40]],
[True,False,False,False])
n.add_non_poiss_model('dsk_np',
['$A_\mathrm{dsk}^\mathrm{ps}$','$n_1^\mathrm{dsk}$','$n_2^\mathrm{dsk}$','$S_b^{(1), \mathrm{dsk}}$'],
[[-6,1],[2.05,30],[-2,1.95],[0.05,40]],
[True,False,False,False])
pc_inst = pc.PSFCorrection(psf_sigma_deg=0.1812)
f_ary, df_rho_div_f_ary = pc_inst.f_ary, pc_inst.df_rho_div_f_ary
n.configure_for_scan(f_ary, df_rho_div_f_ary, nexp=1)
n.perform_scan(nlive=100)
from IPython.display import Image
Image(url = "https://imgs.xkcd.com/comics/compiling.png")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: The problem
Step3: HMC
Step4: Set the initial state
Step5: Build the kernel and inference loop
Step6: BlackJAX does not provide a default inference loop, but it easy to implement with JAX's lax.scan
Step7: Inference
Step8: NUTS
Step9: Use Stan's window adaptation
Step10: We can use the obtained parameters to define a new kernel. Note that we do not have to use the same kernel that was used for the adaptation
Step11: Sample multiple chains
Step12: This scales very well to hundreds of chains on CPU, tens of thousand on GPU
|
<ASSISTANT_TASK:>
Python Code:
import jax
import jax.numpy as jnp
import jax.scipy.stats as stats
import matplotlib.pyplot as plt
import numpy as np
import blackjax
%load_ext watermark
%watermark -d -m -v -p jax,jaxlib,blackjax
jax.devices()
loc, scale = 10, 20
observed = np.random.normal(loc, scale, size=1_000)
def logprob_fn(loc, scale, observed=observed):
    """Log-density of i.i.d. univariate Normal observations.

    Args:
        loc: mean of the Normal distribution.
        scale: standard deviation of the Normal distribution.
        observed: sample of observations (defaults to the module-level array).

    Returns:
        Scalar sum of the per-observation log-pdfs.
    """
    # NOTE(fix): the summary line "Univariate Normal" lost its triple quotes in
    # extraction, which made this function a SyntaxError; restored as a docstring.
    logpdf = stats.norm.logpdf(observed, loc, scale)
    return jnp.sum(logpdf)
logprob = lambda x: logprob_fn(**x)
inv_mass_matrix = np.array([0.5, 0.5])
num_integration_steps = 60
step_size = 1e-3
hmc = blackjax.hmc(logprob, step_size, inv_mass_matrix, num_integration_steps)
initial_position = {"loc": 1.0, "scale": 2.0}
initial_state = hmc.init(initial_position)
initial_state
%%time
hmc_kernel = jax.jit(hmc.step)
def inference_loop(rng_key, kernel, initial_state, num_samples):
    """Run ``kernel`` for ``num_samples`` steps via ``lax.scan`` and return every state."""
    step_keys = jax.random.split(rng_key, num_samples)

    @jax.jit
    def scan_body(carry, key):
        # Advance the chain one transition; emit the new state as scan output.
        next_state, _ = kernel(key, carry)
        return next_state, next_state

    _, chain = jax.lax.scan(scan_body, initial_state, step_keys)
    return chain
%%time
rng_key = jax.random.PRNGKey(0)
states = inference_loop(rng_key, hmc_kernel, initial_state, 10_000)
loc_samples = states.position["loc"].block_until_ready()
scale_samples = states.position["scale"]
fig, (ax, ax1) = plt.subplots(ncols=2, figsize=(15, 6))
ax.plot(loc_samples)
ax.set_xlabel("Samples")
ax.set_ylabel("loc")
ax1.plot(scale_samples)
ax1.set_xlabel("Samples")
ax1.set_ylabel("scale")
inv_mass_matrix = np.array([0.5, 0.5])
step_size = 1e-3
nuts = blackjax.nuts(logprob, step_size, inv_mass_matrix)
initial_position = {"loc": 1.0, "scale": 2.0}
initial_state = nuts.init(initial_position)
initial_state
%%time
rng_key = jax.random.PRNGKey(0)
states = inference_loop(rng_key, nuts.step, initial_state, 4_000)
loc_samples = states.position["loc"].block_until_ready()
scale_samples = states.position["scale"]
fig, (ax, ax1) = plt.subplots(ncols=2, figsize=(15, 6))
ax.plot(loc_samples)
ax.set_xlabel("Samples")
ax.set_ylabel("loc")
ax1.plot(scale_samples)
ax1.set_xlabel("Samples")
ax1.set_ylabel("scale")
%%time
warmup = blackjax.window_adaptation(
blackjax.nuts,
logprob,
1000,
)
state, kernel, _ = warmup.run(
rng_key,
initial_position,
)
%%time
states = inference_loop(rng_key, nuts.step, initial_state, 1_000)
loc_samples = states.position["loc"].block_until_ready()
scale_samples = states.position["scale"]
fig, (ax, ax1) = plt.subplots(ncols=2, figsize=(15, 6))
ax.plot(loc_samples)
ax.set_xlabel("Samples")
ax.set_ylabel("loc")
ax1.plot(scale_samples)
ax1.set_xlabel("Samples")
ax1.set_ylabel("scale")
num_chains = 4
initial_positions = {"loc": np.ones(num_chains), "scale": 2.0 * np.ones(num_chains)}
initial_states = jax.vmap(nuts.init, in_axes=(0))(initial_positions)
def inference_loop_multiple_chains(
    rng_key, kernel, initial_state, num_samples, num_chains
):
    """Advance ``num_chains`` independent chains for ``num_samples`` steps (vmap + scan)."""
    vmapped_kernel = jax.vmap(kernel)

    def scan_body(chain_states, step_key):
        # One key per chain so every chain draws independent randomness.
        chain_keys = jax.random.split(step_key, num_chains)
        new_states, _ = vmapped_kernel(chain_keys, chain_states)
        return new_states, new_states

    step_keys = jax.random.split(rng_key, num_samples)
    _, all_states = jax.lax.scan(scan_body, initial_state, step_keys)
    return all_states
%%time
states = inference_loop_multiple_chains(
rng_key, nuts.step, initial_states, 2_000, num_chains
)
states.position["loc"].block_until_ready()
%%time
num_chains = 40
initial_positions = {"loc": np.ones(num_chains), "scale": 2.0 * np.ones(num_chains)}
initial_states = jax.vmap(nuts.init, in_axes=(0,))(initial_positions)
states = inference_loop_multiple_chains(
rng_key, nuts.step, initial_states, 1_000, num_chains
)
states.position["loc"].block_until_ready()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 2
Step2: Step 3
Step3: Step 4
Step4: Let's now proceed to tokenize these tweets in addition to lemmatizing them! This will help improve the performance of our LDA model!
Step5: Lets now add these tokenized tweets to our dictionary!
Step6: I will not turn the dictionary back into a dataframe, run it through the filtration function before re-casting the dataframe into a dictionary.
Step7: Gensim LDA Process
Step8: Now, I will now filter out extreme words - that is words that appear far too often and words that are rare.
Step9: We now need to voctorize all the tweets so that it can be fed to the LDA algorithm! To do this, we will create a bag of words model from our tweets.
Step10: Now for the LDA part!
Step11: I can then save this lda model!
Step12: I now wish to extract all of the words that appear in each of the 30 topics that the LDA model was able to create.
Step13: I will then place this LDA Counter Object back into our dictionary!
|
<ASSISTANT_TASK:>
Python Code:
gabr_tweets = extract_users_tweets("gabr_ibrahim", 2000)
gabr_dict = dict()
gabr_dict['gabr_ibrahim'] = {"content" : [], "hashtags" : [], "retweet_count": [], "favorite_count": []}
for tweet in gabr_tweets:
text = extract_text(tweet)
hashtags = extract_hashtags(tweet)
rts = tweet.retweet_count
fav = tweet.favorite_count
gabr_dict['gabr_ibrahim']['content'].append(text)
gabr_dict['gabr_ibrahim']['hashtags'].extend(hashtags)
gabr_dict['gabr_ibrahim']["retweet_count"].append(rts)
gabr_dict['gabr_ibrahim']["favorite_count"].append(fav)
gabr_tweets_df = pd.DataFrame.from_dict(gabr_dict, orient='index')
gabr_tweets_df.head()
clean_gabr_tweets = filtration(gabr_tweets_df, "content")
clean_gabr_tweets = dataframe_to_dict(clean_gabr_tweets)
clean_gabr_tweets #this is a list of 1 dictionary
import spacy
import nltk
from gensim.models import Phrases
from gensim.models.word2vec import LineSentence
from gensim.corpora import Dictionary, MmCorpus
from gensim.models.ldamulticore import LdaMulticore
import pyLDAvis
import pyLDAvis.gensim
from collections import Counter
from gensim.corpora.dictionary import Dictionary
nlp = spacy.load('en')
gabr_tweets = clean_gabr_tweets[0]['gabr_ibrahim']['content']
gabr_tweets[:5]
tokenized_tweets = []
for tweet in gabr_tweets:
tokenized_tweet = nlp(tweet)
tweet = "" # we want to keep each tweet seperate
for token in tokenized_tweet:
if token.is_space:
continue
elif token.is_punct:
continue
elif token.is_stop:
continue
elif token.is_digit:
continue
elif len(token) == 1:
continue
elif len(token) == 2:
continue
else:
tweet += str(token.lemma_) + " " #creating lemmatized version of tweet
tokenized_tweets.append(tweet)
tokenized_tweets = list(map(str.strip, tokenized_tweets)) # strip whitespace
tokenized_tweets = [x for x in tokenized_tweets if x != ""] # remove empty entries
tokenized_tweets[:5] # you can see how this is different to the raw tweets!
clean_gabr_tweets[0]['gabr_ibrahim']['tokenized_tweets'] = tokenized_tweets
clean_gabr_tweets_df = pd.DataFrame.from_dict(clean_gabr_tweets[0], orient='index')
clean_gabr_tweets_df.head()
clean_gabr_tweets_df = filtration(clean_gabr_tweets_df, "tokenized_tweets")
clean_gabr_tweets = dataframe_to_dict(clean_gabr_tweets_df)
clean_gabr_tweets[0]['gabr_ibrahim']['tokenized_tweets'][:5]
list_of_tweets_gabr = clean_gabr_tweets[0]['gabr_ibrahim']['tokenized_tweets']
gensim_format_tweets = []
for tweet in list_of_tweets_gabr:
list_form = tweet.split()
gensim_format_tweets.append(list_form)
gensim_format_tweets[:5]
gensim_dictionary = Dictionary(gensim_format_tweets)
gensim_dictionary.filter_extremes(no_below=10, no_above=0.4)
gensim_dictionary.compactify() # remove gaps after words that were removed
!pwd
file_path_corpus = "/home/igabr/new-project-4"
def bag_of_words_generator(lst, dictionary):
    """Yield the bag-of-words (doc2bow) representation of each tokenized document.

    Args:
        lst: iterable of token lists, one list of strings per document.
        dictionary: gensim ``Dictionary`` mapping tokens to integer ids.

    Yields:
        list of ``(token_id, count)`` tuples for each document in ``lst``.

    Raises:
        TypeError: if ``dictionary`` is not a gensim ``Dictionary``.
    """
    # isinstance (not `type(x) == T`) accepts Dictionary subclasses, and raising
    # instead of `assert` keeps the check alive under `python -O`.
    if not isinstance(dictionary, Dictionary):
        raise TypeError("Please enter a Gensim Dictionary")
    for doc in lst:
        yield dictionary.doc2bow(doc)
MmCorpus.serialize(file_path_corpus+"{}.mm".format("gabr_ibrahim"), bag_of_words_generator(gensim_format_tweets, gensim_dictionary))
corpus = MmCorpus(file_path_corpus+"{}.mm".format("gabr_ibrahim"))
corpus.num_terms # the number of terms in our corpus!
corpus.num_docs # the number of documets. These are the number of tweets!
lda = LdaMulticore(corpus, num_topics=30, id2word=gensim_dictionary, chunksize=2000, workers=100, passes=100)
lda.save(file_path_corpus+"lda_model_{}".format("gabr_ibrahim"))
lda = LdaMulticore.load(file_path_corpus+"lda_model_{}".format("gabr_ibrahim"))
from collections import Counter
word_list = []
for i in range(30):
for term, frequency in lda.show_topic(i, topn=100): #returns top 100 words for a topic
if frequency != 0:
word_list.append(term)
temp = Counter(word_list)
len(temp)
# This can be done later to help filter the important words.
important_words = []
for k, v in temp.items():
if v >= 10:
if k not in nltk_stopwords:
doc = nlp(k)
for token in doc:
if not token.is_stop:
if len(token) != 2:
important_words.append(k)
important_words
len(important_words)
clean_gabr_tweets[0]['gabr_ibrahim'].keys()
clean_gabr_tweets[0]['gabr_ibrahim']['LDA'] = temp
pickle_object(clean_gabr_tweets, "gabr_ibrahim_tweets_LDA_Complete")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Forward mode
Step2: Hessian-vector products
|
<ASSISTANT_TASK:>
Python Code:
import tangent
import tensorflow as tf
def f(x):
    """Return x**2 + x**3 (the polynomial differentiated in the tangent example)."""
    square = x * x
    cube = square * x
    return square + cube
df = tangent.grad(f)
df
df(33)
forward_df = tangent.grad(f, mode='forward')
forward_df(33, dx=1)
forward_df(33, dx=2)
forward_df(33, dx=-1)
def f(x):
    """Scalar objective for the Hessian-vector-product demo: sum over x**5 elementwise."""
    a = x * x * x  # x cubed, elementwise
    b = a * x ** 2.0  # x^3 * x^2 == x^5 elementwise
    return tf.reduce_sum(b)  # reduce to a scalar so grad-of-grad is well-defined
hvp = tangent.grad(tangent.grad(f, mode='reverse'), mode='forward')
def f(W,x):
    """Scalar loss: sum of tanh over the linear map x @ W.

    Differentiated w.r.t. W by tangent.grad below; assumes x and W have
    compatible matmul shapes -- TODO confirm caller shapes.
    """
    h1 = tf.matmul(x,W)  # linear layer: (batch, in) @ (in, out)
    h2 = tf.tanh(h1)  # elementwise nonlinearity
    out = tf.reduce_sum(h2)  # scalar output so the gradient is a single tensor
    return out
dfdW = tangent.grad(f)
dfdW
W = tf.Variable(tf.zeros([100, 10]))
x = tf.Variable(tf.zeros([10, 100]))
dfdW(W, x)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Connect to the target database
Step2: 3. Run SQL on the target using the %%sql cell magic or %sql line magic
Step3: Bind variables
Step4: Additional example of the integration with the IPython environment
Step5: The integration with Pandas opens many additional possibilities for data analysis
|
<ASSISTANT_TASK:>
Python Code:
%load_ext sql
%config SqlMagic.connect_args="{'kerberos_service_name':'impala', 'auth_mechanism':'GSSAPI'}"
%sql impala://impalasrv-prod:21050/test2
%%sql
select * from emp
Employee_name="SCOTT"
%sql select * from emp where ename=:Employee_name
myResultSet=%sql select ename "Employee Name", sal "Salary" from emp
%matplotlib inline
import matplotlib
matplotlib.style.use('ggplot')
myResultSet.bar()
my_DataFrame=myResultSet.DataFrame()
my_DataFrame.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <font color='red'>Please put your datahub API key into a file called APIKEY and place it to the notebook folder or assign your API key directly to the variable API_key!</font>
Step2: At first, we need to define the dataset name and variables we want to use.
Step3: For the start, let's see where exactly PyeongChang (green) and Beijing (orange) are located.
Step4: We are selecting the Gangwon province area in South Korea, where the PyeongChang 2018 Winter Olympics is taking place.
Step5: At the same time, we are also selecting Beijing, Zhangjiakou and Yanqing in China where the Beijing 2022 Winter Olympics will be taking place.
Step6: Download the data with package API
Step7: package1.make_package()
Step8: Work with downloaded files
Step9: So, we are going to look into snow depth in February, when the Olympic Games take place. First we will filter out values from February and calculate the mean values.
Step10: From the plot below we can see that both of the places have snow during February. However, the snow cover tends to be quite low, usually less than 10 cm. Even less for Beijing, where the average snow depth in February is 0.016 meters, while in PyeongChang it is around 0.034 meters.
Step11: Fortunately, after we plotted the mean temperature data for January, we discovered that even though both locations fail to meet the natural-snow conditions, they at least have consistently cold air to enable artificial snowmaking. Beijing tends to have slightly colder temperatures than PyeongChang, on average -9.72 C, compared with -6.20 C in PyeongChang.
Step12: As for making artificial snow, it is important that the temperature drops below -3.5 C, so we will find out on how many days in January the conditions are right.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
from dh_py_access import package_api
import dh_py_access.lib.datahub as datahub
import xarray as xr
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from po_data_process import comparison_bar_chart, make_comparison_plot
import warnings
warnings.filterwarnings("ignore")
server = 'api.planetos.com'
API_key = open('APIKEY').readlines()[0].strip() #'<YOUR API KEY HERE>'
version = 'v1'
dh=datahub.datahub(server,version,API_key)
dataset='ncep_cfsr_global_03'
variable_names = 'Temperature_height_above_ground,Snow_depth_surface'
time_start = '1979-01-01T00:00:00'
time_end = '2010-12-31T23:00:00'
plt.figure(figsize=(10,8))
m = Basemap(projection='merc',llcrnrlat=7,urcrnrlat=58,\
llcrnrlon=62,urcrnrlon=149,lat_ts=20,resolution='l')
x,y = m(128.47,37.55)
x2,y2 = m(116.39,39.99)
m.drawcoastlines()
m.drawcountries()
m.bluemarble()
m.scatter(x,y,50,marker='o',color='#00FF00',zorder=4)
m.scatter(x2,y2,50,marker='o',color='orange',zorder=4)
plt.show()
area_name1 = 'PyeongChang'
latitude_north1 = 37.79; longitude_west1 = 128.76
latitude_south1 = 37.20; longitude_east1 = 127.54
area_name2 = 'Beijing'
latitude_north2 = 41.12; longitude_west2 = 114.46
latitude_south2 = 39.56; longitude_east2 = 116.93
package1 = package_api.package_api(dh,dataset,variable_names,longitude_west1,longitude_east1,latitude_south1,latitude_north1,time_start,time_end,area_name=area_name1)
package2 = package_api.package_api(dh,dataset,variable_names,longitude_west2,longitude_east2,latitude_south2,latitude_north2,time_start,time_end,area_name=area_name2)
package1.make_package()
package2.make_package()
package1.download_package()
package2.download_package()
dd1 = xr.open_dataset(package1.local_file_name,decode_cf=False)
del (dd1['Temperature_height_above_ground'].attrs['missing_value'])
del (dd1['Snow_depth_surface'].attrs['missing_value'])
dd1 = xr.conventions.decode_cf(dd1)
dd1['Temp_celsius'] = dd1.Temperature_height_above_ground
dd1['Temp_celsius'].values = dd1['Temp_celsius'].values -272.15
dd1['Temp_celsius'].attrs['units'] = 'Celsius'
dd2 = xr.open_dataset(package2.local_file_name,decode_cf=False)
del (dd2['Temperature_height_above_ground'].attrs['missing_value'])
del (dd2['Snow_depth_surface'].attrs['missing_value'])
dd2 = xr.conventions.decode_cf(dd2)
dd2['Temp_celsius'] = dd2.Temperature_height_above_ground
dd2['Temp_celsius'].values = dd2['Temp_celsius'].values -272.15
dd2['Temp_celsius'].attrs['units'] = 'Celsius'
i_snow = np.where(dd1.Snow_depth_surface['time.month'].values == 2)
feb_mean_snow1 = dd1.Snow_depth_surface[i_snow].resample(time="1AS").mean('time').mean(axis=(1,2))
feb_mean_snow2 = dd2.Snow_depth_surface[i_snow].resample(time="1AS").mean('time').mean(axis=(1,2))
comparison_bar_chart(feb_mean_snow1,area_name1, feb_mean_snow2,area_name2,'Year', np.arange(1979,2011,1),'Snow depth [m]','Mean snow depth in February')
print ('Overall average snow cover in PyeongChang in February is ' + str("%.3f" % np.mean(feb_mean_snow1.values)) + ' m')
print ('Overall average snow cover in Beijing in February is ' + str("%.3f" % np.mean(feb_mean_snow2.values)) + ' m')
i = np.where(dd1.Temp_celsius['time1.month'].values == 1)
#make_comparison_plot(data1,area_name1,data2,area_name2,title,**kwargs)
make_comparison_plot(dd1.Temp_celsius[i].resample(time1="1AS").mean('time1').mean(axis=(1,2,3)),area_name1, dd2.Temp_celsius[i].resample(time1="1AS").mean('time1').mean(axis=(1,2,3)),area_name2,'Mean temperature at 2 m in January',xaxis_label = 'Year',yaxis_label = 'Temperature [$^oC$]')
print ('Overall mean temperature in PyeongChang in February ' + str("%.2f" % dd1.Temp_celsius[i].resample(time1="1AS").mean('time1').mean(axis=(0,1,2,3)).values))
print ('Overall mean temperature in Beijing in February ' + str("%.2f" % dd2.Temp_celsius[i].resample(time1="1AS").mean('time1').mean(axis=(0,1,2,3)).values))
i_jan = np.where(dd1.Temp_celsius['time1.month'].values == 2)
temp_jan1 = dd1.Temp_celsius[i_jan].resample(time1="1D").mean('time1').mean(axis=(1,2,3))
temp_jan2 = dd2.Temp_celsius[i_jan].resample(time1="1D").mean('time1').mean(axis=(1,2,3))
make_comparison_plot(temp_jan1[np.where(temp_jan1.values < -3.5)].groupby('time1.year').count(),area_name1,temp_jan2[np.where(temp_jan2.values < -3.5)].groupby('time1.year').count(),area_name2,'Days in January with temperature below -3.5')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Describes the tests needed to validate the GetChecksums functionality.
Step2: Basic getChecksum
Step3: Request the checksum of a file in the collection.
Step4: The correct checksum should be returned by all the pillars.
Step5: The correct salted checksum should be returned by all the pillars (except the ChecksumPillar).
|
<ASSISTANT_TASK:>
Python Code:
%env CLIENT bitrepository-client-1.9-RC1
!wget -Nq "https://sbforge.org/download/attachments/25395346/${CLIENT}.zip"
!unzip -quo ${CLIENT}.zip
%alias bitmag ${CLIENT}/bin/bitmag.sh %l
%env TESTFILE1 README.md
%bitmag put-file -c integrationtest1 -f $TESTFILE1 -i $TESTFILE1
!md5sum $TESTFILE1
%bitmag get-checksums -c integrationtest1 -i $TESTFILE1
import hmac
import hashlib
import base64
def getSaltedChecksum(file, saltHex, algorithm):
    """Compute the salted (HMAC) checksum of a file.

    Parameters
    ----------
    file : str
        Path of the file to digest.
    saltHex : str
        Salt given as a hex string (e.g. 'abcd'); decoded to raw bytes
        before being used as the HMAC key.
    algorithm : callable
        Hash constructor from ``hashlib`` (e.g. ``hashlib.md5``).

    Returns
    -------
    str
        Lowercase hexadecimal HMAC digest of the file contents.
    """
    # Note, the salt is a hex string, not raw bytes.
    salt = bytearray.fromhex(saltHex)
    digester = hmac.new(salt, None, algorithm)
    with open(file, 'rb') as from_fh:
        # Read in fixed-size chunks. The previous version called read()
        # with no size, which pulls the entire file into memory in one
        # call and made the loop pointless for large files.
        while True:
            chunk = from_fh.read(65536)
            if not chunk:
                break
            digester.update(chunk)
    # lower() kept for a stable, explicitly lowercase comparison with the
    # checksums reported by the pillars.
    return digester.hexdigest().lower()
file = os.environ.get('TESTFILE1')
print(getSaltedChecksum(file,saltHex='abcd',algorithm=hashlib.md5))
%bitmag get-checksums -c integrationtest1 -i $TESTFILE1 -R HMAC_MD5 -S 'abcd'
%bitmag get-checksums -c integrationtest1 -i ThisFileDoesNotExist
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will use this helper funciton to write lists containing article ids, categories, and authors for each article in our database to local file.
Step3: Pull data from BigQuery
Step5: In the following cells, you will create a local file which contains a list of article categories and a list of article authors.
Step7: When creating the author list, we'll only use the first author information for each article.
Step9: Create train and test sets.
Step11: Repeat the query as above but change outcome of the farm fingerprint hash to collect the remaining 10% of the data for the test set.
Step12: Let's have a look at the two csv files we just created containing the training and test set. We'll also do a line count of both files to confirm that we have achieved an approximate 90/10 train/test split.
|
<ASSISTANT_TASK:>
Python Code:
import os
import tensorflow as tf
import numpy as np
from google.cloud import bigquery
PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# do not change these
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '2.1'
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
def write_list_to_disk(my_list, filename):
    """Write each element of ``my_list`` to ``filename``, one item per line.

    Items are rendered with %s formatting, so non-string entries are
    stringified. UTF-8 is used explicitly so that author/category names
    containing non-ASCII characters round-trip regardless of the
    platform's default encoding (matching the ``encoding='utf-8'`` used
    for the CSV exports elsewhere in this notebook).
    """
    with open(filename, 'w', encoding='utf-8') as f:
        for item in my_list:
            f.write("%s\n" % item)
sql=
#standardSQL
SELECT
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS content_id
FROM `cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
GROUP BY
content_id
content_ids_list = bigquery.Client().query(sql).to_dataframe()['content_id'].tolist()
write_list_to_disk(content_ids_list, "content_ids.txt")
print("Some sample content IDs {}".format(content_ids_list[:3]))
print("The total number of articles is {}".format(len(content_ids_list)))
sql=
TODO: Modify the query above to instead create a list of all categories in the dataset.
You'll need to change the content_id to the appropriate field as well as the index.
categories_list = #TODO: Modify the query above to create the list of categories
write_list_to_disk(categories_list, "categories.txt")
print(categories_list)
sql=
#standardSQL
SELECT
REGEXP_EXTRACT((SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)), r"^[^,]+") AS first_author
FROM `cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND (SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
GROUP BY
first_author
authors_list = bigquery.Client().query(sql).to_dataframe()['first_author'].tolist()
write_list_to_disk(authors_list, "authors.txt")
print("Some sample authors {}".format(authors_list[:10]))
print("The total number of authors is {}".format(len(authors_list)))
sql=
WITH site_history as (
SELECT
fullVisitorId as visitor_id,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS content_id,
(SELECT MAX(IF(index=7, value, NULL)) FROM UNNEST(hits.customDimensions)) AS category,
(SELECT MAX(IF(index=6, value, NULL)) FROM UNNEST(hits.customDimensions)) AS title,
(SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)) AS author_list,
SPLIT(RPAD((SELECT MAX(IF(index=4, value, NULL)) FROM UNNEST(hits.customDimensions)), 7), '.') as year_month_array,
LEAD(hits.customDimensions, 1) OVER (PARTITION BY fullVisitorId ORDER BY hits.time ASC) as nextCustomDimensions
FROM
`cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND
fullVisitorId IS NOT NULL
AND
hits.time != 0
AND
hits.time IS NOT NULL
AND
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
)
SELECT
visitor_id,
content_id,
category,
REGEXP_REPLACE(title, r",", "") as title,
REGEXP_EXTRACT(author_list, r"^[^,]+") as author,
DATE_DIFF(DATE(CAST(year_month_array[OFFSET(0)] AS INT64), CAST(year_month_array[OFFSET(1)] AS INT64), 1), DATE(1970,1,1), MONTH) as months_since_epoch,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) as next_content_id
FROM
site_history
WHERE (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) IS NOT NULL AND
TODO: Use FARM_FINGERPRINT on the concatenated visitor_id and content_id to create a training set of approximately 90% of the data
training_set_df = bigquery.Client().query(sql).to_dataframe()
training_set_df.to_csv('training_set.csv', header=False, index=False, encoding='utf-8')
training_set_df.head()
sql=
WITH site_history as (
SELECT
fullVisitorId as visitor_id,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS content_id,
(SELECT MAX(IF(index=7, value, NULL)) FROM UNNEST(hits.customDimensions)) AS category,
(SELECT MAX(IF(index=6, value, NULL)) FROM UNNEST(hits.customDimensions)) AS title,
(SELECT MAX(IF(index=2, value, NULL)) FROM UNNEST(hits.customDimensions)) AS author_list,
SPLIT(RPAD((SELECT MAX(IF(index=4, value, NULL)) FROM UNNEST(hits.customDimensions)), 7), '.') as year_month_array,
LEAD(hits.customDimensions, 1) OVER (PARTITION BY fullVisitorId ORDER BY hits.time ASC) as nextCustomDimensions
FROM
`cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
AND
fullVisitorId IS NOT NULL
AND
hits.time != 0
AND
hits.time IS NOT NULL
AND
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) IS NOT NULL
)
SELECT
visitor_id,
content_id,
category,
REGEXP_REPLACE(title, r",", "") as title,
REGEXP_EXTRACT(author_list, r"^[^,]+") as author,
DATE_DIFF(DATE(CAST(year_month_array[OFFSET(0)] AS INT64), CAST(year_month_array[OFFSET(1)] AS INT64), 1), DATE(1970,1,1), MONTH) as months_since_epoch,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) as next_content_id
FROM
site_history
WHERE (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(nextCustomDimensions)) IS NOT NULL AND
#TODO: Modify the FARM_FINGERPRINT you used in the previous cell to create a test set of approximately 10% of the data
# Run the test-set query and export it alongside the training set.
# Fixed typo: `bbigquery` -> `bigquery` (the module used for every other
# query in this notebook); the original line raised NameError before any
# test rows were written.
test_set_df = bigquery.Client().query(sql).to_dataframe()
test_set_df.to_csv('test_set.csv', header=False, index=False, encoding='utf-8')
test_set_df.head()
%%bash
wc -l *_set.csv
!head *_set.csv
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part Two
|
<ASSISTANT_TASK:>
Python Code:
DISK_LENGTH = 272
def invert(string):
    """Return *string* with each bit flipped: '0' -> '1', any other char -> '0'."""
    flipped = []
    for bit in string:
        flipped.append('1' if bit == '0' else '0')
    return ''.join(flipped)
print(invert('111100001010'))
def fill_disk(string):
    """Expand *string* with the dragon-curve rule until it covers the disk.

    Each pass appends '0' plus the bit-inverted reversal of the data so
    far; the result is truncated to exactly DISK_LENGTH characters.
    """
    data = string
    while len(data) < DISK_LENGTH:
        data = data + '0' + invert(data[::-1])
    return data[:DISK_LENGTH]
with open('../inputs/day16.txt', 'r') as f:
input_data = f.readline().strip()
string = fill_disk(input_data)
print('string length: ', len(string))
print(string)
def create_checksum(string):
    """Reduce *string* to its checksum by repeated pairwise collapse.

    Each pass maps every pair of equal characters to '1' and every
    mismatched pair to '0'; passes repeat while the result still has an
    even length. The input length must be even.
    """
    current = string
    while True:
        reduced = ['1' if current[i] == current[i + 1] else '0'
                   for i in range(0, len(current), 2)]
        if len(reduced) % 2 != 0:
            return ''.join(reduced)
        current = reduced
checksum = create_checksum(string)
print('checksum length', len(checksum))
print(checksum)
DISK_LENGTH = 35651584
string = fill_disk(input_data)
print('string length: ', len(string))
checksum = create_checksum(string)
print('checksum length', len(checksum))
print(checksum)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <p style="font-family
Step2: <p style="font-family
Step3: <p style="font-family
Step4: <p style="font-family
Step5: <p style="font-family
Step6: <p style="font-family
Step7: <h1 style="font-size
Step8: <h1 style="font-size
Step9: <h1 style="font-size
Step10: <h1 style="font-size
Step11: Thats nice ! No NULL values !
Step12: Thats nice ! No NULL values !
Step13: We have some tags which are NULL.
Step14: Thats nice ! No NULL values ! Notice the number of lines have reduced.
Step15: <h1 style="font-size
Step16: <h1 style="font-size
Step17: <h1 style="font-size
Step18: <h1 style="font-size
Step19: More examples
Step20: <h1 style="font-size
Step21: <p style="font-family
Step22: <p style="font-family
Step23: <p style="font-family
Step24: <p style="font-family
Step25: <p style="font-family
Step26: <p style="font-family
Step27: <p style="font-family
Step28: <p style="font-family
Step29: <h1 style="font-size
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
ser = pd.Series([100, 'foo', 300, 'bar', 500], ['tom', 'bob', 'nancy', 'dan', 'eric'])
ser
ser.index
ser.loc[['nancy','bob']]
ser[[4, 3, 1]]
ser.iloc[2]
'bob' in ser
ser
ser * 2
ser[['nancy', 'eric']] ** 2
d = {'one' : pd.Series([100., 200., 300.], index=['apple', 'ball', 'clock']),
'two' : pd.Series([111., 222., 333., 4444.], index=['apple', 'ball', 'cerill', 'dancy'])}
d
df = pd.DataFrame(d)
print(df)
df.index
df.columns
pd.DataFrame(d, index=['dancy', 'ball', 'apple'])
pd.DataFrame(d, index=['dancy', 'ball', 'apple'], columns=['two', 'five'])
data = [{'alex': 1, 'joe': 2}, {'ema': 5, 'dora': 10, 'alice': 20}]
pd.DataFrame(data)
pd.DataFrame(data, index=['orange', 'red'])
pd.DataFrame(data, columns=['joe', 'dora','alice'])
df
df['one']
df['three'] = df['one'] * df['two']
df
df['flag'] = df['one'] > 250
df
three = df.pop('three')
three
df
del df['two']
df
df.insert(2, 'copy_of_one', df['one'])
df
df['one_upper_half'] = df['one'][:2]
df
path = r'/Users/Harish/Documents/HK_Work/Python/ml-20m/'
# Note: Adjust the name of the folder to match your local directory
!ls /Users/Harish/Documents/HK_Work/Python/ml-20m/
!cat /Users/Harish/Documents/HK_Work/Python/ml-20m/movies.csv | wc -l
!head -5 /Users/Harish/Documents/HK_Work/Python/ml-20m/ratings.csv
movies = pd.read_csv(path+'movies.csv', sep=',')
print(type(movies))
movies.head(15)
# Timestamps represent seconds since midnight Coordinated Universal Time (UTC) of January 1, 1970
tags = pd.read_csv(path+'tags.csv', sep=',')
tags.head()
ratings = pd.read_csv(path+'ratings.csv', sep=',', parse_dates=['timestamp'])
ratings.head()
# For current analysis, we will remove timestamp (we will come back to it!)
del ratings['timestamp']
del tags['timestamp']
#Extract 0th row: notice that it is infact a Series
row_0 = tags.iloc[0]
type(row_0)
print(row_0)
row_0.index
row_0['userId']
'rating' in row_0
row_0.name
row_0 = row_0.rename('first_row')
row_0.name
tags.head()
tags.index
tags.columns
# Extract row 0, 11, 2000 from DataFrame
tags.iloc[ [0,11,2000] ]
ratings['rating'].describe()
ratings.describe()
ratings['rating'].mean()
ratings.mean()
ratings['rating'].min()
ratings['rating'].max()
ratings['rating'].std()
ratings['rating'].mode()
ratings.corr()
filter_1 = ratings['rating'] > 5
print(filter_1)
filter_1.any()
filter_2 = ratings['rating'] > 0
filter_2.all()
movies.shape
#is any row NULL ?
movies.isnull().any()
ratings.shape
#is any row NULL ?
ratings.isnull().any()
tags.shape
#is any row NULL ?
tags.isnull().any()
tags = tags.dropna()
#Check again: is any row NULL ?
tags.isnull().any()
tags.shape
%matplotlib inline
ratings.hist(column='rating', figsize=(15,10))
ratings.boxplot(column='rating', figsize=(15,20))
tags['tag'].head()
movies[['title','genres']].head()
ratings[-10:]
tag_counts = tags['tag'].value_counts()
tag_counts[-10:]
tag_counts[:10].plot(kind='bar', figsize=(15,10))
is_highly_rated = ratings['rating'] >= 4.0
ratings[is_highly_rated][30:50]
is_animation = movies['genres'].str.contains('Animation')
movies[is_animation][5:15]
movies[is_animation].head(15)
ratings_count = ratings[['movieId','rating']].groupby('rating').count()
ratings_count
average_rating = ratings[['movieId','rating']].groupby('movieId').mean()
average_rating.head()
movie_count = ratings[['movieId','rating']].groupby('movieId').count()
movie_count.head()
movie_count = ratings[['movieId','rating']].groupby('movieId').count()
movie_count.tail()
tags.head()
movies.head()
t = movies.merge(tags, on='movieId', how='inner')
t.head()
avg_ratings = ratings.groupby('movieId', as_index=False).mean()
del avg_ratings['userId']
avg_ratings.head()
box_office = movies.merge(avg_ratings, on='movieId', how='inner')
box_office.tail()
is_highly_rated = box_office['rating'] >= 4.0
box_office[is_highly_rated][-5:]
is_comedy = box_office['genres'].str.contains('Comedy')
box_office[is_comedy][:5]
box_office[is_comedy & is_highly_rated][-5:]
movies.head()
movie_genres = movies['genres'].str.split('|', expand=True)
movie_genres[:10]
movie_genres['isComedy'] = movies['genres'].str.contains('Comedy')
movie_genres[:10]
movies['year'] = movies['title'].str.extract('.*\((.*)\).*', expand=True)
movies.tail()
tags = pd.read_csv(path+r'tags.csv', sep=',')
tags.dtypes
tags.head(5)
tags['parsed_time'] = pd.to_datetime(tags['timestamp'], unit='s')
tags['parsed_time'].dtype
tags.head(2)
greater_than_t = tags['parsed_time'] > '2015-02-01'
selected_rows = tags[greater_than_t]
tags.shape, selected_rows.shape
tags.sort_values(by='parsed_time', ascending=True)[:10]
average_rating = ratings[['movieId','rating']].groupby('movieId', as_index=False).mean()
average_rating.tail()
joined = movies.merge(average_rating, on='movieId', how='inner')
joined.head()
joined.corr()
yearly_average = joined[['year','rating']].groupby('year', as_index=False).mean()
yearly_average[:10]
yearly_average[-20:].plot(x='year', y='rating', figsize=(15,10), grid=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Usage of endpoint layers in the Functional API
Step2: Exporting an inference-only model
Step3: Usage of loss endpoint layers in subclassed models
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
from tensorflow import keras
import numpy as np
class LogisticEndpoint(keras.layers.Layer):
    """Endpoint layer for binary logistic regression.

    At training time (``targets`` provided) it registers the binary
    cross-entropy loss via ``self.add_loss()`` and logs accuracy via
    ``self.add_metric()``. At inference time (``targets=None``) it only
    returns the softmax of the logits, so the same layer can be reused
    in an inference-only model.
    """

    def __init__(self, name=None):
        super(LogisticEndpoint, self).__init__(name=name)
        # Loss expects raw logits (from_logits=True), matching the
        # un-activated Dense output this endpoint is attached to.
        self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
        self.accuracy_fn = keras.metrics.BinaryAccuracy(name="accuracy")

    def call(self, logits, targets=None, sample_weight=None):
        if targets is not None:
            # Compute the training-time loss value and add it
            # to the layer using `self.add_loss()`.
            loss = self.loss_fn(targets, logits, sample_weight)
            self.add_loss(loss)
            # Log the accuracy as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            # BUGFIX: this must stay inside the `targets is not None`
            # branch; it previously ran unconditionally, so building the
            # inference-only model (which calls this layer with
            # targets=None) would try to update BinaryAccuracy with
            # y_true=None and fail.
            self.add_metric(self.accuracy_fn(targets, logits, sample_weight))
        # Return the inference-time prediction tensor (for `.predict()`).
        return tf.nn.softmax(logits)
inputs = keras.Input((764,), name="inputs")
logits = keras.layers.Dense(1)(inputs)
targets = keras.Input((1,), name="targets")
sample_weight = keras.Input((1,), name="sample_weight")
preds = LogisticEndpoint()(logits, targets, sample_weight)
model = keras.Model([inputs, targets, sample_weight], preds)
data = {
"inputs": np.random.random((1000, 764)),
"targets": np.random.random((1000, 1)),
"sample_weight": np.random.random((1000, 1)),
}
model.compile(keras.optimizers.Adam(1e-3))
model.fit(data, epochs=2)
inputs = keras.Input((764,), name="inputs")
logits = keras.layers.Dense(1)(inputs)
preds = LogisticEndpoint()(logits, targets=None, sample_weight=None)
inference_model = keras.Model(inputs, preds)
inference_model.set_weights(model.get_weights())
preds = inference_model.predict(np.random.random((1000, 764)))
class LogReg(keras.Model):
    """Subclassed logistic-regression model built around LogisticEndpoint.

    Expects a single dict input with keys 'inputs', 'targets' and
    'sample_weight', so that loss/metric computation happens inside the
    forward pass and ``compile()`` needs no ``loss=`` argument.
    """

    def __init__(self):
        super(LogReg, self).__init__()
        # Single-unit Dense produces the raw logit for each example.
        self.dense = keras.layers.Dense(1)
        self.logistic_endpoint = LogisticEndpoint()

    def call(self, inputs):
        # Note that all inputs should be in the first argument
        # since we want to be able to call `model.fit(inputs)`.
        logits = self.dense(inputs["inputs"])
        preds = self.logistic_endpoint(
            logits=logits,
            targets=inputs["targets"],
            sample_weight=inputs["sample_weight"],
        )
        return preds
model = LogReg()
data = {
"inputs": np.random.random((1000, 764)),
"targets": np.random.random((1000, 1)),
"sample_weight": np.random.random((1000, 1)),
}
model.compile(keras.optimizers.Adam(1e-3))
model.fit(data, epochs=2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create and convert a TensorFlow model
Step2: Generate data
Step3: Add some noise
Step4: Split our data
Step5: Design a model
Step6: Train the model
Step7: Check the training metrics
Step8: Look closer at the data
Step9: Further metrics
Step10: This graph of mean absolute error gives us some further clues. We can see that predictions with our training data show consistently lower error than with our validation data, which means that the network has likely overfit, or learned the training data so rigidly that it can't make effective predictions about new data.
Step11: Oh dear! The graph makes it clear that our network has learned to approximate the sine function in a very limited way. The predictions are highly linear, and only very roughly fit the data.
Step12: We'll now train the new model. To save time, we'll train for only 600 epochs
Step13: Evaluate our new model
Step14: Great results! From these graphs, we can see two exciting things
Step15: Much better! The evaluation metrics we printed show that the model has a low loss and MAE on the test data, and the predictions line up visually with our data fairly well.
Step16: To create a quantized model that runs as efficiently as possible, we have to provide a "representative dataset"—a set of numbers that represent the full range of input values the dataset the model was trained on.
Step17: We can see from the graph that the predictions for the original model, the converted model, and the quantized model are all close enough to be almost indistinguishable. This means that our quantized model is ready to use!
Step18: Our quantized model is 224 bytes smaller than the original version, which is great - but it's only a minor reduction in size. At around 2.4 kilobytes, this model is already so small that the weights make up a small proportion of the overall size, meaning quantization only has a small effect.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TensorFlow is an open source machine learning library
!pip install tensorflow==2.0
import tensorflow as tf
# Numpy is a math library
import numpy as np
# Matplotlib is a graphing library
import matplotlib.pyplot as plt
# math is Python's math library
import math
# We'll generate this many sample datapoints
SAMPLES = 1000
# Set a "seed" value, so we get the same random numbers each time we run this
# notebook. Any number can be used here.
SEED = 1337
np.random.seed(SEED)
tf.random.set_seed(SEED)
# Generate a uniformly distributed set of random numbers in the range from
# 0 to 2π, which covers a complete sine wave oscillation
x_values = np.random.uniform(low=0, high=2*math.pi, size=SAMPLES)
# Shuffle the values to guarantee they're not in order
np.random.shuffle(x_values)
# Calculate the corresponding sine values
y_values = np.sin(x_values)
# Plot our data. The 'b.' argument tells the library to print blue dots.
plt.plot(x_values, y_values, 'b.')
plt.show()
# Add a small random number to each y value
y_values += 0.1 * np.random.randn(*y_values.shape)
# Plot our data
plt.plot(x_values, y_values, 'b.')
plt.show()
# We'll use 60% of our data for training and 20% for testing. The remaining 20%
# will be used for validation. Calculate the indices of each section.
TRAIN_SPLIT = int(0.6 * SAMPLES)
TEST_SPLIT = int(0.2 * SAMPLES + TRAIN_SPLIT)
# Use np.split to chop our data into three parts.
# The second argument to np.split is an array of indices where the data will be
# split. We provide two indices, so the data will be divided into three chunks.
x_train, x_validate, x_test = np.split(x_values, [TRAIN_SPLIT, TEST_SPLIT])
y_train, y_validate, y_test = np.split(y_values, [TRAIN_SPLIT, TEST_SPLIT])
# Double check that our splits add up correctly
assert (x_train.size + x_validate.size + x_test.size) == SAMPLES
# Plot the data in each partition in different colors:
plt.plot(x_train, y_train, 'b.', label="Train")
plt.plot(x_validate, y_validate, 'y.', label="Validate")
plt.plot(x_test, y_test, 'r.', label="Test")
plt.legend()
plt.show()
# We'll use Keras to create a simple model architecture
from tensorflow.keras import layers
model_1 = tf.keras.Sequential()
# First layer takes a scalar input and feeds it through 16 "neurons". The
# neurons decide whether to activate based on the 'relu' activation function.
model_1.add(layers.Dense(16, activation='relu', input_shape=(1,)))
# Final layer is a single neuron, since we want to output a single value
model_1.add(layers.Dense(1))
# Compile the model using a standard optimizer and loss function for regression
model_1.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
# Print a summary of the model's architecture
model_1.summary()
# Train the model on our training data while validating on our validation set
history_1 = model_1.fit(x_train, y_train, epochs=1000, batch_size=16,
validation_data=(x_validate, y_validate))
# Draw a graph of the loss, which is the distance between
# the predicted and actual values during training and validation.
loss = history_1.history['loss']
val_loss = history_1.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'g.', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Exclude the first few epochs so the graph is easier to read
SKIP = 100
plt.plot(epochs[SKIP:], loss[SKIP:], 'g.', label='Training loss')
plt.plot(epochs[SKIP:], val_loss[SKIP:], 'b.', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Draw a graph of mean absolute error, which is another way of
# measuring the amount of error in the prediction.
mae = history_1.history['mae']
val_mae = history_1.history['val_mae']
plt.plot(epochs[SKIP:], mae[SKIP:], 'g.', label='Training MAE')
plt.plot(epochs[SKIP:], val_mae[SKIP:], 'b.', label='Validation MAE')
plt.title('Training and validation mean absolute error')
plt.xlabel('Epochs')
plt.ylabel('MAE')
plt.legend()
plt.show()
# Use the model to make predictions from our validation data
predictions = model_1.predict(x_train)
# Plot the predictions along with to the test data
plt.clf()
plt.title('Training data predicted vs actual values')
plt.plot(x_test, y_test, 'b.', label='Actual')
plt.plot(x_train, predictions, 'r.', label='Predicted')
plt.legend()
plt.show()
model_2 = tf.keras.Sequential()
# First layer takes a scalar input and feeds it through 16 "neurons". The
# neurons decide whether to activate based on the 'relu' activation function.
model_2.add(layers.Dense(16, activation='relu', input_shape=(1,)))
# The new second layer may help the network learn more complex representations
model_2.add(layers.Dense(16, activation='relu'))
# Final layer is a single neuron, since we want to output a single value
model_2.add(layers.Dense(1))
# Compile the model using a standard optimizer and loss function for regression
model_2.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
# Show a summary of the model
model_2.summary()
history_2 = model_2.fit(x_train, y_train, epochs=600, batch_size=16,
validation_data=(x_validate, y_validate))
# Draw a graph of the loss, which is the distance between
# the predicted and actual values during training and validation.
loss = history_2.history['loss']
val_loss = history_2.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'g.', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Exclude the first few epochs so the graph is easier to read
SKIP = 80
plt.clf()
plt.plot(epochs[SKIP:], loss[SKIP:], 'g.', label='Training loss')
plt.plot(epochs[SKIP:], val_loss[SKIP:], 'b.', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf()
# Draw a graph of mean absolute error, which is another way of
# measuring the amount of error in the prediction.
mae = history_2.history['mae']
val_mae = history_2.history['val_mae']
plt.plot(epochs[SKIP:], mae[SKIP:], 'g.', label='Training MAE')
plt.plot(epochs[SKIP:], val_mae[SKIP:], 'b.', label='Validation MAE')
plt.title('Training and validation mean absolute error')
plt.xlabel('Epochs')
plt.ylabel('MAE')
plt.legend()
plt.show()
# Calculate and print the loss on our test dataset
loss = model_2.evaluate(x_test, y_test)
# Make predictions based on our test dataset
predictions = model_2.predict(x_test)
# Graph the predictions against the actual values
plt.clf()
plt.title('Comparison of predictions and actual values')
plt.plot(x_test, y_test, 'b.', label='Actual')
plt.plot(x_test, predictions, 'r.', label='Predicted')
plt.legend()
plt.show()
# Convert the model to the TensorFlow Lite format without quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model_2)
tflite_model = converter.convert()
# Save the model to disk
open("sine_model.tflite", "wb").write(tflite_model)
# Convert the model to the TensorFlow Lite format with quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model_2)
# Indicate that we want to perform the default optimizations,
# which includes quantization
converter.optimizations = [tf.lite.Optimize.DEFAULT]
# Define a generator function that provides our test data's x values
# as a representative dataset, and tell the converter to use it
def representative_dataset_generator():
    """Yield calibration samples for post-training quantization.

    The TFLite converter expects each sample as a list containing a
    2-D float32 array, so every scalar x value is wrapped accordingly.
    """
    for sample in x_test:
        # ndmin=2 promotes the scalar to shape (1, 1).
        wrapped = np.array(sample, dtype=np.float32, ndmin=2)
        yield [wrapped]
converter.representative_dataset = representative_dataset_generator
# Convert the model
# Notebook cell: quantize a TFLite model and compare it against the original
# Keras predictions. `converter`, `tf`, `np`, `plt`, `x_test`, `y_test`, and
# `predictions` are defined in earlier cells (not shown here) — TODO confirm.
tflite_model = converter.convert()
# Save the model to disk
open("sine_model_quantized.tflite", "wb").write(tflite_model)
# Instantiate an interpreter for each model
sine_model = tf.lite.Interpreter('sine_model.tflite')
sine_model_quantized = tf.lite.Interpreter('sine_model_quantized.tflite')
# Allocate memory for each model
sine_model.allocate_tensors()
sine_model_quantized.allocate_tensors()
# Get indexes of the input and output tensors
sine_model_input_index = sine_model.get_input_details()[0]["index"]
sine_model_output_index = sine_model.get_output_details()[0]["index"]
sine_model_quantized_input_index = sine_model_quantized.get_input_details()[0]["index"]
sine_model_quantized_output_index = sine_model_quantized.get_output_details()[0]["index"]
# Create arrays to store the results
sine_model_predictions = []
sine_model_quantized_predictions = []
# Run each model's interpreter for each value and store the results in arrays
# (one invoke() per sample — fine for a demo, slow for large inputs)
for x_value in x_test:
    # Create a 2D tensor wrapping the current x value
    x_value_tensor = tf.convert_to_tensor([[x_value]], dtype=np.float32)
    # Write the value to the input tensor
    sine_model.set_tensor(sine_model_input_index, x_value_tensor)
    # Run inference
    sine_model.invoke()
    # Read the prediction from the output tensor
    sine_model_predictions.append(
        sine_model.get_tensor(sine_model_output_index)[0])
    # Do the same for the quantized model
    sine_model_quantized.set_tensor(sine_model_quantized_input_index, x_value_tensor)
    sine_model_quantized.invoke()
    sine_model_quantized_predictions.append(
        sine_model_quantized.get_tensor(sine_model_quantized_output_index)[0])
# See how they line up with the data
plt.clf()
plt.title('Comparison of various models against actual values')
plt.plot(x_test, y_test, 'bo', label='Actual')
plt.plot(x_test, predictions, 'ro', label='Original predictions')
plt.plot(x_test, sine_model_predictions, 'bx', label='Lite predictions')
plt.plot(x_test, sine_model_quantized_predictions, 'gx', label='Lite quantized predictions')
plt.legend()
plt.show()
# Compare the on-disk sizes of the two converted models
import os
basic_model_size = os.path.getsize("sine_model.tflite")
print("Basic model is %d bytes" % basic_model_size)
quantized_model_size = os.path.getsize("sine_model_quantized.tflite")
print("Quantized model is %d bytes" % quantized_model_size)
difference = basic_model_size - quantized_model_size
print("Difference is %d bytes" % difference)
# Install xxd if it is not available
!apt-get -qq install xxd
# Save the file as a C source file
!xxd -i sine_model_quantized.tflite > sine_model_quantized.cc
# Print the source file
!cat sine_model_quantized.cc
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Первое считывание данных (преобразуем удобный для человека формат в формат, который использует модель)
Step2: В следующий раз данные можно считывать уже из батчей
Step3: Создаем объект модели
Step4: Создаем словарь и инициализируем модель с его помощью
Step5: Строим модель. Offline - проходит по всей коллекции много раз. Удобно, когда коллекция маленькая.
Step6: Необходимое число итераций можно отслеживать по графику перплексии. Когда она перестала меняться, модель сошлась.
Step7: Выведем топы слов
Step8: Давайте посмотрим также на разреженность матриц
Step9: В темах много общеупотребительных слов (так называемой, фоновой лексики). Чтобы этого избежать, будем использовать разреживающий регуляризатор для матрицы фи. Он будет подавлять слова, которые имеют большую частоту во всей коллекции.
Step10: Попробуем менять коэффициент регуляризации
Step11: Обратите внимание, что разреживать модель рекомендуется только после того, как она сошлась без регуляризации.
Step12: Можно попросить матрицы в чистом виде
Step13: Матрица вероятностей тем в документах.
Step14: Если бы у нас были новые батчи, по которым мы не строим модель, а хотим только получить матрицу theta, можно пользоваться методом transform.
|
<ASSISTANT_TASK:>
Python Code:
# Notebook cells: build and regularize an ARTM topic model with BigARTM.
# NOTE(review): the `print x,` statements are Python 2 syntax.
from matplotlib import pyplot as plt
%matplotlib inline
import artm
# First read of the data: convert the human-readable Vowpal Wabbit text file
# into the batch format the model consumes.
batch_vectorizer = artm.BatchVectorizer(data_path="school.txt", data_format="vowpal_wabbit", target_folder="school_batches",
                                        batch_size=100)
# On subsequent runs the batches can be loaded directly.
batch_vectorizer = artm.BatchVectorizer(data_path="school_batches", data_format='batches')
T = 10  # number of topics
model_artm = artm.ARTM(num_topics=T, topic_names=["sbj"+str(i) for i in range(T)], class_ids={"text":1},
                       num_document_passes=1, reuse_theta=True, cache_theta=True, seed=-1)
# the number after each modality name is its weight
dictionary = artm.Dictionary('dictionary')
dictionary.gather(batch_vectorizer.data_path)
# Attach quality scores: perplexity, phi/theta sparsity, top words per topic.
model_artm.scores.add(artm.PerplexityScore(name='PerplexityScore',
                                           use_unigram_document_model=False,
                                           dictionary='dictionary'))
model_artm.scores.add(artm.SparsityPhiScore(name='SparsityPhiScore', class_id="text"))
model_artm.scores.add(artm.SparsityThetaScore(name='SparsityThetaScore'))
model_artm.scores.add(artm.TopTokensScore(name="top_words", num_tokens=15, class_id="text"))
model_artm.initialize('dictionary')
# Offline fit: multiple passes over the whole (small) collection.
model_artm.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=40)
# Convergence can be tracked via the perplexity curve.
plt.plot(model_artm.score_tracker["PerplexityScore"].value)
# Print the top words of each topic.
for topic_name in model_artm.topic_names:
    print topic_name + ': ',
    tokens = model_artm.score_tracker["top_words"].last_tokens
    for word in tokens[topic_name]:
        print word,
    print
# Sparsity of the phi and theta matrices.
print model_artm.score_tracker["SparsityPhiScore"].last_value
print model_artm.score_tracker["SparsityThetaScore"].last_value
# Add a sparsing regularizer on phi to suppress common (background) words.
model_artm.regularizers.add(artm.SmoothSparsePhiRegularizer(name='SparsePhi', tau=-100, dictionary=dictionary))
# if you want to apply the regularizer only to some modalities, list them in the class_ids parameter: class_ids=["text"]
model_artm.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=15)
for topic_name in model_artm.topic_names:
    print topic_name + ': ',
    tokens = model_artm.score_tracker["top_words"].last_tokens
    for word in tokens[topic_name]:
        print word,
    print
print model_artm.score_tracker["SparsityPhiScore"].last_value
print model_artm.score_tracker["SparsityThetaScore"].last_value
# Increase the regularization coefficient (recommended only after the model
# has already converged without regularization).
model_artm.regularizers['SparsePhi'].tau = -5*1e4
model_artm.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=15)
for topic_name in model_artm.topic_names:
    print topic_name + ': ',
    tokens = model_artm.score_tracker["top_words"].last_tokens
    for word in tokens[topic_name]:
        print word,
    print
# check sparsity once more
print model_artm.score_tracker["SparsityPhiScore"].last_value
print model_artm.score_tracker["SparsityThetaScore"].last_value
# Persist / restore the model.
model_artm.save("my_model")
model_artm.load("my_model")
# Raw phi matrix: word-in-topic probabilities.
phi = model_artm.get_phi()
phi
# Theta matrix: topic-in-document probabilities.
theta = model_artm.get_theta()
theta
# For new batches not used in training, transform() yields their theta.
theta_test = model_artm.transform(batch_vectorizer)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Topographic grid
Step2: Initalizing and running a FlowDirector
Step3: The FlowDirectors are method specific, so if you want to do D8 flow directing, you must use FlowDirectorD8.
Step4: We can illustrate the receiver node FlowDirectionSteepest has assigned to each donor node using a plotting function in Landlab called drainage_plot. We will see many of these plots in this tutorial so let's take a moment to walk through the plot and what it contains.
Step5: Reassuringly we can see that the flow is being sent from high elevations at the top of the grid to low elevations at the bottom of the grid. We can also see that all of the arrows are yellow, and thus all of the flow is traveling on these links.
Step6: For this ramp, the steepest slope is down a link, and not a diagonal, so FlowDirectorD8 gives the same result as FlowDirectorSteepest.
Step7: Similarly, while there is more than one node below each core node, there is only one node that is connected by a link and not a diagonal. Thus FlowDirectorMFD with the keyword diagonals set to True provides the same results as FlowDirectorSteepest and FlowDirectorD8
Step8: When we permit flow along diagonal connections between nodes and flow to all downhill nodes, we see a difference in the directing pattern on this simple ramp. The flow is partitioned between the three downhill nodes, and there is more flow being sent to along the link as compared with the diagonals (the links are a lighter color blue than the diagonals). We can print out the proportion of flow, which is stored as a (number of nodes, number of possible receivers) array for FlowDirectorMFD. We will just print one node for clarity. The order of the receivers is in line with the standard Landlab ordering. The array will list the receiver proportions in the following order
Step9: As we can see, the flow is moving on the link to the South, the South West and South East. There is about a third of the flow moving on the links and diagonals, though more flow is partitioned to the link. This makes sense as the link is steeper than the diagonals.
Step10: In the FlowDirectorMFD we can partition flow based on slope (default) or based on the square root of the slope. This option gives the solution to a steady kinematic wave. We can see that the proportion of flow going on the links and diagonals is more similar.
Step11: In FlowDirectorDINF flow is partitioned to two nodes based on steepness of the eight triangular facets surrounding each node. The partitioning is based on the relation between the link and diagonal slope that form the edge of the facet and the slope of the facet itself. When one of the facet edges has the same slope as the facet, as is the case in this ramp example, all of the flow is partitioned along that edge.
Step12: Just as in the raster example, FlowDirectorSteepest sends flow from donor nodes to only one receiver node. You may ask why the flow is going to the southwest while the links are just as steep to the southeast. When two links have exactly the same slope, in route-to-one methods like FlowDirectorSteepest and FlowDirectorD8 landlab will choose the first steepest link it encounters while searching through the links. When diagonal links are also considered in FlowDirectorD8 the diagonals are examined after the links. Thus if a link and a diagonal leading from one node downhill had exactly the same slope, the flow would be sent down the link.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# import plotting tools
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib as mpl
# import numpy
import numpy as np
# import necessary landlab components
from landlab import RasterModelGrid, HexModelGrid
from landlab.components import (FlowDirectorD8, FlowDirectorDINF,
FlowDirectorMFD, FlowDirectorSteepest)
# import landlab plotting functionality
from landlab.plot.drainage_plot import drainage_plot
# create a plotting routine to make a 3d plot of our surface.
def surf_plot(mg,
              surface='topographic__elevation',
              title='Surface plot of topography'):
    """Render a Landlab raster grid's node field as a shaded 3D surface.

    Parameters
    ----------
    mg : RasterModelGrid
        Grid whose node field is plotted (must be rectangular so the field
        can be reshaped to ``mg.shape``).
    surface : str
        Name of the at-node field to plot.
    title : str
        Figure title.
    """
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    # Plot the surface.
    Z = mg.at_node[surface].reshape(mg.shape)
    # Grayscale facet colors normalized to the elevation range.
    # NOTE(review): divides by (max - min), so a perfectly flat surface
    # would raise a ZeroDivisionError / produce NaNs — confirm inputs vary.
    color = cm.gray((Z - Z.min()) / (Z.max() - Z.min()))
    surf = ax.plot_surface(mg.x_of_node.reshape(mg.shape),
                           mg.y_of_node.reshape(mg.shape),
                           Z,
                           rstride=1,
                           cstride=1,
                           facecolors=color,
                           linewidth=0.,
                           antialiased=False)
    ax.view_init(elev=35, azim=-120)
    ax.set_xlabel('X axis')
    ax.set_ylabel('Y axis')
    ax.set_zlabel('Elevation')
    plt.title(title)
    plt.show()
# --- Grid 1: a simple ramp (elevation increases with y) ---
mg1 = RasterModelGrid((10, 10))
_ = mg1.add_field('topographic__elevation', mg1.y_of_node, at='node')
surf_plot(mg1, title='Grid 1: A basic ramp')
# Route-to-one, links only (steepest descent).
fd = FlowDirectorSteepest(mg1, 'topographic__elevation')
fd.run_one_step()
receivers = fd.direct_flow()
print(receivers)
plt.figure()
drainage_plot(mg1, title='Basic Ramp using FlowDirectorSteepest')
# D8: links plus diagonals (fresh grid so results are independent).
mg1 = RasterModelGrid((10, 10))
_ = mg1.add_field('topographic__elevation', mg1.y_of_node, at='node')
fd = FlowDirectorD8(mg1)
fd.run_one_step()
plt.figure()
drainage_plot(mg1, title='Basic Ramp using FlowDirectorD8')
# Multiple-flow-direction, links only.
mg1 = RasterModelGrid((10, 10))
_ = mg1.add_field('topographic__elevation', mg1.y_of_node, at='node')
fd = FlowDirectorMFD(mg1,
                     diagonals=False)  # diagonals=False is the default option
fd.run_one_step()
plt.figure()
drainage_plot(mg1, title='Basic Ramp using FlowDirectorMFD without diagonals')
# Multiple-flow-direction including diagonals: flow is partitioned downhill.
mg1 = RasterModelGrid((10, 10))
_ = mg1.add_field('topographic__elevation', mg1.y_of_node, at='node')
fd = FlowDirectorMFD(mg1, diagonals=True)
fd.run_one_step()
plt.figure()
drainage_plot(mg1, title='Basic Ramp using FlowDirectorMFD with diagonals')
# Proportions of flow sent to each potential receiver of node 15.
print(mg1.at_node['flow__receiver_proportions'][15])
# Same, but partition by sqrt(slope) (steady kinematic wave solution).
mg1 = RasterModelGrid((10, 10))
_ = mg1.add_field('topographic__elevation', mg1.y_of_node, at='node')
fd = FlowDirectorMFD(mg1,
                     diagonals=True,
                     partition_method='square_root_of_slope')
fd.run_one_step()
plt.figure()
drainage_plot(
    mg1,
    title=
    'Basic Ramp using FlowDirectorMFD with diagonals and steady kinematic wave partitioning'
)
# D-infinity: flow partitioned between at most two receivers per node.
mg1 = RasterModelGrid((10, 10))
_ = mg1.add_field('topographic__elevation', mg1.y_of_node, at='node')
fd = FlowDirectorDINF(mg1)
fd.run_one_step()
plt.figure()
drainage_plot(mg1, title='Basic Ramp using FlowDirectorDINF')
# --- Same ramp on a hexagonal grid ---
hmg = HexModelGrid((9, 5))
_ = hmg.add_field('topographic__elevation', hmg.y_of_node, at='node')
fd = FlowDirectorSteepest(hmg)
fd.run_one_step()
plt.figure()
drainage_plot(hmg)
hmg = HexModelGrid((9, 5))
_ = hmg.add_field('topographic__elevation', hmg.y_of_node, at='node')
fd = FlowDirectorMFD(hmg)
fd.run_one_step()
plt.figure()
drainage_plot(hmg)
# Highest core-node elevation on the hex grid.
hmg.at_node["topographic__elevation"][hmg.core_nodes].max()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Since we're not about to do anything fancy with this environment, we will initialize it with the provided create_environment function (further details on this here).
Step2: At the start of each session, the simulator resets the environment, which triggers a resampling of the user. The reset call generates our initial observation.
Step3: Observations
Step4: We are thus presented with a corpus of 15 documents (num_candidates), each represented by their topic and their production quality score. Note, though, that the user's affinity is not an observable quantity.
Step5: Slates
Step6: The action space gym specification is also provided by the environment.
Step7: When the first slate is available, the simulator will run the environment and generate a new observation, along with a reward for the agent.
Step8: The main job of the agent is to produce a valid slate for each step of the simulation.
Step9: A RecSim agent inherits from AbstractEpisodicRecommenderAgent. Required arguments (which RecSim will pass to the agent at simulation time) for the agent's init are the observation_space and action_space. We can use them to validate whether the environment meets the preconditions for the agent's operation.
Step10: This agent will statically recommend the first K documents of the corpus. For reasons that will become clear soon, we'll also have it print the observation.
Step11: Design
Step12: A hierarchical agent layer is instantiated in a smilar way to usual agents, except that it takes in a constructor for a base agent, that is, an agent whose abstract action it can interpret. In the case of cluster click stats, it will not do any post-processing of the abstract action, that is, it simply relays the action of the base agent to the environment. This implies that the base agent will need to provide a full slate.
Step13: Observe how the 'user' field of the observation dictionary (as printed from within the static agent's step function) now has a new key 'sufficient_statistics', whereas the old user observation (which is vacuous) went under the 'raw_observation' key. This is done to avoid naming conflicts.
Step15: To instantiate an abstract bandit, we must present a list of base agents. In our case, we will have one base agent for each cluster. That agent simply retrieves the documents of that cluster from the corpus and sorts them according to perceived quality.
Step16: We will now instantiate one GreedyClusterAgent for each cluster.
Step17: We can now instantiate our cluster bandit as a combination of ClusterClickStats, AbstractClickBandit, and GreedyClusterAgent
Step18: Our ClusterBandit is ready to use!
|
<ASSISTANT_TASK:>
Python Code:
# @title Install
!pip install --upgrade --no-cache-dir recsim
# @title Imports
# Generic imports
import functools
from gym import spaces
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# RecSim imports
from recsim import agent
from recsim import document
from recsim import user
from recsim.choice_model import MultinomialLogitChoiceModel
from recsim.simulator import environment
from recsim.simulator import recsim_gym
from recsim.simulator import runner_lib
from recsim.environments import interest_exploration
# Environment configuration: slates of 2 drawn from 15 candidate documents,
# with the document corpus resampled at every step.
env_config = {'slate_size': 2,
              'seed': 0,
              'num_candidates': 15,
              'resample_documents': True}
ie_environment = interest_exploration.create_environment(env_config)
# reset() resamples the user and returns the initial observation.
initial_observation = ie_environment.reset()
print('User Observable Features')
print(initial_observation['user'])
print('User Response')
print(initial_observation['response'])
print('Document Observable Features')
for doc_id, doc_features in initial_observation['doc'].items():
    print('ID:', doc_id, 'features:', doc_features)
# Gym space specifications for each observation component.
print('Document observation space')
for key, space in ie_environment.observation_space['doc'].spaces.items():
    print(key, ':', space)
print('Response observation space')
print(ie_environment.observation_space['response'])
print('User observation space')
print(ie_environment.observation_space['user'])
# A slate is a list of positions into the candidate corpus.
slate = [0, 1]
for slate_doc in slate:
    print(list(initial_observation['doc'].items())[slate_doc])
# Action-space spec, then one environment step with the slate.
ie_environment.action_space
observation, reward, done, _ = ie_environment.step(slate)
from recsim.agent import AbstractEpisodicRecommenderAgent
class StaticAgent(AbstractEpisodicRecommenderAgent):
    """Agent that always recommends the first `slate_size` corpus documents.

    It also prints each observation it receives, for demonstration purposes.
    """

    def __init__(self, observation_space, action_space):
        # Check if document corpus is large enough.
        if len(observation_space['doc'].spaces) < len(action_space.nvec):
            raise RuntimeError('Slate size larger than size of the corpus.')
        super(StaticAgent, self).__init__(action_space)

    def step(self, reward, observation):
        # The reward is ignored; the slate is always [0, 1, ..., K-1].
        print(observation)
        return list(range(self._slate_size))
def create_agent(sess, environment, eval_mode, summary_writer=None):
    """Factory passed to the RecSim runner; builds a StaticAgent.

    `sess`, `eval_mode`, and `summary_writer` are accepted only to satisfy
    the runner's factory signature and are not used by this simple agent.
    """
    del sess, eval_mode, summary_writer  # unused
    obs_space = environment.observation_space
    act_space = environment.action_space
    return StaticAgent(obs_space, act_space)
# Evaluation runner (not executed here; run_experiment() stays commented out).
tmp_base_dir = '/tmp/recsim/'
runner = runner_lib.EvalRunner(
    base_dir=tmp_base_dir,
    create_agent_fn=create_agent,
    env=ie_environment,
    max_eval_episodes=1,
    max_steps_per_episode=5,
    test_mode=True)
# We won't run this, but we totally could
# runner.run_experiment()
from recsim.agents.layers.cluster_click_statistics import ClusterClickStatsLayer
# Plain static agent: sees only the raw user observation.
static_agent = StaticAgent(ie_environment.observation_space,
                           ie_environment.action_space)
static_agent.step(reward, observation)
# The same base agent wrapped in a hierarchical layer that injects
# per-cluster click sufficient statistics into the user observation.
cluster_static_agent = ClusterClickStatsLayer(StaticAgent,
                                              ie_environment.observation_space,
                                              ie_environment.action_space)
cluster_static_agent.step(reward, observation)
from recsim.agents.layers.abstract_click_bandit import AbstractClickBanditLayer
class GreedyClusterAgent(agent.AbstractEpisodicRecommenderAgent):
    """Simple agent sorting all documents of a topic according to quality.

    Given an observation, it collects the corpus positions of all documents
    belonging to its cluster and returns them ordered by descending quality.
    """

    def __init__(self, observation_space, action_space, cluster_id, **kwargs):
        """Builds the agent.

        Args:
            observation_space: unused; accepted for interface compatibility.
            action_space: gym space describing valid slates.
            cluster_id: the topic/cluster this agent is responsible for.
            **kwargs: ignored extra arguments from the hierarchical layer.
        """
        del observation_space
        super(GreedyClusterAgent, self).__init__(action_space)
        self._cluster_id = cluster_id

    def step(self, reward, observation):
        """Return positions of this cluster's documents, best quality first."""
        del reward  # the quality-greedy policy ignores the reward signal
        my_docs = []
        my_doc_quality = []
        for i, doc in enumerate(observation['doc'].values()):
            if doc['cluster_id'] == self._cluster_id:
                my_docs.append(i)
                my_doc_quality.append(doc['quality'])
        if not my_docs:
            # No documents from this cluster in the current corpus.
            return []
        # argsort ascending, then reversed -> descending quality order.
        sorted_indices = np.argsort(my_doc_quality)[::-1]
        return list(np.array(my_docs)[sorted_indices])
# Read the number of clusters from the document observation space
# (every document exposes the same 'cluster_id' space, so any one works).
num_topics = list(ie_environment.observation_space.spaces['doc']
                  .spaces.values())[0].spaces['cluster_id'].n
# One GreedyClusterAgent per topic cluster, each acting as a bandit arm.
base_agent_ctors = [
    functools.partial(GreedyClusterAgent, cluster_id=i)
    for i in range(num_topics)
]
# Assemble: abstract click bandit over the per-cluster arms, fed with
# click sufficient statistics by the ClusterClickStats layer.
bandit_ctor = functools.partial(AbstractClickBanditLayer,
                                arm_base_agent_ctors=base_agent_ctors)
cluster_bandit = ClusterClickStatsLayer(bandit_ctor,
                                        ie_environment.observation_space,
                                        ie_environment.action_space)
# Run one episode start and show which documents the bandit picked.
observation0 = ie_environment.reset()
slate = cluster_bandit.begin_episode(observation0)
print("Cluster bandit slate 0:")
doc_list = list(observation0['doc'].values())
for doc_position in slate:
    print(doc_list[doc_position])
<END_TASK>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.